1 /* 2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx 3 * 4 * based on qla2x00t.c code: 5 * 6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net> 7 * Copyright (C) 2004 - 2005 Leonid Stoljar 8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us> 9 * Copyright (C) 2006 - 2010 ID7 Ltd. 10 * 11 * Forward port and refactoring to modern qla2xxx and target/configfs 12 * 13 * Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org> 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License 17 * as published by the Free Software Foundation, version 2 18 * of the License. 19 * 20 * This program is distributed in the hope that it will be useful, 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * GNU General Public License for more details. 24 */ 25 26 #include <linux/module.h> 27 #include <linux/init.h> 28 #include <linux/types.h> 29 #include <linux/blkdev.h> 30 #include <linux/interrupt.h> 31 #include <linux/pci.h> 32 #include <linux/delay.h> 33 #include <linux/list.h> 34 #include <linux/workqueue.h> 35 #include <asm/unaligned.h> 36 #include <scsi/scsi.h> 37 #include <scsi/scsi_host.h> 38 #include <scsi/scsi_tcq.h> 39 #include <target/target_core_base.h> 40 #include <target/target_core_fabric.h> 41 42 #include "qla_def.h" 43 #include "qla_target.h" 44 45 static int ql2xtgt_tape_enable; 46 module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR); 47 MODULE_PARM_DESC(ql2xtgt_tape_enable, 48 "Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER."); 49 50 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED; 51 module_param(qlini_mode, charp, S_IRUGO); 52 MODULE_PARM_DESC(qlini_mode, 53 "Determines when initiator mode will be enabled. Possible values: " 54 "\"exclusive\" - initiator mode will be enabled on load, " 55 "disabled on enabling target mode and then on disabling target mode " 56 "enabled back; " 57 "\"disabled\" - initiator mode will never be enabled; " 58 "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated " 59 "when ready " 60 "\"enabled\" (default) - initiator mode will always stay enabled."); 61 62 static int ql_dm_tgt_ex_pct = 0; 63 module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR); 64 MODULE_PARM_DESC(ql_dm_tgt_ex_pct, 65 "For Dual Mode (qlini_mode=dual), this parameter determines " 66 "the percentage of exchanges/cmds FW will allocate resources " 67 "for Target mode."); 68 69 int ql2xuctrlirq = 1; 70 module_param(ql2xuctrlirq, int, 0644); 71 MODULE_PARM_DESC(ql2xuctrlirq, 72 "User to control IRQ placement via smp_affinity." 73 "Valid with qlini_mode=disabled." 74 "1(default): enable"); 75 76 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 77 78 static int qla_sam_status = SAM_STAT_BUSY; 79 static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */ 80 81 /* 82 * From scsi/fc/fc_fcp.h 83 */ 84 enum fcp_resp_rsp_codes { 85 FCP_TMF_CMPL = 0, 86 FCP_DATA_LEN_INVALID = 1, 87 FCP_CMND_FIELDS_INVALID = 2, 88 FCP_DATA_PARAM_MISMATCH = 3, 89 FCP_TMF_REJECTED = 4, 90 FCP_TMF_FAILED = 5, 91 FCP_TMF_INVALID_LUN = 9, 92 }; 93 94 /* 95 * fc_pri_ta from scsi/fc/fc_fcp.h 96 */ 97 #define FCP_PTA_SIMPLE 0 /* simple task attribute */ 98 #define FCP_PTA_HEADQ 1 /* head of queue task attribute */ 99 #define FCP_PTA_ORDERED 2 /* ordered task attribute */ 100 #define FCP_PTA_ACA 4 /* auto. contingent allegiance */ 101 #define FCP_PTA_MASK 7 /* mask for task attribute field */ 102 #define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */ 103 #define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */ 104 105 /* 106 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which 107 * must be called under HW lock and could unlock/lock it inside. 108 * It isn't an issue, since in the current implementation on the time when 109 * those functions are called: 110 * 111 * - Either context is IRQ and only IRQ handler can modify HW data, 112 * including rings related fields, 113 * 114 * - Or access to target mode variables from struct qla_tgt doesn't 115 * cross those functions boundaries, except tgt_stop, which 116 * additionally protected by irq_cmd_count. 117 */ 118 /* Predefs for callbacks handed to qla2xxx LLD */ 119 static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha, 120 struct atio_from_isp *pkt, uint8_t); 121 static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp, 122 response_t *pkt); 123 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 124 int fn, void *iocb, int flags); 125 static void qlt_send_term_exchange(struct qla_qpair *, struct qla_tgt_cmd 126 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); 127 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 128 struct atio_from_isp *atio, uint16_t status, int qfull); 129 static void qlt_disable_vha(struct scsi_qla_host *vha); 130 static void qlt_clear_tgt_db(struct qla_tgt *tgt); 131 static void qlt_send_notify_ack(struct qla_qpair *qpair, 132 struct imm_ntfy_from_isp *ntfy, 133 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 134 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); 135 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 136 struct imm_ntfy_from_isp *imm, int ha_locked); 137 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, 138 fc_port_t *fcport, bool local); 139 void qlt_unreg_sess(struct fc_port *sess); 140 static void qlt_24xx_handle_abts(struct scsi_qla_host *, 141 struct abts_recv_from_24xx *); 142 static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *, 143 uint16_t); 144 145 /* 146 * Global Variables 147 */ 148 static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; 149 struct kmem_cache *qla_tgt_plogi_cachep; 150 static mempool_t *qla_tgt_mgmt_cmd_mempool; 151 static struct workqueue_struct *qla_tgt_wq; 152 static DEFINE_MUTEX(qla_tgt_mutex); 153 static LIST_HEAD(qla_tgt_glist); 154 155 static const char *prot_op_str(u32 prot_op) 156 { 157 switch (prot_op) { 158 case TARGET_PROT_NORMAL: return "NORMAL"; 159 case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; 160 case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; 161 case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; 162 case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; 163 case TARGET_PROT_DIN_PASS: return "DIN_PASS"; 164 case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; 165 default: return "UNKNOWN"; 166 } 167 } 168 169 /* This API intentionally takes dest as a parameter, rather than returning 170 * int value to avoid caller forgetting to issue wmb() after the store */ 171 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) 172 { 173 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); 174 *dest = atomic_inc_return(&base_vha->generation_tick); 175 /* memory barrier */ 176 wmb(); 177 } 178 179 /* Might release hw lock, then reaquire!! */ 180 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) 181 { 182 /* Send marker if required */ 183 if (unlikely(vha->marker_needed != 0)) { 184 int rc = qla2x00_issue_marker(vha, vha_locked); 185 if (rc != QLA_SUCCESS) { 186 ql_dbg(ql_dbg_tgt, vha, 0xe03d, 187 "qla_target(%d): issue_marker() failed\n", 188 vha->vp_idx); 189 } 190 return rc; 191 } 192 return QLA_SUCCESS; 193 } 194 195 static inline 196 struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, 197 uint8_t *d_id) 198 { 199 struct scsi_qla_host *host; 200 uint32_t key = 0; 201 202 if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && 203 (vha->d_id.b.al_pa == d_id[2])) 204 return vha; 205 206 key = (uint32_t)d_id[0] << 16; 207 key |= (uint32_t)d_id[1] << 8; 208 key |= (uint32_t)d_id[2]; 209 210 host = btree_lookup32(&vha->hw->tgt.host_map, key); 211 if (!host) 212 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, 213 "Unable to find host %06x\n", key); 214 215 return host; 216 } 217 218 static inline 219 struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, 220 uint16_t vp_idx) 221 { 222 struct qla_hw_data *ha = vha->hw; 223 224 if (vha->vp_idx == vp_idx) 225 return vha; 226 227 BUG_ON(ha->tgt.tgt_vp_map == NULL); 228 if (likely(test_bit(vp_idx, ha->vp_idx_map))) 229 return ha->tgt.tgt_vp_map[vp_idx].vha; 230 231 return NULL; 232 } 233 234 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha) 235 { 236 unsigned long flags; 237 238 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 239 240 vha->hw->tgt.num_pend_cmds++; 241 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds) 242 vha->qla_stats.stat_max_pend_cmds = 243 vha->hw->tgt.num_pend_cmds; 244 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 245 } 246 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha) 247 { 248 unsigned long flags; 249 250 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 251 vha->hw->tgt.num_pend_cmds--; 252 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 253 } 254 255 256 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha, 257 struct atio_from_isp *atio, uint8_t ha_locked) 258 { 259 struct qla_tgt_sess_op *u; 260 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 261 unsigned long flags; 262 263 if (tgt->tgt_stop) { 264 ql_dbg(ql_dbg_async, vha, 0x502c, 265 "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped", 266 vha->vp_idx); 267 goto out_term; 268 } 269 270 u = kzalloc(sizeof(*u), GFP_ATOMIC); 271 if (u == NULL) 272 goto out_term; 273 274 u->vha = vha; 275 memcpy(&u->atio, atio, sizeof(*atio)); 276 INIT_LIST_HEAD(&u->cmd_list); 277 278 spin_lock_irqsave(&vha->cmd_list_lock, flags); 279 list_add_tail(&u->cmd_list, &vha->unknown_atio_list); 280 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 281 282 schedule_delayed_work(&vha->unknown_atio_work, 1); 283 284 out: 285 return; 286 287 out_term: 288 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0); 289 goto out; 290 } 291 292 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, 293 uint8_t ha_locked) 294 { 295 struct qla_tgt_sess_op *u, *t; 296 scsi_qla_host_t *host; 297 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 298 unsigned long flags; 299 uint8_t queued = 0; 300 301 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) { 302 if (u->aborted) { 303 ql_dbg(ql_dbg_async, vha, 0x502e, 304 "Freeing unknown %s %p, because of Abort\n", 305 "ATIO_TYPE7", u); 306 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 307 &u->atio, ha_locked, 0); 308 goto abort; 309 } 310 311 host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); 312 if (host != NULL) { 313 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, 314 "Requeuing unknown ATIO_TYPE7 %p\n", u); 315 qlt_24xx_atio_pkt(host, &u->atio, ha_locked); 316 } else if (tgt->tgt_stop) { 317 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, 318 "Freeing unknown %s %p, because tgt is being stopped\n", 319 "ATIO_TYPE7", u); 320 qlt_send_term_exchange(vha->hw->base_qpair, NULL, 321 &u->atio, ha_locked, 0); 322 } else { 323 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, 324 "Reschedule u %p, vha %p, host %p\n", u, vha, host); 325 if (!queued) { 326 queued = 1; 327 schedule_delayed_work(&vha->unknown_atio_work, 328 1); 329 } 330 continue; 331 } 332 333 abort: 334 spin_lock_irqsave(&vha->cmd_list_lock, flags); 335 list_del(&u->cmd_list); 336 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 337 kfree(u); 338 } 339 } 340 341 void qlt_unknown_atio_work_fn(struct work_struct *work) 342 { 343 struct scsi_qla_host *vha = container_of(to_delayed_work(work), 344 struct scsi_qla_host, unknown_atio_work); 345 346 qlt_try_to_dequeue_unknown_atios(vha, 0); 347 } 348 349 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 350 struct atio_from_isp *atio, uint8_t ha_locked) 351 { 352 ql_dbg(ql_dbg_tgt, vha, 0xe072, 353 "%s: qla_target(%d): type %x ox_id %04x\n", 354 __func__, vha->vp_idx, atio->u.raw.entry_type, 355 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 356 357 switch (atio->u.raw.entry_type) { 358 case ATIO_TYPE7: 359 { 360 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, 361 atio->u.isp24.fcp_hdr.d_id); 362 if (unlikely(NULL == host)) { 363 ql_dbg(ql_dbg_tgt, vha, 0xe03e, 364 "qla_target(%d): Received ATIO_TYPE7 " 365 "with unknown d_id %x:%x:%x\n", vha->vp_idx, 366 atio->u.isp24.fcp_hdr.d_id[0], 367 atio->u.isp24.fcp_hdr.d_id[1], 368 atio->u.isp24.fcp_hdr.d_id[2]); 369 370 371 qlt_queue_unknown_atio(vha, atio, ha_locked); 372 break; 373 } 374 if (unlikely(!list_empty(&vha->unknown_atio_list))) 375 qlt_try_to_dequeue_unknown_atios(vha, ha_locked); 376 377 qlt_24xx_atio_pkt(host, atio, ha_locked); 378 break; 379 } 380 381 case IMMED_NOTIFY_TYPE: 382 { 383 struct scsi_qla_host *host = vha; 384 struct imm_ntfy_from_isp *entry = 385 (struct imm_ntfy_from_isp *)atio; 386 387 qlt_issue_marker(vha, ha_locked); 388 389 if ((entry->u.isp24.vp_index != 0xFF) && 390 (entry->u.isp24.nport_handle != 0xFFFF)) { 391 host = qlt_find_host_by_vp_idx(vha, 392 entry->u.isp24.vp_index); 393 if (unlikely(!host)) { 394 ql_dbg(ql_dbg_tgt, vha, 0xe03f, 395 "qla_target(%d): Received " 396 "ATIO (IMMED_NOTIFY_TYPE) " 397 "with unknown vp_index %d\n", 398 vha->vp_idx, entry->u.isp24.vp_index); 399 break; 400 } 401 } 402 qlt_24xx_atio_pkt(host, atio, ha_locked); 403 break; 404 } 405 406 case VP_RPT_ID_IOCB_TYPE: 407 qla24xx_report_id_acquisition(vha, 408 (struct vp_rpt_id_entry_24xx *)atio); 409 break; 410 411 case ABTS_RECV_24XX: 412 { 413 struct abts_recv_from_24xx *entry = 414 (struct abts_recv_from_24xx *)atio; 415 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 416 entry->vp_index); 417 unsigned long flags; 418 419 if (unlikely(!host)) { 420 ql_dbg(ql_dbg_tgt, vha, 0xe00a, 421 "qla_target(%d): Response pkt (ABTS_RECV_24XX) " 422 "received, with unknown vp_index %d\n", 423 vha->vp_idx, entry->vp_index); 424 break; 425 } 426 if (!ha_locked) 427 spin_lock_irqsave(&host->hw->hardware_lock, flags); 428 qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); 429 if (!ha_locked) 430 spin_unlock_irqrestore(&host->hw->hardware_lock, flags); 431 break; 432 } 433 434 /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ 435 436 default: 437 ql_dbg(ql_dbg_tgt, vha, 0xe040, 438 "qla_target(%d): Received unknown ATIO atio " 439 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 440 break; 441 } 442 443 return false; 444 } 445 446 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, 447 struct rsp_que *rsp, response_t *pkt) 448 { 449 switch (pkt->entry_type) { 450 case CTIO_CRC2: 451 ql_dbg(ql_dbg_tgt, vha, 0xe073, 452 "qla_target(%d):%s: CRC2 Response pkt\n", 453 vha->vp_idx, __func__); 454 /* fall through */ 455 case CTIO_TYPE7: 456 { 457 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 458 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 459 entry->vp_index); 460 if (unlikely(!host)) { 461 ql_dbg(ql_dbg_tgt, vha, 0xe041, 462 "qla_target(%d): Response pkt (CTIO_TYPE7) " 463 "received, with unknown vp_index %d\n", 464 vha->vp_idx, entry->vp_index); 465 break; 466 } 467 qlt_response_pkt(host, rsp, pkt); 468 break; 469 } 470 471 case IMMED_NOTIFY_TYPE: 472 { 473 struct scsi_qla_host *host = vha; 474 struct imm_ntfy_from_isp *entry = 475 (struct imm_ntfy_from_isp *)pkt; 476 477 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); 478 if (unlikely(!host)) { 479 ql_dbg(ql_dbg_tgt, vha, 0xe042, 480 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " 481 "received, with unknown vp_index %d\n", 482 vha->vp_idx, entry->u.isp24.vp_index); 483 break; 484 } 485 qlt_response_pkt(host, rsp, pkt); 486 break; 487 } 488 489 case NOTIFY_ACK_TYPE: 490 { 491 struct scsi_qla_host *host = vha; 492 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 493 494 if (0xFF != entry->u.isp24.vp_index) { 495 host = qlt_find_host_by_vp_idx(vha, 496 entry->u.isp24.vp_index); 497 if (unlikely(!host)) { 498 ql_dbg(ql_dbg_tgt, vha, 0xe043, 499 "qla_target(%d): Response " 500 "pkt (NOTIFY_ACK_TYPE) " 501 "received, with unknown " 502 "vp_index %d\n", vha->vp_idx, 503 entry->u.isp24.vp_index); 504 break; 505 } 506 } 507 qlt_response_pkt(host, rsp, pkt); 508 break; 509 } 510 511 case ABTS_RECV_24XX: 512 { 513 struct abts_recv_from_24xx *entry = 514 (struct abts_recv_from_24xx *)pkt; 515 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 516 entry->vp_index); 517 if (unlikely(!host)) { 518 ql_dbg(ql_dbg_tgt, vha, 0xe044, 519 "qla_target(%d): Response pkt " 520 "(ABTS_RECV_24XX) received, with unknown " 521 "vp_index %d\n", vha->vp_idx, entry->vp_index); 522 break; 523 } 524 qlt_response_pkt(host, rsp, pkt); 525 break; 526 } 527 528 case ABTS_RESP_24XX: 529 { 530 struct abts_resp_to_24xx *entry = 531 (struct abts_resp_to_24xx *)pkt; 532 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, 533 entry->vp_index); 534 if (unlikely(!host)) { 535 ql_dbg(ql_dbg_tgt, vha, 0xe045, 536 "qla_target(%d): Response pkt " 537 "(ABTS_RECV_24XX) received, with unknown " 538 "vp_index %d\n", vha->vp_idx, entry->vp_index); 539 break; 540 } 541 qlt_response_pkt(host, rsp, pkt); 542 break; 543 } 544 545 default: 546 qlt_response_pkt(vha, rsp, pkt); 547 break; 548 } 549 550 } 551 552 /* 553 * All qlt_plogi_ack_t operations are protected by hardware_lock 554 */ 555 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, 556 struct imm_ntfy_from_isp *ntfy, int type) 557 { 558 struct qla_work_evt *e; 559 e = qla2x00_alloc_work(vha, QLA_EVT_NACK); 560 if (!e) 561 return QLA_FUNCTION_FAILED; 562 563 e->u.nack.fcport = fcport; 564 e->u.nack.type = type; 565 memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp)); 566 return qla2x00_post_work(vha, e); 567 } 568 569 static 570 void qla2x00_async_nack_sp_done(void *s, int res) 571 { 572 struct srb *sp = (struct srb *)s; 573 struct scsi_qla_host *vha = sp->vha; 574 unsigned long flags; 575 576 ql_dbg(ql_dbg_disc, vha, 0x20f2, 577 "Async done-%s res %x %8phC type %d\n", 578 sp->name, res, sp->fcport->port_name, sp->type); 579 580 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 581 sp->fcport->flags &= ~FCF_ASYNC_SENT; 582 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 583 584 switch (sp->type) { 585 case SRB_NACK_PLOGI: 586 sp->fcport->login_gen++; 587 sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; 588 sp->fcport->logout_on_delete = 1; 589 sp->fcport->plogi_nack_done_deadline = jiffies + HZ; 590 sp->fcport->send_els_logo = 0; 591 break; 592 593 case SRB_NACK_PRLI: 594 sp->fcport->fw_login_state = DSC_LS_PRLI_COMP; 595 sp->fcport->deleted = 0; 596 sp->fcport->send_els_logo = 0; 597 598 if (!sp->fcport->login_succ && 599 !IS_SW_RESV_ADDR(sp->fcport->d_id)) { 600 sp->fcport->login_succ = 1; 601 602 vha->fcport_count++; 603 604 ql_dbg(ql_dbg_disc, vha, 0x20f3, 605 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 606 __func__, __LINE__, 607 sp->fcport->port_name, 608 vha->fcport_count); 609 sp->fcport->disc_state = DSC_UPD_FCPORT; 610 qla24xx_post_upd_fcport_work(vha, sp->fcport); 611 } else { 612 sp->fcport->login_retry = 0; 613 sp->fcport->disc_state = DSC_LOGIN_COMPLETE; 614 sp->fcport->deleted = 0; 615 sp->fcport->logout_on_delete = 1; 616 } 617 break; 618 619 case SRB_NACK_LOGO: 620 sp->fcport->login_gen++; 621 sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; 622 qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE); 623 break; 624 } 625 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 626 627 sp->free(sp); 628 } 629 630 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, 631 struct imm_ntfy_from_isp *ntfy, int type) 632 { 633 int rval = QLA_FUNCTION_FAILED; 634 srb_t *sp; 635 char *c = NULL; 636 637 fcport->flags |= FCF_ASYNC_SENT; 638 switch (type) { 639 case SRB_NACK_PLOGI: 640 fcport->fw_login_state = DSC_LS_PLOGI_PEND; 641 c = "PLOGI"; 642 break; 643 case SRB_NACK_PRLI: 644 fcport->fw_login_state = DSC_LS_PRLI_PEND; 645 fcport->deleted = 0; 646 c = "PRLI"; 647 break; 648 case SRB_NACK_LOGO: 649 fcport->fw_login_state = DSC_LS_LOGO_PEND; 650 c = "LOGO"; 651 break; 652 } 653 654 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); 655 if (!sp) 656 goto done; 657 658 sp->type = type; 659 sp->name = "nack"; 660 661 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; 662 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); 663 664 sp->u.iocb_cmd.u.nack.ntfy = ntfy; 665 sp->done = qla2x00_async_nack_sp_done; 666 667 rval = qla2x00_start_sp(sp); 668 if (rval != QLA_SUCCESS) 669 goto done_free_sp; 670 671 ql_dbg(ql_dbg_disc, vha, 0x20f4, 672 "Async-%s %8phC hndl %x %s\n", 673 sp->name, fcport->port_name, sp->handle, c); 674 675 return rval; 676 677 done_free_sp: 678 sp->free(sp); 679 done: 680 fcport->flags &= ~FCF_ASYNC_SENT; 681 return rval; 682 } 683 684 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) 685 { 686 fc_port_t *t; 687 unsigned long flags; 688 689 switch (e->u.nack.type) { 690 case SRB_NACK_PRLI: 691 mutex_lock(&vha->vha_tgt.tgt_mutex); 692 t = qlt_create_sess(vha, e->u.nack.fcport, 0); 693 mutex_unlock(&vha->vha_tgt.tgt_mutex); 694 if (t) { 695 ql_log(ql_log_info, vha, 0xd034, 696 "%s create sess success %p", __func__, t); 697 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 698 /* create sess has an extra kref */ 699 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); 700 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 701 } 702 break; 703 } 704 qla24xx_async_notify_ack(vha, e->u.nack.fcport, 705 (struct imm_ntfy_from_isp*)e->u.nack.iocb, e->u.nack.type); 706 } 707 708 void qla24xx_delete_sess_fn(struct work_struct *work) 709 { 710 fc_port_t *fcport = container_of(work, struct fc_port, del_work); 711 struct qla_hw_data *ha = fcport->vha->hw; 712 unsigned long flags; 713 714 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 715 716 if (fcport->se_sess) { 717 ha->tgt.tgt_ops->shutdown_sess(fcport); 718 ha->tgt.tgt_ops->put_sess(fcport); 719 } else { 720 qlt_unreg_sess(fcport); 721 } 722 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 723 } 724 725 /* 726 * Called from qla2x00_reg_remote_port() 727 */ 728 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 729 { 730 struct qla_hw_data *ha = vha->hw; 731 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 732 struct fc_port *sess = fcport; 733 unsigned long flags; 734 735 if (!vha->hw->tgt.tgt_ops) 736 return; 737 738 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 739 if (tgt->tgt_stop) { 740 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 741 return; 742 } 743 744 if (fcport->disc_state == DSC_DELETE_PEND) { 745 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 746 return; 747 } 748 749 if (!sess->se_sess) { 750 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 751 752 mutex_lock(&vha->vha_tgt.tgt_mutex); 753 sess = qlt_create_sess(vha, fcport, false); 754 mutex_unlock(&vha->vha_tgt.tgt_mutex); 755 756 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 757 } else { 758 if (fcport->fw_login_state == DSC_LS_PRLI_COMP) { 759 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 760 return; 761 } 762 763 if (!kref_get_unless_zero(&sess->sess_kref)) { 764 ql_dbg(ql_dbg_disc, vha, 0x2107, 765 "%s: kref_get fail sess %8phC \n", 766 __func__, sess->port_name); 767 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 768 return; 769 } 770 771 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c, 772 "qla_target(%u): %ssession for port %8phC " 773 "(loop ID %d) reappeared\n", vha->vp_idx, 774 sess->local ? "local " : "", sess->port_name, sess->loop_id); 775 776 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, 777 "Reappeared sess %p\n", sess); 778 779 ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, 780 fcport->loop_id, 781 (fcport->flags & FCF_CONF_COMP_SUPPORTED)); 782 } 783 784 if (sess && sess->local) { 785 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d, 786 "qla_target(%u): local session for " 787 "port %8phC (loop ID %d) became global\n", vha->vp_idx, 788 fcport->port_name, sess->loop_id); 789 sess->local = 0; 790 } 791 ha->tgt.tgt_ops->put_sess(sess); 792 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 793 } 794 795 /* 796 * This is a zero-base ref-counting solution, since hardware_lock 797 * guarantees that ref_count is not modified concurrently. 798 * Upon successful return content of iocb is undefined 799 */ 800 static struct qlt_plogi_ack_t * 801 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, 802 struct imm_ntfy_from_isp *iocb) 803 { 804 struct qlt_plogi_ack_t *pla; 805 806 list_for_each_entry(pla, &vha->plogi_ack_list, list) { 807 if (pla->id.b24 == id->b24) { 808 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, 809 "%s %d %8phC Term INOT due to new INOT", 810 __func__, __LINE__, 811 pla->iocb.u.isp24.port_name); 812 qlt_send_term_imm_notif(vha, &pla->iocb, 1); 813 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 814 return pla; 815 } 816 } 817 818 pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC); 819 if (!pla) { 820 ql_dbg(ql_dbg_async, vha, 0x5088, 821 "qla_target(%d): Allocation of plogi_ack failed\n", 822 vha->vp_idx); 823 return NULL; 824 } 825 826 memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); 827 pla->id = *id; 828 list_add_tail(&pla->list, &vha->plogi_ack_list); 829 830 return pla; 831 } 832 833 void qlt_plogi_ack_unref(struct scsi_qla_host *vha, 834 struct qlt_plogi_ack_t *pla) 835 { 836 struct imm_ntfy_from_isp *iocb = &pla->iocb; 837 port_id_t port_id; 838 uint16_t loop_id; 839 fc_port_t *fcport = pla->fcport; 840 841 BUG_ON(!pla->ref_count); 842 pla->ref_count--; 843 844 if (pla->ref_count) 845 return; 846 847 ql_dbg(ql_dbg_disc, vha, 0x5089, 848 "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x" 849 " exch %#x ox_id %#x\n", iocb->u.isp24.port_name, 850 iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], 851 iocb->u.isp24.port_id[0], 852 le16_to_cpu(iocb->u.isp24.nport_handle), 853 iocb->u.isp24.exchange_address, iocb->ox_id); 854 855 port_id.b.domain = iocb->u.isp24.port_id[2]; 856 port_id.b.area = iocb->u.isp24.port_id[1]; 857 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 858 port_id.b.rsvd_1 = 0; 859 860 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 861 862 fcport->loop_id = loop_id; 863 fcport->d_id = port_id; 864 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 865 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI); 866 else 867 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI); 868 869 list_for_each_entry(fcport, &vha->vp_fcports, list) { 870 if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla) 871 fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 872 if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla) 873 fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 874 } 875 876 list_del(&pla->list); 877 kmem_cache_free(qla_tgt_plogi_cachep, pla); 878 } 879 880 void 881 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, 882 struct fc_port *sess, enum qlt_plogi_link_t link) 883 { 884 struct imm_ntfy_from_isp *iocb = &pla->iocb; 885 /* Inc ref_count first because link might already be pointing at pla */ 886 pla->ref_count++; 887 888 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, 889 "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" 890 " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", 891 sess, link, sess->port_name, 892 iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], 893 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 894 pla->ref_count, pla, link); 895 896 if (link == QLT_PLOGI_LINK_CONFLICT) { 897 switch (sess->disc_state) { 898 case DSC_DELETED: 899 case DSC_DELETE_PEND: 900 pla->ref_count--; 901 return; 902 default: 903 break; 904 } 905 } 906 907 if (sess->plogi_link[link]) 908 qlt_plogi_ack_unref(vha, sess->plogi_link[link]); 909 910 if (link == QLT_PLOGI_LINK_SAME_WWN) 911 pla->fcport = sess; 912 913 sess->plogi_link[link] = pla; 914 } 915 916 typedef struct { 917 /* These fields must be initialized by the caller */ 918 port_id_t id; 919 /* 920 * number of cmds dropped while we were waiting for 921 * initiator to ack LOGO initialize to 1 if LOGO is 922 * triggered by a command, otherwise, to 0 923 */ 924 int cmd_count; 925 926 /* These fields are used by callee */ 927 struct list_head list; 928 } qlt_port_logo_t; 929 930 static void 931 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) 932 { 933 qlt_port_logo_t *tmp; 934 int res; 935 936 mutex_lock(&vha->vha_tgt.tgt_mutex); 937 938 list_for_each_entry(tmp, &vha->logo_list, list) { 939 if (tmp->id.b24 == logo->id.b24) { 940 tmp->cmd_count += logo->cmd_count; 941 mutex_unlock(&vha->vha_tgt.tgt_mutex); 942 return; 943 } 944 } 945 946 list_add_tail(&logo->list, &vha->logo_list); 947 948 mutex_unlock(&vha->vha_tgt.tgt_mutex); 949 950 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id); 951 952 mutex_lock(&vha->vha_tgt.tgt_mutex); 953 list_del(&logo->list); 954 mutex_unlock(&vha->vha_tgt.tgt_mutex); 955 956 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, 957 "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", 958 logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, 959 logo->cmd_count, res); 960 } 961 962 void qlt_free_session_done(struct work_struct *work) 963 { 964 struct fc_port *sess = container_of(work, struct fc_port, 965 free_work); 966 struct qla_tgt *tgt = sess->tgt; 967 struct scsi_qla_host *vha = sess->vha; 968 struct qla_hw_data *ha = vha->hw; 969 unsigned long flags; 970 bool logout_started = false; 971 scsi_qla_host_t *base_vha; 972 struct qlt_plogi_ack_t *own = 973 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 974 975 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, 976 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" 977 " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", 978 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, 979 sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, 980 sess->logout_on_delete, sess->keep_nport_handle, 981 sess->send_els_logo); 982 983 if (!IS_SW_RESV_ADDR(sess->d_id)) { 984 if (sess->send_els_logo) { 985 qlt_port_logo_t logo; 986 987 logo.id = sess->d_id; 988 logo.cmd_count = 0; 989 if (!own) 990 qlt_send_first_logo(vha, &logo); 991 sess->send_els_logo = 0; 992 } 993 994 if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { 995 int rc; 996 997 if (!own || 998 (own && 999 (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) { 1000 rc = qla2x00_post_async_logout_work(vha, sess, 1001 NULL); 1002 if (rc != QLA_SUCCESS) 1003 ql_log(ql_log_warn, vha, 0xf085, 1004 "Schedule logo failed sess %p rc %d\n", 1005 sess, rc); 1006 else 1007 logout_started = true; 1008 } else if (own && (own->iocb.u.isp24.status_subcode == 1009 ELS_PRLI) && ha->flags.rida_fmt2) { 1010 rc = qla2x00_post_async_prlo_work(vha, sess, 1011 NULL); 1012 if (rc != QLA_SUCCESS) 1013 ql_log(ql_log_warn, vha, 0xf085, 1014 "Schedule PRLO failed sess %p rc %d\n", 1015 sess, rc); 1016 else 1017 logout_started = true; 1018 } 1019 } 1020 } 1021 1022 /* 1023 * Release the target session for FC Nexus from fabric module code. 1024 */ 1025 if (sess->se_sess != NULL) 1026 ha->tgt.tgt_ops->free_session(sess); 1027 1028 if (logout_started) { 1029 bool traced = false; 1030 1031 while (!READ_ONCE(sess->logout_completed)) { 1032 if (!traced) { 1033 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, 1034 "%s: waiting for sess %p logout\n", 1035 __func__, sess); 1036 traced = true; 1037 } 1038 msleep(100); 1039 } 1040 1041 ql_dbg(ql_dbg_disc, vha, 0xf087, 1042 "%s: sess %p logout completed\n", __func__, sess); 1043 } 1044 1045 if (sess->logo_ack_needed) { 1046 sess->logo_ack_needed = 0; 1047 qla24xx_async_notify_ack(vha, sess, 1048 (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); 1049 } 1050 1051 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1052 if (sess->se_sess) { 1053 sess->se_sess = NULL; 1054 if (tgt && !IS_SW_RESV_ADDR(sess->d_id)) 1055 tgt->sess_count--; 1056 } 1057 1058 sess->disc_state = DSC_DELETED; 1059 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1060 sess->deleted = QLA_SESS_DELETED; 1061 1062 if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { 1063 vha->fcport_count--; 1064 sess->login_succ = 0; 1065 } 1066 1067 qla2x00_clear_loop_id(sess); 1068 1069 if (sess->conflict) { 1070 sess->conflict->login_pause = 0; 1071 sess->conflict = NULL; 1072 if (!test_bit(UNLOADING, &vha->dpc_flags)) 1073 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1074 } 1075 1076 { 1077 struct qlt_plogi_ack_t *con = 1078 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; 1079 struct imm_ntfy_from_isp *iocb; 1080 own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; 1081 1082 if (con) { 1083 iocb = &con->iocb; 1084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099, 1085 "se_sess %p / sess %p port %8phC is gone," 1086 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n", 1087 sess->se_sess, sess, sess->port_name, 1088 own ? "releasing own PLOGI" : "no own PLOGI pending", 1089 own ? own->ref_count : -1, 1090 iocb->u.isp24.port_name, con->ref_count); 1091 qlt_plogi_ack_unref(vha, con); 1092 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL; 1093 } else { 1094 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a, 1095 "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n", 1096 sess->se_sess, sess, sess->port_name, 1097 own ? "releasing own PLOGI" : 1098 "no own PLOGI pending", 1099 own ? own->ref_count : -1); 1100 } 1101 1102 if (own) { 1103 sess->fw_login_state = DSC_LS_PLOGI_PEND; 1104 qlt_plogi_ack_unref(vha, own); 1105 sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL; 1106 } 1107 } 1108 1109 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1110 1111 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 1112 "Unregistration of sess %p %8phC finished fcp_cnt %d\n", 1113 sess, sess->port_name, vha->fcport_count); 1114 1115 if (tgt && (tgt->sess_count == 0)) 1116 wake_up_all(&tgt->waitQ); 1117 1118 if (vha->fcport_count == 0) 1119 wake_up_all(&vha->fcport_waitQ); 1120 1121 base_vha = pci_get_drvdata(ha->pdev); 1122 1123 sess->free_pending = 0; 1124 1125 if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) 1126 return; 1127 1128 if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { 1129 switch (vha->host->active_mode) { 1130 case MODE_INITIATOR: 1131 case MODE_DUAL: 1132 set_bit(RELOGIN_NEEDED, &vha->dpc_flags); 1133 qla2xxx_wake_dpc(vha); 1134 break; 1135 case MODE_TARGET: 1136 default: 1137 /* no-op */ 1138 break; 1139 } 1140 } 1141 } 1142 1143 /* ha->tgt.sess_lock supposed to be held on entry */ 1144 void qlt_unreg_sess(struct fc_port *sess) 1145 { 1146 struct scsi_qla_host *vha = sess->vha; 1147 unsigned long flags; 1148 1149 ql_dbg(ql_dbg_disc, sess->vha, 0x210a, 1150 "%s sess %p for deletion %8phC\n", 1151 __func__, sess, sess->port_name); 1152 1153 spin_lock_irqsave(&sess->vha->work_lock, flags); 1154 if (sess->free_pending) { 1155 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1156 return; 1157 } 1158 sess->free_pending = 1; 1159 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1160 1161 if (sess->se_sess) 1162 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 1163 1164 qla2x00_mark_device_lost(vha, sess, 0, 0); 1165 1166 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1167 sess->disc_state = DSC_DELETE_PEND; 1168 sess->last_rscn_gen = sess->rscn_gen; 1169 sess->last_login_gen = sess->login_gen; 1170 1171 if (sess->nvme_flag & NVME_FLAG_REGISTERED && 1172 !(sess->nvme_flag & NVME_FLAG_DELETING)) { 1173 sess->nvme_flag |= NVME_FLAG_DELETING; 1174 schedule_work(&sess->nvme_del_work); 1175 } else { 1176 INIT_WORK(&sess->free_work, qlt_free_session_done); 1177 schedule_work(&sess->free_work); 1178 } 1179 } 1180 EXPORT_SYMBOL(qlt_unreg_sess); 1181 1182 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) 1183 { 1184 struct qla_hw_data *ha = vha->hw; 1185 struct fc_port *sess = NULL; 1186 uint16_t loop_id; 1187 int res = 0; 1188 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb; 1189 unsigned long flags; 1190 1191 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 1192 if (loop_id == 0xFFFF) { 1193 /* Global event */ 1194 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 1195 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1196 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); 1197 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1198 } else { 1199 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1200 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 1201 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1202 } 1203 1204 ql_dbg(ql_dbg_tgt, vha, 0xe000, 1205 "Using sess for qla_tgt_reset: %p\n", sess); 1206 if (!sess) { 1207 res = -ESRCH; 1208 return res; 1209 } 1210 1211 ql_dbg(ql_dbg_tgt, vha, 0xe047, 1212 "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " 1213 "loop_id %d)\n", vha->host_no, sess, sess->port_name, 1214 mcmd, loop_id); 1215 1216 return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK); 1217 } 1218 1219 static void qla24xx_chk_fcp_state(struct fc_port *sess) 1220 { 1221 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) { 1222 sess->logout_on_delete = 0; 1223 sess->logo_ack_needed = 0; 1224 sess->fw_login_state = DSC_LS_PORT_UNAVAIL; 1225 sess->scan_state = 0; 1226 } 1227 } 1228 1229 void qlt_schedule_sess_for_deletion(struct fc_port *sess) 1230 { 1231 struct qla_tgt *tgt = sess->tgt; 1232 unsigned long flags; 1233 1234 if (sess->disc_state == DSC_DELETE_PEND) 1235 return; 1236 1237 if (sess->disc_state == DSC_DELETED) { 1238 if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) 1239 wake_up_all(&tgt->waitQ); 1240 if (sess->vha->fcport_count == 0) 1241 wake_up_all(&sess->vha->fcport_waitQ); 1242 1243 if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && 1244 !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) 1245 return; 1246 } 1247 1248 if (sess->deleted == QLA_SESS_DELETED) 1249 sess->logout_on_delete = 0; 1250 1251 spin_lock_irqsave(&sess->vha->work_lock, flags); 1252 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { 1253 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1254 return; 1255 } 1256 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; 1257 spin_unlock_irqrestore(&sess->vha->work_lock, flags); 1258 1259 sess->disc_state = DSC_DELETE_PEND; 1260 1261 qla24xx_chk_fcp_state(sess); 1262 1263 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 1264 "Scheduling sess %p for deletion\n", sess); 1265 1266 INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); 1267 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); 1268 } 1269 1270 static void qlt_clear_tgt_db(struct qla_tgt *tgt) 1271 { 1272 struct fc_port *sess; 1273 scsi_qla_host_t *vha = tgt->vha; 1274 1275 list_for_each_entry(sess, &vha->vp_fcports, list) { 1276 if (sess->se_sess) 1277 qlt_schedule_sess_for_deletion(sess); 1278 } 1279 1280 /* At this point tgt could be already dead */ 1281 } 1282 1283 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, 1284 uint16_t *loop_id) 1285 { 1286 struct qla_hw_data *ha = vha->hw; 1287 dma_addr_t gid_list_dma; 1288 struct gid_list_info *gid_list; 1289 char *id_iter; 1290 int res, rc, i; 1291 uint16_t entries; 1292 1293 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1294 &gid_list_dma, GFP_KERNEL); 1295 if (!gid_list) { 1296 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044, 1297 "qla_target(%d): DMA Alloc failed of %u\n", 1298 vha->vp_idx, qla2x00_gid_list_size(ha)); 1299 return -ENOMEM; 1300 } 1301 1302 /* Get list of logged in devices */ 1303 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); 1304 if (rc != QLA_SUCCESS) { 1305 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, 1306 "qla_target(%d): get_id_list() failed: %x\n", 1307 vha->vp_idx, rc); 1308 res = -EBUSY; 1309 goto out_free_id_list; 1310 } 1311 1312 id_iter = (char *)gid_list; 1313 res = -ENOENT; 1314 for (i = 0; i < entries; i++) { 1315 struct gid_list_info *gid = (struct gid_list_info *)id_iter; 1316 if ((gid->al_pa == s_id[2]) && 1317 (gid->area == s_id[1]) && 1318 (gid->domain == s_id[0])) { 1319 *loop_id = le16_to_cpu(gid->loop_id); 1320 res = 0; 1321 break; 1322 } 1323 id_iter += ha->gid_list_info_size; 1324 } 1325 1326 out_free_id_list: 1327 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), 1328 gid_list, gid_list_dma); 1329 return res; 1330 } 1331 1332 /* 1333 * Adds an extra ref to allow to drop hw lock after adding sess to the list. 1334 * Caller must put it. 1335 */ 1336 static struct fc_port *qlt_create_sess( 1337 struct scsi_qla_host *vha, 1338 fc_port_t *fcport, 1339 bool local) 1340 { 1341 struct qla_hw_data *ha = vha->hw; 1342 struct fc_port *sess = fcport; 1343 unsigned long flags; 1344 1345 if (vha->vha_tgt.qla_tgt->tgt_stop) 1346 return NULL; 1347 1348 if (fcport->se_sess) { 1349 if (!kref_get_unless_zero(&sess->sess_kref)) { 1350 ql_dbg(ql_dbg_disc, vha, 0x20f6, 1351 "%s: kref_get_unless_zero failed for %8phC\n", 1352 __func__, sess->port_name); 1353 return NULL; 1354 } 1355 return fcport; 1356 } 1357 sess->tgt = vha->vha_tgt.qla_tgt; 1358 sess->local = local; 1359 1360 /* 1361 * Under normal circumstances we want to logout from firmware when 1362 * session eventually ends and release corresponding nport handle. 1363 * In the exception cases (e.g. when new PLOGI is waiting) corresponding 1364 * code will adjust these flags as necessary. 1365 */ 1366 sess->logout_on_delete = 1; 1367 sess->keep_nport_handle = 0; 1368 sess->logout_completed = 0; 1369 1370 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha, 1371 &fcport->port_name[0], sess) < 0) { 1372 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015, 1373 "(%d) %8phC check_initiator_node_acl failed\n", 1374 vha->vp_idx, fcport->port_name); 1375 return NULL; 1376 } else { 1377 kref_init(&fcport->sess_kref); 1378 /* 1379 * Take an extra reference to ->sess_kref here to handle 1380 * fc_port access across ->tgt.sess_lock reaquire. 1381 */ 1382 if (!kref_get_unless_zero(&sess->sess_kref)) { 1383 ql_dbg(ql_dbg_disc, vha, 0x20f7, 1384 "%s: kref_get_unless_zero failed for %8phC\n", 1385 __func__, sess->port_name); 1386 return NULL; 1387 } 1388 1389 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1390 if (!IS_SW_RESV_ADDR(sess->d_id)) 1391 vha->vha_tgt.qla_tgt->sess_count++; 1392 1393 qlt_do_generation_tick(vha, &sess->generation); 1394 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1395 } 1396 1397 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 1398 "Adding sess %p se_sess %p to tgt %p sess_count %d\n", 1399 sess, sess->se_sess, vha->vha_tgt.qla_tgt, 1400 vha->vha_tgt.qla_tgt->sess_count); 1401 1402 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 1403 "qla_target(%d): %ssession for wwn %8phC (loop_id %d, " 1404 "s_id %x:%x:%x, confirmed completion %ssupported) added\n", 1405 vha->vp_idx, local ? "local " : "", fcport->port_name, 1406 fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area, 1407 sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not "); 1408 1409 return sess; 1410 } 1411 1412 /* 1413 * max_gen - specifies maximum session generation 1414 * at which this deletion requestion is still valid 1415 */ 1416 void 1417 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) 1418 { 1419 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1420 struct fc_port *sess = fcport; 1421 unsigned long flags; 1422 1423 if (!vha->hw->tgt.tgt_ops) 1424 return; 1425 1426 if (!tgt) 1427 return; 1428 1429 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1430 if (tgt->tgt_stop) { 1431 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1432 return; 1433 } 1434 if (!sess->se_sess) { 1435 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1436 return; 1437 } 1438 1439 if (max_gen - sess->generation < 0) { 1440 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1441 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, 1442 "Ignoring stale deletion request for se_sess %p / sess %p" 1443 " for port %8phC, req_gen %d, sess_gen %d\n", 1444 sess->se_sess, sess, sess->port_name, max_gen, 1445 sess->generation); 1446 return; 1447 } 1448 1449 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); 1450 1451 sess->local = 1; 1452 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 1453 qlt_schedule_sess_for_deletion(sess); 1454 } 1455 1456 static inline int test_tgt_sess_count(struct qla_tgt *tgt) 1457 { 1458 struct qla_hw_data *ha = tgt->ha; 1459 unsigned long flags; 1460 int res; 1461 /* 1462 * We need to protect against race, when tgt is freed before or 1463 * inside wake_up() 1464 */ 1465 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 1466 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, 1467 "tgt %p, sess_count=%d\n", 1468 tgt, tgt->sess_count); 1469 res = (tgt->sess_count == 0); 1470 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 1471 1472 return res; 1473 } 1474 1475 /* Called by tcm_qla2xxx configfs code */ 1476 int qlt_stop_phase1(struct qla_tgt *tgt) 1477 { 1478 struct scsi_qla_host *vha = tgt->vha; 1479 struct qla_hw_data *ha = tgt->ha; 1480 unsigned long flags; 1481 1482 mutex_lock(&qla_tgt_mutex); 1483 if (!vha->fc_vport) { 1484 struct Scsi_Host *sh = vha->host; 1485 struct fc_host_attrs *fc_host = shost_to_fc_host(sh); 1486 bool npiv_vports; 1487 1488 spin_lock_irqsave(sh->host_lock, flags); 1489 npiv_vports = (fc_host->npiv_vports_inuse); 1490 spin_unlock_irqrestore(sh->host_lock, flags); 1491 1492 if (npiv_vports) { 1493 mutex_unlock(&qla_tgt_mutex); 1494 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 1495 "NPIV is in use. Can not stop target\n"); 1496 return -EPERM; 1497 } 1498 } 1499 if (tgt->tgt_stop || tgt->tgt_stopped) { 1500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e, 1501 "Already in tgt->tgt_stop or tgt_stopped state\n"); 1502 mutex_unlock(&qla_tgt_mutex); 1503 return -EPERM; 1504 } 1505 1506 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", 1507 vha->host_no, vha); 1508 /* 1509 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 1510 * Lock is needed, because we still can get an incoming packet. 1511 */ 1512 mutex_lock(&vha->vha_tgt.tgt_mutex); 1513 tgt->tgt_stop = 1; 1514 qlt_clear_tgt_db(tgt); 1515 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1516 mutex_unlock(&qla_tgt_mutex); 1517 1518 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, 1519 "Waiting for sess works (tgt %p)", tgt); 1520 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1521 while (!list_empty(&tgt->sess_works_list)) { 1522 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1523 flush_scheduled_work(); 1524 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1525 } 1526 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1527 1528 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, 1529 "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); 1530 1531 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); 1532 1533 /* Big hammer */ 1534 if (!ha->flags.host_shutting_down && 1535 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))) 1536 qlt_disable_vha(vha); 1537 1538 /* Wait for sessions to clear out (just in case) */ 1539 wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); 1540 return 0; 1541 } 1542 EXPORT_SYMBOL(qlt_stop_phase1); 1543 1544 /* Called by tcm_qla2xxx configfs code */ 1545 void qlt_stop_phase2(struct qla_tgt *tgt) 1546 { 1547 scsi_qla_host_t *vha = tgt->vha; 1548 1549 if (tgt->tgt_stopped) { 1550 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f, 1551 "Already in tgt->tgt_stopped state\n"); 1552 dump_stack(); 1553 return; 1554 } 1555 if (!tgt->tgt_stop) { 1556 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b, 1557 "%s: phase1 stop is not completed\n", __func__); 1558 dump_stack(); 1559 return; 1560 } 1561 1562 mutex_lock(&vha->vha_tgt.tgt_mutex); 1563 tgt->tgt_stop = 0; 1564 tgt->tgt_stopped = 1; 1565 mutex_unlock(&vha->vha_tgt.tgt_mutex); 1566 1567 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", 1568 tgt); 1569 } 1570 EXPORT_SYMBOL(qlt_stop_phase2); 1571 1572 /* Called from qlt_remove_target() -> qla2x00_remove_one() */ 1573 static void qlt_release(struct qla_tgt *tgt) 1574 { 1575 scsi_qla_host_t *vha = tgt->vha; 1576 void *node; 1577 u64 key = 0; 1578 u16 i; 1579 struct qla_qpair_hint *h; 1580 struct qla_hw_data *ha = vha->hw; 1581 1582 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && 1583 !tgt->tgt_stopped) 1584 qlt_stop_phase1(tgt); 1585 1586 if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) 1587 qlt_stop_phase2(tgt); 1588 1589 for (i = 0; i < vha->hw->max_qpairs + 1; i++) { 1590 unsigned long flags; 1591 1592 h = &tgt->qphints[i]; 1593 if (h->qpair) { 1594 spin_lock_irqsave(h->qpair->qp_lock_ptr, flags); 1595 list_del(&h->hint_elem); 1596 spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags); 1597 h->qpair = NULL; 1598 } 1599 } 1600 kfree(tgt->qphints); 1601 mutex_lock(&qla_tgt_mutex); 1602 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 1603 mutex_unlock(&qla_tgt_mutex); 1604 1605 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 1606 btree_remove64(&tgt->lun_qpair_map, key); 1607 1608 btree_destroy64(&tgt->lun_qpair_map); 1609 1610 if (vha->vp_idx) 1611 if (ha->tgt.tgt_ops && 1612 ha->tgt.tgt_ops->remove_target && 1613 vha->vha_tgt.target_lport_ptr) 1614 ha->tgt.tgt_ops->remove_target(vha); 1615 1616 vha->vha_tgt.qla_tgt = NULL; 1617 1618 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, 1619 "Release of tgt %p finished\n", tgt); 1620 1621 kfree(tgt); 1622 } 1623 1624 /* ha->hardware_lock supposed to be held on entry */ 1625 static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, 1626 const void *param, unsigned int param_size) 1627 { 1628 struct qla_tgt_sess_work_param *prm; 1629 unsigned long flags; 1630 1631 prm = kzalloc(sizeof(*prm), GFP_ATOMIC); 1632 if (!prm) { 1633 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050, 1634 "qla_target(%d): Unable to create session " 1635 "work, command will be refused", 0); 1636 return -ENOMEM; 1637 } 1638 1639 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, 1640 "Scheduling work (type %d, prm %p)" 1641 " to find session for param %p (size %d, tgt %p)\n", 1642 type, prm, param, param_size, tgt); 1643 1644 prm->type = type; 1645 memcpy(&prm->tm_iocb, param, param_size); 1646 1647 spin_lock_irqsave(&tgt->sess_work_lock, flags); 1648 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list); 1649 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 1650 1651 schedule_work(&tgt->sess_work); 1652 1653 return 0; 1654 } 1655 1656 /* 1657 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1658 */ 1659 static void qlt_send_notify_ack(struct qla_qpair *qpair, 1660 struct imm_ntfy_from_isp *ntfy, 1661 uint32_t add_flags, uint16_t resp_code, int resp_code_valid, 1662 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan) 1663 { 1664 struct scsi_qla_host *vha = qpair->vha; 1665 struct qla_hw_data *ha = vha->hw; 1666 request_t *pkt; 1667 struct nack_to_isp *nack; 1668 1669 if (!ha->flags.fw_started) 1670 return; 1671 1672 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); 1673 1674 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 1675 if (!pkt) { 1676 ql_dbg(ql_dbg_tgt, vha, 0xe049, 1677 "qla_target(%d): %s failed: unable to allocate " 1678 "request packet\n", vha->vp_idx, __func__); 1679 return; 1680 } 1681 1682 if (vha->vha_tgt.qla_tgt != NULL) 1683 vha->vha_tgt.qla_tgt->notify_ack_expected++; 1684 1685 pkt->entry_type = NOTIFY_ACK_TYPE; 1686 pkt->entry_count = 1; 1687 1688 nack = (struct nack_to_isp *)pkt; 1689 nack->ox_id = ntfy->ox_id; 1690 1691 nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE; 1692 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 1693 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 1694 nack->u.isp24.flags = ntfy->u.isp24.flags & 1695 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 1696 } 1697 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 1698 nack->u.isp24.status = ntfy->u.isp24.status; 1699 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 1700 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 1701 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 1702 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 1703 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 1704 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags); 1705 nack->u.isp24.srr_reject_code = srr_reject_code; 1706 nack->u.isp24.srr_reject_code_expl = srr_explan; 1707 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 1708 1709 ql_dbg(ql_dbg_tgt, vha, 0xe005, 1710 "qla_target(%d): Sending 24xx Notify Ack %d\n", 1711 vha->vp_idx, nack->u.isp24.status); 1712 1713 /* Memory Barrier */ 1714 wmb(); 1715 qla2x00_start_iocbs(vha, qpair->req); 1716 } 1717 1718 /* 1719 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1720 */ 1721 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, 1722 struct abts_recv_from_24xx *abts, uint32_t status, 1723 bool ids_reversed) 1724 { 1725 struct scsi_qla_host *vha = qpair->vha; 1726 struct qla_hw_data *ha = vha->hw; 1727 struct abts_resp_to_24xx *resp; 1728 uint32_t f_ctl; 1729 uint8_t *p; 1730 1731 ql_dbg(ql_dbg_tgt, vha, 0xe006, 1732 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", 1733 ha, abts, status); 1734 1735 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, 1736 NULL); 1737 if (!resp) { 1738 ql_dbg(ql_dbg_tgt, vha, 0xe04a, 1739 "qla_target(%d): %s failed: unable to allocate " 1740 "request packet", vha->vp_idx, __func__); 1741 return; 1742 } 1743 1744 resp->entry_type = ABTS_RESP_24XX; 1745 resp->entry_count = 1; 1746 resp->nport_handle = abts->nport_handle; 1747 resp->vp_index = vha->vp_idx; 1748 resp->sof_type = abts->sof_type; 1749 resp->exchange_address = abts->exchange_address; 1750 resp->fcp_hdr_le = abts->fcp_hdr_le; 1751 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP | 1752 F_CTL_LAST_SEQ | F_CTL_END_SEQ | 1753 F_CTL_SEQ_INITIATIVE); 1754 p = (uint8_t *)&f_ctl; 1755 resp->fcp_hdr_le.f_ctl[0] = *p++; 1756 resp->fcp_hdr_le.f_ctl[1] = *p++; 1757 resp->fcp_hdr_le.f_ctl[2] = *p; 1758 if (ids_reversed) { 1759 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; 1760 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; 1761 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; 1762 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; 1763 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; 1764 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; 1765 } else { 1766 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; 1767 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; 1768 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; 1769 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; 1770 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; 1771 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; 1772 } 1773 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; 1774 if (status == FCP_TMF_CMPL) { 1775 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; 1776 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; 1777 resp->payload.ba_acct.low_seq_cnt = 0x0000; 1778 resp->payload.ba_acct.high_seq_cnt = 0xFFFF; 1779 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; 1780 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; 1781 } else { 1782 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT; 1783 resp->payload.ba_rjt.reason_code = 1784 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM; 1785 /* Other bytes are zero */ 1786 } 1787 1788 vha->vha_tgt.qla_tgt->abts_resp_expected++; 1789 1790 /* Memory Barrier */ 1791 wmb(); 1792 if (qpair->reqq_start_iocbs) 1793 qpair->reqq_start_iocbs(qpair); 1794 else 1795 qla2x00_start_iocbs(vha, qpair->req); 1796 } 1797 1798 /* 1799 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 1800 */ 1801 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, 1802 struct abts_resp_from_24xx_fw *entry) 1803 { 1804 struct ctio7_to_24xx *ctio; 1805 1806 ql_dbg(ql_dbg_tgt, vha, 0xe007, 1807 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw); 1808 1809 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready( 1810 vha->hw->base_qpair, NULL); 1811 if (ctio == NULL) { 1812 ql_dbg(ql_dbg_tgt, vha, 0xe04b, 1813 "qla_target(%d): %s failed: unable to allocate " 1814 "request packet\n", vha->vp_idx, __func__); 1815 return; 1816 } 1817 1818 /* 1819 * We've got on entrance firmware's response on by us generated 1820 * ABTS response. So, in it ID fields are reversed. 1821 */ 1822 1823 ctio->entry_type = CTIO_TYPE7; 1824 ctio->entry_count = 1; 1825 ctio->nport_handle = entry->nport_handle; 1826 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 1827 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 1828 ctio->vp_index = vha->vp_idx; 1829 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; 1830 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; 1831 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; 1832 ctio->exchange_addr = entry->exchange_addr_to_abort; 1833 ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1834 CTIO7_FLAGS_TERMINATE); 1835 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); 1836 1837 /* Memory Barrier */ 1838 wmb(); 1839 qla2x00_start_iocbs(vha, vha->req); 1840 1841 qlt_24xx_send_abts_resp(vha->hw->base_qpair, 1842 (struct abts_recv_from_24xx *)entry, 1843 FCP_TMF_CMPL, true); 1844 } 1845 1846 static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) 1847 { 1848 struct qla_tgt_sess_op *op; 1849 struct qla_tgt_cmd *cmd; 1850 unsigned long flags; 1851 1852 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1853 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1854 if (tag == op->atio.u.isp24.exchange_addr) { 1855 op->aborted = true; 1856 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1857 return 1; 1858 } 1859 } 1860 1861 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1862 if (tag == op->atio.u.isp24.exchange_addr) { 1863 op->aborted = true; 1864 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1865 return 1; 1866 } 1867 } 1868 1869 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1870 if (tag == cmd->atio.u.isp24.exchange_addr) { 1871 cmd->aborted = 1; 1872 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1873 return 1; 1874 } 1875 } 1876 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1877 1878 return 0; 1879 } 1880 1881 /* drop cmds for the given lun 1882 * XXX only looks for cmds on the port through which lun reset was recieved 1883 * XXX does not go through the list of other port (which may have cmds 1884 * for the same lun) 1885 */ 1886 static void abort_cmds_for_lun(struct scsi_qla_host *vha, 1887 u64 lun, uint8_t *s_id) 1888 { 1889 struct qla_tgt_sess_op *op; 1890 struct qla_tgt_cmd *cmd; 1891 uint32_t key; 1892 unsigned long flags; 1893 1894 key = sid_to_key(s_id); 1895 spin_lock_irqsave(&vha->cmd_list_lock, flags); 1896 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 1897 uint32_t op_key; 1898 u64 op_lun; 1899 1900 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1901 op_lun = scsilun_to_int( 1902 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1903 if (op_key == key && op_lun == lun) 1904 op->aborted = true; 1905 } 1906 1907 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 1908 uint32_t op_key; 1909 u64 op_lun; 1910 1911 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 1912 op_lun = scsilun_to_int( 1913 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); 1914 if (op_key == key && op_lun == lun) 1915 op->aborted = true; 1916 } 1917 1918 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 1919 uint32_t cmd_key; 1920 u64 cmd_lun; 1921 1922 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 1923 cmd_lun = scsilun_to_int( 1924 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); 1925 if (cmd_key == key && cmd_lun == lun) 1926 cmd->aborted = 1; 1927 } 1928 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 1929 } 1930 1931 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha, 1932 uint64_t unpacked_lun) 1933 { 1934 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 1935 struct qla_qpair_hint *h = NULL; 1936 1937 if (vha->flags.qpairs_available) { 1938 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun); 1939 if (!h) 1940 h = &tgt->qphints[0]; 1941 } else { 1942 h = &tgt->qphints[0]; 1943 } 1944 1945 return h; 1946 } 1947 1948 static void qlt_do_tmr_work(struct work_struct *work) 1949 { 1950 struct qla_tgt_mgmt_cmd *mcmd = 1951 container_of(work, struct qla_tgt_mgmt_cmd, work); 1952 struct qla_hw_data *ha = mcmd->vha->hw; 1953 int rc = EIO; 1954 uint32_t tag; 1955 unsigned long flags; 1956 1957 switch (mcmd->tmr_func) { 1958 case QLA_TGT_ABTS: 1959 tag = mcmd->orig_iocb.abts.exchange_addr_to_abort; 1960 break; 1961 default: 1962 tag = 0; 1963 break; 1964 } 1965 1966 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun, 1967 mcmd->tmr_func, tag); 1968 1969 if (rc != 0) { 1970 spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags); 1971 switch (mcmd->tmr_func) { 1972 case QLA_TGT_ABTS: 1973 qlt_24xx_send_abts_resp(mcmd->qpair, 1974 &mcmd->orig_iocb.abts, 1975 FCP_TMF_REJECTED, false); 1976 break; 1977 case QLA_TGT_LUN_RESET: 1978 case QLA_TGT_CLEAR_TS: 1979 case QLA_TGT_ABORT_TS: 1980 case QLA_TGT_CLEAR_ACA: 1981 case QLA_TGT_TARGET_RESET: 1982 qlt_send_busy(mcmd->qpair, &mcmd->orig_iocb.atio, 1983 qla_sam_status); 1984 break; 1985 1986 case QLA_TGT_ABORT_ALL: 1987 case QLA_TGT_NEXUS_LOSS_SESS: 1988 case QLA_TGT_NEXUS_LOSS: 1989 qlt_send_notify_ack(mcmd->qpair, 1990 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 1991 break; 1992 } 1993 spin_unlock_irqrestore(mcmd->qpair->qp_lock_ptr, flags); 1994 1995 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052, 1996 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 1997 mcmd->vha->vp_idx, rc); 1998 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 1999 } 2000 } 2001 2002 /* ha->hardware_lock supposed to be held on entry */ 2003 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2004 struct abts_recv_from_24xx *abts, struct fc_port *sess) 2005 { 2006 struct qla_hw_data *ha = vha->hw; 2007 struct qla_tgt_mgmt_cmd *mcmd; 2008 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 2009 2010 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { 2011 /* send TASK_ABORT response immediately */ 2012 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false); 2013 return 0; 2014 } 2015 2016 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 2017 "qla_target(%d): task abort (tag=%d)\n", 2018 vha->vp_idx, abts->exchange_addr_to_abort); 2019 2020 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 2021 if (mcmd == NULL) { 2022 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051, 2023 "qla_target(%d): %s: Allocation of ABORT cmd failed", 2024 vha->vp_idx, __func__); 2025 return -ENOMEM; 2026 } 2027 memset(mcmd, 0, sizeof(*mcmd)); 2028 2029 mcmd->sess = sess; 2030 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); 2031 mcmd->reset_count = ha->base_qpair->chip_reset; 2032 mcmd->tmr_func = QLA_TGT_ABTS; 2033 mcmd->qpair = h->qpair; 2034 mcmd->vha = vha; 2035 2036 /* 2037 * LUN is looked up by target-core internally based on the passed 2038 * abts->exchange_addr_to_abort tag. 2039 */ 2040 mcmd->se_cmd.cpuid = h->cpuid; 2041 2042 if (ha->tgt.tgt_ops->find_cmd_by_tag) { 2043 struct qla_tgt_cmd *abort_cmd; 2044 2045 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, 2046 abts->exchange_addr_to_abort); 2047 if (abort_cmd && abort_cmd->qpair) { 2048 mcmd->qpair = abort_cmd->qpair; 2049 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; 2050 } 2051 } 2052 2053 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 2054 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work); 2055 2056 return 0; 2057 } 2058 2059 /* 2060 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2061 */ 2062 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, 2063 struct abts_recv_from_24xx *abts) 2064 { 2065 struct qla_hw_data *ha = vha->hw; 2066 struct fc_port *sess; 2067 uint32_t tag = abts->exchange_addr_to_abort; 2068 uint8_t s_id[3]; 2069 int rc; 2070 unsigned long flags; 2071 2072 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) { 2073 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053, 2074 "qla_target(%d): ABTS: Abort Sequence not " 2075 "supported\n", vha->vp_idx); 2076 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2077 false); 2078 return; 2079 } 2080 2081 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) { 2082 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010, 2083 "qla_target(%d): ABTS: Unknown Exchange " 2084 "Address received\n", vha->vp_idx); 2085 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2086 false); 2087 return; 2088 } 2089 2090 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, 2091 "qla_target(%d): task abort (s_id=%x:%x:%x, " 2092 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], 2093 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, 2094 le32_to_cpu(abts->fcp_hdr_le.parameter)); 2095 2096 s_id[0] = abts->fcp_hdr_le.s_id[2]; 2097 s_id[1] = abts->fcp_hdr_le.s_id[1]; 2098 s_id[2] = abts->fcp_hdr_le.s_id[0]; 2099 2100 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 2101 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 2102 if (!sess) { 2103 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012, 2104 "qla_target(%d): task abort for non-existent session\n", 2105 vha->vp_idx); 2106 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2107 2108 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2109 false); 2110 return; 2111 } 2112 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 2113 2114 2115 if (sess->deleted) { 2116 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2117 false); 2118 return; 2119 } 2120 2121 rc = __qlt_24xx_handle_abts(vha, abts, sess); 2122 if (rc != 0) { 2123 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 2124 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n", 2125 vha->vp_idx, rc); 2126 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED, 2127 false); 2128 return; 2129 } 2130 } 2131 2132 /* 2133 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2134 */ 2135 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, 2136 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code) 2137 { 2138 struct scsi_qla_host *ha = mcmd->vha; 2139 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 2140 struct ctio7_to_24xx *ctio; 2141 uint16_t temp; 2142 2143 ql_dbg(ql_dbg_tgt, ha, 0xe008, 2144 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 2145 ha, atio, resp_code); 2146 2147 2148 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL); 2149 if (ctio == NULL) { 2150 ql_dbg(ql_dbg_tgt, ha, 0xe04c, 2151 "qla_target(%d): %s failed: unable to allocate " 2152 "request packet\n", ha->vp_idx, __func__); 2153 return; 2154 } 2155 2156 ctio->entry_type = CTIO_TYPE7; 2157 ctio->entry_count = 1; 2158 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 2159 ctio->nport_handle = mcmd->sess->loop_id; 2160 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2161 ctio->vp_index = ha->vp_idx; 2162 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2163 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2164 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2165 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2166 temp = (atio->u.isp24.attr << 9)| 2167 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2168 ctio->u.status1.flags = cpu_to_le16(temp); 2169 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2170 ctio->u.status1.ox_id = cpu_to_le16(temp); 2171 ctio->u.status1.scsi_status = 2172 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 2173 ctio->u.status1.response_len = cpu_to_le16(8); 2174 ctio->u.status1.sense_data[0] = resp_code; 2175 2176 /* Memory Barrier */ 2177 wmb(); 2178 if (qpair->reqq_start_iocbs) 2179 qpair->reqq_start_iocbs(qpair); 2180 else 2181 qla2x00_start_iocbs(ha, qpair->req); 2182 } 2183 2184 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) 2185 { 2186 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 2187 } 2188 EXPORT_SYMBOL(qlt_free_mcmd); 2189 2190 /* 2191 * ha->hardware_lock supposed to be held on entry. Might drop it, then 2192 * reacquire 2193 */ 2194 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 2195 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) 2196 { 2197 struct atio_from_isp *atio = &cmd->atio; 2198 struct ctio7_to_24xx *ctio; 2199 uint16_t temp; 2200 struct scsi_qla_host *vha = cmd->vha; 2201 2202 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, 2203 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " 2204 "sense_key=%02x, asc=%02x, ascq=%02x", 2205 vha, atio, scsi_status, sense_key, asc, ascq); 2206 2207 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); 2208 if (!ctio) { 2209 ql_dbg(ql_dbg_async, vha, 0x3067, 2210 "qla2x00t(%ld): %s failed: unable to allocate request packet", 2211 vha->host_no, __func__); 2212 goto out; 2213 } 2214 2215 ctio->entry_type = CTIO_TYPE7; 2216 ctio->entry_count = 1; 2217 ctio->handle = QLA_TGT_SKIP_HANDLE; 2218 ctio->nport_handle = cmd->sess->loop_id; 2219 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2220 ctio->vp_index = vha->vp_idx; 2221 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2222 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2223 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2224 ctio->exchange_addr = atio->u.isp24.exchange_addr; 2225 temp = (atio->u.isp24.attr << 9) | 2226 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; 2227 ctio->u.status1.flags = cpu_to_le16(temp); 2228 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2229 ctio->u.status1.ox_id = cpu_to_le16(temp); 2230 ctio->u.status1.scsi_status = 2231 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); 2232 ctio->u.status1.response_len = cpu_to_le16(18); 2233 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); 2234 2235 if (ctio->u.status1.residual != 0) 2236 ctio->u.status1.scsi_status |= 2237 cpu_to_le16(SS_RESIDUAL_UNDER); 2238 2239 /* Response code and sense key */ 2240 put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), 2241 (&ctio->u.status1.sense_data)[0]); 2242 /* Additional sense length */ 2243 put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); 2244 /* ASC and ASCQ */ 2245 put_unaligned_le32(((asc << 24) | (ascq << 16)), 2246 (&ctio->u.status1.sense_data)[3]); 2247 2248 /* Memory Barrier */ 2249 wmb(); 2250 2251 if (qpair->reqq_start_iocbs) 2252 qpair->reqq_start_iocbs(qpair); 2253 else 2254 qla2x00_start_iocbs(vha, qpair->req); 2255 2256 out: 2257 return; 2258 } 2259 2260 /* callback from target fabric module code */ 2261 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) 2262 { 2263 struct scsi_qla_host *vha = mcmd->sess->vha; 2264 struct qla_hw_data *ha = vha->hw; 2265 unsigned long flags; 2266 struct qla_qpair *qpair = mcmd->qpair; 2267 2268 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013, 2269 "TM response mcmd (%p) status %#x state %#x", 2270 mcmd, mcmd->fc_tm_rsp, mcmd->flags); 2271 2272 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 2273 2274 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) { 2275 /* 2276 * Either the port is not online or this request was from 2277 * previous life, just abort the processing. 2278 */ 2279 ql_dbg(ql_dbg_async, vha, 0xe100, 2280 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n", 2281 vha->flags.online, qla2x00_reset_active(vha), 2282 mcmd->reset_count, qpair->chip_reset); 2283 ha->tgt.tgt_ops->free_mcmd(mcmd); 2284 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2285 return; 2286 } 2287 2288 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) { 2289 if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2290 ELS_LOGO || 2291 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2292 ELS_PRLO || 2293 mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode == 2294 ELS_TPRLO) { 2295 ql_dbg(ql_dbg_disc, vha, 0x2106, 2296 "TM response logo %phC status %#x state %#x", 2297 mcmd->sess->port_name, mcmd->fc_tm_rsp, 2298 mcmd->flags); 2299 qlt_schedule_sess_for_deletion(mcmd->sess); 2300 } else { 2301 qlt_send_notify_ack(vha->hw->base_qpair, 2302 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); 2303 } 2304 } else { 2305 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) 2306 qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts, 2307 mcmd->fc_tm_rsp, false); 2308 else 2309 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd, 2310 mcmd->fc_tm_rsp); 2311 } 2312 /* 2313 * Make the callback for ->free_mcmd() to queue_work() and invoke 2314 * target_put_sess_cmd() to drop cmd_kref to 1. The final 2315 * target_put_sess_cmd() call will be made from TFO->check_stop_free() 2316 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd 2317 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() -> 2318 * qlt_xmit_tm_rsp() returns here.. 2319 */ 2320 ha->tgt.tgt_ops->free_mcmd(mcmd); 2321 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 2322 } 2323 EXPORT_SYMBOL(qlt_xmit_tm_rsp); 2324 2325 /* No locks */ 2326 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) 2327 { 2328 struct qla_tgt_cmd *cmd = prm->cmd; 2329 2330 BUG_ON(cmd->sg_cnt == 0); 2331 2332 prm->sg = (struct scatterlist *)cmd->sg; 2333 prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg, 2334 cmd->sg_cnt, cmd->dma_data_direction); 2335 if (unlikely(prm->seg_cnt == 0)) 2336 goto out_err; 2337 2338 prm->cmd->sg_mapped = 1; 2339 2340 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { 2341 /* 2342 * If greater than four sg entries then we need to allocate 2343 * the continuation entries 2344 */ 2345 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX) 2346 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - 2347 QLA_TGT_DATASEGS_PER_CMD_24XX, 2348 QLA_TGT_DATASEGS_PER_CONT_24XX); 2349 } else { 2350 /* DIF */ 2351 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2352 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2353 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); 2354 prm->tot_dsds = prm->seg_cnt; 2355 } else 2356 prm->tot_dsds = prm->seg_cnt; 2357 2358 if (cmd->prot_sg_cnt) { 2359 prm->prot_sg = cmd->prot_sg; 2360 prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev, 2361 cmd->prot_sg, cmd->prot_sg_cnt, 2362 cmd->dma_data_direction); 2363 if (unlikely(prm->prot_seg_cnt == 0)) 2364 goto out_err; 2365 2366 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || 2367 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 2368 /* Dif Bundling not support here */ 2369 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, 2370 cmd->blk_sz); 2371 prm->tot_dsds += prm->prot_seg_cnt; 2372 } else 2373 prm->tot_dsds += prm->prot_seg_cnt; 2374 } 2375 } 2376 2377 return 0; 2378 2379 out_err: 2380 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d, 2381 "qla_target(%d): PCI mapping failed: sg_cnt=%d", 2382 0, prm->cmd->sg_cnt); 2383 return -1; 2384 } 2385 2386 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 2387 { 2388 struct qla_hw_data *ha; 2389 struct qla_qpair *qpair; 2390 if (!cmd->sg_mapped) 2391 return; 2392 2393 qpair = cmd->qpair; 2394 2395 pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt, 2396 cmd->dma_data_direction); 2397 cmd->sg_mapped = 0; 2398 2399 if (cmd->prot_sg_cnt) 2400 pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt, 2401 cmd->dma_data_direction); 2402 2403 if (!cmd->ctx) 2404 return; 2405 ha = vha->hw; 2406 if (cmd->ctx_dsd_alloced) 2407 qla2x00_clean_dsd_pool(ha, cmd->ctx); 2408 2409 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); 2410 } 2411 2412 static int qlt_check_reserve_free_req(struct qla_qpair *qpair, 2413 uint32_t req_cnt) 2414 { 2415 uint32_t cnt; 2416 struct req_que *req = qpair->req; 2417 2418 if (req->cnt < (req_cnt + 2)) { 2419 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr : 2420 RD_REG_DWORD_RELAXED(req->req_q_out)); 2421 2422 if (req->ring_index < cnt) 2423 req->cnt = cnt - req->ring_index; 2424 else 2425 req->cnt = req->length - (req->ring_index - cnt); 2426 2427 if (unlikely(req->cnt < (req_cnt + 2))) 2428 return -EAGAIN; 2429 } 2430 2431 req->cnt -= req_cnt; 2432 2433 return 0; 2434 } 2435 2436 /* 2437 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 2438 */ 2439 static inline void *qlt_get_req_pkt(struct req_que *req) 2440 { 2441 /* Adjust ring index. */ 2442 req->ring_index++; 2443 if (req->ring_index == req->length) { 2444 req->ring_index = 0; 2445 req->ring_ptr = req->ring; 2446 } else { 2447 req->ring_ptr++; 2448 } 2449 return (cont_entry_t *)req->ring_ptr; 2450 } 2451 2452 /* ha->hardware_lock supposed to be held on entry */ 2453 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair) 2454 { 2455 uint32_t h; 2456 int index; 2457 uint8_t found = 0; 2458 struct req_que *req = qpair->req; 2459 2460 h = req->current_outstanding_cmd; 2461 2462 for (index = 1; index < req->num_outstanding_cmds; index++) { 2463 h++; 2464 if (h == req->num_outstanding_cmds) 2465 h = 1; 2466 2467 if (h == QLA_TGT_SKIP_HANDLE) 2468 continue; 2469 2470 if (!req->outstanding_cmds[h]) { 2471 found = 1; 2472 break; 2473 } 2474 } 2475 2476 if (found) { 2477 req->current_outstanding_cmd = h; 2478 } else { 2479 ql_dbg(ql_dbg_io, qpair->vha, 0x305b, 2480 "qla_target(%d): Ran out of empty cmd slots\n", 2481 qpair->vha->vp_idx); 2482 h = QLA_TGT_NULL_HANDLE; 2483 } 2484 2485 return h; 2486 } 2487 2488 /* ha->hardware_lock supposed to be held on entry */ 2489 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, 2490 struct qla_tgt_prm *prm) 2491 { 2492 uint32_t h; 2493 struct ctio7_to_24xx *pkt; 2494 struct atio_from_isp *atio = &prm->cmd->atio; 2495 uint16_t temp; 2496 2497 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; 2498 prm->pkt = pkt; 2499 memset(pkt, 0, sizeof(*pkt)); 2500 2501 pkt->entry_type = CTIO_TYPE7; 2502 pkt->entry_count = (uint8_t)prm->req_cnt; 2503 pkt->vp_index = prm->cmd->vp_idx; 2504 2505 h = qlt_make_handle(qpair); 2506 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 2507 /* 2508 * CTIO type 7 from the firmware doesn't provide a way to 2509 * know the initiator's LOOP ID, hence we can't find 2510 * the session and, so, the command. 2511 */ 2512 return -EAGAIN; 2513 } else 2514 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 2515 2516 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 2517 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 2518 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 2519 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 2520 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 2521 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 2522 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 2523 pkt->exchange_addr = atio->u.isp24.exchange_addr; 2524 temp = atio->u.isp24.attr << 9; 2525 pkt->u.status0.flags |= cpu_to_le16(temp); 2526 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 2527 pkt->u.status0.ox_id = cpu_to_le16(temp); 2528 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 2529 2530 return 0; 2531 } 2532 2533 /* 2534 * ha->hardware_lock supposed to be held on entry. We have already made sure 2535 * that there is sufficient amount of request entries to not drop it. 2536 */ 2537 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm) 2538 { 2539 int cnt; 2540 uint32_t *dword_ptr; 2541 2542 /* Build continuation packets */ 2543 while (prm->seg_cnt > 0) { 2544 cont_a64_entry_t *cont_pkt64 = 2545 (cont_a64_entry_t *)qlt_get_req_pkt( 2546 prm->cmd->qpair->req); 2547 2548 /* 2549 * Make sure that from cont_pkt64 none of 2550 * 64-bit specific fields used for 32-bit 2551 * addressing. Cast to (cont_entry_t *) for 2552 * that. 2553 */ 2554 2555 memset(cont_pkt64, 0, sizeof(*cont_pkt64)); 2556 2557 cont_pkt64->entry_count = 1; 2558 cont_pkt64->sys_define = 0; 2559 2560 cont_pkt64->entry_type = CONTINUE_A64_TYPE; 2561 dword_ptr = (uint32_t *)&cont_pkt64->dseg_0_address; 2562 2563 /* Load continuation entry data segments */ 2564 for (cnt = 0; 2565 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt; 2566 cnt++, prm->seg_cnt--) { 2567 *dword_ptr++ = 2568 cpu_to_le32(pci_dma_lo32 2569 (sg_dma_address(prm->sg))); 2570 *dword_ptr++ = cpu_to_le32(pci_dma_hi32 2571 (sg_dma_address(prm->sg))); 2572 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2573 2574 prm->sg = sg_next(prm->sg); 2575 } 2576 } 2577 } 2578 2579 /* 2580 * ha->hardware_lock supposed to be held on entry. We have already made sure 2581 * that there is sufficient amount of request entries to not drop it. 2582 */ 2583 static void qlt_load_data_segments(struct qla_tgt_prm *prm) 2584 { 2585 int cnt; 2586 uint32_t *dword_ptr; 2587 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt; 2588 2589 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen); 2590 2591 /* Setup packet address segment pointer */ 2592 dword_ptr = pkt24->u.status0.dseg_0_address; 2593 2594 /* Set total data segment count */ 2595 if (prm->seg_cnt) 2596 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt); 2597 2598 if (prm->seg_cnt == 0) { 2599 /* No data transfer */ 2600 *dword_ptr++ = 0; 2601 *dword_ptr = 0; 2602 return; 2603 } 2604 2605 /* If scatter gather */ 2606 2607 /* Load command entry data segments */ 2608 for (cnt = 0; 2609 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt; 2610 cnt++, prm->seg_cnt--) { 2611 *dword_ptr++ = 2612 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg))); 2613 2614 *dword_ptr++ = cpu_to_le32(pci_dma_hi32( 2615 sg_dma_address(prm->sg))); 2616 2617 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg)); 2618 2619 prm->sg = sg_next(prm->sg); 2620 } 2621 2622 qlt_load_cont_data_segments(prm); 2623 } 2624 2625 static inline int qlt_has_data(struct qla_tgt_cmd *cmd) 2626 { 2627 return cmd->bufflen > 0; 2628 } 2629 2630 static void qlt_print_dif_err(struct qla_tgt_prm *prm) 2631 { 2632 struct qla_tgt_cmd *cmd; 2633 struct scsi_qla_host *vha; 2634 2635 /* asc 0x10=dif error */ 2636 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { 2637 cmd = prm->cmd; 2638 vha = cmd->vha; 2639 /* ASCQ */ 2640 switch (prm->sense_buffer[13]) { 2641 case 1: 2642 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b, 2643 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2644 "se_cmd=%p tag[%x]", 2645 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2646 cmd->atio.u.isp24.exchange_addr); 2647 break; 2648 case 2: 2649 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c, 2650 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2651 "se_cmd=%p tag[%x]", 2652 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2653 cmd->atio.u.isp24.exchange_addr); 2654 break; 2655 case 3: 2656 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f, 2657 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " 2658 "se_cmd=%p tag[%x]", 2659 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2660 cmd->atio.u.isp24.exchange_addr); 2661 break; 2662 default: 2663 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010, 2664 "BE detected Dif ERR: lba[%llx|%lld] len[%x] " 2665 "se_cmd=%p tag[%x]", 2666 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, 2667 cmd->atio.u.isp24.exchange_addr); 2668 break; 2669 } 2670 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16); 2671 } 2672 } 2673 2674 /* 2675 * Called without ha->hardware_lock held 2676 */ 2677 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, 2678 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status, 2679 uint32_t *full_req_cnt) 2680 { 2681 struct se_cmd *se_cmd = &cmd->se_cmd; 2682 struct qla_qpair *qpair = cmd->qpair; 2683 2684 prm->cmd = cmd; 2685 prm->tgt = cmd->tgt; 2686 prm->pkt = NULL; 2687 prm->rq_result = scsi_status; 2688 prm->sense_buffer = &cmd->sense_buffer[0]; 2689 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER; 2690 prm->sg = NULL; 2691 prm->seg_cnt = -1; 2692 prm->req_cnt = 1; 2693 prm->residual = 0; 2694 prm->add_status_pkt = 0; 2695 prm->prot_sg = NULL; 2696 prm->prot_seg_cnt = 0; 2697 prm->tot_dsds = 0; 2698 2699 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) { 2700 if (qlt_pci_map_calc_cnt(prm) != 0) 2701 return -EAGAIN; 2702 } 2703 2704 *full_req_cnt = prm->req_cnt; 2705 2706 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 2707 prm->residual = se_cmd->residual_count; 2708 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c, 2709 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2710 prm->residual, se_cmd->tag, 2711 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, 2712 cmd->bufflen, prm->rq_result); 2713 prm->rq_result |= SS_RESIDUAL_UNDER; 2714 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 2715 prm->residual = se_cmd->residual_count; 2716 ql_dbg_qp(ql_dbg_io, qpair, 0x305d, 2717 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n", 2718 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ? 2719 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result); 2720 prm->rq_result |= SS_RESIDUAL_OVER; 2721 } 2722 2723 if (xmit_type & QLA_TGT_XMIT_STATUS) { 2724 /* 2725 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be 2726 * ignored in *xmit_response() below 2727 */ 2728 if (qlt_has_data(cmd)) { 2729 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) || 2730 (IS_FWI2_CAPABLE(cmd->vha->hw) && 2731 (prm->rq_result != 0))) { 2732 prm->add_status_pkt = 1; 2733 (*full_req_cnt)++; 2734 } 2735 } 2736 } 2737 2738 return 0; 2739 } 2740 2741 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd, 2742 int sending_sense) 2743 { 2744 if (cmd->qpair->enable_class_2) 2745 return 0; 2746 2747 if (sending_sense) 2748 return cmd->conf_compl_supported; 2749 else 2750 return cmd->qpair->enable_explicit_conf && 2751 cmd->conf_compl_supported; 2752 } 2753 2754 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, 2755 struct qla_tgt_prm *prm) 2756 { 2757 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len, 2758 (uint32_t)sizeof(ctio->u.status1.sense_data)); 2759 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS); 2760 if (qlt_need_explicit_conf(prm->cmd, 0)) { 2761 ctio->u.status0.flags |= cpu_to_le16( 2762 CTIO7_FLAGS_EXPLICIT_CONFORM | 2763 CTIO7_FLAGS_CONFORM_REQ); 2764 } 2765 ctio->u.status0.residual = cpu_to_le32(prm->residual); 2766 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result); 2767 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) { 2768 int i; 2769 2770 if (qlt_need_explicit_conf(prm->cmd, 1)) { 2771 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) { 2772 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017, 2773 "Skipping EXPLICIT_CONFORM and " 2774 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ " 2775 "non GOOD status\n"); 2776 goto skip_explict_conf; 2777 } 2778 ctio->u.status1.flags |= cpu_to_le16( 2779 CTIO7_FLAGS_EXPLICIT_CONFORM | 2780 CTIO7_FLAGS_CONFORM_REQ); 2781 } 2782 skip_explict_conf: 2783 ctio->u.status1.flags &= 2784 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2785 ctio->u.status1.flags |= 2786 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2787 ctio->u.status1.scsi_status |= 2788 cpu_to_le16(SS_SENSE_LEN_VALID); 2789 ctio->u.status1.sense_length = 2790 cpu_to_le16(prm->sense_buffer_len); 2791 for (i = 0; i < prm->sense_buffer_len/4; i++) 2792 ((uint32_t *)ctio->u.status1.sense_data)[i] = 2793 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); 2794 2795 qlt_print_dif_err(prm); 2796 2797 } else { 2798 ctio->u.status1.flags &= 2799 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); 2800 ctio->u.status1.flags |= 2801 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1); 2802 ctio->u.status1.sense_length = 0; 2803 memset(ctio->u.status1.sense_data, 0, 2804 sizeof(ctio->u.status1.sense_data)); 2805 } 2806 2807 /* Sense with len > 24, is it possible ??? */ 2808 } 2809 2810 static inline int 2811 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) 2812 { 2813 switch (se_cmd->prot_op) { 2814 case TARGET_PROT_DOUT_INSERT: 2815 case TARGET_PROT_DIN_STRIP: 2816 if (ql2xenablehba_err_chk >= 1) 2817 return 1; 2818 break; 2819 case TARGET_PROT_DOUT_PASS: 2820 case TARGET_PROT_DIN_PASS: 2821 if (ql2xenablehba_err_chk >= 2) 2822 return 1; 2823 break; 2824 case TARGET_PROT_DIN_INSERT: 2825 case TARGET_PROT_DOUT_STRIP: 2826 return 1; 2827 default: 2828 break; 2829 } 2830 return 0; 2831 } 2832 2833 static inline int 2834 qla_tgt_ref_mask_check(struct se_cmd *se_cmd) 2835 { 2836 switch (se_cmd->prot_op) { 2837 case TARGET_PROT_DIN_INSERT: 2838 case TARGET_PROT_DOUT_INSERT: 2839 case TARGET_PROT_DIN_STRIP: 2840 case TARGET_PROT_DOUT_STRIP: 2841 case TARGET_PROT_DIN_PASS: 2842 case TARGET_PROT_DOUT_PASS: 2843 return 1; 2844 default: 2845 return 0; 2846 } 2847 return 0; 2848 } 2849 2850 /* 2851 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command 2852 */ 2853 static void 2854 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, 2855 uint16_t *pfw_prot_opts) 2856 { 2857 struct se_cmd *se_cmd = &cmd->se_cmd; 2858 uint32_t lba = 0xffffffff & se_cmd->t_task_lba; 2859 scsi_qla_host_t *vha = cmd->tgt->vha; 2860 struct qla_hw_data *ha = vha->hw; 2861 uint32_t t32 = 0; 2862 2863 /* 2864 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 2865 * have been immplemented by TCM, before AppTag is avail. 2866 * Look for modesense_handlers[] 2867 */ 2868 ctx->app_tag = 0; 2869 ctx->app_tag_mask[0] = 0x0; 2870 ctx->app_tag_mask[1] = 0x0; 2871 2872 if (IS_PI_UNINIT_CAPABLE(ha)) { 2873 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 2874 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 2875 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; 2876 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 2877 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 2878 } 2879 2880 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); 2881 2882 switch (se_cmd->prot_type) { 2883 case TARGET_DIF_TYPE0_PROT: 2884 /* 2885 * No check for ql2xenablehba_err_chk, as it 2886 * would be an I/O error if hba tag generation 2887 * is not done. 2888 */ 2889 ctx->ref_tag = cpu_to_le32(lba); 2890 /* enable ALL bytes of the ref tag */ 2891 ctx->ref_tag_mask[0] = 0xff; 2892 ctx->ref_tag_mask[1] = 0xff; 2893 ctx->ref_tag_mask[2] = 0xff; 2894 ctx->ref_tag_mask[3] = 0xff; 2895 break; 2896 case TARGET_DIF_TYPE1_PROT: 2897 /* 2898 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit 2899 * REF tag, and 16 bit app tag. 2900 */ 2901 ctx->ref_tag = cpu_to_le32(lba); 2902 if (!qla_tgt_ref_mask_check(se_cmd) || 2903 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2904 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2905 break; 2906 } 2907 /* enable ALL bytes of the ref tag */ 2908 ctx->ref_tag_mask[0] = 0xff; 2909 ctx->ref_tag_mask[1] = 0xff; 2910 ctx->ref_tag_mask[2] = 0xff; 2911 ctx->ref_tag_mask[3] = 0xff; 2912 break; 2913 case TARGET_DIF_TYPE2_PROT: 2914 /* 2915 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF 2916 * tag has to match LBA in CDB + N 2917 */ 2918 ctx->ref_tag = cpu_to_le32(lba); 2919 if (!qla_tgt_ref_mask_check(se_cmd) || 2920 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { 2921 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2922 break; 2923 } 2924 /* enable ALL bytes of the ref tag */ 2925 ctx->ref_tag_mask[0] = 0xff; 2926 ctx->ref_tag_mask[1] = 0xff; 2927 ctx->ref_tag_mask[2] = 0xff; 2928 ctx->ref_tag_mask[3] = 0xff; 2929 break; 2930 case TARGET_DIF_TYPE3_PROT: 2931 /* For TYPE 3 protection: 16 bit GUARD only */ 2932 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; 2933 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = 2934 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; 2935 break; 2936 } 2937 } 2938 2939 static inline int 2940 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) 2941 { 2942 uint32_t *cur_dsd; 2943 uint32_t transfer_length = 0; 2944 uint32_t data_bytes; 2945 uint32_t dif_bytes; 2946 uint8_t bundling = 1; 2947 struct crc_context *crc_ctx_pkt = NULL; 2948 struct qla_hw_data *ha; 2949 struct ctio_crc2_to_fw *pkt; 2950 dma_addr_t crc_ctx_dma; 2951 uint16_t fw_prot_opts = 0; 2952 struct qla_tgt_cmd *cmd = prm->cmd; 2953 struct se_cmd *se_cmd = &cmd->se_cmd; 2954 uint32_t h; 2955 struct atio_from_isp *atio = &prm->cmd->atio; 2956 struct qla_tc_param tc; 2957 uint16_t t16; 2958 scsi_qla_host_t *vha = cmd->vha; 2959 2960 ha = vha->hw; 2961 2962 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr; 2963 prm->pkt = pkt; 2964 memset(pkt, 0, sizeof(*pkt)); 2965 2966 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, 2967 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", 2968 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, 2969 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); 2970 2971 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || 2972 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) 2973 bundling = 0; 2974 2975 /* Compute dif len and adjust data len to incude protection */ 2976 data_bytes = cmd->bufflen; 2977 dif_bytes = (data_bytes / cmd->blk_sz) * 8; 2978 2979 switch (se_cmd->prot_op) { 2980 case TARGET_PROT_DIN_INSERT: 2981 case TARGET_PROT_DOUT_STRIP: 2982 transfer_length = data_bytes; 2983 if (cmd->prot_sg_cnt) 2984 data_bytes += dif_bytes; 2985 break; 2986 case TARGET_PROT_DIN_STRIP: 2987 case TARGET_PROT_DOUT_INSERT: 2988 case TARGET_PROT_DIN_PASS: 2989 case TARGET_PROT_DOUT_PASS: 2990 transfer_length = data_bytes + dif_bytes; 2991 break; 2992 default: 2993 BUG(); 2994 break; 2995 } 2996 2997 if (!qlt_hba_err_chk_enabled(se_cmd)) 2998 fw_prot_opts |= 0x10; /* Disable Guard tag checking */ 2999 /* HBA error checking enabled */ 3000 else if (IS_PI_UNINIT_CAPABLE(ha)) { 3001 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || 3002 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) 3003 fw_prot_opts |= PO_DIS_VALD_APP_ESC; 3004 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) 3005 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; 3006 } 3007 3008 switch (se_cmd->prot_op) { 3009 case TARGET_PROT_DIN_INSERT: 3010 case TARGET_PROT_DOUT_INSERT: 3011 fw_prot_opts |= PO_MODE_DIF_INSERT; 3012 break; 3013 case TARGET_PROT_DIN_STRIP: 3014 case TARGET_PROT_DOUT_STRIP: 3015 fw_prot_opts |= PO_MODE_DIF_REMOVE; 3016 break; 3017 case TARGET_PROT_DIN_PASS: 3018 case TARGET_PROT_DOUT_PASS: 3019 fw_prot_opts |= PO_MODE_DIF_PASS; 3020 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ 3021 break; 3022 default:/* Normal Request */ 3023 fw_prot_opts |= PO_MODE_DIF_PASS; 3024 break; 3025 } 3026 3027 /* ---- PKT ---- */ 3028 /* Update entry type to indicate Command Type CRC_2 IOCB */ 3029 pkt->entry_type = CTIO_CRC2; 3030 pkt->entry_count = 1; 3031 pkt->vp_index = cmd->vp_idx; 3032 3033 h = qlt_make_handle(qpair); 3034 if (unlikely(h == QLA_TGT_NULL_HANDLE)) { 3035 /* 3036 * CTIO type 7 from the firmware doesn't provide a way to 3037 * know the initiator's LOOP ID, hence we can't find 3038 * the session and, so, the command. 3039 */ 3040 return -EAGAIN; 3041 } else 3042 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; 3043 3044 pkt->handle = MAKE_HANDLE(qpair->req->id, h); 3045 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; 3046 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); 3047 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3048 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3049 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3050 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3051 pkt->exchange_addr = atio->u.isp24.exchange_addr; 3052 3053 /* silence compile warning */ 3054 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3055 pkt->ox_id = cpu_to_le16(t16); 3056 3057 t16 = (atio->u.isp24.attr << 9); 3058 pkt->flags |= cpu_to_le16(t16); 3059 pkt->relative_offset = cpu_to_le32(prm->cmd->offset); 3060 3061 /* Set transfer direction */ 3062 if (cmd->dma_data_direction == DMA_TO_DEVICE) 3063 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN); 3064 else if (cmd->dma_data_direction == DMA_FROM_DEVICE) 3065 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); 3066 3067 pkt->dseg_count = prm->tot_dsds; 3068 /* Fibre channel byte count */ 3069 pkt->transfer_length = cpu_to_le32(transfer_length); 3070 3071 /* ----- CRC context -------- */ 3072 3073 /* Allocate CRC context from global pool */ 3074 crc_ctx_pkt = cmd->ctx = 3075 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); 3076 3077 if (!crc_ctx_pkt) 3078 goto crc_queuing_error; 3079 3080 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; 3081 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); 3082 3083 /* Set handle */ 3084 crc_ctx_pkt->handle = pkt->handle; 3085 3086 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); 3087 3088 pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); 3089 pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); 3090 pkt->crc_context_len = CRC_CONTEXT_LEN_FW; 3091 3092 if (!bundling) { 3093 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; 3094 } else { 3095 /* 3096 * Configure Bundling if we need to fetch interlaving 3097 * protection PCI accesses 3098 */ 3099 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; 3100 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); 3101 crc_ctx_pkt->u.bundling.dseg_count = 3102 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); 3103 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; 3104 } 3105 3106 /* Finish the common fields of CRC pkt */ 3107 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); 3108 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 3109 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 3110 crc_ctx_pkt->guard_seed = cpu_to_le16(0); 3111 3112 memset((uint8_t *)&tc, 0 , sizeof(tc)); 3113 tc.vha = vha; 3114 tc.blk_sz = cmd->blk_sz; 3115 tc.bufflen = cmd->bufflen; 3116 tc.sg = cmd->sg; 3117 tc.prot_sg = cmd->prot_sg; 3118 tc.ctx = crc_ctx_pkt; 3119 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; 3120 3121 /* Walks data segments */ 3122 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); 3123 3124 if (!bundling && prm->prot_seg_cnt) { 3125 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, 3126 prm->tot_dsds, &tc)) 3127 goto crc_queuing_error; 3128 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, 3129 (prm->tot_dsds - prm->prot_seg_cnt), &tc)) 3130 goto crc_queuing_error; 3131 3132 if (bundling && prm->prot_seg_cnt) { 3133 /* Walks dif segments */ 3134 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; 3135 3136 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 3137 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, 3138 prm->prot_seg_cnt, &tc)) 3139 goto crc_queuing_error; 3140 } 3141 return QLA_SUCCESS; 3142 3143 crc_queuing_error: 3144 /* Cleanup will be performed by the caller */ 3145 qpair->req->outstanding_cmds[h] = NULL; 3146 3147 return QLA_FUNCTION_FAILED; 3148 } 3149 3150 /* 3151 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * 3152 * QLA_TGT_XMIT_STATUS for >= 24xx silicon 3153 */ 3154 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, 3155 uint8_t scsi_status) 3156 { 3157 struct scsi_qla_host *vha = cmd->vha; 3158 struct qla_qpair *qpair = cmd->qpair; 3159 struct ctio7_to_24xx *pkt; 3160 struct qla_tgt_prm prm; 3161 uint32_t full_req_cnt = 0; 3162 unsigned long flags = 0; 3163 int res; 3164 3165 if (cmd->sess && cmd->sess->deleted) { 3166 cmd->state = QLA_TGT_STATE_PROCESSED; 3167 if (cmd->sess->logout_completed) 3168 /* no need to terminate. FW already freed exchange. */ 3169 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3170 else 3171 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0); 3172 return 0; 3173 } 3174 3175 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, 3176 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", 3177 (xmit_type & QLA_TGT_XMIT_STATUS) ? 3178 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, 3179 &cmd->se_cmd, qpair->id); 3180 3181 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 3182 &full_req_cnt); 3183 if (unlikely(res != 0)) { 3184 return res; 3185 } 3186 3187 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3188 3189 if (xmit_type == QLA_TGT_XMIT_STATUS) 3190 qpair->tgt_counters.core_qla_snd_status++; 3191 else 3192 qpair->tgt_counters.core_qla_que_buf++; 3193 3194 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) { 3195 /* 3196 * Either the port is not online or this request was from 3197 * previous life, just abort the processing. 3198 */ 3199 cmd->state = QLA_TGT_STATE_PROCESSED; 3200 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3201 ql_dbg_qp(ql_dbg_async, qpair, 0xe101, 3202 "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", 3203 vha->flags.online, qla2x00_reset_active(vha), 3204 cmd->reset_count, qpair->chip_reset); 3205 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3206 return 0; 3207 } 3208 3209 /* Does F/W have an IOCBs for this request */ 3210 res = qlt_check_reserve_free_req(qpair, full_req_cnt); 3211 if (unlikely(res)) 3212 goto out_unmap_unlock; 3213 3214 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) 3215 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3216 else 3217 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3218 if (unlikely(res != 0)) { 3219 qpair->req->cnt += full_req_cnt; 3220 goto out_unmap_unlock; 3221 } 3222 3223 pkt = (struct ctio7_to_24xx *)prm.pkt; 3224 3225 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) { 3226 pkt->u.status0.flags |= 3227 cpu_to_le16(CTIO7_FLAGS_DATA_IN | 3228 CTIO7_FLAGS_STATUS_MODE_0); 3229 3230 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3231 qlt_load_data_segments(&prm); 3232 3233 if (prm.add_status_pkt == 0) { 3234 if (xmit_type & QLA_TGT_XMIT_STATUS) { 3235 pkt->u.status0.scsi_status = 3236 cpu_to_le16(prm.rq_result); 3237 pkt->u.status0.residual = 3238 cpu_to_le32(prm.residual); 3239 pkt->u.status0.flags |= cpu_to_le16( 3240 CTIO7_FLAGS_SEND_STATUS); 3241 if (qlt_need_explicit_conf(cmd, 0)) { 3242 pkt->u.status0.flags |= 3243 cpu_to_le16( 3244 CTIO7_FLAGS_EXPLICIT_CONFORM | 3245 CTIO7_FLAGS_CONFORM_REQ); 3246 } 3247 } 3248 3249 } else { 3250 /* 3251 * We have already made sure that there is sufficient 3252 * amount of request entries to not drop HW lock in 3253 * req_pkt(). 3254 */ 3255 struct ctio7_to_24xx *ctio = 3256 (struct ctio7_to_24xx *)qlt_get_req_pkt( 3257 qpair->req); 3258 3259 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, 3260 "Building additional status packet 0x%p.\n", 3261 ctio); 3262 3263 /* 3264 * T10Dif: ctio_crc2_to_fw overlay ontop of 3265 * ctio7_to_24xx 3266 */ 3267 memcpy(ctio, pkt, sizeof(*ctio)); 3268 /* reset back to CTIO7 */ 3269 ctio->entry_count = 1; 3270 ctio->entry_type = CTIO_TYPE7; 3271 ctio->dseg_count = 0; 3272 ctio->u.status1.flags &= ~cpu_to_le16( 3273 CTIO7_FLAGS_DATA_IN); 3274 3275 /* Real finish is ctio_m1's finish */ 3276 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; 3277 pkt->u.status0.flags |= cpu_to_le16( 3278 CTIO7_FLAGS_DONT_RET_CTIO); 3279 3280 /* qlt_24xx_init_ctio_to_isp will correct 3281 * all neccessary fields that's part of CTIO7. 3282 * There should be no residual of CTIO-CRC2 data. 3283 */ 3284 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, 3285 &prm); 3286 } 3287 } else 3288 qlt_24xx_init_ctio_to_isp(pkt, &prm); 3289 3290 3291 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 3292 cmd->cmd_sent_to_fw = 1; 3293 3294 /* Memory Barrier */ 3295 wmb(); 3296 if (qpair->reqq_start_iocbs) 3297 qpair->reqq_start_iocbs(qpair); 3298 else 3299 qla2x00_start_iocbs(vha, qpair->req); 3300 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3301 3302 return 0; 3303 3304 out_unmap_unlock: 3305 qlt_unmap_sg(vha, cmd); 3306 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3307 3308 return res; 3309 } 3310 EXPORT_SYMBOL(qlt_xmit_response); 3311 3312 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) 3313 { 3314 struct ctio7_to_24xx *pkt; 3315 struct scsi_qla_host *vha = cmd->vha; 3316 struct qla_tgt *tgt = cmd->tgt; 3317 struct qla_tgt_prm prm; 3318 unsigned long flags = 0; 3319 int res = 0; 3320 struct qla_qpair *qpair = cmd->qpair; 3321 3322 memset(&prm, 0, sizeof(prm)); 3323 prm.cmd = cmd; 3324 prm.tgt = tgt; 3325 prm.sg = NULL; 3326 prm.req_cnt = 1; 3327 3328 /* Calculate number of entries and segments required */ 3329 if (qlt_pci_map_calc_cnt(&prm) != 0) 3330 return -EAGAIN; 3331 3332 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || 3333 (cmd->sess && cmd->sess->deleted)) { 3334 /* 3335 * Either the port is not online or this request was from 3336 * previous life, just abort the processing. 3337 */ 3338 cmd->state = QLA_TGT_STATE_NEED_DATA; 3339 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 3340 ql_dbg_qp(ql_dbg_async, qpair, 0xe102, 3341 "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n", 3342 vha->flags.online, qla2x00_reset_active(vha), 3343 cmd->reset_count, qpair->chip_reset); 3344 return 0; 3345 } 3346 3347 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3348 /* Does F/W have an IOCBs for this request */ 3349 res = qlt_check_reserve_free_req(qpair, prm.req_cnt); 3350 if (res != 0) 3351 goto out_unlock_free_unmap; 3352 if (cmd->se_cmd.prot_op) 3353 res = qlt_build_ctio_crc2_pkt(qpair, &prm); 3354 else 3355 res = qlt_24xx_build_ctio_pkt(qpair, &prm); 3356 3357 if (unlikely(res != 0)) { 3358 qpair->req->cnt += prm.req_cnt; 3359 goto out_unlock_free_unmap; 3360 } 3361 3362 pkt = (struct ctio7_to_24xx *)prm.pkt; 3363 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 3364 CTIO7_FLAGS_STATUS_MODE_0); 3365 3366 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) 3367 qlt_load_data_segments(&prm); 3368 3369 cmd->state = QLA_TGT_STATE_NEED_DATA; 3370 cmd->cmd_sent_to_fw = 1; 3371 3372 /* Memory Barrier */ 3373 wmb(); 3374 if (qpair->reqq_start_iocbs) 3375 qpair->reqq_start_iocbs(qpair); 3376 else 3377 qla2x00_start_iocbs(vha, qpair->req); 3378 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3379 3380 return res; 3381 3382 out_unlock_free_unmap: 3383 qlt_unmap_sg(vha, cmd); 3384 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3385 3386 return res; 3387 } 3388 EXPORT_SYMBOL(qlt_rdy_to_xfer); 3389 3390 3391 /* 3392 * it is assumed either hardware_lock or qpair lock is held. 3393 */ 3394 static void 3395 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, 3396 struct ctio_crc_from_fw *sts) 3397 { 3398 uint8_t *ap = &sts->actual_dif[0]; 3399 uint8_t *ep = &sts->expected_dif[0]; 3400 uint64_t lba = cmd->se_cmd.t_task_lba; 3401 uint8_t scsi_status, sense_key, asc, ascq; 3402 unsigned long flags; 3403 struct scsi_qla_host *vha = cmd->vha; 3404 3405 cmd->trc_flags |= TRC_DIF_ERR; 3406 3407 cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); 3408 cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); 3409 cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); 3410 3411 cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); 3412 cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); 3413 cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); 3414 3415 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, 3416 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); 3417 3418 scsi_status = sense_key = asc = ascq = 0; 3419 3420 /* check appl tag */ 3421 if (cmd->e_app_tag != cmd->a_app_tag) { 3422 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, 3423 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3424 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3425 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3426 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3427 cmd->atio.u.isp24.fcp_hdr.ox_id); 3428 3429 cmd->dif_err_code = DIF_ERR_APP; 3430 scsi_status = SAM_STAT_CHECK_CONDITION; 3431 sense_key = ABORTED_COMMAND; 3432 asc = 0x10; 3433 ascq = 0x2; 3434 } 3435 3436 /* check ref tag */ 3437 if (cmd->e_ref_tag != cmd->a_ref_tag) { 3438 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, 3439 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", 3440 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3441 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3442 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3443 cmd->atio.u.isp24.fcp_hdr.ox_id); 3444 3445 cmd->dif_err_code = DIF_ERR_REF; 3446 scsi_status = SAM_STAT_CHECK_CONDITION; 3447 sense_key = ABORTED_COMMAND; 3448 asc = 0x10; 3449 ascq = 0x3; 3450 goto out; 3451 } 3452 3453 /* check guard */ 3454 if (cmd->e_guard != cmd->a_guard) { 3455 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, 3456 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", 3457 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, 3458 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, 3459 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, 3460 cmd->atio.u.isp24.fcp_hdr.ox_id); 3461 3462 cmd->dif_err_code = DIF_ERR_GRD; 3463 scsi_status = SAM_STAT_CHECK_CONDITION; 3464 sense_key = ABORTED_COMMAND; 3465 asc = 0x10; 3466 ascq = 0x1; 3467 } 3468 out: 3469 switch (cmd->state) { 3470 case QLA_TGT_STATE_NEED_DATA: 3471 /* handle_data will load DIF error code */ 3472 cmd->state = QLA_TGT_STATE_DATA_IN; 3473 vha->hw->tgt.tgt_ops->handle_data(cmd); 3474 break; 3475 default: 3476 spin_lock_irqsave(&cmd->cmd_lock, flags); 3477 if (cmd->aborted) { 3478 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3479 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3480 break; 3481 } 3482 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3483 3484 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc, 3485 ascq); 3486 /* assume scsi status gets out on the wire. 3487 * Will not wait for completion. 3488 */ 3489 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3490 break; 3491 } 3492 } 3493 3494 /* If hardware_lock held on entry, might drop it, then reaquire */ 3495 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 3496 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3497 struct imm_ntfy_from_isp *ntfy) 3498 { 3499 struct nack_to_isp *nack; 3500 struct qla_hw_data *ha = vha->hw; 3501 request_t *pkt; 3502 int ret = 0; 3503 3504 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, 3505 "Sending TERM ELS CTIO (ha=%p)\n", ha); 3506 3507 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 3508 if (pkt == NULL) { 3509 ql_dbg(ql_dbg_tgt, vha, 0xe080, 3510 "qla_target(%d): %s failed: unable to allocate " 3511 "request packet\n", vha->vp_idx, __func__); 3512 return -ENOMEM; 3513 } 3514 3515 pkt->entry_type = NOTIFY_ACK_TYPE; 3516 pkt->entry_count = 1; 3517 pkt->handle = QLA_TGT_SKIP_HANDLE; 3518 3519 nack = (struct nack_to_isp *)pkt; 3520 nack->ox_id = ntfy->ox_id; 3521 3522 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; 3523 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { 3524 nack->u.isp24.flags = ntfy->u.isp24.flags & 3525 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); 3526 } 3527 3528 /* terminate */ 3529 nack->u.isp24.flags |= 3530 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); 3531 3532 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; 3533 nack->u.isp24.status = ntfy->u.isp24.status; 3534 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; 3535 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; 3536 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; 3537 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; 3538 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; 3539 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; 3540 3541 qla2x00_start_iocbs(vha, vha->req); 3542 return ret; 3543 } 3544 3545 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, 3546 struct imm_ntfy_from_isp *imm, int ha_locked) 3547 { 3548 unsigned long flags = 0; 3549 int rc; 3550 3551 if (ha_locked) { 3552 rc = __qlt_send_term_imm_notif(vha, imm); 3553 3554 #if 0 /* Todo */ 3555 if (rc == -ENOMEM) 3556 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3557 #else 3558 if (rc) { 3559 } 3560 #endif 3561 goto done; 3562 } 3563 3564 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 3565 rc = __qlt_send_term_imm_notif(vha, imm); 3566 3567 #if 0 /* Todo */ 3568 if (rc == -ENOMEM) 3569 qlt_alloc_qfull_cmd(vha, imm, 0, 0); 3570 #endif 3571 3572 done: 3573 if (!ha_locked) 3574 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 3575 } 3576 3577 /* 3578 * If hardware_lock held on entry, might drop it, then reaquire 3579 * This function sends the appropriate CTIO to ISP 2xxx or 24xx 3580 */ 3581 static int __qlt_send_term_exchange(struct qla_qpair *qpair, 3582 struct qla_tgt_cmd *cmd, 3583 struct atio_from_isp *atio) 3584 { 3585 struct scsi_qla_host *vha = qpair->vha; 3586 struct ctio7_to_24xx *ctio24; 3587 struct qla_hw_data *ha = vha->hw; 3588 request_t *pkt; 3589 int ret = 0; 3590 uint16_t temp; 3591 3592 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 3593 3594 if (cmd) 3595 vha = cmd->vha; 3596 3597 pkt = (request_t *)qla2x00_alloc_iocbs_ready(qpair, NULL); 3598 if (pkt == NULL) { 3599 ql_dbg(ql_dbg_tgt, vha, 0xe050, 3600 "qla_target(%d): %s failed: unable to allocate " 3601 "request packet\n", vha->vp_idx, __func__); 3602 return -ENOMEM; 3603 } 3604 3605 if (cmd != NULL) { 3606 if (cmd->state < QLA_TGT_STATE_PROCESSED) { 3607 ql_dbg(ql_dbg_tgt, vha, 0xe051, 3608 "qla_target(%d): Terminating cmd %p with " 3609 "incorrect state %d\n", vha->vp_idx, cmd, 3610 cmd->state); 3611 } else 3612 ret = 1; 3613 } 3614 3615 qpair->tgt_counters.num_term_xchg_sent++; 3616 pkt->entry_count = 1; 3617 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 3618 3619 ctio24 = (struct ctio7_to_24xx *)pkt; 3620 ctio24->entry_type = CTIO_TYPE7; 3621 ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; 3622 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 3623 ctio24->vp_index = vha->vp_idx; 3624 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 3625 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 3626 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 3627 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 3628 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | 3629 CTIO7_FLAGS_TERMINATE; 3630 ctio24->u.status1.flags = cpu_to_le16(temp); 3631 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); 3632 ctio24->u.status1.ox_id = cpu_to_le16(temp); 3633 3634 /* Memory Barrier */ 3635 wmb(); 3636 if (qpair->reqq_start_iocbs) 3637 qpair->reqq_start_iocbs(qpair); 3638 else 3639 qla2x00_start_iocbs(vha, qpair->req); 3640 return ret; 3641 } 3642 3643 static void qlt_send_term_exchange(struct qla_qpair *qpair, 3644 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, 3645 int ul_abort) 3646 { 3647 struct scsi_qla_host *vha; 3648 unsigned long flags = 0; 3649 int rc; 3650 3651 /* why use different vha? NPIV */ 3652 if (cmd) 3653 vha = cmd->vha; 3654 else 3655 vha = qpair->vha; 3656 3657 if (ha_locked) { 3658 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3659 if (rc == -ENOMEM) 3660 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3661 goto done; 3662 } 3663 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 3664 rc = __qlt_send_term_exchange(qpair, cmd, atio); 3665 if (rc == -ENOMEM) 3666 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3667 3668 done: 3669 if (cmd && !ul_abort && !cmd->aborted) { 3670 if (cmd->sg_mapped) 3671 qlt_unmap_sg(vha, cmd); 3672 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3673 } 3674 3675 if (!ha_locked) 3676 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 3677 3678 return; 3679 } 3680 3681 static void qlt_init_term_exchange(struct scsi_qla_host *vha) 3682 { 3683 struct list_head free_list; 3684 struct qla_tgt_cmd *cmd, *tcmd; 3685 3686 vha->hw->tgt.leak_exchg_thresh_hold = 3687 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT; 3688 3689 cmd = tcmd = NULL; 3690 if (!list_empty(&vha->hw->tgt.q_full_list)) { 3691 INIT_LIST_HEAD(&free_list); 3692 list_splice_init(&vha->hw->tgt.q_full_list, &free_list); 3693 3694 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 3695 list_del(&cmd->cmd_list); 3696 /* This cmd was never sent to TCM. There is no need 3697 * to schedule free or call free_cmd 3698 */ 3699 qlt_free_cmd(cmd); 3700 vha->hw->tgt.num_qfull_cmds_alloc--; 3701 } 3702 } 3703 vha->hw->tgt.num_qfull_cmds_dropped = 0; 3704 } 3705 3706 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) 3707 { 3708 uint32_t total_leaked; 3709 3710 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped; 3711 3712 if (vha->hw->tgt.leak_exchg_thresh_hold && 3713 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) { 3714 3715 ql_dbg(ql_dbg_tgt, vha, 0xe079, 3716 "Chip reset due to exchange starvation: %d/%d.\n", 3717 total_leaked, vha->hw->cur_fw_xcb_count); 3718 3719 if (IS_P3P_TYPE(vha->hw)) 3720 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3721 else 3722 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3723 qla2xxx_wake_dpc(vha); 3724 } 3725 3726 } 3727 3728 int qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3729 { 3730 struct qla_tgt *tgt = cmd->tgt; 3731 struct scsi_qla_host *vha = tgt->vha; 3732 struct se_cmd *se_cmd = &cmd->se_cmd; 3733 unsigned long flags; 3734 3735 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3736 "qla_target(%d): terminating exchange for aborted cmd=%p " 3737 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3738 se_cmd->tag); 3739 3740 spin_lock_irqsave(&cmd->cmd_lock, flags); 3741 if (cmd->aborted) { 3742 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3743 /* 3744 * It's normal to see 2 calls in this path: 3745 * 1) XFER Rdy completion + CMD_T_ABORT 3746 * 2) TCM TMR - drain_state_list 3747 */ 3748 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, 3749 "multiple abort. %p transport_state %x, t_state %x, " 3750 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, 3751 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); 3752 return EIO; 3753 } 3754 cmd->aborted = 1; 3755 cmd->trc_flags |= TRC_ABORT; 3756 spin_unlock_irqrestore(&cmd->cmd_lock, flags); 3757 3758 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1); 3759 return 0; 3760 } 3761 EXPORT_SYMBOL(qlt_abort_cmd); 3762 3763 void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3764 { 3765 struct fc_port *sess = cmd->sess; 3766 3767 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, 3768 "%s: se_cmd[%p] ox_id %04x\n", 3769 __func__, &cmd->se_cmd, 3770 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 3771 3772 BUG_ON(cmd->cmd_in_wq); 3773 3774 if (cmd->sg_mapped) 3775 qlt_unmap_sg(cmd->vha, cmd); 3776 3777 if (!cmd->q_full) 3778 qlt_decr_num_pend_cmds(cmd->vha); 3779 3780 BUG_ON(cmd->sg_mapped); 3781 cmd->jiffies_at_free = get_jiffies_64(); 3782 if (unlikely(cmd->free_sg)) 3783 kfree(cmd->sg); 3784 3785 if (!sess || !sess->se_sess) { 3786 WARN_ON(1); 3787 return; 3788 } 3789 cmd->jiffies_at_free = get_jiffies_64(); 3790 target_free_tag(sess->se_sess, &cmd->se_cmd); 3791 } 3792 EXPORT_SYMBOL(qlt_free_cmd); 3793 3794 /* 3795 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3796 */ 3797 static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, 3798 struct qla_tgt_cmd *cmd, uint32_t status) 3799 { 3800 int term = 0; 3801 struct scsi_qla_host *vha = qpair->vha; 3802 3803 if (cmd->se_cmd.prot_op) 3804 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, 3805 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " 3806 "se_cmd=%p tag[%x] op %#x/%s", 3807 cmd->lba, cmd->lba, 3808 cmd->num_blks, &cmd->se_cmd, 3809 cmd->atio.u.isp24.exchange_addr, 3810 cmd->se_cmd.prot_op, 3811 prot_op_str(cmd->se_cmd.prot_op)); 3812 3813 if (ctio != NULL) { 3814 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; 3815 term = !(c->flags & 3816 cpu_to_le16(OF_TERM_EXCH)); 3817 } else 3818 term = 1; 3819 3820 if (term) 3821 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0); 3822 3823 return term; 3824 } 3825 3826 3827 /* ha->hardware_lock supposed to be held on entry */ 3828 static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, 3829 struct rsp_que *rsp, uint32_t handle, void *ctio) 3830 { 3831 struct qla_tgt_cmd *cmd = NULL; 3832 struct req_que *req; 3833 int qid = GET_QID(handle); 3834 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK; 3835 3836 if (unlikely(h == QLA_TGT_SKIP_HANDLE)) 3837 return NULL; 3838 3839 if (qid == rsp->req->id) { 3840 req = rsp->req; 3841 } else if (vha->hw->req_q_map[qid]) { 3842 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a, 3843 "qla_target(%d): CTIO completion with different QID %d handle %x\n", 3844 vha->vp_idx, rsp->id, handle); 3845 req = vha->hw->req_q_map[qid]; 3846 } else { 3847 return NULL; 3848 } 3849 3850 h &= QLA_CMD_HANDLE_MASK; 3851 3852 if (h != QLA_TGT_NULL_HANDLE) { 3853 if (unlikely(h >= req->num_outstanding_cmds)) { 3854 ql_dbg(ql_dbg_tgt, vha, 0xe052, 3855 "qla_target(%d): Wrong handle %x received\n", 3856 vha->vp_idx, handle); 3857 return NULL; 3858 } 3859 3860 cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h]; 3861 if (unlikely(cmd == NULL)) { 3862 ql_dbg(ql_dbg_async, vha, 0xe053, 3863 "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", 3864 vha->vp_idx, handle, req->id, rsp->id); 3865 return NULL; 3866 } 3867 req->outstanding_cmds[h] = NULL; 3868 } else if (ctio != NULL) { 3869 /* We can't get loop ID from CTIO7 */ 3870 ql_dbg(ql_dbg_tgt, vha, 0xe054, 3871 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't " 3872 "support NULL handles\n", vha->vp_idx); 3873 return NULL; 3874 } 3875 3876 return cmd; 3877 } 3878 3879 /* hardware_lock should be held by caller. */ 3880 void 3881 qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) 3882 { 3883 struct qla_hw_data *ha = vha->hw; 3884 3885 if (cmd->sg_mapped) 3886 qlt_unmap_sg(vha, cmd); 3887 3888 /* TODO: fix debug message type and ids. */ 3889 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3890 ql_dbg(ql_dbg_io, vha, 0xff00, 3891 "HOST-ABORT: state=PROCESSED.\n"); 3892 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3893 cmd->write_data_transferred = 0; 3894 cmd->state = QLA_TGT_STATE_DATA_IN; 3895 3896 ql_dbg(ql_dbg_io, vha, 0xff01, 3897 "HOST-ABORT: state=DATA_IN.\n"); 3898 3899 ha->tgt.tgt_ops->handle_data(cmd); 3900 return; 3901 } else { 3902 ql_dbg(ql_dbg_io, vha, 0xff03, 3903 "HOST-ABORT: state=BAD(%d).\n", 3904 cmd->state); 3905 dump_stack(); 3906 } 3907 3908 cmd->trc_flags |= TRC_FLUSH; 3909 ha->tgt.tgt_ops->free_cmd(cmd); 3910 } 3911 3912 /* 3913 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 3914 */ 3915 static void qlt_do_ctio_completion(struct scsi_qla_host *vha, 3916 struct rsp_que *rsp, uint32_t handle, uint32_t status, void *ctio) 3917 { 3918 struct qla_hw_data *ha = vha->hw; 3919 struct se_cmd *se_cmd; 3920 struct qla_tgt_cmd *cmd; 3921 struct qla_qpair *qpair = rsp->qpair; 3922 3923 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3924 /* That could happen only in case of an error/reset/abort */ 3925 if (status != CTIO_SUCCESS) { 3926 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d, 3927 "Intermediate CTIO received" 3928 " (status %x)\n", status); 3929 } 3930 return; 3931 } 3932 3933 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio); 3934 if (cmd == NULL) 3935 return; 3936 3937 se_cmd = &cmd->se_cmd; 3938 cmd->cmd_sent_to_fw = 0; 3939 3940 qlt_unmap_sg(vha, cmd); 3941 3942 if (unlikely(status != CTIO_SUCCESS)) { 3943 switch (status & 0xFFFF) { 3944 case CTIO_LIP_RESET: 3945 case CTIO_TARGET_RESET: 3946 case CTIO_ABORTED: 3947 /* driver request abort via Terminate exchange */ 3948 case CTIO_TIMEOUT: 3949 case CTIO_INVALID_RX_ID: 3950 /* They are OK */ 3951 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, 3952 "qla_target(%d): CTIO with " 3953 "status %#x received, state %x, se_cmd %p, " 3954 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " 3955 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, 3956 status, cmd->state, se_cmd); 3957 break; 3958 3959 case CTIO_PORT_LOGGED_OUT: 3960 case CTIO_PORT_UNAVAILABLE: 3961 { 3962 int logged_out = 3963 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; 3964 3965 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3966 "qla_target(%d): CTIO with %s status %x " 3967 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3968 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", 3969 status, cmd->state, se_cmd); 3970 3971 if (logged_out && cmd->sess) { 3972 /* 3973 * Session is already logged out, but we need 3974 * to notify initiator, who's not aware of this 3975 */ 3976 cmd->sess->logout_on_delete = 0; 3977 cmd->sess->send_els_logo = 1; 3978 ql_dbg(ql_dbg_disc, vha, 0x20f8, 3979 "%s %d %8phC post del sess\n", 3980 __func__, __LINE__, cmd->sess->port_name); 3981 3982 qlt_schedule_sess_for_deletion(cmd->sess); 3983 } 3984 break; 3985 } 3986 case CTIO_DIF_ERROR: { 3987 struct ctio_crc_from_fw *crc = 3988 (struct ctio_crc_from_fw *)ctio; 3989 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, 3990 "qla_target(%d): CTIO with DIF_ERROR status %x " 3991 "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " 3992 "expect_dif[0x%llx]\n", 3993 vha->vp_idx, status, cmd->state, se_cmd, 3994 *((u64 *)&crc->actual_dif[0]), 3995 *((u64 *)&crc->expected_dif[0])); 3996 3997 qlt_handle_dif_error(qpair, cmd, ctio); 3998 return; 3999 } 4000 default: 4001 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, 4002 "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", 4003 vha->vp_idx, status, cmd->state, se_cmd); 4004 break; 4005 } 4006 4007 4008 /* "cmd->aborted" means 4009 * cmd is already aborted/terminated, we don't 4010 * need to terminate again. The exchange is already 4011 * cleaned up/freed at FW level. Just cleanup at driver 4012 * level. 4013 */ 4014 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 4015 (!cmd->aborted)) { 4016 cmd->trc_flags |= TRC_CTIO_ERR; 4017 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status)) 4018 return; 4019 } 4020 } 4021 4022 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 4023 cmd->trc_flags |= TRC_CTIO_DONE; 4024 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 4025 cmd->state = QLA_TGT_STATE_DATA_IN; 4026 4027 if (status == CTIO_SUCCESS) 4028 cmd->write_data_transferred = 1; 4029 4030 ha->tgt.tgt_ops->handle_data(cmd); 4031 return; 4032 } else if (cmd->aborted) { 4033 cmd->trc_flags |= TRC_CTIO_ABORTED; 4034 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 4035 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 4036 } else { 4037 cmd->trc_flags |= TRC_CTIO_STRANGE; 4038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 4039 "qla_target(%d): A command in state (%d) should " 4040 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 4041 } 4042 4043 if (unlikely(status != CTIO_SUCCESS) && 4044 !cmd->aborted) { 4045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); 4046 dump_stack(); 4047 } 4048 4049 ha->tgt.tgt_ops->free_cmd(cmd); 4050 } 4051 4052 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, 4053 uint8_t task_codes) 4054 { 4055 int fcp_task_attr; 4056 4057 switch (task_codes) { 4058 case ATIO_SIMPLE_QUEUE: 4059 fcp_task_attr = TCM_SIMPLE_TAG; 4060 break; 4061 case ATIO_HEAD_OF_QUEUE: 4062 fcp_task_attr = TCM_HEAD_TAG; 4063 break; 4064 case ATIO_ORDERED_QUEUE: 4065 fcp_task_attr = TCM_ORDERED_TAG; 4066 break; 4067 case ATIO_ACA_QUEUE: 4068 fcp_task_attr = TCM_ACA_TAG; 4069 break; 4070 case ATIO_UNTAGGED: 4071 fcp_task_attr = TCM_SIMPLE_TAG; 4072 break; 4073 default: 4074 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d, 4075 "qla_target: unknown task code %x, use ORDERED instead\n", 4076 task_codes); 4077 fcp_task_attr = TCM_ORDERED_TAG; 4078 break; 4079 } 4080 4081 return fcp_task_attr; 4082 } 4083 4084 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, 4085 uint8_t *); 4086 /* 4087 * Process context for I/O path into tcm_qla2xxx code 4088 */ 4089 static void __qlt_do_work(struct qla_tgt_cmd *cmd) 4090 { 4091 scsi_qla_host_t *vha = cmd->vha; 4092 struct qla_hw_data *ha = vha->hw; 4093 struct fc_port *sess = cmd->sess; 4094 struct atio_from_isp *atio = &cmd->atio; 4095 unsigned char *cdb; 4096 unsigned long flags; 4097 uint32_t data_length; 4098 int ret, fcp_task_attr, data_dir, bidi = 0; 4099 struct qla_qpair *qpair = cmd->qpair; 4100 4101 cmd->cmd_in_wq = 0; 4102 cmd->trc_flags |= TRC_DO_WORK; 4103 4104 if (cmd->aborted) { 4105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, 4106 "cmd with tag %u is aborted\n", 4107 cmd->atio.u.isp24.exchange_addr); 4108 goto out_term; 4109 } 4110 4111 spin_lock_init(&cmd->cmd_lock); 4112 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 4113 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 4114 4115 if (atio->u.isp24.fcp_cmnd.rddata && 4116 atio->u.isp24.fcp_cmnd.wrdata) { 4117 bidi = 1; 4118 data_dir = DMA_TO_DEVICE; 4119 } else if (atio->u.isp24.fcp_cmnd.rddata) 4120 data_dir = DMA_FROM_DEVICE; 4121 else if (atio->u.isp24.fcp_cmnd.wrdata) 4122 data_dir = DMA_TO_DEVICE; 4123 else 4124 data_dir = DMA_NONE; 4125 4126 fcp_task_attr = qlt_get_fcp_task_attr(vha, 4127 atio->u.isp24.fcp_cmnd.task_attr); 4128 data_length = get_datalen_for_atio(atio); 4129 4130 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 4131 fcp_task_attr, data_dir, bidi); 4132 if (ret != 0) 4133 goto out_term; 4134 /* 4135 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*( 4136 */ 4137 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4138 ha->tgt.tgt_ops->put_sess(sess); 4139 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4140 return; 4141 4142 out_term: 4143 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); 4144 /* 4145 * cmd has not sent to target yet, so pass NULL as the second 4146 * argument to qlt_send_term_exchange() and free the memory here. 4147 */ 4148 cmd->trc_flags |= TRC_DO_WORK_ERR; 4149 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 4150 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); 4151 4152 qlt_decr_num_pend_cmds(vha); 4153 target_free_tag(sess->se_sess, &cmd->se_cmd); 4154 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 4155 4156 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4157 ha->tgt.tgt_ops->put_sess(sess); 4158 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4159 } 4160 4161 static void qlt_do_work(struct work_struct *work) 4162 { 4163 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 4164 scsi_qla_host_t *vha = cmd->vha; 4165 unsigned long flags; 4166 4167 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4168 list_del(&cmd->cmd_list); 4169 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4170 4171 __qlt_do_work(cmd); 4172 } 4173 4174 void qlt_clr_qp_table(struct scsi_qla_host *vha) 4175 { 4176 unsigned long flags; 4177 struct qla_hw_data *ha = vha->hw; 4178 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4179 void *node; 4180 u64 key = 0; 4181 4182 ql_log(ql_log_info, vha, 0x706c, 4183 "User update Number of Active Qpairs %d\n", 4184 ha->tgt.num_act_qpairs); 4185 4186 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4187 4188 btree_for_each_safe64(&tgt->lun_qpair_map, key, node) 4189 btree_remove64(&tgt->lun_qpair_map, key); 4190 4191 ha->base_qpair->lun_cnt = 0; 4192 for (key = 0; key < ha->max_qpairs; key++) 4193 if (ha->queue_pair_map[key]) 4194 ha->queue_pair_map[key]->lun_cnt = 0; 4195 4196 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4197 } 4198 4199 static void qlt_assign_qpair(struct scsi_qla_host *vha, 4200 struct qla_tgt_cmd *cmd) 4201 { 4202 struct qla_qpair *qpair, *qp; 4203 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4204 struct qla_qpair_hint *h; 4205 4206 if (vha->flags.qpairs_available) { 4207 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun); 4208 if (unlikely(!h)) { 4209 /* spread lun to qpair ratio evently */ 4210 int lcnt = 0, rc; 4211 struct scsi_qla_host *base_vha = 4212 pci_get_drvdata(vha->hw->pdev); 4213 4214 qpair = vha->hw->base_qpair; 4215 if (qpair->lun_cnt == 0) { 4216 qpair->lun_cnt++; 4217 h = qla_qpair_to_hint(tgt, qpair); 4218 BUG_ON(!h); 4219 rc = btree_insert64(&tgt->lun_qpair_map, 4220 cmd->unpacked_lun, h, GFP_ATOMIC); 4221 if (rc) { 4222 qpair->lun_cnt--; 4223 ql_log(ql_log_info, vha, 0xd037, 4224 "Unable to insert lun %llx into lun_qpair_map\n", 4225 cmd->unpacked_lun); 4226 } 4227 goto out; 4228 } else { 4229 lcnt = qpair->lun_cnt; 4230 } 4231 4232 h = NULL; 4233 list_for_each_entry(qp, &base_vha->qp_list, 4234 qp_list_elem) { 4235 if (qp->lun_cnt == 0) { 4236 qp->lun_cnt++; 4237 h = qla_qpair_to_hint(tgt, qp); 4238 BUG_ON(!h); 4239 rc = btree_insert64(&tgt->lun_qpair_map, 4240 cmd->unpacked_lun, h, GFP_ATOMIC); 4241 if (rc) { 4242 qp->lun_cnt--; 4243 ql_log(ql_log_info, vha, 0xd038, 4244 "Unable to insert lun %llx into lun_qpair_map\n", 4245 cmd->unpacked_lun); 4246 } 4247 qpair = qp; 4248 goto out; 4249 } else { 4250 if (qp->lun_cnt < lcnt) { 4251 lcnt = qp->lun_cnt; 4252 qpair = qp; 4253 continue; 4254 } 4255 } 4256 } 4257 BUG_ON(!qpair); 4258 qpair->lun_cnt++; 4259 h = qla_qpair_to_hint(tgt, qpair); 4260 BUG_ON(!h); 4261 rc = btree_insert64(&tgt->lun_qpair_map, 4262 cmd->unpacked_lun, h, GFP_ATOMIC); 4263 if (rc) { 4264 qpair->lun_cnt--; 4265 ql_log(ql_log_info, vha, 0xd039, 4266 "Unable to insert lun %llx into lun_qpair_map\n", 4267 cmd->unpacked_lun); 4268 } 4269 } 4270 } else { 4271 h = &tgt->qphints[0]; 4272 } 4273 out: 4274 cmd->qpair = h->qpair; 4275 cmd->se_cmd.cpuid = h->cpuid; 4276 } 4277 4278 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, 4279 struct fc_port *sess, 4280 struct atio_from_isp *atio) 4281 { 4282 struct se_session *se_sess = sess->se_sess; 4283 struct qla_tgt_cmd *cmd; 4284 int tag, cpu; 4285 4286 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 4287 if (tag < 0) 4288 return NULL; 4289 4290 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 4291 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 4292 cmd->cmd_type = TYPE_TGT_CMD; 4293 memcpy(&cmd->atio, atio, sizeof(*atio)); 4294 cmd->state = QLA_TGT_STATE_NEW; 4295 cmd->tgt = vha->vha_tgt.qla_tgt; 4296 qlt_incr_num_pend_cmds(vha); 4297 cmd->vha = vha; 4298 cmd->se_cmd.map_tag = tag; 4299 cmd->se_cmd.map_cpu = cpu; 4300 cmd->sess = sess; 4301 cmd->loop_id = sess->loop_id; 4302 cmd->conf_compl_supported = sess->conf_compl_supported; 4303 4304 cmd->trc_flags = 0; 4305 cmd->jiffies_at_alloc = get_jiffies_64(); 4306 4307 cmd->unpacked_lun = scsilun_to_int( 4308 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun); 4309 qlt_assign_qpair(vha, cmd); 4310 cmd->reset_count = vha->hw->base_qpair->chip_reset; 4311 cmd->vp_idx = vha->vp_idx; 4312 4313 return cmd; 4314 } 4315 4316 /* ha->hardware_lock supposed to be held on entry */ 4317 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, 4318 struct atio_from_isp *atio) 4319 { 4320 struct qla_hw_data *ha = vha->hw; 4321 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4322 struct fc_port *sess; 4323 struct qla_tgt_cmd *cmd; 4324 unsigned long flags; 4325 port_id_t id; 4326 4327 if (unlikely(tgt->tgt_stop)) { 4328 ql_dbg(ql_dbg_io, vha, 0x3061, 4329 "New command while device %p is shutting down\n", tgt); 4330 return -ENODEV; 4331 } 4332 4333 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 4334 id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; 4335 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; 4336 if (IS_SW_RESV_ADDR(id)) 4337 return -EBUSY; 4338 4339 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); 4340 if (unlikely(!sess)) 4341 return -EFAULT; 4342 4343 /* Another WWN used to have our s_id. Our PLOGI scheduled its 4344 * session deletion, but it's still in sess_del_work wq */ 4345 if (sess->deleted) { 4346 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, 4347 "New command while old session %p is being deleted\n", 4348 sess); 4349 return -EFAULT; 4350 } 4351 4352 /* 4353 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 4354 */ 4355 if (!kref_get_unless_zero(&sess->sess_kref)) { 4356 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 4357 "%s: kref_get fail, %8phC oxid %x \n", 4358 __func__, sess->port_name, 4359 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); 4360 return -EFAULT; 4361 } 4362 4363 cmd = qlt_get_tag(vha, sess, atio); 4364 if (!cmd) { 4365 ql_dbg(ql_dbg_io, vha, 0x3062, 4366 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 4367 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4368 ha->tgt.tgt_ops->put_sess(sess); 4369 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4370 return -EBUSY; 4371 } 4372 4373 cmd->cmd_in_wq = 1; 4374 cmd->trc_flags |= TRC_NEW_CMD; 4375 4376 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4377 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4378 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4379 4380 INIT_WORK(&cmd->work, qlt_do_work); 4381 if (vha->flags.qpairs_available) { 4382 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work); 4383 } else if (ha->msix_count) { 4384 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4385 queue_work_on(smp_processor_id(), qla_tgt_wq, 4386 &cmd->work); 4387 else 4388 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, 4389 &cmd->work); 4390 } else { 4391 queue_work(qla_tgt_wq, &cmd->work); 4392 } 4393 4394 return 0; 4395 } 4396 4397 /* ha->hardware_lock supposed to be held on entry */ 4398 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun, 4399 int fn, void *iocb, int flags) 4400 { 4401 struct scsi_qla_host *vha = sess->vha; 4402 struct qla_hw_data *ha = vha->hw; 4403 struct qla_tgt_mgmt_cmd *mcmd; 4404 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4405 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0]; 4406 4407 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4408 if (!mcmd) { 4409 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009, 4410 "qla_target(%d): Allocation of management " 4411 "command failed, some commands and their data could " 4412 "leak\n", vha->vp_idx); 4413 return -ENOMEM; 4414 } 4415 memset(mcmd, 0, sizeof(*mcmd)); 4416 mcmd->sess = sess; 4417 4418 if (iocb) { 4419 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4420 sizeof(mcmd->orig_iocb.imm_ntfy)); 4421 } 4422 mcmd->tmr_func = fn; 4423 mcmd->flags = flags; 4424 mcmd->reset_count = ha->base_qpair->chip_reset; 4425 mcmd->qpair = h->qpair; 4426 mcmd->vha = vha; 4427 mcmd->se_cmd.cpuid = h->cpuid; 4428 mcmd->unpacked_lun = lun; 4429 4430 switch (fn) { 4431 case QLA_TGT_LUN_RESET: 4432 case QLA_TGT_CLEAR_TS: 4433 case QLA_TGT_ABORT_TS: 4434 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); 4435 /* drop through */ 4436 case QLA_TGT_CLEAR_ACA: 4437 h = qlt_find_qphint(vha, mcmd->unpacked_lun); 4438 mcmd->qpair = h->qpair; 4439 mcmd->se_cmd.cpuid = h->cpuid; 4440 break; 4441 4442 case QLA_TGT_TARGET_RESET: 4443 case QLA_TGT_NEXUS_LOSS_SESS: 4444 case QLA_TGT_NEXUS_LOSS: 4445 case QLA_TGT_ABORT_ALL: 4446 default: 4447 /* no-op */ 4448 break; 4449 } 4450 4451 INIT_WORK(&mcmd->work, qlt_do_tmr_work); 4452 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, 4453 &mcmd->work); 4454 4455 return 0; 4456 } 4457 4458 /* ha->hardware_lock supposed to be held on entry */ 4459 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) 4460 { 4461 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4462 struct qla_hw_data *ha = vha->hw; 4463 struct fc_port *sess; 4464 u64 unpacked_lun; 4465 int fn; 4466 unsigned long flags; 4467 4468 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 4469 4470 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4471 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 4472 a->u.isp24.fcp_hdr.s_id); 4473 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4474 4475 unpacked_lun = 4476 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4477 4478 if (sess == NULL || sess->deleted) 4479 return -EFAULT; 4480 4481 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 4482 } 4483 4484 /* ha->hardware_lock supposed to be held on entry */ 4485 static int __qlt_abort_task(struct scsi_qla_host *vha, 4486 struct imm_ntfy_from_isp *iocb, struct fc_port *sess) 4487 { 4488 struct atio_from_isp *a = (struct atio_from_isp *)iocb; 4489 struct qla_hw_data *ha = vha->hw; 4490 struct qla_tgt_mgmt_cmd *mcmd; 4491 u64 unpacked_lun; 4492 int rc; 4493 4494 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC); 4495 if (mcmd == NULL) { 4496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f, 4497 "qla_target(%d): %s: Allocation of ABORT cmd failed\n", 4498 vha->vp_idx, __func__); 4499 return -ENOMEM; 4500 } 4501 memset(mcmd, 0, sizeof(*mcmd)); 4502 4503 mcmd->sess = sess; 4504 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb, 4505 sizeof(mcmd->orig_iocb.imm_ntfy)); 4506 4507 unpacked_lun = 4508 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 4509 mcmd->reset_count = ha->base_qpair->chip_reset; 4510 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK; 4511 mcmd->qpair = ha->base_qpair; 4512 4513 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func, 4514 le16_to_cpu(iocb->u.isp2x.seq_id)); 4515 if (rc != 0) { 4516 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060, 4517 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n", 4518 vha->vp_idx, rc); 4519 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool); 4520 return -EFAULT; 4521 } 4522 4523 return 0; 4524 } 4525 4526 /* ha->hardware_lock supposed to be held on entry */ 4527 static int qlt_abort_task(struct scsi_qla_host *vha, 4528 struct imm_ntfy_from_isp *iocb) 4529 { 4530 struct qla_hw_data *ha = vha->hw; 4531 struct fc_port *sess; 4532 int loop_id; 4533 unsigned long flags; 4534 4535 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb); 4536 4537 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 4538 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id); 4539 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 4540 4541 if (sess == NULL) { 4542 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025, 4543 "qla_target(%d): task abort for unexisting " 4544 "session\n", vha->vp_idx); 4545 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt, 4546 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb)); 4547 } 4548 4549 return __qlt_abort_task(vha, iocb, sess); 4550 } 4551 4552 void qlt_logo_completion_handler(fc_port_t *fcport, int rc) 4553 { 4554 if (rc != MBS_COMMAND_COMPLETE) { 4555 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, 4556 "%s: se_sess %p / sess %p from" 4557 " port %8phC loop_id %#04x s_id %02x:%02x:%02x" 4558 " LOGO failed: %#x\n", 4559 __func__, 4560 fcport->se_sess, 4561 fcport, 4562 fcport->port_name, fcport->loop_id, 4563 fcport->d_id.b.domain, fcport->d_id.b.area, 4564 fcport->d_id.b.al_pa, rc); 4565 } 4566 4567 fcport->logout_completed = 1; 4568 } 4569 4570 /* 4571 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) 4572 * 4573 * Schedules sessions with matching port_id/loop_id but different wwn for 4574 * deletion. Returns existing session with matching wwn if present. 4575 * Null otherwise. 4576 */ 4577 struct fc_port * 4578 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, 4579 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess) 4580 { 4581 struct fc_port *sess = NULL, *other_sess; 4582 uint64_t other_wwn; 4583 4584 *conflict_sess = NULL; 4585 4586 list_for_each_entry(other_sess, &vha->vp_fcports, list) { 4587 4588 other_wwn = wwn_to_u64(other_sess->port_name); 4589 4590 if (wwn == other_wwn) { 4591 WARN_ON(sess); 4592 sess = other_sess; 4593 continue; 4594 } 4595 4596 /* find other sess with nport_id collision */ 4597 if (port_id.b24 == other_sess->d_id.b24) { 4598 if (loop_id != other_sess->loop_id) { 4599 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, 4600 "Invalidating sess %p loop_id %d wwn %llx.\n", 4601 other_sess, other_sess->loop_id, other_wwn); 4602 4603 /* 4604 * logout_on_delete is set by default, but another 4605 * session that has the same s_id/loop_id combo 4606 * might have cleared it when requested this session 4607 * deletion, so don't touch it 4608 */ 4609 qlt_schedule_sess_for_deletion(other_sess); 4610 } else { 4611 /* 4612 * Another wwn used to have our s_id/loop_id 4613 * kill the session, but don't free the loop_id 4614 */ 4615 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, 4616 "Invalidating sess %p loop_id %d wwn %llx.\n", 4617 other_sess, other_sess->loop_id, other_wwn); 4618 4619 other_sess->keep_nport_handle = 1; 4620 if (other_sess->disc_state != DSC_DELETED) 4621 *conflict_sess = other_sess; 4622 qlt_schedule_sess_for_deletion(other_sess); 4623 } 4624 continue; 4625 } 4626 4627 /* find other sess with nport handle collision */ 4628 if ((loop_id == other_sess->loop_id) && 4629 (loop_id != FC_NO_LOOP_ID)) { 4630 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 4631 "Invalidating sess %p loop_id %d wwn %llx.\n", 4632 other_sess, other_sess->loop_id, other_wwn); 4633 4634 /* Same loop_id but different s_id 4635 * Ok to kill and logout */ 4636 qlt_schedule_sess_for_deletion(other_sess); 4637 } 4638 } 4639 4640 return sess; 4641 } 4642 4643 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ 4644 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) 4645 { 4646 struct qla_tgt_sess_op *op; 4647 struct qla_tgt_cmd *cmd; 4648 uint32_t key; 4649 int count = 0; 4650 unsigned long flags; 4651 4652 key = (((u32)s_id->b.domain << 16) | 4653 ((u32)s_id->b.area << 8) | 4654 ((u32)s_id->b.al_pa)); 4655 4656 spin_lock_irqsave(&vha->cmd_list_lock, flags); 4657 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { 4658 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4659 4660 if (op_key == key) { 4661 op->aborted = true; 4662 count++; 4663 } 4664 } 4665 4666 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) { 4667 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); 4668 if (op_key == key) { 4669 op->aborted = true; 4670 count++; 4671 } 4672 } 4673 4674 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { 4675 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); 4676 if (cmd_key == key) { 4677 cmd->aborted = 1; 4678 count++; 4679 } 4680 } 4681 spin_unlock_irqrestore(&vha->cmd_list_lock, flags); 4682 4683 return count; 4684 } 4685 4686 static int qlt_handle_login(struct scsi_qla_host *vha, 4687 struct imm_ntfy_from_isp *iocb) 4688 { 4689 struct fc_port *sess = NULL, *conflict_sess = NULL; 4690 uint64_t wwn; 4691 port_id_t port_id; 4692 uint16_t loop_id, wd3_lo; 4693 int res = 0; 4694 struct qlt_plogi_ack_t *pla; 4695 unsigned long flags; 4696 4697 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4698 4699 port_id.b.domain = iocb->u.isp24.port_id[2]; 4700 port_id.b.area = iocb->u.isp24.port_id[1]; 4701 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4702 port_id.b.rsvd_1 = 0; 4703 4704 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4705 4706 /* Mark all stale commands sitting in qla_tgt_wq for deletion */ 4707 abort_cmds_for_s_id(vha, &port_id); 4708 4709 if (wwn) { 4710 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 4711 sess = qlt_find_sess_invalidate_other(vha, wwn, 4712 port_id, loop_id, &conflict_sess); 4713 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 4714 } 4715 4716 if (IS_SW_RESV_ADDR(port_id)) { 4717 res = 1; 4718 goto out; 4719 } 4720 4721 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); 4722 if (!pla) { 4723 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, 4724 "%s %d %8phC Term INOT due to mem alloc fail", 4725 __func__, __LINE__, 4726 iocb->u.isp24.port_name); 4727 qlt_send_term_imm_notif(vha, iocb, 1); 4728 goto out; 4729 } 4730 4731 if (conflict_sess) { 4732 conflict_sess->login_gen++; 4733 qlt_plogi_ack_link(vha, pla, conflict_sess, 4734 QLT_PLOGI_LINK_CONFLICT); 4735 } 4736 4737 if (!sess) { 4738 pla->ref_count++; 4739 ql_dbg(ql_dbg_disc, vha, 0xffff, 4740 "%s %d %8phC post new sess\n", 4741 __func__, __LINE__, iocb->u.isp24.port_name); 4742 if (iocb->u.isp24.status_subcode == ELS_PLOGI) 4743 qla24xx_post_newsess_work(vha, &port_id, 4744 iocb->u.isp24.port_name, 4745 iocb->u.isp24.u.plogi.node_name, 4746 pla, FC4_TYPE_UNKNOWN); 4747 else 4748 qla24xx_post_newsess_work(vha, &port_id, 4749 iocb->u.isp24.port_name, NULL, 4750 pla, FC4_TYPE_UNKNOWN); 4751 4752 goto out; 4753 } 4754 4755 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); 4756 sess->d_id = port_id; 4757 sess->login_gen++; 4758 4759 if (iocb->u.isp24.status_subcode == ELS_PRLI) { 4760 sess->fw_login_state = DSC_LS_PRLI_PEND; 4761 sess->local = 0; 4762 sess->loop_id = loop_id; 4763 sess->d_id = port_id; 4764 sess->fw_login_state = DSC_LS_PRLI_PEND; 4765 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4766 4767 if (wd3_lo & BIT_7) 4768 sess->conf_compl_supported = 1; 4769 4770 if ((wd3_lo & BIT_4) == 0) 4771 sess->port_type = FCT_INITIATOR; 4772 else 4773 sess->port_type = FCT_TARGET; 4774 4775 } else 4776 sess->fw_login_state = DSC_LS_PLOGI_PEND; 4777 4778 4779 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4780 "%s %d %8phC DS %d\n", 4781 __func__, __LINE__, sess->port_name, sess->disc_state); 4782 4783 switch (sess->disc_state) { 4784 case DSC_DELETED: 4785 qlt_plogi_ack_unref(vha, pla); 4786 break; 4787 4788 default: 4789 /* 4790 * Under normal circumstances we want to release nport handle 4791 * during LOGO process to avoid nport handle leaks inside FW. 4792 * The exception is when LOGO is done while another PLOGI with 4793 * the same nport handle is waiting as might be the case here. 4794 * Note: there is always a possibily of a race where session 4795 * deletion has already started for other reasons (e.g. ACL 4796 * removal) and now PLOGI arrives: 4797 * 1. if PLOGI arrived in FW after nport handle has been freed, 4798 * FW must have assigned this PLOGI a new/same handle and we 4799 * can proceed ACK'ing it as usual when session deletion 4800 * completes. 4801 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT 4802 * bit reached it, the handle has now been released. We'll 4803 * get an error when we ACK this PLOGI. Nothing will be sent 4804 * back to initiator. Initiator should eventually retry 4805 * PLOGI and situation will correct itself. 4806 */ 4807 sess->keep_nport_handle = ((sess->loop_id == loop_id) && 4808 (sess->d_id.b24 == port_id.b24)); 4809 4810 ql_dbg(ql_dbg_disc, vha, 0x20f9, 4811 "%s %d %8phC post del sess\n", 4812 __func__, __LINE__, sess->port_name); 4813 4814 4815 qlt_schedule_sess_for_deletion(sess); 4816 break; 4817 } 4818 out: 4819 return res; 4820 } 4821 4822 /* 4823 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4824 */ 4825 static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4826 struct imm_ntfy_from_isp *iocb) 4827 { 4828 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4829 struct qla_hw_data *ha = vha->hw; 4830 struct fc_port *sess = NULL, *conflict_sess = NULL; 4831 uint64_t wwn; 4832 port_id_t port_id; 4833 uint16_t loop_id; 4834 uint16_t wd3_lo; 4835 int res = 0; 4836 unsigned long flags; 4837 4838 wwn = wwn_to_u64(iocb->u.isp24.port_name); 4839 4840 port_id.b.domain = iocb->u.isp24.port_id[2]; 4841 port_id.b.area = iocb->u.isp24.port_id[1]; 4842 port_id.b.al_pa = iocb->u.isp24.port_id[0]; 4843 port_id.b.rsvd_1 = 0; 4844 4845 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); 4846 4847 ql_dbg(ql_dbg_disc, vha, 0xf026, 4848 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n", 4849 vha->vp_idx, iocb->u.isp24.port_id[2], 4850 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], 4851 iocb->u.isp24.status_subcode, loop_id, 4852 iocb->u.isp24.port_name); 4853 4854 /* res = 1 means ack at the end of thread 4855 * res = 0 means ack async/later. 4856 */ 4857 switch (iocb->u.isp24.status_subcode) { 4858 case ELS_PLOGI: 4859 res = qlt_handle_login(vha, iocb); 4860 break; 4861 4862 case ELS_PRLI: 4863 if (N2N_TOPO(ha)) { 4864 sess = qla2x00_find_fcport_by_wwpn(vha, 4865 iocb->u.isp24.port_name, 1); 4866 4867 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) { 4868 ql_dbg(ql_dbg_disc, vha, 0xffff, 4869 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n", 4870 __func__, __LINE__, 4871 iocb->u.isp24.port_name); 4872 qlt_send_term_imm_notif(vha, iocb, 1); 4873 break; 4874 } 4875 4876 res = qlt_handle_login(vha, iocb); 4877 break; 4878 } 4879 4880 if (IS_SW_RESV_ADDR(port_id)) { 4881 res = 1; 4882 break; 4883 } 4884 4885 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); 4886 4887 if (wwn) { 4888 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4889 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id, 4890 loop_id, &conflict_sess); 4891 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4892 } 4893 4894 if (conflict_sess) { 4895 switch (conflict_sess->disc_state) { 4896 case DSC_DELETED: 4897 case DSC_DELETE_PEND: 4898 break; 4899 default: 4900 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b, 4901 "PRLI with conflicting sess %p port %8phC\n", 4902 conflict_sess, conflict_sess->port_name); 4903 conflict_sess->fw_login_state = 4904 DSC_LS_PORT_UNAVAIL; 4905 qlt_send_term_imm_notif(vha, iocb, 1); 4906 res = 0; 4907 break; 4908 } 4909 } 4910 4911 if (sess != NULL) { 4912 bool delete = false; 4913 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags); 4914 switch (sess->fw_login_state) { 4915 case DSC_LS_PLOGI_PEND: 4916 case DSC_LS_PLOGI_COMP: 4917 case DSC_LS_PRLI_COMP: 4918 break; 4919 default: 4920 delete = true; 4921 break; 4922 } 4923 4924 switch (sess->disc_state) { 4925 case DSC_LOGIN_PEND: 4926 case DSC_GPDB: 4927 case DSC_UPD_FCPORT: 4928 case DSC_LOGIN_COMPLETE: 4929 case DSC_ADISC: 4930 delete = false; 4931 break; 4932 default: 4933 break; 4934 } 4935 4936 if (delete) { 4937 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, 4938 flags); 4939 /* 4940 * Impatient initiator sent PRLI before last 4941 * PLOGI could finish. Will force him to re-try, 4942 * while last one finishes. 4943 */ 4944 ql_log(ql_log_warn, sess->vha, 0xf095, 4945 "sess %p PRLI received, before plogi ack.\n", 4946 sess); 4947 qlt_send_term_imm_notif(vha, iocb, 1); 4948 res = 0; 4949 break; 4950 } 4951 4952 /* 4953 * This shouldn't happen under normal circumstances, 4954 * since we have deleted the old session during PLOGI 4955 */ 4956 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, 4957 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", 4958 sess->loop_id, sess, iocb->u.isp24.nport_handle); 4959 4960 sess->local = 0; 4961 sess->loop_id = loop_id; 4962 sess->d_id = port_id; 4963 sess->fw_login_state = DSC_LS_PRLI_PEND; 4964 4965 if (wd3_lo & BIT_7) 4966 sess->conf_compl_supported = 1; 4967 4968 if ((wd3_lo & BIT_4) == 0) 4969 sess->port_type = FCT_INITIATOR; 4970 else 4971 sess->port_type = FCT_TARGET; 4972 4973 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags); 4974 } 4975 res = 1; /* send notify ack */ 4976 4977 /* Make session global (not used in fabric mode) */ 4978 if (ha->current_topology != ISP_CFG_F) { 4979 if (sess) { 4980 ql_dbg(ql_dbg_disc, vha, 0x20fa, 4981 "%s %d %8phC post nack\n", 4982 __func__, __LINE__, sess->port_name); 4983 qla24xx_post_nack_work(vha, sess, iocb, 4984 SRB_NACK_PRLI); 4985 res = 0; 4986 } else { 4987 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 4988 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 4989 qla2xxx_wake_dpc(vha); 4990 } 4991 } else { 4992 if (sess) { 4993 ql_dbg(ql_dbg_disc, vha, 0x20fb, 4994 "%s %d %8phC post nack\n", 4995 __func__, __LINE__, sess->port_name); 4996 qla24xx_post_nack_work(vha, sess, iocb, 4997 SRB_NACK_PRLI); 4998 res = 0; 4999 } 5000 } 5001 break; 5002 5003 case ELS_TPRLO: 5004 if (le16_to_cpu(iocb->u.isp24.flags) & 5005 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { 5006 loop_id = 0xFFFF; 5007 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS); 5008 res = 1; 5009 break; 5010 } 5011 /* fall through */ 5012 case ELS_LOGO: 5013 case ELS_PRLO: 5014 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5015 sess = qla2x00_find_fcport_by_loopid(vha, loop_id); 5016 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5017 5018 if (sess) { 5019 sess->login_gen++; 5020 sess->fw_login_state = DSC_LS_LOGO_PEND; 5021 sess->logo_ack_needed = 1; 5022 memcpy(sess->iocb, iocb, IOCB_SIZE); 5023 } 5024 5025 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5026 5027 ql_dbg(ql_dbg_disc, vha, 0x20fc, 5028 "%s: logo %llx res %d sess %p ", 5029 __func__, wwn, res, sess); 5030 if (res == 0) { 5031 /* 5032 * cmd went upper layer, look for qlt_xmit_tm_rsp() 5033 * for LOGO_ACK & sess delete 5034 */ 5035 BUG_ON(!sess); 5036 res = 0; 5037 } else { 5038 /* cmd did not go to upper layer. */ 5039 if (sess) { 5040 qlt_schedule_sess_for_deletion(sess); 5041 res = 0; 5042 } 5043 /* else logo will be ack */ 5044 } 5045 break; 5046 case ELS_PDISC: 5047 case ELS_ADISC: 5048 { 5049 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5050 if (tgt->link_reinit_iocb_pending) { 5051 qlt_send_notify_ack(ha->base_qpair, 5052 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5053 tgt->link_reinit_iocb_pending = 0; 5054 } 5055 5056 sess = qla2x00_find_fcport_by_wwpn(vha, 5057 iocb->u.isp24.port_name, 1); 5058 if (sess) { 5059 ql_dbg(ql_dbg_disc, vha, 0x20fd, 5060 "sess %p lid %d|%d DS %d LS %d\n", 5061 sess, sess->loop_id, loop_id, 5062 sess->disc_state, sess->fw_login_state); 5063 } 5064 5065 res = 1; /* send notify ack */ 5066 break; 5067 } 5068 5069 case ELS_FLOGI: /* should never happen */ 5070 default: 5071 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 5072 "qla_target(%d): Unsupported ELS command %x " 5073 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode); 5074 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 5075 break; 5076 } 5077 5078 ql_dbg(ql_dbg_disc, vha, 0xf026, 5079 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n", 5080 vha->vp_idx, iocb->u.isp24.status_subcode, res); 5081 5082 return res; 5083 } 5084 5085 /* 5086 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5087 */ 5088 static void qlt_handle_imm_notify(struct scsi_qla_host *vha, 5089 struct imm_ntfy_from_isp *iocb) 5090 { 5091 struct qla_hw_data *ha = vha->hw; 5092 uint32_t add_flags = 0; 5093 int send_notify_ack = 1; 5094 uint16_t status; 5095 5096 status = le16_to_cpu(iocb->u.isp2x.status); 5097 switch (status) { 5098 case IMM_NTFY_LIP_RESET: 5099 { 5100 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032, 5101 "qla_target(%d): LIP reset (loop %#x), subcode %x\n", 5102 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle), 5103 iocb->u.isp24.status_subcode); 5104 5105 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5106 send_notify_ack = 0; 5107 break; 5108 } 5109 5110 case IMM_NTFY_LIP_LINK_REINIT: 5111 { 5112 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5113 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033, 5114 "qla_target(%d): LINK REINIT (loop %#x, " 5115 "subcode %x)\n", vha->vp_idx, 5116 le16_to_cpu(iocb->u.isp24.nport_handle), 5117 iocb->u.isp24.status_subcode); 5118 if (tgt->link_reinit_iocb_pending) { 5119 qlt_send_notify_ack(ha->base_qpair, 5120 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); 5121 } 5122 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb)); 5123 tgt->link_reinit_iocb_pending = 1; 5124 /* 5125 * QLogic requires to wait after LINK REINIT for possible 5126 * PDISC or ADISC ELS commands 5127 */ 5128 send_notify_ack = 0; 5129 break; 5130 } 5131 5132 case IMM_NTFY_PORT_LOGOUT: 5133 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034, 5134 "qla_target(%d): Port logout (loop " 5135 "%#x, subcode %x)\n", vha->vp_idx, 5136 le16_to_cpu(iocb->u.isp24.nport_handle), 5137 iocb->u.isp24.status_subcode); 5138 5139 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0) 5140 send_notify_ack = 0; 5141 /* The sessions will be cleared in the callback, if needed */ 5142 break; 5143 5144 case IMM_NTFY_GLBL_TPRLO: 5145 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035, 5146 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status); 5147 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5148 send_notify_ack = 0; 5149 /* The sessions will be cleared in the callback, if needed */ 5150 break; 5151 5152 case IMM_NTFY_PORT_CONFIG: 5153 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036, 5154 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx, 5155 status); 5156 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0) 5157 send_notify_ack = 0; 5158 /* The sessions will be cleared in the callback, if needed */ 5159 break; 5160 5161 case IMM_NTFY_GLBL_LOGO: 5162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a, 5163 "qla_target(%d): Link failure detected\n", 5164 vha->vp_idx); 5165 /* I_T nexus loss */ 5166 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0) 5167 send_notify_ack = 0; 5168 break; 5169 5170 case IMM_NTFY_IOCB_OVERFLOW: 5171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b, 5172 "qla_target(%d): Cannot provide requested " 5173 "capability (IOCB overflowed the immediate notify " 5174 "resource count)\n", vha->vp_idx); 5175 break; 5176 5177 case IMM_NTFY_ABORT_TASK: 5178 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037, 5179 "qla_target(%d): Abort Task (S %08x I %#x -> " 5180 "L %#x)\n", vha->vp_idx, 5181 le16_to_cpu(iocb->u.isp2x.seq_id), 5182 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb), 5183 le16_to_cpu(iocb->u.isp2x.lun)); 5184 if (qlt_abort_task(vha, iocb) == 0) 5185 send_notify_ack = 0; 5186 break; 5187 5188 case IMM_NTFY_RESOURCE: 5189 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c, 5190 "qla_target(%d): Out of resources, host %ld\n", 5191 vha->vp_idx, vha->host_no); 5192 break; 5193 5194 case IMM_NTFY_MSG_RX: 5195 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038, 5196 "qla_target(%d): Immediate notify task %x\n", 5197 vha->vp_idx, iocb->u.isp2x.task_flags); 5198 break; 5199 5200 case IMM_NTFY_ELS: 5201 if (qlt_24xx_handle_els(vha, iocb) == 0) 5202 send_notify_ack = 0; 5203 break; 5204 default: 5205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d, 5206 "qla_target(%d): Received unknown immediate " 5207 "notify status %x\n", vha->vp_idx, status); 5208 break; 5209 } 5210 5211 if (send_notify_ack) 5212 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0, 5213 0, 0); 5214 } 5215 5216 /* 5217 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5218 * This function sends busy to ISP 2xxx or 24xx. 5219 */ 5220 static int __qlt_send_busy(struct qla_qpair *qpair, 5221 struct atio_from_isp *atio, uint16_t status) 5222 { 5223 struct scsi_qla_host *vha = qpair->vha; 5224 struct ctio7_to_24xx *ctio24; 5225 struct qla_hw_data *ha = vha->hw; 5226 request_t *pkt; 5227 struct fc_port *sess = NULL; 5228 unsigned long flags; 5229 u16 temp; 5230 port_id_t id; 5231 5232 id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; 5233 id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; 5234 id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; 5235 id.b.rsvd_1 = 0; 5236 5237 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 5238 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); 5239 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5240 if (!sess) { 5241 qlt_send_term_exchange(qpair, NULL, atio, 1, 0); 5242 return 0; 5243 } 5244 /* Sending marker isn't necessary, since we called from ISR */ 5245 5246 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); 5247 if (!pkt) { 5248 ql_dbg(ql_dbg_io, vha, 0x3063, 5249 "qla_target(%d): %s failed: unable to allocate " 5250 "request packet", vha->vp_idx, __func__); 5251 return -ENOMEM; 5252 } 5253 5254 qpair->tgt_counters.num_q_full_sent++; 5255 pkt->entry_count = 1; 5256 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; 5257 5258 ctio24 = (struct ctio7_to_24xx *)pkt; 5259 ctio24->entry_type = CTIO_TYPE7; 5260 ctio24->nport_handle = sess->loop_id; 5261 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); 5262 ctio24->vp_index = vha->vp_idx; 5263 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; 5264 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; 5265 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 5266 ctio24->exchange_addr = atio->u.isp24.exchange_addr; 5267 temp = (atio->u.isp24.attr << 9) | 5268 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | 5269 CTIO7_FLAGS_DONT_RET_CTIO; 5270 ctio24->u.status1.flags = cpu_to_le16(temp); 5271 /* 5272 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, 5273 * if the explicit conformation is used. 5274 */ 5275 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 5276 ctio24->u.status1.scsi_status = cpu_to_le16(status); 5277 5278 ctio24->u.status1.residual = get_datalen_for_atio(atio); 5279 5280 if (ctio24->u.status1.residual != 0) 5281 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 5282 5283 /* Memory Barrier */ 5284 wmb(); 5285 if (qpair->reqq_start_iocbs) 5286 qpair->reqq_start_iocbs(qpair); 5287 else 5288 qla2x00_start_iocbs(vha, qpair->req); 5289 return 0; 5290 } 5291 5292 /* 5293 * This routine is used to allocate a command for either a QFull condition 5294 * (ie reply SAM_STAT_BUSY) or to terminate an exchange that did not go 5295 * out previously. 5296 */ 5297 static void 5298 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 5299 struct atio_from_isp *atio, uint16_t status, int qfull) 5300 { 5301 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5302 struct qla_hw_data *ha = vha->hw; 5303 struct fc_port *sess; 5304 struct se_session *se_sess; 5305 struct qla_tgt_cmd *cmd; 5306 int tag, cpu; 5307 unsigned long flags; 5308 5309 if (unlikely(tgt->tgt_stop)) { 5310 ql_dbg(ql_dbg_io, vha, 0x300a, 5311 "New command while device %p is shutting down\n", tgt); 5312 return; 5313 } 5314 5315 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) { 5316 vha->hw->tgt.num_qfull_cmds_dropped++; 5317 if (vha->hw->tgt.num_qfull_cmds_dropped > 5318 vha->qla_stats.stat_max_qfull_cmds_dropped) 5319 vha->qla_stats.stat_max_qfull_cmds_dropped = 5320 vha->hw->tgt.num_qfull_cmds_dropped; 5321 5322 ql_dbg(ql_dbg_io, vha, 0x3068, 5323 "qla_target(%d): %s: QFull CMD dropped[%d]\n", 5324 vha->vp_idx, __func__, 5325 vha->hw->tgt.num_qfull_cmds_dropped); 5326 5327 qlt_chk_exch_leak_thresh_hold(vha); 5328 return; 5329 } 5330 5331 sess = ha->tgt.tgt_ops->find_sess_by_s_id 5332 (vha, atio->u.isp24.fcp_hdr.s_id); 5333 if (!sess) 5334 return; 5335 5336 se_sess = sess->se_sess; 5337 5338 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); 5339 if (tag < 0) 5340 return; 5341 5342 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; 5343 if (!cmd) { 5344 ql_dbg(ql_dbg_io, vha, 0x3009, 5345 "qla_target(%d): %s: Allocation of cmd failed\n", 5346 vha->vp_idx, __func__); 5347 5348 vha->hw->tgt.num_qfull_cmds_dropped++; 5349 if (vha->hw->tgt.num_qfull_cmds_dropped > 5350 vha->qla_stats.stat_max_qfull_cmds_dropped) 5351 vha->qla_stats.stat_max_qfull_cmds_dropped = 5352 vha->hw->tgt.num_qfull_cmds_dropped; 5353 5354 qlt_chk_exch_leak_thresh_hold(vha); 5355 return; 5356 } 5357 5358 memset(cmd, 0, sizeof(struct qla_tgt_cmd)); 5359 5360 qlt_incr_num_pend_cmds(vha); 5361 INIT_LIST_HEAD(&cmd->cmd_list); 5362 memcpy(&cmd->atio, atio, sizeof(*atio)); 5363 5364 cmd->tgt = vha->vha_tgt.qla_tgt; 5365 cmd->vha = vha; 5366 cmd->reset_count = ha->base_qpair->chip_reset; 5367 cmd->q_full = 1; 5368 cmd->qpair = ha->base_qpair; 5369 cmd->se_cmd.map_cpu = cpu; 5370 5371 if (qfull) { 5372 cmd->q_full = 1; 5373 /* NOTE: borrowing the state field to carry the status */ 5374 cmd->state = status; 5375 } else 5376 cmd->term_exchg = 1; 5377 5378 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5379 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list); 5380 5381 vha->hw->tgt.num_qfull_cmds_alloc++; 5382 if (vha->hw->tgt.num_qfull_cmds_alloc > 5383 vha->qla_stats.stat_max_qfull_cmds_alloc) 5384 vha->qla_stats.stat_max_qfull_cmds_alloc = 5385 vha->hw->tgt.num_qfull_cmds_alloc; 5386 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5387 } 5388 5389 int 5390 qlt_free_qfull_cmds(struct qla_qpair *qpair) 5391 { 5392 struct scsi_qla_host *vha = qpair->vha; 5393 struct qla_hw_data *ha = vha->hw; 5394 unsigned long flags; 5395 struct qla_tgt_cmd *cmd, *tcmd; 5396 struct list_head free_list, q_full_list; 5397 int rc = 0; 5398 5399 if (list_empty(&ha->tgt.q_full_list)) 5400 return 0; 5401 5402 INIT_LIST_HEAD(&free_list); 5403 INIT_LIST_HEAD(&q_full_list); 5404 5405 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5406 if (list_empty(&ha->tgt.q_full_list)) { 5407 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5408 return 0; 5409 } 5410 5411 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list); 5412 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5413 5414 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 5415 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) { 5416 if (cmd->q_full) 5417 /* cmd->state is a borrowed field to hold status */ 5418 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state); 5419 else if (cmd->term_exchg) 5420 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio); 5421 5422 if (rc == -ENOMEM) 5423 break; 5424 5425 if (cmd->q_full) 5426 ql_dbg(ql_dbg_io, vha, 0x3006, 5427 "%s: busy sent for ox_id[%04x]\n", __func__, 5428 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5429 else if (cmd->term_exchg) 5430 ql_dbg(ql_dbg_io, vha, 0x3007, 5431 "%s: Term exchg sent for ox_id[%04x]\n", __func__, 5432 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 5433 else 5434 ql_dbg(ql_dbg_io, vha, 0x3008, 5435 "%s: Unexpected cmd in QFull list %p\n", __func__, 5436 cmd); 5437 5438 list_del(&cmd->cmd_list); 5439 list_add_tail(&cmd->cmd_list, &free_list); 5440 5441 /* piggy back on hardware_lock for protection */ 5442 vha->hw->tgt.num_qfull_cmds_alloc--; 5443 } 5444 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 5445 5446 cmd = NULL; 5447 5448 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) { 5449 list_del(&cmd->cmd_list); 5450 /* This cmd was never sent to TCM. There is no need 5451 * to schedule free or call free_cmd 5452 */ 5453 qlt_free_cmd(cmd); 5454 } 5455 5456 if (!list_empty(&q_full_list)) { 5457 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags); 5458 list_splice(&q_full_list, &vha->hw->tgt.q_full_list); 5459 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 5460 } 5461 5462 return rc; 5463 } 5464 5465 static void 5466 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio, 5467 uint16_t status) 5468 { 5469 int rc = 0; 5470 struct scsi_qla_host *vha = qpair->vha; 5471 5472 rc = __qlt_send_busy(qpair, atio, status); 5473 if (rc == -ENOMEM) 5474 qlt_alloc_qfull_cmd(vha, atio, status, 1); 5475 } 5476 5477 static int 5478 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair, 5479 struct atio_from_isp *atio, uint8_t ha_locked) 5480 { 5481 struct qla_hw_data *ha = vha->hw; 5482 unsigned long flags; 5483 5484 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) 5485 return 0; 5486 5487 if (!ha_locked) 5488 spin_lock_irqsave(&ha->hardware_lock, flags); 5489 qlt_send_busy(qpair, atio, qla_sam_status); 5490 if (!ha_locked) 5491 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5492 5493 return 1; 5494 } 5495 5496 /* ha->hardware_lock supposed to be held on entry */ 5497 /* called via callback from qla2xxx */ 5498 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, 5499 struct atio_from_isp *atio, uint8_t ha_locked) 5500 { 5501 struct qla_hw_data *ha = vha->hw; 5502 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5503 int rc; 5504 unsigned long flags = 0; 5505 5506 if (unlikely(tgt == NULL)) { 5507 ql_dbg(ql_dbg_tgt, vha, 0x3064, 5508 "ATIO pkt, but no tgt (ha %p)", ha); 5509 return; 5510 } 5511 /* 5512 * In tgt_stop mode we also should allow all requests to pass. 5513 * Otherwise, some commands can stuck. 5514 */ 5515 5516 tgt->atio_irq_cmd_count++; 5517 5518 switch (atio->u.raw.entry_type) { 5519 case ATIO_TYPE7: 5520 if (unlikely(atio->u.isp24.exchange_addr == 5521 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 5522 ql_dbg(ql_dbg_io, vha, 0x3065, 5523 "qla_target(%d): ATIO_TYPE7 " 5524 "received with UNKNOWN exchange address, " 5525 "sending QUEUE_FULL\n", vha->vp_idx); 5526 if (!ha_locked) 5527 spin_lock_irqsave(&ha->hardware_lock, flags); 5528 qlt_send_busy(ha->base_qpair, atio, qla_sam_status); 5529 if (!ha_locked) 5530 spin_unlock_irqrestore(&ha->hardware_lock, 5531 flags); 5532 break; 5533 } 5534 5535 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { 5536 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair, 5537 atio, ha_locked); 5538 if (rc != 0) { 5539 tgt->atio_irq_cmd_count--; 5540 return; 5541 } 5542 rc = qlt_handle_cmd_for_atio(vha, atio); 5543 } else { 5544 rc = qlt_handle_task_mgmt(vha, atio); 5545 } 5546 if (unlikely(rc != 0)) { 5547 if (!ha_locked) 5548 spin_lock_irqsave(&ha->hardware_lock, flags); 5549 switch (rc) { 5550 case -ENODEV: 5551 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5552 "qla_target: Unable to send command to target\n"); 5553 break; 5554 case -EBADF: 5555 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5556 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5557 qlt_send_term_exchange(ha->base_qpair, NULL, 5558 atio, 1, 0); 5559 break; 5560 case -EBUSY: 5561 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5562 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5563 vha->vp_idx); 5564 qlt_send_busy(ha->base_qpair, atio, 5565 tc_sam_status); 5566 break; 5567 default: 5568 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5569 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5570 vha->vp_idx); 5571 qlt_send_busy(ha->base_qpair, atio, 5572 qla_sam_status); 5573 break; 5574 } 5575 if (!ha_locked) 5576 spin_unlock_irqrestore(&ha->hardware_lock, 5577 flags); 5578 } 5579 break; 5580 5581 case IMMED_NOTIFY_TYPE: 5582 { 5583 if (unlikely(atio->u.isp2x.entry_status != 0)) { 5584 ql_dbg(ql_dbg_tgt, vha, 0xe05b, 5585 "qla_target(%d): Received ATIO packet %x " 5586 "with error status %x\n", vha->vp_idx, 5587 atio->u.raw.entry_type, 5588 atio->u.isp2x.entry_status); 5589 break; 5590 } 5591 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO"); 5592 5593 if (!ha_locked) 5594 spin_lock_irqsave(&ha->hardware_lock, flags); 5595 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio); 5596 if (!ha_locked) 5597 spin_unlock_irqrestore(&ha->hardware_lock, flags); 5598 break; 5599 } 5600 5601 default: 5602 ql_dbg(ql_dbg_tgt, vha, 0xe05c, 5603 "qla_target(%d): Received unknown ATIO atio " 5604 "type %x\n", vha->vp_idx, atio->u.raw.entry_type); 5605 break; 5606 } 5607 5608 tgt->atio_irq_cmd_count--; 5609 } 5610 5611 /* ha->hardware_lock supposed to be held on entry */ 5612 /* called via callback from qla2xxx */ 5613 static void qlt_response_pkt(struct scsi_qla_host *vha, 5614 struct rsp_que *rsp, response_t *pkt) 5615 { 5616 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5617 5618 if (unlikely(tgt == NULL)) { 5619 ql_dbg(ql_dbg_tgt, vha, 0xe05d, 5620 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", 5621 vha->vp_idx, pkt->entry_type, vha->hw); 5622 return; 5623 } 5624 5625 /* 5626 * In tgt_stop mode we also should allow all requests to pass. 5627 * Otherwise, some commands can stuck. 5628 */ 5629 5630 switch (pkt->entry_type) { 5631 case CTIO_CRC2: 5632 case CTIO_TYPE7: 5633 { 5634 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 5635 qlt_do_ctio_completion(vha, rsp, entry->handle, 5636 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5637 entry); 5638 break; 5639 } 5640 5641 case ACCEPT_TGT_IO_TYPE: 5642 { 5643 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 5644 int rc; 5645 if (atio->u.isp2x.status != 5646 cpu_to_le16(ATIO_CDB_VALID)) { 5647 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 5648 "qla_target(%d): ATIO with error " 5649 "status %x received\n", vha->vp_idx, 5650 le16_to_cpu(atio->u.isp2x.status)); 5651 break; 5652 } 5653 5654 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1); 5655 if (rc != 0) 5656 return; 5657 5658 rc = qlt_handle_cmd_for_atio(vha, atio); 5659 if (unlikely(rc != 0)) { 5660 switch (rc) { 5661 case -ENODEV: 5662 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5663 "qla_target: Unable to send command to target\n"); 5664 break; 5665 case -EBADF: 5666 ql_dbg(ql_dbg_tgt, vha, 0xe05f, 5667 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n"); 5668 qlt_send_term_exchange(rsp->qpair, NULL, 5669 atio, 1, 0); 5670 break; 5671 case -EBUSY: 5672 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5673 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5674 vha->vp_idx); 5675 qlt_send_busy(rsp->qpair, atio, 5676 tc_sam_status); 5677 break; 5678 default: 5679 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5680 "qla_target(%d): Unable to send command to target, sending BUSY status\n", 5681 vha->vp_idx); 5682 qlt_send_busy(rsp->qpair, atio, 5683 qla_sam_status); 5684 break; 5685 } 5686 } 5687 } 5688 break; 5689 5690 case CONTINUE_TGT_IO_TYPE: 5691 { 5692 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5693 qlt_do_ctio_completion(vha, rsp, entry->handle, 5694 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5695 entry); 5696 break; 5697 } 5698 5699 case CTIO_A64_TYPE: 5700 { 5701 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 5702 qlt_do_ctio_completion(vha, rsp, entry->handle, 5703 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 5704 entry); 5705 break; 5706 } 5707 5708 case IMMED_NOTIFY_TYPE: 5709 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n"); 5710 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt); 5711 break; 5712 5713 case NOTIFY_ACK_TYPE: 5714 if (tgt->notify_ack_expected > 0) { 5715 struct nack_to_isp *entry = (struct nack_to_isp *)pkt; 5716 ql_dbg(ql_dbg_tgt, vha, 0xe036, 5717 "NOTIFY_ACK seq %08x status %x\n", 5718 le16_to_cpu(entry->u.isp2x.seq_id), 5719 le16_to_cpu(entry->u.isp2x.status)); 5720 tgt->notify_ack_expected--; 5721 if (entry->u.isp2x.status != 5722 cpu_to_le16(NOTIFY_ACK_SUCCESS)) { 5723 ql_dbg(ql_dbg_tgt, vha, 0xe061, 5724 "qla_target(%d): NOTIFY_ACK " 5725 "failed %x\n", vha->vp_idx, 5726 le16_to_cpu(entry->u.isp2x.status)); 5727 } 5728 } else { 5729 ql_dbg(ql_dbg_tgt, vha, 0xe062, 5730 "qla_target(%d): Unexpected NOTIFY_ACK received\n", 5731 vha->vp_idx); 5732 } 5733 break; 5734 5735 case ABTS_RECV_24XX: 5736 ql_dbg(ql_dbg_tgt, vha, 0xe037, 5737 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx); 5738 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt); 5739 break; 5740 5741 case ABTS_RESP_24XX: 5742 if (tgt->abts_resp_expected > 0) { 5743 struct abts_resp_from_24xx_fw *entry = 5744 (struct abts_resp_from_24xx_fw *)pkt; 5745 ql_dbg(ql_dbg_tgt, vha, 0xe038, 5746 "ABTS_RESP_24XX: compl_status %x\n", 5747 entry->compl_status); 5748 tgt->abts_resp_expected--; 5749 if (le16_to_cpu(entry->compl_status) != 5750 ABTS_RESP_COMPL_SUCCESS) { 5751 if ((entry->error_subcode1 == 0x1E) && 5752 (entry->error_subcode2 == 0)) { 5753 /* 5754 * We've got a race here: aborted 5755 * exchange not terminated, i.e. 5756 * response for the aborted command was 5757 * sent between the abort request was 5758 * received and processed. 5759 * Unfortunately, the firmware has a 5760 * silly requirement that all aborted 5761 * exchanges must be explicitely 5762 * terminated, otherwise it refuses to 5763 * send responses for the abort 5764 * requests. So, we have to 5765 * (re)terminate the exchange and retry 5766 * the abort response. 5767 */ 5768 qlt_24xx_retry_term_exchange(vha, 5769 entry); 5770 } else 5771 ql_dbg(ql_dbg_tgt, vha, 0xe063, 5772 "qla_target(%d): ABTS_RESP_24XX " 5773 "failed %x (subcode %x:%x)", 5774 vha->vp_idx, entry->compl_status, 5775 entry->error_subcode1, 5776 entry->error_subcode2); 5777 } 5778 } else { 5779 ql_dbg(ql_dbg_tgt, vha, 0xe064, 5780 "qla_target(%d): Unexpected ABTS_RESP_24XX " 5781 "received\n", vha->vp_idx); 5782 } 5783 break; 5784 5785 default: 5786 ql_dbg(ql_dbg_tgt, vha, 0xe065, 5787 "qla_target(%d): Received unknown response pkt " 5788 "type %x\n", vha->vp_idx, pkt->entry_type); 5789 break; 5790 } 5791 5792 } 5793 5794 /* 5795 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 5796 */ 5797 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, 5798 uint16_t *mailbox) 5799 { 5800 struct qla_hw_data *ha = vha->hw; 5801 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5802 int login_code; 5803 5804 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped) 5805 return; 5806 5807 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) && 5808 IS_QLA2100(ha)) 5809 return; 5810 /* 5811 * In tgt_stop mode we also should allow all requests to pass. 5812 * Otherwise, some commands can stuck. 5813 */ 5814 5815 5816 switch (code) { 5817 case MBA_RESET: /* Reset */ 5818 case MBA_SYSTEM_ERR: /* System Error */ 5819 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 5820 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 5821 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a, 5822 "qla_target(%d): System error async event %#x " 5823 "occurred", vha->vp_idx, code); 5824 break; 5825 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */ 5826 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 5827 break; 5828 5829 case MBA_LOOP_UP: 5830 { 5831 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, 5832 "qla_target(%d): Async LOOP_UP occurred " 5833 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, 5834 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5835 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5836 if (tgt->link_reinit_iocb_pending) { 5837 qlt_send_notify_ack(ha->base_qpair, 5838 (void *)&tgt->link_reinit_iocb, 5839 0, 0, 0, 0, 0, 0); 5840 tgt->link_reinit_iocb_pending = 0; 5841 } 5842 break; 5843 } 5844 5845 case MBA_LIP_OCCURRED: 5846 case MBA_LOOP_DOWN: 5847 case MBA_LIP_RESET: 5848 case MBA_RSCN_UPDATE: 5849 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, 5850 "qla_target(%d): Async event %#x occurred " 5851 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5852 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5853 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5854 break; 5855 5856 case MBA_REJECTED_FCP_CMD: 5857 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, 5858 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", 5859 vha->vp_idx, 5860 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5861 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5862 5863 if (le16_to_cpu(mailbox[3]) == 1) { 5864 /* exchange starvation. */ 5865 vha->hw->exch_starvation++; 5866 if (vha->hw->exch_starvation > 5) { 5867 ql_log(ql_log_warn, vha, 0xd03a, 5868 "Exchange starvation-. Resetting RISC\n"); 5869 5870 vha->hw->exch_starvation = 0; 5871 if (IS_P3P_TYPE(vha->hw)) 5872 set_bit(FCOE_CTX_RESET_NEEDED, 5873 &vha->dpc_flags); 5874 else 5875 set_bit(ISP_ABORT_NEEDED, 5876 &vha->dpc_flags); 5877 qla2xxx_wake_dpc(vha); 5878 } 5879 } 5880 break; 5881 5882 case MBA_PORT_UPDATE: 5883 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d, 5884 "qla_target(%d): Port update async event %#x " 5885 "occurred: updating the ports database (m[0]=%x, m[1]=%x, " 5886 "m[2]=%x, m[3]=%x)", vha->vp_idx, code, 5887 le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), 5888 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); 5889 5890 login_code = le16_to_cpu(mailbox[2]); 5891 if (login_code == 0x4) { 5892 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, 5893 "Async MB 2: Got PLOGI Complete\n"); 5894 vha->hw->exch_starvation = 0; 5895 } else if (login_code == 0x7) 5896 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f, 5897 "Async MB 2: Port Logged Out\n"); 5898 break; 5899 default: 5900 break; 5901 } 5902 5903 } 5904 5905 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, 5906 uint16_t loop_id) 5907 { 5908 fc_port_t *fcport, *tfcp, *del; 5909 int rc; 5910 unsigned long flags; 5911 u8 newfcport = 0; 5912 5913 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); 5914 if (!fcport) { 5915 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, 5916 "qla_target(%d): Allocation of tmp FC port failed", 5917 vha->vp_idx); 5918 return NULL; 5919 } 5920 5921 fcport->loop_id = loop_id; 5922 5923 rc = qla24xx_gpdb_wait(vha, fcport, 0); 5924 if (rc != QLA_SUCCESS) { 5925 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, 5926 "qla_target(%d): Failed to retrieve fcport " 5927 "information -- get_port_database() returned %x " 5928 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id); 5929 kfree(fcport); 5930 return NULL; 5931 } 5932 5933 del = NULL; 5934 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 5935 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1); 5936 5937 if (tfcp) { 5938 tfcp->d_id = fcport->d_id; 5939 tfcp->port_type = fcport->port_type; 5940 tfcp->supported_classes = fcport->supported_classes; 5941 tfcp->flags |= fcport->flags; 5942 tfcp->scan_state = QLA_FCPORT_FOUND; 5943 5944 del = fcport; 5945 fcport = tfcp; 5946 } else { 5947 if (vha->hw->current_topology == ISP_CFG_F) 5948 fcport->flags |= FCF_FABRIC_DEVICE; 5949 5950 list_add_tail(&fcport->list, &vha->vp_fcports); 5951 if (!IS_SW_RESV_ADDR(fcport->d_id)) 5952 vha->fcport_count++; 5953 fcport->login_gen++; 5954 fcport->disc_state = DSC_LOGIN_COMPLETE; 5955 fcport->login_succ = 1; 5956 newfcport = 1; 5957 } 5958 5959 fcport->deleted = 0; 5960 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); 5961 5962 switch (vha->host->active_mode) { 5963 case MODE_INITIATOR: 5964 case MODE_DUAL: 5965 if (newfcport) { 5966 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) { 5967 ql_dbg(ql_dbg_disc, vha, 0x20fe, 5968 "%s %d %8phC post upd_fcport fcp_cnt %d\n", 5969 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5970 qla24xx_post_upd_fcport_work(vha, fcport); 5971 } else { 5972 ql_dbg(ql_dbg_disc, vha, 0x20ff, 5973 "%s %d %8phC post gpsc fcp_cnt %d\n", 5974 __func__, __LINE__, fcport->port_name, vha->fcport_count); 5975 qla24xx_post_gpsc_work(vha, fcport); 5976 } 5977 } 5978 break; 5979 5980 case MODE_TARGET: 5981 default: 5982 break; 5983 } 5984 if (del) 5985 qla2x00_free_fcport(del); 5986 5987 return fcport; 5988 } 5989 5990 /* Must be called under tgt_mutex */ 5991 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, 5992 uint8_t *s_id) 5993 { 5994 struct fc_port *sess = NULL; 5995 fc_port_t *fcport = NULL; 5996 int rc, global_resets; 5997 uint16_t loop_id = 0; 5998 5999 if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { 6000 /* 6001 * This is Domain Controller, so it should be 6002 * OK to drop SCSI commands from it. 6003 */ 6004 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, 6005 "Unable to find initiator with S_ID %x:%x:%x", 6006 s_id[0], s_id[1], s_id[2]); 6007 return NULL; 6008 } 6009 6010 mutex_lock(&vha->vha_tgt.tgt_mutex); 6011 6012 retry: 6013 global_resets = 6014 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); 6015 6016 rc = qla24xx_get_loop_id(vha, s_id, &loop_id); 6017 if (rc != 0) { 6018 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6019 6020 ql_log(ql_log_info, vha, 0xf071, 6021 "qla_target(%d): Unable to find " 6022 "initiator with S_ID %x:%x:%x", 6023 vha->vp_idx, s_id[0], s_id[1], 6024 s_id[2]); 6025 6026 if (rc == -ENOENT) { 6027 qlt_port_logo_t logo; 6028 sid_to_portid(s_id, &logo.id); 6029 logo.cmd_count = 1; 6030 qlt_send_first_logo(vha, &logo); 6031 } 6032 6033 return NULL; 6034 } 6035 6036 fcport = qlt_get_port_database(vha, loop_id); 6037 if (!fcport) { 6038 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6039 return NULL; 6040 } 6041 6042 if (global_resets != 6043 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) { 6044 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043, 6045 "qla_target(%d): global reset during session discovery " 6046 "(counter was %d, new %d), retrying", vha->vp_idx, 6047 global_resets, 6048 atomic_read(&vha->vha_tgt. 6049 qla_tgt->tgt_global_resets_count)); 6050 goto retry; 6051 } 6052 6053 sess = qlt_create_sess(vha, fcport, true); 6054 6055 mutex_unlock(&vha->vha_tgt.tgt_mutex); 6056 6057 return sess; 6058 } 6059 6060 static void qlt_abort_work(struct qla_tgt *tgt, 6061 struct qla_tgt_sess_work_param *prm) 6062 { 6063 struct scsi_qla_host *vha = tgt->vha; 6064 struct qla_hw_data *ha = vha->hw; 6065 struct fc_port *sess = NULL; 6066 unsigned long flags = 0, flags2 = 0; 6067 uint32_t be_s_id; 6068 uint8_t s_id[3]; 6069 int rc; 6070 6071 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6072 6073 if (tgt->tgt_stop) 6074 goto out_term2; 6075 6076 s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; 6077 s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; 6078 s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; 6079 6080 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 6081 (unsigned char *)&be_s_id); 6082 if (!sess) { 6083 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6084 6085 sess = qlt_make_local_sess(vha, s_id); 6086 /* sess has got an extra creation ref */ 6087 6088 spin_lock_irqsave(&ha->tgt.sess_lock, flags2); 6089 if (!sess) 6090 goto out_term2; 6091 } else { 6092 if (sess->deleted) { 6093 sess = NULL; 6094 goto out_term2; 6095 } 6096 6097 if (!kref_get_unless_zero(&sess->sess_kref)) { 6098 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c, 6099 "%s: kref_get fail %8phC \n", 6100 __func__, sess->port_name); 6101 sess = NULL; 6102 goto out_term2; 6103 } 6104 } 6105 6106 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); 6107 ha->tgt.tgt_ops->put_sess(sess); 6108 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6109 6110 if (rc != 0) 6111 goto out_term; 6112 return; 6113 6114 out_term2: 6115 if (sess) 6116 ha->tgt.tgt_ops->put_sess(sess); 6117 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); 6118 6119 out_term: 6120 spin_lock_irqsave(&ha->hardware_lock, flags); 6121 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, 6122 FCP_TMF_REJECTED, false); 6123 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6124 } 6125 6126 static void qlt_tmr_work(struct qla_tgt *tgt, 6127 struct qla_tgt_sess_work_param *prm) 6128 { 6129 struct atio_from_isp *a = &prm->tm_iocb2; 6130 struct scsi_qla_host *vha = tgt->vha; 6131 struct qla_hw_data *ha = vha->hw; 6132 struct fc_port *sess = NULL; 6133 unsigned long flags; 6134 uint8_t *s_id = NULL; /* to hide compiler warnings */ 6135 int rc; 6136 u64 unpacked_lun; 6137 int fn; 6138 void *iocb; 6139 6140 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6141 6142 if (tgt->tgt_stop) 6143 goto out_term2; 6144 6145 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; 6146 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); 6147 if (!sess) { 6148 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6149 6150 sess = qlt_make_local_sess(vha, s_id); 6151 /* sess has got an extra creation ref */ 6152 6153 spin_lock_irqsave(&ha->tgt.sess_lock, flags); 6154 if (!sess) 6155 goto out_term2; 6156 } else { 6157 if (sess->deleted) { 6158 sess = NULL; 6159 goto out_term2; 6160 } 6161 6162 if (!kref_get_unless_zero(&sess->sess_kref)) { 6163 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, 6164 "%s: kref_get fail %8phC\n", 6165 __func__, sess->port_name); 6166 sess = NULL; 6167 goto out_term2; 6168 } 6169 } 6170 6171 iocb = a; 6172 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags; 6173 unpacked_lun = 6174 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun); 6175 6176 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 6177 ha->tgt.tgt_ops->put_sess(sess); 6178 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6179 6180 if (rc != 0) 6181 goto out_term; 6182 return; 6183 6184 out_term2: 6185 if (sess) 6186 ha->tgt.tgt_ops->put_sess(sess); 6187 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 6188 out_term: 6189 qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); 6190 } 6191 6192 static void qlt_sess_work_fn(struct work_struct *work) 6193 { 6194 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work); 6195 struct scsi_qla_host *vha = tgt->vha; 6196 unsigned long flags; 6197 6198 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); 6199 6200 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6201 while (!list_empty(&tgt->sess_works_list)) { 6202 struct qla_tgt_sess_work_param *prm = list_entry( 6203 tgt->sess_works_list.next, typeof(*prm), 6204 sess_works_list_entry); 6205 6206 /* 6207 * This work can be scheduled on several CPUs at time, so we 6208 * must delete the entry to eliminate double processing 6209 */ 6210 list_del(&prm->sess_works_list_entry); 6211 6212 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6213 6214 switch (prm->type) { 6215 case QLA_TGT_SESS_WORK_ABORT: 6216 qlt_abort_work(tgt, prm); 6217 break; 6218 case QLA_TGT_SESS_WORK_TM: 6219 qlt_tmr_work(tgt, prm); 6220 break; 6221 default: 6222 BUG_ON(1); 6223 break; 6224 } 6225 6226 spin_lock_irqsave(&tgt->sess_work_lock, flags); 6227 6228 kfree(prm); 6229 } 6230 spin_unlock_irqrestore(&tgt->sess_work_lock, flags); 6231 } 6232 6233 /* Must be called under tgt_host_action_mutex */ 6234 int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) 6235 { 6236 struct qla_tgt *tgt; 6237 int rc, i; 6238 struct qla_qpair_hint *h; 6239 6240 if (!QLA_TGT_MODE_ENABLED()) 6241 return 0; 6242 6243 if (!IS_TGT_MODE_CAPABLE(ha)) { 6244 ql_log(ql_log_warn, base_vha, 0xe070, 6245 "This adapter does not support target mode.\n"); 6246 return 0; 6247 } 6248 6249 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, 6250 "Registering target for host %ld(%p).\n", base_vha->host_no, ha); 6251 6252 BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); 6253 6254 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL); 6255 if (!tgt) { 6256 ql_dbg(ql_dbg_tgt, base_vha, 0xe066, 6257 "Unable to allocate struct qla_tgt\n"); 6258 return -ENOMEM; 6259 } 6260 6261 tgt->qphints = kcalloc(ha->max_qpairs + 1, 6262 sizeof(struct qla_qpair_hint), 6263 GFP_KERNEL); 6264 if (!tgt->qphints) { 6265 kfree(tgt); 6266 ql_log(ql_log_warn, base_vha, 0x0197, 6267 "Unable to allocate qpair hints.\n"); 6268 return -ENOMEM; 6269 } 6270 6271 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET)) 6272 base_vha->host->hostt->supported_mode |= MODE_TARGET; 6273 6274 rc = btree_init64(&tgt->lun_qpair_map); 6275 if (rc) { 6276 kfree(tgt->qphints); 6277 kfree(tgt); 6278 ql_log(ql_log_info, base_vha, 0x0198, 6279 "Unable to initialize lun_qpair_map btree\n"); 6280 return -EIO; 6281 } 6282 h = &tgt->qphints[0]; 6283 h->qpair = ha->base_qpair; 6284 INIT_LIST_HEAD(&h->hint_elem); 6285 h->cpuid = ha->base_qpair->cpuid; 6286 list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list); 6287 6288 for (i = 0; i < ha->max_qpairs; i++) { 6289 unsigned long flags; 6290 6291 struct qla_qpair *qpair = ha->queue_pair_map[i]; 6292 h = &tgt->qphints[i + 1]; 6293 INIT_LIST_HEAD(&h->hint_elem); 6294 if (qpair) { 6295 h->qpair = qpair; 6296 spin_lock_irqsave(qpair->qp_lock_ptr, flags); 6297 list_add_tail(&h->hint_elem, &qpair->hints_list); 6298 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); 6299 h->cpuid = qpair->cpuid; 6300 } 6301 } 6302 6303 tgt->ha = ha; 6304 tgt->vha = base_vha; 6305 init_waitqueue_head(&tgt->waitQ); 6306 INIT_LIST_HEAD(&tgt->del_sess_list); 6307 spin_lock_init(&tgt->sess_work_lock); 6308 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn); 6309 INIT_LIST_HEAD(&tgt->sess_works_list); 6310 atomic_set(&tgt->tgt_global_resets_count, 0); 6311 6312 base_vha->vha_tgt.qla_tgt = tgt; 6313 6314 ql_dbg(ql_dbg_tgt, base_vha, 0xe067, 6315 "qla_target(%d): using 64 Bit PCI addressing", 6316 base_vha->vp_idx); 6317 /* 3 is reserved */ 6318 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3); 6319 6320 mutex_lock(&qla_tgt_mutex); 6321 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); 6322 mutex_unlock(&qla_tgt_mutex); 6323 6324 if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) 6325 ha->tgt.tgt_ops->add_target(base_vha); 6326 6327 return 0; 6328 } 6329 6330 /* Must be called under tgt_host_action_mutex */ 6331 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) 6332 { 6333 if (!vha->vha_tgt.qla_tgt) 6334 return 0; 6335 6336 if (vha->fc_vport) { 6337 qlt_release(vha->vha_tgt.qla_tgt); 6338 return 0; 6339 } 6340 6341 /* free left over qfull cmds */ 6342 qlt_init_term_exchange(vha); 6343 6344 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", 6345 vha->host_no, ha); 6346 qlt_release(vha->vha_tgt.qla_tgt); 6347 6348 return 0; 6349 } 6350 6351 void qlt_remove_target_resources(struct qla_hw_data *ha) 6352 { 6353 struct scsi_qla_host *node; 6354 u32 key = 0; 6355 6356 btree_for_each_safe32(&ha->tgt.host_map, key, node) 6357 btree_remove32(&ha->tgt.host_map, key); 6358 6359 btree_destroy32(&ha->tgt.host_map); 6360 } 6361 6362 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, 6363 unsigned char *b) 6364 { 6365 int i; 6366 6367 pr_debug("qla2xxx HW vha->node_name: "); 6368 for (i = 0; i < WWN_SIZE; i++) 6369 pr_debug("%02x ", vha->node_name[i]); 6370 pr_debug("\n"); 6371 pr_debug("qla2xxx HW vha->port_name: "); 6372 for (i = 0; i < WWN_SIZE; i++) 6373 pr_debug("%02x ", vha->port_name[i]); 6374 pr_debug("\n"); 6375 6376 pr_debug("qla2xxx passed configfs WWPN: "); 6377 put_unaligned_be64(wwpn, b); 6378 for (i = 0; i < WWN_SIZE; i++) 6379 pr_debug("%02x ", b[i]); 6380 pr_debug("\n"); 6381 } 6382 6383 /** 6384 * qla_tgt_lport_register - register lport with external module 6385 * 6386 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data 6387 * @phys_wwpn: 6388 * @npiv_wwpn: 6389 * @npiv_wwnn: 6390 * @callback: lport initialization callback for tcm_qla2xxx code 6391 */ 6392 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, 6393 u64 npiv_wwpn, u64 npiv_wwnn, 6394 int (*callback)(struct scsi_qla_host *, void *, u64, u64)) 6395 { 6396 struct qla_tgt *tgt; 6397 struct scsi_qla_host *vha; 6398 struct qla_hw_data *ha; 6399 struct Scsi_Host *host; 6400 unsigned long flags; 6401 int rc; 6402 u8 b[WWN_SIZE]; 6403 6404 mutex_lock(&qla_tgt_mutex); 6405 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) { 6406 vha = tgt->vha; 6407 ha = vha->hw; 6408 6409 host = vha->host; 6410 if (!host) 6411 continue; 6412 6413 if (!(host->hostt->supported_mode & MODE_TARGET)) 6414 continue; 6415 6416 spin_lock_irqsave(&ha->hardware_lock, flags); 6417 if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) { 6418 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n", 6419 host->host_no); 6420 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6421 continue; 6422 } 6423 if (tgt->tgt_stop) { 6424 pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", 6425 host->host_no); 6426 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6427 continue; 6428 } 6429 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6430 6431 if (!scsi_host_get(host)) { 6432 ql_dbg(ql_dbg_tgt, vha, 0xe068, 6433 "Unable to scsi_host_get() for" 6434 " qla2xxx scsi_host\n"); 6435 continue; 6436 } 6437 qlt_lport_dump(vha, phys_wwpn, b); 6438 6439 if (memcmp(vha->port_name, b, WWN_SIZE)) { 6440 scsi_host_put(host); 6441 continue; 6442 } 6443 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); 6444 if (rc != 0) 6445 scsi_host_put(host); 6446 6447 mutex_unlock(&qla_tgt_mutex); 6448 return rc; 6449 } 6450 mutex_unlock(&qla_tgt_mutex); 6451 6452 return -ENODEV; 6453 } 6454 EXPORT_SYMBOL(qlt_lport_register); 6455 6456 /** 6457 * qla_tgt_lport_deregister - Degister lport 6458 * 6459 * @vha: Registered scsi_qla_host pointer 6460 */ 6461 void qlt_lport_deregister(struct scsi_qla_host *vha) 6462 { 6463 struct qla_hw_data *ha = vha->hw; 6464 struct Scsi_Host *sh = vha->host; 6465 /* 6466 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data 6467 */ 6468 vha->vha_tgt.target_lport_ptr = NULL; 6469 ha->tgt.tgt_ops = NULL; 6470 /* 6471 * Release the Scsi_Host reference for the underlying qla2xxx host 6472 */ 6473 scsi_host_put(sh); 6474 } 6475 EXPORT_SYMBOL(qlt_lport_deregister); 6476 6477 /* Must be called under HW lock */ 6478 static void qlt_set_mode(struct scsi_qla_host *vha) 6479 { 6480 switch (ql2x_ini_mode) { 6481 case QLA2XXX_INI_MODE_DISABLED: 6482 case QLA2XXX_INI_MODE_EXCLUSIVE: 6483 vha->host->active_mode = MODE_TARGET; 6484 break; 6485 case QLA2XXX_INI_MODE_ENABLED: 6486 vha->host->active_mode = MODE_UNKNOWN; 6487 break; 6488 case QLA2XXX_INI_MODE_DUAL: 6489 vha->host->active_mode = MODE_DUAL; 6490 break; 6491 default: 6492 break; 6493 } 6494 } 6495 6496 /* Must be called under HW lock */ 6497 static void qlt_clear_mode(struct scsi_qla_host *vha) 6498 { 6499 switch (ql2x_ini_mode) { 6500 case QLA2XXX_INI_MODE_DISABLED: 6501 vha->host->active_mode = MODE_UNKNOWN; 6502 break; 6503 case QLA2XXX_INI_MODE_EXCLUSIVE: 6504 vha->host->active_mode = MODE_INITIATOR; 6505 break; 6506 case QLA2XXX_INI_MODE_ENABLED: 6507 case QLA2XXX_INI_MODE_DUAL: 6508 vha->host->active_mode = MODE_INITIATOR; 6509 break; 6510 default: 6511 break; 6512 } 6513 } 6514 6515 /* 6516 * qla_tgt_enable_vha - NO LOCK HELD 6517 * 6518 * host_reset, bring up w/ Target Mode Enabled 6519 */ 6520 void 6521 qlt_enable_vha(struct scsi_qla_host *vha) 6522 { 6523 struct qla_hw_data *ha = vha->hw; 6524 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6525 unsigned long flags; 6526 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 6527 6528 if (!tgt) { 6529 ql_dbg(ql_dbg_tgt, vha, 0xe069, 6530 "Unable to locate qla_tgt pointer from" 6531 " struct qla_hw_data\n"); 6532 dump_stack(); 6533 return; 6534 } 6535 6536 spin_lock_irqsave(&ha->hardware_lock, flags); 6537 tgt->tgt_stopped = 0; 6538 qlt_set_mode(vha); 6539 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6540 6541 if (vha->vp_idx) { 6542 qla24xx_disable_vp(vha); 6543 qla24xx_enable_vp(vha); 6544 } else { 6545 set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); 6546 qla2xxx_wake_dpc(base_vha); 6547 qla2x00_wait_for_hba_online(base_vha); 6548 } 6549 } 6550 EXPORT_SYMBOL(qlt_enable_vha); 6551 6552 /* 6553 * qla_tgt_disable_vha - NO LOCK HELD 6554 * 6555 * Disable Target Mode and reset the adapter 6556 */ 6557 static void qlt_disable_vha(struct scsi_qla_host *vha) 6558 { 6559 struct qla_hw_data *ha = vha->hw; 6560 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 6561 unsigned long flags; 6562 6563 if (!tgt) { 6564 ql_dbg(ql_dbg_tgt, vha, 0xe06a, 6565 "Unable to locate qla_tgt pointer from" 6566 " struct qla_hw_data\n"); 6567 dump_stack(); 6568 return; 6569 } 6570 6571 spin_lock_irqsave(&ha->hardware_lock, flags); 6572 qlt_clear_mode(vha); 6573 spin_unlock_irqrestore(&ha->hardware_lock, flags); 6574 6575 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 6576 qla2xxx_wake_dpc(vha); 6577 qla2x00_wait_for_hba_online(vha); 6578 } 6579 6580 /* 6581 * Called from qla_init.c:qla24xx_vport_create() contex to setup 6582 * the target mode specific struct scsi_qla_host and struct qla_hw_data 6583 * members. 6584 */ 6585 void 6586 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) 6587 { 6588 vha->vha_tgt.qla_tgt = NULL; 6589 6590 mutex_init(&vha->vha_tgt.tgt_mutex); 6591 mutex_init(&vha->vha_tgt.tgt_host_action_mutex); 6592 6593 qlt_clear_mode(vha); 6594 6595 /* 6596 * NOTE: Currently the value is kept the same for <24xx and 6597 * >=24xx ISPs. If it is necessary to change it, 6598 * the check should be added for specific ISPs, 6599 * assigning the value appropriately. 6600 */ 6601 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; 6602 6603 qlt_add_target(ha, vha); 6604 } 6605 6606 u8 6607 qlt_rff_id(struct scsi_qla_host *vha) 6608 { 6609 u8 fc4_feature = 0; 6610 /* 6611 * FC-4 Feature bit 0 indicates target functionality to the name server. 6612 */ 6613 if (qla_tgt_mode_enabled(vha)) { 6614 fc4_feature = BIT_0; 6615 } else if (qla_ini_mode_enabled(vha)) { 6616 fc4_feature = BIT_1; 6617 } else if (qla_dual_mode_enabled(vha)) 6618 fc4_feature = BIT_0 | BIT_1; 6619 6620 return fc4_feature; 6621 } 6622 6623 /* 6624 * qlt_init_atio_q_entries() - Initializes ATIO queue entries. 6625 * @ha: HA context 6626 * 6627 * Beginning of ATIO ring has initialization control block already built 6628 * by nvram config routine. 6629 * 6630 * Returns 0 on success. 6631 */ 6632 void 6633 qlt_init_atio_q_entries(struct scsi_qla_host *vha) 6634 { 6635 struct qla_hw_data *ha = vha->hw; 6636 uint16_t cnt; 6637 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring; 6638 6639 if (qla_ini_mode_enabled(vha)) 6640 return; 6641 6642 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { 6643 pkt->u.raw.signature = ATIO_PROCESSED; 6644 pkt++; 6645 } 6646 6647 } 6648 6649 /* 6650 * qlt_24xx_process_atio_queue() - Process ATIO queue entries. 6651 * @ha: SCSI driver HA context 6652 */ 6653 void 6654 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) 6655 { 6656 struct qla_hw_data *ha = vha->hw; 6657 struct atio_from_isp *pkt; 6658 int cnt, i; 6659 6660 if (!ha->flags.fw_started) 6661 return; 6662 6663 while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || 6664 fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { 6665 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6666 cnt = pkt->u.raw.entry_count; 6667 6668 if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { 6669 /* 6670 * This packet is corrupted. The header + payload 6671 * can not be trusted. There is no point in passing 6672 * it further up. 6673 */ 6674 ql_log(ql_log_warn, vha, 0xd03c, 6675 "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", 6676 pkt->u.isp24.fcp_hdr.s_id, 6677 be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), 6678 le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); 6679 6680 adjust_corrupted_atio(pkt); 6681 qlt_send_term_exchange(ha->base_qpair, NULL, pkt, 6682 ha_locked, 0); 6683 } else { 6684 qlt_24xx_atio_pkt_all_vps(vha, 6685 (struct atio_from_isp *)pkt, ha_locked); 6686 } 6687 6688 for (i = 0; i < cnt; i++) { 6689 ha->tgt.atio_ring_index++; 6690 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) { 6691 ha->tgt.atio_ring_index = 0; 6692 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring; 6693 } else 6694 ha->tgt.atio_ring_ptr++; 6695 6696 pkt->u.raw.signature = ATIO_PROCESSED; 6697 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; 6698 } 6699 wmb(); 6700 } 6701 6702 /* Adjust ring index */ 6703 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6704 } 6705 6706 void 6707 qlt_24xx_config_rings(struct scsi_qla_host *vha) 6708 { 6709 struct qla_hw_data *ha = vha->hw; 6710 struct qla_msix_entry *msix = &ha->msix_entries[2]; 6711 struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb; 6712 6713 if (!QLA_TGT_MODE_ENABLED()) 6714 return; 6715 6716 WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); 6717 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); 6718 RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); 6719 6720 if (ha->flags.msix_enabled) { 6721 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6722 if (IS_QLA2071(ha)) { 6723 /* 4 ports Baker: Enable Interrupt Handshake */ 6724 icb->msix_atio = 0; 6725 icb->firmware_options_2 |= BIT_26; 6726 } else { 6727 icb->msix_atio = cpu_to_le16(msix->entry); 6728 icb->firmware_options_2 &= ~BIT_26; 6729 } 6730 ql_dbg(ql_dbg_init, vha, 0xf072, 6731 "Registering ICB vector 0x%x for atio que.\n", 6732 msix->entry); 6733 } 6734 } else { 6735 /* INTx|MSI */ 6736 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 6737 icb->msix_atio = 0; 6738 icb->firmware_options_2 |= BIT_26; 6739 ql_dbg(ql_dbg_init, vha, 0xf072, 6740 "%s: Use INTx for ATIOQ.\n", __func__); 6741 } 6742 } 6743 } 6744 6745 void 6746 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv) 6747 { 6748 struct qla_hw_data *ha = vha->hw; 6749 u32 tmp; 6750 6751 if (!QLA_TGT_MODE_ENABLED()) 6752 return; 6753 6754 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6755 if (!ha->tgt.saved_set) { 6756 /* We save only once */ 6757 ha->tgt.saved_exchange_count = nv->exchange_count; 6758 ha->tgt.saved_firmware_options_1 = 6759 nv->firmware_options_1; 6760 ha->tgt.saved_firmware_options_2 = 6761 nv->firmware_options_2; 6762 ha->tgt.saved_firmware_options_3 = 6763 nv->firmware_options_3; 6764 ha->tgt.saved_set = 1; 6765 } 6766 6767 if (qla_tgt_mode_enabled(vha)) 6768 nv->exchange_count = cpu_to_le16(0xFFFF); 6769 else /* dual */ 6770 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 6771 6772 /* Enable target mode */ 6773 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6774 6775 /* Disable ini mode, if requested */ 6776 if (qla_tgt_mode_enabled(vha)) 6777 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6778 6779 /* Disable Full Login after LIP */ 6780 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6781 /* Enable initial LIP */ 6782 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6783 if (ql2xtgt_tape_enable) 6784 /* Enable FC Tape support */ 6785 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6786 else 6787 /* Disable FC Tape support */ 6788 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6789 6790 /* Disable Full Login after LIP */ 6791 nv->host_p &= cpu_to_le32(~BIT_10); 6792 6793 /* 6794 * clear BIT 15 explicitly as we have seen at least 6795 * a couple of instances where this was set and this 6796 * was causing the firmware to not be initialized. 6797 */ 6798 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6799 /* Enable target PRLI control */ 6800 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6801 6802 if (IS_QLA25XX(ha)) { 6803 /* Change Loop-prefer to Pt-Pt */ 6804 tmp = ~(BIT_4|BIT_5|BIT_6); 6805 nv->firmware_options_2 &= cpu_to_le32(tmp); 6806 tmp = P2P << 4; 6807 nv->firmware_options_2 |= cpu_to_le32(tmp); 6808 } 6809 } else { 6810 if (ha->tgt.saved_set) { 6811 nv->exchange_count = ha->tgt.saved_exchange_count; 6812 nv->firmware_options_1 = 6813 ha->tgt.saved_firmware_options_1; 6814 nv->firmware_options_2 = 6815 ha->tgt.saved_firmware_options_2; 6816 nv->firmware_options_3 = 6817 ha->tgt.saved_firmware_options_3; 6818 } 6819 return; 6820 } 6821 6822 if (ha->base_qpair->enable_class_2) { 6823 if (vha->flags.init_done) 6824 fc_host_supported_classes(vha->host) = 6825 FC_COS_CLASS2 | FC_COS_CLASS3; 6826 6827 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6828 } else { 6829 if (vha->flags.init_done) 6830 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6831 6832 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6833 } 6834 } 6835 6836 void 6837 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha, 6838 struct init_cb_24xx *icb) 6839 { 6840 struct qla_hw_data *ha = vha->hw; 6841 6842 if (!QLA_TGT_MODE_ENABLED()) 6843 return; 6844 6845 if (ha->tgt.node_name_set) { 6846 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6847 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6848 } 6849 6850 /* disable ZIO at start time. */ 6851 if (!vha->flags.init_done) { 6852 uint32_t tmp; 6853 tmp = le32_to_cpu(icb->firmware_options_2); 6854 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6855 icb->firmware_options_2 = cpu_to_le32(tmp); 6856 } 6857 } 6858 6859 void 6860 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv) 6861 { 6862 struct qla_hw_data *ha = vha->hw; 6863 u32 tmp; 6864 6865 if (!QLA_TGT_MODE_ENABLED()) 6866 return; 6867 6868 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { 6869 if (!ha->tgt.saved_set) { 6870 /* We save only once */ 6871 ha->tgt.saved_exchange_count = nv->exchange_count; 6872 ha->tgt.saved_firmware_options_1 = 6873 nv->firmware_options_1; 6874 ha->tgt.saved_firmware_options_2 = 6875 nv->firmware_options_2; 6876 ha->tgt.saved_firmware_options_3 = 6877 nv->firmware_options_3; 6878 ha->tgt.saved_set = 1; 6879 } 6880 6881 if (qla_tgt_mode_enabled(vha)) 6882 nv->exchange_count = cpu_to_le16(0xFFFF); 6883 else /* dual */ 6884 nv->exchange_count = cpu_to_le16(ql2xexchoffld); 6885 6886 /* Enable target mode */ 6887 nv->firmware_options_1 |= cpu_to_le32(BIT_4); 6888 6889 /* Disable ini mode, if requested */ 6890 if (qla_tgt_mode_enabled(vha)) 6891 nv->firmware_options_1 |= cpu_to_le32(BIT_5); 6892 /* Disable Full Login after LIP */ 6893 nv->firmware_options_1 &= cpu_to_le32(~BIT_13); 6894 /* Enable initial LIP */ 6895 nv->firmware_options_1 &= cpu_to_le32(~BIT_9); 6896 /* 6897 * clear BIT 15 explicitly as we have seen at 6898 * least a couple of instances where this was set 6899 * and this was causing the firmware to not be 6900 * initialized. 6901 */ 6902 nv->firmware_options_1 &= cpu_to_le32(~BIT_15); 6903 if (ql2xtgt_tape_enable) 6904 /* Enable FC tape support */ 6905 nv->firmware_options_2 |= cpu_to_le32(BIT_12); 6906 else 6907 /* Disable FC tape support */ 6908 nv->firmware_options_2 &= cpu_to_le32(~BIT_12); 6909 6910 /* Disable Full Login after LIP */ 6911 nv->host_p &= cpu_to_le32(~BIT_10); 6912 /* Enable target PRLI control */ 6913 nv->firmware_options_2 |= cpu_to_le32(BIT_14); 6914 6915 /* Change Loop-prefer to Pt-Pt */ 6916 tmp = ~(BIT_4|BIT_5|BIT_6); 6917 nv->firmware_options_2 &= cpu_to_le32(tmp); 6918 tmp = P2P << 4; 6919 nv->firmware_options_2 |= cpu_to_le32(tmp); 6920 } else { 6921 if (ha->tgt.saved_set) { 6922 nv->exchange_count = ha->tgt.saved_exchange_count; 6923 nv->firmware_options_1 = 6924 ha->tgt.saved_firmware_options_1; 6925 nv->firmware_options_2 = 6926 ha->tgt.saved_firmware_options_2; 6927 nv->firmware_options_3 = 6928 ha->tgt.saved_firmware_options_3; 6929 } 6930 return; 6931 } 6932 6933 if (ha->base_qpair->enable_class_2) { 6934 if (vha->flags.init_done) 6935 fc_host_supported_classes(vha->host) = 6936 FC_COS_CLASS2 | FC_COS_CLASS3; 6937 6938 nv->firmware_options_2 |= cpu_to_le32(BIT_8); 6939 } else { 6940 if (vha->flags.init_done) 6941 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 6942 6943 nv->firmware_options_2 &= ~cpu_to_le32(BIT_8); 6944 } 6945 } 6946 6947 void 6948 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha, 6949 struct init_cb_81xx *icb) 6950 { 6951 struct qla_hw_data *ha = vha->hw; 6952 6953 if (!QLA_TGT_MODE_ENABLED()) 6954 return; 6955 6956 if (ha->tgt.node_name_set) { 6957 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE); 6958 icb->firmware_options_1 |= cpu_to_le32(BIT_14); 6959 } 6960 6961 /* disable ZIO at start time. */ 6962 if (!vha->flags.init_done) { 6963 uint32_t tmp; 6964 tmp = le32_to_cpu(icb->firmware_options_2); 6965 tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0); 6966 icb->firmware_options_2 = cpu_to_le32(tmp); 6967 } 6968 6969 } 6970 6971 void 6972 qlt_83xx_iospace_config(struct qla_hw_data *ha) 6973 { 6974 if (!QLA_TGT_MODE_ENABLED()) 6975 return; 6976 6977 ha->msix_count += 1; /* For ATIO Q */ 6978 } 6979 6980 6981 void 6982 qlt_modify_vp_config(struct scsi_qla_host *vha, 6983 struct vp_config_entry_24xx *vpmod) 6984 { 6985 /* enable target mode. Bit5 = 1 => disable */ 6986 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) 6987 vpmod->options_idx1 &= ~BIT_5; 6988 6989 /* Disable ini mode, if requested. bit4 = 1 => disable */ 6990 if (qla_tgt_mode_enabled(vha)) 6991 vpmod->options_idx1 &= ~BIT_4; 6992 } 6993 6994 void 6995 qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) 6996 { 6997 int rc; 6998 6999 if (!QLA_TGT_MODE_ENABLED()) 7000 return; 7001 7002 if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 7003 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 7004 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 7005 } else { 7006 ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in; 7007 ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; 7008 } 7009 7010 mutex_init(&base_vha->vha_tgt.tgt_mutex); 7011 mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); 7012 7013 INIT_LIST_HEAD(&base_vha->unknown_atio_list); 7014 INIT_DELAYED_WORK(&base_vha->unknown_atio_work, 7015 qlt_unknown_atio_work_fn); 7016 7017 qlt_clear_mode(base_vha); 7018 7019 rc = btree_init32(&ha->tgt.host_map); 7020 if (rc) 7021 ql_log(ql_log_info, base_vha, 0xd03d, 7022 "Unable to initialize ha->host_map btree\n"); 7023 7024 qlt_update_vp_map(base_vha, SET_VP_IDX); 7025 } 7026 7027 irqreturn_t 7028 qla83xx_msix_atio_q(int irq, void *dev_id) 7029 { 7030 struct rsp_que *rsp; 7031 scsi_qla_host_t *vha; 7032 struct qla_hw_data *ha; 7033 unsigned long flags; 7034 7035 rsp = (struct rsp_que *) dev_id; 7036 ha = rsp->hw; 7037 vha = pci_get_drvdata(ha->pdev); 7038 7039 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7040 7041 qlt_24xx_process_atio_queue(vha, 0); 7042 7043 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7044 7045 return IRQ_HANDLED; 7046 } 7047 7048 static void 7049 qlt_handle_abts_recv_work(struct work_struct *work) 7050 { 7051 struct qla_tgt_sess_op *op = container_of(work, 7052 struct qla_tgt_sess_op, work); 7053 scsi_qla_host_t *vha = op->vha; 7054 struct qla_hw_data *ha = vha->hw; 7055 unsigned long flags; 7056 7057 if (qla2x00_reset_active(vha) || 7058 (op->chip_reset != ha->base_qpair->chip_reset)) 7059 return; 7060 7061 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 7062 qlt_24xx_process_atio_queue(vha, 0); 7063 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 7064 7065 spin_lock_irqsave(&ha->hardware_lock, flags); 7066 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio); 7067 spin_unlock_irqrestore(&ha->hardware_lock, flags); 7068 7069 kfree(op); 7070 } 7071 7072 void 7073 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp, 7074 response_t *pkt) 7075 { 7076 struct qla_tgt_sess_op *op; 7077 7078 op = kzalloc(sizeof(*op), GFP_ATOMIC); 7079 7080 if (!op) { 7081 /* do not reach for ATIO queue here. This is best effort err 7082 * recovery at this point. 7083 */ 7084 qlt_response_pkt_all_vps(vha, rsp, pkt); 7085 return; 7086 } 7087 7088 memcpy(&op->atio, pkt, sizeof(*pkt)); 7089 op->vha = vha; 7090 op->chip_reset = vha->hw->base_qpair->chip_reset; 7091 op->rsp = rsp; 7092 INIT_WORK(&op->work, qlt_handle_abts_recv_work); 7093 queue_work(qla_tgt_wq, &op->work); 7094 return; 7095 } 7096 7097 int 7098 qlt_mem_alloc(struct qla_hw_data *ha) 7099 { 7100 if (!QLA_TGT_MODE_ENABLED()) 7101 return 0; 7102 7103 ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC, 7104 sizeof(struct qla_tgt_vp_map), 7105 GFP_KERNEL); 7106 if (!ha->tgt.tgt_vp_map) 7107 return -ENOMEM; 7108 7109 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, 7110 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), 7111 &ha->tgt.atio_dma, GFP_KERNEL); 7112 if (!ha->tgt.atio_ring) { 7113 kfree(ha->tgt.tgt_vp_map); 7114 return -ENOMEM; 7115 } 7116 return 0; 7117 } 7118 7119 void 7120 qlt_mem_free(struct qla_hw_data *ha) 7121 { 7122 if (!QLA_TGT_MODE_ENABLED()) 7123 return; 7124 7125 if (ha->tgt.atio_ring) { 7126 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * 7127 sizeof(struct atio_from_isp), ha->tgt.atio_ring, 7128 ha->tgt.atio_dma); 7129 } 7130 kfree(ha->tgt.tgt_vp_map); 7131 } 7132 7133 /* vport_slock to be held by the caller */ 7134 void 7135 qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) 7136 { 7137 void *slot; 7138 u32 key; 7139 int rc; 7140 7141 if (!QLA_TGT_MODE_ENABLED()) 7142 return; 7143 7144 key = vha->d_id.b24; 7145 7146 switch (cmd) { 7147 case SET_VP_IDX: 7148 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; 7149 break; 7150 case SET_AL_PA: 7151 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7152 if (!slot) { 7153 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, 7154 "Save vha in host_map %p %06x\n", vha, key); 7155 rc = btree_insert32(&vha->hw->tgt.host_map, 7156 key, vha, GFP_ATOMIC); 7157 if (rc) 7158 ql_log(ql_log_info, vha, 0xd03e, 7159 "Unable to insert s_id into host_map: %06x\n", 7160 key); 7161 return; 7162 } 7163 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 7164 "replace existing vha in host_map %p %06x\n", vha, key); 7165 btree_update32(&vha->hw->tgt.host_map, key, vha); 7166 break; 7167 case RESET_VP_IDX: 7168 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; 7169 break; 7170 case RESET_AL_PA: 7171 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, 7172 "clear vha in host_map %p %06x\n", vha, key); 7173 slot = btree_lookup32(&vha->hw->tgt.host_map, key); 7174 if (slot) 7175 btree_remove32(&vha->hw->tgt.host_map, key); 7176 vha->d_id.b24 = 0; 7177 break; 7178 } 7179 } 7180 7181 void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) 7182 { 7183 7184 if (!vha->d_id.b24) { 7185 vha->d_id = id; 7186 qlt_update_vp_map(vha, SET_AL_PA); 7187 } else if (vha->d_id.b24 != id.b24) { 7188 qlt_update_vp_map(vha, RESET_AL_PA); 7189 vha->d_id = id; 7190 qlt_update_vp_map(vha, SET_AL_PA); 7191 } 7192 } 7193 7194 static int __init qlt_parse_ini_mode(void) 7195 { 7196 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) 7197 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE; 7198 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0) 7199 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED; 7200 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0) 7201 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED; 7202 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0) 7203 ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL; 7204 else 7205 return false; 7206 7207 return true; 7208 } 7209 7210 int __init qlt_init(void) 7211 { 7212 int ret; 7213 7214 if (!qlt_parse_ini_mode()) { 7215 ql_log(ql_log_fatal, NULL, 0xe06b, 7216 "qlt_parse_ini_mode() failed\n"); 7217 return -EINVAL; 7218 } 7219 7220 if (!QLA_TGT_MODE_ENABLED()) 7221 return 0; 7222 7223 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", 7224 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct 7225 qla_tgt_mgmt_cmd), 0, NULL); 7226 if (!qla_tgt_mgmt_cmd_cachep) { 7227 ql_log(ql_log_fatal, NULL, 0xd04b, 7228 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); 7229 return -ENOMEM; 7230 } 7231 7232 qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep", 7233 sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t), 7234 0, NULL); 7235 7236 if (!qla_tgt_plogi_cachep) { 7237 ql_log(ql_log_fatal, NULL, 0xe06d, 7238 "kmem_cache_create for qla_tgt_plogi_cachep failed\n"); 7239 ret = -ENOMEM; 7240 goto out_mgmt_cmd_cachep; 7241 } 7242 7243 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, 7244 mempool_free_slab, qla_tgt_mgmt_cmd_cachep); 7245 if (!qla_tgt_mgmt_cmd_mempool) { 7246 ql_log(ql_log_fatal, NULL, 0xe06e, 7247 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n"); 7248 ret = -ENOMEM; 7249 goto out_plogi_cachep; 7250 } 7251 7252 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0); 7253 if (!qla_tgt_wq) { 7254 ql_log(ql_log_fatal, NULL, 0xe06f, 7255 "alloc_workqueue for qla_tgt_wq failed\n"); 7256 ret = -ENOMEM; 7257 goto out_cmd_mempool; 7258 } 7259 /* 7260 * Return 1 to signal that initiator-mode is being disabled 7261 */ 7262 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0; 7263 7264 out_cmd_mempool: 7265 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7266 out_plogi_cachep: 7267 kmem_cache_destroy(qla_tgt_plogi_cachep); 7268 out_mgmt_cmd_cachep: 7269 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7270 return ret; 7271 } 7272 7273 void qlt_exit(void) 7274 { 7275 if (!QLA_TGT_MODE_ENABLED()) 7276 return; 7277 7278 destroy_workqueue(qla_tgt_wq); 7279 mempool_destroy(qla_tgt_mgmt_cmd_mempool); 7280 kmem_cache_destroy(qla_tgt_plogi_cachep); 7281 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); 7282 } 7283