// SPDX-License-Identifier: GPL-2.0-only
/*
 *  qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
 *
 *  based on qla2x00t.c code:
 *
 *  Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
 *  Copyright (C) 2004 - 2005 Leonid Stoljar
 *  Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
 *  Copyright (C) 2006 - 2010 ID7 Ltd.
 *
 *  Forward port and refactoring to modern qla2xxx and target/configfs
 *
 *  Copyright (C) 2010-2013 Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "qla_def.h"
#include "qla_target.h"

static int ql2xtgt_tape_enable;
module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xtgt_tape_enable,
	"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");

static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
module_param(qlini_mode, charp, S_IRUGO);
MODULE_PARM_DESC(qlini_mode,
42 "Determines when initiator mode will be enabled. Possible values: "
43 "\"exclusive\" - initiator mode will be enabled on load, "
44 "disabled on enabling target mode and then on disabling target mode "
45 "enabled back; "
46 "\"disabled\" - initiator mode will never be enabled; "
47 "\"dual\" - Initiator Modes will be enabled. Target Mode can be activated "
48 "when ready "
49 "\"enabled\" (default) - initiator mode will always stay enabled.");

int ql2xuctrlirq = 1;
module_param(ql2xuctrlirq, int, 0644);
MODULE_PARM_DESC(ql2xuctrlirq,
54 "User to control IRQ placement via smp_affinity."
55 "Valid with qlini_mode=disabled."
56 "1(default): enable");

int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;

static int qla_sam_status = SAM_STAT_BUSY;
static int tc_sam_status = SAM_STAT_TASK_SET_FULL; /* target core */

/*
 * From scsi/fc/fc_fcp.h
 */
enum fcp_resp_rsp_codes {
	FCP_TMF_CMPL = 0,
	FCP_DATA_LEN_INVALID = 1,
	FCP_CMND_FIELDS_INVALID = 2,
	FCP_DATA_PARAM_MISMATCH = 3,
	FCP_TMF_REJECTED = 4,
	FCP_TMF_FAILED = 5,
	FCP_TMF_INVALID_LUN = 9,
};

/*
 * fc_pri_ta from scsi/fc/fc_fcp.h
 */
#define FCP_PTA_SIMPLE      0	/* simple task attribute */
#define FCP_PTA_HEADQ       1	/* head of queue task attribute */
#define FCP_PTA_ORDERED     2	/* ordered task attribute */
#define FCP_PTA_ACA         4	/* auto. contingent allegiance */
#define FCP_PTA_MASK        7	/* mask for task attribute field */
#define FCP_PRI_SHIFT       3	/* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK  0x80	/* reserved bits in priority field */

/*
 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
 * must be called under HW lock and could unlock/lock it inside.
 * It isn't an issue, since in the current implementation, at the time those
 * functions are called:
 *
 * - Either the context is IRQ and only the IRQ handler can modify HW data,
 *   including rings related fields,
 *
 * - Or access to target mode variables from struct qla_tgt doesn't
 *   cross those functions' boundaries, except tgt_stop, which is
 *   additionally protected by irq_cmd_count.
 */
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
	struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, struct rsp_que *rsp,
	response_t *pkt);
static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
	int fn, void *iocb, int flags);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint16_t status, int qfull);
static void qlt_disable_vha(struct scsi_qla_host *vha);
static void qlt_clear_tgt_db(struct qla_tgt *tgt);
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
	struct imm_ntfy_from_isp *imm, int ha_locked);
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
	fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
static void qlt_24xx_handle_abts(struct scsi_qla_host *,
	struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
	uint16_t);
static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
static inline uint32_t qlt_make_handle(struct qla_qpair *);

/*
 * Global Variables
 */
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);

/* This API intentionally takes dest as a parameter, rather than returning
 * an int value, so that the caller cannot forget to issue the wmb() after
 * the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	*dest = atomic_inc_return(&base_vha->generation_tick);
	/* memory barrier */
	wmb();
}
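
/*
 * Usage sketch (illustrative, not a verbatim caller from this file): the
 * tick is snapshotted into sess->generation by qlt_create_sess() below,
 * and later code can compare generations to tell whether newer login or
 * logout events happened in between, e.g.
 *
 *	int gen;
 *
 *	qlt_do_generation_tick(vha, &gen);
 *	...
 *	if (gen != sess->generation)
 *		handle_stale_session();	// hypothetical helper
 */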

/* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
	/* Send marker if required */
	if (unlikely(vha->marker_needed != 0)) {
		int rc = qla2x00_issue_marker(vha, vha_locked);

		if (rc != QLA_SUCCESS) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03d,
			    "qla_target(%d): issue_marker() failed\n",
			    vha->vp_idx);
		}
		return rc;
	}
	return QLA_SUCCESS;
}

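/*
 * Resolve a destination port ID (d_id) to the scsi_qla_host that owns it:
 * the physical port itself when the IDs match, otherwise an NPIV vport
 * looked up in the hw->host_map btree. Returns NULL if nothing matches.
 */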
struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
	be_id_t d_id)
{
	struct scsi_qla_host *host;
	uint32_t key;

	if (vha->d_id.b.area == d_id.area &&
	    vha->d_id.b.domain == d_id.domain &&
	    vha->d_id.b.al_pa == d_id.al_pa)
		return vha;

	key = be_to_port_id(d_id).b24;

	host = btree_lookup32(&vha->hw->host_map, key);
	if (!host)
		ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
		    "Unable to find host %06x\n", key);

	return host;
}

static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);

	vha->hw->tgt.num_pend_cmds++;
	if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
		vha->qla_stats.stat_max_pend_cmds =
			vha->hw->tgt.num_pend_cmds;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
	vha->hw->tgt.num_pend_cmds--;
	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

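/*
 * Park an ATIO whose d_id doesn't resolve to any known host yet (e.g. an
 * NPIV port that is still coming up) on vha->unknown_atio_list and kick
 * the delayed work to retry delivery; if the target is stopping or the
 * allocation fails, the exchange is terminated instead.
 */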
static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	unsigned int add_cdb_len = 0;

	/* atio must be the last member of qla_tgt_sess_op for add_cdb_len */
	BUILD_BUG_ON(offsetof(struct qla_tgt_sess_op, atio) + sizeof(u->atio) != sizeof(*u));

	if (tgt->tgt_stop) {
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "qla_target(%d): dropping unknown ATIO_TYPE7, because tgt is being stopped",
		    vha->vp_idx);
		goto out_term;
	}

	if (atio->u.raw.entry_type == ATIO_TYPE7 &&
	    atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)
		add_cdb_len =
			((unsigned int) atio->u.isp24.fcp_cmnd.add_cdb_len) * 4;

	u = kzalloc(sizeof(*u) + add_cdb_len, GFP_ATOMIC);
	if (u == NULL)
		goto out_term;

	u->vha = vha;
	memcpy(&u->atio, atio, sizeof(*atio) + add_cdb_len);
	INIT_LIST_HEAD(&u->cmd_list);

	spin_lock_irqsave(&vha->cmd_list_lock, flags);
	list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);

	schedule_delayed_work(&vha->unknown_atio_work, 1);

out:
	return;

out_term:
	qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked);
	goto out;
}

static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
	uint8_t ha_locked)
{
	struct qla_tgt_sess_op *u, *t;
	scsi_qla_host_t *host;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	uint8_t queued = 0;

	list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
		if (u->aborted) {
			ql_dbg(ql_dbg_async, vha, 0x502e,
			    "Freeing unknown %s %p, because of Abort\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked);
			goto abort;
		}

		host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
		if (host != NULL) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
			    "Requeuing unknown ATIO_TYPE7 %p\n", u);
			qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
		} else if (tgt->tgt_stop) {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
			    "Freeing unknown %s %p, because tgt is being stopped\n",
			    "ATIO_TYPE7", u);
			qlt_send_term_exchange(vha->hw->base_qpair, NULL,
			    &u->atio, ha_locked);
		} else {
			ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
			    "Reschedule u %p, vha %p, host %p\n", u, vha, host);
			if (!queued) {
				queued = 1;
				schedule_delayed_work(&vha->unknown_atio_work,
				    1);
			}
			continue;
		}

abort:
		spin_lock_irqsave(&vha->cmd_list_lock, flags);
		list_del(&u->cmd_list);
		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
		kfree(u);
	}
}

void qlt_unknown_atio_work_fn(struct work_struct *work)
{
	struct scsi_qla_host *vha = container_of(to_delayed_work(work),
	    struct scsi_qla_host, unknown_atio_work);

	qlt_try_to_dequeue_unknown_atios(vha, 0);
}

static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
	struct atio_from_isp *atio, uint8_t ha_locked)
{
	ql_dbg(ql_dbg_tgt, vha, 0xe072,
		"%s: qla_target(%d): type %x ox_id %04x\n",
		__func__, vha->vp_idx, atio->u.raw.entry_type,
		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));

	switch (atio->u.raw.entry_type) {
	case ATIO_TYPE7:
	{
		struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
		    atio->u.isp24.fcp_hdr.d_id);
		if (unlikely(NULL == host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe03e,
			    "qla_target(%d): Received ATIO_TYPE7 "
			    "with unknown d_id %x:%x:%x\n", vha->vp_idx,
			    atio->u.isp24.fcp_hdr.d_id.domain,
			    atio->u.isp24.fcp_hdr.d_id.area,
			    atio->u.isp24.fcp_hdr.d_id.al_pa);


			qlt_queue_unknown_atio(vha, atio, ha_locked);
			break;
		}
		if (unlikely(!list_empty(&vha->unknown_atio_list)))
			qlt_try_to_dequeue_unknown_atios(vha, ha_locked);

		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)atio;

		qlt_issue_marker(vha, ha_locked);

		if ((entry->u.isp24.vp_index != 0xFF) &&
		    (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe03f,
				    "qla_target(%d): Received "
				    "ATIO (IMMED_NOTIFY_TYPE) "
				    "with unknown vp_index %d\n",
				    vha->vp_idx, entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_24xx_atio_pkt(host, atio, ha_locked);
		break;
	}

	case VP_RPT_ID_IOCB_TYPE:
		qla24xx_report_id_acquisition(vha,
			(struct vp_rpt_id_entry_24xx *)atio);
		break;

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
			(struct abts_recv_from_24xx *)atio;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
			entry->vp_index);
		unsigned long flags;

		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe00a,
			    "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		if (!ha_locked)
			spin_lock_irqsave(&host->hw->hardware_lock, flags);
		qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
		if (!ha_locked)
			spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
		break;
	}

	/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */

	default:
		ql_dbg(ql_dbg_tgt, vha, 0xe040,
		    "qla_target(%d): Received unknown ATIO entry "
		    "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
		break;
	}

	return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
	struct rsp_que *rsp, response_t *pkt)
{
	switch (pkt->entry_type) {
	case CTIO_CRC2:
		ql_dbg(ql_dbg_tgt, vha, 0xe073,
			"qla_target(%d):%s: CRC2 Response pkt\n",
			vha->vp_idx, __func__);
		fallthrough;
	case CTIO_TYPE7:
	{
		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe041,
			    "qla_target(%d): Response pkt (CTIO_TYPE7) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case IMMED_NOTIFY_TYPE:
	{
		struct scsi_qla_host *host;
		struct imm_ntfy_from_isp *entry =
		    (struct imm_ntfy_from_isp *)pkt;

		host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe042,
			    "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
			    "received, with unknown vp_index %d\n",
			    vha->vp_idx, entry->u.isp24.vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case NOTIFY_ACK_TYPE:
	{
		struct scsi_qla_host *host = vha;
		struct nack_to_isp *entry = (struct nack_to_isp *)pkt;

		if (0xFF != entry->u.isp24.vp_index) {
			host = qla_find_host_by_vp_idx(vha,
			    entry->u.isp24.vp_index);
			if (unlikely(!host)) {
				ql_dbg(ql_dbg_tgt, vha, 0xe043,
				    "qla_target(%d): Response "
				    "pkt (NOTIFY_ACK_TYPE) "
				    "received, with unknown "
				    "vp_index %d\n", vha->vp_idx,
				    entry->u.isp24.vp_index);
				break;
			}
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RECV_24XX:
	{
		struct abts_recv_from_24xx *entry =
		    (struct abts_recv_from_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe044,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RECV_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}

	case ABTS_RESP_24XX:
	{
		struct abts_resp_to_24xx *entry =
		    (struct abts_resp_to_24xx *)pkt;
		struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
		    entry->vp_index);
		if (unlikely(!host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe045,
			    "qla_target(%d): Response pkt "
			    "(ABTS_RESP_24XX) received, with unknown "
			    "vp_index %d\n", vha->vp_idx, entry->vp_index);
			break;
		}
		qlt_response_pkt(host, rsp, pkt);
		break;
	}
	default:
		qlt_response_pkt(vha, rsp, pkt);
		break;
	}

}

/*
 * All qlt_plogi_ack_t operations are protected by hardware_lock
 */
static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	struct qla_work_evt *e;

	e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.nack.fcport = fcport;
	e->u.nack.type = type;
	memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
	return qla2x00_post_work(vha, e);
}

static void qla2x00_async_nack_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, vha, 0x20f2,
	    "Async done-%s res %x %8phC type %d\n",
	    sp->name, res, sp->fcport->port_name, sp->type);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	sp->fcport->flags &= ~FCF_ASYNC_SENT;
	sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;

	switch (sp->type) {
	case SRB_NACK_PLOGI:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
		sp->fcport->logout_on_delete = 1;
		sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
		sp->fcport->send_els_logo = 0;

		if (sp->fcport->flags & FCF_FCSP_DEVICE) {
			ql_dbg(ql_dbg_edif, vha, 0x20ef,
			    "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__,
			    sp->fcport->port_name);
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_AUTH_PEND);
			qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
			    sp->fcport->d_id.b24);
			qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
			    0, sp->fcport);
		}
		break;

	case SRB_NACK_PRLI:
		sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
		sp->fcport->deleted = 0;
		sp->fcport->send_els_logo = 0;

		if (!sp->fcport->login_succ &&
		    !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
			sp->fcport->login_succ = 1;

			vha->fcport_count++;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
			qla24xx_sched_upd_fcport(sp->fcport);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		} else {
			sp->fcport->login_retry = 0;
			qla2x00_set_fcport_disc_state(sp->fcport,
			    DSC_LOGIN_COMPLETE);
			sp->fcport->deleted = 0;
			sp->fcport->logout_on_delete = 1;
		}
		break;

	case SRB_NACK_LOGO:
		sp->fcport->login_gen++;
		sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
		break;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	kref_put(&sp->cmd_kref, qla2x00_sp_release);
}

int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
	struct imm_ntfy_from_isp *ntfy, int type)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	char *c = NULL;

	fcport->flags |= FCF_ASYNC_SENT;
	switch (type) {
	case SRB_NACK_PLOGI:
		fcport->fw_login_state = DSC_LS_PLOGI_PEND;
		c = "PLOGI";
		if (vha->hw->flags.edif_enabled &&
		    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP))
			fcport->flags |= FCF_FCSP_DEVICE;
		break;
	case SRB_NACK_PRLI:
		fcport->fw_login_state = DSC_LS_PRLI_PEND;
		fcport->deleted = 0;
		c = "PRLI";
		break;
	case SRB_NACK_LOGO:
		fcport->fw_login_state = DSC_LS_LOGO_PEND;
		c = "LOGO";
		break;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		goto done;

	sp->type = type;
	sp->name = "nack";
	qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
			      qla2x00_async_nack_sp_done);

	sp->u.iocb_cmd.u.nack.ntfy = ntfy;

	ql_dbg(ql_dbg_disc, vha, 0x20f4,
	    "Async-%s %8phC hndl %x %s\n",
	    sp->name, fcport->port_name, sp->handle, c);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}

void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
	fc_port_t *t;

	switch (e->u.nack.type) {
	case SRB_NACK_PRLI:
		t = e->u.nack.fcport;
		flush_work(&t->del_work);
		flush_work(&t->free_work);
		mutex_lock(&vha->vha_tgt.tgt_mutex);
		t = qlt_create_sess(vha, e->u.nack.fcport, 0);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);
		if (t) {
			ql_log(ql_log_info, vha, 0xd034,
			    "%s create sess success %p", __func__, t);
			/* create sess has an extra kref */
			vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
		}
		break;
	}
	qla24xx_async_notify_ack(vha, e->u.nack.fcport,
	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
}

void qla24xx_delete_sess_fn(struct work_struct *work)
{
	fc_port_t *fcport = container_of(work, struct fc_port, del_work);
	struct qla_hw_data *ha = NULL;

	if (!fcport || !fcport->vha || !fcport->vha->hw)
		return;

	ha = fcport->vha->hw;

	if (fcport->se_sess) {
		ha->tgt.tgt_ops->shutdown_sess(fcport);
		ha->tgt.tgt_ops->put_sess(fcport);
	} else {
		qlt_unreg_sess(fcport);
	}
}

/*
 * Called from qla2x00_reg_remote_port()
 */
void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (!vha->hw->tgt.tgt_ops)
		return;

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (tgt->tgt_stop) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (fcport->disc_state == DSC_DELETE_PEND) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
		return;
	}

	if (!sess->se_sess) {
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

		mutex_lock(&vha->vha_tgt.tgt_mutex);
		sess = qlt_create_sess(vha, fcport, false);
		mutex_unlock(&vha->vha_tgt.tgt_mutex);

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	} else {
		if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x2107,
			    "%s: kref_get fail sess %8phC\n",
			    __func__, sess->port_name);
			spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
			return;
		}

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
		    "qla_target(%u): %ssession for port %8phC "
		    "(loop ID %d) reappeared\n", vha->vp_idx,
		    sess->local ? "local " : "", sess->port_name, sess->loop_id);

		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
		    "Reappeared sess %p\n", sess);

		ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
		    fcport->loop_id,
		    (fcport->flags & FCF_CONF_COMP_SUPPORTED));
	}

	if (sess && sess->local) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
		    "qla_target(%u): local session for "
		    "port %8phC (loop ID %d) became global\n", vha->vp_idx,
		    fcport->port_name, sess->loop_id);
		sess->local = 0;
	}
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	ha->tgt.tgt_ops->put_sess(sess);
}

/*
 * This is a zero-base ref-counting solution, since hardware_lock
 * guarantees that ref_count is not modified concurrently.
 * Upon successful return content of iocb is undefined
 */
static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
    struct imm_ntfy_from_isp *iocb)
{
	struct qlt_plogi_ack_t *pla;

	lockdep_assert_held(&vha->hw->hardware_lock);

	list_for_each_entry(pla, &vha->plogi_ack_list, list) {
		if (pla->id.b24 == id->b24) {
			ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
			    "%s %d %8phC Term INOT due to new INOT",
			    __func__, __LINE__,
			    pla->iocb.u.isp24.port_name);
			qlt_send_term_imm_notif(vha, &pla->iocb, 1);
			memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
			return pla;
		}
	}

	pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
	if (!pla) {
		ql_dbg(ql_dbg_async, vha, 0x5088,
		    "qla_target(%d): Allocation of plogi_ack failed\n",
		    vha->vp_idx);
		return NULL;
	}

	memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
	pla->id = *id;
	list_add_tail(&pla->list, &vha->plogi_ack_list);

	return pla;
}

void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
    struct qlt_plogi_ack_t *pla)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;
	port_id_t port_id;
	uint16_t loop_id;
	fc_port_t *fcport = pla->fcport;

	BUG_ON(!pla->ref_count);
	pla->ref_count--;

	if (pla->ref_count)
		return;

	ql_dbg(ql_dbg_disc, vha, 0x5089,
	    "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
	    " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
	    iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
	    iocb->u.isp24.port_id[0],
	    le16_to_cpu(iocb->u.isp24.nport_handle),
	    iocb->u.isp24.exchange_address, iocb->ox_id);

	port_id.b.domain = iocb->u.isp24.port_id[2];
	port_id.b.area = iocb->u.isp24.port_id[1];
	port_id.b.al_pa = iocb->u.isp24.port_id[0];
	port_id.b.rsvd_1 = 0;

	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);

	fcport->loop_id = loop_id;
	fcport->d_id = port_id;
	if (iocb->u.isp24.status_subcode == ELS_PLOGI)
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
	else
		qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
			fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
	}

	list_del(&pla->list);
	kmem_cache_free(qla_tgt_plogi_cachep, pla);
}

void
qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
    struct fc_port *sess, enum qlt_plogi_link_t link)
{
	struct imm_ntfy_from_isp *iocb = &pla->iocb;

	/* Inc ref_count first because link might already be pointing at pla */
	pla->ref_count++;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
		"Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
		" s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
		sess, link, sess->port_name,
		iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
		iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
		pla->ref_count, pla, link);

	if (link == QLT_PLOGI_LINK_CONFLICT) {
		switch (sess->disc_state) {
		case DSC_DELETED:
		case DSC_DELETE_PEND:
			pla->ref_count--;
			return;
		default:
			break;
		}
	}

	if (sess->plogi_link[link])
		qlt_plogi_ack_unref(vha, sess->plogi_link[link]);

	if (link == QLT_PLOGI_LINK_SAME_WWN)
		pla->fcport = sess;

	sess->plogi_link[link] = pla;
}
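
/*
 * PLOGI ACK lifecycle summary (all under hardware_lock, see note above):
 * qlt_plogi_ack_find_add() creates (or reuses) an entry with a zero
 * ref_count, qlt_plogi_ack_link() takes references on behalf of
 * fc_port->plogi_link[], and qlt_plogi_ack_unref() drops them; the final
 * unref posts the deferred NACK (PLOGI or PRLI) and frees the entry.
 */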

typedef struct {
	/* These fields must be initialized by the caller */
	port_id_t id;
	/*
	 * Number of cmds dropped while we were waiting for the
	 * initiator to ack LOGO. Initialize to 1 if the LOGO is
	 * triggered by a command, otherwise to 0.
	 */
	int cmd_count;

	/* These fields are used by callee */
	struct list_head list;
} qlt_port_logo_t;

static void
qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
{
	qlt_port_logo_t *tmp;
	int res;

	if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
		res = 0;
		goto out;
	}

	mutex_lock(&vha->vha_tgt.tgt_mutex);

	list_for_each_entry(tmp, &vha->logo_list, list) {
		if (tmp->id.b24 == logo->id.b24) {
			tmp->cmd_count += logo->cmd_count;
			mutex_unlock(&vha->vha_tgt.tgt_mutex);
			return;
		}
	}

	list_add_tail(&logo->list, &vha->logo_list);

	mutex_unlock(&vha->vha_tgt.tgt_mutex);

	res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);

	mutex_lock(&vha->vha_tgt.tgt_mutex);
	list_del(&logo->list);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);

out:
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
	    "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
	    logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
	    logo->cmd_count, res);
}

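/*
 * Final session teardown, run from sess->free_work: optionally send an
 * ELS LOGO/PRLO to the initiator, release the fabric se_session, wait
 * (bounded, roughly 23 seconds) for a started firmware logout to finish,
 * drop any linked PLOGI ACK references, and finally mark the fc_port
 * deleted and wake waiters on tgt->waitQ / vha->fcport_waitQ.
 */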
void qlt_free_session_done(struct work_struct *work)
{
	struct fc_port *sess = container_of(work, struct fc_port,
	    free_work);
	struct qla_tgt *tgt = sess->tgt;
	struct scsi_qla_host *vha = sess->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	bool logout_started = false;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	struct qlt_plogi_ack_t *own =
	    sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

	ql_dbg(ql_dbg_disc, vha, 0xf084,
		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
		" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
		sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
		sess->logout_on_delete, sess->keep_nport_handle,
		sess->send_els_logo);

	if (!IS_SW_RESV_ADDR(sess->d_id)) {
		qla2x00_mark_device_lost(vha, sess, 0);

		if (sess->send_els_logo) {
			qlt_port_logo_t logo;

			logo.id = sess->d_id;
			logo.cmd_count = 0;
			INIT_LIST_HEAD(&logo.list);
			if (!own)
				qlt_send_first_logo(vha, &logo);
			sess->send_els_logo = 0;
		}

		if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
			int rc;

			if (!own ||
			    (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
				sess->logout_completed = 0;
				rc = qla2x00_post_async_logout_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule logo failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			} else if (own && (own->iocb.u.isp24.status_subcode ==
				ELS_PRLI) && ha->flags.rida_fmt2) {
				rc = qla2x00_post_async_prlo_work(vha, sess,
				    NULL);
				if (rc != QLA_SUCCESS)
					ql_log(ql_log_warn, vha, 0xf085,
					    "Schedule PRLO failed sess %p rc %d\n",
					    sess, rc);
				else
					logout_started = true;
			}
		} /* if sess->logout_on_delete */

		if (sess->nvme_flag & NVME_FLAG_REGISTERED &&
		    !(sess->nvme_flag & NVME_FLAG_DELETING)) {
			sess->nvme_flag |= NVME_FLAG_DELETING;
			qla_nvme_unregister_remote_port(sess);
		}

		if (ha->flags.edif_enabled &&
		    (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
			sess->edif.authok = 0;
			if (!ha->flags.host_shutting_down) {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s wwpn %8phC calling qla2x00_release_all_sadb\n",
				    __func__, sess->port_name);
				qla2x00_release_all_sadb(vha, sess);
			} else {
				ql_dbg(ql_dbg_edif, vha, 0x911e,
				    "%s bypassing release_all_sadb\n",
				    __func__);
			}

			qla_edif_clear_appdata(vha, sess);
			qla_edif_sess_down(vha, sess);
		}
	}

	/*
	 * Release the target session for FC Nexus from fabric module code.
	 */
	if (sess->se_sess != NULL)
		ha->tgt.tgt_ops->free_session(sess);

	if (logout_started) {
		bool traced = false;
		u16 cnt = 0;

		while (!READ_ONCE(sess->logout_completed)) {
			if (!traced) {
				ql_dbg(ql_dbg_disc, vha, 0xf086,
				    "%s: waiting for sess %p logout\n",
				    __func__, sess);
				traced = true;
			}
			msleep(100);
			cnt++;
			/*
			 * Driver timeout is set to 22 Sec, update count value to loop
			 * long enough for log-out to complete before advancing. Otherwise,
			 * straddling logout can interfere with re-login attempt.
			 */
			if (cnt > 230)
				break;
		}

		ql_dbg(ql_dbg_disc, vha, 0xf087,
		    "%s: sess %p logout completed\n", __func__, sess);
	}

	/* check for any straggling io left behind */
	if (!(sess->flags & FCF_FCP2_DEVICE) &&
	    qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
		ql_log(ql_log_warn, vha, 0x3027,
		    "IO did not return. Resetting.\n");
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_chip_reset(vha);
	}

	if (sess->logo_ack_needed) {
		sess->logo_ack_needed = 0;
		qla24xx_async_notify_ack(vha, sess,
			(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
	}

	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	if (sess->se_sess) {
		sess->se_sess = NULL;
		if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
			tgt->sess_count--;
	}

	qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
	sess->fw_login_state = DSC_LS_PORT_UNAVAIL;

	if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
		vha->fcport_count--;
		sess->login_succ = 0;
	}

	qla2x00_clear_loop_id(sess);

	if (sess->conflict) {
		sess->conflict->login_pause = 0;
		sess->conflict = NULL;
		if (!test_bit(UNLOADING, &vha->dpc_flags))
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
	}

	{
		struct qlt_plogi_ack_t *con =
		    sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
		struct imm_ntfy_from_isp *iocb;

		own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];

		if (con) {
			iocb = &con->iocb;
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
				 "se_sess %p / sess %p port %8phC is gone,"
				 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
				 sess->se_sess, sess, sess->port_name,
				 own ? "releasing own PLOGI" : "no own PLOGI pending",
				 own ? own->ref_count : -1,
				 iocb->u.isp24.port_name, con->ref_count);
			qlt_plogi_ack_unref(vha, con);
			sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
		} else {
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
			    "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
			    sess->se_sess, sess, sess->port_name,
			    own ? "releasing own PLOGI" :
			    "no own PLOGI pending",
			    own ? own->ref_count : -1);
		}

		if (own) {
			sess->fw_login_state = DSC_LS_PLOGI_PEND;
			qlt_plogi_ack_unref(vha, own);
			sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
		}
	}

	sess->explicit_logout = 0;
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	qla2x00_dfs_remove_rport(vha, sess);

	spin_lock_irqsave(&vha->work_lock, flags);
	sess->flags &= ~FCF_ASYNC_SENT;
	sess->deleted = QLA_SESS_DELETED;
	sess->free_pending = 0;
	spin_unlock_irqrestore(&vha->work_lock, flags);

	ql_dbg(ql_dbg_disc, vha, 0xf001,
	    "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
	    sess, sess->port_name, vha->fcport_count);

	if (tgt && (tgt->sess_count == 0))
		wake_up_all(&tgt->waitQ);

	if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
	    !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
	    (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
		switch (vha->host->active_mode) {
		case MODE_INITIATOR:
		case MODE_DUAL:
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
			break;
		case MODE_TARGET:
		default:
			/* no-op */
			break;
		}
	}

	if (vha->fcport_count == 0)
		wake_up_all(&vha->fcport_waitQ);
}

/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct fc_port *sess)
{
	struct scsi_qla_host *vha = sess->vha;
	unsigned long flags;

	ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
	    "%s sess %p for deletion %8phC\n",
	    __func__, sess, sess->port_name);

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->free_pending) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->free_pending = 1;
	/*
	 * Use FCF_ASYNC_SENT flag to block other cmds used in sess
	 * management from being sent.
	 */
	sess->flags |= FCF_ASYNC_SENT;
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	if (sess->se_sess)
		vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);

	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
	sess->last_rscn_gen = sess->rscn_gen;
	sess->last_login_gen = sess->login_gen;

	queue_work(sess->vha->hw->wq, &sess->free_work);
}
EXPORT_SYMBOL(qlt_unreg_sess);

static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = NULL;
	uint16_t loop_id;
	int res = 0;
	struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
	unsigned long flags;

	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
	if (loop_id == 0xFFFF) {
		/* Global event */
		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	} else {
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe000,
	    "Using sess for qla_tgt_reset: %p\n", sess);
	if (!sess) {
		res = -ESRCH;
		return res;
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe047,
	    "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
	    "loop_id %d)\n", vha->host_no, sess, sess->port_name,
	    mcmd, loop_id);

	return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}

static void qla24xx_chk_fcp_state(struct fc_port *sess)
{
	if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
		sess->logout_on_delete = 0;
		sess->logo_ack_needed = 0;
		sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
	}
}

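/*
 * Queue sess->del_work to tear the session down, unless deletion is
 * already pending or in progress; while the rport is still being
 * reported upward (DSC_UPD_FCPORT) the request is only recorded in
 * next_disc_state and deferred until that registration completes.
 */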
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	unsigned long flags;
	u16 sec;

	switch (sess->disc_state) {
	case DSC_DELETE_PEND:
		return;
	case DSC_DELETED:
		if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
			!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
			if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
				wake_up_all(&tgt->waitQ);

			if (sess->vha->fcport_count == 0)
				wake_up_all(&sess->vha->fcport_waitQ);
			return;
		}
		break;
	case DSC_UPD_FCPORT:
		/*
		 * This port is not done reporting to upper layer.
		 * let it finish
		 */
		sess->next_disc_state = DSC_DELETE_PEND;
		sec = jiffies_to_msecs(jiffies -
			sess->jiffies_at_registration)/1000;
		if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
			sess->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
			    "%s %8phC : Slow Rport registration(%d Sec)\n",
			    __func__, sess->port_name, sec);
		}
		return;
	default:
		break;
	}

	spin_lock_irqsave(&sess->vha->work_lock, flags);
	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
		spin_unlock_irqrestore(&sess->vha->work_lock, flags);
		return;
	}
	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
	spin_unlock_irqrestore(&sess->vha->work_lock, flags);

	sess->prli_pend_timer = 0;
	qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);

	qla24xx_chk_fcp_state(sess);

	ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
	    "Scheduling sess %p for deletion %8phC fc4_type %x\n",
	    sess, sess->port_name, sess->fc4_type);

	WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
}

static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
	struct fc_port *sess;
	scsi_qla_host_t *vha = tgt->vha;

	list_for_each_entry(sess, &vha->vp_fcports, list) {
		if (sess->se_sess)
			qlt_schedule_sess_for_deletion(sess);
	}

	/* At this point tgt could be already dead */
}

static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
	uint16_t *loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	dma_addr_t gid_list_dma;
	struct gid_list_info *gid_list, *gid;
	int res, rc, i;
	uint16_t entries;

	gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
		    "qla_target(%d): DMA Alloc failed of %u\n",
		    vha->vp_idx, qla2x00_gid_list_size(ha));
		return -ENOMEM;
	}

	/* Get list of logged in devices */
	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
	if (rc != QLA_SUCCESS) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
		    "qla_target(%d): get_id_list() failed: %x\n",
		    vha->vp_idx, rc);
		res = -EBUSY;
		goto out_free_id_list;
	}

	gid = gid_list;
	res = -ENOENT;
	for (i = 0; i < entries; i++) {
		if (gid->al_pa == s_id.al_pa &&
		    gid->area == s_id.area &&
		    gid->domain == s_id.domain) {
			*loop_id = le16_to_cpu(gid->loop_id);
			res = 0;
			break;
		}
		gid = (void *)gid + ha->gid_list_info_size;
	}

out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
	    gid_list, gid_list_dma);
	return res;
}

/*
 * Adds an extra ref to allow to drop hw lock after adding sess to the list.
 * Caller must put it.
 */
static struct fc_port *qlt_create_sess(
	struct scsi_qla_host *vha,
	fc_port_t *fcport,
	bool local)
{
	struct qla_hw_data *ha = vha->hw;
	struct fc_port *sess = fcport;
	unsigned long flags;

	if (vha->vha_tgt.qla_tgt->tgt_stop)
		return NULL;

	if (fcport->se_sess) {
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f6,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}
		return fcport;
	}
	sess->tgt = vha->vha_tgt.qla_tgt;
	sess->local = local;

	/*
	 * Under normal circumstances we want to logout from firmware when
	 * session eventually ends and release corresponding nport handle.
	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
	 * code will adjust these flags as necessary.
	 */
	sess->logout_on_delete = 1;
	sess->keep_nport_handle = 0;
	sess->logout_completed = 0;

	if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
	    &fcport->port_name[0], sess) < 0) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
		    "(%d) %8phC check_initiator_node_acl failed\n",
		    vha->vp_idx, fcport->port_name);
		return NULL;
	} else {
		kref_init(&fcport->sess_kref);
		/*
		 * Take an extra reference to ->sess_kref here to handle
		 * fc_port access across ->tgt.sess_lock reacquire.
		 */
		if (!kref_get_unless_zero(&sess->sess_kref)) {
			ql_dbg(ql_dbg_disc, vha, 0x20f7,
			    "%s: kref_get_unless_zero failed for %8phC\n",
			    __func__, sess->port_name);
			return NULL;
		}

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		if (!IS_SW_RESV_ADDR(sess->d_id))
			vha->vha_tgt.qla_tgt->sess_count++;

		qlt_do_generation_tick(vha, &sess->generation);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
	    "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
	    sess, sess->se_sess, vha->vha_tgt.qla_tgt,
	    vha->vha_tgt.qla_tgt->sess_count);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
	    "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
	    "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
	    vha->vp_idx, local ? "local " : "", fcport->port_name,
	    fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
	    sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");

	return sess;
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
{
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;
	int res;
	/*
	 * We need to protect against race, when tgt is freed before or
	 * inside wake_up()
	 */
	spin_lock_irqsave(&ha->tgt.sess_lock, flags);
	ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
	    "tgt %p, sess_count=%d\n",
	    tgt, tgt->sess_count);
	res = (tgt->sess_count == 0);
	spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

	return res;
}

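/*
 * Target stop is a two-phase protocol: phase1 sets tgt_stop, clears the
 * session database and waits for sess_count to drain (disabling the vha
 * as a big hammer if needed); phase2 then flips the state to tgt_stopped.
 * qlt_release() further below runs whichever phases configfs hasn't
 * already run.
 */
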
/* Called by tcm_qla2xxx configfs code */
int qlt_stop_phase1(struct qla_tgt *tgt)
{
	struct scsi_qla_host *vha = tgt->vha;
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);

	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
	    vha->host_no, vha);
	/*
	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
	 * Lock is needed, because we still can get an incoming packet.
	 */
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 1;
	qlt_clear_tgt_db(tgt);
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&qla_tgt_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
	    "Waiting for sess works (tgt %p)", tgt);
	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	do {
		spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
		flush_work(&tgt->sess_work);
		spin_lock_irqsave(&tgt->sess_work_lock, flags);
	} while (!list_empty(&tgt->sess_works_list));
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
	    "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);

	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);

	/* Big hammer */
	if (!ha->flags.host_shutting_down &&
	    (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
		qlt_disable_vha(vha);

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);

/* Called by tcm_qla2xxx configfs code */
void qlt_stop_phase2(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;

	if (tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
		    "Already in tgt->tgt_stopped state\n");
		dump_stack();
		return;
	}
	if (!tgt->tgt_stop) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
		    "%s: phase1 stop is not completed\n", __func__);
		dump_stack();
		return;
	}

	mutex_lock(&tgt->ha->optrom_mutex);
	mutex_lock(&vha->vha_tgt.tgt_mutex);
	tgt->tgt_stop = 0;
	tgt->tgt_stopped = 1;
	mutex_unlock(&vha->vha_tgt.tgt_mutex);
	mutex_unlock(&tgt->ha->optrom_mutex);

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
	    tgt);

	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->flags.online = 1;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(qlt_stop_phase2);

/* Called from qlt_remove_target() -> qla2x00_remove_one() */
static void qlt_release(struct qla_tgt *tgt)
{
	scsi_qla_host_t *vha = tgt->vha;
	void *node;
	u64 key = 0;
	u16 i;
	struct qla_qpair_hint *h;
	struct qla_hw_data *ha = vha->hw;

	if (!tgt->tgt_stop && !tgt->tgt_stopped)
		qlt_stop_phase1(tgt);

	if (!tgt->tgt_stopped)
		qlt_stop_phase2(tgt);

	for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
		unsigned long flags;

		h = &tgt->qphints[i];
		if (h->qpair) {
			spin_lock_irqsave(h->qpair->qp_lock_ptr, flags);
			list_del(&h->hint_elem);
			spin_unlock_irqrestore(h->qpair->qp_lock_ptr, flags);
			h->qpair = NULL;
		}
	}
	kfree(tgt->qphints);
	mutex_lock(&qla_tgt_mutex);
	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
	mutex_unlock(&qla_tgt_mutex);

	btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
		btree_remove64(&tgt->lun_qpair_map, key);

	btree_destroy64(&tgt->lun_qpair_map);

	if (vha->vp_idx)
		if (ha->tgt.tgt_ops &&
		    ha->tgt.tgt_ops->remove_target &&
		    vha->vha_tgt.target_lport_ptr)
			ha->tgt.tgt_ops->remove_target(vha);

	vha->vha_tgt.qla_tgt = NULL;

	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
	    "Release of tgt %p finished\n", tgt);

	kfree(tgt);
}

/* ha->hardware_lock supposed to be held on entry */
static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
	const void *param, unsigned int param_size)
{
	struct qla_tgt_sess_work_param *prm;
	unsigned long flags;

	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
	if (!prm) {
		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
		    "qla_target(%d): Unable to create session "
		    "work, command will be refused", 0);
		return -ENOMEM;
	}

	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
	    "Scheduling work (type %d, prm %p)"
	    " to find session for param %p (size %d, tgt %p)\n",
	    type, prm, param, param_size, tgt);

	prm->type = type;
	memcpy(&prm->tm_iocb, param, param_size);

	spin_lock_irqsave(&tgt->sess_work_lock, flags);
	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);

	schedule_work(&tgt->sess_work);

	return 0;
}

/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
 */
static void qlt_send_notify_ack(struct qla_qpair *qpair,
	struct imm_ntfy_from_isp *ntfy,
	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
{
	struct scsi_qla_host *vha = qpair->vha;
	struct qla_hw_data *ha = vha->hw;
	request_t *pkt;
	struct nack_to_isp *nack;

	if (!ha->flags.fw_started)
		return;

	ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);

	pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
	if (!pkt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe049,
		    "qla_target(%d): %s failed: unable to allocate "
		    "request packet\n", vha->vp_idx, __func__);
		return;
	}

	if (vha->vha_tgt.qla_tgt != NULL)
		vha->vha_tgt.qla_tgt->notify_ack_expected++;

	pkt->entry_type = NOTIFY_ACK_TYPE;
	pkt->entry_count = 1;

	nack = (struct nack_to_isp *)pkt;
	nack->ox_id = ntfy->ox_id;

	nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
		nack->u.isp24.flags = ntfy->u.isp24.flags &
			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
	}
	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
	nack->u.isp24.status = ntfy->u.isp24.status;
	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
	nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
	nack->u.isp24.srr_reject_code = srr_reject_code;
	nack->u.isp24.srr_reject_code_expl = srr_explan;
	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;

	/* TODO qualify this with EDIF enable */
	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
	}

	ql_dbg(ql_dbg_tgt, vha, 0xe005,
	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
	    vha->vp_idx, nack->u.isp24.status);

	/* Memory Barrier */
	wmb();
	qla2x00_start_iocbs(vha, qpair->req);
}

static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
{
	struct scsi_qla_host *vha = mcmd->vha;
	struct qla_hw_data *ha = vha->hw;
	struct abts_resp_to_24xx *resp;
	__le32 f_ctl;
	uint32_t h;
	uint8_t *p;
	int rc;
	struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
	struct qla_qpair *qpair = mcmd->qpair;

	ql_dbg(ql_dbg_tgt, vha, 0xe006,
	    "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
	    ha, mcmd->fc_tm_rsp);

	rc = qlt_check_reserve_free_req(qpair, 1);
	if (rc) {
		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
		    "qla_target(%d): %s failed: unable to allocate request packet\n",
		    vha->vp_idx, __func__);
		return -EAGAIN;
	}

	resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
	memset(resp, 0, sizeof(*resp));

	h = qlt_make_handle(qpair);
	if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
		/*
		 * CTIO type 7 from the firmware doesn't provide a way to
		 * know the initiator's LOOP ID, hence we can't find
		 * the session and, so, the command.
		 */
		return -EAGAIN;
	} else {
		qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
	}

	resp->handle = make_handle(qpair->req->id, h);
	resp->entry_type = ABTS_RESP_24XX;
	resp->entry_count = 1;
	resp->nport_handle = abts->nport_handle;
	resp->vp_index = vha->vp_idx;
	resp->sof_type = abts->sof_type;
	resp->exchange_address = abts->exchange_address;
	resp->fcp_hdr_le = abts->fcp_hdr_le;
	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
	    F_CTL_SEQ_INITIATIVE);
	p = (uint8_t *)&f_ctl;
	resp->fcp_hdr_le.f_ctl[0] = *p++;
	resp->fcp_hdr_le.f_ctl[1] = *p++;
	resp->fcp_hdr_le.f_ctl[2] = *p;

	resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
	resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;

	resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
	if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
		resp->payload.ba_acct.low_seq_cnt = 0x0000;
		resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
		resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
		resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
	} else {
		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
		resp->payload.ba_rjt.reason_code =
			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
		/* Other bytes are zero */
	}

	vha->vha_tgt.qla_tgt->abts_resp_expected++;

	/* Memory Barrier */
	wmb();
	if (qpair->reqq_start_iocbs)
		qpair->reqq_start_iocbs(qpair);
	else
		qla2x00_start_iocbs(vha, qpair->req);

	return rc;
}

1802 /*
1803 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1804 */
1805 static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
1806 struct abts_recv_from_24xx *abts, uint32_t status,
1807 bool ids_reversed)
1808 {
1809 struct scsi_qla_host *vha = qpair->vha;
1810 struct qla_hw_data *ha = vha->hw;
1811 struct abts_resp_to_24xx *resp;
1812 __le32 f_ctl;
1813 uint8_t *p;
1814
1815 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1816 "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
1817 ha, abts, status);
1818
1819 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair,
1820 NULL);
1821 if (!resp) {
1822 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1823 "qla_target(%d): %s failed: unable to allocate "
1824 "request packet", vha->vp_idx, __func__);
1825 return;
1826 }
1827
1828 resp->entry_type = ABTS_RESP_24XX;
1829 resp->handle = QLA_TGT_SKIP_HANDLE;
1830 resp->entry_count = 1;
1831 resp->nport_handle = abts->nport_handle;
1832 resp->vp_index = vha->vp_idx;
1833 resp->sof_type = abts->sof_type;
1834 resp->exchange_address = abts->exchange_address;
1835 resp->fcp_hdr_le = abts->fcp_hdr_le;
1836 f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1837 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1838 F_CTL_SEQ_INITIATIVE);
1839 p = (uint8_t *)&f_ctl;
1840 resp->fcp_hdr_le.f_ctl[0] = *p++;
1841 resp->fcp_hdr_le.f_ctl[1] = *p++;
1842 resp->fcp_hdr_le.f_ctl[2] = *p;
1843 if (ids_reversed) {
1844 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id;
1845 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id;
1846 } else {
1847 resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id;
1848 resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id;
1849 }
1850 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1851 if (status == FCP_TMF_CMPL) {
1852 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1853 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1854 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1855 resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF);
1856 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1857 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1858 } else {
1859 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1860 resp->payload.ba_rjt.reason_code =
1861 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1862 /* Other bytes are zero */
1863 }
1864
1865 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1866
1867 /* Memory Barrier */
1868 wmb();
1869 if (qpair->reqq_start_iocbs)
1870 qpair->reqq_start_iocbs(qpair);
1871 else
1872 qla2x00_start_iocbs(vha, qpair->req);
1873 }
1874
1875 /*
1876 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1877 */
1878 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1879 struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
1880 {
1881 struct ctio7_to_24xx *ctio;
1882 u16 tmp;
1883 struct abts_recv_from_24xx *entry;
1884
1885 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
1886 if (ctio == NULL) {
1887 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1888 "qla_target(%d): %s failed: unable to allocate "
1889 "request packet\n", vha->vp_idx, __func__);
1890 return;
1891 }
1892
1893 if (mcmd)
1894 /* abts from remote port */
1895 entry = &mcmd->orig_iocb.abts;
1896 else
1897 /* abts from this driver. */
1898 entry = (struct abts_recv_from_24xx *)pkt;
1899
1900 /*
1901 * On entry we have the firmware's response to an ABTS response
1902 * that we generated, so the ID fields in it are reversed.
1903 */
1904
1905 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe082,
1906 "qla_target(%d): tag %u: Sending TERM EXCH CTIO for ABTS\n",
1907 vha->vp_idx, le32_to_cpu(entry->exchange_addr_to_abort));
1908
1909 ctio->entry_type = CTIO_TYPE7;
1910 ctio->entry_count = 1;
1911 ctio->nport_handle = entry->nport_handle;
1912 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1913 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
1914 ctio->vp_index = vha->vp_idx;
1915 ctio->exchange_addr = entry->exchange_addr_to_abort;
1916 tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
1917
1918 if (mcmd) {
1919 ctio->initiator_id = entry->fcp_hdr_le.s_id;
1920
1921 if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
1922 tmp |= (mcmd->abort_io_attr << 9);
1923 else if (qpair->retry_term_cnt & 1)
1924 tmp |= (0x4 << 9);
1925 } else {
1926 ctio->initiator_id = entry->fcp_hdr_le.d_id;
1927
1928 if (qpair->retry_term_cnt & 1)
1929 tmp |= (0x4 << 9);
1930 }
1931 ctio->u.status1.flags = cpu_to_le16(tmp);
1932 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1933
1934 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1935 "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
1936 le16_to_cpu(ctio->u.status1.flags),
1937 le16_to_cpu(ctio->u.status1.ox_id),
1938 (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
1939
1940 /* Memory Barrier */
1941 wmb();
1942 if (qpair->reqq_start_iocbs)
1943 qpair->reqq_start_iocbs(qpair);
1944 else
1945 qla2x00_start_iocbs(vha, qpair->req);
1946
1947 if (mcmd)
1948 qlt_build_abts_resp_iocb(mcmd);
1949 else
1950 qlt_24xx_send_abts_resp(qpair,
1951 (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
1952
1953 }
1954
1955 /* drop cmds for the given lun
1956 * XXX only looks for cmds on the port through which the lun reset was received
1957 * XXX does not go through the lists of other ports (which may have cmds
1958 * for the same lun)
1959 */
1960 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
1961 {
1962 struct qla_tgt_sess_op *op;
1963 struct qla_tgt_cmd *cmd;
1964 uint32_t key;
1965 unsigned long flags;
1966
1967 key = sid_to_key(s_id);
1968 spin_lock_irqsave(&vha->cmd_list_lock, flags);
1969 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
1970 uint32_t op_key;
1971 u64 op_lun;
1972
1973 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1974 op_lun = scsilun_to_int(
1975 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1976 if (op_key == key && op_lun == lun)
1977 op->aborted = true;
1978 }
1979
1980 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1981 uint32_t cmd_key;
1982 u64 cmd_lun;
1983
1984 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1985 cmd_lun = scsilun_to_int(
1986 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1987 if (cmd_key == key && cmd_lun == lun) {
1988 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe085,
1989 "qla_target(%d): tag %lld: aborted by TMR\n",
1990 vha->vp_idx, cmd->se_cmd.tag);
1991 cmd->aborted = 1;
1992 }
1993 }
1994 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
1995 }
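
/*
 * Illustrative sketch, not part of the driver: abort_cmds_for_lun()
 * above matches commands by collapsing the three bytes of an FC port ID
 * (domain/area/al_pa) into one 24-bit key. sid_to_key() is defined
 * elsewhere in this driver; assuming the be_id_t layout used here, a
 * minimal equivalent could look like this:
 */
static inline uint32_t example_sid_to_key(be_id_t s_id)
{
	/* domain is the most significant byte of the 24-bit port ID */
	return ((uint32_t)s_id.domain << 16) |
	       ((uint32_t)s_id.area << 8) |
	       (uint32_t)s_id.al_pa;
}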
1996
1997 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
1998 uint64_t unpacked_lun)
1999 {
2000 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2001 struct qla_qpair_hint *h = NULL;
2002
2003 if (vha->flags.qpairs_available) {
2004 h = btree_lookup64(&tgt->lun_qpair_map, unpacked_lun);
2005 if (!h)
2006 h = &tgt->qphints[0];
2007 } else {
2008 h = &tgt->qphints[0];
2009 }
2010
2011 return h;
2012 }
2013
2014 static void qlt_do_tmr_work(struct work_struct *work)
2015 {
2016 struct qla_tgt_mgmt_cmd *mcmd =
2017 container_of(work, struct qla_tgt_mgmt_cmd, work);
2018 struct qla_hw_data *ha = mcmd->vha->hw;
2019 int rc;
2020 uint32_t tag;
2021
2022 switch (mcmd->tmr_func) {
2023 case QLA_TGT_ABTS:
2024 tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort);
2025 break;
2026 default:
2027 tag = 0;
2028 break;
2029 }
2030
2031 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, mcmd->unpacked_lun,
2032 mcmd->tmr_func, tag);
2033
2034 if (rc != 0) {
2035 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2036 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2037 mcmd->vha->vp_idx, rc);
2038 mcmd->flags |= QLA24XX_MGMT_LLD_OWNED;
2039 mcmd->fc_tm_rsp = FCP_TMF_FAILED;
2040 qlt_xmit_tm_rsp(mcmd);
2041 }
2042 }
2043
2044 /* ha->hardware_lock supposed to be held on entry */
2045 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2046 struct abts_recv_from_24xx *abts, struct fc_port *sess)
2047 {
2048 struct qla_hw_data *ha = vha->hw;
2049 struct qla_tgt_mgmt_cmd *mcmd;
2050 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2051 struct qla_tgt_cmd *abort_cmd;
2052
2053 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2054 "qla_target(%d): task abort (tag=%d)\n",
2055 vha->vp_idx, abts->exchange_addr_to_abort);
2056
2057 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2058 if (mcmd == NULL) {
2059 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2060 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2061 vha->vp_idx, __func__);
2062 return -ENOMEM;
2063 }
2064 memset(mcmd, 0, sizeof(*mcmd));
2065 mcmd->cmd_type = TYPE_TGT_TMCMD;
2066 mcmd->sess = sess;
2067 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
2068 mcmd->reset_count = ha->base_qpair->chip_reset;
2069 mcmd->tmr_func = QLA_TGT_ABTS;
2070 mcmd->qpair = h->qpair;
2071 mcmd->vha = vha;
2072
2073 /*
2074 * LUN is looked up by target-core internally based on the passed
2075 * abts->exchange_addr_to_abort tag.
2076 */
2077 mcmd->se_cmd.cpuid = h->cpuid;
2078
2079 abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess,
2080 le32_to_cpu(abts->exchange_addr_to_abort));
2081 if (!abort_cmd) {
2082 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2083 return -EIO;
2084 }
2085 mcmd->unpacked_lun = abort_cmd->se_cmd.orig_fe_lun;
2086
2087 if (abort_cmd->qpair) {
2088 mcmd->qpair = abort_cmd->qpair;
2089 mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
2090 mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
2091 mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
2092 }
2093
2094 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
2095 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq, &mcmd->work);
2096
2097 return 0;
2098 }
2099
2100 /*
2101 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2102 */
2103 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2104 struct abts_recv_from_24xx *abts)
2105 {
2106 struct qla_hw_data *ha = vha->hw;
2107 struct fc_port *sess;
2108 uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort);
2109 be_id_t s_id;
2110 int rc;
2111 unsigned long flags;
2112
2113 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
2114 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2115 "qla_target(%d): ABTS: Abort Sequence not "
2116 "supported\n", vha->vp_idx);
2117 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2118 false);
2119 return;
2120 }
2121
2122 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
2123 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2124 "qla_target(%d): ABTS: Unknown Exchange "
2125 "Address received\n", vha->vp_idx);
2126 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2127 false);
2128 return;
2129 }
2130
2131 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2132 "qla_target(%d): task abort (s_id=%x:%x:%x, "
2133 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2134 abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag,
2135 le32_to_cpu(abts->fcp_hdr_le.parameter));
2136
2137 s_id = le_id_to_be(abts->fcp_hdr_le.s_id);
2138
2139 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2140 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2141 if (!sess) {
2142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2143 "qla_target(%d): task abort for non-existent session\n",
2144 vha->vp_idx);
2145 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2146
2147 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2148 false);
2149 return;
2150 }
2151 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2152
2153
2154 if (sess->deleted) {
2155 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2156 false);
2157 return;
2158 }
2159
2160 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2161 if (rc != 0) {
2162 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2163 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
2164 vha->vp_idx, rc);
2165 qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_REJECTED,
2166 false);
2167 return;
2168 }
2169 }
2170
2171 /*
2172 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2173 */
2174 static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair,
2175 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
2176 {
2177 struct scsi_qla_host *ha = mcmd->vha;
2178 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
2179 struct ctio7_to_24xx *ctio;
2180 uint16_t temp;
2181
2182 ql_dbg(ql_dbg_tgt, ha, 0xe008,
2183 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
2184 ha, atio, resp_code);
2185
2186
2187 ctio = (struct ctio7_to_24xx *)__qla2x00_alloc_iocbs(qpair, NULL);
2188 if (ctio == NULL) {
2189 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
2190 "qla_target(%d): %s failed: unable to allocate "
2191 "request packet\n", ha->vp_idx, __func__);
2192 return;
2193 }
2194
2195 ctio->entry_type = CTIO_TYPE7;
2196 ctio->entry_count = 1;
2197 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2198 ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id);
2199 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2200 ctio->vp_index = ha->vp_idx;
2201 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2202 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2203 temp = (atio->u.isp24.attr << 9)|
2204 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2205 ctio->u.status1.flags = cpu_to_le16(temp);
2206 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2207 ctio->u.status1.ox_id = cpu_to_le16(temp);
2208 ctio->u.status1.scsi_status =
2209 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
2210 ctio->u.status1.response_len = cpu_to_le16(8);
2211 ctio->u.status1.sense_data[0] = resp_code;
2212
2213 /* Memory Barrier */
2214 wmb();
2215 if (qpair->reqq_start_iocbs)
2216 qpair->reqq_start_iocbs(qpair);
2217 else
2218 qla2x00_start_iocbs(ha, qpair->req);
2219 }
2220
2221 void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
2222 {
2223 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2224 }
2225 EXPORT_SYMBOL(qlt_free_mcmd);
2226
2227 /*
2228 * If the upper layer knows about this mgmt cmd, then call its ->free_cmd()
2229 * callback, which will eventually call qlt_free_mcmd(). Otherwise, call
2230 * qlt_free_mcmd() directly.
2231 */
2232 void qlt_free_ul_mcmd(struct qla_hw_data *ha, struct qla_tgt_mgmt_cmd *mcmd)
2233 {
2234 if (!mcmd)
2235 return;
2236 if (mcmd->flags & QLA24XX_MGMT_LLD_OWNED)
2237 qlt_free_mcmd(mcmd);
2238 else
2239 ha->tgt.tgt_ops->free_mcmd(mcmd);
2240 }
2241
2242 /*
2243 * ha->hardware_lock supposed to be held on entry. Might drop it, then
2244 * reacquire
2245 */
2246 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2247 uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
2248 {
2249 struct atio_from_isp *atio = &cmd->atio;
2250 struct ctio7_to_24xx *ctio;
2251 uint16_t temp;
2252 struct scsi_qla_host *vha = cmd->vha;
2253
2254 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2255 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2256 "sense_key=%02x, asc=%02x, ascq=%02x",
2257 vha, atio, scsi_status, sense_key, asc, ascq);
2258
2259 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2260 if (!ctio) {
2261 ql_dbg(ql_dbg_async, vha, 0x3067,
2262 "qla2x00t(%ld): %s failed: unable to allocate request packet",
2263 vha->host_no, __func__);
2264 goto out;
2265 }
2266
2267 ctio->entry_type = CTIO_TYPE7;
2268 ctio->entry_count = 1;
2269 ctio->handle = QLA_TGT_SKIP_HANDLE;
2270 ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
2271 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2272 ctio->vp_index = vha->vp_idx;
2273 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2274 ctio->exchange_addr = atio->u.isp24.exchange_addr;
2275 temp = (atio->u.isp24.attr << 9) |
2276 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS;
2277 ctio->u.status1.flags = cpu_to_le16(temp);
2278 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2279 ctio->u.status1.ox_id = cpu_to_le16(temp);
2280 ctio->u.status1.scsi_status =
2281 cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
2282 ctio->u.status1.response_len = cpu_to_le16(18);
2283 ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
2284
2285 if (ctio->u.status1.residual != 0)
2286 ctio->u.status1.scsi_status |=
2287 cpu_to_le16(SS_RESIDUAL_UNDER);
2288
2289 /* Fixed format sense data. */
2290 ctio->u.status1.sense_data[0] = 0x70;
2291 ctio->u.status1.sense_data[2] = sense_key;
2292 /* Additional sense length */
2293 ctio->u.status1.sense_data[7] = 0xa;
2294 /* ASC and ASCQ */
2295 ctio->u.status1.sense_data[12] = asc;
2296 ctio->u.status1.sense_data[13] = ascq;
2297
2298 /* Memory Barrier */
2299 wmb();
2300
2301 if (qpair->reqq_start_iocbs)
2302 qpair->reqq_start_iocbs(qpair);
2303 else
2304 qla2x00_start_iocbs(vha, qpair->req);
2305
2306 out:
2307 return;
2308 }
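
/*
 * Illustrative sketch, not part of the driver: qlt_send_resp_ctio()
 * above hand-builds fixed-format sense data (SPC "response code" 0x70).
 * A hypothetical helper filling the same bytes of a caller-provided
 * 18-byte buffer:
 */
static void example_build_fixed_sense(uint8_t *buf, uint8_t sense_key,
	uint8_t asc, uint8_t ascq)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;		/* current error, fixed format */
	buf[2] = sense_key;
	buf[7] = 0xa;		/* additional sense length (18 - 8) */
	buf[12] = asc;
	buf[13] = ascq;
}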
2309
2310 /* callback from target fabric module code */
2311 void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
2312 {
2313 struct scsi_qla_host *vha = mcmd->sess->vha;
2314 struct qla_hw_data *ha = vha->hw;
2315 unsigned long flags;
2316 struct qla_qpair *qpair = mcmd->qpair;
2317 bool free_mcmd = true;
2318
2319 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2320 "TM response mcmd (%p) status %#x state %#x",
2321 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
2322
2323 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
2324
2325 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2326 /*
2327 * Either the port is not online or this request was from
2328 * previous life, just abort the processing.
2329 */
2330 ql_dbg(ql_dbg_async, vha, 0xe100,
2331 "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
2332 vha->flags.online, qla2x00_reset_active(vha),
2333 mcmd->reset_count, qpair->chip_reset);
2334 qlt_free_ul_mcmd(ha, mcmd);
2335 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2336 return;
2337 }
2338
2339 if (mcmd->flags & QLA24XX_MGMT_SEND_NACK) {
2340 switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
2341 case ELS_LOGO:
2342 case ELS_PRLO:
2343 case ELS_TPRLO:
2344 ql_dbg(ql_dbg_disc, vha, 0x2106,
2345 "TM response logo %8phC status %#x state %#x",
2346 mcmd->sess->port_name, mcmd->fc_tm_rsp,
2347 mcmd->flags);
2348 qlt_schedule_sess_for_deletion(mcmd->sess);
2349 break;
2350 default:
2351 qlt_send_notify_ack(vha->hw->base_qpair,
2352 &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
2353 break;
2354 }
2355 } else {
2356 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
2357 qlt_build_abts_resp_iocb(mcmd);
2358 free_mcmd = false;
2359 } else
2360 qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
2361 mcmd->fc_tm_rsp);
2362 }
2363 /*
2364 * Make the callback for ->free_mcmd() to queue_work() and invoke
2365 * target_put_sess_cmd() to drop cmd_kref to 1. The final
2366 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
2367 * -> tcm_qla2xxx_check_stop_free() to release the TMR-associated se_cmd
2368 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
2369 * qlt_xmit_tm_rsp() returns here.
2370 */
2371 if (free_mcmd)
2372 qlt_free_ul_mcmd(ha, mcmd);
2373
2374 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
2375 }
2376 EXPORT_SYMBOL(qlt_xmit_tm_rsp);
2377
2378 /* No locks */
2379 static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
2380 {
2381 struct qla_tgt_cmd *cmd = prm->cmd;
2382
2383 BUG_ON(cmd->sg_cnt == 0);
2384
2385 prm->sg = (struct scatterlist *)cmd->sg;
2386 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2387 cmd->sg_cnt, cmd->dma_data_direction);
2388 if (unlikely(prm->seg_cnt == 0))
2389 goto out_err;
2390
2391 prm->cmd->sg_mapped = 1;
2392
2393 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2394 /*
2395 * If there are more than four SG entries then we need to allocate
2396 * continuation entries
2397 */
2398 if (prm->seg_cnt > QLA_TGT_DATASEGS_PER_CMD_24XX)
2399 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
2400 QLA_TGT_DATASEGS_PER_CMD_24XX,
2401 QLA_TGT_DATASEGS_PER_CONT_24XX);
2402 } else {
2403 /* DIF */
2404 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2405 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2406 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2407 prm->tot_dsds = prm->seg_cnt;
2408 } else
2409 prm->tot_dsds = prm->seg_cnt;
2410
2411 if (cmd->prot_sg_cnt) {
2412 prm->prot_sg = cmd->prot_sg;
2413 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2414 cmd->prot_sg, cmd->prot_sg_cnt,
2415 cmd->dma_data_direction);
2416 if (unlikely(prm->prot_seg_cnt == 0))
2417 goto out_err;
2418
2419 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2420 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2421 /* DIF bundling is not supported here */
2422 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2423 cmd->blk_sz);
2424 prm->tot_dsds += prm->prot_seg_cnt;
2425 } else
2426 prm->tot_dsds += prm->prot_seg_cnt;
2427 }
2428 }
2429
2430 return 0;
2431
2432 out_err:
2433 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2434 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
2435 0, prm->cmd->sg_cnt);
2436 return -1;
2437 }
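
/*
 * Illustrative sketch, not part of the driver: the req_cnt computation
 * above. One command IOCB carries QLA_TGT_DATASEGS_PER_CMD_24XX data
 * segments; every extra QLA_TGT_DATASEGS_PER_CONT_24XX segments need one
 * continuation IOCB. With hypothetical values of 1 segment per command
 * IOCB and 5 per continuation, 12 segments need DIV_ROUND_UP(12 - 1, 5)
 * = 3 continuation entries, i.e. 4 request-ring entries in total.
 */
static uint32_t example_req_cnt(uint32_t seg_cnt, uint32_t per_cmd,
	uint32_t per_cont)
{
	uint32_t req_cnt = 1;	/* the command IOCB itself */

	if (seg_cnt > per_cmd)
		req_cnt += DIV_ROUND_UP(seg_cnt - per_cmd, per_cont);
	return req_cnt;
}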
2438
2439 void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2440 {
2441 struct qla_hw_data *ha;
2442 struct qla_qpair *qpair;
2443
2444 if (!cmd->sg_mapped)
2445 return;
2446
2447 qpair = cmd->qpair;
2448
2449 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2450 cmd->dma_data_direction);
2451 cmd->sg_mapped = 0;
2452
2453 if (cmd->prot_sg_cnt)
2454 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2455 cmd->dma_data_direction);
2456
2457 if (!cmd->ctx)
2458 return;
2459 ha = vha->hw;
2460 if (cmd->ctx_dsd_alloced)
2461 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2462
2463 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
2464 }
2465
2466 static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
2467 uint32_t req_cnt)
2468 {
2469 uint32_t cnt;
2470 struct req_que *req = qpair->req;
2471
2472 if (req->cnt < (req_cnt + 2)) {
2473 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
2474 rd_reg_dword_relaxed(req->req_q_out));
2475
2476 if (req->ring_index < cnt)
2477 req->cnt = cnt - req->ring_index;
2478 else
2479 req->cnt = req->length - (req->ring_index - cnt);
2480
2481 if (unlikely(req->cnt < (req_cnt + 2)))
2482 return -EAGAIN;
2483 }
2484
2485 req->cnt -= req_cnt;
2486
2487 return 0;
2488 }
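
/*
 * Illustrative sketch, not part of the driver: the free-slot
 * recalculation above. The request ring is circular, so the space
 * between the driver's producer index (ring_index) and the consumer
 * index last reported by the firmware (out) depends on which side of
 * the wrap point each index sits:
 */
static uint32_t example_ring_space(uint32_t ring_index, uint32_t out,
	uint32_t length)
{
	return (ring_index < out) ? out - ring_index
				  : length - (ring_index - out);
}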
2489
2490 /*
2491 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2492 */
2493 static inline void *qlt_get_req_pkt(struct req_que *req)
2494 {
2495 /* Adjust ring index. */
2496 req->ring_index++;
2497 if (req->ring_index == req->length) {
2498 req->ring_index = 0;
2499 req->ring_ptr = req->ring;
2500 } else {
2501 req->ring_ptr++;
2502 }
2503 return (cont_entry_t *)req->ring_ptr;
2504 }
2505
2506 /* ha->hardware_lock supposed to be held on entry */
2507 static inline uint32_t qlt_make_handle(struct qla_qpair *qpair)
2508 {
2509 uint32_t h;
2510 int index;
2511 uint8_t found = 0;
2512 struct req_que *req = qpair->req;
2513
2514 h = req->current_outstanding_cmd;
2515
2516 for (index = 1; index < req->num_outstanding_cmds; index++) {
2517 h++;
2518 if (h == req->num_outstanding_cmds)
2519 h = 1;
2520
2521 if (h == QLA_TGT_SKIP_HANDLE)
2522 continue;
2523
2524 if (!req->outstanding_cmds[h]) {
2525 found = 1;
2526 break;
2527 }
2528 }
2529
2530 if (found) {
2531 req->current_outstanding_cmd = h;
2532 } else {
2533 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2534 "qla_target(%d): Ran out of empty cmd slots\n",
2535 qpair->vha->vp_idx);
2536 h = QLA_TGT_NULL_HANDLE;
2537 }
2538
2539 return h;
2540 }
2541
2542 /* ha->hardware_lock supposed to be held on entry */
2543 static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
2544 struct qla_tgt_prm *prm)
2545 {
2546 uint32_t h;
2547 struct ctio7_to_24xx *pkt;
2548 struct atio_from_isp *atio = &prm->cmd->atio;
2549 uint16_t temp;
2550 struct qla_tgt_cmd *cmd = prm->cmd;
2551
2552 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
2553 prm->pkt = pkt;
2554 memset(pkt, 0, sizeof(*pkt));
2555
2556 pkt->entry_type = CTIO_TYPE7;
2557 pkt->entry_count = (uint8_t)prm->req_cnt;
2558 pkt->vp_index = prm->cmd->vp_idx;
2559
2560 h = qlt_make_handle(qpair);
2561 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
2562 /*
2563 * CTIO type 7 from the firmware doesn't provide a way to
2564 * know the initiator's LOOP ID, hence we can't find
2565 * the session and, therefore, the command.
2566 */
2567 return -EAGAIN;
2568 } else
2569 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2570
2571 pkt->handle = make_handle(qpair->req->id, h);
2572 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
2573 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2574 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
2575 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
2576 pkt->exchange_addr = atio->u.isp24.exchange_addr;
2577 temp = atio->u.isp24.attr << 9;
2578 pkt->u.status0.flags |= cpu_to_le16(temp);
2579 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2580 pkt->u.status0.ox_id = cpu_to_le16(temp);
2581 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2582
2583 if (cmd->edif) {
2584 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2585 prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
2586 if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2587 prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
2588
2589 pkt->u.status0.edif_flags |= EF_EN_EDIF;
2590 }
2591
2592 return 0;
2593 }
2594
2595 /*
2596 * ha->hardware_lock supposed to be held on entry. We have already made sure
2597 * that there are enough request entries to avoid having to drop it.
2598 */
2599 static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
2600 {
2601 int cnt;
2602 struct dsd64 *cur_dsd;
2603
2604 /* Build continuation packets */
2605 while (prm->seg_cnt > 0) {
2606 cont_a64_entry_t *cont_pkt64 =
2607 (cont_a64_entry_t *)qlt_get_req_pkt(
2608 prm->cmd->qpair->req);
2609
2610 /*
2611 * Make sure that none of cont_pkt64's
2612 * 64-bit-specific fields are used for 32-bit
2613 * addressing. Cast to (cont_entry_t *) for
2614 * that.
2615 */
2616
2617 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
2618
2619 cont_pkt64->entry_count = 1;
2620 cont_pkt64->sys_define = 0;
2621
2622 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
2623 cur_dsd = cont_pkt64->dsd;
2624
2625 /* Load continuation entry data segments */
2626 for (cnt = 0;
2627 cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
2628 cnt++, prm->seg_cnt--) {
2629 append_dsd64(&cur_dsd, prm->sg);
2630 prm->sg = sg_next(prm->sg);
2631 }
2632 }
2633 }
2634
2635 /*
2636 * ha->hardware_lock supposed to be held on entry. We have already made sure
2637 * that there are enough request entries to avoid having to drop it.
2638 */
2639 static void qlt_load_data_segments(struct qla_tgt_prm *prm)
2640 {
2641 int cnt;
2642 struct dsd64 *cur_dsd;
2643 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
2644
2645 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2646
2647 /* Setup packet address segment pointer */
2648 cur_dsd = &pkt24->u.status0.dsd;
2649
2650 /* Set total data segment count */
2651 if (prm->seg_cnt)
2652 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
2653
2654 if (prm->seg_cnt == 0) {
2655 /* No data transfer */
2656 cur_dsd->address = 0;
2657 cur_dsd->length = 0;
2658 return;
2659 }
2660
2661 /* If scatter gather */
2662
2663 /* Load command entry data segments */
2664 for (cnt = 0;
2665 (cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
2666 cnt++, prm->seg_cnt--) {
2667 append_dsd64(&cur_dsd, prm->sg);
2668 prm->sg = sg_next(prm->sg);
2669 }
2670
2671 qlt_load_cont_data_segments(prm);
2672 }
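
/*
 * Illustrative sketch, not part of the driver: each dsd64 descriptor
 * filled above pairs a 64-bit DMA address with a 32-bit byte count, both
 * little-endian. append_dsd64() is defined elsewhere in this driver; a
 * minimal equivalent might look like this:
 */
static inline void example_append_dsd64(struct dsd64 **dsd,
	struct scatterlist *sg)
{
	put_unaligned_le64(sg_dma_address(sg), &(*dsd)->address);
	(*dsd)->length = cpu_to_le32(sg_dma_len(sg));
	(*dsd)++;
}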
2673
2674 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2675 {
2676 return cmd->bufflen > 0;
2677 }
2678
2679 static void qlt_print_dif_err(struct qla_tgt_prm *prm)
2680 {
2681 struct qla_tgt_cmd *cmd;
2682 struct scsi_qla_host *vha;
2683
2684 /* asc 0x10=dif error */
2685 if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
2686 cmd = prm->cmd;
2687 vha = cmd->vha;
2688 /* ASCQ */
2689 switch (prm->sense_buffer[13]) {
2690 case 1:
2691 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2692 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2693 "se_cmd=%p tag[%x]",
2694 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2695 cmd->atio.u.isp24.exchange_addr);
2696 break;
2697 case 2:
2698 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2699 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2700 "se_cmd=%p tag[%x]",
2701 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2702 cmd->atio.u.isp24.exchange_addr);
2703 break;
2704 case 3:
2705 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2706 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
2707 "se_cmd=%p tag[%x]",
2708 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2709 cmd->atio.u.isp24.exchange_addr);
2710 break;
2711 default:
2712 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2713 "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
2714 "se_cmd=%p tag[%x]",
2715 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2716 cmd->atio.u.isp24.exchange_addr);
2717 break;
2718 }
2719 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2720 }
2721 }
2722
2723 /*
2724 * Called without ha->hardware_lock held
2725 */
2726 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2727 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
2728 uint32_t *full_req_cnt)
2729 {
2730 struct se_cmd *se_cmd = &cmd->se_cmd;
2731 struct qla_qpair *qpair = cmd->qpair;
2732
2733 prm->cmd = cmd;
2734 prm->tgt = cmd->tgt;
2735 prm->pkt = NULL;
2736 prm->rq_result = scsi_status;
2737 prm->sense_buffer = &cmd->sense_buffer[0];
2738 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
2739 prm->sg = NULL;
2740 prm->seg_cnt = -1;
2741 prm->req_cnt = 1;
2742 prm->residual = 0;
2743 prm->add_status_pkt = 0;
2744 prm->prot_sg = NULL;
2745 prm->prot_seg_cnt = 0;
2746 prm->tot_dsds = 0;
2747
2748 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2749 if (qlt_pci_map_calc_cnt(prm) != 0)
2750 return -EAGAIN;
2751 }
2752
2753 *full_req_cnt = prm->req_cnt;
2754
2755 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2756 prm->residual = se_cmd->residual_count;
2757 ql_dbg_qp(ql_dbg_io + ql_dbg_verbose, qpair, 0x305c,
2758 "Residual underflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2759 prm->residual, se_cmd->tag,
2760 se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2761 cmd->bufflen, prm->rq_result);
2762 prm->rq_result |= SS_RESIDUAL_UNDER;
2763 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
2764 prm->residual = se_cmd->residual_count;
2765 ql_dbg_qp(ql_dbg_io, qpair, 0x305d,
2766 "Residual overflow: %d (tag %lld, op %x, bufflen %d, rq_result %x)\n",
2767 prm->residual, se_cmd->tag, se_cmd->t_task_cdb ?
2768 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2769 prm->rq_result |= SS_RESIDUAL_OVER;
2770 }
2771
2772 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2773 /*
2774 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
2775 * ignored in *xmit_response() below
2776 */
2777 if (qlt_has_data(cmd)) {
2778 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
2779 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2780 (prm->rq_result != 0))) {
2781 prm->add_status_pkt = 1;
2782 (*full_req_cnt)++;
2783 }
2784 }
2785 }
2786
2787 return 0;
2788 }
2789
2790 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2791 int sending_sense)
2792 {
2793 if (cmd->qpair->enable_class_2)
2794 return 0;
2795
2796 if (sending_sense)
2797 return cmd->conf_compl_supported;
2798 else
2799 return cmd->qpair->enable_explicit_conf &&
2800 cmd->conf_compl_supported;
2801 }
2802
2803 static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
2804 struct qla_tgt_prm *prm)
2805 {
2806 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
2807 (uint32_t)sizeof(ctio->u.status1.sense_data));
2808 ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
2809 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2810 ctio->u.status0.flags |= cpu_to_le16(
2811 CTIO7_FLAGS_EXPLICIT_CONFORM |
2812 CTIO7_FLAGS_CONFORM_REQ);
2813 }
2814 ctio->u.status0.residual = cpu_to_le32(prm->residual);
2815 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
2816 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
2817 int i;
2818
2819 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2820 if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
2821 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2822 "Skipping EXPLICIT_CONFORM and "
2823 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
2824 "non GOOD status\n");
2825 goto skip_explict_conf;
2826 }
2827 ctio->u.status1.flags |= cpu_to_le16(
2828 CTIO7_FLAGS_EXPLICIT_CONFORM |
2829 CTIO7_FLAGS_CONFORM_REQ);
2830 }
2831 skip_explict_conf:
2832 ctio->u.status1.flags &=
2833 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2834 ctio->u.status1.flags |=
2835 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2836 ctio->u.status1.scsi_status |=
2837 cpu_to_le16(SS_SENSE_LEN_VALID);
2838 ctio->u.status1.sense_length =
2839 cpu_to_le16(prm->sense_buffer_len);
2840 for (i = 0; i < prm->sense_buffer_len/4; i++) {
2841 uint32_t v;
2842
2843 v = get_unaligned_be32(
2844 &((uint32_t *)prm->sense_buffer)[i]);
2845 put_unaligned_le32(v,
2846 &((uint32_t *)ctio->u.status1.sense_data)[i]);
2847 }
2848 qlt_print_dif_err(prm);
2849
2850 } else {
2851 ctio->u.status1.flags &=
2852 ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2853 ctio->u.status1.flags |=
2854 cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2855 ctio->u.status1.sense_length = 0;
2856 memset(ctio->u.status1.sense_data, 0,
2857 sizeof(ctio->u.status1.sense_data));
2858 }
2859
2860 /* Sense with len > 24, is it possible ??? */
2861 }
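
/*
 * Illustrative sketch, not part of the driver: the sense-data copy loop
 * above swaps the bytes of each 32-bit word, because the firmware
 * expects the byte-stream sense buffer laid out as little-endian dwords:
 */
static void example_sense_to_fw(const uint8_t *sense, uint8_t *fw_buf,
	int len)
{
	int i;

	for (i = 0; i < len / 4; i++) {
		uint32_t v = get_unaligned_be32(sense + 4 * i);

		put_unaligned_le32(v, fw_buf + 4 * i);
	}
}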
2862
2863 static inline int
2864 qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
2865 {
2866 switch (se_cmd->prot_op) {
2867 case TARGET_PROT_DOUT_INSERT:
2868 case TARGET_PROT_DIN_STRIP:
2869 if (ql2xenablehba_err_chk >= 1)
2870 return 1;
2871 break;
2872 case TARGET_PROT_DOUT_PASS:
2873 case TARGET_PROT_DIN_PASS:
2874 if (ql2xenablehba_err_chk >= 2)
2875 return 1;
2876 break;
2877 case TARGET_PROT_DIN_INSERT:
2878 case TARGET_PROT_DOUT_STRIP:
2879 return 1;
2880 default:
2881 break;
2882 }
2883 return 0;
2884 }
2885
2886 static inline int
2887 qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
2888 {
2889 switch (se_cmd->prot_op) {
2890 case TARGET_PROT_DIN_INSERT:
2891 case TARGET_PROT_DOUT_INSERT:
2892 case TARGET_PROT_DIN_STRIP:
2893 case TARGET_PROT_DOUT_STRIP:
2894 case TARGET_PROT_DIN_PASS:
2895 case TARGET_PROT_DOUT_PASS:
2896 return 1;
2897 default:
2898 return 0;
2899 }
2900 return 0;
2901 }
2902
2903 /*
2904 * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
2905 */
2906 static void
2907 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2908 uint16_t *pfw_prot_opts)
2909 {
2910 struct se_cmd *se_cmd = &cmd->se_cmd;
2911 uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
2912 scsi_qla_host_t *vha = cmd->tgt->vha;
2913 struct qla_hw_data *ha = vha->hw;
2914 uint32_t t32 = 0;
2915
2916 /*
2917 * Wait until the Mode Sense/Select cmd, mode page Ah, subpage 2
2918 * has been implemented by TCM, before the AppTag is available.
2919 * Look for modesense_handlers[]
2920 */
2921 ctx->app_tag = 0;
2922 ctx->app_tag_mask[0] = 0x0;
2923 ctx->app_tag_mask[1] = 0x0;
2924
2925 if (IS_PI_UNINIT_CAPABLE(ha)) {
2926 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
2927 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
2928 *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
2929 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
2930 *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
2931 }
2932
2933 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
2934
2935 switch (se_cmd->prot_type) {
2936 case TARGET_DIF_TYPE0_PROT:
2937 /*
2938 * No check for ql2xenablehba_err_chk, as it
2939 * would be an I/O error if hba tag generation
2940 * is not done.
2941 */
2942 ctx->ref_tag = cpu_to_le32(lba);
2943 /* enable ALL bytes of the ref tag */
2944 ctx->ref_tag_mask[0] = 0xff;
2945 ctx->ref_tag_mask[1] = 0xff;
2946 ctx->ref_tag_mask[2] = 0xff;
2947 ctx->ref_tag_mask[3] = 0xff;
2948 break;
2949 case TARGET_DIF_TYPE1_PROT:
2950 /*
2951 * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
2952 * REF tag, and 16 bit app tag.
2953 */
2954 ctx->ref_tag = cpu_to_le32(lba);
2955 if (!qla_tgt_ref_mask_check(se_cmd) ||
2956 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2957 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2958 break;
2959 }
2960 /* enable ALL bytes of the ref tag */
2961 ctx->ref_tag_mask[0] = 0xff;
2962 ctx->ref_tag_mask[1] = 0xff;
2963 ctx->ref_tag_mask[2] = 0xff;
2964 ctx->ref_tag_mask[3] = 0xff;
2965 break;
2966 case TARGET_DIF_TYPE2_PROT:
2967 /*
2968 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
2969 * tag has to match LBA in CDB + N
2970 */
2971 ctx->ref_tag = cpu_to_le32(lba);
2972 if (!qla_tgt_ref_mask_check(se_cmd) ||
2973 !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
2974 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2975 break;
2976 }
2977 /* enable ALL bytes of the ref tag */
2978 ctx->ref_tag_mask[0] = 0xff;
2979 ctx->ref_tag_mask[1] = 0xff;
2980 ctx->ref_tag_mask[2] = 0xff;
2981 ctx->ref_tag_mask[3] = 0xff;
2982 break;
2983 case TARGET_DIF_TYPE3_PROT:
2984 /* For TYPE 3 protection: 16 bit GUARD only */
2985 *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
2986 ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
2987 ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
2988 break;
2989 }
2990 }
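
/*
 * Illustrative sketch, not part of the driver: the ref_tag_mask bytes
 * set above act per byte of the 32-bit reference tag -- 0xff enables
 * hardware checking of that byte, 0x00 disables it. A hypothetical
 * helper:
 */
static void example_set_ref_tag_mask(uint8_t mask[4], bool check_ref_tag)
{
	int i;

	for (i = 0; i < 4; i++)
		mask[i] = check_ref_tag ? 0xff : 0x00;
}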
2991
2992 static inline int
2993 qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm)
2994 {
2995 struct dsd64 *cur_dsd;
2996 uint32_t transfer_length = 0;
2997 uint32_t data_bytes;
2998 uint32_t dif_bytes;
2999 uint8_t bundling = 1;
3000 struct crc_context *crc_ctx_pkt = NULL;
3001 struct qla_hw_data *ha;
3002 struct ctio_crc2_to_fw *pkt;
3003 dma_addr_t crc_ctx_dma;
3004 uint16_t fw_prot_opts = 0;
3005 struct qla_tgt_cmd *cmd = prm->cmd;
3006 struct se_cmd *se_cmd = &cmd->se_cmd;
3007 uint32_t h;
3008 struct atio_from_isp *atio = &prm->cmd->atio;
3009 struct qla_tc_param tc;
3010 uint16_t t16;
3011 scsi_qla_host_t *vha = cmd->vha;
3012
3013 ha = vha->hw;
3014
3015 pkt = (struct ctio_crc2_to_fw *)qpair->req->ring_ptr;
3016 prm->pkt = pkt;
3017 memset(pkt, 0, sizeof(*pkt));
3018
3019 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3020 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3021 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3022 prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
3023
3024 if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
3025 (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
3026 bundling = 0;
3027
3028 /* Compute the DIF length and adjust the data length to include protection */
3029 data_bytes = cmd->bufflen;
3030 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
3031
3032 switch (se_cmd->prot_op) {
3033 case TARGET_PROT_DIN_INSERT:
3034 case TARGET_PROT_DOUT_STRIP:
3035 transfer_length = data_bytes;
3036 if (cmd->prot_sg_cnt)
3037 data_bytes += dif_bytes;
3038 break;
3039 case TARGET_PROT_DIN_STRIP:
3040 case TARGET_PROT_DOUT_INSERT:
3041 case TARGET_PROT_DIN_PASS:
3042 case TARGET_PROT_DOUT_PASS:
3043 transfer_length = data_bytes + dif_bytes;
3044 break;
3045 default:
3046 BUG();
3047 break;
3048 }
3049
3050 if (!qlt_hba_err_chk_enabled(se_cmd))
3051 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
3052 /* HBA error checking enabled */
3053 else if (IS_PI_UNINIT_CAPABLE(ha)) {
3054 if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
3055 (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
3056 fw_prot_opts |= PO_DIS_VALD_APP_ESC;
3057 else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
3058 fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
3059 }
3060
3061 switch (se_cmd->prot_op) {
3062 case TARGET_PROT_DIN_INSERT:
3063 case TARGET_PROT_DOUT_INSERT:
3064 fw_prot_opts |= PO_MODE_DIF_INSERT;
3065 break;
3066 case TARGET_PROT_DIN_STRIP:
3067 case TARGET_PROT_DOUT_STRIP:
3068 fw_prot_opts |= PO_MODE_DIF_REMOVE;
3069 break;
3070 case TARGET_PROT_DIN_PASS:
3071 case TARGET_PROT_DOUT_PASS:
3072 fw_prot_opts |= PO_MODE_DIF_PASS;
3073 /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
3074 break;
3075 default:/* Normal Request */
3076 fw_prot_opts |= PO_MODE_DIF_PASS;
3077 break;
3078 }
3079
3080 /* ---- PKT ---- */
3081 /* Update entry type to indicate Command Type CRC_2 IOCB */
3082 pkt->entry_type = CTIO_CRC2;
3083 pkt->entry_count = 1;
3084 pkt->vp_index = cmd->vp_idx;
3085
3086 h = qlt_make_handle(qpair);
3087 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
3088 /*
3089 * CTIO type 7 from the firmware doesn't provide a way to
3090 * know the initiator's LOOP ID, hence we can't find
3091 * the session and, therefore, the command.
3092 */
3093 return -EAGAIN;
3094 } else
3095 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3096
3097 pkt->handle = make_handle(qpair->req->id, h);
3098 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
3099 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3100 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3101 pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3102 pkt->exchange_addr = atio->u.isp24.exchange_addr;
3103
3104 /* silence compiler warning */
3105 t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3106 pkt->ox_id = cpu_to_le16(t16);
3107
3108 t16 = (atio->u.isp24.attr << 9);
3109 pkt->flags |= cpu_to_le16(t16);
3110 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3111
3112 /* Set transfer direction */
3113 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3114 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
3115 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3116 pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
3117
3118 pkt->dseg_count = cpu_to_le16(prm->tot_dsds);
3119 /* Fibre channel byte count */
3120 pkt->transfer_length = cpu_to_le32(transfer_length);
3121
3122 /* ----- CRC context -------- */
3123
3124 /* Allocate CRC context from global pool */
3125 crc_ctx_pkt = cmd->ctx =
3126 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
3127
3128 if (!crc_ctx_pkt)
3129 goto crc_queuing_error;
3130
3131 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
3132 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
3133
3134 /* Set handle */
3135 crc_ctx_pkt->handle = pkt->handle;
3136
3137 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3138
3139 put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address);
3140 pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
3141
3142 if (!bundling) {
3143 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
3144 } else {
3145 /*
3146 * Configure bundling if we need to fetch interleaving
3147 * protection PCI accesses
3148 */
3149 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
3150 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
3151 crc_ctx_pkt->u.bundling.dseg_count =
3152 cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
3153 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
3154 }
3155
3156 /* Finish the common fields of CRC pkt */
3157 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3158 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
3159 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
3160 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
3161
3162 memset((uint8_t *)&tc, 0, sizeof(tc));
3163 tc.vha = vha;
3164 tc.blk_sz = cmd->blk_sz;
3165 tc.bufflen = cmd->bufflen;
3166 tc.sg = cmd->sg;
3167 tc.prot_sg = cmd->prot_sg;
3168 tc.ctx = crc_ctx_pkt;
3169 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3170
3171 /* Walks data segments */
3172 pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
3173
3174 if (!bundling && prm->prot_seg_cnt) {
3175 if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
3176 prm->tot_dsds, &tc))
3177 goto crc_queuing_error;
3178 } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
3179 (prm->tot_dsds - prm->prot_seg_cnt), &tc))
3180 goto crc_queuing_error;
3181
3182 if (bundling && prm->prot_seg_cnt) {
3183 /* Walks dif segments */
3184 pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
3185
3186 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
3187 if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
3188 prm->prot_seg_cnt, cmd))
3189 goto crc_queuing_error;
3190 }
3191 return QLA_SUCCESS;
3192
3193 crc_queuing_error:
3194 /* Cleanup will be performed by the caller */
3195 qpair->req->outstanding_cmds[h] = NULL;
3196
3197 return QLA_FUNCTION_FAILED;
3198 }
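
/*
 * Illustrative sketch, not part of the driver: the DIF sizing above.
 * Each protected block carries an 8-byte protection tuple, so e.g. a
 * 64 KB transfer of 512-byte blocks adds (65536 / 512) * 8 = 1024 bytes
 * of protection data, which is what makes transfer_length differ from
 * data_bytes for the strip/insert/pass cases:
 */
static uint32_t example_dif_bytes(uint32_t data_bytes, uint32_t blk_sz)
{
	return (data_bytes / blk_sz) * 8;
}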
3199
3200 /*
3201 * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
3202 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
3203 */
3204 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3205 uint8_t scsi_status)
3206 {
3207 struct scsi_qla_host *vha = cmd->vha;
3208 struct qla_qpair *qpair = cmd->qpair;
3209 struct ctio7_to_24xx *pkt;
3210 struct qla_tgt_prm prm;
3211 uint32_t full_req_cnt = 0;
3212 unsigned long flags = 0;
3213 int res;
3214 int pre_xmit_res;
3215
3216 ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
3217 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3218 (xmit_type & QLA_TGT_XMIT_STATUS) ?
3219 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3220 &cmd->se_cmd, qpair->id);
3221
3222 pre_xmit_res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3223 &full_req_cnt);
3224 /*
3225 * Check pre_xmit_res later because we want to check other errors
3226 * first.
3227 */
3228
3229 /* Begin timer on the first call, not on SRR retry. */
3230 if (likely(cmd->jiffies_at_hw_st_entry == 0))
3231 cmd->jiffies_at_hw_st_entry = get_jiffies_64();
3232
3233 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3234
3235 if (unlikely(cmd->sent_term_exchg ||
3236 cmd->sess->deleted ||
3237 !qpair->fw_started ||
3238 cmd->reset_count != qpair->chip_reset)) {
3239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe101,
3240 "qla_target(%d): tag %lld: skipping send response for aborted cmd\n",
3241 vha->vp_idx, cmd->se_cmd.tag);
3242 qlt_unmap_sg(vha, cmd);
3243 cmd->state = QLA_TGT_STATE_PROCESSED;
3244 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3245 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3246 return 0;
3247 }
3248
3249 /* Check for errors from qlt_pre_xmit_response(). */
3250 res = pre_xmit_res;
3251 if (unlikely(res))
3252 goto out_unmap_unlock;
3253
3254 if (xmit_type == QLA_TGT_XMIT_STATUS)
3255 qpair->tgt_counters.core_qla_snd_status++;
3256 else
3257 qpair->tgt_counters.core_qla_que_buf++;
3258
3259 /* Does F/W have IOCBs for this request? */
3260 res = qlt_check_reserve_free_req(qpair, full_req_cnt);
3261 if (unlikely(res))
3262 goto out_unmap_unlock;
3263
3264 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3265 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3266 else
3267 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3268 if (unlikely(res != 0)) {
3269 qpair->req->cnt += full_req_cnt;
3270 goto out_unmap_unlock;
3271 }
3272
3273 pkt = (struct ctio7_to_24xx *)prm.pkt;
3274
3275 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3276 pkt->u.status0.flags |=
3277 cpu_to_le16(CTIO7_FLAGS_DATA_IN |
3278 CTIO7_FLAGS_STATUS_MODE_0);
3279
3280 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3281 qlt_load_data_segments(&prm);
3282
3283 if (prm.add_status_pkt == 0) {
3284 if (xmit_type & QLA_TGT_XMIT_STATUS) {
3285 pkt->u.status0.scsi_status =
3286 cpu_to_le16(prm.rq_result);
3287 if (!cmd->edif)
3288 pkt->u.status0.residual =
3289 cpu_to_le32(prm.residual);
3290
3291 pkt->u.status0.flags |= cpu_to_le16(
3292 CTIO7_FLAGS_SEND_STATUS);
3293 if (qlt_need_explicit_conf(cmd, 0)) {
3294 pkt->u.status0.flags |=
3295 cpu_to_le16(
3296 CTIO7_FLAGS_EXPLICIT_CONFORM |
3297 CTIO7_FLAGS_CONFORM_REQ);
3298 }
3299 }
3300
3301 } else {
3302 /*
3303 * We have already made sure that there is sufficient
3304 * amount of request entries to not drop HW lock in
3305 * req_pkt().
3306 */
3307 struct ctio7_to_24xx *ctio =
3308 (struct ctio7_to_24xx *)qlt_get_req_pkt(
3309 qpair->req);
3310
3311 ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e,
3312 "Building additional status packet 0x%p.\n",
3313 ctio);
3314
3315 /*
3316 * T10Dif: ctio_crc2_to_fw overlay ontop of
3317 * ctio7_to_24xx
3318 */
3319 memcpy(ctio, pkt, sizeof(*ctio));
3320 /* reset back to CTIO7 */
3321 ctio->entry_count = 1;
3322 ctio->entry_type = CTIO_TYPE7;
3323 ctio->dseg_count = 0;
3324 ctio->u.status1.flags &= ~cpu_to_le16(
3325 CTIO7_FLAGS_DATA_IN);
3326
3327 /* Real finish is ctio_m1's finish */
3328 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
3329 pkt->u.status0.flags |= cpu_to_le16(
3330 CTIO7_FLAGS_DONT_RET_CTIO);
3331
3332 /* qlt_24xx_init_ctio_to_isp will correct
3333 * all necessary fields that are part of CTIO7.
3334 * There should be no residual CTIO-CRC2 data.
3335 */
3336 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
3337 &prm);
3338 }
3339 } else
3340 qlt_24xx_init_ctio_to_isp(pkt, &prm);
3341
3342
3343 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3344 cmd->cmd_sent_to_fw = 1;
3345 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3346
3347 /* Memory Barrier */
3348 wmb();
3349 if (qpair->reqq_start_iocbs)
3350 qpair->reqq_start_iocbs(qpair);
3351 else
3352 qla2x00_start_iocbs(vha, qpair->req);
3353 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3354
3355 return 0;
3356
3357 out_unmap_unlock:
3358 qlt_unmap_sg(vha, cmd);
3359 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3360
3361 return res;
3362 }
3363 EXPORT_SYMBOL(qlt_xmit_response);
3364
3365 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3366 {
3367 struct ctio7_to_24xx *pkt;
3368 struct scsi_qla_host *vha = cmd->vha;
3369 struct qla_tgt *tgt = cmd->tgt;
3370 struct qla_tgt_prm prm;
3371 unsigned long flags = 0;
3372 int res = 0;
3373 int pci_map_res;
3374 struct qla_qpair *qpair = cmd->qpair;
3375
3376 /* Begin timer on the first call, not on SRR retry. */
3377 if (likely(cmd->jiffies_at_hw_st_entry == 0))
3378 cmd->jiffies_at_hw_st_entry = get_jiffies_64();
3379
3380 memset(&prm, 0, sizeof(prm));
3381 prm.cmd = cmd;
3382 prm.tgt = tgt;
3383 prm.sg = NULL;
3384 prm.req_cnt = 1;
3385
3386 /* Calculate number of entries and segments required */
3387 pci_map_res = qlt_pci_map_calc_cnt(&prm);
3388 /*
3389 * Check pci_map_res later because we want to check other errors first.
3390 */
3391
3392 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3393
3394 if (unlikely(cmd->sent_term_exchg ||
3395 cmd->sess->deleted ||
3396 !qpair->fw_started ||
3397 cmd->reset_count != qpair->chip_reset)) {
3398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe102,
3399 "qla_target(%d): tag %lld: skipping data-out for aborted cmd\n",
3400 vha->vp_idx, cmd->se_cmd.tag);
3401 qlt_unmap_sg(vha, cmd);
3402 cmd->aborted = 1;
3403 cmd->write_data_transferred = 0;
3404 cmd->state = QLA_TGT_STATE_DATA_IN;
3405 cmd->jiffies_at_hw_st_entry = 0;
3406 vha->hw->tgt.tgt_ops->handle_data(cmd);
3407 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3408 return 0;
3409 }
3410
3411 /* Check for errors from qlt_pci_map_calc_cnt(). */
3412 if (unlikely(pci_map_res != 0)) {
3413 res = -EAGAIN;
3414 goto out_unlock_free_unmap;
3415 }
3416
3417 /* Does F/W have IOCBs for this request? */
3418 res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
3419 if (res != 0)
3420 goto out_unlock_free_unmap;
3421 if (cmd->se_cmd.prot_op)
3422 res = qlt_build_ctio_crc2_pkt(qpair, &prm);
3423 else
3424 res = qlt_24xx_build_ctio_pkt(qpair, &prm);
3425
3426 if (unlikely(res != 0)) {
3427 qpair->req->cnt += prm.req_cnt;
3428 goto out_unlock_free_unmap;
3429 }
3430
3431 pkt = (struct ctio7_to_24xx *)prm.pkt;
3432 pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
3433 CTIO7_FLAGS_STATUS_MODE_0);
3434
3435 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3436 qlt_load_data_segments(&prm);
3437
3438 cmd->state = QLA_TGT_STATE_NEED_DATA;
3439 cmd->cmd_sent_to_fw = 1;
3440 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3441
3442 /* Memory Barrier */
3443 wmb();
3444 if (qpair->reqq_start_iocbs)
3445 qpair->reqq_start_iocbs(qpair);
3446 else
3447 qla2x00_start_iocbs(vha, qpair->req);
3448 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3449
3450 return res;
3451
3452 out_unlock_free_unmap:
3453 cmd->jiffies_at_hw_st_entry = 0;
3454 qlt_unmap_sg(vha, cmd);
3455 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3456
3457 return res;
3458 }
3459 EXPORT_SYMBOL(qlt_rdy_to_xfer);
3460
3461
3462 /*
3463 * it is assumed either hardware_lock or qpair lock is held.
3464 */
3465 static void
3466 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3467 struct ctio_crc_from_fw *sts)
3468 {
3469 uint8_t *ap = &sts->actual_dif[0];
3470 uint8_t *ep = &sts->expected_dif[0];
3471 uint64_t lba = cmd->se_cmd.t_task_lba;
3472 uint8_t scsi_status, sense_key, asc, ascq;
3473 struct scsi_qla_host *vha = cmd->vha;
3474
3475 cmd->trc_flags |= TRC_DIF_ERR;
3476
3477 cmd->a_guard = get_unaligned_be16(ap + 0);
3478 cmd->a_app_tag = get_unaligned_be16(ap + 2);
3479 cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3480
3481 cmd->e_guard = get_unaligned_be16(ep + 0);
3482 cmd->e_app_tag = get_unaligned_be16(ep + 2);
3483 cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3484
3485 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3486 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3487
3488 scsi_status = sense_key = asc = ascq = 0;
3489
3490 /* check appl tag */
3491 if (cmd->e_app_tag != cmd->a_app_tag) {
3492 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3493 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3494 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3495 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3496 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3497 cmd->atio.u.isp24.fcp_hdr.ox_id);
3498
3499 cmd->dif_err_code = DIF_ERR_APP;
3500 scsi_status = SAM_STAT_CHECK_CONDITION;
3501 sense_key = ABORTED_COMMAND;
3502 asc = 0x10;
3503 ascq = 0x2;
3504 }
3505
3506 /* check ref tag */
3507 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3508 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3509 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3510 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3511 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3512 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3513 cmd->atio.u.isp24.fcp_hdr.ox_id);
3514
3515 cmd->dif_err_code = DIF_ERR_REF;
3516 scsi_status = SAM_STAT_CHECK_CONDITION;
3517 sense_key = ABORTED_COMMAND;
3518 asc = 0x10;
3519 ascq = 0x3;
3520 goto out;
3521 }
3522
3523 /* check guard */
3524 if (cmd->e_guard != cmd->a_guard) {
3525 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3526 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3527 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3528 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3529 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3530 cmd->atio.u.isp24.fcp_hdr.ox_id);
3531
3532 cmd->dif_err_code = DIF_ERR_GRD;
3533 scsi_status = SAM_STAT_CHECK_CONDITION;
3534 sense_key = ABORTED_COMMAND;
3535 asc = 0x10;
3536 ascq = 0x1;
3537 }
3538 out:
3539 switch (cmd->state) {
3540 case QLA_TGT_STATE_NEED_DATA:
3541 /* handle_data will load DIF error code */
3542 cmd->state = QLA_TGT_STATE_DATA_IN;
3543 cmd->jiffies_at_hw_st_entry = 0;
3544 vha->hw->tgt.tgt_ops->handle_data(cmd);
3545 break;
3546 default:
3547 if (cmd->sent_term_exchg) {
3548 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3549 break;
3550 }
3551
3552 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3553 ascq);
3554 /* Assume the SCSI status gets out on the wire;
3555 * do not wait for completion.
3556 */
3557 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3558 break;
3559 }
3560 }
3561
3562 /* If hardware_lock held on entry, might drop it, then reacquire */
3563 /* This function sends a NOTIFY_ACK to terminate an immediate notify exchange */
3564 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3565 struct imm_ntfy_from_isp *ntfy)
3566 {
3567 struct nack_to_isp *nack;
3568 struct qla_hw_data *ha = vha->hw;
3569 request_t *pkt;
3570 int ret = 0;
3571
3572 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3573 "Sending TERM ELS CTIO (ha=%p)\n", ha);
3574
3575 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3576 if (pkt == NULL) {
3577 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3578 "qla_target(%d): %s failed: unable to allocate "
3579 "request packet\n", vha->vp_idx, __func__);
3580 return -ENOMEM;
3581 }
3582
3583 pkt->entry_type = NOTIFY_ACK_TYPE;
3584 pkt->entry_count = 1;
3585 pkt->handle = QLA_TGT_SKIP_HANDLE;
3586
3587 nack = (struct nack_to_isp *)pkt;
3588 nack->ox_id = ntfy->ox_id;
3589
3590 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3591 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3592 nack->u.isp24.flags = ntfy->u.isp24.flags &
3593 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3594 }
3595
3596 /* terminate */
3597 nack->u.isp24.flags |=
3598 cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
3599
3600 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3601 nack->u.isp24.status = ntfy->u.isp24.status;
3602 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3603 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3604 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3605 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3606 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3607 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3608
3609 qla2x00_start_iocbs(vha, vha->req);
3610 return ret;
3611 }
3612
3613 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3614 struct imm_ntfy_from_isp *imm, int ha_locked)
3615 {
3616 int rc;
3617
3618 WARN_ON_ONCE(!ha_locked);
3619 rc = __qlt_send_term_imm_notif(vha, imm);
3620 pr_debug("rc = %d\n", rc);
3621 }
3622
3623 /*
3624 * Handle a SRR that had been previously associated with a command when the
3625 * command has been aborted or otherwise cannot process the SRR.
3626 *
3627 * If reject is true, then attempt to reject the SRR. Otherwise abort the
3628 * immediate notify exchange.
3629 */
3630 void qlt_srr_abort(struct qla_tgt_cmd *cmd, bool reject)
3631 {
3632 struct scsi_qla_host *vha = cmd->vha;
3633 struct qla_tgt_srr *srr = cmd->srr;
3634
3635 if (srr->imm_ntfy_recvd) {
3636 if (reject)
3637 srr->reject = true;
3638 else
3639 srr->aborted = true;
3640
3641 if (srr->ctio_recvd) {
3642 /*
3643 * The SRR should already be scheduled for processing,
3644 * and the SRR processing code should see that the cmd
3645 * has been aborted and take appropriate action. In
3646 * addition, the cmd refcount should have been
3647 * incremented, preventing the cmd from being freed
3648 * until SRR processing is done.
3649 */
3650 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102e,
3651 "qla_target(%d): tag %lld: %s: SRR already scheduled\n",
3652 vha->vp_idx, cmd->se_cmd.tag, __func__);
3653 } else {
3654 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3655 unsigned long flags;
3656
3657 /* Schedule processing for the SRR immediate notify. */
3658 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102f,
3659 "qla_target(%d): tag %lld: %s: schedule SRR %s\n",
3660 vha->vp_idx, cmd->se_cmd.tag, __func__,
3661 reject ? "reject" : "abort");
3662 cmd->srr = NULL;
3663 srr->cmd = NULL;
3664 spin_lock_irqsave(&tgt->srr_lock, flags);
3665 list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
3666 queue_work(qla_tgt_wq, &tgt->srr_work);
3667 spin_unlock_irqrestore(&tgt->srr_lock, flags);
3668 }
3669 } else {
3670 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11030,
3671 "qla_target(%d): tag %lld: %s: no IMM SRR; free SRR\n",
3672 vha->vp_idx, cmd->se_cmd.tag, __func__);
3673 cmd->srr = NULL;
3674 kfree(srr);
3675 }
3676 }
3677 EXPORT_SYMBOL(qlt_srr_abort);
3678
3679 /*
3680 * If hardware_lock held on entry, might drop it, then reacquire
3681 * This function sends the appropriate CTIO to ISP 2xxx or 24xx
3682 */
3683 static int __qlt_send_term_exchange(struct qla_qpair *qpair,
3684 struct qla_tgt_cmd *cmd,
3685 struct atio_from_isp *atio)
3686 {
3687 struct ctio7_to_24xx *ctio24;
3688 struct scsi_qla_host *vha;
3689 uint16_t loop_id;
3690 uint16_t temp;
3691
3692 if (cmd) {
3693 vha = cmd->vha;
3694 loop_id = cmd->loop_id;
3695 } else {
3696 port_id_t id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
3697 struct qla_hw_data *ha;
3698 struct fc_port *sess;
3699 unsigned long flags;
3700
3701 vha = qpair->vha;
3702 ha = vha->hw;
3703
3704 /*
3705 * CTIO7_NHANDLE_UNRECOGNIZED works when aborting an idle
3706 * command but not when aborting a command with an active CTIO
3707 * exchange.
3708 */
3709 loop_id = CTIO7_NHANDLE_UNRECOGNIZED;
3710 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
3711 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
3712 if (sess)
3713 loop_id = sess->loop_id;
3714 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
3715 }
3716
3717 if (cmd) {
3718 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
3719 "qla_target(%d): tag %lld: Sending TERM EXCH CTIO state %d cmd_sent_to_fw %u\n",
3720 vha->vp_idx, cmd->se_cmd.tag, cmd->state,
3721 cmd->cmd_sent_to_fw);
3722 } else {
3723 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe009,
3724 "qla_target(%d): tag %u: Sending TERM EXCH CTIO (no cmd)\n",
3725 vha->vp_idx, le32_to_cpu(atio->u.isp24.exchange_addr));
3726 }
3727
3728 ctio24 = qla2x00_alloc_iocbs_ready(qpair, NULL);
3729 if (!ctio24) {
3730 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3731 "qla_target(%d): %s failed: unable to allocate "
3732 "request packet\n", vha->vp_idx, __func__);
3733 return -ENOMEM;
3734 }
3735
3736 qpair->tgt_counters.num_term_xchg_sent++;
3737
3738 ctio24->entry_type = CTIO_TYPE7;
3739 ctio24->entry_count = 1;
3740 ctio24->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3741 ctio24->nport_handle = cpu_to_le16(loop_id);
3742 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
3743 ctio24->vp_index = vha->vp_idx;
3744 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
3745 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
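/* Carry the ATIO task attribute into the CTIO flags (bits 9 and up). */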
3746 temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 |
3747 CTIO7_FLAGS_TERMINATE;
3748 ctio24->u.status1.flags = cpu_to_le16(temp);
3749 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
3750 ctio24->u.status1.ox_id = cpu_to_le16(temp);
3751
3752 /* Make the IOCB visible to HW before updating the request queue in-pointer */
3753 wmb();
3754 if (qpair->reqq_start_iocbs)
3755 qpair->reqq_start_iocbs(qpair);
3756 else
3757 qla2x00_start_iocbs(vha, qpair->req);
3758 return 0;
3759 }
3760
3761 /*
3762 * Aborting a command that is active in the FW (i.e. cmd->cmd_sent_to_fw == 1)
3763 * will usually trigger the FW to send a completion CTIO with error status,
3764 * and the driver will then call the ->handle_data() or ->free_cmd() callbacks.
3765 * This can be used to clear a command that is locked up in the FW unless there
3766 * is something more seriously wrong.
3767 *
3768 * Aborting a command that is not active in the FW (i.e.
3769 * cmd->cmd_sent_to_fw == 0) will not directly trigger any callbacks. Instead,
3770 * when the target mode midlevel calls qlt_rdy_to_xfer() or
3771 * qlt_xmit_response(), the driver will see that the cmd has been aborted and
3772 * call the appropriate callback immediately without performing the requested
3773 * operation.
3774 */
3775 void qlt_send_term_exchange(struct qla_qpair *qpair,
3776 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
3777 {
3778 struct scsi_qla_host *vha;
3779 unsigned long flags = 0;
3780 int rc;
3781
3782 /* The cmd's vha may differ from qpair->vha when NPIV is in use. */
3783 if (cmd)
3784 vha = cmd->vha;
3785 else
3786 vha = qpair->vha;
3787
3788 if (ha_locked) {
3789 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3790 if (rc == -ENOMEM)
3791 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3792 goto done;
3793 }
3794 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3795 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3796 if (rc == -ENOMEM)
3797 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3798
3799 done:
3800 if (cmd) {
3801 /*
3802 * Set this even if -ENOMEM above, since term exchange will be
3803 * sent eventually...
3804 */
3805 cmd->sent_term_exchg = 1;
3806 cmd->aborted = 1;
3807 cmd->jiffies_at_term_exchg = jiffies;
3808 }
3809
3810 if (!ha_locked)
3811 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3812
3813 return;
3814 }
3815 EXPORT_SYMBOL(qlt_send_term_exchange);
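/*
 * Usage sketch (not from the original source): callers that do not already
 * hold the qpair lock pass ha_locked = 0, so the function takes
 * qpair->qp_lock_ptr itself:
 *
 *	qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0);
 *
 * qlt_abort_cmd() below passes ha_locked = 1 because it already holds the
 * lock when it terminates the exchange.
 */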
3816
3817 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3818 {
3819 struct list_head free_list;
3820 struct qla_tgt_cmd *cmd, *tcmd;
3821
3822 vha->hw->tgt.leak_exchg_thresh_hold =
3823 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
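	/*
	 * Example (assuming LEAK_EXCHG_THRESH_HOLD_PERCENT == 75): with
	 * cur_fw_xcb_count == 4096 the threshold is 3072 leaked exchanges,
	 * beyond which qlt_chk_exch_leak_thresh_hold() requests a chip reset.
	 */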
3824
3825 cmd = tcmd = NULL;
3826 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3827 INIT_LIST_HEAD(&free_list);
3828 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3829
3830 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3831 list_del(&cmd->cmd_list);
3832 /* This cmd was never sent to TCM. There is no need
3833 * to schedule free or call free_cmd
3834 */
3835 qlt_free_cmd(cmd);
3836 vha->hw->tgt.num_qfull_cmds_alloc--;
3837 }
3838 }
3839 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3840 }
3841
3842 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3843 {
3844 uint32_t total_leaked;
3845
3846 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3847
3848 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3849 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3850
3851 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3852 "Chip reset due to exchange starvation: %d/%d.\n",
3853 total_leaked, vha->hw->cur_fw_xcb_count);
3854
3855 if (IS_P3P_TYPE(vha->hw))
3856 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3857 else
3858 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3859 qla2xxx_wake_dpc(vha);
3860 }
3861
3862 }
3863
3864 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3865 {
3866 struct scsi_qla_host *vha = cmd->vha;
3867 struct qla_qpair *qpair = cmd->qpair;
3868 unsigned long flags;
3869
3870 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3871
3872 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3873 "qla_target(%d): tag %lld: cmd being aborted (state %d) %s; %s\n",
3874 vha->vp_idx, cmd->se_cmd.tag, cmd->state,
3875 cmd->cmd_sent_to_fw ? "sent to fw" : "not sent to fw",
3876 cmd->aborted ? "aborted" : "not aborted");
3877
3878 if (cmd->state != QLA_TGT_STATE_DONE && !cmd->sent_term_exchg) {
3879 if (!qpair->fw_started ||
3880 cmd->reset_count != qpair->chip_reset) {
3881 /*
3882 * Chip was reset; just pretend that we sent the term
3883 * exchange.
3884 */
3885 cmd->sent_term_exchg = 1;
3886 cmd->aborted = 1;
3887 cmd->jiffies_at_term_exchg = jiffies;
3888 } else {
3889 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
3890 }
3891 }
3892
3893 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3894
3895 return 0;
3896 }
3897 EXPORT_SYMBOL(qlt_abort_cmd);
3898
3899 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3900 {
3901 struct fc_port *sess = cmd->sess;
3902
3903 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3904 "%s: se_cmd[%p] ox_id %04x\n",
3905 __func__, &cmd->se_cmd,
3906 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3907
3908 BUG_ON(cmd->cmd_in_wq);
3909
3910 if (!cmd->q_full)
3911 qlt_decr_num_pend_cmds(cmd->vha);
3912
3913 BUG_ON(cmd->sg_mapped);
3914 if (unlikely(cmd->free_sg)) {
3915 cmd->free_sg = 0;
3916 qlt_free_sg(cmd);
3917 }
3918 if (unlikely(cmd->srr))
3919 qlt_srr_abort(cmd, false);
3920
3921 if (unlikely(cmd->aborted ||
3922 (cmd->trc_flags & (TRC_CTIO_STRANGE | TRC_CTIO_ERR |
3923 TRC_SRR_CTIO | TRC_SRR_IMM)))) {
3924 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xe086,
3925 "qla_target(%d): tag %lld: free cmd (trc_flags %x, aborted %u, sent_term_exchg %u, rsp_sent %u)\n",
3926 cmd->vha->vp_idx, cmd->se_cmd.tag,
3927 cmd->trc_flags, cmd->aborted, cmd->sent_term_exchg,
3928 cmd->rsp_sent);
3929 }
3930
3931 if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
3932 kfree(cmd->cdb);
3933 cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
3934 cmd->cdb_len = 16;
3935 }
3936
3937 cmd->jiffies_at_free = get_jiffies_64();
3938
3939 if (!sess || !sess->se_sess) {
3940 WARN_ON(1);
3941 return;
3942 }
3943 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3944 }
3945 EXPORT_SYMBOL(qlt_free_cmd);
3946
3947 /*
3948 * Process a CTIO response for a SCSI command that failed due to SRR.
3949 *
3950 * qpair->qp_lock_ptr supposed to be held on entry
3951 */
3952 static int qlt_prepare_srr_ctio(struct qla_qpair *qpair,
3953 struct qla_tgt_cmd *cmd)
3954 {
3955 struct scsi_qla_host *vha = cmd->vha;
3956 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
3957 struct qla_tgt_srr *srr;
3958
3959 cmd->trc_flags |= TRC_SRR_CTIO;
3960
3961 srr = cmd->srr;
3962 if (srr != NULL) {
3963 /* qlt_prepare_srr_imm() was called first. */
3964
3965 WARN_ON(srr->ctio_recvd);
3966 WARN_ON(!srr->imm_ntfy_recvd);
3967
3968 if (vha->hw->tgt.tgt_ops->get_cmd_ref(cmd)) {
3969 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11037,
3970 "qla_target(%d): tag %lld: unable to get cmd ref for SRR processing\n",
3971 vha->vp_idx, cmd->se_cmd.tag);
3972 qlt_srr_abort(cmd, true);
3973 return -ESHUTDOWN;
3974 }
3975
3976 srr->ctio_recvd = true;
3977
3978 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100f,
3979 "qla_target(%d): tag %lld: Scheduling SRR work\n",
3980 vha->vp_idx, cmd->se_cmd.tag);
3981
3982 /* Schedule the srr for processing in qlt_handle_srr(). */
3983 /* IRQ is already OFF */
3984 spin_lock(&tgt->srr_lock);
3985 list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
3986 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &tgt->srr_work);
3987 spin_unlock(&tgt->srr_lock);
3988 return 0;
3989 }
3990
3991 srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
3992 if (!srr)
3993 return -ENOMEM;
3994
3995 /* Expect qlt_prepare_srr_imm() to be called. */
3996 srr->ctio_recvd = true;
3997 srr->cmd = cmd;
3998 srr->reset_count = cmd->reset_count;
3999 cmd->srr = srr;
4000 return 0;
4001 }
4002
4003 /* ha->hardware_lock supposed to be held on entry */
4004 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
4005 struct rsp_que *rsp, uint32_t handle, uint8_t cmd_type,
4006 const void *ctio)
4007 {
4008 void *cmd = NULL;
4009 struct req_que *req;
4010 int qid = GET_QID(handle);
4011 uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
4012
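/*
 * The completion handle packs a request-queue id into its upper bits
 * (extracted by GET_QID()); the low bits, masked further by
 * QLA_CMD_HANDLE_MASK below, index into req->outstanding_cmds[].
 */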
4013 if (unlikely(h == QLA_TGT_SKIP_HANDLE))
4014 return NULL;
4015
4016 if (qid == rsp->req->id) {
4017 req = rsp->req;
4018 } else if (vha->hw->req_q_map[qid]) {
4019 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
4020 "qla_target(%d): CTIO completion with different QID %d handle %x\n",
4021 vha->vp_idx, rsp->id, handle);
4022 req = vha->hw->req_q_map[qid];
4023 } else {
4024 return NULL;
4025 }
4026
4027 h &= QLA_CMD_HANDLE_MASK;
4028
4029 if (h == QLA_TGT_NULL_HANDLE) {
4030 /* We can't get loop ID from CTIO7 */
4031 ql_dbg(ql_dbg_tgt, vha, 0xe054,
4032 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
4033 "support NULL handles\n", vha->vp_idx);
4034 return NULL;
4035 }
4036 if (unlikely(h >= req->num_outstanding_cmds)) {
4037 ql_dbg(ql_dbg_tgt, vha, 0xe052,
4038 "qla_target(%d): Wrong handle %x received\n",
4039 vha->vp_idx, handle);
4040 return NULL;
4041 }
4042
4043 /*
4044 * We passed a numeric handle for a cmd to the hardware, and the
4045 * hardware passed the handle back to us. Look up the associated cmd,
4046 * and validate that the cmd_type and exchange address match what the
4047 * caller expects. This guards against buggy HBA firmware that returns
4048 * the same CTIO multiple times.
4049 */
4050
4051 cmd = req->outstanding_cmds[h];
4052
4053 if (unlikely(cmd == NULL)) {
4054 if (cmd_type == TYPE_TGT_CMD) {
4055 __le32 ctio_exchange_addr =
4056 ((const struct ctio7_from_24xx *)ctio)->
4057 exchange_address;
4058
4059 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
4060 "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
4061 vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h,
4062 handle, req->id, rsp->id);
4063 } else {
4064 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe053,
4065 "qla_target(%d): cmd detached; ignoring CTIO (handle %x req->id %d rsp->id %d)\n",
4066 vha->vp_idx, handle, req->id, rsp->id);
4067 }
4068 return NULL;
4069 }
4070
4071 if (unlikely(((srb_t *)cmd)->cmd_type != cmd_type)) {
4072 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe087,
4073 "qla_target(%d): handle %x: cmd detached; ignoring CTIO (cmd_type mismatch)\n",
4074 vha->vp_idx, h);
4075 return NULL;
4076 }
4077
4078 switch (cmd_type) {
4079 case TYPE_TGT_CMD: {
4080 __le32 ctio_exchange_addr =
4081 ((const struct ctio7_from_24xx *)ctio)->
4082 exchange_address;
4083 __le32 cmd_exchange_addr =
4084 ((struct qla_tgt_cmd *)cmd)->
4085 atio.u.isp24.exchange_addr;
4086
4087 BUILD_BUG_ON(offsetof(struct ctio7_from_24xx,
4088 exchange_address) !=
4089 offsetof(struct ctio_crc_from_fw,
4090 exchange_address));
4091
4092 if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
4093 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe088,
4094 "qla_target(%d): tag %u: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
4095 vha->vp_idx, le32_to_cpu(ctio_exchange_addr), h);
4096 return NULL;
4097 }
4098 break;
4099 }
4100
4101 case TYPE_TGT_TMCMD: {
4102 __le32 ctio_exchange_addr =
4103 ((const struct abts_resp_from_24xx_fw *)ctio)->
4104 exchange_address;
4105 __le32 cmd_exchange_addr =
4106 ((struct qla_tgt_mgmt_cmd *)cmd)->
4107 orig_iocb.abts.exchange_address;
4108
4109 if (unlikely(ctio_exchange_addr != cmd_exchange_addr)) {
4110 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe089,
4111 "qla_target(%d): ABTS: handle %x: cmd detached; ignoring CTIO (exchange address mismatch)\n",
4112 vha->vp_idx, h);
4113 return NULL;
4114 }
4115 break;
4116 }
4117 }
4118
4119 req->outstanding_cmds[h] = NULL;
4120
4121 return cmd;
4122 }
4123
4124 /*
4125 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
4126 */
4127 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
4128 struct rsp_que *rsp, uint32_t handle, uint32_t status,
4129 struct ctio7_from_24xx *ctio)
4130 {
4131 struct qla_hw_data *ha = vha->hw;
4132 struct qla_tgt_cmd *cmd;
4133 struct qla_qpair *qpair = rsp->qpair;
4134 uint16_t ctio_flags;
4135
4136 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
4137 /* That could happen only in case of an error/reset/abort */
4138 if (status != CTIO_SUCCESS) {
4139 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
4140 "Intermediate CTIO received"
4141 " (status %x)\n", status);
4142 }
4143 return;
4144 }
4145
4146 ctio_flags = le16_to_cpu(ctio->flags);
4147
4148 cmd = qlt_ctio_to_cmd(vha, rsp, handle, TYPE_TGT_CMD, ctio);
4149 if (unlikely(cmd == NULL)) {
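		/*
		 * The 0xe1ff mask clears the task-attribute bits (9..12) that
		 * __qlt_send_term_exchange() copied from the ATIO, so only the
		 * mode and terminate flags are compared.
		 */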
4150 if ((handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE &&
4151 (ctio_flags & 0xe1ff) == (CTIO7_FLAGS_STATUS_MODE_1 |
4152 CTIO7_FLAGS_TERMINATE)) {
4153 u32 tag = le32_to_cpu(ctio->exchange_address);
4154
4155 if (status == CTIO_SUCCESS)
4156 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe083,
4157 "qla_target(%d): tag %u: term exchange successful\n",
4158 vha->vp_idx, tag);
4159 else
4160 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe084,
4161 "qla_target(%d): tag %u: term exchange failed; status = 0x%x\n",
4162 vha->vp_idx, tag, status);
4163 }
4164 return;
4165 }
4166
4167 if ((ctio_flags & CTIO7_FLAGS_DATA_OUT) && cmd->sess)
4168 qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, ctio);
4169
4170 cmd->cmd_sent_to_fw = 0;
4171
4172 qlt_unmap_sg(vha, cmd);
4173
4174 if (unlikely(status != CTIO_SUCCESS)) {
4175 u8 op = cmd->cdb ? cmd->cdb[0] : 0;
4176 bool term_exchg = false;
4177
4178 /*
4179 * If the hardware terminated the exchange, then we don't need
4180 * to send an explicit term exchange message.
4181 */
4182 if (ctio_flags & OF_TERM_EXCH) {
4183 cmd->sent_term_exchg = 1;
4184 cmd->aborted = 1;
4185 cmd->jiffies_at_term_exchg = jiffies;
4186 }
4187
4188 switch (status & 0xFFFF) {
4189 case CTIO_INVALID_RX_ID:
4190 term_exchg = true;
4191 if (printk_ratelimit())
4192 dev_info(&vha->hw->pdev->dev,
4193 "qla_target(%d): tag %lld, op %x: CTIO with INVALID_RX_ID status 0x%x received (state %d, port %8phC, LUN %lld, ATIO attr %x, CTIO Flags %x|%x)\n",
4194 vha->vp_idx, cmd->se_cmd.tag, op,
4195 status, cmd->state, cmd->sess->port_name,
4196 cmd->unpacked_lun, cmd->atio.u.isp24.attr,
4197 ((cmd->ctio_flags >> 9) & 0xf),
4198 cmd->ctio_flags);
4199 break;
4200
4201 case CTIO_LIP_RESET:
4202 case CTIO_TARGET_RESET:
4203 case CTIO_ABORTED:
4204 term_exchg = true;
4205 fallthrough;
4206 case CTIO_TIMEOUT:
4207 {
4208 const char *status_str;
4209
4210 switch (status & 0xFFFF) {
4211 case CTIO_LIP_RESET:
4212 status_str = "LIP_RESET";
4213 break;
4214 case CTIO_TARGET_RESET:
4215 status_str = "TARGET_RESET";
4216 break;
4217 case CTIO_ABORTED:
4218 status_str = "ABORTED";
4219 break;
4220 case CTIO_TIMEOUT:
4221 default:
4222 status_str = "TIMEOUT";
4223 break;
4224 }
4225 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
4226 "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
4227 vha->vp_idx, cmd->se_cmd.tag, op,
4228 status_str, status, cmd->state,
4229 cmd->sess->port_name, cmd->unpacked_lun);
4230 break;
4231 }
4232
4233 case CTIO_PORT_LOGGED_OUT:
4234 case CTIO_PORT_UNAVAILABLE:
4235 {
4236 int logged_out =
4237 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
4238
4239 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
4240 "qla_target(%d): tag %lld, op %x: CTIO with %s status 0x%x received (state %d, port %8phC, LUN %lld)\n",
4241 vha->vp_idx, cmd->se_cmd.tag, op,
4242 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
4243 status, cmd->state, cmd->sess->port_name,
4244 cmd->unpacked_lun);
4245
4246 term_exchg = true;
4247 if (logged_out && cmd->sess) {
4248 /*
4249 * Session is already logged out, but we need
4250 * to notify initiator, who's not aware of this
4251 */
4252 cmd->sess->send_els_logo = 1;
4253 ql_dbg(ql_dbg_disc, vha, 0x20f8,
4254 "%s %d %8phC post del sess\n",
4255 __func__, __LINE__, cmd->sess->port_name);
4256
4257 qlt_schedule_sess_for_deletion(cmd->sess);
4258 }
4259 break;
4260 }
4261
4262 case CTIO_SRR_RECEIVED:
4263 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100e,
4264 "qla_target(%d): tag %lld, op %x: CTIO with SRR status 0x%x received (state %d, port %8phC, LUN %lld, bufflen %d)\n",
4265 vha->vp_idx, cmd->se_cmd.tag, op, status,
4266 cmd->state, cmd->sess->port_name,
4267 cmd->unpacked_lun, cmd->bufflen);
4268
4269 if (qlt_prepare_srr_ctio(qpair, cmd) == 0)
4270 return;
4271 break;
4272
4273 case CTIO_DIF_ERROR: {
4274 struct ctio_crc_from_fw *crc =
4275 (struct ctio_crc_from_fw *)ctio;
4276 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
4277 "qla_target(%d): tag %lld, op %x: CTIO with DIF_ERROR status 0x%x received (state %d, port %8phC, LUN %lld, actual_dif[0x%llx] expect_dif[0x%llx])\n",
4278 vha->vp_idx, cmd->se_cmd.tag, op, status,
4279 cmd->state, cmd->sess->port_name,
4280 cmd->unpacked_lun,
4281 *((u64 *)&crc->actual_dif[0]),
4282 *((u64 *)&crc->expected_dif[0]));
4283
4284 qlt_handle_dif_error(qpair, cmd, crc);
4285 return;
4286 }
4287
4288 case CTIO_FAST_AUTH_ERR:
4289 case CTIO_FAST_INCOMP_PAD_LEN:
4290 case CTIO_FAST_INVALID_REQ:
4291 case CTIO_FAST_SPI_ERR:
4292 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4293 "qla_target(%d): tag %lld, op %x: CTIO with EDIF error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
4294 vha->vp_idx, cmd->se_cmd.tag, op, status,
4295 cmd->state, cmd->sess->port_name,
4296 cmd->unpacked_lun);
4297 break;
4298
4299 default:
4300 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4301 "qla_target(%d): tag %lld, op %x: CTIO with error status 0x%x received (state %d, port %8phC, LUN %lld)\n",
4302 vha->vp_idx, cmd->se_cmd.tag, op, status,
4303 cmd->state, cmd->sess->port_name,
4304 cmd->unpacked_lun);
4305 break;
4306 }
4307
4308 cmd->trc_flags |= TRC_CTIO_ERR;
4309
4310 /*
4311 * In state QLA_TGT_STATE_NEED_DATA the failed CTIO was for
4312 * Data-Out, so either abort the exchange or try sending check
4313 * condition with sense data depending on the severity of
4314 * the error. In state QLA_TGT_STATE_PROCESSED the failed CTIO
4315 * was for status (and possibly Data-In), so don't try sending
4316 * an error status again in that case (if the error was for
4317 * Data-In with status, we could try sending status without
4318 * Data-In, but we don't do that currently).
4319 */
4320 if (!cmd->sent_term_exchg &&
4321 (term_exchg || cmd->state != QLA_TGT_STATE_NEED_DATA))
4322 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
4323 }
4324
4325 if (unlikely(cmd->srr != NULL)) {
4326 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11031,
4327 "qla_target(%d): tag %lld, op %x: expected CTIO with SRR status; got status 0x%x: state %d, bufflen %d\n",
4328 vha->vp_idx, cmd->se_cmd.tag,
4329 cmd->cdb ? cmd->cdb[0] : 0, status, cmd->state,
4330 cmd->bufflen);
4331 qlt_srr_abort(cmd, true);
4332 }
4333
4334 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4335 cmd->trc_flags |= TRC_CTIO_DONE;
4336
4337 if (likely(status == CTIO_SUCCESS))
4338 cmd->rsp_sent = 1;
4339
4340 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4341 cmd->state = QLA_TGT_STATE_DATA_IN;
4342
4343 if (status == CTIO_SUCCESS)
4344 cmd->write_data_transferred = 1;
4345
4346 cmd->jiffies_at_hw_st_entry = 0;
4347 ha->tgt.tgt_ops->handle_data(cmd);
4348 return;
4349 } else if (cmd->aborted) {
4350 cmd->trc_flags |= TRC_CTIO_ABORTED;
4351 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4352 "qla_target(%d): tag %lld: Aborted command finished\n",
4353 vha->vp_idx, cmd->se_cmd.tag);
4354 } else {
4355 cmd->trc_flags |= TRC_CTIO_STRANGE;
4356 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4357 "qla_target(%d): tag %lld: A command in state (%d) should not return a CTIO complete\n",
4358 vha->vp_idx, cmd->se_cmd.tag, cmd->state);
4359 }
4360
4361 if (unlikely(status != CTIO_SUCCESS) &&
4362 !cmd->aborted) {
4363 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4364 dump_stack();
4365 }
4366
4367 ha->tgt.tgt_ops->free_cmd(cmd);
4368 }
4369
4370 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4371 uint8_t task_codes)
4372 {
4373 int fcp_task_attr;
4374
4375 switch (task_codes) {
4376 case ATIO_SIMPLE_QUEUE:
4377 fcp_task_attr = TCM_SIMPLE_TAG;
4378 break;
4379 case ATIO_HEAD_OF_QUEUE:
4380 fcp_task_attr = TCM_HEAD_TAG;
4381 break;
4382 case ATIO_ORDERED_QUEUE:
4383 fcp_task_attr = TCM_ORDERED_TAG;
4384 break;
4385 case ATIO_ACA_QUEUE:
4386 fcp_task_attr = TCM_ACA_TAG;
4387 break;
4388 case ATIO_UNTAGGED:
4389 fcp_task_attr = TCM_SIMPLE_TAG;
4390 break;
4391 default:
4392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4393 "qla_target: unknown task code %x, use ORDERED instead\n",
4394 task_codes);
4395 fcp_task_attr = TCM_ORDERED_TAG;
4396 break;
4397 }
4398
4399 return fcp_task_attr;
4400 }
4401
4402 /*
4403 * Process context for I/O path into tcm_qla2xxx code
4404 */
4405 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4406 {
4407 scsi_qla_host_t *vha = cmd->vha;
4408 struct qla_hw_data *ha = vha->hw;
4409 struct fc_port *sess = cmd->sess;
4410 struct atio_from_isp *atio = &cmd->atio;
4411 unsigned long flags;
4412 uint32_t data_length;
4413 int ret, fcp_task_attr, data_dir, bidi = 0;
4414 struct qla_qpair *qpair = cmd->qpair;
4415
4416 cmd->cmd_in_wq = 0;
4417 cmd->trc_flags |= TRC_DO_WORK;
4418
4419 if (cmd->aborted) {
4420 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4421 "cmd with tag %u is aborted\n",
4422 cmd->atio.u.isp24.exchange_addr);
4423 goto out_term;
4424 }
4425
4426 cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
4427
4428 if (atio->u.isp24.fcp_cmnd.rddata &&
4429 atio->u.isp24.fcp_cmnd.wrdata) {
4430 bidi = 1;
4431 data_dir = DMA_TO_DEVICE;
4432 } else if (atio->u.isp24.fcp_cmnd.rddata)
4433 data_dir = DMA_FROM_DEVICE;
4434 else if (atio->u.isp24.fcp_cmnd.wrdata)
4435 data_dir = DMA_TO_DEVICE;
4436 else
4437 data_dir = DMA_NONE;
4438
4439 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4440 atio->u.isp24.fcp_cmnd.task_attr);
4441 data_length = get_datalen_for_atio(atio);
4442
4443 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cmd->cdb, data_length,
4444 fcp_task_attr, data_dir, bidi);
4445 if (ret != 0)
4446 goto out_term;
4447 /*
4448 * Drop extra session reference from qlt_handle_cmd_for_atio().
4449 */
4450 ha->tgt.tgt_ops->put_sess(sess);
4451 return;
4452
4453 out_term:
4454 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4455 /*
4456 * cmd has not sent to target yet, so pass NULL as the second
4457 * argument to qlt_send_term_exchange() and free the memory here.
4458 */
4459 cmd->trc_flags |= TRC_DO_WORK_ERR;
4460 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
4461 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1);
4462
4463 qlt_decr_num_pend_cmds(vha);
4464 if (unlikely(cmd->cdb != &cmd->atio.u.isp24.fcp_cmnd.cdb[0])) {
4465 kfree(cmd->cdb);
4466 cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
4467 cmd->cdb_len = 16;
4468 }
4469 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4470 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
4471
4472 ha->tgt.tgt_ops->put_sess(sess);
4473 }
4474
4475 static void qlt_do_work(struct work_struct *work)
4476 {
4477 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4478 scsi_qla_host_t *vha = cmd->vha;
4479 unsigned long flags;
4480
4481 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4482 list_del(&cmd->cmd_list);
4483 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4484
4485 __qlt_do_work(cmd);
4486 }
4487
4488 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4489 {
4490 unsigned long flags;
4491 struct qla_hw_data *ha = vha->hw;
4492 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4493 void *node;
4494 u64 key = 0;
4495
4496 ql_log(ql_log_info, vha, 0x706c,
4497 "User update Number of Active Qpairs %d\n",
4498 ha->tgt.num_act_qpairs);
4499
4500 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4501
4502 btree_for_each_safe64(&tgt->lun_qpair_map, key, node)
4503 btree_remove64(&tgt->lun_qpair_map, key);
4504
4505 ha->base_qpair->lun_cnt = 0;
4506 for (key = 0; key < ha->max_qpairs; key++)
4507 if (ha->queue_pair_map[key])
4508 ha->queue_pair_map[key]->lun_cnt = 0;
4509
4510 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4511 }
4512
4513 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4514 struct qla_tgt_cmd *cmd)
4515 {
4516 struct qla_qpair *qpair, *qp;
4517 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4518 struct qla_qpair_hint *h;
4519
4520 if (vha->flags.qpairs_available) {
4521 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4522 if (unlikely(!h)) {
4523 /* spread the lun-to-qpair mapping evenly */
4524 int lcnt = 0, rc;
4525 struct scsi_qla_host *base_vha =
4526 pci_get_drvdata(vha->hw->pdev);
4527
4528 qpair = vha->hw->base_qpair;
4529 if (qpair->lun_cnt == 0) {
4530 qpair->lun_cnt++;
4531 h = qla_qpair_to_hint(tgt, qpair);
4532 BUG_ON(!h);
4533 rc = btree_insert64(&tgt->lun_qpair_map,
4534 cmd->unpacked_lun, h, GFP_ATOMIC);
4535 if (rc) {
4536 qpair->lun_cnt--;
4537 ql_log(ql_log_info, vha, 0xd037,
4538 "Unable to insert lun %llx into lun_qpair_map\n",
4539 cmd->unpacked_lun);
4540 }
4541 goto out;
4542 } else {
4543 lcnt = qpair->lun_cnt;
4544 }
4545
4546 h = NULL;
4547 list_for_each_entry(qp, &base_vha->qp_list,
4548 qp_list_elem) {
4549 if (qp->lun_cnt == 0) {
4550 qp->lun_cnt++;
4551 h = qla_qpair_to_hint(tgt, qp);
4552 BUG_ON(!h);
4553 rc = btree_insert64(&tgt->lun_qpair_map,
4554 cmd->unpacked_lun, h, GFP_ATOMIC);
4555 if (rc) {
4556 qp->lun_cnt--;
4557 ql_log(ql_log_info, vha, 0xd038,
4558 "Unable to insert lun %llx into lun_qpair_map\n",
4559 cmd->unpacked_lun);
4560 }
4561 qpair = qp;
4562 goto out;
4563 } else {
4564 if (qp->lun_cnt < lcnt) {
4565 lcnt = qp->lun_cnt;
4566 qpair = qp;
4567 continue;
4568 }
4569 }
4570 }
4571 BUG_ON(!qpair);
4572 qpair->lun_cnt++;
4573 h = qla_qpair_to_hint(tgt, qpair);
4574 BUG_ON(!h);
4575 rc = btree_insert64(&tgt->lun_qpair_map,
4576 cmd->unpacked_lun, h, GFP_ATOMIC);
4577 if (rc) {
4578 qpair->lun_cnt--;
4579 ql_log(ql_log_info, vha, 0xd039,
4580 "Unable to insert lun %llx into lun_qpair_map\n",
4581 cmd->unpacked_lun);
4582 }
4583 }
4584 } else {
4585 h = &tgt->qphints[0];
4586 }
4587 out:
4588 cmd->qpair = h->qpair;
4589 cmd->se_cmd.cpuid = h->cpuid;
4590 }
4591
4592 /*
4593 * Safely make a fixed-length copy of a variable-length atio by truncating the
4594 * CDB if necessary.
4595 */
4596 static void memcpy_atio(struct atio_from_isp *dst,
4597 const struct atio_from_isp *src)
4598 {
4599 int len;
4600
4601 memcpy(dst, src, sizeof(*dst));
4602
4603 /*
4604 * If the CDB was truncated, prevent get_datalen_for_atio() from
4605 * accessing invalid memory.
4606 */
4607 len = src->u.isp24.fcp_cmnd.add_cdb_len;
4608 if (unlikely(len != 0)) {
4609 dst->u.isp24.fcp_cmnd.add_cdb_len = 0;
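		/*
		 * In the FCP_CMND IU the 4-byte FCP_DL (data length) field
		 * follows the additional CDB bytes, so relocate it to the
		 * offset that get_datalen_for_atio() reads when
		 * add_cdb_len == 0.
		 */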
4610 memcpy(&dst->u.isp24.fcp_cmnd.add_cdb[0],
4611 &src->u.isp24.fcp_cmnd.add_cdb[len * 4],
4612 4);
4613 }
4614 }
4615
4616 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4617 struct fc_port *sess,
4618 struct atio_from_isp *atio)
4619 {
4620 struct qla_tgt_cmd *cmd;
4621 int add_cdb_len;
4622
4623 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4624 if (!cmd)
4625 return NULL;
4626
4627 cmd->cmd_type = TYPE_TGT_CMD;
4628 memcpy_atio(&cmd->atio, atio);
4629 INIT_LIST_HEAD(&cmd->sess_cmd_list);
4630 cmd->state = QLA_TGT_STATE_NEW;
4631 cmd->tgt = vha->vha_tgt.qla_tgt;
4632 qlt_incr_num_pend_cmds(vha);
4633 cmd->vha = vha;
4634 cmd->sess = sess;
4635 cmd->loop_id = sess->loop_id;
4636 cmd->conf_compl_supported = sess->conf_compl_supported;
4637
4638 cmd->trc_flags = 0;
4639 cmd->jiffies_at_alloc = get_jiffies_64();
4640
4641 cmd->unpacked_lun = scsilun_to_int(
4642 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
4643 qlt_assign_qpair(vha, cmd);
4644 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4645 cmd->vp_idx = vha->vp_idx;
4646 cmd->edif = sess->edif.enable;
4647
4648 cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
4649 cmd->cdb_len = 16;
4650
4651 /*
4652 * NOTE: memcpy_atio() set cmd->atio.u.isp24.fcp_cmnd.add_cdb_len to 0,
4653 * so use the original value here.
4654 */
4655 add_cdb_len = atio->u.isp24.fcp_cmnd.add_cdb_len;
4656 if (unlikely(add_cdb_len != 0)) {
4657 int cdb_len = 16 + add_cdb_len * 4;
4658 u8 *cdb;
4659
4660 cdb = kmalloc(cdb_len, GFP_ATOMIC);
4661 if (unlikely(!cdb)) {
4662 vha->hw->tgt.tgt_ops->free_cmd(cmd);
4663 return NULL;
4664 }
4665 /* CAUTION: copy CDB from atio not cmd->atio */
4666 memcpy(cdb, atio->u.isp24.fcp_cmnd.cdb, cdb_len);
4667 cmd->cdb = cdb;
4668 cmd->cdb_len = cdb_len;
4669 }
4670
4671 return cmd;
4672 }
4673
4674 /* ha->hardware_lock supposed to be held on entry */
4675 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4676 struct atio_from_isp *atio)
4677 {
4678 struct qla_hw_data *ha = vha->hw;
4679 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4680 struct fc_port *sess;
4681 struct qla_tgt_cmd *cmd;
4682 unsigned long flags;
4683 port_id_t id;
4684
4685 if (unlikely(tgt->tgt_stop)) {
4686 ql_dbg(ql_dbg_io, vha, 0x3061,
4687 "New command while device %p is shutting down\n", tgt);
4688 return -ENODEV;
4689 }
4690
4691 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
4692 if (IS_SW_RESV_ADDR(id))
4693 return -EBUSY;
4694
4695 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4696 if (unlikely(!sess))
4697 return -EFAULT;
4698
4699 /* Another WWN used to have our s_id. Our PLOGI scheduled its
4700 * session deletion, but it's still in sess_del_work wq */
4701 if (sess->deleted) {
4702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4703 "New command while old session %p is being deleted\n",
4704 sess);
4705 return -EFAULT;
4706 }
4707
4708 /*
4709 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
4710 */
4711 if (!kref_get_unless_zero(&sess->sess_kref)) {
4712 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4713 "%s: kref_get fail, %8phC oxid %x \n",
4714 __func__, sess->port_name,
4715 be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
4716 return -EFAULT;
4717 }
4718
4719 cmd = qlt_get_tag(vha, sess, atio);
4720 if (!cmd) {
4721 ql_dbg(ql_dbg_io, vha, 0x3062,
4722 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4723 ha->tgt.tgt_ops->put_sess(sess);
4724 return -EBUSY;
4725 }
4726
4727 cmd->cmd_in_wq = 1;
4728 cmd->trc_flags |= TRC_NEW_CMD;
4729
4730 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4731 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4732 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4733
4734 INIT_WORK(&cmd->work, qlt_do_work);
4735 if (vha->flags.qpairs_available) {
4736 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4737 } else if (ha->msix_count) {
4738 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4739 queue_work(qla_tgt_wq, &cmd->work);
4740 else
4741 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4742 &cmd->work);
4743 } else {
4744 queue_work(qla_tgt_wq, &cmd->work);
4745 }
4746
4747 return 0;
4748 }
4749
4750 /* ha->hardware_lock supposed to be held on entry */
4751 static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
4752 int fn, void *iocb, int flags)
4753 {
4754 struct scsi_qla_host *vha = sess->vha;
4755 struct qla_hw_data *ha = vha->hw;
4756 struct qla_tgt_mgmt_cmd *mcmd;
4757 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4758 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4759
4760 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4761 if (!mcmd) {
4762 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4763 "qla_target(%d): Allocation of management "
4764 "command failed, some commands and their data could "
4765 "leak\n", vha->vp_idx);
4766 return -ENOMEM;
4767 }
4768 memset(mcmd, 0, sizeof(*mcmd));
4769 mcmd->sess = sess;
4770
4771 if (iocb) {
4772 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4773 sizeof(mcmd->orig_iocb.imm_ntfy));
4774 }
4775 mcmd->tmr_func = fn;
4776 mcmd->flags = flags;
4777 mcmd->reset_count = ha->base_qpair->chip_reset;
4778 mcmd->qpair = h->qpair;
4779 mcmd->vha = vha;
4780 mcmd->se_cmd.cpuid = h->cpuid;
4781 mcmd->unpacked_lun = lun;
4782
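	/*
	 * LUN-scoped TMFs are routed to the same qpair that
	 * qlt_assign_qpair() picks for that LUN's commands; broader TMFs
	 * keep the default first hint.
	 */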
4783 switch (fn) {
4784 case QLA_TGT_LUN_RESET:
4785 case QLA_TGT_CLEAR_TS:
4786 case QLA_TGT_ABORT_TS:
4787 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4788 fallthrough;
4789 case QLA_TGT_CLEAR_ACA:
4790 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4791 mcmd->qpair = h->qpair;
4792 mcmd->se_cmd.cpuid = h->cpuid;
4793 break;
4794
4795 case QLA_TGT_TARGET_RESET:
4796 case QLA_TGT_NEXUS_LOSS_SESS:
4797 case QLA_TGT_NEXUS_LOSS:
4798 case QLA_TGT_ABORT_ALL:
4799 default:
4800 /* no-op */
4801 break;
4802 }
4803
4804 INIT_WORK(&mcmd->work, qlt_do_tmr_work);
4805 queue_work_on(mcmd->se_cmd.cpuid, qla_tgt_wq,
4806 &mcmd->work);
4807
4808 return 0;
4809 }
4810
4811 /* ha->hardware_lock supposed to be held on entry */
4812 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4813 {
4814 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4815 struct qla_hw_data *ha = vha->hw;
4816 struct fc_port *sess;
4817 u64 unpacked_lun;
4818 int fn;
4819 unsigned long flags;
4820
4821 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4822
4823 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4824 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4825 a->u.isp24.fcp_hdr.s_id);
4826 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4827
4828 unpacked_lun =
4829 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4830
4831 if (sess == NULL || sess->deleted)
4832 return -EFAULT;
4833
4834 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4835 }
4836
4837 /* ha->hardware_lock supposed to be held on entry */
4838 static int __qlt_abort_task(struct scsi_qla_host *vha,
4839 struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
4840 {
4841 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
4842 struct qla_hw_data *ha = vha->hw;
4843 struct qla_tgt_mgmt_cmd *mcmd;
4844 u64 unpacked_lun;
4845 int rc;
4846
4847 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
4848 if (mcmd == NULL) {
4849 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4850 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4851 vha->vp_idx, __func__);
4852 return -ENOMEM;
4853 }
4854 memset(mcmd, 0, sizeof(*mcmd));
4855
4856 mcmd->sess = sess;
4857 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
4858 sizeof(mcmd->orig_iocb.imm_ntfy));
4859
4860 unpacked_lun =
4861 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
4862 mcmd->reset_count = ha->base_qpair->chip_reset;
4863 mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
4864 mcmd->qpair = ha->base_qpair;
4865
4866 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
4867 le16_to_cpu(iocb->u.isp2x.seq_id));
4868 if (rc != 0) {
4869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4870 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
4871 vha->vp_idx, rc);
4872 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
4873 return -EFAULT;
4874 }
4875
4876 return 0;
4877 }
4878
4879 /* ha->hardware_lock supposed to be held on entry */
4880 static int qlt_abort_task(struct scsi_qla_host *vha,
4881 struct imm_ntfy_from_isp *iocb)
4882 {
4883 struct qla_hw_data *ha = vha->hw;
4884 struct fc_port *sess;
4885 int loop_id;
4886 unsigned long flags;
4887
4888 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
4889
4890 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
4891 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4892 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
4893
4894 if (sess == NULL) {
4895 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4896 "qla_target(%d): task abort for unexisting "
4897 "session\n", vha->vp_idx);
4898 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4899 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
4900 }
4901
4902 return __qlt_abort_task(vha, iocb, sess);
4903 }
4904
4905 void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4906 {
4907 if (rc != MBS_COMMAND_COMPLETE) {
4908 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4909 "%s: se_sess %p / sess %p from"
4910 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4911 " LOGO failed: %#x\n",
4912 __func__,
4913 fcport->se_sess,
4914 fcport,
4915 fcport->port_name, fcport->loop_id,
4916 fcport->d_id.b.domain, fcport->d_id.b.area,
4917 fcport->d_id.b.al_pa, rc);
4918 }
4919
4920 fcport->logout_completed = 1;
4921 }
4922
4923 /*
4924 * ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4925 *
4926 * Schedules sessions with matching port_id/loop_id but different wwn for
4927 * deletion. Returns existing session with matching wwn if present.
4928 * Null otherwise.
4929 */
4930 struct fc_port *
4931 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4932 port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
4933 {
4934 struct fc_port *sess = NULL, *other_sess;
4935 uint64_t other_wwn;
4936
4937 *conflict_sess = NULL;
4938
4939 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4940
4941 other_wwn = wwn_to_u64(other_sess->port_name);
4942
4943 if (wwn == other_wwn) {
4944 WARN_ON(sess);
4945 sess = other_sess;
4946 continue;
4947 }
4948
4949 /* find other sess with nport_id collision */
4950 if (port_id.b24 == other_sess->d_id.b24) {
4951 if (loop_id != other_sess->loop_id) {
4952 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4953 "Invalidating sess %p loop_id %d wwn %llx.\n",
4954 other_sess, other_sess->loop_id, other_wwn);
4955
4956 /*
4957 * logout_on_delete is set by default, but another
4958 * session that has the same s_id/loop_id combo
4959 * might have cleared it when it requested this session's
4960 * deletion, so don't touch it
4961 */
4962 qlt_schedule_sess_for_deletion(other_sess);
4963 } else {
4964 /*
4965 * Another wwn used to have our s_id/loop_id
4966 * kill the session, but don't free the loop_id
4967 */
4968 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4969 "Invalidating sess %p loop_id %d wwn %llx.\n",
4970 other_sess, other_sess->loop_id, other_wwn);
4971
4972 other_sess->keep_nport_handle = 1;
4973 if (other_sess->disc_state != DSC_DELETED)
4974 *conflict_sess = other_sess;
4975 qlt_schedule_sess_for_deletion(other_sess);
4976 }
4977 continue;
4978 }
4979
4980 /* find other sess with nport handle collision */
4981 if ((loop_id == other_sess->loop_id) &&
4982 (loop_id != FC_NO_LOOP_ID)) {
4983 ql_dbg(ql_dbg_disc, vha, 0x1000d,
4984 "Invalidating sess %p loop_id %d wwn %llx.\n",
4985 other_sess, other_sess->loop_id, other_wwn);
4986
4987 /* Same loop_id but different s_id
4988 * Ok to kill and logout */
4989 qlt_schedule_sess_for_deletion(other_sess);
4990 }
4991 }
4992
4993 return sess;
4994 }
4995
4996 /* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4997 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4998 {
4999 struct qla_tgt_sess_op *op;
5000 struct qla_tgt_cmd *cmd;
5001 uint32_t key;
5002 int count = 0;
5003 unsigned long flags;
5004
5005 key = (((u32)s_id->b.domain << 16) |
5006 ((u32)s_id->b.area << 8) |
5007 ((u32)s_id->b.al_pa));
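	/* e.g. s_id 01:02:03 yields key 0x010203, matching sid_to_key() below */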
5008
5009 spin_lock_irqsave(&vha->cmd_list_lock, flags);
5010 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
5011 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
5012
5013 if (op_key == key) {
5014 op->aborted = true;
5015 count++;
5016 }
5017 }
5018
5019 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
5020 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
5021
5022 if (cmd_key == key) {
5023 cmd->aborted = 1;
5024 count++;
5025 }
5026 }
5027 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
5028
5029 return count;
5030 }
5031
5032 static int qlt_handle_login(struct scsi_qla_host *vha,
5033 struct imm_ntfy_from_isp *iocb)
5034 {
5035 struct fc_port *sess = NULL, *conflict_sess = NULL;
5036 uint64_t wwn;
5037 port_id_t port_id;
5038 uint16_t loop_id, wd3_lo;
5039 int res = 0;
5040 struct qlt_plogi_ack_t *pla;
5041 unsigned long flags;
5042
5043 lockdep_assert_held(&vha->hw->hardware_lock);
5044
5045 wwn = wwn_to_u64(iocb->u.isp24.port_name);
5046
5047 port_id.b.domain = iocb->u.isp24.port_id[2];
5048 port_id.b.area = iocb->u.isp24.port_id[1];
5049 port_id.b.al_pa = iocb->u.isp24.port_id[0];
5050 port_id.b.rsvd_1 = 0;
5051
5052 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
5053
5054 /* Mark all stale commands sitting in qla_tgt_wq for deletion */
5055 abort_cmds_for_s_id(vha, &port_id);
5056
5057 if (wwn) {
5058 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5059 sess = qlt_find_sess_invalidate_other(vha, wwn,
5060 port_id, loop_id, &conflict_sess);
5061 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5062 } else {
5063 ql_dbg(ql_dbg_disc, vha, 0xffff,
5064 "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
5065 __func__, __LINE__, loop_id, port_id.b24);
5066 qlt_send_term_imm_notif(vha, iocb, 1);
5067 goto out;
5068 }
5069
5070 if (IS_SW_RESV_ADDR(port_id)) {
5071 res = 1;
5072 goto out;
5073 }
5074
5075 if (vha->hw->flags.edif_enabled &&
5076 !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
5077 iocb->u.isp24.status_subcode == ELS_PLOGI &&
5078 !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
5079 ql_dbg(ql_dbg_disc, vha, 0xffff,
5080 "%s %d Term INOT due to app not available lid=%d, NportID %06X ",
5081 __func__, __LINE__, loop_id, port_id.b24);
5082 qlt_send_term_imm_notif(vha, iocb, 1);
5083 goto out;
5084 }
5085
5086 if (vha->hw->flags.edif_enabled) {
5087 if (DBELL_INACTIVE(vha)) {
5088 ql_dbg(ql_dbg_disc, vha, 0xffff,
5089 "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
5090 __func__, __LINE__, loop_id, port_id.b24);
5091 qlt_send_term_imm_notif(vha, iocb, 1);
5092 goto out;
5093 } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
5094 !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) {
5095 ql_dbg(ql_dbg_disc, vha, 0xffff,
5096 "%s %d Term INOT due to unsecure lid=%d, NportID %06X ",
5097 __func__, __LINE__, loop_id, port_id.b24);
5098 qlt_send_term_imm_notif(vha, iocb, 1);
5099 goto out;
5100 }
5101 }
5102
5103 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
5104 if (!pla) {
5105 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
5106 "%s %d %8phC Term INOT due to mem alloc fail",
5107 __func__, __LINE__,
5108 iocb->u.isp24.port_name);
5109 qlt_send_term_imm_notif(vha, iocb, 1);
5110 goto out;
5111 }
5112
5113 if (conflict_sess) {
5114 conflict_sess->login_gen++;
5115 qlt_plogi_ack_link(vha, pla, conflict_sess,
5116 QLT_PLOGI_LINK_CONFLICT);
5117 }
5118
5119 if (!sess) {
5120 pla->ref_count++;
5121 ql_dbg(ql_dbg_disc, vha, 0xffff,
5122 "%s %d %8phC post new sess\n",
5123 __func__, __LINE__, iocb->u.isp24.port_name);
5124 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
5125 qla24xx_post_newsess_work(vha, &port_id,
5126 iocb->u.isp24.port_name,
5127 iocb->u.isp24.u.plogi.node_name,
5128 pla, 0);
5129 else
5130 qla24xx_post_newsess_work(vha, &port_id,
5131 iocb->u.isp24.port_name, NULL,
5132 pla, 0);
5133
5134 goto out;
5135 }
5136
5137 if (sess->disc_state == DSC_UPD_FCPORT) {
5138 u16 sec;
5139
5140 /*
5141 * Remote port registration is still going on from
5142 * previous login. Allow it to finish before we
5143 * accept the new login.
5144 */
5145 sess->next_disc_state = DSC_DELETE_PEND;
5146 sec = jiffies_to_msecs(jiffies -
5147 sess->jiffies_at_registration) / 1000;
5148 if (sess->sec_since_registration < sec && sec &&
5149 !(sec % 5)) {
5150 sess->sec_since_registration = sec;
5151 ql_dbg(ql_dbg_disc, vha, 0xffff,
5152 "%s %8phC - Slow Rport registration (%d Sec)\n",
5153 __func__, sess->port_name, sec);
5154 }
5155
5156 if (!conflict_sess) {
5157 list_del(&pla->list);
5158 kmem_cache_free(qla_tgt_plogi_cachep, pla);
5159 }
5160
5161 qlt_send_term_imm_notif(vha, iocb, 1);
5162 goto out;
5163 }
5164
5165 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
5166 sess->d_id = port_id;
5167 sess->login_gen++;
5168 sess->loop_id = loop_id;
5169
5170 if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
5171 /* remote port has assigned Port ID */
5172 if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
5173 vha->d_id = sess->d_id;
5174
5175 ql_dbg(ql_dbg_disc, vha, 0xffff,
5176 "%s %8phC - send port online\n",
5177 __func__, sess->port_name);
5178
5179 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
5180 sess->d_id.b24);
5181 }
5182
5183 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
5184 sess->fw_login_state = DSC_LS_PRLI_PEND;
5185 sess->local = 0;
5186 sess->loop_id = loop_id;
5187 sess->d_id = port_id;
5189 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
5190
5191 if (wd3_lo & BIT_7)
5192 sess->conf_compl_supported = 1;
5193
5194 if ((wd3_lo & BIT_4) == 0)
5195 sess->port_type = FCT_INITIATOR;
5196 else
5197 sess->port_type = FCT_TARGET;
5198
5199 } else
5200 sess->fw_login_state = DSC_LS_PLOGI_PEND;
5201
5202
5203 ql_dbg(ql_dbg_disc, vha, 0x20f9,
5204 "%s %d %8phC DS %d\n",
5205 __func__, __LINE__, sess->port_name, sess->disc_state);
5206
5207 switch (sess->disc_state) {
5208 case DSC_DELETED:
5209 case DSC_LOGIN_PEND:
5210 qlt_plogi_ack_unref(vha, pla);
5211 break;
5212
5213 default:
5214 /*
5215 * Under normal circumstances we want to release nport handle
5216 * during LOGO process to avoid nport handle leaks inside FW.
5217 * The exception is when LOGO is done while another PLOGI with
5218 * the same nport handle is waiting as might be the case here.
5219 * Note: there is always a possibility of a race where session
5220 * deletion has already started for other reasons (e.g. ACL
5221 * removal) and now PLOGI arrives:
5222 * 1. if PLOGI arrived in FW after nport handle has been freed,
5223 * FW must have assigned this PLOGI a new/same handle and we
5224 * can proceed ACK'ing it as usual when session deletion
5225 * completes.
5226 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
5227 * bit reached it, the handle has now been released. We'll
5228 * get an error when we ACK this PLOGI. Nothing will be sent
5229 * back to initiator. Initiator should eventually retry
5230 * PLOGI and situation will correct itself.
5231 */
5232 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
5233 (sess->d_id.b24 == port_id.b24));
5234
5235 ql_dbg(ql_dbg_disc, vha, 0x20f9,
5236 "%s %d %8phC post del sess\n",
5237 __func__, __LINE__, sess->port_name);
5238
5239
5240 qlt_schedule_sess_for_deletion(sess);
5241 break;
5242 }
5243 out:
5244 return res;
5245 }
5246
5247 /*
5248 * Return true if the HBA firmware version is known to have bugs that
5249 * prevent Sequence Level Error Recovery (SLER) / Sequence Retransmission
5250 * Request (SRR) from working.
5251 *
5252 * Some bad versions are based on testing and some are based on "Marvell Fibre
5253 * Channel Firmware Release Notes".
5254 */
5255 static bool qlt_has_sler_fw_bug(struct qla_hw_data *ha)
5256 {
5257 bool has_sler_fw_bug = false;
5258
5259 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5260 /*
5261 * In the fw release notes:
5262 * ER147301 was added to v9.05.00 causing SLER regressions
5263 * FCD-259 was fixed in v9.08.00
5264 * FCD-371 was fixed in v9.08.00
5265 * FCD-1183 was fixed in v9.09.00
5266 *
5267 * QLE2694L (ISP2071) known bad firmware (tested):
5268 * 9.06.02
5269 * 9.07.00
5270 * 9.08.02
5271 * SRRs trigger hundreds of bogus entries in the response
5272 * queue and various other problems.
5273 *
5274 * QLE2694L known good firmware (tested):
5275 * 8.08.05
5276 * 9.09.00
5277 *
5278 * Suspected bad firmware (not confirmed by testing):
5279 * v9.05.xx
5280 *
5281 * unknown firmware:
5282 * 9.00.00 - 9.04.xx
5283 */
5284 if (ha->fw_major_version == 9 &&
5285 ha->fw_minor_version >= 5 &&
5286 ha->fw_minor_version <= 8)
5287 has_sler_fw_bug = true;
5288 }
5289
5290 return has_sler_fw_bug;
5291 }
5292
5293 /*
5294 * Return true and print a message if the HA has been reset since the SRR
5295 * immediate notify was received; else return false.
5296 */
5297 static bool qlt_srr_is_chip_reset(struct scsi_qla_host *vha,
5298 struct qla_qpair *qpair, struct qla_tgt_srr *srr)
5299 {
5300 if (!vha->flags.online ||
5301 !qpair->fw_started ||
5302 srr->reset_count != qpair->chip_reset) {
5303 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100d,
5304 "qla_target(%d): chip reset; discarding IMM SRR\n",
5305 vha->vp_idx);
5306 return true;
5307 }
5308 return false;
5309 }
5310
5311 /* Find and return the command associated with a SRR immediate notify. */
5312 static struct qla_tgt_cmd *qlt_srr_to_cmd(struct scsi_qla_host *vha,
5313 const struct imm_ntfy_from_isp *iocb)
5314 {
5315 struct qla_hw_data *ha = vha->hw;
5316 struct fc_port *sess;
5317 struct qla_tgt_cmd *cmd;
5318 uint32_t tag = le32_to_cpu(iocb->u.isp24.exchange_address);
5319 uint16_t loop_id;
5320 be_id_t s_id;
5321 unsigned long flags;
5322
5323 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
5324 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11009,
5325 "qla_target(%d): IMM SRR with unknown exchange address; reject SRR\n",
5326 vha->vp_idx);
5327 return NULL;
5328 }
5329
5330 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
5331
5332 s_id.domain = iocb->u.isp24.port_id[2];
5333 s_id.area = iocb->u.isp24.port_id[1];
5334 s_id.al_pa = iocb->u.isp24.port_id[0];
5335
5336 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
5337 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
5338 if (!sess)
5339 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
5340 if (!sess || sess->deleted) {
5341 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100a,
5342 "qla_target(%d): could not find session for IMM SRR; reject SRR\n",
5343 vha->vp_idx);
5344 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5345 return NULL;
5346 }
5347 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5348
5349 cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, tag);
5350 if (!cmd) {
5351 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100b,
5352 "qla_target(%d): could not find cmd for IMM SRR; reject SRR\n",
5353 vha->vp_idx);
5354 } else {
5355 u16 srr_ox_id = le16_to_cpu(iocb->u.isp24.srr_ox_id);
5356 u16 cmd_ox_id = be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id);
5357
5358 if (srr_ox_id != cmd_ox_id) {
5359 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1100c,
5360 "qla_target(%d): tag %lld: IMM SRR: srr_ox_id[%04x] != cmd_ox_id[%04x]; reject SRR\n",
5361 vha->vp_idx, cmd->se_cmd.tag,
5362 srr_ox_id, cmd_ox_id);
5363 cmd = NULL;
5364 }
5365 }
5366
5367 return cmd;
5368 }
5369
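/*
 * Note on the lookup above (illustrative): the 24-bit FC source address
 * arrives low byte first, so port_id[] = {0xef, 0x01, 0x02} decodes as
 * domain 0x02, area 0x01, al_pa 0xef (i.e. 02:01:ef).  The OX_ID
 * comparison is a second guard: a recycled exchange address must not be
 * matched to a command belonging to a different exchange.
 */
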
5370 /*
5371 * Handle an immediate notify SRR (Sequence Retransmission Request) message from
5372 * the hardware. The hardware will also send a CTIO with CTIO_SRR_RECEIVED status
5373 * for the affected command.
5374 *
5375 * This may be called a second time for the same immediate notify SRR if
5376 * CTIO_SRR_RECEIVED is never received and qlt_srr_abort() is called.
5377 *
5378 * Process context, no locks
5379 */
5380 static void qlt_handle_srr_imm(struct scsi_qla_host *vha,
5381 struct qla_tgt_srr *srr)
5382 {
5383 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5384 struct qla_hw_data *ha = vha->hw;
5385 struct qla_qpair *qpair;
5386 struct qla_tgt_cmd *cmd;
5387 uint8_t srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL;
5388
5389 /* handle qlt_srr_abort() */
5390 if (srr->aborted) {
5391 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11004,
5392 "qla_target(%d): IMM SRR: terminating SRR for aborted cmd\n",
5393 vha->vp_idx);
5394 spin_lock_irq(&ha->hardware_lock);
5395 if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
5396 qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
5397 spin_unlock_irq(&ha->hardware_lock);
5398 kfree(srr);
5399 return;
5400 }
5401 if (srr->reject) {
5402 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
5403 "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
5404 vha->vp_idx);
5405 goto out_reject;
5406 }
5407
5408 /* Find the command associated with the SRR. */
5409 cmd = qlt_srr_to_cmd(vha, &srr->imm_ntfy);
5410 if (cmd == NULL) {
5411 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11005,
5412 "qla_target(%d): IMM SRR: rejecting SRR for unknown cmd\n",
5413 vha->vp_idx);
5414 srr_explain = NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_INVALID_OX_ID_RX_ID;
5415 goto out_reject;
5416 }
5417
5418 if (ha->tgt.tgt_ops->get_cmd_ref(cmd)) {
5419 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11038,
5420 "qla_target(%d): IMM SRR: unable to get cmd ref; rejecting SRR\n",
5421 vha->vp_idx);
5422 cmd = NULL;
5423 goto out_reject;
5424 }
5425
5426 qpair = cmd->qpair;
5427
5428 spin_lock_irq(qpair->qp_lock_ptr);
5429
5430 if (cmd->reset_count != srr->reset_count) {
5431 /* force a miscompare */
5432 srr->reset_count = qpair->chip_reset ^ 1;
5433 }
5434 if (qlt_srr_is_chip_reset(vha, qpair, srr)) {
5435 spin_unlock_irq(qpair->qp_lock_ptr);
5436 ha->tgt.tgt_ops->put_cmd_ref(cmd);
5437 kfree(srr);
5438 return;
5439 }
5440
5441 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11001,
5442 "qla_target(%d): tag %lld, op %x: received IMM SRR\n",
5443 vha->vp_idx, cmd->se_cmd.tag, cmd->cdb ? cmd->cdb[0] : 0);
5444
5445 cmd->trc_flags |= TRC_SRR_IMM;
5446
5447 if (cmd->srr != NULL) {
5448 if (cmd->srr->imm_ntfy_recvd) {
5449 /*
5450 * Received another immediate notify SRR message for
5451 * this command before the previous one could be processed
5452 * (not expected to happen).
5453 */
5454 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11006,
5455 "qla_target(%d): tag %lld: received multiple IMM SRR; reject SRR\n",
5456 vha->vp_idx, cmd->se_cmd.tag);
5457 spin_unlock_irq(qpair->qp_lock_ptr);
5458 ha->tgt.tgt_ops->put_cmd_ref(cmd);
5459 goto out_reject;
5460 }
5461
5462 /* qlt_prepare_srr_ctio() was called first. */
5463 WARN_ON(!cmd->srr->ctio_recvd);
5464
5465 /*
5466 * The immediate notify and CTIO handlers both allocated
5467 * separate srr structs; combine them.
5468 */
5469 memcpy(&cmd->srr->imm_ntfy, &srr->imm_ntfy,
5470 sizeof(srr->imm_ntfy));
5471 kfree(srr);
5472 srr = cmd->srr;
5473 srr->imm_ntfy_recvd = true;
5474
5475 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11002,
5476 "qla_target(%d): tag %lld: schedule SRR work\n",
5477 vha->vp_idx, cmd->se_cmd.tag);
5478
5479 /* Schedule the srr for processing in qlt_handle_srr(). */
5480 spin_lock(&tgt->srr_lock);
5481 list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
5482 /*
5483 * Already running the work function; no need to schedule
5484 * tgt->srr_work.
5485 */
5486 spin_unlock(&tgt->srr_lock);
5487 spin_unlock_irq(qpair->qp_lock_ptr);
5488 /* return with cmd refcount incremented */
5489 return;
5490 }
5491
5492 /* The CTIO SRR for this command has not yet been received. */
5493
5494 if (cmd->sent_term_exchg) {
5495 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11007,
5496 "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
5497 vha->vp_idx, cmd->se_cmd.tag);
5498 spin_unlock_irq(qpair->qp_lock_ptr);
5499 spin_lock_irq(&ha->hardware_lock);
5500 if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
5501 qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
5502 spin_unlock_irq(&ha->hardware_lock);
5503 kfree(srr);
5504 ha->tgt.tgt_ops->put_cmd_ref(cmd);
5505 return;
5506 }
5507
5508 /* If not expecting a CTIO, then reject IMM SRR. */
5509 if (!cmd->cmd_sent_to_fw) {
5510 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11008,
5511 "qla_target(%d): tag %lld: IMM SRR but !cmd_sent_to_fw (state %d); reject SRR\n",
5512 vha->vp_idx, cmd->se_cmd.tag, cmd->state);
5513 spin_unlock_irq(qpair->qp_lock_ptr);
5514 ha->tgt.tgt_ops->put_cmd_ref(cmd);
5515 goto out_reject;
5516 }
5517
5518 /* Expect qlt_prepare_srr_ctio() to be called. */
5519 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11003,
5520 "qla_target(%d): tag %lld: wait for CTIO SRR (state %d)\n",
5521 vha->vp_idx, cmd->se_cmd.tag, cmd->state);
5522 srr->cmd = cmd;
5523 cmd->srr = srr;
5524
5525 spin_unlock_irq(qpair->qp_lock_ptr);
5526
5527 ha->tgt.tgt_ops->put_cmd_ref(cmd);
5528 return;
5529
5530 out_reject:
5531 qpair = vha->hw->base_qpair;
5532 spin_lock_irq(qpair->qp_lock_ptr);
5533 if (!qlt_srr_is_chip_reset(vha, qpair, srr))
5534 qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
5535 NOTIFY_ACK_SRR_FLAGS_REJECT,
5536 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
5537 srr_explain);
5538 spin_unlock_irq(qpair->qp_lock_ptr);
5539 kfree(srr);
5540 }
5541
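/*
 * Ordering sketch (informal): the firmware reports one SRR through two
 * messages that can arrive in either order.
 *
 *   CTIO first: qlt_prepare_srr_ctio() allocates cmd->srr with
 *       ctio_recvd set; qlt_handle_srr_imm() merges the immediate
 *       notify into it and queues the combined srr for qlt_handle_srr().
 *   IMM first: qlt_handle_srr_imm() parks the srr on cmd->srr and
 *       returns; qlt_prepare_srr_ctio() later completes the pair.
 */
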
5542 /*
5543 * First-stage handler for an immediate notify SRR (Sequence Retransmission
5544 * Request) from the hardware: capture the iocb and defer the real work to
5545 * qlt_handle_srr_imm() on a workqueue.
5546 *
5547 * ha->hardware_lock supposed to be held on entry
5548 */
5549 static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
5550 struct imm_ntfy_from_isp *iocb)
5551 {
5552 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5553 struct qla_tgt_srr *srr;
5554
5555 ql_log(ql_log_warn, vha, 0x11000, "qla_target(%d): received IMM SRR\n",
5556 vha->vp_idx);
5557
5558 /*
5559 * Need cmd->qpair->qp_lock_ptr, but have ha->hardware_lock. Defer
5560 * processing to a workqueue so that the right lock can be acquired
5561 * safely.
5562 */
5563
5564 srr = kzalloc(sizeof(*srr), GFP_ATOMIC);
5565 if (!srr)
5566 goto out_reject;
5567
5568 memcpy(&srr->imm_ntfy, iocb, sizeof(srr->imm_ntfy));
5569 srr->imm_ntfy_recvd = true;
5570 srr->reset_count = vha->hw->base_qpair->chip_reset;
5571 spin_lock(&tgt->srr_lock);
5572 list_add_tail(&srr->srr_list_entry, &tgt->srr_list);
5573 queue_work(qla_tgt_wq, &tgt->srr_work);
5574 spin_unlock(&tgt->srr_lock);
5575 /* resume processing in qlt_handle_srr_imm() */
5576 return;
5577
5578 out_reject:
5579 qlt_send_notify_ack(vha->hw->base_qpair, iocb, 0, 0, 0,
5580 NOTIFY_ACK_SRR_FLAGS_REJECT,
5581 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
5582 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
5583 }
5584
5585 /*
5586 * If possible, undo the effect of qlt_set_data_offset() and restore the cmd
5587 * data buffer back to its full size.
5588 */
5589 static int qlt_restore_orig_sg(struct qla_tgt_cmd *cmd)
5590 {
5591 struct scsi_qla_host *vha = cmd->vha;
5592 struct se_cmd *se_cmd = &cmd->se_cmd;
5593
5594 WARN_ON(cmd->sg_mapped);
5595
5596 if (cmd->offset == 0) {
5597 /* qlt_set_data_offset() has not been called. */
5598 return 0;
5599 }
5600
5601 if (se_cmd->t_data_sg == NULL ||
5602 se_cmd->t_data_nents == 0 ||
5603 se_cmd->data_length == 0) {
5604 /* The original scatterlist is not available. */
5605 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102c,
5606 "qla_target(%d): tag %lld: cannot restore original cmd buffer; keep modified buffer at offset %d\n",
5607 vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
5608 return -ENOENT;
5609 }
5610
5611 /* Restore the original scatterlist. */
5612 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102d,
5613 "qla_target(%d): tag %lld: restore original cmd buffer: offset %d -> 0\n",
5614 vha->vp_idx, cmd->se_cmd.tag, cmd->offset);
5615 if (cmd->free_sg) {
5616 cmd->free_sg = 0;
5617 qlt_free_sg(cmd);
5618 }
5619 cmd->offset = 0;
5620 cmd->sg = se_cmd->t_data_sg;
5621 cmd->sg_cnt = se_cmd->t_data_nents;
5622 cmd->bufflen = se_cmd->data_length;
5623 return 0;
5624 }
5625
5626 /*
5627 * Adjust the data buffer of the given command to skip over offset bytes from
5628 * the beginning while also reducing the length by offset bytes.
5629 *
5630 * This may be called multiple times for a single command if there are multiple
5631 * SRRs, with each call reducing the buffer size further relative to the
5632 * previous call. Note that the buffer may be reset back to its original size
5633 * by calling qlt_restore_orig_sg().
5634 */
5635 static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
5636 {
5637 struct scsi_qla_host *vha = cmd->vha;
5638 struct scatterlist *sg_srr_start = NULL, *sg;
5639 uint32_t first_offset = offset;
5640 int sg_srr_cnt, i;
5641 int bufflen = 0;
5642
5643 WARN_ON(cmd->sg_mapped);
5644
5645 ql_dbg(ql_dbg_tgt, vha, 0x11020,
5646 "qla_target(%d): tag %lld: %s: sg %p sg_cnt %d dir %d cmd->offset %d cmd->bufflen %d add offset %u\n",
5647 vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->sg,
5648 cmd->sg_cnt, cmd->dma_data_direction, cmd->offset, cmd->bufflen,
5649 offset);
5650
5651 if (cmd->se_cmd.prot_op != TARGET_PROT_NORMAL) {
5652 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11021,
5653 "qla_target(%d): tag %lld: %s: SRR with protection information at nonzero offset not implemented\n",
5654 vha->vp_idx, cmd->se_cmd.tag, __func__);
5655 return -EINVAL;
5656 }
5657
5658 if (!cmd->sg || !cmd->sg_cnt) {
5659 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11022,
5660 "qla_target(%d): tag %lld: %s: Missing cmd->sg or zero cmd->sg_cnt\n",
5661 vha->vp_idx, cmd->se_cmd.tag, __func__);
5662 return -EINVAL;
5663 }
5664
5665 /*
5666 * Walk the current cmd->sg list until we locate the new sg_srr_start
5667 */
5668 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
5669 ql_dbg(ql_dbg_tgt, vha, 0x11023,
5670 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
5671 i, sg, sg_page(sg), sg->length, sg->offset);
5672
5673 if (first_offset < sg->length) {
5674 sg_srr_start = sg;
5675 break;
5676 }
5677 first_offset -= sg->length;
5678 }
5679
5680 if (!sg_srr_start) {
5681 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11024,
5682 "qla_target(%d): tag %lld: Unable to locate sg_srr_start for offset: %u\n",
5683 vha->vp_idx, cmd->se_cmd.tag, offset);
5684 return -EINVAL;
5685 }
5686
5687 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11025,
5688 "qla_target(%d): tag %lld: prepare SRR sgl at sg index %d of %d byte offset %u of %u\n",
5689 vha->vp_idx, cmd->se_cmd.tag, i, cmd->sg_cnt,
5690 first_offset, sg_srr_start->length);
5691
5692 sg_srr_cnt = cmd->sg_cnt - i;
5693
5694 if (first_offset == 0 && !cmd->free_sg) {
5695 /*
5696 * The offset points to the beginning of a scatterlist element.
5697 * In this case there is no need to modify the first scatterlist
5698 * element, so we can just point directly inside the original
5699 * unmodified scatterlist.
5700 */
5701 ql_dbg(ql_dbg_tgt, vha, 0x11026, "point directly to old sgl\n");
5702 cmd->sg = sg_srr_start;
5703 } else {
5704 /*
5705 * Allocate at most 2 new scatterlist elements to reduce memory
5706 * requirements.
5707 */
5708 int n_alloc_sg = min(sg_srr_cnt, 2);
5709 struct scatterlist *sg_srr =
5710 kmalloc_array(n_alloc_sg, sizeof(*sg_srr), GFP_ATOMIC);
5711 if (!sg_srr) {
5712 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11027,
5713 "qla_target(%d): tag %lld: Unable to allocate SRR scatterlist\n",
5714 vha->vp_idx, cmd->se_cmd.tag);
5715 return -ENOMEM;
5716 }
5717 sg_init_table(sg_srr, n_alloc_sg);
5718
5719 /* Init the first sg element to skip over the unneeded data. */
5720 sg_set_page(&sg_srr[0], sg_page(sg_srr_start),
5721 sg_srr_start->length - first_offset,
5722 sg_srr_start->offset + first_offset);
5723 if (sg_srr_cnt == 1) {
5724 ql_dbg(ql_dbg_tgt, vha, 0x11028,
5725 "single-element array\n");
5726 } else if (sg_srr_cnt == 2) {
5727 /* Only two elements; copy the last element. */
5728 ql_dbg(ql_dbg_tgt, vha, 0x11029,
5729 "complete two-element array\n");
5730 sg = sg_next(sg_srr_start);
5731 sg_set_page(&sg_srr[1], sg_page(sg), sg->length,
5732 sg->offset);
5733 } else {
5734 /*
5735 * Three or more elements; chain our newly-allocated
5736 * 2-entry array to the rest of the original
5737 * scatterlist at the splice point.
5738 */
5739 ql_dbg(ql_dbg_tgt, vha, 0x1102a,
5740 "chain to original scatterlist\n");
5741 sg = sg_next(sg_srr_start);
5742 sg_chain(sg_srr, 2, sg);
5743 }
5744
5745 /*
5746 * If the previous scatterlist was allocated here on a previous
5747 * call, then it should be safe to free now.
5748 */
5749 if (cmd->free_sg)
5750 qlt_free_sg(cmd);
5751 cmd->sg = sg_srr;
5752 cmd->free_sg = 1;
5753 }
5754
5755 /* Note that sg_cnt doesn't include any extra chain elements. */
5756 cmd->sg_cnt = sg_srr_cnt;
5757 cmd->offset += offset;
5758 cmd->bufflen -= offset;
5759
5760 /* Check the scatterlist length for consistency. */
5761 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
5762 bufflen += sg->length;
5763 }
5764 if (bufflen != cmd->bufflen) {
5765 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1102b,
5766 "qla_target(%d): tag %lld: %s: bad sgl length: expected %d got %d\n",
5767 vha->vp_idx, cmd->se_cmd.tag, __func__, cmd->bufflen, bufflen);
5768 return -EINVAL;
5769 }
5770
5771 return 0;
5772 }
5773
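/*
 * Worked example (hypothetical buffer): cmd->sg has three 4 KiB
 * elements and offset == 6144.  The walk stops at element 1 with
 * first_offset == 2048, so sg_srr[0] maps the last 2 KiB of element 1
 * and, since sg_srr_cnt == 2, sg_srr[1] copies element 2 verbatim.
 * cmd->offset grows to 6144, cmd->bufflen shrinks from 12288 to 6144,
 * and the consistency loop re-adds 2048 + 4096 to confirm the length.
 */
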
5774 /*
5775 * Given the "SRR relative offset" (offset of data to retry), determine what
5776 * needs to be retransmitted (data and/or status) and return the mask in
5777 * xmit_type. If retrying data, adjust the command buffer to point to only the
5778 * data that need to be retried, skipping over the data that don't need to be
5779 * retried.
5780 *
5781 * Returns 0 for success or a negative error number.
5782 */
5783 static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
5784 uint32_t srr_rel_offs, int *xmit_type)
5785 {
5786 struct scsi_qla_host *vha = cmd->vha;
5787 int res = 0, rel_offs;
5788
5789 if (srr_rel_offs < cmd->offset ||
5790 srr_rel_offs > cmd->offset + cmd->bufflen) {
5791 *xmit_type = 0;
5792 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101e,
5793 "qla_target(%d): tag %lld: srr_rel_offs %u outside accepted range %u - %u\n",
5794 vha->vp_idx, cmd->se_cmd.tag, srr_rel_offs,
5795 cmd->offset, cmd->offset + cmd->bufflen);
5796 return -EINVAL;
5797 }
5798
5799 /*
5800 * srr_rel_offs is the offset of the data we need from the beginning of
5801 * the *original* buffer.
5802 *
5803 * cmd->offset is the offset of the current cmd scatterlist from the
5804 * beginning of the *original* buffer, which might be nonzero if there
5805 * was a previous SRR and the buffer could not be reset back to its
5806 * original size.
5807 *
5808 * rel_offs is the offset of the data we need from the beginning of the
5809 * current cmd scatterlist.
5810 */
5811 rel_offs = srr_rel_offs - cmd->offset;
5812
5813 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101f,
5814 "qla_target(%d): tag %lld: current buffer [%u - %u); srr_rel_offs=%d, rel_offs=%d\n",
5815 vha->vp_idx, cmd->se_cmd.tag, cmd->offset,
5816 cmd->offset + cmd->bufflen, srr_rel_offs, rel_offs);
5817
5818 *xmit_type = QLA_TGT_XMIT_ALL;
5819
5820 if (rel_offs == cmd->bufflen)
5821 *xmit_type = QLA_TGT_XMIT_STATUS;
5822 else if (rel_offs > 0)
5823 res = qlt_set_data_offset(cmd, rel_offs);
5824
5825 return res;
5826 }
5827
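/*
 * Worked example (hypothetical): a 64 KiB transfer already trimmed once
 * to cmd->offset == 16384, cmd->bufflen == 49152.  An SRR asking for
 * srr_rel_offs == 32768 passes the range check (16384..65536), yields
 * rel_offs == 16384 and trims the buffer again via qlt_set_data_offset().
 * Had rel_offs equalled cmd->bufflen, only status would be resent.
 */
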
5828 /*
5829 * Process a SRR (Sequence Retransmission Request) for a SCSI command once both
5830 * the immediate notify SRR and CTIO SRR have been received from the hw.
5831 *
5832 * Process context, no locks
5833 */
5834 static void qlt_handle_srr(struct scsi_qla_host *vha, struct qla_tgt_srr *srr)
5835 {
5836 struct qla_tgt_cmd *cmd = srr->cmd;
5837 struct se_cmd *se_cmd = &cmd->se_cmd;
5838 struct qla_qpair *qpair = cmd->qpair;
5839 struct qla_hw_data *ha = vha->hw;
5840 uint8_t op = cmd->cdb ? cmd->cdb[0] : 0;
5841 uint32_t srr_rel_offs = le32_to_cpu(srr->imm_ntfy.u.isp24.srr_rel_offs);
5842 uint16_t srr_ui = le16_to_cpu(srr->imm_ntfy.u.isp24.srr_ui);
5843 int xmit_type = 0;
5844 bool xmit_response = false;
5845 bool rdy_to_xfer = false;
5846 bool did_timeout;
5847 bool send_term_exch = false;
5848
5849 spin_lock_irq(qpair->qp_lock_ptr);
5850
5851 WARN_ON(cmd->cmd_sent_to_fw);
5852
5853 cmd->srr = NULL;
5854
5855 if (qlt_srr_is_chip_reset(vha, qpair, srr))
5856 goto out_advance_cmd;
5857
5858 if (cmd->sent_term_exchg || cmd->sess->deleted || srr->aborted) {
5859 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11010,
5860 "qla_target(%d): tag %lld: IMM SRR: cmd already aborted\n",
5861 vha->vp_idx, cmd->se_cmd.tag);
5862
5863 spin_unlock_irq(qpair->qp_lock_ptr);
5864
5865 spin_lock_irq(&ha->hardware_lock);
5866 if (!qlt_srr_is_chip_reset(vha, ha->base_qpair, srr))
5867 qlt_send_term_imm_notif(vha, &srr->imm_ntfy, 1);
5868 spin_unlock_irq(&ha->hardware_lock);
5869
5870 send_term_exch = true;
5871
5872 spin_lock_irq(qpair->qp_lock_ptr);
5873 goto out_advance_cmd;
5874 }
5875
5876 if (srr->reject)
5877 goto out_reject;
5878
5879 /*
5880 * If we receive multiple SRRs for the same command, place a time limit
5881 * on how long we are willing to retry. This timeout should be less
5882 * than SQA_MAX_HW_PENDING_TIME in scst_qla2xxx.c.
5883 */
5884 did_timeout = time_is_before_jiffies64((cmd->jiffies_at_hw_st_entry ? :
5885 cmd->jiffies_at_alloc) + 30 * HZ);
5886
5887 qlt_restore_orig_sg(cmd);
5888
5889 switch (srr_ui) {
5890 case SRR_IU_STATUS:
5891 if (cmd->state != QLA_TGT_STATE_PROCESSED) {
5892 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11011,
5893 "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to unexpected state %d\n",
5894 vha->vp_idx, se_cmd->tag, op,
5895 cmd->state);
5896 goto out_reject;
5897 }
5898
5899 if (did_timeout) {
5900 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11033,
5901 "qla_target(%d): tag %lld, op %x: reject SRR_IU_STATUS due to timeout\n",
5902 vha->vp_idx, se_cmd->tag, op);
5903 goto out_reject;
5904 }
5905
5906 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11012,
5907 "qla_target(%d): tag %lld, op %x: accept SRR_IU_STATUS and retransmit scsi_status=%x\n",
5908 vha->vp_idx, se_cmd->tag, op,
5909 se_cmd->scsi_status);
5910 xmit_type = QLA_TGT_XMIT_STATUS;
5911 xmit_response = true;
5912 cmd->trc_flags |= TRC_SRR_RSP;
5913 break;
5914
5915 case SRR_IU_DATA_IN:
5916 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11013,
5917 "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_IN: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d, scsi_status=%x\n",
5918 vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
5919 cmd->sg_cnt, cmd->offset, srr_rel_offs,
5920 se_cmd->scsi_status);
5921
5922 if (cmd->state != QLA_TGT_STATE_PROCESSED) {
5923 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11014,
5924 "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN due to unexpected state %d\n",
5925 vha->vp_idx, se_cmd->tag, cmd->state);
5926 goto out_reject;
5927 }
5928
5929 /*
5930 * QLA_TGT_STATE_PROCESSED does not necessarily imply data-in
5931 */
5932 if (!qlt_has_data(cmd)) {
5933 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11015,
5934 "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because cmd has no data to send\n",
5935 vha->vp_idx, se_cmd->tag);
5936 goto out_reject;
5937 }
5938
5939 if (!cmd->sg || !cmd->sg_cnt) {
5940 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11016,
5941 "qla_target(%d): tag %lld: reject SRR_IU_DATA_IN because buffer is missing\n",
5942 vha->vp_idx, se_cmd->tag);
5943 goto out_reject;
5944 }
5945
5946 if (did_timeout) {
5947 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11034,
5948 "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_IN due to timeout\n",
5949 vha->vp_idx, se_cmd->tag, op);
5950 goto out_reject;
5951 }
5952
5953 if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
5954 goto out_reject;
5955
5956 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11017,
5957 "qla_target(%d): tag %lld: accept SRR_IU_DATA_IN and retransmit data: bufflen=%d, offset=%d\n",
5958 vha->vp_idx, se_cmd->tag, cmd->bufflen,
5959 cmd->offset);
5960 xmit_response = true;
5961 cmd->trc_flags |= TRC_SRR_RSP;
5962 break;
5963
5964 case SRR_IU_DATA_OUT:
5965 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11018,
5966 "qla_target(%d): tag %lld, op %x: process SRR_IU_DATA_OUT: bufflen=%d, sg_cnt=%d, offset=%d, srr_offset=%d\n",
5967 vha->vp_idx, se_cmd->tag, op, cmd->bufflen,
5968 cmd->sg_cnt, cmd->offset, srr_rel_offs);
5969
5970 if (cmd->state != QLA_TGT_STATE_NEED_DATA) {
5971 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11019,
5972 "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT due to unexpected state %d\n",
5973 vha->vp_idx, se_cmd->tag, cmd->state);
5974 goto out_reject;
5975 }
5976
5977 /*
5978 * QLA_TGT_STATE_NEED_DATA implies there should be data-out
5979 */
5980 if (!qlt_has_data(cmd) || !cmd->sg || !cmd->sg_cnt) {
5981 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101a,
5982 "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT because buffer is missing\n",
5983 vha->vp_idx, se_cmd->tag);
5984 goto out_reject;
5985 }
5986
5987 if (did_timeout) {
5988 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11035,
5989 "qla_target(%d): tag %lld, op %x: reject SRR_IU_DATA_OUT due to timeout\n",
5990 vha->vp_idx, se_cmd->tag, op);
5991 goto out_reject;
5992 }
5993
5994 if (qlt_srr_adjust_data(cmd, srr_rel_offs, &xmit_type) != 0)
5995 goto out_reject;
5996
5997 if (!(xmit_type & QLA_TGT_XMIT_DATA)) {
5998 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101b,
5999 "qla_target(%d): tag %lld: reject SRR_IU_DATA_OUT: bad offset\n",
6000 vha->vp_idx, se_cmd->tag);
6001 goto out_reject;
6002 }
6003
6004 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101c,
6005 "qla_target(%d): tag %lld: accept SRR_IU_DATA_OUT and receive data again: bufflen=%d, offset=%d\n",
6006 vha->vp_idx, se_cmd->tag, cmd->bufflen,
6007 cmd->offset);
6008 cmd->trc_flags |= TRC_SRR_XRDY;
6009 rdy_to_xfer = true;
6010 break;
6011
6012 default:
6013 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1101d,
6014 "qla_target(%d): tag %lld, op %x: reject unknown srr_ui value 0x%x: state=%d, bufflen=%d, offset=%d, srr_offset=%d\n",
6015 vha->vp_idx, se_cmd->tag, op, srr_ui, cmd->state,
6016 cmd->bufflen, cmd->offset, srr_rel_offs);
6017 goto out_reject;
6018 }
6019
6020 qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
6021 NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
6022
6023 spin_unlock_irq(qpair->qp_lock_ptr);
6024
6025 if (xmit_response) {
6026 /* For status and data-in, retransmit the response. */
6027 if (qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status)) {
6028 send_term_exch = true;
6029 spin_lock_irq(qpair->qp_lock_ptr);
6030 goto out_advance_cmd;
6031 }
6032 } else if (rdy_to_xfer) {
6033 /* For data-out, receive data again. */
6034 if (qlt_rdy_to_xfer(cmd)) {
6035 send_term_exch = true;
6036 spin_lock_irq(qpair->qp_lock_ptr);
6037 goto out_advance_cmd;
6038 }
6039 }
6040
6041 return;
6042
6043 out_reject:
6044 qlt_send_notify_ack(qpair, &srr->imm_ntfy, 0, 0, 0,
6045 NOTIFY_ACK_SRR_FLAGS_REJECT,
6046 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
6047 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
6048
6049 out_advance_cmd:
6050 if (!cmd->sent_term_exchg &&
6051 (send_term_exch || cmd->state != QLA_TGT_STATE_NEED_DATA) &&
6052 !qlt_srr_is_chip_reset(vha, qpair, srr)) {
6053 cmd->trc_flags |= TRC_SRR_TERM;
6054 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1);
6055 }
6056 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
6057 /*
6058 * The initiator should abort the command, but if not, try to
6059 * return an error.
6060 */
6061 cmd->srr_failed = 1;
6062 cmd->write_data_transferred = 0;
6063 cmd->state = QLA_TGT_STATE_DATA_IN;
6064 cmd->jiffies_at_hw_st_entry = 0;
6065 vha->hw->tgt.tgt_ops->handle_data(cmd);
6066 } else {
6067 vha->hw->tgt.tgt_ops->free_cmd(cmd);
6068 }
6069 spin_unlock_irq(qpair->qp_lock_ptr);
6070 }
6071
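/*
 * Summary of the dispatch above (informal): SRR_IU_STATUS resends only
 * the SCSI status, SRR_IU_DATA_IN retransmits (part of) the read data,
 * and SRR_IU_DATA_OUT re-issues the write data transfer; any other
 * srr_ui value, a state mismatch, or a retry window past ~30 s rejects
 * the SRR with NOTIFY_ACK_SRR_FLAGS_REJECT.
 */
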
6072 /* Workqueue function for processing SRR work in process context. */
6073 static void qlt_handle_srr_work(struct work_struct *work)
6074 {
6075 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
6076 struct scsi_qla_host *vha = tgt->vha;
6077
6078 ql_dbg(ql_dbg_tgt_mgt, vha, 0x11032,
6079 "qla_target(%d): Entering SRR work\n", vha->vp_idx);
6080
6081 for (;;) {
6082 struct qla_tgt_srr *srr;
6083
6084 spin_lock_irq(&tgt->srr_lock);
6085 srr = list_first_entry_or_null(&tgt->srr_list, typeof(*srr),
6086 srr_list_entry);
6087 if (!srr) {
6088 spin_unlock_irq(&tgt->srr_lock);
6089 break;
6090 }
6091 list_del(&srr->srr_list_entry);
6092 spin_unlock_irq(&tgt->srr_lock);
6093
6094 if (!srr->cmd) {
6095 qlt_handle_srr_imm(vha, srr);
6096 } else {
6097 qlt_handle_srr(vha, srr);
6098 vha->hw->tgt.tgt_ops->put_cmd_ref(srr->cmd);
6099 kfree(srr);
6100 }
6101 }
6102 }
6103
6104 /*
6105 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
6106 */
6107 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
6108 struct imm_ntfy_from_isp *iocb)
6109 {
6110 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6111 struct qla_hw_data *ha = vha->hw;
6112 struct fc_port *sess = NULL, *conflict_sess = NULL;
6113 uint64_t wwn;
6114 port_id_t port_id;
6115 uint16_t loop_id;
6116 uint16_t wd3_lo;
6117 int res = 0;
6118 unsigned long flags;
6119
6120 lockdep_assert_held(&ha->hardware_lock);
6121
6122 wwn = wwn_to_u64(iocb->u.isp24.port_name);
6123
6124 port_id.b.domain = iocb->u.isp24.port_id[2];
6125 port_id.b.area = iocb->u.isp24.port_id[1];
6126 port_id.b.al_pa = iocb->u.isp24.port_id[0];
6127 port_id.b.rsvd_1 = 0;
6128
6129 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
6130
6131 ql_dbg(ql_dbg_disc, vha, 0xf026,
6132 "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
6133 vha->vp_idx, iocb->u.isp24.port_id[2],
6134 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
6135 iocb->u.isp24.status_subcode, loop_id,
6136 iocb->u.isp24.port_name);
6137
6138 /* res = 1 means ack at the end of this routine,
6139 * res = 0 means ack async/later.
6140 */
6141 switch (iocb->u.isp24.status_subcode) {
6142 case ELS_PLOGI:
6143 res = qlt_handle_login(vha, iocb);
6144 break;
6145
6146 case ELS_PRLI:
6147 if (N2N_TOPO(ha)) {
6148 sess = qla2x00_find_fcport_by_wwpn(vha,
6149 iocb->u.isp24.port_name, 1);
6150
6151 if (vha->hw->flags.edif_enabled && sess &&
6152 (!(sess->flags & FCF_FCSP_DEVICE) ||
6153 !sess->edif.authok)) {
6154 ql_dbg(ql_dbg_disc, vha, 0xffff,
6155 "%s %d %8phC Term PRLI due to unauthorize PRLI\n",
6156 __func__, __LINE__, iocb->u.isp24.port_name);
6157 qlt_send_term_imm_notif(vha, iocb, 1);
6158 break;
6159 }
6160
6161 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
6162 ql_dbg(ql_dbg_disc, vha, 0xffff,
6163 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
6164 __func__, __LINE__,
6165 iocb->u.isp24.port_name);
6166 qlt_send_term_imm_notif(vha, iocb, 1);
6167 break;
6168 }
6169
6170 res = qlt_handle_login(vha, iocb);
6171 break;
6172 }
6173
6174 if (IS_SW_RESV_ADDR(port_id)) {
6175 res = 1;
6176 break;
6177 }
6178
6179 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
6180
6181 if (wwn) {
6182 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
6183 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
6184 loop_id, &conflict_sess);
6185 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
6186 }
6187
6188 if (conflict_sess) {
6189 switch (conflict_sess->disc_state) {
6190 case DSC_DELETED:
6191 case DSC_DELETE_PEND:
6192 break;
6193 default:
6194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
6195 "PRLI with conflicting sess %p port %8phC\n",
6196 conflict_sess, conflict_sess->port_name);
6197 conflict_sess->fw_login_state =
6198 DSC_LS_PORT_UNAVAIL;
6199 qlt_send_term_imm_notif(vha, iocb, 1);
6200 res = 0;
6201 break;
6202 }
6203 }
6204
6205 if (sess != NULL) {
6206 bool delete = false;
6207 int sec;
6208
6209 if (vha->hw->flags.edif_enabled && sess &&
6210 (!(sess->flags & FCF_FCSP_DEVICE) ||
6211 !sess->edif.authok)) {
6212 ql_dbg(ql_dbg_disc, vha, 0xffff,
6213 "%s %d %8phC Term PRLI due to unauthorize prli\n",
6214 __func__, __LINE__, iocb->u.isp24.port_name);
6215 qlt_send_term_imm_notif(vha, iocb, 1);
6216 break;
6217 }
6218
6219 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
6220 switch (sess->fw_login_state) {
6221 case DSC_LS_PLOGI_PEND:
6222 case DSC_LS_PLOGI_COMP:
6223 case DSC_LS_PRLI_COMP:
6224 break;
6225 default:
6226 delete = true;
6227 break;
6228 }
6229
6230 switch (sess->disc_state) {
6231 case DSC_UPD_FCPORT:
6232 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
6233 flags);
6234
6235 sec = jiffies_to_msecs(jiffies -
6236 sess->jiffies_at_registration)/1000;
6237 if (sess->sec_since_registration < sec && sec &&
6238 !(sec % 5)) {
6239 sess->sec_since_registration = sec;
6240 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
6241 "%s %8phC : Slow Rport registration(%d Sec)\n",
6242 __func__, sess->port_name, sec);
6243 }
6244 qlt_send_term_imm_notif(vha, iocb, 1);
6245 return 0;
6246
6247 case DSC_LOGIN_PEND:
6248 case DSC_GPDB:
6249 case DSC_LOGIN_COMPLETE:
6250 case DSC_ADISC:
6251 delete = false;
6252 break;
6253 default:
6254 break;
6255 }
6256
6257 if (delete) {
6258 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
6259 flags);
6260 /*
6261 * Impatient initiator sent PRLI before last
6262 * PLOGI could finish. Force it to retry while
6263 * the last one finishes.
6264 */
6265 ql_log(ql_log_warn, sess->vha, 0xf095,
6266 "sess %p PRLI received, before plogi ack.\n",
6267 sess);
6268 qlt_send_term_imm_notif(vha, iocb, 1);
6269 res = 0;
6270 break;
6271 }
6272
6273 /*
6274 * This shouldn't happen under normal circumstances,
6275 * since we have deleted the old session during PLOGI
6276 */
6277 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
6278 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
6279 sess->loop_id, sess, iocb->u.isp24.nport_handle);
6280
6281 sess->local = 0;
6282 sess->loop_id = loop_id;
6283 sess->d_id = port_id;
6284 sess->fw_login_state = DSC_LS_PRLI_PEND;
6285
6286 if (wd3_lo & BIT_7)
6287 sess->conf_compl_supported = 1;
6288
6289 if ((wd3_lo & BIT_4) == 0)
6290 sess->port_type = FCT_INITIATOR;
6291 else
6292 sess->port_type = FCT_TARGET;
6293
6294 spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
6295 }
6296 res = 1; /* send notify ack */
6297
6298 /* Make session global (not used in fabric mode) */
6299 if (ha->current_topology != ISP_CFG_F) {
6300 if (sess) {
6301 ql_dbg(ql_dbg_disc, vha, 0x20fa,
6302 "%s %d %8phC post nack\n",
6303 __func__, __LINE__, sess->port_name);
6304 qla24xx_post_nack_work(vha, sess, iocb,
6305 SRB_NACK_PRLI);
6306 res = 0;
6307 } else {
6308 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6309 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
6310 qla2xxx_wake_dpc(vha);
6311 }
6312 } else {
6313 if (sess) {
6314 ql_dbg(ql_dbg_disc, vha, 0x20fb,
6315 "%s %d %8phC post nack\n",
6316 __func__, __LINE__, sess->port_name);
6317 qla24xx_post_nack_work(vha, sess, iocb,
6318 SRB_NACK_PRLI);
6319 res = 0;
6320 }
6321 }
6322 break;
6323
6324 case ELS_TPRLO:
6325 if (le16_to_cpu(iocb->u.isp24.flags) &
6326 NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
6327 loop_id = 0xFFFF;
6328 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
6329 res = 1;
6330 break;
6331 }
6332 fallthrough;
6333 case ELS_LOGO:
6334 case ELS_PRLO:
6335 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6336 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
6337 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6338
6339 if (sess) {
6340 sess->login_gen++;
6341 sess->fw_login_state = DSC_LS_LOGO_PEND;
6342 sess->logo_ack_needed = 1;
6343 memcpy(sess->iocb, iocb, IOCB_SIZE);
6344 }
6345
6346 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
6347
6348 ql_dbg(ql_dbg_disc, vha, 0x20fc,
6349 "%s: logo %llx res %d sess %p ",
6350 __func__, wwn, res, sess);
6351 if (res == 0) {
6352 /*
6353 * cmd went to the upper layer; look for qlt_xmit_tm_rsp()
6354 * for the LOGO_ACK and session deletion
6355 */
6356 BUG_ON(!sess);
6357 res = 0;
6358 } else {
6359 /* cmd did not go to upper layer. */
6360 if (sess) {
6361 qlt_schedule_sess_for_deletion(sess);
6362 res = 0;
6363 }
6364 /* else the LOGO will be acked */
6365 }
6366 break;
6367 case ELS_PDISC:
6368 case ELS_ADISC:
6369 {
6370 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6371
6372 if (tgt->link_reinit_iocb_pending) {
6373 qlt_send_notify_ack(ha->base_qpair,
6374 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
6375 tgt->link_reinit_iocb_pending = 0;
6376 }
6377
6378 sess = qla2x00_find_fcport_by_wwpn(vha,
6379 iocb->u.isp24.port_name, 1);
6380 if (sess) {
6381 ql_dbg(ql_dbg_disc, vha, 0x20fd,
6382 "sess %p lid %d|%d DS %d LS %d\n",
6383 sess, sess->loop_id, loop_id,
6384 sess->disc_state, sess->fw_login_state);
6385 }
6386
6387 res = 1; /* send notify ack */
6388 break;
6389 }
6390
6391 case ELS_FLOGI: /* should never happen */
6392 default:
6393 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
6394 "qla_target(%d): Unsupported ELS command %x "
6395 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
6396 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
6397 break;
6398 }
6399
6400 ql_dbg(ql_dbg_disc, vha, 0xf026,
6401 "qla_target(%d): Exit ELS opcode: 0x%02x res %d\n",
6402 vha->vp_idx, iocb->u.isp24.status_subcode, res);
6403
6404 return res;
6405 }
6406
6407 /*
6408 * ha->hardware_lock supposed to be held on entry.
6409 * Might drop it, then reacquire.
6410 */
6411 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
6412 struct imm_ntfy_from_isp *iocb)
6413 {
6414 struct qla_hw_data *ha = vha->hw;
6415 uint32_t add_flags = 0;
6416 int send_notify_ack = 1;
6417 uint16_t status;
6418
6419 lockdep_assert_held(&ha->hardware_lock);
6420
6421 status = le16_to_cpu(iocb->u.isp2x.status);
6422 switch (status) {
6423 case IMM_NTFY_LIP_RESET:
6424 {
6425 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
6426 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
6427 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
6428 iocb->u.isp24.status_subcode);
6429
6430 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
6431 send_notify_ack = 0;
6432 break;
6433 }
6434
6435 case IMM_NTFY_LIP_LINK_REINIT:
6436 {
6437 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6438
6439 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
6440 "qla_target(%d): LINK REINIT (loop %#x, "
6441 "subcode %x)\n", vha->vp_idx,
6442 le16_to_cpu(iocb->u.isp24.nport_handle),
6443 iocb->u.isp24.status_subcode);
6444 if (tgt->link_reinit_iocb_pending) {
6445 qlt_send_notify_ack(ha->base_qpair,
6446 &tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0);
6447 }
6448 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
6449 tgt->link_reinit_iocb_pending = 1;
6450 /*
6451 * QLogic requires waiting after a LINK REINIT for possible
6452 * PDISC or ADISC ELS commands
6453 */
6454 send_notify_ack = 0;
6455 break;
6456 }
6457
6458 case IMM_NTFY_PORT_LOGOUT:
6459 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
6460 "qla_target(%d): Port logout (loop "
6461 "%#x, subcode %x)\n", vha->vp_idx,
6462 le16_to_cpu(iocb->u.isp24.nport_handle),
6463 iocb->u.isp24.status_subcode);
6464
6465 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
6466 send_notify_ack = 0;
6467 /* The sessions will be cleared in the callback, if needed */
6468 break;
6469
6470 case IMM_NTFY_GLBL_TPRLO:
6471 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
6472 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
6473 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
6474 send_notify_ack = 0;
6475 /* The sessions will be cleared in the callback, if needed */
6476 break;
6477
6478 case IMM_NTFY_PORT_CONFIG:
6479 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
6480 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
6481 status);
6482 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
6483 send_notify_ack = 0;
6484 /* The sessions will be cleared in the callback, if needed */
6485 break;
6486
6487 case IMM_NTFY_GLBL_LOGO:
6488 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
6489 "qla_target(%d): Link failure detected\n",
6490 vha->vp_idx);
6491 /* I_T nexus loss */
6492 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
6493 send_notify_ack = 0;
6494 break;
6495
6496 case IMM_NTFY_IOCB_OVERFLOW:
6497 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
6498 "qla_target(%d): Cannot provide requested "
6499 "capability (IOCB overflowed the immediate notify "
6500 "resource count)\n", vha->vp_idx);
6501 break;
6502
6503 case IMM_NTFY_ABORT_TASK:
6504 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
6505 "qla_target(%d): Abort Task (S %08x I %#x -> "
6506 "L %#x)\n", vha->vp_idx,
6507 le16_to_cpu(iocb->u.isp2x.seq_id),
6508 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
6509 le16_to_cpu(iocb->u.isp2x.lun));
6510 if (qlt_abort_task(vha, iocb) == 0)
6511 send_notify_ack = 0;
6512 break;
6513
6514 case IMM_NTFY_RESOURCE:
6515 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
6516 "qla_target(%d): Out of resources, host %ld\n",
6517 vha->vp_idx, vha->host_no);
6518 break;
6519
6520 case IMM_NTFY_MSG_RX:
6521 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
6522 "qla_target(%d): Immediate notify task %x\n",
6523 vha->vp_idx, iocb->u.isp2x.task_flags);
6524 break;
6525
6526 case IMM_NTFY_ELS:
6527 if (qlt_24xx_handle_els(vha, iocb) == 0)
6528 send_notify_ack = 0;
6529 break;
6530
6531 case IMM_NTFY_SRR:
6532 qlt_prepare_srr_imm(vha, iocb);
6533 send_notify_ack = 0;
6534 break;
6535
6536 default:
6537 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
6538 "qla_target(%d): Received unknown immediate "
6539 "notify status %x\n", vha->vp_idx, status);
6540 break;
6541 }
6542
6543 if (send_notify_ack)
6544 qlt_send_notify_ack(ha->base_qpair, iocb, add_flags, 0, 0, 0,
6545 0, 0);
6546 }
6547
6548 /*
6549 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire.
6550 * This function sends busy to ISP 2xxx or 24xx.
6551 */
6552 static int __qlt_send_busy(struct qla_qpair *qpair,
6553 struct atio_from_isp *atio, uint16_t status)
6554 {
6555 struct scsi_qla_host *vha = qpair->vha;
6556 struct ctio7_to_24xx *ctio24;
6557 struct qla_hw_data *ha = vha->hw;
6558 request_t *pkt;
6559 struct fc_port *sess = NULL;
6560 unsigned long flags;
6561 u16 temp;
6562 port_id_t id;
6563
6564 id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id);
6565
6566 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6567 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
6568 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6569 if (!sess) {
6570 qlt_send_term_exchange(qpair, NULL, atio, 1);
6571 return 0;
6572 }
6573 /* Sending a marker isn't necessary, since we are called from an ISR */
6574
6575 pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL);
6576 if (!pkt) {
6577 ql_dbg(ql_dbg_io, vha, 0x3063,
6578 "qla_target(%d): %s failed: unable to allocate "
6579 "request packet", vha->vp_idx, __func__);
6580 return -ENOMEM;
6581 }
6582
6583 qpair->tgt_counters.num_q_full_sent++;
6584 pkt->entry_count = 1;
6585 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
6586
6587 ctio24 = (struct ctio7_to_24xx *)pkt;
6588 ctio24->entry_type = CTIO_TYPE7;
6589 ctio24->nport_handle = cpu_to_le16(sess->loop_id);
6590 ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
6591 ctio24->vp_index = vha->vp_idx;
6592 ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
6593 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
6594 temp = (atio->u.isp24.attr << 9) |
6595 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
6596 CTIO7_FLAGS_DONT_RET_CTIO;
6597 ctio24->u.status1.flags = cpu_to_le16(temp);
6598 /*
6599 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
6600 * if explicit confirmation is used.
6601 */
6602 ctio24->u.status1.ox_id =
6603 cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
6604 ctio24->u.status1.scsi_status = cpu_to_le16(status);
6605
6606 ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
6607
6608 if (ctio24->u.status1.residual != 0)
6609 ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER);
6610
6611 /* Memory Barrier */
6612 wmb();
6613 if (qpair->reqq_start_iocbs)
6614 qpair->reqq_start_iocbs(qpair);
6615 else
6616 qla2x00_start_iocbs(vha, qpair->req);
6617 return 0;
6618 }
6619
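/*
 * Note on the flags word above (illustrative): for a SIMPLE task
 * attribute (atio attr == 0) the CTIO carries
 * CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
 * CTIO7_FLAGS_DONT_RET_CTIO, i.e. "send SCSI status, no data phase, no
 * completion CTIO back to the driver"; the ATIO task attribute is
 * shifted into bits 9 and up.
 */
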
6620 /*
6621 * This routine is used to allocate a command for either a QFull condition
6622 * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
6623 * out previously.
6624 */
6625 static void
6626 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
6627 struct atio_from_isp *atio, uint16_t status, int qfull)
6628 {
6629 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6630 struct qla_hw_data *ha = vha->hw;
6631 struct fc_port *sess;
6632 struct qla_tgt_cmd *cmd;
6633 unsigned long flags;
6634
6635 if (unlikely(tgt->tgt_stop)) {
6636 ql_dbg(ql_dbg_io, vha, 0x300a,
6637 "New command while device %p is shutting down\n", tgt);
6638 return;
6639 }
6640
6641 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
6642 vha->hw->tgt.num_qfull_cmds_dropped++;
6643 if (vha->hw->tgt.num_qfull_cmds_dropped >
6644 vha->qla_stats.stat_max_qfull_cmds_dropped)
6645 vha->qla_stats.stat_max_qfull_cmds_dropped =
6646 vha->hw->tgt.num_qfull_cmds_dropped;
6647
6648 ql_dbg(ql_dbg_io, vha, 0x3068,
6649 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
6650 vha->vp_idx, __func__,
6651 vha->hw->tgt.num_qfull_cmds_dropped);
6652
6653 qlt_chk_exch_leak_thresh_hold(vha);
6654 return;
6655 }
6656
6657 sess = ha->tgt.tgt_ops->find_sess_by_s_id
6658 (vha, atio->u.isp24.fcp_hdr.s_id);
6659 if (!sess)
6660 return;
6661
6662 cmd = ha->tgt.tgt_ops->get_cmd(sess);
6663 if (!cmd) {
6664 ql_dbg(ql_dbg_io, vha, 0x3009,
6665 "qla_target(%d): %s: Allocation of cmd failed\n",
6666 vha->vp_idx, __func__);
6667
6668 vha->hw->tgt.num_qfull_cmds_dropped++;
6669 if (vha->hw->tgt.num_qfull_cmds_dropped >
6670 vha->qla_stats.stat_max_qfull_cmds_dropped)
6671 vha->qla_stats.stat_max_qfull_cmds_dropped =
6672 vha->hw->tgt.num_qfull_cmds_dropped;
6673
6674 qlt_chk_exch_leak_thresh_hold(vha);
6675 return;
6676 }
6677
6678 qlt_incr_num_pend_cmds(vha);
6679 INIT_LIST_HEAD(&cmd->cmd_list);
6680 memcpy_atio(&cmd->atio, atio);
6681
6682 cmd->tgt = vha->vha_tgt.qla_tgt;
6683 cmd->vha = vha;
6684 cmd->reset_count = ha->base_qpair->chip_reset;
6685 cmd->q_full = 1;
6686 cmd->qpair = ha->base_qpair;
6687 cmd->cdb = &cmd->atio.u.isp24.fcp_cmnd.cdb[0];
6688 cmd->cdb_len = 16;
6689
6690 if (qfull) {
6691 cmd->q_full = 1;
6692 /* NOTE: borrowing the state field to carry the status */
6693 cmd->state = status;
6694 } else
6695 cmd->term_exchg = 1;
6696
6697 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
6698 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
6699
6700 vha->hw->tgt.num_qfull_cmds_alloc++;
6701 if (vha->hw->tgt.num_qfull_cmds_alloc >
6702 vha->qla_stats.stat_max_qfull_cmds_alloc)
6703 vha->qla_stats.stat_max_qfull_cmds_alloc =
6704 vha->hw->tgt.num_qfull_cmds_alloc;
6705 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
6706 }
6707
6708 static void
6709 qlt_send_busy(struct qla_qpair *qpair, struct atio_from_isp *atio,
6710 uint16_t status)
6711 {
6712 int rc = 0;
6713 struct scsi_qla_host *vha = qpair->vha;
6714
6715 rc = __qlt_send_busy(qpair, atio, status);
6716 if (rc == -ENOMEM)
6717 qlt_alloc_qfull_cmd(vha, atio, status, 1);
6718 }
6719
6720 static int
6721 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
6722 struct atio_from_isp *atio, uint8_t ha_locked)
6723 {
6724 struct qla_hw_data *ha = vha->hw;
6725 unsigned long flags;
6726
6727 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
6728 return 0;
6729
6730 if (!ha_locked)
6731 spin_lock_irqsave(&ha->hardware_lock, flags);
6732 qlt_send_busy(qpair, atio, qla_sam_status);
6733 if (!ha_locked)
6734 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6735
6736 return 1;
6737 }
6738
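/*
 * Usage note: both ATIO intake paths call this before
 * qlt_handle_cmd_for_atio(); once num_pend_cmds reaches
 * Q_FULL_THRESH_HOLD(ha), the new command is answered with BUSY
 * (qla_sam_status) instead of being queued and the caller drops it.
 */
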
6739 /* ha->hardware_lock supposed to be held on entry */
6740 /* called via callback from qla2xxx */
6741 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
6742 struct atio_from_isp *atio, uint8_t ha_locked)
6743 {
6744 struct qla_hw_data *ha = vha->hw;
6745 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6746 int rc;
6747 unsigned long flags = 0;
6748
6749 if (unlikely(tgt == NULL)) {
6750 ql_dbg(ql_dbg_tgt, vha, 0x3064,
6751 "ATIO pkt, but no tgt (ha %p)", ha);
6752 return;
6753 }
6754 /*
6755 * In tgt_stop mode we also should allow all requests to pass.
6756 * Otherwise, some commands can get stuck.
6757 */
6758
6759 tgt->atio_irq_cmd_count++;
6760
6761 switch (atio->u.raw.entry_type) {
6762 case ATIO_TYPE7:
6763 if (unlikely(atio->u.isp24.exchange_addr ==
6764 cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) {
6765 ql_dbg(ql_dbg_io, vha, 0x3065,
6766 "qla_target(%d): ATIO_TYPE7 "
6767 "received with UNKNOWN exchange address, "
6768 "sending QUEUE_FULL\n", vha->vp_idx);
6769 if (!ha_locked)
6770 spin_lock_irqsave(&ha->hardware_lock, flags);
6771 qlt_send_busy(ha->base_qpair, atio, qla_sam_status);
6772 if (!ha_locked)
6773 spin_unlock_irqrestore(&ha->hardware_lock,
6774 flags);
6775 break;
6776 }
6777
6778 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
6779 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
6780 atio, ha_locked);
6781 if (rc != 0) {
6782 tgt->atio_irq_cmd_count--;
6783 return;
6784 }
6785 rc = qlt_handle_cmd_for_atio(vha, atio);
6786 } else {
6787 rc = qlt_handle_task_mgmt(vha, atio);
6788 }
6789 if (unlikely(rc != 0)) {
6790 if (!ha_locked)
6791 spin_lock_irqsave(&ha->hardware_lock, flags);
6792 switch (rc) {
6793 case -ENODEV:
6794 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
6795 "qla_target: Unable to send command to target\n");
6796 break;
6797 case -EBADF:
6798 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
6799 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
6800 qlt_send_term_exchange(ha->base_qpair, NULL,
6801 atio, 1);
6802 break;
6803 case -EBUSY:
6804 ql_dbg(ql_dbg_tgt, vha, 0xe060,
6805 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
6806 vha->vp_idx);
6807 qlt_send_busy(ha->base_qpair, atio,
6808 tc_sam_status);
6809 break;
6810 default:
6811 ql_dbg(ql_dbg_tgt, vha, 0xe060,
6812 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
6813 vha->vp_idx);
6814 qlt_send_busy(ha->base_qpair, atio,
6815 qla_sam_status);
6816 break;
6817 }
6818 if (!ha_locked)
6819 spin_unlock_irqrestore(&ha->hardware_lock,
6820 flags);
6821 }
6822 break;
6823
6824 case IMMED_NOTIFY_TYPE:
6825 {
6826 if (unlikely(atio->u.isp2x.entry_status != 0)) {
6827 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
6828 "qla_target(%d): Received ATIO packet %x "
6829 "with error status %x\n", vha->vp_idx,
6830 atio->u.raw.entry_type,
6831 atio->u.isp2x.entry_status);
6832 break;
6833 }
6834 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
6835
6836 if (!ha_locked)
6837 spin_lock_irqsave(&ha->hardware_lock, flags);
6838 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
6839 if (!ha_locked)
6840 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6841 break;
6842 }
6843
6844 default:
6845 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
6846 "qla_target(%d): Received unknown ATIO atio "
6847 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
6848 break;
6849 }
6850
6851 tgt->atio_irq_cmd_count--;
6852 }
6853
6854 /*
6855 * qpair lock is assumed to be held
6856 * rc = 0: send terminate & ABTS response
6857 * rc != 0: do not send terminate & ABTS response
6858 */
6859 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
6860 struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
6861 {
6862 struct qla_hw_data *ha = vha->hw;
6863 int rc = 0;
6864
6865 /*
6866 * Detect unresolved exchange. If the same ABTS is unable
6867 * to terminate an existing command and the same ABTS loops
6868 * between FW & Driver, then force FW dump. Under 1 jiff,
6869 * we should see multiple loops.
6870 */
6871 if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
6872 qpair->retry_term_jiff == jiffies) {
6873 /* found existing exchange */
6874 qpair->retry_term_cnt++;
6875 if (qpair->retry_term_cnt >= 5) {
6876 rc = -EIO;
6877 qpair->retry_term_cnt = 0;
6878 ql_log(ql_log_warn, vha, 0xffff,
6879 "Unable to send ABTS Respond. Dumping firmware.\n");
6880 ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
6881 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
6882
6883 if (qpair == ha->base_qpair)
6884 ha->isp_ops->fw_dump(vha);
6885 else
6886 qla2xxx_dump_fw(vha);
6887
6888 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6889 qla2xxx_wake_dpc(vha);
6890 }
6891 } else if (qpair->retry_term_jiff != jiffies) {
6892 qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
6893 qpair->retry_term_cnt = 0;
6894 qpair->retry_term_jiff = jiffies;
6895 }
6896
6897 return rc;
6898 }
6899
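/*
 * Example timeline (illustrative): the same exchange_addr_to_abort
 * failing repeatedly within one jiffy increments retry_term_cnt; on the
 * fifth hit the function dumps firmware, schedules an ISP abort and
 * returns -EIO so the caller skips the term/ABTS response.  A failure
 * in a later jiffy restarts the count from zero.
 */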
6900
6901 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
6902 struct rsp_que *rsp, response_t *pkt)
6903 {
6904 struct abts_resp_from_24xx_fw *entry =
6905 (struct abts_resp_from_24xx_fw *)pkt;
6906 u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
6907 struct qla_tgt_mgmt_cmd *mcmd;
6908 struct qla_hw_data *ha = vha->hw;
6909
6910 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, TYPE_TGT_TMCMD, pkt);
6911 if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
6912 ql_dbg(ql_dbg_async, vha, 0xe064,
6913 "qla_target(%d): ABTS Comp without mcmd\n",
6914 vha->vp_idx);
6915 return;
6916 }
6917
6918 if (mcmd)
6919 vha = mcmd->vha;
6920 vha->vha_tgt.qla_tgt->abts_resp_expected--;
6921
6922 ql_dbg(ql_dbg_tgt, vha, 0xe038,
6923 "ABTS_RESP_24XX: compl_status %x\n",
6924 entry->compl_status);
6925
6926 if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
6927 if (le32_to_cpu(entry->error_subcode1) == 0x1E &&
6928 le32_to_cpu(entry->error_subcode2) == 0) {
6929 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
6930 qlt_free_ul_mcmd(ha, mcmd);
6931 return;
6932 }
6933 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
6934 pkt, mcmd);
6935 } else {
6936 ql_dbg(ql_dbg_tgt, vha, 0xe063,
6937 "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
6938 vha->vp_idx, entry->compl_status,
6939 entry->error_subcode1,
6940 entry->error_subcode2);
6941 qlt_free_ul_mcmd(ha, mcmd);
6942 }
6943 } else if (mcmd) {
6944 qlt_free_ul_mcmd(ha, mcmd);
6945 }
6946 }
6947
6948 /* ha->hardware_lock supposed to be held on entry */
6949 /* called via callback from qla2xxx */
6950 static void qlt_response_pkt(struct scsi_qla_host *vha,
6951 struct rsp_que *rsp, response_t *pkt)
6952 {
6953 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6954
6955 if (unlikely(tgt == NULL)) {
6956 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
6957 "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n",
6958 vha->vp_idx, pkt->entry_type, vha->hw);
6959 return;
6960 }
6961
6962 /*
6963 * In tgt_stop mode we also should allow all requests to pass.
6964 * Otherwise, some commands can get stuck.
6965 */
6966
6967 switch (pkt->entry_type) {
6968 case CTIO_CRC2:
6969 case CTIO_TYPE7:
6970 {
6971 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
6972
6973 qlt_do_ctio_completion(vha, rsp, entry->handle,
6974 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
6975 entry);
6976 break;
6977 }
6978
6979 case ACCEPT_TGT_IO_TYPE:
6980 {
6981 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
6982 int rc;
6983
6984 if (atio->u.isp2x.status !=
6985 cpu_to_le16(ATIO_CDB_VALID)) {
6986 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
6987 "qla_target(%d): ATIO with error "
6988 "status %x received\n", vha->vp_idx,
6989 le16_to_cpu(atio->u.isp2x.status));
6990 break;
6991 }
6992
6993 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
6994 if (rc != 0)
6995 return;
6996
6997 rc = qlt_handle_cmd_for_atio(vha, atio);
6998 if (unlikely(rc != 0)) {
6999 switch (rc) {
7000 case -ENODEV:
7001 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
7002 "qla_target: Unable to send command to target\n");
7003 break;
7004 case -EBADF:
7005 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
7006 "qla_target: Unable to send command to target, sending TERM EXCHANGE for rsp\n");
7007 qlt_send_term_exchange(rsp->qpair, NULL,
7008 atio, 1);
7009 break;
7010 case -EBUSY:
7011 ql_dbg(ql_dbg_tgt, vha, 0xe060,
7012 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
7013 vha->vp_idx);
7014 qlt_send_busy(rsp->qpair, atio,
7015 tc_sam_status);
7016 break;
7017 default:
7018 ql_dbg(ql_dbg_tgt, vha, 0xe060,
7019 "qla_target(%d): Unable to send command to target, sending BUSY status\n",
7020 vha->vp_idx);
7021 qlt_send_busy(rsp->qpair, atio,
7022 qla_sam_status);
7023 break;
7024 }
7025 }
7026 }
7027 break;
7028
7029 case IMMED_NOTIFY_TYPE:
7030 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
7031 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
7032 break;
7033
7034 case NOTIFY_ACK_TYPE:
7035 if (tgt->notify_ack_expected > 0) {
7036 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
7037
7038 ql_dbg(ql_dbg_tgt, vha, 0xe036,
7039 "NOTIFY_ACK seq %08x status %x\n",
7040 le16_to_cpu(entry->u.isp2x.seq_id),
7041 le16_to_cpu(entry->u.isp2x.status));
7042 tgt->notify_ack_expected--;
7043 if (entry->u.isp2x.status !=
7044 cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
7045 ql_dbg(ql_dbg_tgt, vha, 0xe061,
7046 "qla_target(%d): NOTIFY_ACK "
7047 "failed %x\n", vha->vp_idx,
7048 le16_to_cpu(entry->u.isp2x.status));
7049 }
7050 } else {
7051 ql_dbg(ql_dbg_tgt, vha, 0xe062,
7052 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
7053 vha->vp_idx);
7054 }
7055 break;
7056
7057 case ABTS_RECV_24XX:
7058 ql_dbg(ql_dbg_tgt, vha, 0xe037,
7059 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
7060 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
7061 break;
7062
7063 case ABTS_RESP_24XX:
7064 if (tgt->abts_resp_expected > 0) {
7065 qlt_handle_abts_completion(vha, rsp, pkt);
7066 } else {
7067 ql_dbg(ql_dbg_tgt, vha, 0xe064,
7068 "qla_target(%d): Unexpected ABTS_RESP_24XX "
7069 "received\n", vha->vp_idx);
7070 }
7071 break;
7072
7073 default:
7074 ql_dbg(ql_dbg_tgt, vha, 0xe065,
7075 "qla_target(%d): Received unknown response pkt "
7076 "type %x\n", vha->vp_idx, pkt->entry_type);
7077 break;
7078 }
7079
7080 }
7081
7082 /*
7083 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
7084 */
qlt_async_event(uint16_t code,struct scsi_qla_host * vha,uint16_t * mailbox)7085 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
7086 uint16_t *mailbox)
7087 {
7088 struct qla_hw_data *ha = vha->hw;
7089 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
7090 int login_code;
7091
7092 if (!tgt || tgt->tgt_stop || tgt->tgt_stopped)
7093 return;
7094
7095 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
7096 IS_QLA2100(ha))
7097 return;
7098 /*
7099 * In tgt_stop mode we also should allow all requests to pass.
7100 * Otherwise, some commands can stuck.
7101 */
7102
7103
7104 switch (code) {
7105 case MBA_RESET: /* Reset */
7106 case MBA_SYSTEM_ERR: /* System Error */
7107 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
7108 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
7109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
7110 "qla_target(%d): System error async event %#x "
7111 "occurred", vha->vp_idx, code);
7112 break;
7113 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
7114 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
7115 break;
7116
7117 case MBA_LOOP_UP:
7118 {
7119 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
7120 "qla_target(%d): Async LOOP_UP occurred "
7121 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
7122 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
7123 if (tgt->link_reinit_iocb_pending) {
7124 qlt_send_notify_ack(ha->base_qpair,
7125 &tgt->link_reinit_iocb,
7126 0, 0, 0, 0, 0, 0);
7127 tgt->link_reinit_iocb_pending = 0;
7128 }
7129 break;
7130 }
7131
7132 case MBA_LIP_OCCURRED:
7133 case MBA_LOOP_DOWN:
7134 case MBA_LIP_RESET:
7135 case MBA_RSCN_UPDATE:
7136 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
7137 "qla_target(%d): Async event %#x occurred "
7138 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
7139 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
7140 break;
7141
7142 case MBA_REJECTED_FCP_CMD:
7143 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
7144 "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)",
7145 vha->vp_idx,
7146 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
7147
7148 if (mailbox[3] == 1) {
7149 /* exchange starvation. */
7150 vha->hw->exch_starvation++;
7151 if (vha->hw->exch_starvation > 5) {
7152 ql_log(ql_log_warn, vha, 0xd03a,
7153 "Exchange starvation-. Resetting RISC\n");
7154
7155 vha->hw->exch_starvation = 0;
7156 if (IS_P3P_TYPE(vha->hw))
7157 set_bit(FCOE_CTX_RESET_NEEDED,
7158 &vha->dpc_flags);
7159 else
7160 set_bit(ISP_ABORT_NEEDED,
7161 &vha->dpc_flags);
7162 qla2xxx_wake_dpc(vha);
7163 }
7164 }
7165 break;
7166
7167 case MBA_PORT_UPDATE:
7168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
7169 "qla_target(%d): Port update async event %#x "
7170 "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
7171 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
7172 mailbox[0], mailbox[1], mailbox[2], mailbox[3]);
7173
7174 login_code = mailbox[2];
7175 if (login_code == 0x4) {
7176 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
7177 "Async MB 2: Got PLOGI Complete\n");
7178 vha->hw->exch_starvation = 0;
7179 } else if (login_code == 0x7)
7180 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
7181 "Async MB 2: Port Logged Out\n");
7182 break;
7183 default:
7184 break;
7185 }
7186
7187 }
7188
qlt_get_port_database(struct scsi_qla_host * vha,uint16_t loop_id)7189 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
7190 uint16_t loop_id)
7191 {
7192 fc_port_t *fcport, *tfcp, *del;
7193 int rc;
7194 unsigned long flags;
7195 u8 newfcport = 0;
7196
7197 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
7198 if (!fcport) {
7199 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
7200 "qla_target(%d): Allocation of tmp FC port failed",
7201 vha->vp_idx);
7202 return NULL;
7203 }
7204
7205 fcport->loop_id = loop_id;
7206
7207 rc = qla24xx_gpdb_wait(vha, fcport, 0);
7208 if (rc != QLA_SUCCESS) {
7209 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
7210 "qla_target(%d): Failed to retrieve fcport "
7211 "information -- get_port_database() returned %x "
7212 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
7213 kfree(fcport);
7214 return NULL;
7215 }
7216
7217 del = NULL;
7218 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
7219 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
7220
7221 if (tfcp) {
7222 tfcp->d_id = fcport->d_id;
7223 tfcp->port_type = fcport->port_type;
7224 tfcp->supported_classes = fcport->supported_classes;
7225 tfcp->flags |= fcport->flags;
7226 tfcp->scan_state = QLA_FCPORT_FOUND;
7227
7228 del = fcport;
7229 fcport = tfcp;
7230 } else {
7231 if (vha->hw->current_topology == ISP_CFG_F)
7232 fcport->flags |= FCF_FABRIC_DEVICE;
7233
7234 list_add_tail(&fcport->list, &vha->vp_fcports);
7235 if (!IS_SW_RESV_ADDR(fcport->d_id))
7236 vha->fcport_count++;
7237 fcport->login_gen++;
7238 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
7239 fcport->login_succ = 1;
7240 newfcport = 1;
7241 }
7242
7243 fcport->deleted = 0;
7244 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
7245
7246 switch (vha->host->active_mode) {
7247 case MODE_INITIATOR:
7248 case MODE_DUAL:
7249 if (newfcport) {
7250 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
7251 qla24xx_sched_upd_fcport(fcport);
7252 } else {
7253 ql_dbg(ql_dbg_disc, vha, 0x20ff,
7254 "%s %d %8phC post gpsc fcp_cnt %d\n",
7255 __func__, __LINE__, fcport->port_name, vha->fcport_count);
7256 qla24xx_post_gpsc_work(vha, fcport);
7257 }
7258 }
7259 break;
7260
7261 case MODE_TARGET:
7262 default:
7263 break;
7264 }
7265 if (del)
7266 qla2x00_free_fcport(del);
7267
7268 return fcport;
7269 }
7270
7271 /* Must be called under tgt_mutex */
qlt_make_local_sess(struct scsi_qla_host * vha,be_id_t s_id)7272 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
7273 be_id_t s_id)
7274 {
7275 struct fc_port *sess = NULL;
7276 fc_port_t *fcport = NULL;
7277 int rc, global_resets;
7278 uint16_t loop_id = 0;
7279
7280 if (s_id.domain == 0xFF && s_id.area == 0xFC) {
7281 /*
7282 * This is Domain Controller, so it should be
7283 * OK to drop SCSI commands from it.
7284 */
7285 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
7286 "Unable to find initiator with S_ID %x:%x:%x",
7287 s_id.domain, s_id.area, s_id.al_pa);
7288 return NULL;
7289 }
7290
7291 mutex_lock(&vha->vha_tgt.tgt_mutex);
7292
7293 retry:
7294 global_resets =
7295 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
7296
7297 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
7298 if (rc != 0) {
7299 mutex_unlock(&vha->vha_tgt.tgt_mutex);
7300
7301 ql_log(ql_log_info, vha, 0xf071,
7302 "qla_target(%d): Unable to find "
7303 "initiator with S_ID %x:%x:%x",
7304 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
7305
7306 if (rc == -ENOENT) {
7307 qlt_port_logo_t logo;
7308
7309 logo.id = be_to_port_id(s_id);
7310 logo.cmd_count = 1;
7311 qlt_send_first_logo(vha, &logo);
7312 }
7313
7314 return NULL;
7315 }
7316
7317 fcport = qlt_get_port_database(vha, loop_id);
7318 if (!fcport) {
7319 mutex_unlock(&vha->vha_tgt.tgt_mutex);
7320 return NULL;
7321 }
7322
7323 if (global_resets !=
7324 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
7325 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
7326 "qla_target(%d): global reset during session discovery "
7327 "(counter was %d, new %d), retrying", vha->vp_idx,
7328 global_resets,
7329 atomic_read(&vha->vha_tgt.
7330 qla_tgt->tgt_global_resets_count));
7331 goto retry;
7332 }
7333
7334 sess = qlt_create_sess(vha, fcport, true);
7335
7336 mutex_unlock(&vha->vha_tgt.tgt_mutex);
7337
7338 return sess;
7339 }
7340
qlt_abort_work(struct qla_tgt * tgt,struct qla_tgt_sess_work_param * prm)7341 static void qlt_abort_work(struct qla_tgt *tgt,
7342 struct qla_tgt_sess_work_param *prm)
7343 {
7344 struct scsi_qla_host *vha = tgt->vha;
7345 struct qla_hw_data *ha = vha->hw;
7346 struct fc_port *sess = NULL;
7347 unsigned long flags = 0, flags2 = 0;
7348 be_id_t s_id;
7349 int rc;
7350
7351 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
7352
7353 if (tgt->tgt_stop)
7354 goto out_term2;
7355
7356 s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id);
7357
7358 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
7359 if (!sess) {
7360 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
7361
7362 sess = qlt_make_local_sess(vha, s_id);
7363 /* sess has got an extra creation ref */
7364
7365 spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
7366 if (!sess)
7367 goto out_term2;
7368 } else {
7369 if (sess->deleted) {
7370 sess = NULL;
7371 goto out_term2;
7372 }
7373
7374 if (!kref_get_unless_zero(&sess->sess_kref)) {
7375 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
7376 "%s: kref_get fail %8phC \n",
7377 __func__, sess->port_name);
7378 sess = NULL;
7379 goto out_term2;
7380 }
7381 }
7382
7383 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
7384 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
7385
7386 ha->tgt.tgt_ops->put_sess(sess);
7387
7388 if (rc != 0)
7389 goto out_term;
7390 return;
7391
7392 out_term2:
7393 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
7394
7395 out_term:
7396 spin_lock_irqsave(&ha->hardware_lock, flags);
7397 qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts,
7398 FCP_TMF_REJECTED, false);
7399 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7400 }
7401
qlt_sess_work_fn(struct work_struct * work)7402 static void qlt_sess_work_fn(struct work_struct *work)
7403 {
7404 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
7405 struct scsi_qla_host *vha = tgt->vha;
7406 unsigned long flags;
7407
7408 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
7409
7410 spin_lock_irqsave(&tgt->sess_work_lock, flags);
7411 while (!list_empty(&tgt->sess_works_list)) {
7412 struct qla_tgt_sess_work_param *prm = list_entry(
7413 tgt->sess_works_list.next, typeof(*prm),
7414 sess_works_list_entry);
7415
7416 /*
7417 * This work can be scheduled on several CPUs at time, so we
7418 * must delete the entry to eliminate double processing
7419 */
7420 list_del(&prm->sess_works_list_entry);
7421
7422 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
7423
7424 switch (prm->type) {
7425 case QLA_TGT_SESS_WORK_ABORT:
7426 qlt_abort_work(tgt, prm);
7427 break;
7428 default:
7429 BUG_ON(1);
7430 break;
7431 }
7432
7433 spin_lock_irqsave(&tgt->sess_work_lock, flags);
7434
7435 kfree(prm);
7436 }
7437 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
7438 }
7439
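/*
 * Illustrative sketch (not part of the driver): an enqueue site feeding
 * the worker above would build a work parameter, link it under
 * sess_work_lock and kick tgt->sess_work; for an ABTS, roughly:
 *
 *	struct qla_tgt_sess_work_param *prm;
 *
 *	prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
 *	if (!prm)
 *		return -ENOMEM;
 *	prm->type = QLA_TGT_SESS_WORK_ABORT;
 *	memcpy(&prm->abts, abts, sizeof(prm->abts));
 *
 *	spin_lock_irqsave(&tgt->sess_work_lock, flags);
 *	list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
 *	spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
 *
 *	schedule_work(&tgt->sess_work);
 *
 * The worker deletes each entry under the same lock before processing it,
 * which is what keeps concurrent execution on several CPUs safe.
 */
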
/* Must be called under tgt_host_action_mutex */
int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
{
	struct qla_tgt *tgt;
	int rc, i;
	struct qla_qpair_hint *h;

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	if (!IS_TGT_MODE_CAPABLE(ha)) {
		ql_log(ql_log_warn, base_vha, 0xe070,
		    "This adapter does not support target mode.\n");
		return 0;
	}

	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha);

	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);

	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
	if (!tgt) {
		ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
		    "Unable to allocate struct qla_tgt\n");
		return -ENOMEM;
	}

	tgt->qphints = kcalloc(ha->max_qpairs + 1,
	    sizeof(struct qla_qpair_hint),
	    GFP_KERNEL);
	if (!tgt->qphints) {
		kfree(tgt);
		ql_log(ql_log_warn, base_vha, 0x0197,
		    "Unable to allocate qpair hints.\n");
		return -ENOMEM;
	}

	qla2xxx_driver_template.supported_mode |= MODE_TARGET;

	rc = btree_init64(&tgt->lun_qpair_map);
	if (rc) {
		kfree(tgt->qphints);
		kfree(tgt);
		ql_log(ql_log_info, base_vha, 0x0198,
		    "Unable to initialize lun_qpair_map btree\n");
		return -EIO;
	}
	h = &tgt->qphints[0];
	h->qpair = ha->base_qpair;
	INIT_LIST_HEAD(&h->hint_elem);
	h->cpuid = ha->base_qpair->cpuid;
	list_add_tail(&h->hint_elem, &ha->base_qpair->hints_list);

	for (i = 0; i < ha->max_qpairs; i++) {
		unsigned long flags;
		struct qla_qpair *qpair = ha->queue_pair_map[i];

		h = &tgt->qphints[i + 1];
		INIT_LIST_HEAD(&h->hint_elem);
		if (qpair) {
			h->qpair = qpair;
			spin_lock_irqsave(qpair->qp_lock_ptr, flags);
			list_add_tail(&h->hint_elem, &qpair->hints_list);
			spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
			h->cpuid = qpair->cpuid;
		}
	}

	tgt->ha = ha;
	tgt->vha = base_vha;
	init_waitqueue_head(&tgt->waitQ);
	spin_lock_init(&tgt->sess_work_lock);
	INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
	INIT_LIST_HEAD(&tgt->sess_works_list);
	spin_lock_init(&tgt->srr_lock);
	INIT_LIST_HEAD(&tgt->srr_list);
	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
	atomic_set(&tgt->tgt_global_resets_count, 0);

	base_vha->vha_tgt.qla_tgt = tgt;

	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
	    "qla_target(%d): using 64 Bit PCI addressing",
	    base_vha->vp_idx);
	/* 3 is reserved */
	tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);

	mutex_lock(&qla_tgt_mutex);
	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
	mutex_unlock(&qla_tgt_mutex);

	if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
		ha->tgt.tgt_ops->add_target(base_vha);

	return 0;
}

/* Must be called under tgt_host_action_mutex */
int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
{
	if (!vha->vha_tgt.qla_tgt)
		return 0;

	if (vha->fc_vport) {
		qlt_release(vha->vha_tgt.qla_tgt);
		return 0;
	}

	/* free left over qfull cmds */
	qlt_init_term_exchange(vha);

	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
	    vha->host_no, ha);
	qlt_release(vha->vha_tgt.qla_tgt);

	return 0;
}

void qla_remove_hostmap(struct qla_hw_data *ha)
{
	struct scsi_qla_host *node;
	u32 key = 0;

	btree_for_each_safe32(&ha->host_map, key, node)
		btree_remove32(&ha->host_map, key);

	btree_destroy32(&ha->host_map);
}

static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
	unsigned char *b)
{
	pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
	pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
	put_unaligned_be64(wwpn, b);
	pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b);
}

/**
 * qlt_lport_register - register lport with external module
 *
 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
 * @phys_wwpn: physical port WWPN
 * @npiv_wwpn: NPIV WWPN
 * @npiv_wwnn: NPIV WWNN
 * @callback: lport initialization callback for tcm_qla2xxx code
 */
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
		       u64 npiv_wwpn, u64 npiv_wwnn,
		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))
{
	struct qla_tgt *tgt;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct Scsi_Host *host;
	unsigned long flags;
	int rc;
	u8 b[WWN_SIZE];

	mutex_lock(&qla_tgt_mutex);
	list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
		vha = tgt->vha;
		ha = vha->hw;

		host = vha->host;
		if (!host)
			continue;

		if (!(host->hostt->supported_mode & MODE_TARGET))
			continue;

		if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
			continue;

		spin_lock_irqsave(&ha->hardware_lock, flags);
		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		if (tgt->tgt_stop) {
			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
			    host->host_no);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		if (!scsi_host_get(host)) {
			ql_dbg(ql_dbg_tgt, vha, 0xe068,
			    "Unable to scsi_host_get() for"
			    " qla2xxx scsi_host\n");
			continue;
		}
		qlt_lport_dump(vha, phys_wwpn, b);

		if (memcmp(vha->port_name, b, WWN_SIZE)) {
			scsi_host_put(host);
			continue;
		}
		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
		if (rc != 0)
			scsi_host_put(host);

		mutex_unlock(&qla_tgt_mutex);
		return rc;
	}
	mutex_unlock(&qla_tgt_mutex);

	return -ENODEV;
}
EXPORT_SYMBOL(qlt_lport_register);

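/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * fabric module in the tcm_qla2xxx mold registers its lport by passing
 * the physical WWPN plus a callback that binds module-private data to
 * the matching scsi_qla_host. All "my_*" names are made up:
 *
 *	static int my_lport_cb(struct scsi_qla_host *vha, void *priv,
 *			       u64 npiv_wwpn, u64 npiv_wwnn)
 *	{
 *		vha->vha_tgt.target_lport_ptr = priv;
 *		return 0;
 *	}
 *
 *	rc = qlt_lport_register(my_lport, wwpn, 0, 0, my_lport_cb);
 *	if (rc < 0)
 *		return rc;	(-ENODEV: no matching target-capable host)
 *
 * On success the callback has run under qla_tgt_mutex and the Scsi_Host
 * reference taken here is kept until qlt_lport_deregister().
 */
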
/**
 * qlt_lport_deregister - Deregister lport
 *
 * @vha: Registered scsi_qla_host pointer
 */
void qlt_lport_deregister(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct Scsi_Host *sh = vha->host;
	/*
	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
	 */
	vha->vha_tgt.target_lport_ptr = NULL;
	ha->tgt.tgt_ops = NULL;
	/*
	 * Release the Scsi_Host reference for the underlying qla2xxx host
	 */
	scsi_host_put(sh);
}
EXPORT_SYMBOL(qlt_lport_deregister);

/* Must be called under HW lock */
void qlt_set_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
	case QLA2XXX_INI_MODE_EXCLUSIVE:
		vha->host->active_mode = MODE_TARGET;
		break;
	case QLA2XXX_INI_MODE_ENABLED:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_DUAL;
		break;
	default:
		break;
	}
}

/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
	switch (vha->qlini_mode) {
	case QLA2XXX_INI_MODE_DISABLED:
		vha->host->active_mode = MODE_UNKNOWN;
		break;
	case QLA2XXX_INI_MODE_EXCLUSIVE:
	case QLA2XXX_INI_MODE_ENABLED:
	case QLA2XXX_INI_MODE_DUAL:
		vha->host->active_mode = MODE_INITIATOR;
		break;
	default:
		break;
	}
}

/*
 * qlt_enable_vha - NO LOCK HELD
 *
 * host_reset, bring up w/ Target Mode Enabled
 */
void
qlt_enable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe069,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}
	if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
		return;

	if (ha->tgt.num_act_qpairs > ha->max_qpairs)
		ha->tgt.num_act_qpairs = ha->max_qpairs;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	tgt->tgt_stopped = 0;
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
	} else {
		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
		qla2xxx_wake_dpc(base_vha);
		WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) !=
		    QLA_SUCCESS);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);

/*
 * qlt_disable_vha - NO LOCK HELD
 *
 * Disable Target Mode and reset the adapter
 */
static void qlt_disable_vha(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
	unsigned long flags;

	if (!tgt) {
		ql_dbg(ql_dbg_tgt, vha, 0xe06a,
		    "Unable to locate qla_tgt pointer from"
		    " struct qla_hw_data\n");
		dump_stack();
		return;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_clear_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);

	/*
	 * We are expecting the offline state.
	 * QLA_FUNCTION_FAILED means that the adapter is offline.
	 */
	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
		ql_dbg(ql_dbg_tgt, vha, 0xe081,
		    "adapter is offline\n");
}

/*
 * Called from qla_init.c:qla24xx_vport_create() context to set up
 * the target mode specific struct scsi_qla_host and struct qla_hw_data
 * members.
 */
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
	vha->vha_tgt.qla_tgt = NULL;

	mutex_init(&vha->vha_tgt.tgt_mutex);
	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&vha->unknown_atio_list);
	INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn);

	qlt_clear_mode(vha);

	/*
	 * NOTE: Currently the value is kept the same for <24xx and
	 * >=24xx ISPs. If it is necessary to change it,
	 * the check should be added for specific ISPs,
	 * assigning the value appropriately.
	 */
	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;

	qlt_add_target(ha, vha);
}

u8
qlt_rff_id(struct scsi_qla_host *vha)
{
	u8 fc4_feature = 0;
	/*
	 * FC-4 Feature bit 0 indicates target functionality to the name server.
	 */
	if (qla_tgt_mode_enabled(vha)) {
		fc4_feature = BIT_0;
	} else if (qla_ini_mode_enabled(vha)) {
		fc4_feature = BIT_1;
	} else if (qla_dual_mode_enabled(vha)) {
		fc4_feature = BIT_0 | BIT_1;
	}

	return fc4_feature;
}

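/*
 * Worked example for qlt_rff_id() above: the returned byte lands in the
 * FC-4 Features field of the name server RFF_ID registration for the FCP
 * type, so the fabric learns which roles this port plays:
 *
 *	target mode only:	BIT_0		-> FC-4 Features 0x1
 *	initiator mode only:	BIT_1		-> FC-4 Features 0x2
 *	dual mode:		BIT_0 | BIT_1	-> FC-4 Features 0x3
 *
 * (The code that copies this byte into the CT request lives in the
 * driver's name server registration path, outside this file.)
 */
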
/*
 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
 * @vha: SCSI driver HA context
 *
 * Beginning of ATIO ring has initialization control block already built
 * by nvram config routine.
 */
void
qlt_init_atio_q_entries(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;
	struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;

	if (qla_ini_mode_enabled(vha))
		return;

	for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
		pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
		pkt++;
	}
}

/*
 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
 * @vha: SCSI driver HA context
 * @ha_locked: set when the caller already holds ha->hardware_lock
 */
void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
{
	struct qla_hw_data *ha = vha->hw;
	struct atio_from_isp *pkt;
	int cnt, i;

	if (!ha->flags.fw_started)
		return;

	while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
	    fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
		pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		cnt = pkt->u.raw.entry_count;

		if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
			/*
			 * This packet is corrupted. The header + payload
			 * cannot be trusted. There is no point in passing
			 * it further up.
			 */
			ql_log(ql_log_warn, vha, 0xd03c,
			    "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
			    &pkt->u.isp24.fcp_hdr.s_id,
			    be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
			    pkt->u.isp24.exchange_addr, pkt);

			adjust_corrupted_atio(pkt);
			qlt_send_term_exchange(ha->base_qpair, NULL, pkt,
			    ha_locked);
		} else {
			qlt_24xx_atio_pkt_all_vps(vha,
			    (struct atio_from_isp *)pkt, ha_locked);
		}

		for (i = 0; i < cnt; i++) {
			ha->tgt.atio_ring_index++;
			if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
				ha->tgt.atio_ring_index = 0;
				ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
			} else
				ha->tgt.atio_ring_ptr++;

			pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED);
			pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
		}
		wmb();
	}

	/* Adjust ring index */
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}

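/*
 * Worked example for the ring walk above (illustrative numbers, assuming
 * atio_q_length == 8 and atio_ring_index == 6): an ATIO with
 * entry_count == 3 advances the index 6 -> 7 -> 0 -> 1, resetting
 * atio_ring_ptr to the ring base at the wrap point and re-stamping every
 * consumed entry with ATIO_PROCESSED so a stale signature is never taken
 * for a new packet. The updated out-pointer is published to the chip via
 * ISP_ATIO_Q_OUT() only once the whole backlog has been drained.
 */
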
void
qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct qla_msix_entry *msix = &ha->msix_entries[2];
	struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
	wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
	rd_reg_dword(ISP_ATIO_Q_OUT(vha));

	if (ha->flags.msix_enabled) {
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = cpu_to_le16(msix->entry);
			icb->firmware_options_2 &= cpu_to_le32(~BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "Registering ICB vector 0x%x for ATIO queue.\n",
			    msix->entry);
		}
	} else {
		/* INTx|MSI */
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			icb->msix_atio = 0;
			icb->firmware_options_2 |= cpu_to_le32(BIT_26);
			ql_dbg(ql_dbg_init, vha, 0xf072,
			    "%s: Use INTx for ATIOQ.\n", __func__);
		}
	}
}

void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);

		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		if (ql2xtgt_tape_enable)
			/* Enable FC Tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC Tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);

		/*
		 * Clear BIT 15 explicitly as we have seen at least
		 * a couple of instances where this was set and this
		 * was causing the firmware to not be initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		if (IS_QLA25XX(ha)) {
			/* Change Loop-prefer to Pt-Pt */
			tmp = ~(BIT_4|BIT_5|BIT_6);
			nv->firmware_options_2 &= cpu_to_le32(tmp);
			tmp = P2P << 4;
			nv->firmware_options_2 |= cpu_to_le32(tmp);
		}
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

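/*
 * Worked example for the 25xx "Loop-prefer to Pt-Pt" fixup above: the
 * connection-mode field sits in bits 4-6 of firmware_options_2, so the
 * two-step sequence
 *
 *	nv->firmware_options_2 &= cpu_to_le32(~(BIT_4|BIT_5|BIT_6));
 *	nv->firmware_options_2 |= cpu_to_le32(P2P << 4);
 *
 * first clears the three-bit field and then writes P2P into it, leaving
 * every other option bit untouched.
 */
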
void
qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_24xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
	struct qla_hw_data *ha = vha->hw;
	u32 tmp;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
		if (!ha->tgt.saved_set) {
			/* We save only once */
			ha->tgt.saved_exchange_count = nv->exchange_count;
			ha->tgt.saved_firmware_options_1 =
			    nv->firmware_options_1;
			ha->tgt.saved_firmware_options_2 =
			    nv->firmware_options_2;
			ha->tgt.saved_firmware_options_3 =
			    nv->firmware_options_3;
			ha->tgt.saved_set = 1;
		}

		if (qla_tgt_mode_enabled(vha))
			nv->exchange_count = cpu_to_le16(0xFFFF);
		else			/* dual */
			nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);

		/* Enable target mode */
		nv->firmware_options_1 |= cpu_to_le32(BIT_4);

		/* Disable ini mode, if requested */
		if (qla_tgt_mode_enabled(vha))
			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
		/* Disable Full Login after LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Enable initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
		/*
		 * Clear BIT 15 explicitly as we have seen at
		 * least a couple of instances where this was set
		 * and this was causing the firmware to not be
		 * initialized.
		 */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
		if (ql2xtgt_tape_enable)
			/* Enable FC tape support */
			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		else
			/* Disable FC tape support */
			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);

		/* Disable Full Login after LIP */
		nv->host_p &= cpu_to_le32(~BIT_10);
		/* Enable target PRLI control */
		nv->firmware_options_2 |= cpu_to_le32(BIT_14);

		/* Change Loop-prefer to Pt-Pt */
		tmp = ~(BIT_4|BIT_5|BIT_6);
		nv->firmware_options_2 &= cpu_to_le32(tmp);
		tmp = P2P << 4;
		nv->firmware_options_2 |= cpu_to_le32(tmp);
	} else {
		if (ha->tgt.saved_set) {
			nv->exchange_count = ha->tgt.saved_exchange_count;
			nv->firmware_options_1 =
			    ha->tgt.saved_firmware_options_1;
			nv->firmware_options_2 =
			    ha->tgt.saved_firmware_options_2;
			nv->firmware_options_3 =
			    ha->tgt.saved_firmware_options_3;
		}
		return;
	}

	if (ha->base_qpair->enable_class_2) {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) =
			    FC_COS_CLASS2 | FC_COS_CLASS3;

		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
	} else {
		if (vha->flags.init_done)
			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;

		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
	}
}

void
qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
	struct init_cb_81xx *icb)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.node_name_set) {
		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
	}
}

/* Update any settings that depend on ha->fw_*_version. */
void
qlt_config_nvram_with_fw_version(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ql2xtgt_tape_enable && qlt_has_sler_fw_bug(ha)) {
		ql_log(ql_log_warn, vha, 0x11036,
		    "WARNING: ignoring ql2xtgt_tape_enable due to buggy HBA firmware; please upgrade FW\n");

		/* Disable FC Tape support */
		if (ha->isp_ops->nvram_config == qla81xx_nvram_config) {
			struct init_cb_81xx *icb =
			    (struct init_cb_81xx *)ha->init_cb;

			icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
		} else {
			struct init_cb_24xx *icb =
			    (struct init_cb_24xx *)ha->init_cb;

			icb->firmware_options_2 &= cpu_to_le32(~BIT_12);
		}
	}
}

void
qlt_modify_vp_config(struct scsi_qla_host *vha,
	struct vp_config_entry_24xx *vpmod)
{
	/* Enable target mode. Bit5 = 1 => disable */
	if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_5;

	/* Disable ini mode, if requested. Bit4 = 1 => disable */
	if (qla_tgt_mode_enabled(vha))
		vpmod->options_idx1 &= ~BIT_4;
}

void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
	mutex_init(&base_vha->vha_tgt.tgt_mutex);
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
	} else {
		ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
	}

	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);

	INIT_LIST_HEAD(&base_vha->unknown_atio_list);
	INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
	    qlt_unknown_atio_work_fn);

	qlt_clear_mode(base_vha);

	qla_update_vp_map(base_vha, SET_VP_IDX);
}

irqreturn_t
qla83xx_msix_atio_q(int irq, void *dev_id)
{
	struct rsp_que *rsp;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *)dev_id;
	ha = rsp->hw;
	vha = pci_get_drvdata(ha->pdev);

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);

	qlt_24xx_process_atio_queue(vha, 0);

	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	return IRQ_HANDLED;
}

static void
qlt_handle_abts_recv_work(struct work_struct *work)
{
	struct qla_tgt_sess_op *op = container_of(work,
	    struct qla_tgt_sess_op, work);
	scsi_qla_host_t *vha = op->vha;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	if (qla2x00_reset_active(vha) ||
	    (op->chip_reset != ha->base_qpair->chip_reset)) {
		kfree(op);
		return;
	}

	spin_lock_irqsave(&ha->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	kfree(op);
}

void
qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
	response_t *pkt)
{
	struct qla_tgt_sess_op *op;

	op = kzalloc(sizeof(*op), GFP_ATOMIC);

	if (!op) {
		/*
		 * Do not reach for the ATIO queue here; this is best-effort
		 * error recovery at this point.
		 */
		qlt_response_pkt_all_vps(vha, rsp, pkt);
		return;
	}

	memcpy(&op->atio, pkt, sizeof(*pkt));
	op->vha = vha;
	op->chip_reset = vha->hw->base_qpair->chip_reset;
	op->rsp = rsp;
	INIT_WORK(&op->work, qlt_handle_abts_recv_work);
	queue_work(qla_tgt_wq, &op->work);
}

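/*
 * Note on the handoff above (sketch of the pattern, not a new mechanism):
 * qlt_handle_abts_recv() runs from interrupt context, so it snapshots the
 * packet plus the current chip_reset generation and defers the rest to
 * process context. The worker revalidates before acting:
 *
 *	if (qla2x00_reset_active(vha) ||
 *	    (op->chip_reset != ha->base_qpair->chip_reset)) {
 *		kfree(op);
 *		return;
 *	}
 *
 * i.e. an ABTS that raced with an adapter reset is dropped rather than
 * answered against a firmware instance that no longer knows the exchange.
 */
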
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
	    (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
	    &ha->tgt.atio_dma, GFP_KERNEL);
	if (!ha->tgt.atio_ring)
		return -ENOMEM;

	return 0;
}

void
qlt_mem_free(struct qla_hw_data *ha)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	if (ha->tgt.atio_ring) {
		dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
		    sizeof(struct atio_from_isp), ha->tgt.atio_ring,
		    ha->tgt.atio_dma);
	}
	ha->tgt.atio_ring = NULL;
	ha->tgt.atio_dma = 0;
}

static int __init qlt_parse_ini_mode(void)
{
	if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
	else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
		ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
	else
		return false;

	return true;
}

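/*
 * Usage note (illustrative): the strings compared above come straight
 * from the qlini_mode module parameter, e.g.
 *
 *	modprobe qla2xxx qlini_mode=disabled
 *
 * keeps initiator mode off so the port can run as a pure target. An
 * unrecognized value makes qlt_parse_ini_mode() return false, and module
 * init then fails with -EINVAL (see qlt_init() below).
 */
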
int __init qlt_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64);
	BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64);

	if (!qlt_parse_ini_mode()) {
		ql_log(ql_log_fatal, NULL, 0xe06b,
		    "qlt_parse_ini_mode() failed\n");
		return -EINVAL;
	}

	if (!QLA_TGT_MODE_ENABLED())
		return 0;

	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
	    qla_tgt_mgmt_cmd), 0, NULL);
	if (!qla_tgt_mgmt_cmd_cachep) {
		ql_log(ql_log_fatal, NULL, 0xd04b,
		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
		return -ENOMEM;
	}

	qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
	    sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
	    0, NULL);

	if (!qla_tgt_plogi_cachep) {
		ql_log(ql_log_fatal, NULL, 0xe06d,
		    "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
		ret = -ENOMEM;
		goto out_mgmt_cmd_cachep;
	}

	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
	    mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
	if (!qla_tgt_mgmt_cmd_mempool) {
		ql_log(ql_log_fatal, NULL, 0xe06e,
		    "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
		ret = -ENOMEM;
		goto out_plogi_cachep;
	}

	qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
	if (!qla_tgt_wq) {
		ql_log(ql_log_fatal, NULL, 0xe06f,
		    "alloc_workqueue for qla_tgt_wq failed\n");
		ret = -ENOMEM;
		goto out_cmd_mempool;
	}
	/*
	 * Return 1 to signal that initiator-mode is being disabled
	 */
	return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;

out_cmd_mempool:
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
out_plogi_cachep:
	kmem_cache_destroy(qla_tgt_plogi_cachep);
out_mgmt_cmd_cachep:
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
	return ret;
}

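/*
 * Pattern note (generic sketch, hypothetical names): qlt_exit() below
 * releases resources in the reverse order of their creation, mirroring
 * the goto unwind at the end of qlt_init():
 *
 *	a = create_a();
 *	if (!a)
 *		return -ENOMEM;
 *	b = create_b();
 *	if (!b) {
 *		ret = -ENOMEM;
 *		goto out_a;
 *	}
 *	return 0;
 *
 * out_a:
 *	destroy_a(a);
 *	return ret;
 *
 * Each object acquired before a failure point is destroyed exactly once.
 */
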
void qlt_exit(void)
{
	if (!QLA_TGT_MODE_ENABLED())
		return;

	destroy_workqueue(qla_tgt_wq);
	mempool_destroy(qla_tgt_mgmt_cmd_mempool);
	kmem_cache_destroy(qla_tgt_plogi_cachep);
	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
}