1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
8
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11
12 #include <scsi/scsi_tcq.h>
13
14 static int qla_start_scsi_type6(srb_t *sp);
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SCSI command
18 *
19 * Returns the proper CF_* direction based on CDB.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 struct scsi_qla_host *vha = sp->vha;
27
28 cflags = 0;
29
30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE;
33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 vha->qla_stats.input_requests++;
39 }
40 return (cflags);
41 }
42
43 /**
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
46 *
47 * @dsds: number of data segment descriptors needed
48 *
49 * Returns the number of IOCB entries needed to store @dsds.
50 */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54 uint16_t iocbs;
55
56 iocbs = 1;
57 if (dsds > 3) {
58 iocbs += (dsds - 3) / 7;
59 if ((dsds - 3) % 7)
60 iocbs++;
61 }
62 return (iocbs);
63 }
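/*
 * Illustrative sketch (editor's example, not driver code): the arithmetic
 * above for a transfer with 10 data segments. The Command Type 2 IOCB holds
 * 3 DSDs and each Continuation Type 0 IOCB holds 7 more, so
 *
 *	iocbs = 1 + (10 - 3) / 7 = 2, with (10 - 3) % 7 == 0,
 *
 * i.e. qla2x00_calc_iocbs_32(10) returns 2; for 11 segments the remainder
 * is non-zero and a third entry would be needed.
 */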
64
65 /**
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
68 *
69 * @dsds: number of data segment descriptors needed
70 *
71 * Returns the number of IOCB entries needed to store @dsds.
72 */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76 uint16_t iocbs;
77
78 iocbs = 1;
79 if (dsds > 2) {
80 iocbs += (dsds - 2) / 5;
81 if ((dsds - 2) % 5)
82 iocbs++;
83 }
84 return (iocbs);
85 }
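/*
 * Illustrative sketch (editor's example): with 2 DSDs in the Command Type 3
 * IOCB and 5 per Continuation Type 1 IOCB, a 12-segment transfer needs
 * 1 + (12 - 2) / 5 = 3 entries, so qla2x00_calc_iocbs_64(12) returns 3;
 * 13 segments would round up to 4.
 */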
86
87 /**
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89 * @vha: HA context
90 *
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96 cont_entry_t *cont_pkt;
97 struct req_que *req = vha->req;
98 /* Adjust ring index. */
99 req->ring_index++;
100 if (req->ring_index == req->length) {
101 req->ring_index = 0;
102 req->ring_ptr = req->ring;
103 } else {
104 req->ring_ptr++;
105 }
106
107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108
109 /* Load packet defaults. */
110 put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
111
112 return (cont_pkt);
113 }
114
115 /**
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @vha: HA context
118 * @req: request queue
119 *
120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */
122 cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
124 {
125 cont_a64_entry_t *cont_pkt;
126
127 /* Adjust ring index. */
128 req->ring_index++;
129 if (req->ring_index == req->length) {
130 req->ring_index = 0;
131 req->ring_ptr = req->ring;
132 } else {
133 req->ring_ptr++;
134 }
135
136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137
138 /* Load packet defaults. */
139 put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
140 CONTINUE_A64_TYPE, &cont_pkt->entry_type);
141
142 return (cont_pkt);
143 }
144
145 inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 {
148 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149
150 /* We always use DIF bundling for best performance */
151 *fw_prot_opts = 0;
152
153 /* Translate SCSI opcode to a protection opcode */
154 switch (scsi_get_prot_op(cmd)) {
155 case SCSI_PROT_READ_STRIP:
156 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
157 break;
158 case SCSI_PROT_WRITE_INSERT:
159 *fw_prot_opts |= PO_MODE_DIF_INSERT;
160 break;
161 case SCSI_PROT_READ_INSERT:
162 *fw_prot_opts |= PO_MODE_DIF_INSERT;
163 break;
164 case SCSI_PROT_WRITE_STRIP:
165 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
166 break;
167 case SCSI_PROT_READ_PASS:
168 case SCSI_PROT_WRITE_PASS:
169 if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
170 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171 else
172 *fw_prot_opts |= PO_MODE_DIF_PASS;
173 break;
174 default: /* Normal Request */
175 *fw_prot_opts |= PO_MODE_DIF_PASS;
176 break;
177 }
178
179 if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
180 *fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
181
182 return scsi_prot_sg_count(cmd);
183 }
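/*
 * Illustrative sketch (editor's example): for a WRITE where
 * scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS and SCSI_PROT_IP_CHECKSUM is
 * set in cmd->prot_flags, *fw_prot_opts ends up as PO_MODE_DIF_TCP_CKSUM;
 * if SCSI_PROT_GUARD_CHECK is clear, PO_DISABLE_GUARD_CHECK is OR'ed in as
 * well. The return value is simply scsi_prot_sg_count(cmd), i.e. the number
 * of protection scatter/gather entries the caller still has to map.
 */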
184
185 /*
186 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
187 * capable IOCB types.
188 *
189 * @sp: SRB command to process
190 * @cmd_pkt: Command type 2 IOCB
191 * @tot_dsds: Total number of segments to transfer
192 */
193 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
194 uint16_t tot_dsds)
195 {
196 uint16_t avail_dsds;
197 struct dsd32 *cur_dsd;
198 scsi_qla_host_t *vha;
199 struct scsi_cmnd *cmd;
200 struct scatterlist *sg;
201 int i;
202
203 cmd = GET_CMD_SP(sp);
204
205 /* Update entry type to indicate Command Type 2 IOCB */
206 put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
207
208 /* No data transfer */
209 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
210 cmd_pkt->byte_count = cpu_to_le32(0);
211 return;
212 }
213
214 vha = sp->vha;
215 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
216
217 /* Three DSDs are available in the Command Type 2 IOCB */
218 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
219 cur_dsd = cmd_pkt->dsd32;
220
221 /* Load data segments */
222 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
223 cont_entry_t *cont_pkt;
224
225 /* Allocate additional continuation packets? */
226 if (avail_dsds == 0) {
227 /*
228 * Seven DSDs are available in the Continuation
229 * Type 0 IOCB.
230 */
231 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
232 cur_dsd = cont_pkt->dsd;
233 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
234 }
235
236 append_dsd32(&cur_dsd, sg);
237 avail_dsds--;
238 }
239 }
240
241 /**
242 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243 * capable IOCB types.
244 *
245 * @sp: SRB command to process
246 * @cmd_pkt: Command type 3 IOCB
247 * @tot_dsds: Total number of segments to transfer
248 */
249 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250 uint16_t tot_dsds)
251 {
252 uint16_t avail_dsds;
253 struct dsd64 *cur_dsd;
254 scsi_qla_host_t *vha;
255 struct scsi_cmnd *cmd;
256 struct scatterlist *sg;
257 int i;
258
259 cmd = GET_CMD_SP(sp);
260
261 /* Update entry type to indicate Command Type 3 IOCB */
262 put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
263
264 /* No data transfer */
265 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266 cmd_pkt->byte_count = cpu_to_le32(0);
267 return;
268 }
269
270 vha = sp->vha;
271 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
272
273 /* Two DSDs are available in the Command Type 3 IOCB */
274 avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
275 cur_dsd = cmd_pkt->dsd64;
276
277 /* Load data segments */
278 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279 cont_a64_entry_t *cont_pkt;
280
281 /* Allocate additional continuation packets? */
282 if (avail_dsds == 0) {
283 /*
284 * Five DSDs are available in the Continuation
285 * Type 1 IOCB.
286 */
287 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
288 cur_dsd = cont_pkt->dsd;
289 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
290 }
291
292 append_dsd64(&cur_dsd, sg);
293 avail_dsds--;
294 }
295 }
296
297 /*
298 * Find the first handle that is not in use, starting from
299 * req->current_outstanding_cmd + 1. The caller must hold the lock that is
300 * associated with @req.
301 */
302 uint32_t qla2xxx_get_next_handle(struct req_que *req)
303 {
304 uint32_t index, handle = req->current_outstanding_cmd;
305
306 for (index = 1; index < req->num_outstanding_cmds; index++) {
307 handle++;
308 if (handle == req->num_outstanding_cmds)
309 handle = 1;
310 if (!req->outstanding_cmds[handle])
311 return handle;
312 }
313
314 return 0;
315 }
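/*
 * Illustrative sketch (editor's example): with req->num_outstanding_cmds == 4
 * and req->current_outstanding_cmd == 3, the scan probes handles 1, 2 and 3
 * in that order (handle 0 is never used) and returns the first slot whose
 * outstanding_cmds[] entry is NULL, or 0 if every slot is busy.
 */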
316
317 /**
318 * qla2x00_start_scsi() - Send a SCSI command to the ISP
319 * @sp: command to send to the ISP
320 *
321 * Returns non-zero if a failure occurred, else zero.
322 */
323 int
324 qla2x00_start_scsi(srb_t *sp)
325 {
326 int nseg;
327 unsigned long flags;
328 scsi_qla_host_t *vha;
329 struct scsi_cmnd *cmd;
330 uint32_t *clr_ptr;
331 uint32_t handle;
332 cmd_entry_t *cmd_pkt;
333 uint16_t cnt;
334 uint16_t req_cnt;
335 uint16_t tot_dsds;
336 struct device_reg_2xxx __iomem *reg;
337 struct qla_hw_data *ha;
338 struct req_que *req;
339 struct rsp_que *rsp;
340
341 /* Setup device pointers. */
342 vha = sp->vha;
343 ha = vha->hw;
344 reg = &ha->iobase->isp;
345 cmd = GET_CMD_SP(sp);
346 req = ha->req_q_map[0];
347 rsp = ha->rsp_q_map[0];
348 /* So we know we haven't pci_map'ed anything yet */
349 tot_dsds = 0;
350
351 /* Send marker if required */
352 if (vha->marker_needed != 0) {
353 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
354 QLA_SUCCESS) {
355 return (QLA_FUNCTION_FAILED);
356 }
357 vha->marker_needed = 0;
358 }
359
360 /* Acquire ring specific lock */
361 spin_lock_irqsave(&ha->hardware_lock, flags);
362
363 handle = qla2xxx_get_next_handle(req);
364 if (handle == 0)
365 goto queuing_error;
366
367 /* Map the sg table so we have an accurate count of sg entries needed */
368 if (scsi_sg_count(cmd)) {
369 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
370 scsi_sg_count(cmd), cmd->sc_data_direction);
371 if (unlikely(!nseg))
372 goto queuing_error;
373 } else
374 nseg = 0;
375
376 tot_dsds = nseg;
377
378 /* Calculate the number of request entries needed. */
379 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
380 if (req->cnt < (req_cnt + 2)) {
381 cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
382 if (req->ring_index < cnt)
383 req->cnt = cnt - req->ring_index;
384 else
385 req->cnt = req->length -
386 (req->ring_index - cnt);
387 /* If still no head room then bail out */
388 if (req->cnt < (req_cnt + 2))
389 goto queuing_error;
390 }
391
392 /* Build command packet */
393 req->current_outstanding_cmd = handle;
394 req->outstanding_cmds[handle] = sp;
395 sp->handle = handle;
396 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
397 req->cnt -= req_cnt;
398
399 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
400 cmd_pkt->handle = handle;
401 /* Zero out remaining portion of packet. */
402 clr_ptr = (uint32_t *)cmd_pkt + 2;
403 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
404 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
405
406 /* Set target ID and LUN number*/
407 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
408 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
409 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
410
411 /* Load SCSI command packet. */
412 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
413 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
414
415 /* Build IOCB segments */
416 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
417
418 /* Set total data segment count. */
419 cmd_pkt->entry_count = (uint8_t)req_cnt;
420 wmb();
421
422 /* Adjust ring index. */
423 req->ring_index++;
424 if (req->ring_index == req->length) {
425 req->ring_index = 0;
426 req->ring_ptr = req->ring;
427 } else
428 req->ring_ptr++;
429
430 sp->flags |= SRB_DMA_VALID;
431
432 /* Set chip new ring index. */
433 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
434 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
435
436 /* Manage unprocessed RIO/ZIO commands in response queue. */
437 if (vha->flags.process_response_queue &&
438 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
439 qla2x00_process_response_queue(rsp);
440
441 spin_unlock_irqrestore(&ha->hardware_lock, flags);
442 return (QLA_SUCCESS);
443
444 queuing_error:
445 if (tot_dsds)
446 scsi_dma_unmap(cmd);
447
448 spin_unlock_irqrestore(&ha->hardware_lock, flags);
449
450 return (QLA_FUNCTION_FAILED);
451 }
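/*
 * Illustrative sketch (editor's example): the ring-space check used above.
 * With req->length == 128, req->ring_index == 100 and the chip OUT pointer
 * reading 20, the free count becomes req->cnt = 128 - (100 - 20) = 48
 * entries; the command is queued only if that leaves room for req_cnt + 2
 * entries, otherwise the routine bails out to queuing_error.
 */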
452
453 /**
454 * qla2x00_start_iocbs() - Execute the IOCB command
455 * @vha: HA context
456 * @req: request queue
457 */
458 void
459 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
460 {
461 struct qla_hw_data *ha = vha->hw;
462 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
463
464 if (IS_P3P_TYPE(ha)) {
465 qla82xx_start_iocbs(vha);
466 } else {
467 /* Adjust ring index. */
468 req->ring_index++;
469 if (req->ring_index == req->length) {
470 req->ring_index = 0;
471 req->ring_ptr = req->ring;
472 } else
473 req->ring_ptr++;
474
475 /* Set chip new ring index. */
476 if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
477 wrt_reg_dword(req->req_q_in, req->ring_index);
478 } else if (IS_QLA83XX(ha)) {
479 wrt_reg_dword(req->req_q_in, req->ring_index);
480 rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
481 } else if (IS_QLAFX00(ha)) {
482 wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
483 rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
484 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
485 } else if (IS_FWI2_CAPABLE(ha)) {
486 wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
487 rd_reg_dword_relaxed(&reg->isp24.req_q_in);
488 } else {
489 wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
490 req->ring_index);
491 rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
492 }
493 }
494 }
495
496 /**
497 * __qla2x00_marker() - Send a marker IOCB to the firmware.
498 * @vha: HA context
499 * @qpair: queue pair pointer
500 * @loop_id: loop ID
501 * @lun: LUN
502 * @type: marker modifier
503 *
504 * Can be called from both normal and interrupt context.
505 *
506 * Returns non-zero if a failure occurred, else zero.
507 */
508 static int
509 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
510 uint16_t loop_id, uint64_t lun, uint8_t type)
511 {
512 mrk_entry_t *mrk;
513 struct mrk_entry_24xx *mrk24 = NULL;
514 struct req_que *req = qpair->req;
515 struct qla_hw_data *ha = vha->hw;
516 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
517
518 mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
519 if (mrk == NULL) {
520 ql_log(ql_log_warn, base_vha, 0x3026,
521 "Failed to allocate Marker IOCB.\n");
522
523 return (QLA_FUNCTION_FAILED);
524 }
525
526 mrk24 = (struct mrk_entry_24xx *)mrk;
527
528 mrk->entry_type = MARKER_TYPE;
529 mrk->modifier = type;
530 if (type != MK_SYNC_ALL) {
531 if (IS_FWI2_CAPABLE(ha)) {
532 mrk24->nport_handle = cpu_to_le16(loop_id);
533 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
534 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
535 mrk24->vp_index = vha->vp_idx;
536 } else {
537 SET_TARGET_ID(ha, mrk->target, loop_id);
538 mrk->lun = cpu_to_le16((uint16_t)lun);
539 }
540 }
541
542 if (IS_FWI2_CAPABLE(ha))
543 mrk24->handle = QLA_SKIP_HANDLE;
544
545 wmb();
546
547 qla2x00_start_iocbs(vha, req);
548
549 return (QLA_SUCCESS);
550 }
551
552 int
553 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
554 uint16_t loop_id, uint64_t lun, uint8_t type)
555 {
556 int ret;
557 unsigned long flags = 0;
558
559 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
560 ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
561 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
562
563 return (ret);
564 }
565
566 /*
567 * qla2x00_issue_marker
568 *
569 * Issue marker
570 * Caller CAN have hardware lock held as specified by ha_locked parameter.
571 * Might release it, then reacquire.
572 */
573 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
574 {
575 if (ha_locked) {
576 if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
577 MK_SYNC_ALL) != QLA_SUCCESS)
578 return QLA_FUNCTION_FAILED;
579 } else {
580 if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
581 MK_SYNC_ALL) != QLA_SUCCESS)
582 return QLA_FUNCTION_FAILED;
583 }
584 vha->marker_needed = 0;
585
586 return QLA_SUCCESS;
587 }
588
589 static inline int
590 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
591 uint16_t tot_dsds)
592 {
593 struct dsd64 *cur_dsd = NULL, *next_dsd;
594 struct scsi_cmnd *cmd;
595 struct scatterlist *cur_seg;
596 uint8_t avail_dsds;
597 uint8_t first_iocb = 1;
598 uint32_t dsd_list_len;
599 struct dsd_dma *dsd_ptr;
600 struct ct6_dsd *ctx;
601 struct qla_qpair *qpair = sp->qpair;
602
603 cmd = GET_CMD_SP(sp);
604
605 /* Update entry type to indicate Command Type 6 IOCB */
606 put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
607
608 /* No data transfer */
609 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
610 tot_dsds == 0) {
611 cmd_pkt->byte_count = cpu_to_le32(0);
612 return 0;
613 }
614
615 /* Set transfer direction */
616 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
617 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
618 qpair->counters.output_bytes += scsi_bufflen(cmd);
619 qpair->counters.output_requests++;
620 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
621 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
622 qpair->counters.input_bytes += scsi_bufflen(cmd);
623 qpair->counters.input_requests++;
624 }
625
626 cur_seg = scsi_sglist(cmd);
627 ctx = &sp->u.scmd.ct6_ctx;
628
629 while (tot_dsds) {
630 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
631 QLA_DSDS_PER_IOCB : tot_dsds;
632 tot_dsds -= avail_dsds;
633 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
634
635 dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
636 next_dsd = dsd_ptr->dsd_addr;
637 list_del(&dsd_ptr->list);
638 qpair->dsd_avail--;
639 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
640 ctx->dsd_use_cnt++;
641 qpair->dsd_inuse++;
642
643 if (first_iocb) {
644 first_iocb = 0;
645 put_unaligned_le64(dsd_ptr->dsd_list_dma,
646 &cmd_pkt->fcp_dsd.address);
647 cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
648 } else {
649 put_unaligned_le64(dsd_ptr->dsd_list_dma,
650 &cur_dsd->address);
651 cur_dsd->length = cpu_to_le32(dsd_list_len);
652 cur_dsd++;
653 }
654 cur_dsd = next_dsd;
655 while (avail_dsds) {
656 append_dsd64(&cur_dsd, cur_seg);
657 cur_seg = sg_next(cur_seg);
658 avail_dsds--;
659 }
660 }
661
662 /* Null termination */
663 cur_dsd->address = 0;
664 cur_dsd->length = 0;
665 cur_dsd++;
666 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
667 return 0;
668 }
669
670 /*
671 * qla24xx_calc_dsd_lists() - Determine the number of DSD lists required
672 * for Command Type 6.
673 *
674 * @dsds: number of data segment descriptors needed
675 *
676 * Returns the number of DSD lists needed to store @dsds.
677 */
678 static inline uint16_t
679 qla24xx_calc_dsd_lists(uint16_t dsds)
680 {
681 uint16_t dsd_lists = 0;
682
683 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
684 if (dsds % QLA_DSDS_PER_IOCB)
685 dsd_lists++;
686 return dsd_lists;
687 }
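/*
 * Illustrative sketch (editor's example): with QLA_DSDS_PER_IOCB descriptors
 * fitting in each list, 2 * QLA_DSDS_PER_IOCB + 1 descriptors give
 * (2 * QLA_DSDS_PER_IOCB + 1) / QLA_DSDS_PER_IOCB = 2 full lists plus a
 * non-zero remainder, so qla24xx_calc_dsd_lists() returns 3.
 */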
688
689
690 /**
691 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
692 * IOCB types.
693 *
694 * @sp: SRB command to process
695 * @cmd_pkt: Command Type 7 IOCB
696 * @tot_dsds: Total number of segments to transfer
697 * @req: pointer to request queue
698 */
699 inline void
700 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
701 uint16_t tot_dsds, struct req_que *req)
702 {
703 uint16_t avail_dsds;
704 struct dsd64 *cur_dsd;
705 scsi_qla_host_t *vha;
706 struct scsi_cmnd *cmd;
707 struct scatterlist *sg;
708 int i;
709 struct qla_qpair *qpair = sp->qpair;
710
711 cmd = GET_CMD_SP(sp);
712
713 /* Update entry type to indicate Command Type 7 IOCB */
714 put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
715
716 /* No data transfer */
717 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
718 cmd_pkt->byte_count = cpu_to_le32(0);
719 return;
720 }
721
722 vha = sp->vha;
723
724 /* Set transfer direction */
725 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
726 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
727 qpair->counters.output_bytes += scsi_bufflen(cmd);
728 qpair->counters.output_requests++;
729 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
730 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
731 qpair->counters.input_bytes += scsi_bufflen(cmd);
732 qpair->counters.input_requests++;
733 }
734
735 /* One DSD is available in the Command Type 7 IOCB */
736 avail_dsds = 1;
737 cur_dsd = &cmd_pkt->dsd;
738
739 /* Load data segments */
740
741 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
742 cont_a64_entry_t *cont_pkt;
743
744 /* Allocate additional continuation packets? */
745 if (avail_dsds == 0) {
746 /*
747 * Five DSDs are available in the Continuation
748 * Type 1 IOCB.
749 */
750 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
751 cur_dsd = cont_pkt->dsd;
752 avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
753 }
754
755 append_dsd64(&cur_dsd, sg);
756 avail_dsds--;
757 }
758 }
759
760 struct fw_dif_context {
761 __le32 ref_tag;
762 __le16 app_tag;
763 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
764 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
765 };
766
767 /*
768 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
769 *
770 */
771 static inline void
772 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
773 unsigned int protcnt)
774 {
775 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
776
777 pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
778
779 if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
780 qla2x00_hba_err_chk_enabled(sp)) {
781 pkt->ref_tag_mask[0] = 0xff;
782 pkt->ref_tag_mask[1] = 0xff;
783 pkt->ref_tag_mask[2] = 0xff;
784 pkt->ref_tag_mask[3] = 0xff;
785 }
786
787 pkt->app_tag = cpu_to_le16(0);
788 pkt->app_tag_mask[0] = 0x0;
789 pkt->app_tag_mask[1] = 0x0;
790 }
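/*
 * Illustrative sketch (editor's example): for a protected command with
 * SCSI_PROT_REF_CHECK set and HBA error checking enabled, all four
 * ref_tag_mask bytes are 0xff, so the firmware validates/replaces the whole
 * 32-bit reference tag; the application tag and its mask stay zero, i.e.
 * the app tag is neither checked nor replaced.
 */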
791
792 int
793 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
794 uint32_t *partial)
795 {
796 struct scatterlist *sg;
797 uint32_t cumulative_partial, sg_len;
798 dma_addr_t sg_dma_addr;
799
800 if (sgx->num_bytes == sgx->tot_bytes)
801 return 0;
802
803 sg = sgx->cur_sg;
804 cumulative_partial = sgx->tot_partial;
805
806 sg_dma_addr = sg_dma_address(sg);
807 sg_len = sg_dma_len(sg);
808
809 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
810
811 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
812 sgx->dma_len = (blk_sz - cumulative_partial);
813 sgx->tot_partial = 0;
814 sgx->num_bytes += blk_sz;
815 *partial = 0;
816 } else {
817 sgx->dma_len = sg_len - sgx->bytes_consumed;
818 sgx->tot_partial += sgx->dma_len;
819 *partial = 1;
820 }
821
822 sgx->bytes_consumed += sgx->dma_len;
823
824 if (sg_len == sgx->bytes_consumed) {
825 sg = sg_next(sg);
826 sgx->num_sg++;
827 sgx->cur_sg = sg;
828 sgx->bytes_consumed = 0;
829 }
830
831 return 1;
832 }
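/*
 * Illustrative sketch (editor's example): walking a single 1024-byte SG
 * element with blk_sz == 512. The first call returns dma_len == 512 with
 * *partial == 0; the second call returns the remaining 512 bytes, again a
 * whole protection interval, and advances cur_sg to the next element. Only
 * when an element ends mid-interval does *partial become 1 and tot_partial
 * carry the leftover byte count into the next call.
 */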
833
834 int
835 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
836 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
837 {
838 void *next_dsd;
839 uint8_t avail_dsds = 0;
840 uint32_t dsd_list_len;
841 struct dsd_dma *dsd_ptr;
842 struct scatterlist *sg_prot;
843 struct dsd64 *cur_dsd = dsd;
844 uint16_t used_dsds = tot_dsds;
845 uint32_t prot_int; /* protection interval */
846 uint32_t partial;
847 struct qla2_sgx sgx;
848 dma_addr_t sle_dma;
849 uint32_t sle_dma_len, tot_prot_dma_len = 0;
850 struct scsi_cmnd *cmd;
851
852 memset(&sgx, 0, sizeof(struct qla2_sgx));
853 if (sp) {
854 cmd = GET_CMD_SP(sp);
855 prot_int = scsi_prot_interval(cmd);
856
857 sgx.tot_bytes = scsi_bufflen(cmd);
858 sgx.cur_sg = scsi_sglist(cmd);
859 sgx.sp = sp;
860
861 sg_prot = scsi_prot_sglist(cmd);
862 } else if (tc) {
863 prot_int = tc->blk_sz;
864 sgx.tot_bytes = tc->bufflen;
865 sgx.cur_sg = tc->sg;
866 sg_prot = tc->prot_sg;
867 } else {
868 BUG();
869 return 1;
870 }
871
872 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
873
874 sle_dma = sgx.dma_addr;
875 sle_dma_len = sgx.dma_len;
876 alloc_and_fill:
877 /* Allocate additional continuation packets? */
878 if (avail_dsds == 0) {
879 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
880 QLA_DSDS_PER_IOCB : used_dsds;
881 dsd_list_len = (avail_dsds + 1) * 12;
882 used_dsds -= avail_dsds;
883
884 /* allocate tracking DS */
885 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
886 if (!dsd_ptr)
887 return 1;
888
889 /* allocate new list */
890 dsd_ptr->dsd_addr = next_dsd =
891 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
892 &dsd_ptr->dsd_list_dma);
893
894 if (!next_dsd) {
895 /*
896 * Need to cleanup only this dsd_ptr, rest
897 * will be done by sp_free_dma()
898 */
899 kfree(dsd_ptr);
900 return 1;
901 }
902
903 if (sp) {
904 list_add_tail(&dsd_ptr->list,
905 &sp->u.scmd.crc_ctx->dsd_list);
906
907 sp->flags |= SRB_CRC_CTX_DSD_VALID;
908 } else {
909 list_add_tail(&dsd_ptr->list,
910 &(tc->ctx->dsd_list));
911 *tc->ctx_dsd_alloced = 1;
912 }
913
914
915 /* add new list to cmd iocb or last list */
916 put_unaligned_le64(dsd_ptr->dsd_list_dma,
917 &cur_dsd->address);
918 cur_dsd->length = cpu_to_le32(dsd_list_len);
919 cur_dsd = next_dsd;
920 }
921 put_unaligned_le64(sle_dma, &cur_dsd->address);
922 cur_dsd->length = cpu_to_le32(sle_dma_len);
923 cur_dsd++;
924 avail_dsds--;
925
926 if (partial == 0) {
927 /* Got a full protection interval */
928 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
929 sle_dma_len = 8;
930
931 tot_prot_dma_len += sle_dma_len;
932 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
933 tot_prot_dma_len = 0;
934 sg_prot = sg_next(sg_prot);
935 }
936
937 partial = 1; /* So as to not re-enter this block */
938 goto alloc_and_fill;
939 }
940 }
941 /* Null termination */
942 cur_dsd->address = 0;
943 cur_dsd->length = 0;
944 cur_dsd++;
945 return 0;
946 }
947
948 int
949 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
950 struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
951 {
952 void *next_dsd;
953 uint8_t avail_dsds = 0;
954 uint32_t dsd_list_len;
955 struct dsd_dma *dsd_ptr;
956 struct scatterlist *sg, *sgl;
957 struct dsd64 *cur_dsd = dsd;
958 int i;
959 uint16_t used_dsds = tot_dsds;
960 struct scsi_cmnd *cmd;
961
962 if (sp) {
963 cmd = GET_CMD_SP(sp);
964 sgl = scsi_sglist(cmd);
965 } else if (tc) {
966 sgl = tc->sg;
967 } else {
968 BUG();
969 return 1;
970 }
971
972
973 for_each_sg(sgl, sg, tot_dsds, i) {
974 /* Allocate additional continuation packets? */
975 if (avail_dsds == 0) {
976 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
977 QLA_DSDS_PER_IOCB : used_dsds;
978 dsd_list_len = (avail_dsds + 1) * 12;
979 used_dsds -= avail_dsds;
980
981 /* allocate tracking DS */
982 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
983 if (!dsd_ptr)
984 return 1;
985
986 /* allocate new list */
987 dsd_ptr->dsd_addr = next_dsd =
988 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
989 &dsd_ptr->dsd_list_dma);
990
991 if (!next_dsd) {
992 /*
993 * Need to cleanup only this dsd_ptr, rest
994 * will be done by sp_free_dma()
995 */
996 kfree(dsd_ptr);
997 return 1;
998 }
999
1000 if (sp) {
1001 list_add_tail(&dsd_ptr->list,
1002 &sp->u.scmd.crc_ctx->dsd_list);
1003
1004 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1005 } else {
1006 list_add_tail(&dsd_ptr->list,
1007 &(tc->ctx->dsd_list));
1008 *tc->ctx_dsd_alloced = 1;
1009 }
1010
1011 /* add new list to cmd iocb or last list */
1012 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1013 &cur_dsd->address);
1014 cur_dsd->length = cpu_to_le32(dsd_list_len);
1015 cur_dsd = next_dsd;
1016 }
1017 append_dsd64(&cur_dsd, sg);
1018 avail_dsds--;
1019
1020 }
1021 /* Null termination */
1022 cur_dsd->address = 0;
1023 cur_dsd->length = 0;
1024 cur_dsd++;
1025 return 0;
1026 }
1027
1028 int
1029 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1030 struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1031 {
1032 struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1033 struct scatterlist *sg, *sgl;
1034 struct crc_context *difctx = NULL;
1035 struct scsi_qla_host *vha;
1036 uint dsd_list_len;
1037 uint avail_dsds = 0;
1038 uint used_dsds = tot_dsds;
1039 bool dif_local_dma_alloc = false;
1040 bool direction_to_device = false;
1041 int i;
1042
1043 if (sp) {
1044 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1045
1046 sgl = scsi_prot_sglist(cmd);
1047 vha = sp->vha;
1048 difctx = sp->u.scmd.crc_ctx;
1049 direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1050 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1051 "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1052 __func__, cmd, difctx, sp);
1053 } else if (tc) {
1054 vha = tc->vha;
1055 sgl = tc->prot_sg;
1056 difctx = tc->ctx;
1057 direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1058 } else {
1059 BUG();
1060 return 1;
1061 }
1062
1063 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1064 "%s: enter (write=%u)\n", __func__, direction_to_device);
1065
1066 /* if initiator doing write or target doing read */
1067 if (direction_to_device) {
1068 for_each_sg(sgl, sg, tot_dsds, i) {
1069 u64 sle_phys = sg_phys(sg);
1070
1071 /* If SGE addr + len flips bits in upper 32-bits */
1072 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1073 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1074 "%s: page boundary crossing (phys=%llx len=%x)\n",
1075 __func__, sle_phys, sg->length);
1076
1077 if (difctx) {
1078 ha->dif_bundle_crossed_pages++;
1079 dif_local_dma_alloc = true;
1080 } else {
1081 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1082 vha, 0xe022,
1083 "%s: difctx pointer is NULL\n",
1084 __func__);
1085 }
1086 break;
1087 }
1088 }
1089 ha->dif_bundle_writes++;
1090 } else {
1091 ha->dif_bundle_reads++;
1092 }
1093
1094 if (ql2xdifbundlinginternalbuffers)
1095 dif_local_dma_alloc = direction_to_device;
1096
1097 if (dif_local_dma_alloc) {
1098 u32 track_difbundl_buf = 0;
1099 u32 ldma_sg_len = 0;
1100 u8 ldma_needed = 1;
1101
1102 difctx->no_dif_bundl = 0;
1103 difctx->dif_bundl_len = 0;
1104
1105 /* Track DSD buffers */
1106 INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1107 /* Track local DMA buffers */
1108 INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1109
1110 for_each_sg(sgl, sg, tot_dsds, i) {
1111 u32 sglen = sg_dma_len(sg);
1112
1113 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1114 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1115 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1116 difctx->dif_bundl_len, ldma_needed);
1117
1118 while (sglen) {
1119 u32 xfrlen = 0;
1120
1121 if (ldma_needed) {
1122 /*
1123 * Allocate list item to store
1124 * the DMA buffers
1125 */
1126 dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1127 GFP_ATOMIC);
1128 if (!dsd_ptr) {
1129 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1130 "%s: failed alloc dsd_ptr\n",
1131 __func__);
1132 return 1;
1133 }
1134 ha->dif_bundle_kallocs++;
1135
1136 /* allocate dma buffer */
1137 dsd_ptr->dsd_addr = dma_pool_alloc
1138 (ha->dif_bundl_pool, GFP_ATOMIC,
1139 &dsd_ptr->dsd_list_dma);
1140 if (!dsd_ptr->dsd_addr) {
1141 ql_dbg(ql_dbg_tgt, vha, 0xe024,
1142 "%s: failed alloc ->dsd_ptr\n",
1143 __func__);
1144 /*
1145 * need to cleanup only this
1146 * dsd_ptr rest will be done
1147 * by sp_free_dma()
1148 */
1149 kfree(dsd_ptr);
1150 ha->dif_bundle_kallocs--;
1151 return 1;
1152 }
1153 ha->dif_bundle_dma_allocs++;
1154 ldma_needed = 0;
1155 difctx->no_dif_bundl++;
1156 list_add_tail(&dsd_ptr->list,
1157 &difctx->ldif_dma_hndl_list);
1158 }
1159
1160 /* xfrlen is min of dma pool size and sglen */
1161 xfrlen = (sglen >
1162 (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1163 DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1164 sglen;
1165
1166 /* replace with local allocated dma buffer */
1167 sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1168 dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1169 difctx->dif_bundl_len);
1170 difctx->dif_bundl_len += xfrlen;
1171 sglen -= xfrlen;
1172 ldma_sg_len += xfrlen;
1173 if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1174 sg_is_last(sg)) {
1175 ldma_needed = 1;
1176 ldma_sg_len = 0;
1177 }
1178 }
1179 }
1180
1181 track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1182 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1183 "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1184 difctx->dif_bundl_len, difctx->no_dif_bundl,
1185 track_difbundl_buf);
1186
1187 if (sp)
1188 sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1189 else
1190 tc->prot_flags = DIF_BUNDL_DMA_VALID;
1191
1192 list_for_each_entry_safe(dif_dsd, nxt_dsd,
1193 &difctx->ldif_dma_hndl_list, list) {
1194 u32 sglen = (difctx->dif_bundl_len >
1195 DIF_BUNDLING_DMA_POOL_SIZE) ?
1196 DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1197
1198 BUG_ON(track_difbundl_buf == 0);
1199
1200 /* Allocate additional continuation packets? */
1201 if (avail_dsds == 0) {
1202 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1203 0xe024,
1204 "%s: adding continuation iocb's\n",
1205 __func__);
1206 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1207 QLA_DSDS_PER_IOCB : used_dsds;
1208 dsd_list_len = (avail_dsds + 1) * 12;
1209 used_dsds -= avail_dsds;
1210
1211 /* allocate tracking DS */
1212 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1213 if (!dsd_ptr) {
1214 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1215 "%s: failed alloc dsd_ptr\n",
1216 __func__);
1217 return 1;
1218 }
1219 ha->dif_bundle_kallocs++;
1220
1221 difctx->no_ldif_dsd++;
1222 /* allocate new list */
1223 dsd_ptr->dsd_addr =
1224 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1225 &dsd_ptr->dsd_list_dma);
1226 if (!dsd_ptr->dsd_addr) {
1227 ql_dbg(ql_dbg_tgt, vha, 0xe026,
1228 "%s: failed alloc ->dsd_addr\n",
1229 __func__);
1230 /*
1231 * need to cleanup only this dsd_ptr
1232 * rest will be done by sp_free_dma()
1233 */
1234 kfree(dsd_ptr);
1235 ha->dif_bundle_kallocs--;
1236 return 1;
1237 }
1238 ha->dif_bundle_dma_allocs++;
1239
1240 if (sp) {
1241 list_add_tail(&dsd_ptr->list,
1242 &difctx->ldif_dsd_list);
1243 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1244 } else {
1245 list_add_tail(&dsd_ptr->list,
1246 &difctx->ldif_dsd_list);
1247 tc->ctx_dsd_alloced = 1;
1248 }
1249
1250 /* add new list to cmd iocb or last list */
1251 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1252 &cur_dsd->address);
1253 cur_dsd->length = cpu_to_le32(dsd_list_len);
1254 cur_dsd = dsd_ptr->dsd_addr;
1255 }
1256 put_unaligned_le64(dif_dsd->dsd_list_dma,
1257 &cur_dsd->address);
1258 cur_dsd->length = cpu_to_le32(sglen);
1259 cur_dsd++;
1260 avail_dsds--;
1261 difctx->dif_bundl_len -= sglen;
1262 track_difbundl_buf--;
1263 }
1264
1265 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1266 "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1267 difctx->no_ldif_dsd, difctx->no_dif_bundl);
1268 } else {
1269 for_each_sg(sgl, sg, tot_dsds, i) {
1270 /* Allocate additional continuation packets? */
1271 if (avail_dsds == 0) {
1272 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1273 QLA_DSDS_PER_IOCB : used_dsds;
1274 dsd_list_len = (avail_dsds + 1) * 12;
1275 used_dsds -= avail_dsds;
1276
1277 /* allocate tracking DS */
1278 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1279 if (!dsd_ptr) {
1280 ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1281 vha, 0xe027,
1282 "%s: failed alloc dsd_dma...\n",
1283 __func__);
1284 return 1;
1285 }
1286
1287 /* allocate new list */
1288 dsd_ptr->dsd_addr =
1289 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1290 &dsd_ptr->dsd_list_dma);
1291 if (!dsd_ptr->dsd_addr) {
1292 /* need to cleanup only this dsd_ptr */
1293 /* rest will be done by sp_free_dma() */
1294 kfree(dsd_ptr);
1295 return 1;
1296 }
1297
1298 if (sp) {
1299 list_add_tail(&dsd_ptr->list,
1300 &difctx->dsd_list);
1301 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1302 } else {
1303 list_add_tail(&dsd_ptr->list,
1304 &difctx->dsd_list);
1305 tc->ctx_dsd_alloced = 1;
1306 }
1307
1308 /* add new list to cmd iocb or last list */
1309 put_unaligned_le64(dsd_ptr->dsd_list_dma,
1310 &cur_dsd->address);
1311 cur_dsd->length = cpu_to_le32(dsd_list_len);
1312 cur_dsd = dsd_ptr->dsd_addr;
1313 }
1314 append_dsd64(&cur_dsd, sg);
1315 avail_dsds--;
1316 }
1317 }
1318 /* Null termination */
1319 cur_dsd->address = 0;
1320 cur_dsd->length = 0;
1321 cur_dsd++;
1322 return 0;
1323 }
1324
1325 /**
1326 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1327 * Type 6 IOCB types.
1328 *
1329 * @sp: SRB command to process
1330 * @cmd_pkt: Command Type CRC_2 IOCB
1331 * @tot_dsds: Total number of segments to transfer
1332 * @tot_prot_dsds: Total number of segments with protection information
1333 * @fw_prot_opts: Protection options to be passed to firmware
1334 */
1335 static inline int
1336 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1337 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1338 {
1339 struct dsd64 *cur_dsd;
1340 __be32 *fcp_dl;
1341 scsi_qla_host_t *vha;
1342 struct scsi_cmnd *cmd;
1343 uint32_t total_bytes = 0;
1344 uint32_t data_bytes;
1345 uint32_t dif_bytes;
1346 uint8_t bundling = 1;
1347 uint16_t blk_size;
1348 struct crc_context *crc_ctx_pkt = NULL;
1349 struct qla_hw_data *ha;
1350 uint8_t additional_fcpcdb_len;
1351 uint16_t fcp_cmnd_len;
1352 struct fcp_cmnd *fcp_cmnd;
1353 dma_addr_t crc_ctx_dma;
1354
1355 cmd = GET_CMD_SP(sp);
1356
1357 /* Update entry type to indicate Command Type CRC_2 IOCB */
1358 put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1359
1360 vha = sp->vha;
1361 ha = vha->hw;
1362
1363 /* No data transfer */
1364 data_bytes = scsi_bufflen(cmd);
1365 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1366 cmd_pkt->byte_count = cpu_to_le32(0);
1367 return QLA_SUCCESS;
1368 }
1369
1370 cmd_pkt->vp_index = sp->vha->vp_idx;
1371
1372 /* Set transfer direction */
1373 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1374 cmd_pkt->control_flags =
1375 cpu_to_le16(CF_WRITE_DATA);
1376 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1377 cmd_pkt->control_flags =
1378 cpu_to_le16(CF_READ_DATA);
1379 }
1380
1381 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1382 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1383 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1384 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1385 bundling = 0;
1386
1387 /* Allocate CRC context from global pool */
1388 crc_ctx_pkt = sp->u.scmd.crc_ctx =
1389 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1390
1391 if (!crc_ctx_pkt)
1392 goto crc_queuing_error;
1393
1394 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1395
1396 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1397
1398 /* Set handle */
1399 crc_ctx_pkt->handle = cmd_pkt->handle;
1400
1401 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1402
1403 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1404 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1405
1406 put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1407 cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1408
1409 /* Determine SCSI command length -- align to 4 byte boundary */
1410 if (cmd->cmd_len > 16) {
1411 additional_fcpcdb_len = cmd->cmd_len - 16;
1412 if ((cmd->cmd_len % 4) != 0) {
1413 /* SCSI cmd > 16 bytes must be multiple of 4 */
1414 goto crc_queuing_error;
1415 }
1416 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1417 } else {
1418 additional_fcpcdb_len = 0;
1419 fcp_cmnd_len = 12 + 16 + 4;
1420 }
1421
1422 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1423
1424 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1425 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1426 fcp_cmnd->additional_cdb_len |= 1;
1427 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1428 fcp_cmnd->additional_cdb_len |= 2;
1429
1430 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1431 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1432 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1433 put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1434 &cmd_pkt->fcp_cmnd_dseg_address);
1435 fcp_cmnd->task_management = 0;
1436 fcp_cmnd->task_attribute = TSK_SIMPLE;
1437
1438 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1439
1440 /* Compute dif len and adjust data len to include protection */
1441 dif_bytes = 0;
1442 blk_size = cmd->device->sector_size;
1443 dif_bytes = (data_bytes / blk_size) * 8;
1444
1445 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1446 case SCSI_PROT_READ_INSERT:
1447 case SCSI_PROT_WRITE_STRIP:
1448 total_bytes = data_bytes;
1449 data_bytes += dif_bytes;
1450 break;
1451
1452 case SCSI_PROT_READ_STRIP:
1453 case SCSI_PROT_WRITE_INSERT:
1454 case SCSI_PROT_READ_PASS:
1455 case SCSI_PROT_WRITE_PASS:
1456 total_bytes = data_bytes + dif_bytes;
1457 break;
1458 default:
1459 BUG();
1460 }
1461
1462 if (!qla2x00_hba_err_chk_enabled(sp))
1463 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1464 /* HBA error checking enabled */
1465 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1466 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1467 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1468 SCSI_PROT_DIF_TYPE2))
1469 fw_prot_opts |= BIT_10;
1470 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1471 SCSI_PROT_DIF_TYPE3)
1472 fw_prot_opts |= BIT_11;
1473 }
1474
1475 if (!bundling) {
1476 cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1477 } else {
1478 /*
1479 * Configure bundling if we need to fetch interleaving
1480 * protection PCI accesses
1481 */
1482 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1483 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1484 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1485 tot_prot_dsds);
1486 cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1487 }
1488
1489 /* Finish the common fields of CRC pkt */
1490 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1491 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1492 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1493 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1494 /* Fibre channel byte count */
1495 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1496 fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1497 additional_fcpcdb_len);
1498 *fcp_dl = htonl(total_bytes);
1499
1500 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1501 cmd_pkt->byte_count = cpu_to_le32(0);
1502 return QLA_SUCCESS;
1503 }
1504 /* Walks data segments */
1505
1506 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1507
1508 if (!bundling && tot_prot_dsds) {
1509 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1510 cur_dsd, tot_dsds, NULL))
1511 goto crc_queuing_error;
1512 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1513 (tot_dsds - tot_prot_dsds), NULL))
1514 goto crc_queuing_error;
1515
1516 if (bundling && tot_prot_dsds) {
1517 /* Walks dif segments */
1518 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1519 cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1520 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1521 tot_prot_dsds, NULL))
1522 goto crc_queuing_error;
1523 }
1524 return QLA_SUCCESS;
1525
1526 crc_queuing_error:
1527 /* Cleanup will be performed by the caller */
1528
1529 return QLA_FUNCTION_FAILED;
1530 }
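/*
 * Illustrative sketch (editor's example): the byte accounting above for a
 * 4096-byte write with 512-byte sectors. dif_bytes = (4096 / 512) * 8 = 64.
 * For SCSI_PROT_WRITE_PASS the wire total is total_bytes = 4096 + 64 = 4160,
 * which is what both cmd_pkt->byte_count and the fcp_dl field carry, while
 * crc_ctx_pkt->byte_count keeps the 4096 data bytes.
 */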
1531
1532 /**
1533 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1534 * @sp: command to send to the ISP
1535 *
1536 * Returns non-zero if a failure occurred, else zero.
1537 */
1538 int
1539 qla24xx_start_scsi(srb_t *sp)
1540 {
1541 int nseg;
1542 unsigned long flags;
1543 uint32_t *clr_ptr;
1544 uint32_t handle;
1545 struct cmd_type_7 *cmd_pkt;
1546 uint16_t cnt;
1547 uint16_t req_cnt;
1548 uint16_t tot_dsds;
1549 struct req_que *req = NULL;
1550 struct rsp_que *rsp;
1551 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1552 struct scsi_qla_host *vha = sp->vha;
1553 struct qla_hw_data *ha = vha->hw;
1554
1555 if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1556 return qla28xx_start_scsi_edif(sp);
1557
1558 /* Setup device pointers. */
1559 req = vha->req;
1560 rsp = req->rsp;
1561
1562 /* So we know we haven't pci_map'ed anything yet */
1563 tot_dsds = 0;
1564
1565 /* Send marker if required */
1566 if (vha->marker_needed != 0) {
1567 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1568 QLA_SUCCESS)
1569 return QLA_FUNCTION_FAILED;
1570 vha->marker_needed = 0;
1571 }
1572
1573 /* Acquire ring specific lock */
1574 spin_lock_irqsave(&ha->hardware_lock, flags);
1575
1576 handle = qla2xxx_get_next_handle(req);
1577 if (handle == 0)
1578 goto queuing_error;
1579
1580 /* Map the sg table so we have an accurate count of sg entries needed */
1581 if (scsi_sg_count(cmd)) {
1582 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1583 scsi_sg_count(cmd), cmd->sc_data_direction);
1584 if (unlikely(!nseg))
1585 goto queuing_error;
1586 } else
1587 nseg = 0;
1588
1589 tot_dsds = nseg;
1590 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1591
1592 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1593 sp->iores.exch_cnt = 1;
1594 sp->iores.iocb_cnt = req_cnt;
1595 if (qla_get_fw_resources(sp->qpair, &sp->iores))
1596 goto queuing_error;
1597
1598 if (req->cnt < (req_cnt + 2)) {
1599 if (IS_SHADOW_REG_CAPABLE(ha)) {
1600 cnt = *req->out_ptr;
1601 } else {
1602 cnt = rd_reg_dword_relaxed(req->req_q_out);
1603 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1604 goto queuing_error;
1605 }
1606
1607 if (req->ring_index < cnt)
1608 req->cnt = cnt - req->ring_index;
1609 else
1610 req->cnt = req->length -
1611 (req->ring_index - cnt);
1612 if (req->cnt < (req_cnt + 2))
1613 goto queuing_error;
1614 }
1615
1616 /* Build command packet. */
1617 req->current_outstanding_cmd = handle;
1618 req->outstanding_cmds[handle] = sp;
1619 sp->handle = handle;
1620 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1621 req->cnt -= req_cnt;
1622
1623 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1624 cmd_pkt->handle = make_handle(req->id, handle);
1625
1626 /* Zero out remaining portion of packet. */
1627 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1628 clr_ptr = (uint32_t *)cmd_pkt + 2;
1629 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1630 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1631
1632 /* Set NPORT-ID and LUN number*/
1633 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1634 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1635 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1636 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1637 cmd_pkt->vp_index = sp->vha->vp_idx;
1638
1639 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1640 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1641
1642 cmd_pkt->task = TSK_SIMPLE;
1643
1644 /* Load SCSI command packet. */
1645 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1646 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1647
1648 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1649
1650 /* Build IOCB segments */
1651 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1652
1653 /* Set total data segment count. */
1654 cmd_pkt->entry_count = (uint8_t)req_cnt;
1655 wmb();
1656 /* Adjust ring index. */
1657 req->ring_index++;
1658 if (req->ring_index == req->length) {
1659 req->ring_index = 0;
1660 req->ring_ptr = req->ring;
1661 } else
1662 req->ring_ptr++;
1663
1664 sp->qpair->cmd_cnt++;
1665 sp->flags |= SRB_DMA_VALID;
1666
1667 /* Set chip new ring index. */
1668 wrt_reg_dword(req->req_q_in, req->ring_index);
1669
1670 /* Manage unprocessed RIO/ZIO commands in response queue. */
1671 if (vha->flags.process_response_queue &&
1672 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1673 qla24xx_process_response_queue(vha, rsp);
1674
1675 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1676 return QLA_SUCCESS;
1677
1678 queuing_error:
1679 if (tot_dsds)
1680 scsi_dma_unmap(cmd);
1681
1682 qla_put_fw_resources(sp->qpair, &sp->iores);
1683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1684
1685 return QLA_FUNCTION_FAILED;
1686 }
1687
1688 /**
1689 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1690 * @sp: command to send to the ISP
1691 *
1692 * Returns non-zero if a failure occurred, else zero.
1693 */
1694 int
1695 qla24xx_dif_start_scsi(srb_t *sp)
1696 {
1697 int nseg;
1698 unsigned long flags;
1699 uint32_t *clr_ptr;
1700 uint32_t handle;
1701 uint16_t cnt;
1702 uint16_t req_cnt = 0;
1703 uint16_t tot_dsds;
1704 uint16_t tot_prot_dsds;
1705 uint16_t fw_prot_opts = 0;
1706 struct req_que *req = NULL;
1707 struct rsp_que *rsp = NULL;
1708 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1709 struct scsi_qla_host *vha = sp->vha;
1710 struct qla_hw_data *ha = vha->hw;
1711 struct cmd_type_crc_2 *cmd_pkt;
1712 uint32_t status = 0;
1713
1714 #define QDSS_GOT_Q_SPACE BIT_0
1715
1716 /* Only process protection or >16 cdb in this routine */
1717 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1718 if (cmd->cmd_len <= 16)
1719 return qla24xx_start_scsi(sp);
1720 else
1721 return qla_start_scsi_type6(sp);
1722 }
1723
1724 /* Setup device pointers. */
1725 req = vha->req;
1726 rsp = req->rsp;
1727
1728 /* So we know we haven't pci_map'ed anything yet */
1729 tot_dsds = 0;
1730
1731 /* Send marker if required */
1732 if (vha->marker_needed != 0) {
1733 if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1734 QLA_SUCCESS)
1735 return QLA_FUNCTION_FAILED;
1736 vha->marker_needed = 0;
1737 }
1738
1739 /* Acquire ring specific lock */
1740 spin_lock_irqsave(&ha->hardware_lock, flags);
1741
1742 handle = qla2xxx_get_next_handle(req);
1743 if (handle == 0)
1744 goto queuing_error;
1745
1746 /* Compute number of required data segments */
1747 /* Map the sg table so we have an accurate count of sg entries needed */
1748 if (scsi_sg_count(cmd)) {
1749 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1750 scsi_sg_count(cmd), cmd->sc_data_direction);
1751 if (unlikely(!nseg))
1752 goto queuing_error;
1753 else
1754 sp->flags |= SRB_DMA_VALID;
1755
1756 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1757 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1758 struct qla2_sgx sgx;
1759 uint32_t partial;
1760
1761 memset(&sgx, 0, sizeof(struct qla2_sgx));
1762 sgx.tot_bytes = scsi_bufflen(cmd);
1763 sgx.cur_sg = scsi_sglist(cmd);
1764 sgx.sp = sp;
1765
1766 nseg = 0;
1767 while (qla24xx_get_one_block_sg(
1768 cmd->device->sector_size, &sgx, &partial))
1769 nseg++;
1770 }
1771 } else
1772 nseg = 0;
1773
1774 /* number of required data segments */
1775 tot_dsds = nseg;
1776
1777 /* Compute number of required protection segments */
1778 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1779 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1780 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1781 if (unlikely(!nseg))
1782 goto queuing_error;
1783 else
1784 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1785
1786 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1787 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1788 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1789 }
1790 } else {
1791 nseg = 0;
1792 }
1793
1794 req_cnt = 1;
1795 /* Total Data and protection sg segment(s) */
1796 tot_prot_dsds = nseg;
1797 tot_dsds += nseg;
1798
1799 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1800 sp->iores.exch_cnt = 1;
1801 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1802 if (qla_get_fw_resources(sp->qpair, &sp->iores))
1803 goto queuing_error;
1804
1805 if (req->cnt < (req_cnt + 2)) {
1806 if (IS_SHADOW_REG_CAPABLE(ha)) {
1807 cnt = *req->out_ptr;
1808 } else {
1809 cnt = rd_reg_dword_relaxed(req->req_q_out);
1810 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1811 goto queuing_error;
1812 }
1813 if (req->ring_index < cnt)
1814 req->cnt = cnt - req->ring_index;
1815 else
1816 req->cnt = req->length -
1817 (req->ring_index - cnt);
1818 if (req->cnt < (req_cnt + 2))
1819 goto queuing_error;
1820 }
1821
1822 status |= QDSS_GOT_Q_SPACE;
1823
1824 /* Build header part of command packet (excluding the OPCODE). */
1825 req->current_outstanding_cmd = handle;
1826 req->outstanding_cmds[handle] = sp;
1827 sp->handle = handle;
1828 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1829 req->cnt -= req_cnt;
1830
1831 /* Fill-in common area */
1832 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1833 cmd_pkt->handle = make_handle(req->id, handle);
1834
1835 clr_ptr = (uint32_t *)cmd_pkt + 2;
1836 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1837
1838 /* Set NPORT-ID and LUN number*/
1839 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1840 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1841 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1842 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1843
1844 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1845 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1846
1847 /* Total Data and protection segment(s) */
1848 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1849
1850 /* Build IOCB segments and adjust for data protection segments */
1851 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1852 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1853 QLA_SUCCESS)
1854 goto queuing_error;
1855
1856 cmd_pkt->entry_count = (uint8_t)req_cnt;
1857 /* Specify response queue number where completion should happen */
1858 cmd_pkt->entry_status = (uint8_t) rsp->id;
1859 cmd_pkt->timeout = cpu_to_le16(0);
1860 wmb();
1861
1862 /* Adjust ring index. */
1863 req->ring_index++;
1864 if (req->ring_index == req->length) {
1865 req->ring_index = 0;
1866 req->ring_ptr = req->ring;
1867 } else
1868 req->ring_ptr++;
1869
1870 sp->qpair->cmd_cnt++;
1871 /* Set chip new ring index. */
1872 wrt_reg_dword(req->req_q_in, req->ring_index);
1873
1874 /* Manage unprocessed RIO/ZIO commands in response queue. */
1875 if (vha->flags.process_response_queue &&
1876 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1877 qla24xx_process_response_queue(vha, rsp);
1878
1879 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1880
1881 return QLA_SUCCESS;
1882
1883 queuing_error:
1884 if (status & QDSS_GOT_Q_SPACE) {
1885 req->outstanding_cmds[handle] = NULL;
1886 req->cnt += req_cnt;
1887 }
1888 /* Cleanup will be performed by the caller (queuecommand) */
1889
1890 qla_put_fw_resources(sp->qpair, &sp->iores);
1891 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1892
1893 return QLA_FUNCTION_FAILED;
1894 }
1895
1896 /**
1897 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1898 * @sp: command to send to the ISP
1899 *
1900 * Returns non-zero if a failure occurred, else zero.
1901 */
1902 static int
1903 qla2xxx_start_scsi_mq(srb_t *sp)
1904 {
1905 int nseg;
1906 unsigned long flags;
1907 uint32_t *clr_ptr;
1908 uint32_t handle;
1909 struct cmd_type_7 *cmd_pkt;
1910 uint16_t cnt;
1911 uint16_t req_cnt;
1912 uint16_t tot_dsds;
1913 struct req_que *req = NULL;
1914 struct rsp_que *rsp;
1915 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1916 struct scsi_qla_host *vha = sp->fcport->vha;
1917 struct qla_hw_data *ha = vha->hw;
1918 struct qla_qpair *qpair = sp->qpair;
1919
1920 if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1921 return qla28xx_start_scsi_edif(sp);
1922
1923 /* Acquire qpair specific lock */
1924 spin_lock_irqsave(&qpair->qp_lock, flags);
1925
1926 /* Setup qpair pointers */
1927 req = qpair->req;
1928 rsp = qpair->rsp;
1929
1930 /* So we know we haven't pci_map'ed anything yet */
1931 tot_dsds = 0;
1932
1933 /* Send marker if required */
1934 if (vha->marker_needed != 0) {
1935 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1936 QLA_SUCCESS) {
1937 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1938 return QLA_FUNCTION_FAILED;
1939 }
1940 vha->marker_needed = 0;
1941 }
1942
1943 handle = qla2xxx_get_next_handle(req);
1944 if (handle == 0)
1945 goto queuing_error;
1946
1947 /* Map the sg table so we have an accurate count of sg entries needed */
1948 if (scsi_sg_count(cmd)) {
1949 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1950 scsi_sg_count(cmd), cmd->sc_data_direction);
1951 if (unlikely(!nseg))
1952 goto queuing_error;
1953 } else
1954 nseg = 0;
1955
1956 tot_dsds = nseg;
1957 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1958
1959 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1960 sp->iores.exch_cnt = 1;
1961 sp->iores.iocb_cnt = req_cnt;
1962 if (qla_get_fw_resources(sp->qpair, &sp->iores))
1963 goto queuing_error;
1964
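/*
 * Ring-space check: req->cnt caches the number of free request-queue
 * entries, with two entries of slack kept (presumably so a full ring is
 * never mistaken for an empty one). If the cached count looks too
 * small, re-read the consumer index - from the shadow copy in host
 * memory when supported, otherwise from the out-pointer register - and
 * recompute the free space before giving up.
 */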
1965 if (req->cnt < (req_cnt + 2)) {
1966 if (IS_SHADOW_REG_CAPABLE(ha)) {
1967 cnt = *req->out_ptr;
1968 } else {
1969 cnt = rd_reg_dword_relaxed(req->req_q_out);
1970 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1971 goto queuing_error;
1972 }
1973
1974 if (req->ring_index < cnt)
1975 req->cnt = cnt - req->ring_index;
1976 else
1977 req->cnt = req->length -
1978 (req->ring_index - cnt);
1979 if (req->cnt < (req_cnt + 2))
1980 goto queuing_error;
1981 }
1982
1983 /* Build command packet. */
1984 req->current_outstanding_cmd = handle;
1985 req->outstanding_cmds[handle] = sp;
1986 sp->handle = handle;
1987 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1988 req->cnt -= req_cnt;
1989
1990 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1991 cmd_pkt->handle = make_handle(req->id, handle);
1992
1993 /* Zero out remaining portion of packet. */
1994 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1995 clr_ptr = (uint32_t *)cmd_pkt + 2;
1996 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1997 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1998
1999 /* Set NPORT-ID and LUN number*/
2000 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2001 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2002 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2003 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2004 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2005
2006 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2007 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2008
2009 cmd_pkt->task = TSK_SIMPLE;
2010
2011 /* Load SCSI command packet. */
2012 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2013 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2014
2015 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2016
2017 /* Build IOCB segments */
2018 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2019
2020 /* Set total data segment count. */
2021 cmd_pkt->entry_count = (uint8_t)req_cnt;
2022 wmb();
2023 /* Adjust ring index. */
2024 req->ring_index++;
2025 if (req->ring_index == req->length) {
2026 req->ring_index = 0;
2027 req->ring_ptr = req->ring;
2028 } else
2029 req->ring_ptr++;
2030
2031 sp->qpair->cmd_cnt++;
2032 sp->flags |= SRB_DMA_VALID;
2033
2034 /* Set chip new ring index. */
2035 wrt_reg_dword(req->req_q_in, req->ring_index);
2036
2037 /* Manage unprocessed RIO/ZIO commands in response queue. */
2038 if (vha->flags.process_response_queue &&
2039 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2040 qla24xx_process_response_queue(vha, rsp);
2041
2042 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2043 return QLA_SUCCESS;
2044
2045 queuing_error:
2046 if (tot_dsds)
2047 scsi_dma_unmap(cmd);
2048
2049 qla_put_fw_resources(sp->qpair, &sp->iores);
2050 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2051
2052 return QLA_FUNCTION_FAILED;
2053 }
2054
2055
2056 /**
2057 * qla2xxx_dif_start_scsi_mq() - Send a DIF/DIX protected SCSI command to the ISP
2058 * @sp: command to send to the ISP
2059 *
2060 * Returns non-zero if a failure occurred, else zero.
2061 */
2062 int
2063 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2064 {
2065 int nseg;
2066 unsigned long flags;
2067 uint32_t *clr_ptr;
2068 uint32_t handle;
2069 uint16_t cnt;
2070 uint16_t req_cnt = 0;
2071 uint16_t tot_dsds;
2072 uint16_t tot_prot_dsds;
2073 uint16_t fw_prot_opts = 0;
2074 struct req_que *req = NULL;
2075 struct rsp_que *rsp = NULL;
2076 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
2077 struct scsi_qla_host *vha = sp->fcport->vha;
2078 struct qla_hw_data *ha = vha->hw;
2079 struct cmd_type_crc_2 *cmd_pkt;
2080 uint32_t status = 0;
2081 struct qla_qpair *qpair = sp->qpair;
2082
2083 #define QDSS_GOT_Q_SPACE BIT_0
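/*
 * QDSS_GOT_Q_SPACE records that request-queue space and an
 * outstanding-command slot have been claimed, so the error path knows
 * whether they must be given back.
 */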
2084
2085 /* Check for host side state */
2086 if (!qpair->online) {
2087 cmd->result = DID_NO_CONNECT << 16;
2088 return QLA_INTERFACE_ERROR;
2089 }
2090
2091 if (!qpair->difdix_supported &&
2092 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2093 cmd->result = DID_NO_CONNECT << 16;
2094 return QLA_INTERFACE_ERROR;
2095 }
2096
2097 /* Only process protection ops or CDBs longer than 16 bytes in this routine */
2098 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2099 if (cmd->cmd_len <= 16)
2100 return qla2xxx_start_scsi_mq(sp);
2101 else
2102 return qla_start_scsi_type6(sp);
2103 }
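/*
 * From here on the command is known to carry protection data, so it is
 * built as a Command Type CRC_2 (DIF) IOCB rather than a plain
 * Command Type 7.
 */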
2104
2105 spin_lock_irqsave(&qpair->qp_lock, flags);
2106
2107 /* Setup qpair pointers */
2108 rsp = qpair->rsp;
2109 req = qpair->req;
2110
2111 /* So we know we haven't pci_map'ed anything yet */
2112 tot_dsds = 0;
2113
2114 /* Send marker if required */
2115 if (vha->marker_needed != 0) {
2116 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2117 QLA_SUCCESS) {
2118 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2119 return QLA_FUNCTION_FAILED;
2120 }
2121 vha->marker_needed = 0;
2122 }
2123
2124 handle = qla2xxx_get_next_handle(req);
2125 if (handle == 0)
2126 goto queuing_error;
2127
2128 /* Compute number of required data segments */
2129 /* Map the sg table so we have an accurate count of sg entries needed */
2130 if (scsi_sg_count(cmd)) {
2131 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2132 scsi_sg_count(cmd), cmd->sc_data_direction);
2133 if (unlikely(!nseg))
2134 goto queuing_error;
2135 else
2136 sp->flags |= SRB_DMA_VALID;
2137
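/*
 * For insert/strip protection operations the data segment count is
 * recomputed by walking the scatterlist one logical block (sector_size
 * bytes) at a time, since that is the granularity at which the DIF
 * IOCB describes the data to the firmware.
 */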
2138 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2139 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2140 struct qla2_sgx sgx;
2141 uint32_t partial;
2142
2143 memset(&sgx, 0, sizeof(struct qla2_sgx));
2144 sgx.tot_bytes = scsi_bufflen(cmd);
2145 sgx.cur_sg = scsi_sglist(cmd);
2146 sgx.sp = sp;
2147
2148 nseg = 0;
2149 while (qla24xx_get_one_block_sg(
2150 cmd->device->sector_size, &sgx, &partial))
2151 nseg++;
2152 }
2153 } else
2154 nseg = 0;
2155
2156 /* number of required data segments */
2157 tot_dsds = nseg;
2158
2159 /* Compute number of required protection segments */
2160 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2161 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2162 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2163 if (unlikely(!nseg))
2164 goto queuing_error;
2165 else
2166 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2167
2168 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2169 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2170 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2171 }
2172 } else {
2173 nseg = 0;
2174 }
2175
2176 req_cnt = 1;
2177 /* Total Data and protection sg segment(s) */
2178 tot_prot_dsds = nseg;
2179 tot_dsds += nseg;
2180
2181 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
2182 sp->iores.exch_cnt = 1;
2183 sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2184 if (qla_get_fw_resources(sp->qpair, &sp->iores))
2185 goto queuing_error;
2186
2187 if (req->cnt < (req_cnt + 2)) {
2188 if (IS_SHADOW_REG_CAPABLE(ha)) {
2189 cnt = *req->out_ptr;
2190 } else {
2191 cnt = rd_reg_dword_relaxed(req->req_q_out);
2192 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2193 goto queuing_error;
2194 }
2195
2196 if (req->ring_index < cnt)
2197 req->cnt = cnt - req->ring_index;
2198 else
2199 req->cnt = req->length -
2200 (req->ring_index - cnt);
2201 if (req->cnt < (req_cnt + 2))
2202 goto queuing_error;
2203 }
2204
2205 status |= QDSS_GOT_Q_SPACE;
2206
2207 /* Build header part of command packet (excluding the OPCODE). */
2208 req->current_outstanding_cmd = handle;
2209 req->outstanding_cmds[handle] = sp;
2210 sp->handle = handle;
2211 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2212 req->cnt -= req_cnt;
2213
2214 /* Fill-in common area */
2215 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2216 cmd_pkt->handle = make_handle(req->id, handle);
2217
2218 clr_ptr = (uint32_t *)cmd_pkt + 2;
2219 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2220
2221 /* Set NPORT-ID and LUN number*/
2222 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2223 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2224 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2225 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2226
2227 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2228 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2229
2230 /* Total Data and protection segment(s) */
2231 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2232
2233 /* Build IOCB segments and adjust for data protection segments */
2234 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2235 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2236 QLA_SUCCESS)
2237 goto queuing_error;
2238
2239 cmd_pkt->entry_count = (uint8_t)req_cnt;
2240 cmd_pkt->timeout = cpu_to_le16(0);
2241 wmb();
2242
2243 /* Adjust ring index. */
2244 req->ring_index++;
2245 if (req->ring_index == req->length) {
2246 req->ring_index = 0;
2247 req->ring_ptr = req->ring;
2248 } else
2249 req->ring_ptr++;
2250
2251 sp->qpair->cmd_cnt++;
2252 /* Set chip new ring index. */
2253 wrt_reg_dword(req->req_q_in, req->ring_index);
2254
2255 /* Manage unprocessed RIO/ZIO commands in response queue. */
2256 if (vha->flags.process_response_queue &&
2257 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2258 qla24xx_process_response_queue(vha, rsp);
2259
2260 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2261
2262 return QLA_SUCCESS;
2263
2264 queuing_error:
2265 if (status & QDSS_GOT_Q_SPACE) {
2266 req->outstanding_cmds[handle] = NULL;
2267 req->cnt += req_cnt;
2268 }
2269 /* Cleanup will be performed by the caller (queuecommand) */
2270
2271 qla_put_fw_resources(sp->qpair, &sp->iores);
2272 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2273
2274 return QLA_FUNCTION_FAILED;
2275 }
2276
2277 /* Generic Control-SRB manipulation functions. */
2278
2279 /* hardware_lock assumed to be held. */
2280
2281 void *
2282 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2283 {
2284 scsi_qla_host_t *vha = qpair->vha;
2285 struct qla_hw_data *ha = vha->hw;
2286 struct req_que *req = qpair->req;
2287 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2288 uint32_t handle;
2289 request_t *pkt;
2290 uint16_t cnt, req_cnt;
2291
2292 pkt = NULL;
2293 req_cnt = 1;
2294 handle = 0;
2295
2296 if (sp && (sp->type != SRB_SCSI_CMD)) {
2297 /* Adjust entry-counts as needed. */
2298 req_cnt = sp->iocbs;
2299 }
2300
2301 /* Check for room on request queue. */
2302 if (req->cnt < req_cnt + 2) {
2303 if (qpair->use_shadow_reg)
2304 cnt = *req->out_ptr;
2305 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2306 IS_QLA28XX(ha))
2307 cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2308 else if (IS_P3P_TYPE(ha))
2309 cnt = rd_reg_dword(reg->isp82.req_q_out);
2310 else if (IS_FWI2_CAPABLE(ha))
2311 cnt = rd_reg_dword(&reg->isp24.req_q_out);
2312 else if (IS_QLAFX00(ha))
2313 cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2314 else
2315 cnt = qla2x00_debounce_register(
2316 ISP_REQ_Q_OUT(ha, &reg->isp));
2317
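/*
 * A read of all ones typically means the adapter has dropped off the
 * PCI bus (hot removal or a fatal PCI error); schedule EEH recovery
 * instead of trusting the value.
 */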
2318 if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2319 qla_schedule_eeh_work(vha);
2320 return NULL;
2321 }
2322
2323 if (req->ring_index < cnt)
2324 req->cnt = cnt - req->ring_index;
2325 else
2326 req->cnt = req->length -
2327 (req->ring_index - cnt);
2328 }
2329 if (req->cnt < req_cnt + 2)
2330 goto queuing_error;
2331
2332 if (sp) {
2333 handle = qla2xxx_get_next_handle(req);
2334 if (handle == 0) {
2335 ql_log(ql_log_warn, vha, 0x700b,
2336 "No room on outstanding cmd array.\n");
2337 goto queuing_error;
2338 }
2339
2340 /* Prep command array. */
2341 req->current_outstanding_cmd = handle;
2342 req->outstanding_cmds[handle] = sp;
2343 sp->handle = handle;
2344 }
2345
2346 /* Prep packet */
2347 req->cnt -= req_cnt;
2348 pkt = req->ring_ptr;
2349 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2350 if (IS_QLAFX00(ha)) {
2351 wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2352 wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2353 } else {
2354 pkt->entry_count = req_cnt;
2355 pkt->handle = handle;
2356 }
2357
2358 return pkt;
2359
2360 queuing_error:
2361 qpair->tgt_counters.num_alloc_iocb_failed++;
2362 return pkt;
2363 }
2364
2365 void *
2366 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2367 {
2368 scsi_qla_host_t *vha = qpair->vha;
2369
2370 if (qla2x00_reset_active(vha))
2371 return NULL;
2372
2373 return __qla2x00_alloc_iocbs(qpair, sp);
2374 }
2375
2376 void *
2377 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2378 {
2379 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2380 }
2381
2382 static void
2383 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2384 {
2385 struct srb_iocb *lio = &sp->u.iocb_cmd;
2386
2387 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2388 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2389 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2390 logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2391 if (sp->vha->flags.nvme_first_burst)
2392 logio->io_parameter[0] =
2393 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2394 if (sp->vha->flags.nvme2_enabled) {
2395 /* Set service parameter BIT_7 for NVME CONF support */
2396 logio->io_parameter[0] |=
2397 cpu_to_le32(NVME_PRLI_SP_CONF);
2398 /* Set service parameter BIT_8 for SLER support */
2399 logio->io_parameter[0] |=
2400 cpu_to_le32(NVME_PRLI_SP_SLER);
2401 /* Set service parameter BIT_9 for PI control support */
2402 logio->io_parameter[0] |=
2403 cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2404 }
2405 }
2406
2407 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2408 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2409 logio->port_id[1] = sp->fcport->d_id.b.area;
2410 logio->port_id[2] = sp->fcport->d_id.b.domain;
2411 logio->vp_index = sp->vha->vp_idx;
2412 }
2413
2414 static void
2415 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2416 {
2417 struct srb_iocb *lio = &sp->u.iocb_cmd;
2418
2419 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2420 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2421
2422 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2423 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2424 } else {
2425 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2426 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2427 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2428 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2429 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2430 if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
2431 logio->control_flags |=
2432 cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
2433 logio->io_parameter[0] =
2434 cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
2435 }
2436 }
2437 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2438 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2439 logio->port_id[1] = sp->fcport->d_id.b.area;
2440 logio->port_id[2] = sp->fcport->d_id.b.domain;
2441 logio->vp_index = sp->vha->vp_idx;
2442 }
2443
2444 static void
2445 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2446 {
2447 struct qla_hw_data *ha = sp->vha->hw;
2448 struct srb_iocb *lio = &sp->u.iocb_cmd;
2449 uint16_t opts;
2450
2451 mbx->entry_type = MBX_IOCB_TYPE;
2452 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2453 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2454 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2455 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2456 if (HAS_EXTENDED_IDS(ha)) {
2457 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2458 mbx->mb10 = cpu_to_le16(opts);
2459 } else {
2460 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2461 }
2462 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2463 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2464 sp->fcport->d_id.b.al_pa);
2465 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2466 }
2467
2468 static void
2469 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2470 {
2471 u16 control_flags = LCF_COMMAND_LOGO;
2472 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2473
2474 if (sp->fcport->explicit_logout) {
2475 control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2476 } else {
2477 control_flags |= LCF_IMPL_LOGO;
2478
2479 if (!sp->fcport->keep_nport_handle)
2480 control_flags |= LCF_FREE_NPORT;
2481 }
2482
2483 logio->control_flags = cpu_to_le16(control_flags);
2484 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2485 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2486 logio->port_id[1] = sp->fcport->d_id.b.area;
2487 logio->port_id[2] = sp->fcport->d_id.b.domain;
2488 logio->vp_index = sp->vha->vp_idx;
2489 }
2490
2491 static void
2492 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2493 {
2494 struct qla_hw_data *ha = sp->vha->hw;
2495
2496 mbx->entry_type = MBX_IOCB_TYPE;
2497 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2498 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2499 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2500 cpu_to_le16(sp->fcport->loop_id) :
2501 cpu_to_le16(sp->fcport->loop_id << 8);
2502 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2503 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2504 sp->fcport->d_id.b.al_pa);
2505 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2506 /* Implicit: mbx->mb10 = 0. */
2507 }
2508
2509 static void
2510 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2511 {
2512 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2513 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2514 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2515 logio->vp_index = sp->vha->vp_idx;
2516 }
2517
2518 static void
2519 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2520 {
2521 struct qla_hw_data *ha = sp->vha->hw;
2522
2523 mbx->entry_type = MBX_IOCB_TYPE;
2524 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2525 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2526 if (HAS_EXTENDED_IDS(ha)) {
2527 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2528 mbx->mb10 = cpu_to_le16(BIT_0);
2529 } else {
2530 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2531 }
2532 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2533 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2534 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2535 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2536 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2537 }
2538
2539 static void
2540 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2541 {
2542 uint32_t flags;
2543 uint64_t lun;
2544 struct fc_port *fcport = sp->fcport;
2545 scsi_qla_host_t *vha = fcport->vha;
2546 struct qla_hw_data *ha = vha->hw;
2547 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2548 struct req_que *req = sp->qpair->req;
2549
2550 flags = iocb->u.tmf.flags;
2551 lun = iocb->u.tmf.lun;
2552
2553 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2554 tsk->entry_count = 1;
2555 tsk->handle = make_handle(req->id, tsk->handle);
2556 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
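/*
 * Firmware timeout for the task-management IOCB: twice R_A_TOV.
 * ha->r_a_tov is presumed to be kept in 100 ms units, hence the
 * divide by 10 to convert to seconds.
 */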
2557 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2558 tsk->control_flags = cpu_to_le32(flags);
2559 tsk->port_id[0] = fcport->d_id.b.al_pa;
2560 tsk->port_id[1] = fcport->d_id.b.area;
2561 tsk->port_id[2] = fcport->d_id.b.domain;
2562 tsk->vp_index = fcport->vha->vp_idx;
2563
2564 if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET|
2565 TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
2566 int_to_scsilun(lun, &tsk->lun);
2567 host_to_fcp_swap((uint8_t *)&tsk->lun,
2568 sizeof(tsk->lun));
2569 }
2570 }
2571
2572 static void
2573 qla2x00_async_done(struct srb *sp, int res)
2574 {
2575 if (del_timer(&sp->u.iocb_cmd.timer)) {
2576 /*
2577 * Successfully cancelled the timeout handler
2578 * ref: TMR
2579 */
2580 if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
2581 return;
2582 }
2583 sp->async_done(sp, res);
2584 }
2585
2586 void
2587 qla2x00_sp_release(struct kref *kref)
2588 {
2589 struct srb *sp = container_of(kref, struct srb, cmd_kref);
2590 struct scsi_qla_host *vha = sp->vha;
2591
2592 switch (sp->type) {
2593 case SRB_CT_PTHRU_CMD:
2594 /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
2595 if (sp->u.iocb_cmd.u.ctarg.req &&
2596 (!sp->fcport ||
2597 sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
2598 dma_free_coherent(&vha->hw->pdev->dev,
2599 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
2600 sp->u.iocb_cmd.u.ctarg.req,
2601 sp->u.iocb_cmd.u.ctarg.req_dma);
2602 sp->u.iocb_cmd.u.ctarg.req = NULL;
2603 }
2604 if (sp->u.iocb_cmd.u.ctarg.rsp &&
2605 (!sp->fcport ||
2606 sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
2607 dma_free_coherent(&vha->hw->pdev->dev,
2608 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
2609 sp->u.iocb_cmd.u.ctarg.rsp,
2610 sp->u.iocb_cmd.u.ctarg.rsp_dma);
2611 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
2612 }
2613 break;
2614 default:
2615 break;
2616 }
2617
2618 sp->free(sp);
2619 }
2620
2621 void
2622 qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
2623 void (*done)(struct srb *sp, int res))
2624 {
2625 timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2626 sp->done = qla2x00_async_done;
2627 sp->async_done = done;
2628 sp->free = qla2x00_sp_free;
2629 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2630 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2631 if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2632 init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2633 sp->start_timer = 1;
2634 }
2635
2636 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2637 {
2638 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2639
2640 if (sp->fcport)
2641 qla2x00_free_fcport(sp->fcport);
2642
2643 if (elsio->u.els_logo.els_logo_pyld)
2644 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2645 elsio->u.els_logo.els_logo_pyld,
2646 elsio->u.els_logo.els_logo_pyld_dma);
2647
2648 del_timer(&elsio->timer);
2649 qla2x00_rel_sp(sp);
2650 }
2651
2652 static void
2653 qla2x00_els_dcmd_iocb_timeout(void *data)
2654 {
2655 srb_t *sp = data;
2656 fc_port_t *fcport = sp->fcport;
2657 struct scsi_qla_host *vha = sp->vha;
2658 struct srb_iocb *lio = &sp->u.iocb_cmd;
2659 unsigned long flags = 0;
2660 int res, h;
2661
2662 ql_dbg(ql_dbg_io, vha, 0x3069,
2663 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2664 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2665 fcport->d_id.b.al_pa);
2666
2667 /* Abort the exchange */
2668 res = qla24xx_async_abort_cmd(sp, false);
2669 if (res) {
2670 ql_dbg(ql_dbg_io, vha, 0x3070,
2671 "mbx abort_command failed.\n");
2672 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2673 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2674 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2675 sp->qpair->req->outstanding_cmds[h] = NULL;
2676 break;
2677 }
2678 }
2679 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2680 complete(&lio->u.els_logo.comp);
2681 } else {
2682 ql_dbg(ql_dbg_io, vha, 0x3071,
2683 "mbx abort_command success.\n");
2684 }
2685 }
2686
2687 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2688 {
2689 fc_port_t *fcport = sp->fcport;
2690 struct srb_iocb *lio = &sp->u.iocb_cmd;
2691 struct scsi_qla_host *vha = sp->vha;
2692
2693 ql_dbg(ql_dbg_io, vha, 0x3072,
2694 "%s hdl=%x, portid=%02x%02x%02x done\n",
2695 sp->name, sp->handle, fcport->d_id.b.domain,
2696 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2697
2698 complete(&lio->u.els_logo.comp);
2699 }
2700
2701 int
2702 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2703 port_id_t remote_did)
2704 {
2705 srb_t *sp;
2706 fc_port_t *fcport = NULL;
2707 struct srb_iocb *elsio = NULL;
2708 struct qla_hw_data *ha = vha->hw;
2709 struct els_logo_payload logo_pyld;
2710 int rval = QLA_SUCCESS;
2711
2712 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2713 if (!fcport) {
2714 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2715 return -ENOMEM;
2716 }
2717
2718 /* Alloc SRB structure
2719 * ref: INIT
2720 */
2721 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2722 if (!sp) {
2723 qla2x00_free_fcport(fcport);
2724 ql_log(ql_log_info, vha, 0x70e6,
2725 "SRB allocation failed\n");
2726 return -ENOMEM;
2727 }
2728
2729 elsio = &sp->u.iocb_cmd;
2730 fcport->loop_id = 0xFFFF;
2731 fcport->d_id.b.domain = remote_did.b.domain;
2732 fcport->d_id.b.area = remote_did.b.area;
2733 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2734
2735 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2736 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2737
2738 sp->type = SRB_ELS_DCMD;
2739 sp->name = "ELS_DCMD";
2740 sp->fcport = fcport;
2741 qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
2742 qla2x00_els_dcmd_sp_done);
2743 sp->free = qla2x00_els_dcmd_sp_free;
2744 sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
2745 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2746
2747 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2748 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2749 GFP_KERNEL);
2750
2751 if (!elsio->u.els_logo.els_logo_pyld) {
2752 /* ref: INIT */
2753 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2754 qla2x00_free_fcport(fcport);
2755 return QLA_FUNCTION_FAILED;
2756 }
2757
2758 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2759
2760 elsio->u.els_logo.els_cmd = els_opcode;
2761 logo_pyld.opcode = els_opcode;
2762 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2763 logo_pyld.s_id[1] = vha->d_id.b.area;
2764 logo_pyld.s_id[2] = vha->d_id.b.domain;
2765 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2766 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2767
2768 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2769 sizeof(struct els_logo_payload));
2770 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2771 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2772 elsio->u.els_logo.els_logo_pyld,
2773 sizeof(*elsio->u.els_logo.els_logo_pyld));
2774
2775 rval = qla2x00_start_sp(sp);
2776 if (rval != QLA_SUCCESS) {
2777 /* ref: INIT */
2778 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2779 qla2x00_free_fcport(fcport);
2780 return QLA_FUNCTION_FAILED;
2781 }
2782
2783 ql_dbg(ql_dbg_io, vha, 0x3074,
2784 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2785 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2786 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2787
2788 wait_for_completion(&elsio->u.els_logo.comp);
2789
2790 /* ref: INIT */
2791 kref_put(&sp->cmd_kref, qla2x00_sp_release);
2792 return rval;
2793 }
2794
2795 static void
2796 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2797 {
2798 scsi_qla_host_t *vha = sp->vha;
2799 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2800
2801 els_iocb->entry_type = ELS_IOCB_TYPE;
2802 els_iocb->entry_count = 1;
2803 els_iocb->sys_define = 0;
2804 els_iocb->entry_status = 0;
2805 els_iocb->handle = sp->handle;
2806 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2807 els_iocb->tx_dsd_count = cpu_to_le16(1);
2808 els_iocb->vp_index = vha->vp_idx;
2809 els_iocb->sof_type = EST_SOFI3;
2810 els_iocb->rx_dsd_count = 0;
2811 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2812
2813 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2814 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2815 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2816 /* For the SID the byte order is different from that of the DID */
2817 els_iocb->s_id[1] = vha->d_id.b.al_pa;
2818 els_iocb->s_id[2] = vha->d_id.b.area;
2819 els_iocb->s_id[0] = vha->d_id.b.domain;
2820
2821 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2822 if (vha->hw->flags.edif_enabled)
2823 els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
2824 else
2825 els_iocb->control_flags = 0;
2826 els_iocb->tx_byte_count = els_iocb->tx_len =
2827 cpu_to_le32(sizeof(struct els_plogi_payload));
2828 put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2829 &els_iocb->tx_address);
2830 els_iocb->rx_dsd_count = cpu_to_le16(1);
2831 els_iocb->rx_byte_count = els_iocb->rx_len =
2832 cpu_to_le32(sizeof(struct els_plogi_payload));
2833 put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2834 &els_iocb->rx_address);
2835
2836 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2837 "PLOGI ELS IOCB:\n");
2838 ql_dump_buffer(ql_log_info, vha, 0x0109,
2839 (uint8_t *)els_iocb,
2840 sizeof(*els_iocb));
2841 } else {
2842 els_iocb->tx_byte_count =
2843 cpu_to_le32(sizeof(struct els_logo_payload));
2844 put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2845 &els_iocb->tx_address);
2846 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2847
2848 els_iocb->rx_byte_count = 0;
2849 els_iocb->rx_address = 0;
2850 els_iocb->rx_len = 0;
2851 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2852 "LOGO ELS IOCB:");
2853 ql_dump_buffer(ql_log_info, vha, 0x010b,
2854 els_iocb,
2855 sizeof(*els_iocb));
2856 }
2857
2858 sp->vha->qla_stats.control_requests++;
2859 }
2860
2861 void
2862 qla2x00_els_dcmd2_iocb_timeout(void *data)
2863 {
2864 srb_t *sp = data;
2865 fc_port_t *fcport = sp->fcport;
2866 struct scsi_qla_host *vha = sp->vha;
2867 unsigned long flags = 0;
2868 int res, h;
2869
2870 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2871 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2872 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2873
2874 /* Abort the exchange */
2875 res = qla24xx_async_abort_cmd(sp, false);
2876 ql_dbg(ql_dbg_io, vha, 0x3070,
2877 "mbx abort_command %s\n",
2878 (res == QLA_SUCCESS) ? "successful" : "failed");
2879 if (res) {
2880 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2881 for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2882 if (sp->qpair->req->outstanding_cmds[h] == sp) {
2883 sp->qpair->req->outstanding_cmds[h] = NULL;
2884 break;
2885 }
2886 }
2887 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2888 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2889 }
2890 }
2891
2892 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2893 {
2894 if (els_plogi->els_plogi_pyld)
2895 dma_free_coherent(&vha->hw->pdev->dev,
2896 els_plogi->tx_size,
2897 els_plogi->els_plogi_pyld,
2898 els_plogi->els_plogi_pyld_dma);
2899
2900 if (els_plogi->els_resp_pyld)
2901 dma_free_coherent(&vha->hw->pdev->dev,
2902 els_plogi->rx_size,
2903 els_plogi->els_resp_pyld,
2904 els_plogi->els_resp_pyld_dma);
2905 }
2906
2907 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2908 {
2909 fc_port_t *fcport = sp->fcport;
2910 struct srb_iocb *lio = &sp->u.iocb_cmd;
2911 struct scsi_qla_host *vha = sp->vha;
2912 struct event_arg ea;
2913 struct qla_work_evt *e;
2914 struct fc_port *conflict_fcport;
2915 port_id_t cid; /* conflict Nport id */
2916 const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2917 u16 lid;
2918
2919 ql_dbg(ql_dbg_disc, vha, 0x3072,
2920 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2921 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2922
2923 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2924 /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/
2925 fcport->logout_on_delete = 1;
2926 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2927
2928 if (sp->flags & SRB_WAKEUP_ON_COMP)
2929 complete(&lio->u.els_plogi.comp);
2930 else {
2931 switch (le32_to_cpu(fw_status[0])) {
2932 case CS_DATA_UNDERRUN:
2933 case CS_COMPLETE:
2934 memset(&ea, 0, sizeof(ea));
2935 ea.fcport = fcport;
2936 ea.rc = res;
2937 qla_handle_els_plogi_done(vha, &ea);
2938 break;
2939
2940 case CS_IOCB_ERROR:
2941 switch (le32_to_cpu(fw_status[1])) {
2942 case LSC_SCODE_PORTID_USED:
2943 lid = le32_to_cpu(fw_status[2]) & 0xffff;
2944 qlt_find_sess_invalidate_other(vha,
2945 wwn_to_u64(fcport->port_name),
2946 fcport->d_id, lid, &conflict_fcport);
2947 if (conflict_fcport) {
2948 /*
2949 * Another fcport shares the same
2950 * loop_id & nport id; conflict
2951 * fcport needs to finish cleanup
2952 * before this fcport can proceed
2953 * to login.
2954 */
2955 conflict_fcport->conflict = fcport;
2956 fcport->login_pause = 1;
2957 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2958 "%s %d %8phC pid %06x inuse with lid %#x.\n",
2959 __func__, __LINE__,
2960 fcport->port_name,
2961 fcport->d_id.b24, lid);
2962 } else {
2963 ql_dbg(ql_dbg_disc, vha, 0x20ed,
2964 "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2965 __func__, __LINE__,
2966 fcport->port_name,
2967 fcport->d_id.b24, lid);
2968 qla2x00_clear_loop_id(fcport);
2969 set_bit(lid, vha->hw->loop_id_map);
2970 fcport->loop_id = lid;
2971 fcport->keep_nport_handle = 0;
2972 qlt_schedule_sess_for_deletion(fcport);
2973 }
2974 break;
2975
2976 case LSC_SCODE_NPORT_USED:
2977 cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2978 & 0xff;
2979 cid.b.area = (le32_to_cpu(fw_status[2]) >> 8)
2980 & 0xff;
2981 cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff;
2982 cid.b.rsvd_1 = 0;
2983
2984 ql_dbg(ql_dbg_disc, vha, 0x20ec,
2985 "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2986 __func__, __LINE__, fcport->port_name,
2987 fcport->loop_id, cid.b24);
2988 set_bit(fcport->loop_id,
2989 vha->hw->loop_id_map);
2990 fcport->loop_id = FC_NO_LOOP_ID;
2991 qla24xx_post_gnl_work(vha, fcport);
2992 break;
2993
2994 case LSC_SCODE_NOXCB:
2995 vha->hw->exch_starvation++;
2996 if (vha->hw->exch_starvation > 5) {
2997 ql_log(ql_log_warn, vha, 0xd046,
2998 "Exchange starvation. Resetting RISC\n");
2999 vha->hw->exch_starvation = 0;
3000 set_bit(ISP_ABORT_NEEDED,
3001 &vha->dpc_flags);
3002 qla2xxx_wake_dpc(vha);
3003 break;
3004 }
3005 fallthrough;
3006 default:
3007 ql_dbg(ql_dbg_disc, vha, 0x20eb,
3008 "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
3009 __func__, sp->fcport->port_name,
3010 fw_status[0], fw_status[1], fw_status[2]);
3011
3012 fcport->flags &= ~FCF_ASYNC_SENT;
3013 qlt_schedule_sess_for_deletion(fcport);
3014 break;
3015 }
3016 break;
3017
3018 default:
3019 ql_dbg(ql_dbg_disc, vha, 0x20eb,
3020 "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
3021 __func__, sp->fcport->port_name,
3022 fw_status[0], fw_status[1], fw_status[2]);
3023
3024 sp->fcport->flags &= ~FCF_ASYNC_SENT;
3025 qlt_schedule_sess_for_deletion(fcport);
3026 break;
3027 }
3028
3029 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3030 if (!e) {
3031 struct srb_iocb *elsio = &sp->u.iocb_cmd;
3032
3033 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3034 /* ref: INIT */
3035 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3036 return;
3037 }
3038 e->u.iosb.sp = sp;
3039 qla2x00_post_work(vha, e);
3040 }
3041 }
3042
3043 int
3044 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3045 fc_port_t *fcport)
3046 {
3047 srb_t *sp;
3048 struct srb_iocb *elsio = NULL;
3049 struct qla_hw_data *ha = vha->hw;
3050 int rval = QLA_SUCCESS;
3051 void *ptr, *resp_ptr;
3052
3053 /* Alloc SRB structure
3054 * ref: INIT
3055 */
3056 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3057 if (!sp) {
3058 ql_log(ql_log_info, vha, 0x70e6,
3059 "SRB allocation failed\n");
3060 goto done;
3061 }
3062
3063 fcport->flags |= FCF_ASYNC_SENT;
3064 qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3065 elsio = &sp->u.iocb_cmd;
3066 ql_dbg(ql_dbg_io, vha, 0x3073,
3067 "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
3068
3069 sp->type = SRB_ELS_DCMD;
3070 sp->name = "ELS_DCMD";
3071 sp->fcport = fcport;
3072 qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
3073 qla2x00_els_dcmd2_sp_done);
3074 sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
3075
3076 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3077
3078 ptr = elsio->u.els_plogi.els_plogi_pyld =
3079 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3080 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3081
3082 if (!elsio->u.els_plogi.els_plogi_pyld) {
3083 rval = QLA_FUNCTION_FAILED;
3084 goto done_free_sp;
3085 }
3086
3087 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3088 dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3089 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3090
3091 if (!elsio->u.els_plogi.els_resp_pyld) {
3092 rval = QLA_FUNCTION_FAILED;
3093 goto done_free_sp;
3094 }
3095
3096 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3097
3098 memset(ptr, 0, sizeof(struct els_plogi_payload));
3099 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3100 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3101 (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
3102 sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));
3103
3104 elsio->u.els_plogi.els_cmd = els_opcode;
3105 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3106
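/*
 * When an EDIF application is active, advertise security support by
 * setting the FC-SP bit in the PLOGI common service parameters.
 */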
3107 if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
3108 struct fc_els_flogi *p = ptr;
3109 p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
3110 }
3111
3112 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3113 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3114 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3115 sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3116
3117 rval = qla2x00_start_sp(sp);
3118 if (rval != QLA_SUCCESS) {
3119 fcport->flags |= FCF_LOGIN_NEEDED;
3120 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3121 goto done_free_sp;
3122 } else {
3123 ql_dbg(ql_dbg_disc, vha, 0x3074,
3124 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3125 sp->name, sp->handle, fcport->loop_id,
3126 fcport->d_id.b24, vha->d_id.b24);
3127 }
3128
3129 return rval;
3130
3131 done_free_sp:
3132 qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3133 /* ref: INIT */
3134 kref_put(&sp->cmd_kref, qla2x00_sp_release);
3135 done:
3136 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3137 qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
3138 return rval;
3139 }
3140
3141 /* It is assumed that the qpair lock is held. */
3142 void qla_els_pt_iocb(struct scsi_qla_host *vha,
3143 struct els_entry_24xx *els_iocb,
3144 struct qla_els_pt_arg *a)
3145 {
3146 els_iocb->entry_type = ELS_IOCB_TYPE;
3147 els_iocb->entry_count = 1;
3148 els_iocb->sys_define = 0;
3149 els_iocb->entry_status = 0;
3150 els_iocb->handle = QLA_SKIP_HANDLE;
3151 els_iocb->nport_handle = a->nport_handle;
3152 els_iocb->rx_xchg_address = a->rx_xchg_address;
3153 els_iocb->tx_dsd_count = cpu_to_le16(1);
3154 els_iocb->vp_index = a->vp_idx;
3155 els_iocb->sof_type = EST_SOFI3;
3156 els_iocb->rx_dsd_count = cpu_to_le16(0);
3157 els_iocb->opcode = a->els_opcode;
3158
3159 els_iocb->d_id[0] = a->did.b.al_pa;
3160 els_iocb->d_id[1] = a->did.b.area;
3161 els_iocb->d_id[2] = a->did.b.domain;
3162 /* For the SID the byte order is different from that of the DID */
3163 els_iocb->s_id[1] = vha->d_id.b.al_pa;
3164 els_iocb->s_id[2] = vha->d_id.b.area;
3165 els_iocb->s_id[0] = vha->d_id.b.domain;
3166
3167 els_iocb->control_flags = cpu_to_le16(a->control_flags);
3168
3169 els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
3170 els_iocb->tx_len = cpu_to_le32(a->tx_len);
3171 put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
3172
3173 els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
3174 els_iocb->rx_len = cpu_to_le32(a->rx_len);
3175 put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
3176 }
3177
3178 static void
3179 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3180 {
3181 struct bsg_job *bsg_job = sp->u.bsg_job;
3182 struct fc_bsg_request *bsg_request = bsg_job->request;
3183
3184 els_iocb->entry_type = ELS_IOCB_TYPE;
3185 els_iocb->entry_count = 1;
3186 els_iocb->sys_define = 0;
3187 els_iocb->entry_status = 0;
3188 els_iocb->handle = sp->handle;
3189 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3190 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3191 els_iocb->vp_index = sp->vha->vp_idx;
3192 els_iocb->sof_type = EST_SOFI3;
3193 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3194
3195 els_iocb->opcode =
3196 sp->type == SRB_ELS_CMD_RPT ?
3197 bsg_request->rqst_data.r_els.els_code :
3198 bsg_request->rqst_data.h_els.command_code;
3199 els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3200 els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3201 els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3202 els_iocb->control_flags = 0;
3203 els_iocb->rx_byte_count =
3204 cpu_to_le32(bsg_job->reply_payload.payload_len);
3205 els_iocb->tx_byte_count =
3206 cpu_to_le32(bsg_job->request_payload.payload_len);
3207
3208 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3209 &els_iocb->tx_address);
3210 els_iocb->tx_len = cpu_to_le32(sg_dma_len
3211 (bsg_job->request_payload.sg_list));
3212
3213 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3214 &els_iocb->rx_address);
3215 els_iocb->rx_len = cpu_to_le32(sg_dma_len
3216 (bsg_job->reply_payload.sg_list));
3217
3218 sp->vha->qla_stats.control_requests++;
3219 }
3220
3221 static void
3222 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3223 {
3224 uint16_t avail_dsds;
3225 struct dsd64 *cur_dsd;
3226 struct scatterlist *sg;
3227 int index;
3228 uint16_t tot_dsds;
3229 scsi_qla_host_t *vha = sp->vha;
3230 struct qla_hw_data *ha = vha->hw;
3231 struct bsg_job *bsg_job = sp->u.bsg_job;
3232 int entry_count = 1;
3233
3234 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3235 ct_iocb->entry_type = CT_IOCB_TYPE;
3236 ct_iocb->entry_status = 0;
3237 ct_iocb->handle1 = sp->handle;
3238 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3239 ct_iocb->status = cpu_to_le16(0);
3240 ct_iocb->control_flags = cpu_to_le16(0);
3241 ct_iocb->timeout = 0;
3242 ct_iocb->cmd_dsd_count =
3243 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3244 ct_iocb->total_dsd_count =
3245 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3246 ct_iocb->req_bytecount =
3247 cpu_to_le32(bsg_job->request_payload.payload_len);
3248 ct_iocb->rsp_bytecount =
3249 cpu_to_le32(bsg_job->reply_payload.payload_len);
3250
3251 put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3252 &ct_iocb->req_dsd.address);
3253 ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3254
3255 put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3256 &ct_iocb->rsp_dsd.address);
3257 ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3258
3259 avail_dsds = 1;
3260 cur_dsd = &ct_iocb->rsp_dsd;
3261 index = 0;
3262 tot_dsds = bsg_job->reply_payload.sg_cnt;
3263
3264 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3265 cont_a64_entry_t *cont_pkt;
3266
3267 /* Allocate additional continuation packets? */
3268 if (avail_dsds == 0) {
3269 /*
3270 * Five DSDs are available in the Cont.
3271 * Type 1 IOCB.
3272 */
3273 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3274 vha->hw->req_q_map[0]);
3275 cur_dsd = cont_pkt->dsd;
3276 avail_dsds = 5;
3277 entry_count++;
3278 }
3279
3280 append_dsd64(&cur_dsd, sg);
3281 avail_dsds--;
3282 }
3283 ct_iocb->entry_count = entry_count;
3284
3285 sp->vha->qla_stats.control_requests++;
3286 }
3287
3288 static void
3289 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3290 {
3291 uint16_t avail_dsds;
3292 struct dsd64 *cur_dsd;
3293 struct scatterlist *sg;
3294 int index;
3295 uint16_t cmd_dsds, rsp_dsds;
3296 scsi_qla_host_t *vha = sp->vha;
3297 struct qla_hw_data *ha = vha->hw;
3298 struct bsg_job *bsg_job = sp->u.bsg_job;
3299 int entry_count = 1;
3300 cont_a64_entry_t *cont_pkt = NULL;
3301
3302 ct_iocb->entry_type = CT_IOCB_TYPE;
3303 ct_iocb->entry_status = 0;
3304 ct_iocb->sys_define = 0;
3305 ct_iocb->handle = sp->handle;
3306
3307 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3308 ct_iocb->vp_index = sp->vha->vp_idx;
3309 ct_iocb->comp_status = cpu_to_le16(0);
3310
3311 cmd_dsds = bsg_job->request_payload.sg_cnt;
3312 rsp_dsds = bsg_job->reply_payload.sg_cnt;
3313
3314 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3315 ct_iocb->timeout = 0;
3316 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3317 ct_iocb->cmd_byte_count =
3318 cpu_to_le32(bsg_job->request_payload.payload_len);
3319
3320 avail_dsds = 2;
3321 cur_dsd = ct_iocb->dsd;
3322 index = 0;
3323
3324 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3325 /* Allocate additional continuation packets? */
3326 if (avail_dsds == 0) {
3327 /*
3328 * Five DSDs are available in the Cont.
3329 * Type 1 IOCB.
3330 */
3331 cont_pkt = qla2x00_prep_cont_type1_iocb(
3332 vha, ha->req_q_map[0]);
3333 cur_dsd = cont_pkt->dsd;
3334 avail_dsds = 5;
3335 entry_count++;
3336 }
3337
3338 append_dsd64(&cur_dsd, sg);
3339 avail_dsds--;
3340 }
3341
3342 index = 0;
3343
3344 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3345 /* Allocate additional continuation packets? */
3346 if (avail_dsds == 0) {
3347 /*
3348 * Five DSDs are available in the Cont.
3349 * Type 1 IOCB.
3350 */
3351 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3352 ha->req_q_map[0]);
3353 cur_dsd = cont_pkt->dsd;
3354 avail_dsds = 5;
3355 entry_count++;
3356 }
3357
3358 append_dsd64(&cur_dsd, sg);
3359 avail_dsds--;
3360 }
3361 ct_iocb->entry_count = entry_count;
3362 }
3363
3364 /**
3365 * qla82xx_start_scsi() - Send a SCSI command to the ISP
3366 * @sp: command to send to the ISP
3367 *
3368 * Returns non-zero if a failure occurred, else zero.
3369 */
3370 int
3371 qla82xx_start_scsi(srb_t *sp)
3372 {
3373 int nseg;
3374 unsigned long flags;
3375 struct scsi_cmnd *cmd;
3376 uint32_t *clr_ptr;
3377 uint32_t handle;
3378 uint16_t cnt;
3379 uint16_t req_cnt;
3380 uint16_t tot_dsds;
3381 struct device_reg_82xx __iomem *reg;
3382 uint32_t dbval;
3383 __be32 *fcp_dl;
3384 uint8_t additional_cdb_len;
3385 struct ct6_dsd *ctx;
3386 struct scsi_qla_host *vha = sp->vha;
3387 struct qla_hw_data *ha = vha->hw;
3388 struct req_que *req = NULL;
3389 struct rsp_que *rsp = NULL;
3390 struct qla_qpair *qpair = sp->qpair;
3391
3392 /* Setup device pointers. */
3393 reg = &ha->iobase->isp82;
3394 cmd = GET_CMD_SP(sp);
3395 req = vha->req;
3396 rsp = ha->rsp_q_map[0];
3397
3398 /* So we know we haven't pci_map'ed anything yet */
3399 tot_dsds = 0;
3400
3401 dbval = 0x04 | (ha->portnum << 5);
3402
3403 /* Send marker if required */
3404 if (vha->marker_needed != 0) {
3405 if (qla2x00_marker(vha, ha->base_qpair,
3406 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3407 ql_log(ql_log_warn, vha, 0x300c,
3408 "qla2x00_marker failed for cmd=%p.\n", cmd);
3409 return QLA_FUNCTION_FAILED;
3410 }
3411 vha->marker_needed = 0;
3412 }
3413
3414 /* Acquire ring specific lock */
3415 spin_lock_irqsave(&ha->hardware_lock, flags);
3416
3417 handle = qla2xxx_get_next_handle(req);
3418 if (handle == 0)
3419 goto queuing_error;
3420
3421 /* Map the sg table so we have an accurate count of sg entries needed */
3422 if (scsi_sg_count(cmd)) {
3423 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3424 scsi_sg_count(cmd), cmd->sc_data_direction);
3425 if (unlikely(!nseg))
3426 goto queuing_error;
3427 } else
3428 nseg = 0;
3429
3430 tot_dsds = nseg;
3431
3432 if (tot_dsds > ql2xshiftctondsd) {
3433 struct cmd_type_6 *cmd_pkt;
3434 uint16_t more_dsd_lists = 0;
3435 struct dsd_dma *dsd_ptr;
3436 uint16_t i;
3437
3438 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3439 if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
3440 ql_dbg(ql_dbg_io, vha, 0x300d,
3441 "Num of DSD list %d is than %d for cmd=%p.\n",
3442 more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
3443 cmd);
3444 goto queuing_error;
3445 }
3446
3447 if (more_dsd_lists <= qpair->dsd_avail)
3448 goto sufficient_dsds;
3449 else
3450 more_dsd_lists -= qpair->dsd_avail;
3451
3452 for (i = 0; i < more_dsd_lists; i++) {
3453 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3454 if (!dsd_ptr) {
3455 ql_log(ql_log_fatal, vha, 0x300e,
3456 "Failed to allocate memory for dsd_dma "
3457 "for cmd=%p.\n", cmd);
3458 goto queuing_error;
3459 }
3460
3461 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3462 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3463 if (!dsd_ptr->dsd_addr) {
3464 kfree(dsd_ptr);
3465 ql_log(ql_log_fatal, vha, 0x300f,
3466 "Failed to allocate memory for dsd_addr "
3467 "for cmd=%p.\n", cmd);
3468 goto queuing_error;
3469 }
3470 list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
3471 qpair->dsd_avail++;
3472 }
3473
3474 sufficient_dsds:
3475 req_cnt = 1;
3476
3477 if (req->cnt < (req_cnt + 2)) {
3478 cnt = (uint16_t)rd_reg_dword_relaxed(
3479 &reg->req_q_out[0]);
3480 if (req->ring_index < cnt)
3481 req->cnt = cnt - req->ring_index;
3482 else
3483 req->cnt = req->length -
3484 (req->ring_index - cnt);
3485 if (req->cnt < (req_cnt + 2))
3486 goto queuing_error;
3487 }
3488
3489 ctx = &sp->u.scmd.ct6_ctx;
3490
3491 memset(ctx, 0, sizeof(struct ct6_dsd));
3492 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3493 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3494 if (!ctx->fcp_cmnd) {
3495 ql_log(ql_log_fatal, vha, 0x3011,
3496 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3497 goto queuing_error;
3498 }
3499
3500 /* Initialize the DSD list and dma handle */
3501 INIT_LIST_HEAD(&ctx->dsd_list);
3502 ctx->dsd_use_cnt = 0;
3503
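/*
 * The FCP_CMND IU that is DMA'd separately consists of a 12-byte fixed
 * header (8-byte LUN plus four task/control bytes), the CDB itself, and
 * a trailing 4-byte FCP_DL transfer-length field; that is where the
 * "12 + cmd_len + 4" sizing below comes from.
 */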
3504 if (cmd->cmd_len > 16) {
3505 additional_cdb_len = cmd->cmd_len - 16;
3506 if ((cmd->cmd_len % 4) != 0) {
3507 /* SCSI command bigger than 16 bytes must be
3508 * multiple of 4
3509 */
3510 ql_log(ql_log_warn, vha, 0x3012,
3511 "scsi cmd len %d not multiple of 4 "
3512 "for cmd=%p.\n", cmd->cmd_len, cmd);
3513 goto queuing_error_fcp_cmnd;
3514 }
3515 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3516 } else {
3517 additional_cdb_len = 0;
3518 ctx->fcp_cmnd_len = 12 + 16 + 4;
3519 }
3520
3521 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3522 cmd_pkt->handle = make_handle(req->id, handle);
3523
3524 /* Zero out remaining portion of packet. */
3525 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3526 clr_ptr = (uint32_t *)cmd_pkt + 2;
3527 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3528 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3529
3530 /* Set NPORT-ID and LUN number*/
3531 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3532 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3533 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3534 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3535 cmd_pkt->vp_index = sp->vha->vp_idx;
3536
3537 /* Build IOCB segments */
3538 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3539 goto queuing_error_fcp_cmnd;
3540
3541 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3542 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3543
3544 /* build FCP_CMND IU */
3545 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3546 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3547
3548 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3549 ctx->fcp_cmnd->additional_cdb_len |= 1;
3550 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3551 ctx->fcp_cmnd->additional_cdb_len |= 2;
3552
3553 /* Populate the FCP_PRIO. */
3554 if (ha->flags.fcp_prio_enabled)
3555 ctx->fcp_cmnd->task_attribute |=
3556 sp->fcport->fcp_prio << 3;
3557
3558 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3559
3560 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3561 additional_cdb_len);
3562 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3563
3564 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3565 put_unaligned_le64(ctx->fcp_cmnd_dma,
3566 &cmd_pkt->fcp_cmnd_dseg_address);
3567
3568 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3569 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3570 /* Set total data segment count. */
3571 cmd_pkt->entry_count = (uint8_t)req_cnt;
3572 /* Specify response queue number where
3573 * completion should happen
3574 */
3575 cmd_pkt->entry_status = (uint8_t) rsp->id;
3576 } else {
3577 struct cmd_type_7 *cmd_pkt;
3578
3579 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3580 if (req->cnt < (req_cnt + 2)) {
3581 cnt = (uint16_t)rd_reg_dword_relaxed(
3582 &reg->req_q_out[0]);
3583 if (req->ring_index < cnt)
3584 req->cnt = cnt - req->ring_index;
3585 else
3586 req->cnt = req->length -
3587 (req->ring_index - cnt);
3588 }
3589 if (req->cnt < (req_cnt + 2))
3590 goto queuing_error;
3591
3592 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3593 cmd_pkt->handle = make_handle(req->id, handle);
3594
3595 /* Zero out remaining portion of packet. */
3596 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3597 clr_ptr = (uint32_t *)cmd_pkt + 2;
3598 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3599 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3600
3601 /* Set NPORT-ID and LUN number*/
3602 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3603 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3604 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3605 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3606 cmd_pkt->vp_index = sp->vha->vp_idx;
3607
3608 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3609 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3610 sizeof(cmd_pkt->lun));
3611
3612 /* Populate the FCP_PRIO. */
3613 if (ha->flags.fcp_prio_enabled)
3614 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3615
3616 /* Load SCSI command packet. */
3617 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3618 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3619
3620 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3621
3622 /* Build IOCB segments */
3623 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3624
3625 /* Set total data segment count. */
3626 cmd_pkt->entry_count = (uint8_t)req_cnt;
3627 /* Specify response queue number where
3628 * completion should happen.
3629 */
3630 cmd_pkt->entry_status = (uint8_t) rsp->id;
3631
3632 }
3633 /* Build command packet. */
3634 req->current_outstanding_cmd = handle;
3635 req->outstanding_cmds[handle] = sp;
3636 sp->handle = handle;
3637 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3638 req->cnt -= req_cnt;
3639 wmb();
3640
3641 /* Adjust ring index. */
3642 req->ring_index++;
3643 if (req->ring_index == req->length) {
3644 req->ring_index = 0;
3645 req->ring_ptr = req->ring;
3646 } else
3647 req->ring_ptr++;
3648
3649 sp->flags |= SRB_DMA_VALID;
3650
3651 /* Set chip new ring index. */
3652 /* write, read and verify logic */
3653 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
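/*
 * Doorbell value layout as built here: the low byte carries the
 * doorbell type and port number (0x04 | portnum << 5 from above),
 * bits 8-15 the request queue id, and bits 16-31 the new producer
 * (ring) index.
 */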
3654 if (ql2xdbwr)
3655 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3656 else {
3657 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3658 wmb();
3659 while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3660 wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3661 wmb();
3662 }
3663 }
3664
3665 /* Manage unprocessed RIO/ZIO commands in response queue. */
3666 if (vha->flags.process_response_queue &&
3667 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3668 qla24xx_process_response_queue(vha, rsp);
3669
3670 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3671 return QLA_SUCCESS;
3672
3673 queuing_error_fcp_cmnd:
3674 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3675 queuing_error:
3676 if (tot_dsds)
3677 scsi_dma_unmap(cmd);
3678
3679 if (sp->u.scmd.crc_ctx) {
3680 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3681 sp->u.scmd.crc_ctx = NULL;
3682 }
3683 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3684
3685 return QLA_FUNCTION_FAILED;
3686 }
3687
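/**
 * qla24xx_abort_iocb() - Build an Abort IOCB for an outstanding command.
 * @sp: abort SRB
 * @abt_iocb: Abort IOCB entry to populate
 *
 * Fills in the handle of the command to abort along with the target's
 * N_Port handle, port ID and VP index, and passes the original SRB to
 * qla_nvme_abort_set_option() when one is attached.
 */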
3688 static void
3689 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3690 {
3691 struct srb_iocb *aio = &sp->u.iocb_cmd;
3692 scsi_qla_host_t *vha = sp->vha;
3693 struct req_que *req = sp->qpair->req;
3694 srb_t *orig_sp = sp->cmd_sp;
3695
3696 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3697 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3698 abt_iocb->entry_count = 1;
3699 abt_iocb->handle = make_handle(req->id, sp->handle);
3700 if (sp->fcport) {
3701 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3702 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3703 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3704 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3705 }
3706 abt_iocb->handle_to_abort =
3707 make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3708 aio->u.abt.cmd_hndl);
3709 abt_iocb->vp_index = vha->vp_idx;
3710 abt_iocb->req_que_no = aio->u.abt.req_que_no;
3711
3712 /* need to pass original sp */
3713 if (orig_sp)
3714 qla_nvme_abort_set_option(abt_iocb, orig_sp);
3715
3716 /* Send the command to the firmware */
3717 wmb();
3718 }
3719
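/**
 * qla2x00_mb_iocb() - Build a Mailbox Command IOCB.
 * @sp: mailbox SRB
 * @mbx: MBX IOCB entry to populate
 *
 * Copies the outgoing mailbox registers from the SRB into the IOCB.
 */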
3720 static void
3721 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3722 {
3723 int i, sz;
3724
3725 mbx->entry_type = MBX_IOCB_TYPE;
3726 mbx->handle = sp->handle;
3727 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3728
3729 for (i = 0; i < sz; i++)
3730 mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3731 }
3732
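/**
 * qla2x00_ctpthru_cmd_iocb() - Build a CT pass-through command IOCB.
 * @sp: CT pass-through SRB
 * @ct_pkt: CT IOCB entry to populate
 */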
3733 static void
3734 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3735 {
3736 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3737 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3738 ct_pkt->handle = sp->handle;
3739 }
3740
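/**
 * qla2x00_send_notify_ack_iocb() - Build a Notify Acknowledge IOCB.
 * @sp: notify-ack SRB
 * @nack: Notify Acknowledge IOCB entry to populate
 *
 * Echoes the relevant fields of the received immediate notify back to
 * the firmware.  For a PLOGI with the FC-SP flag on an EDIF-enabled
 * adapter, the FC-SP bit is also set in the acknowledgement.
 */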
3741 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3742 struct nack_to_isp *nack)
3743 {
3744 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3745
3746 nack->entry_type = NOTIFY_ACK_TYPE;
3747 nack->entry_count = 1;
3748 nack->ox_id = ntfy->ox_id;
3749
3750 nack->u.isp24.handle = sp->handle;
3751 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3752 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3753 nack->u.isp24.flags = ntfy->u.isp24.flags &
3754 cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3755 }
3756 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3757 nack->u.isp24.status = ntfy->u.isp24.status;
3758 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3759 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3760 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3761 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3762 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3763 nack->u.isp24.srr_flags = 0;
3764 nack->u.isp24.srr_reject_code = 0;
3765 nack->u.isp24.srr_reject_code_expl = 0;
3766 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3767
3768 if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
3769 (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
3770 sp->vha->hw->flags.edif_enabled) {
3771 ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
3772 "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
3773 sp->name, sp->handle, sp->fcport->loop_id,
3774 sp->fcport->d_id.b24);
3775 nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
3776 }
3777 }
3778
3779 /*
3780 * Build NVME LS request
3781 */
3782 static void
3783 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3784 {
3785 struct srb_iocb *nvme;
3786
3787 nvme = &sp->u.iocb_cmd;
3788 cmd_pkt->entry_type = PT_LS4_REQUEST;
3789 cmd_pkt->entry_count = 1;
3790 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3791 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3792
3793 if (sp->unsol_rsp) {
3794 cmd_pkt->control_flags =
3795 cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
3796 cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
3797 cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
3798 } else {
3799 cmd_pkt->control_flags =
3800 cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3801 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3802 cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3803 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3804 cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
3805 put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3806 }
3807
3808 cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3809 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3810 cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
3811 put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3812 }
3813
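/**
 * qla25xx_ctrlvp_iocb() - Build a Control VP IOCB.
 * @sp: control-VP SRB
 * @vce: VP Control IOCB entry to populate
 *
 * Encodes the requested VP command and sets the bit corresponding to
 * the target VP index in the VP index map.
 */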
3814 static void
3815 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3816 {
3817 int map, pos;
3818
3819 vce->entry_type = VP_CTRL_IOCB_TYPE;
3820 vce->handle = sp->handle;
3821 vce->entry_count = 1;
3822 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3823 vce->vp_count = cpu_to_le16(1);
3824
3825 /*
3826 * The index map in firmware starts with 1, so decrement the index;
3827 * this is OK as we never use index 0.
3828 */
3829 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3830 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3831 vce->vp_idx_map[map] |= 1 << pos;
3832 }
3833
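/**
 * qla24xx_prlo_iocb() - Build an implicit PRLO (process logout) IOCB.
 * @sp: PRLO SRB
 * @logio: Login/Logout Port IOCB entry to populate
 */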
3834 static void
3835 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3836 {
3837 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3838 logio->control_flags =
3839 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3840
3841 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3842 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3843 logio->port_id[1] = sp->fcport->d_id.b.area;
3844 logio->port_id[2] = sp->fcport->d_id.b.domain;
3845 logio->vp_index = sp->fcport->vha->vp_idx;
3846 }
3847
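/**
 * qla_get_iocbs_resource() - Reserve firmware resources for an SRB.
 * @sp: SRB about to be issued
 *
 * Classifies the SRB type to decide whether an exchange must be
 * reserved in addition to an IOCB slot and whether the request may be
 * forced through when resources are low, then claims the resources
 * from the queue pair.
 *
 * Returns 0 on success or when IOCB limit enforcement is disabled.
 */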
3848 static int qla_get_iocbs_resource(struct srb *sp)
3849 {
3850 bool get_exch;
3851 bool push_it_through = false;
3852
3853 if (!ql2xenforce_iocb_limit) {
3854 sp->iores.res_type = RESOURCE_NONE;
3855 return 0;
3856 }
3857 sp->iores.res_type = RESOURCE_NONE;
3858
3859 switch (sp->type) {
3860 case SRB_TM_CMD:
3861 case SRB_PRLI_CMD:
3862 case SRB_ADISC_CMD:
3863 push_it_through = true;
3864 fallthrough;
3865 case SRB_LOGIN_CMD:
3866 case SRB_ELS_CMD_RPT:
3867 case SRB_ELS_CMD_HST:
3868 case SRB_ELS_CMD_HST_NOLOGIN:
3869 case SRB_CT_CMD:
3870 case SRB_NVME_LS:
3871 case SRB_ELS_DCMD:
3872 get_exch = true;
3873 break;
3874
3875 case SRB_FXIOCB_DCMD:
3876 case SRB_FXIOCB_BCMD:
3877 sp->iores.res_type = RESOURCE_NONE;
3878 return 0;
3879
3880 case SRB_SA_UPDATE:
3881 case SRB_SA_REPLACE:
3882 case SRB_MB_IOCB:
3883 case SRB_ABT_CMD:
3884 case SRB_NACK_PLOGI:
3885 case SRB_NACK_PRLI:
3886 case SRB_NACK_LOGO:
3887 case SRB_LOGOUT_CMD:
3888 case SRB_CTRL_VP:
3889 case SRB_MARKER:
3890 default:
3891 push_it_through = true;
3892 get_exch = false;
3893 }
3894
3895 sp->iores.res_type |= RESOURCE_IOCB;
3896 sp->iores.iocb_cnt = 1;
3897 if (get_exch) {
3898 sp->iores.res_type |= RESOURCE_EXCH;
3899 sp->iores.exch_cnt = 1;
3900 }
3901 if (push_it_through)
3902 sp->iores.res_type |= RESOURCE_FORCE;
3903
3904 return qla_get_fw_resources(sp->qpair, &sp->iores);
3905 }
3906
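/**
 * qla_marker_iocb() - Build a Marker IOCB.
 * @sp: marker SRB
 * @mrk: Marker IOCB entry to populate
 *
 * For modifiers other than MK_SYNC_ALL, the N_Port handle, LUN and
 * VP index are filled in as well.
 */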
3907 static void
3908 qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
3909 {
3910 mrk->entry_type = MARKER_TYPE;
3911 mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
3912 mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
3913 if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
3914 mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
3915 int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
3916 host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
3917 mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
3918 }
3919 }
3920
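/**
 * qla2x00_start_sp() - Build and issue the IOCB for an SRB.
 * @sp: command to send to the firmware
 *
 * Reserves firmware resources, allocates a request queue entry,
 * dispatches to the type-specific IOCB builder and then notifies the
 * firmware via qla2x00_start_iocbs().
 *
 * Returns QLA_SUCCESS on success, -EIO if the adapter is in EEH
 * recovery, or -EAGAIN if resources or IOCB space are unavailable.
 */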
3921 int
3922 qla2x00_start_sp(srb_t *sp)
3923 {
3924 int rval = QLA_SUCCESS;
3925 scsi_qla_host_t *vha = sp->vha;
3926 struct qla_hw_data *ha = vha->hw;
3927 struct qla_qpair *qp = sp->qpair;
3928 void *pkt;
3929 unsigned long flags;
3930
3931 if (vha->hw->flags.eeh_busy)
3932 return -EIO;
3933
3934 spin_lock_irqsave(qp->qp_lock_ptr, flags);
3935 rval = qla_get_iocbs_resource(sp);
3936 if (rval) {
3937 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3938 return -EAGAIN;
3939 }
3940
3941 pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
3942 if (!pkt) {
3943 rval = -EAGAIN;
3944 ql_log(ql_log_warn, vha, 0x700c,
3945 "qla2x00_alloc_iocbs failed.\n");
3946 goto done;
3947 }
3948
3949 switch (sp->type) {
3950 case SRB_LOGIN_CMD:
3951 IS_FWI2_CAPABLE(ha) ?
3952 qla24xx_login_iocb(sp, pkt) :
3953 qla2x00_login_iocb(sp, pkt);
3954 break;
3955 case SRB_PRLI_CMD:
3956 qla24xx_prli_iocb(sp, pkt);
3957 break;
3958 case SRB_LOGOUT_CMD:
3959 IS_FWI2_CAPABLE(ha) ?
3960 qla24xx_logout_iocb(sp, pkt) :
3961 qla2x00_logout_iocb(sp, pkt);
3962 break;
3963 case SRB_ELS_CMD_RPT:
3964 case SRB_ELS_CMD_HST:
3965 qla24xx_els_iocb(sp, pkt);
3966 break;
3967 case SRB_ELS_CMD_HST_NOLOGIN:
3968 qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg);
3969 ((struct els_entry_24xx *)pkt)->handle = sp->handle;
3970 break;
3971 case SRB_CT_CMD:
3972 IS_FWI2_CAPABLE(ha) ?
3973 qla24xx_ct_iocb(sp, pkt) :
3974 qla2x00_ct_iocb(sp, pkt);
3975 break;
3976 case SRB_ADISC_CMD:
3977 IS_FWI2_CAPABLE(ha) ?
3978 qla24xx_adisc_iocb(sp, pkt) :
3979 qla2x00_adisc_iocb(sp, pkt);
3980 break;
3981 case SRB_TM_CMD:
3982 IS_QLAFX00(ha) ?
3983 qlafx00_tm_iocb(sp, pkt) :
3984 qla24xx_tm_iocb(sp, pkt);
3985 break;
3986 case SRB_FXIOCB_DCMD:
3987 case SRB_FXIOCB_BCMD:
3988 qlafx00_fxdisc_iocb(sp, pkt);
3989 break;
3990 case SRB_NVME_LS:
3991 qla_nvme_ls(sp, pkt);
3992 break;
3993 case SRB_ABT_CMD:
3994 IS_QLAFX00(ha) ?
3995 qlafx00_abort_iocb(sp, pkt) :
3996 qla24xx_abort_iocb(sp, pkt);
3997 break;
3998 case SRB_ELS_DCMD:
3999 qla24xx_els_logo_iocb(sp, pkt);
4000 break;
4001 case SRB_CT_PTHRU_CMD:
4002 qla2x00_ctpthru_cmd_iocb(sp, pkt);
4003 break;
4004 case SRB_MB_IOCB:
4005 qla2x00_mb_iocb(sp, pkt);
4006 break;
4007 case SRB_NACK_PLOGI:
4008 case SRB_NACK_PRLI:
4009 case SRB_NACK_LOGO:
4010 qla2x00_send_notify_ack_iocb(sp, pkt);
4011 break;
4012 case SRB_CTRL_VP:
4013 qla25xx_ctrlvp_iocb(sp, pkt);
4014 break;
4015 case SRB_PRLO_CMD:
4016 qla24xx_prlo_iocb(sp, pkt);
4017 break;
4018 case SRB_SA_UPDATE:
4019 qla24xx_sa_update_iocb(sp, pkt);
4020 break;
4021 case SRB_SA_REPLACE:
4022 qla24xx_sa_replace_iocb(sp, pkt);
4023 break;
4024 case SRB_MARKER:
4025 qla_marker_iocb(sp, pkt);
4026 break;
4027 default:
4028 break;
4029 }
4030
4031 if (sp->start_timer) {
4032 /* ref: TMR timer ref
4033 * This code should sit just before the start_iocbs call.
4034 * It ensures the caller does not need to do a kref_put
4035 * even on failure.
4036 */
4037 kref_get(&sp->cmd_kref);
4038 add_timer(&sp->u.iocb_cmd.timer);
4039 }
4040
4041 wmb();
4042 qla2x00_start_iocbs(vha, qp->req);
4043 done:
4044 if (rval)
4045 qla_put_fw_resources(sp->qpair, &sp->iores);
4046 spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
4047 return rval;
4048 }
4049
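/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: BSG command SRB
 * @vha: HA context
 * @cmd_pkt: bidirectional command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Sets both read and write data flags plus BD_WRAP_BACK and lays out
 * the request and reply scatter-gather lists, allocating Continuation
 * Type 1 IOCBs as needed.
 */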
4050 static void
4051 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
4052 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
4053 {
4054 uint16_t avail_dsds;
4055 struct dsd64 *cur_dsd;
4056 uint32_t req_data_len = 0;
4057 uint32_t rsp_data_len = 0;
4058 struct scatterlist *sg;
4059 int index;
4060 int entry_count = 1;
4061 struct bsg_job *bsg_job = sp->u.bsg_job;
4062
4063 /* Update entry type to indicate bidirectional command. */
4064 put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
4065
4066 /* Set the transfer direction; in this case set both flags.
4067 * Also set the BD_WRAP_BACK flag; the firmware will take care of
4068 * assigning DID=SID for outgoing pkts.
4069 */
4070 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
4071 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
4072 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
4073 BD_WRAP_BACK);
4074
4075 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
4076 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
4077 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
4078 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
4079
4080 vha->bidi_stats.transfer_bytes += req_data_len;
4081 vha->bidi_stats.io_count++;
4082
4083 vha->qla_stats.output_bytes += req_data_len;
4084 vha->qla_stats.output_requests++;
4085
4086 /* Only one DSD is available in the bidirectional IOCB; remaining
4087 * DSDs are bundled in continuation IOCBs.
4088 */
4089 avail_dsds = 1;
4090 cur_dsd = &cmd_pkt->fcp_dsd;
4091
4092 index = 0;
4093
4094 for_each_sg(bsg_job->request_payload.sg_list, sg,
4095 bsg_job->request_payload.sg_cnt, index) {
4096 cont_a64_entry_t *cont_pkt;
4097
4098 /* Allocate additional continuation packets */
4099 if (avail_dsds == 0) {
4100 /* Continuation Type 1 IOCB can accommodate
4101 * 5 DSDs.
4102 */
4103 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4104 cur_dsd = cont_pkt->dsd;
4105 avail_dsds = 5;
4106 entry_count++;
4107 }
4108 append_dsd64(&cur_dsd, sg);
4109 avail_dsds--;
4110 }
4111 /* For a read request, the DSDs always go to a continuation IOCB
4112 * and follow the write DSDs. If there is room on the current IOCB
4113 * they are added to that IOCB, else a new continuation IOCB is
4114 * allocated.
4115 */
4116 for_each_sg(bsg_job->reply_payload.sg_list, sg,
4117 bsg_job->reply_payload.sg_cnt, index) {
4118 cont_a64_entry_t *cont_pkt;
4119
4120 /* Allocate additional continuation packets */
4121 if (avail_dsds == 0) {
4122 /* Continuation Type 1 IOCB can accommodate
4123 * 5 DSDs.
4124 */
4125 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4126 cur_dsd = cont_pkt->dsd;
4127 avail_dsds = 5;
4128 entry_count++;
4129 }
4130 append_dsd64(&cur_dsd, sg);
4131 avail_dsds--;
4132 }
4133 /* This value should be the same as the number of IOCBs required for this cmd */
4134 cmd_pkt->entry_count = entry_count;
4135 }
4136
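/**
 * qla2x00_start_bidir() - Issue a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_BUSY if no handle or
 * request queue space is available, or EXT_STATUS_MAILBOX if the
 * marker IOCB could not be sent.
 */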
4137 int
4138 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
4139 {
4140
4141 struct qla_hw_data *ha = vha->hw;
4142 unsigned long flags;
4143 uint32_t handle;
4144 uint16_t req_cnt;
4145 uint16_t cnt;
4146 uint32_t *clr_ptr;
4147 struct cmd_bidir *cmd_pkt = NULL;
4148 struct rsp_que *rsp;
4149 struct req_que *req;
4150 int rval = EXT_STATUS_OK;
4151
4152 rval = QLA_SUCCESS;
4153
4154 rsp = ha->rsp_q_map[0];
4155 req = vha->req;
4156
4157 /* Send marker if required */
4158 if (vha->marker_needed != 0) {
4159 if (qla2x00_marker(vha, ha->base_qpair,
4160 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
4161 return EXT_STATUS_MAILBOX;
4162 vha->marker_needed = 0;
4163 }
4164
4165 /* Acquire ring specific lock */
4166 spin_lock_irqsave(&ha->hardware_lock, flags);
4167
4168 handle = qla2xxx_get_next_handle(req);
4169 if (handle == 0) {
4170 rval = EXT_STATUS_BUSY;
4171 goto queuing_error;
4172 }
4173
4174 /* Calculate number of IOCBs required */
4175 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4176
4177 /* Check for room on request queue. */
4178 if (req->cnt < req_cnt + 2) {
4179 if (IS_SHADOW_REG_CAPABLE(ha)) {
4180 cnt = *req->out_ptr;
4181 } else {
4182 cnt = rd_reg_dword_relaxed(req->req_q_out);
4183 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4184 goto queuing_error;
4185 }
4186
4187 if (req->ring_index < cnt)
4188 req->cnt = cnt - req->ring_index;
4189 else
4190 req->cnt = req->length -
4191 (req->ring_index - cnt);
4192 }
4193 if (req->cnt < req_cnt + 2) {
4194 rval = EXT_STATUS_BUSY;
4195 goto queuing_error;
4196 }
4197
4198 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4199 cmd_pkt->handle = make_handle(req->id, handle);
4200
4201 /* Zero out remaining portion of packet. */
4202 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
4203 clr_ptr = (uint32_t *)cmd_pkt + 2;
4204 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4205
4206 /* Set NPORT-ID (of vha)*/
4207 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4208 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4209 cmd_pkt->port_id[1] = vha->d_id.b.area;
4210 cmd_pkt->port_id[2] = vha->d_id.b.domain;
4211
4212 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4213 cmd_pkt->entry_status = (uint8_t) rsp->id;
4214 /* Build command packet. */
4215 req->current_outstanding_cmd = handle;
4216 req->outstanding_cmds[handle] = sp;
4217 sp->handle = handle;
4218 req->cnt -= req_cnt;
4219
4220 /* Send the command to the firmware */
4221 wmb();
4222 qla2x00_start_iocbs(vha, req);
4223 queuing_error:
4224 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4225
4226 return rval;
4227 }
4228
4229 /**
4230 * qla_start_scsi_type6() - Send a SCSI command to the ISP
4231 * @sp: command to send to the ISP
4232 *
4233 * Returns non-zero if a failure occurred, else zero.
4234 */
4235 static int
4236 qla_start_scsi_type6(srb_t *sp)
4237 {
4238 int nseg;
4239 unsigned long flags;
4240 uint32_t *clr_ptr;
4241 uint32_t handle;
4242 struct cmd_type_6 *cmd_pkt;
4243 uint16_t cnt;
4244 uint16_t req_cnt;
4245 uint16_t tot_dsds;
4246 struct req_que *req = NULL;
4247 struct rsp_que *rsp;
4248 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
4249 struct scsi_qla_host *vha = sp->fcport->vha;
4250 struct qla_hw_data *ha = vha->hw;
4251 struct qla_qpair *qpair = sp->qpair;
4252 uint16_t more_dsd_lists = 0;
4253 struct dsd_dma *dsd_ptr;
4254 uint16_t i;
4255 __be32 *fcp_dl;
4256 uint8_t additional_cdb_len;
4257 struct ct6_dsd *ctx;
4258
4259 /* Acquire qpair specific lock */
4260 spin_lock_irqsave(&qpair->qp_lock, flags);
4261
4262 /* Setup qpair pointers */
4263 req = qpair->req;
4264 rsp = qpair->rsp;
4265
4266 /* So we know we haven't pci_map'ed anything yet */
4267 tot_dsds = 0;
4268
4269 /* Send marker if required */
4270 if (vha->marker_needed != 0) {
4271 if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
4272 spin_unlock_irqrestore(&qpair->qp_lock, flags);
4273 return QLA_FUNCTION_FAILED;
4274 }
4275 vha->marker_needed = 0;
4276 }
4277
4278 handle = qla2xxx_get_next_handle(req);
4279 if (handle == 0)
4280 goto queuing_error;
4281
4282 /* Map the sg table so we have an accurate count of sg entries needed */
4283 if (scsi_sg_count(cmd)) {
4284 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
4285 scsi_sg_count(cmd), cmd->sc_data_direction);
4286 if (unlikely(!nseg))
4287 goto queuing_error;
4288 } else {
4289 nseg = 0;
4290 }
4291
4292 tot_dsds = nseg;
4293
4294 /* Even though the driver needs only one Type 6 IOCB, the FW still converts DSDs to Continuation IOCBs. */
4295 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4296
4297 sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
4298 sp->iores.exch_cnt = 1;
4299 sp->iores.iocb_cnt = req_cnt;
4300
4301 if (qla_get_fw_resources(sp->qpair, &sp->iores))
4302 goto queuing_error;
4303
4304 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
4305 if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
4306 ql_dbg(ql_dbg_io, vha, 0x3028,
4307 "Num of DSD list %d is than %d for cmd=%p.\n",
4308 more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
4309 goto queuing_error;
4310 }
4311
4312 if (more_dsd_lists <= qpair->dsd_avail)
4313 goto sufficient_dsds;
4314 else
4315 more_dsd_lists -= qpair->dsd_avail;
4316
4317 for (i = 0; i < more_dsd_lists; i++) {
4318 dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
4319 if (!dsd_ptr) {
4320 ql_log(ql_log_fatal, vha, 0x3029,
4321 "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
4322 goto queuing_error;
4323 }
4324 INIT_LIST_HEAD(&dsd_ptr->list);
4325
4326 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
4327 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
4328 if (!dsd_ptr->dsd_addr) {
4329 kfree(dsd_ptr);
4330 ql_log(ql_log_fatal, vha, 0x302a,
4331 "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
4332 goto queuing_error;
4333 }
4334 list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
4335 qpair->dsd_avail++;
4336 }
4337
4338 sufficient_dsds:
4339 req_cnt = 1;
4340
4341 if (req->cnt < (req_cnt + 2)) {
4342 if (IS_SHADOW_REG_CAPABLE(ha)) {
4343 cnt = *req->out_ptr;
4344 } else {
4345 cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
4346 if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4347 goto queuing_error;
4348 }
4349
4350 if (req->ring_index < cnt)
4351 req->cnt = cnt - req->ring_index;
4352 else
4353 req->cnt = req->length - (req->ring_index - cnt);
4354 if (req->cnt < (req_cnt + 2))
4355 goto queuing_error;
4356 }
4357
4358 ctx = &sp->u.scmd.ct6_ctx;
4359
4360 memset(ctx, 0, sizeof(struct ct6_dsd));
4361 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
4362 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
4363 if (!ctx->fcp_cmnd) {
4364 ql_log(ql_log_fatal, vha, 0x3031,
4365 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
4366 goto queuing_error;
4367 }
4368
4369 /* Initialize the DSD list and dma handle */
4370 INIT_LIST_HEAD(&ctx->dsd_list);
4371 ctx->dsd_use_cnt = 0;
4372
4373 if (cmd->cmd_len > 16) {
4374 additional_cdb_len = cmd->cmd_len - 16;
4375 if (cmd->cmd_len % 4 ||
4376 cmd->cmd_len > QLA_CDB_BUF_SIZE) {
4377 /*
4378 * A SCSI command longer than 16 bytes must be a multiple of 4
4379 * and must not exceed QLA_CDB_BUF_SIZE.
4380 */
4381 ql_log(ql_log_warn, vha, 0x3033,
4382 "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
4383 cmd->cmd_len, cmd);
4384 goto queuing_error_fcp_cmnd;
4385 }
4386 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
4387 } else {
4388 additional_cdb_len = 0;
4389 ctx->fcp_cmnd_len = 12 + 16 + 4;
4390 }
4391
4392 /* Build command packet. */
4393 req->current_outstanding_cmd = handle;
4394 req->outstanding_cmds[handle] = sp;
4395 sp->handle = handle;
4396 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
4397 req->cnt -= req_cnt;
4398
4399 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
4400 cmd_pkt->handle = make_handle(req->id, handle);
4401
4402 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
4403 clr_ptr = (uint32_t *)cmd_pkt + 2;
4404 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4405 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
4406
4407 /* Set NPORT-ID and LUN number */
4408 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
4409 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
4410 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
4411 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
4412 cmd_pkt->vp_index = sp->vha->vp_idx;
4413
4414 /* Build IOCB segments */
4415 qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
4416
4417 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
4418 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
4419
4420 /* build FCP_CMND IU */
4421 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
4422 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
4423
4424 if (cmd->sc_data_direction == DMA_TO_DEVICE)
4425 ctx->fcp_cmnd->additional_cdb_len |= 1;
4426 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
4427 ctx->fcp_cmnd->additional_cdb_len |= 2;
4428
4429 /* Populate the FCP_PRIO. */
4430 if (ha->flags.fcp_prio_enabled)
4431 ctx->fcp_cmnd->task_attribute |=
4432 sp->fcport->fcp_prio << 3;
4433
4434 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
4435
4436 fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
4437 additional_cdb_len);
4438 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
4439
4440 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
4441 put_unaligned_le64(ctx->fcp_cmnd_dma,
4442 &cmd_pkt->fcp_cmnd_dseg_address);
4443
4444 sp->flags |= SRB_FCP_CMND_DMA_VALID;
4445 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
4446 /* Set total data segment count. */
4447 cmd_pkt->entry_count = (uint8_t)req_cnt;
4448
4449 wmb();
4450 /* Adjust ring index. */
4451 req->ring_index++;
4452 if (req->ring_index == req->length) {
4453 req->ring_index = 0;
4454 req->ring_ptr = req->ring;
4455 } else {
4456 req->ring_ptr++;
4457 }
4458
4459 sp->qpair->cmd_cnt++;
4460 sp->flags |= SRB_DMA_VALID;
4461
4462 /* Set chip new ring index. */
4463 wrt_reg_dword(req->req_q_in, req->ring_index);
4464
4465 /* Manage unprocessed RIO/ZIO commands in response queue. */
4466 if (vha->flags.process_response_queue &&
4467 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
4468 qla24xx_process_response_queue(vha, rsp);
4469
4470 spin_unlock_irqrestore(&qpair->qp_lock, flags);
4471
4472 return QLA_SUCCESS;
4473
4474 queuing_error_fcp_cmnd:
4475 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
4476
4477 queuing_error:
4478 if (tot_dsds)
4479 scsi_dma_unmap(cmd);
4480
4481 qla_put_fw_resources(sp->qpair, &sp->iores);
4482
4483 if (sp->u.scmd.crc_ctx) {
4484 mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
4485 sp->u.scmd.crc_ctx = NULL;
4486 }
4487
4488 spin_unlock_irqrestore(&qpair->qp_lock, flags);
4489
4490 return QLA_FUNCTION_FAILED;
4491 }
4492