/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2013 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

int _dump_buf_done = 1;

static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};

struct scsi_dif_tuple {
	__be16 guard_tag;       /* Checksum */
	__be16 app_tag;         /* Opaque storage */
	__be32 ref_tag;         /* Target LBA or indirect LBA */
};
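
/*
 * Illustrative example (values are hypothetical, not from this source): for
 * a one-sector Type 1 DIF write of 512 bytes at LBA 0x1234, the 8-byte tuple
 * carried with the sector would look like
 *
 *	guard_tag = cpu_to_be16(crc);      CRC-16 (T10-DIF) of the 512 bytes
 *	app_tag   = cpu_to_be16(0);        opaque to the transport
 *	ref_tag   = cpu_to_be32(0x1234);   low 32 bits of the target LBA
 *
 * All three fields are big-endian on the wire, which is why the error
 * injection code below wraps every access in cpu_to_beXX()/beXX_to_cpu().
 */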

static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);

static void
lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_sglist(cmnd);

	if (!_dump_buf_data) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9051 BLKGRD: ERROR: data scatterlist is null\n");
		return;
	}

	dst = (void *) _dump_buf_data;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static void
lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd)
{
	void *src, *dst;
	struct scatterlist *sgde = scsi_prot_sglist(cmnd);

	if (!_dump_buf_dif) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9052 BLKGRD: ERROR %s _dump_buf_dif is NULL\n",
				__func__);
		return;
	}

	if (!sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9053 BLKGRD: ERROR: prot scatterlist is null\n");
		return;
	}

	dst = _dump_buf_dif;
	while (sgde) {
		src = sg_virt(sgde);
		memcpy(dst, src, sgde->length);
		dst += sgde->length;
		sgde = sg_next(sgde);
	}
}

static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}

#define LPFC_CHECK_PROTECT_GUARD	1
#define LPFC_CHECK_PROTECT_REF		2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}

static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
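
/*
 * Note (assumption, not stated in this file): lpfc_cmd_guard_csum() returns
 * 1 only when the midlayer was told that the initiator computes IP checksum
 * guard tags instead of T10-DIF CRCs, which happens if the driver registered
 * that capability at attach time with something like
 *
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
 *
 * With SHOST_DIX_GUARD_CRC (or no DIF at all) it returns 0 and the hardware
 * checks/generates a CRC-16 guard instead.
 */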

/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called from the lpfc_prep_task_mgmt_cmd function to
 * set the last bit in the response sge entry.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
				struct lpfc_scsi_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;

	if (sgl) {
		sgl += 1;
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}

/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called on command completion and updates the latency
 * statistics for the completed command.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		/* check array subscript bounds */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
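
/*
 * Worked example for the bucket math above (hypothetical values): with
 * bucket_base = 0 and bucket_step = 10, a 37ms completion selects
 *
 *	i = (37 + 10 - 1 - 0) / 10 = 4		(LPFC_LINEAR_BUCKET)
 *
 * while the power-of-2 branch walks i = 0, 1, 2, ... until
 * latency <= bucket_base + (1 << i) * bucket_step, i.e. 37 <= 40 at i = 2.
 * Out-of-range linear indices are clamped to [0, LPFC_MAX_BUCKET_COUNT - 1].
 */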

/**
 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport  *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_change_queue_depth - Alter scsi device queue depth
 * @sdev: Pointer to the scsi device on which to change the queue depth.
 * @qdepth: New queue depth to set the sdev to.
 * @reason: The reason for the queue depth change.
 *
 * This function is called by the midlayer and the LLD to alter the queue
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
int
lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	unsigned long new_queue_depth, old_queue_depth;

	old_queue_depth = sdev->queue_depth;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	new_queue_depth = sdev->queue_depth;
	rdata = sdev->hostdata;
	if (rdata)
		lpfc_send_sdev_queuedepth_change_event(phba, vport,
						       rdata->pnode, sdev->lun,
						       old_queue_depth,
						       new_queue_depth);
	return sdev->queue_depth;
}

/**
 * lpfc_change_queue_type() - Change a device's scsi tag queuing type
 * @sdev: Pointer to the scsi device whose queue tag type is to change
 * @tag_type: Identifier for queue tag type
 */
static int
lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}

/**
 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process
 * the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* use time_before() so the check is safe across a jiffies wrap */
	if (time_before(jiffies,
			phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
 * @vport: The virtual port for which this call is being executed.
 * @queue_depth: Current queue depth on the scsi device.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for @vport's phba. It
 * posts at most one event per QUEUE_RAMP_UP_INTERVAL after last_ramp_up_time
 * or last_rsrc_error_time, and wakes up the worker thread of the hba to
 * process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
			uint32_t queue_depth)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (time_before(jiffies,
			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) ||
	    time_before(jiffies,
			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
 * worker thread. It reduces the queue depth for all scsi devices on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				lpfc_change_queue_depth(sdev, new_queue_depth,
							SCSI_QDEPTH_DEFAULT);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
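
/*
 * Worked example for the ramp-down math above (hypothetical counters): with
 * sdev->queue_depth = 32, num_rsrc_err = 1 and num_cmd_success = 3, the
 * proportional cut is 32 * 1 / (1 + 3) = 8, so the device drops to
 * 32 - 8 = 24. If the proportional cut rounds down to zero, the depth is
 * still reduced by one, guaranteeing the handler makes forward progress.
 */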

/**
 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
 * worker thread. It increases the queue depth for all scsi devices on each
 * vport associated with @phba by 1. This routine also resets @phba
 * num_rsrc_err and num_cmd_success to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				lpfc_change_queue_depth(sdev,
							sdev->queue_depth+1,
							SCSI_QDEPTH_RAMP_UP);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to the blocked
 * state by invoking the fc_remote_port_delete() routine. This function is
 * invoked by EEH when a device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates a scsi buffer for device with SLI-3 interface spec,
 * the scsi buffer contains all the necessary information needed to initiate
 * a SCSI I/O. The non-DMAable buffer region contains information to build
 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
 * and the initial BPL. In addition to allocating memory, the FCP CMND and
 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for the
		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
		 * necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Initialize virtual ptrs to dma_buf region. */
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
					psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = psb->fcp_bpl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
		 * are sg list bdes.  Initialize the first two and leave the
		 * rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_bpl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_bpl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		lpfc_release_scsi_buf_s3(phba, psb);
	}

	return bcnt;
}
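
/*
 * Resulting layout of one SLI-3 DMA buffer built above (offsets follow from
 * the pointer arithmetic; total size is cfg_sg_dma_buf_size):
 *
 *	+------------------+ <- psb->data / psb->dma_handle
 *	| struct fcp_cmnd  |    described by bpl[0]
 *	+------------------+
 *	| struct fcp_rsp   |    described by bpl[1]
 *	+------------------+
 *	| BPL entries      |    bpl[2..] are filled per-I/O in queuecommand
 *	+------------------+
 */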

/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * This routine is invoked by the vport cleanup for deletions and the cleanup
 * for an ndlp on removal.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->rdata && psb->rdata->pnode
			&& psb->rdata->pnode->vport == vport)
			psb->rdata = NULL;
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * FCP aborted xri.
 **/
void
lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
			  struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_scsi_buf *psb, *next_psb;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del(&psb->list);
			psb->exch_busy = 0;
			psb->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_scsi_buf_list_lock);
			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		psb->exch_busy = 0;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
 * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_sblist: pointer to the scsi buffer list.
 *
 * This routine walks a list of scsi buffers that was passed in. It attempts
 * to construct blocks of scsi buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single SCSI buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_sblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
			     struct list_head *post_sblist, int sb_count)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_bpl1;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(blck_sblist);
	LIST_HEAD(scsi_sblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
		list_del_init(&psb->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sblist, &blck_sblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&psb->list, &prep_sblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = psb->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for SCSI buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_sblist, &blck_sblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_bpl1 = psb->dma_phys_bpl +
								SGL_PAGE_SIZE;
				else
					pdma_phys_bpl1 = 0;
				status = lpfc_sli4_post_sgl(phba,
						psb->dma_phys_bpl,
						pdma_phys_bpl1,
						psb->cur_iocbq.sli4_xritag);
				if (status) {
					/* failure, put on abort scsi list */
					psb->exch_busy = 1;
				} else {
					/* success, put on SCSI buffer list */
					psb->exch_busy = 0;
					psb->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on SCSI buffer sgl list */
				list_add_tail(&psb->list, &scsi_sblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of SCSI buffer list sgls */
		status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
						       post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset SCSI buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */
		while (!list_empty(&blck_sblist)) {
			list_remove_head(&blck_sblist, psb,
					 struct lpfc_scsi_buf, list);
			if (status) {
				/* failure, put on abort scsi list */
				psb->exch_busy = 1;
			} else {
				/* success, put on SCSI buffer list */
				psb->exch_busy = 0;
				psb->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&psb->list, &scsi_sblist);
		}
	}
	/* Push SCSI buffers with sgl posted to the available list */
	while (!list_empty(&scsi_sblist)) {
		list_remove_head(&scsi_sblist, psb,
				 struct lpfc_scsi_buf, list);
		lpfc_release_scsi_buf_s4(phba, psb);
	}
	return num_posted;
}
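
/*
 * Example of the batching above (hypothetical XRIs): buffers arriving with
 * sli4_xritag 100, 101, 103, 104 are posted as two non-embedded blocks,
 * {100, 101} and {103, 104}, because the hole at 102 terminates the first
 * contiguous run. A trailing run of exactly one buffer is posted with the
 * embedded lpfc_sli4_post_sgl() path instead.
 */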

/**
 * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of scsi buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
 * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_sblist);
	int num_posted, rc = 0;

	/* get all SCSI buffers that need to be reposted to a local list */
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	/* inner lock must not be _irq: an inner spin_unlock_irq would
	 * re-enable interrupts while the outer lock is still held
	 */
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
	list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist)) {
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
						phba->sli4_hba.scsi_xri_cnt);
		/* failed to post any scsi buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}

/**
 * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine allocates scsi buffers for a device with the SLI-4 interface
 * spec; each scsi buffer contains all the necessary information needed to
 * initiate a SCSI I/O. After allocating up to @num_to_alloc SCSI buffers and
 * putting them on a list, it posts them to the port using SGL block post.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated and posted.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct sli4_sge *sgl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag, lxri = 0;
	int bcnt, num_posted, sgl_size;
	LIST_HEAD(prep_sblist);
	LIST_HEAD(post_sblist);
	LIST_HEAD(scsi_sblist);

	sgl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp));

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
		if (!psb)
			break;
		/*
		 * Get memory from the pci pool to map the virt space to
		 * pci bus space for an I/O. The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the number
		 * of bde's necessary to support the sg_tablesize.
		 */
		psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
						GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}
		memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

		/* Page alignment is CRITICAL, double check to be sure */
		if (((unsigned long)(psb->data) &
		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
				psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}

		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
		psb->fcp_bpl = psb->data;
		psb->fcp_cmnd = (psb->data + sgl_size);
		psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
					sizeof(struct fcp_cmnd));

		/* Initialize local short-hand pointers. */
		sgl = (struct sli4_sge *)psb->fcp_bpl;
		pdma_phys_bpl = psb->dma_handle;
		pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
		pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.
		 * The balance are sg list bdes. Initialize the
		 * first two and leave the rest for queuecommand.
		 */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
		sgl++;

		/* Setup the physical region for the FCP RSP */
		sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
		sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
		/* setting the BLP size to 2 * sizeof BDE may not be correct.
		 * We are setting the bpl to point to our sgl. An sgl's
		 * entries are 16 bytes, a bpl's entries are 12 bytes.
		 */
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
		iocb->ulpClass = CLASS3;
		psb->cur_iocbq.context1 = psb;
		psb->dma_phys_bpl = pdma_phys_bpl;

		/* add the scsi buffer to a post list */
		list_add_tail(&psb->list, &post_sblist);
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt++;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
			"3021 Allocate %d out of %d requested new SCSI "
			"buffers\n", bcnt, num_to_alloc);

	/* post the list of scsi buffer sgls to port if available */
	if (!list_empty(&post_sblist))
		num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
							  &post_sblist, bcnt);
	else
		num_posted = 0;

	return num_posted;
}
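
/*
 * Resulting layout of one SLI-4 DMA buffer built above. It is the mirror
 * image of the SLI-3 layout: the SGL comes first so that what is posted to
 * the port starts page aligned:
 *
 *	+------------------+ <- psb->data / psb->dma_handle (page aligned)
 *	| SGL (sgl_size)   |    sge[0] -> fcp_cmnd, sge[1] -> fcp_rsp
 *	+------------------+
 *	| struct fcp_cmnd  |
 *	+------------------+
 *	| struct fcp_rsp   |
 *	+------------------+
 */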

/**
 * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * This routine wraps the actual SCSI buffer allocator function pointer from
 * the lpfc_hba struct.
 *
 * Return codes:
 *   int - number of scsi buffers that were allocated.
 *   0 = failure, less than num_to_alloc is a partial failure.
 **/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long gflag = 0;
	unsigned long pflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf,
			 list);
	if (!lpfc_cmd) {
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_scsi_buf, list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer whose XRI is not on an active RRQ from
 * the @phba lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
	unsigned long gflag = 0;
	unsigned long pflag = 0;
	int found = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag);
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &phba->lpfc_scsi_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_cmd->list);
		found = 1;
		break;
	}
	if (!found) {
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag);
		list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
					 &phba->lpfc_scsi_buf_list_get, list) {
			if (lpfc_test_rrq_active(
				phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
				continue;
			list_del(&lpfc_cmd->list);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag);
	if (!found)
		return NULL;
	return lpfc_cmd;
}
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	return phba->lpfc_get_scsi_buf(phba, ndlp);
}

/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}

/**
 * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
 * and cannot be reused for at least RA_TOV amount of time if it was
 * aborted.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->nonsg_phys = 0;
	psb->prot_seg_cnt = 0;

	if (psb->exch_busy) {
		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
		psb->pCmd = NULL;
		list_add_tail(&psb->list,
			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
					iflag);
	} else {
		psb->pCmd = NULL;
		psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
		spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
		spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
	}
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	phba->lpfc_release_scsi_buf(phba, psb);
}
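
/*
 * The _s3/_s4 variants above are reached through per-HBA function pointers
 * bound once at attach time based on the SLI revision; roughly (sketch, see
 * lpfc_scsi_api_table_setup() for the authoritative code):
 *
 *	case LPFC_PCI_DEV_LP:
 *		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:
 *		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
 *		break;
 */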

/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
 * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
 * through the sg elements and formats the BDEs. It also initializes all
 * IOCB fields which are dependent on scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this is a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9064 BLKGRD: %s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	return 0;
}
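
/*
 * Worked example for the BDE sizing above (hypothetical I/O): a 3-element
 * scatterlist on the BPL path lands in the final else branch, so with
 * sizeof(struct ulp_bde64) == 12,
 *
 *	bdl.bdeSize = (3 + 2) * 12 = 60
 *
 * i.e. the BPL length counts the FCP_CMND and FCP_RSP BDEs plus the three
 * data BDEs. On SLI-3, the same I/O fits in the extended IOCB
 * (nseg <= LPFC_EXT_DATA_BDE_COUNT) and sets ebde_count = 3 + 1 instead.
 */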

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return if error injection is detected by Initiator */
#define BG_ERR_INIT	0x1
/* Return if error injection is detected by Target */
#define BG_ERR_TGT	0x2
/* Return if swapping CSUM<-->CRC is required for error injection */
#define BG_ERR_SWAP	0x10
/* Return if disabling Guard/Ref/App checking is required for error injection */
#define BG_ERR_CHECK	0x20
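
/*
 * These flags are OR-able: the WRITE_PASS reftag injection below, for
 * instance, returns (BG_ERR_TGT | BG_ERR_CHECK) == 0x22, telling the caller
 * both that the target is expected to catch the error and that Guard/Ref/App
 * checking must be disabled on the initiator side so the corrupted tuple
 * actually goes out on the wire.
 */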

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
1453  *
1454  * Returns BG_ERR_* bit mask or 0 if request ignored
1455  **/
1456 static int
1457 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1458 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1459 {
1460 	struct scatterlist *sgpe; /* s/g prot entry */
1461 	struct scatterlist *sgde; /* s/g data entry */
1462 	struct lpfc_scsi_buf *lpfc_cmd = NULL;
1463 	struct scsi_dif_tuple *src = NULL;
1464 	struct lpfc_nodelist *ndlp;
1465 	struct lpfc_rport_data *rdata;
1466 	uint32_t op = scsi_get_prot_op(sc);
1467 	uint32_t blksize;
1468 	uint32_t numblks;
1469 	sector_t lba;
1470 	int rc = 0;
1471 	int blockoff = 0;
1472 
1473 	if (op == SCSI_PROT_NORMAL)
1474 		return 0;
1475 
1476 	sgpe = scsi_prot_sglist(sc);
1477 	sgde = scsi_sglist(sc);
1478 	lba = scsi_get_lba(sc);
1479 
1480 	/* First check if we need to match the LBA */
1481 	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1482 		blksize = lpfc_cmd_blksize(sc);
1483 		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1484 
1485 		/* Make sure we have the right LBA if one is specified */
1486 		if ((phba->lpfc_injerr_lba < lba) ||
1487 			(phba->lpfc_injerr_lba >= (lba + numblks)))
1488 			return 0;
1489 		if (sgpe) {
1490 			blockoff = phba->lpfc_injerr_lba - lba;
1491 			numblks = sg_dma_len(sgpe) /
1492 				sizeof(struct scsi_dif_tuple);
1493 			if (numblks < blockoff)
1494 				blockoff = numblks;
1495 		}
1496 	}
1497 
1498 	/* Next check if we need to match the remote NPortID or WWPN */
1499 	rdata = sc->device->hostdata;
1500 	if (rdata && rdata->pnode) {
1501 		ndlp = rdata->pnode;
1502 
1503 		/* Make sure we have the right NPortID if one is specified */
1504 		if (phba->lpfc_injerr_nportid  &&
1505 			(phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1506 			return 0;
1507 
1508 		/*
1509 		 * Make sure we have the right WWPN if one is specified.
1510 		 * wwn[0] should be a non-zero NAA in a good WWPN.
1511 		 */
1512 		if (phba->lpfc_injerr_wwpn.u.wwn[0]  &&
1513 			(memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1514 				sizeof(struct lpfc_name)) != 0))
1515 			return 0;
1516 	}
1517 
1518 	/* Setup a ptr to the protection data if the SCSI host provides it */
1519 	if (sgpe) {
1520 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1521 		src += blockoff;
1522 		lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble;
1523 	}
1524 
1525 	/* Should we change the Reference Tag */
1526 	if (reftag) {
1527 		if (phba->lpfc_injerr_wref_cnt) {
1528 			switch (op) {
1529 			case SCSI_PROT_WRITE_PASS:
1530 				if (src) {
1531 					/*
1532 					 * For WRITE_PASS, force the error
1533 					 * to be sent on the wire. It should
1534 					 * be detected by the Target.
1535 					 * If blockoff != 0 error will be
1536 					 * inserted in middle of the IO.
1537 					 */
1538 
1539 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1540 					"9076 BLKGRD: Injecting reftag error: "
1541 					"write lba x%lx + x%x oldrefTag x%x\n",
1542 					(unsigned long)lba, blockoff,
1543 					be32_to_cpu(src->ref_tag));
1544 
1545 					/*
1546 					 * Save the old ref_tag so we can
1547 					 * restore it on completion.
1548 					 */
1549 					if (lpfc_cmd) {
1550 						lpfc_cmd->prot_data_type =
1551 							LPFC_INJERR_REFTAG;
1552 						lpfc_cmd->prot_data_segment =
1553 							src;
1554 						lpfc_cmd->prot_data =
1555 							src->ref_tag;
1556 					}
1557 					src->ref_tag = cpu_to_be32(0xDEADBEEF);
1558 					phba->lpfc_injerr_wref_cnt--;
1559 					if (phba->lpfc_injerr_wref_cnt == 0) {
1560 						phba->lpfc_injerr_nportid = 0;
1561 						phba->lpfc_injerr_lba =
1562 							LPFC_INJERR_LBA_OFF;
1563 						memset(&phba->lpfc_injerr_wwpn,
1564 						  0, sizeof(struct lpfc_name));
1565 					}
1566 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1567 
1568 					break;
1569 				}
1570 				/* Drop thru */
1571 			case SCSI_PROT_WRITE_INSERT:
1572 				/*
1573 				 * For WRITE_INSERT, force the error
1574 				 * to be sent on the wire. It should be
1575 				 * detected by the Target.
1576 				 */
1577 				/* DEADBEEF will be the reftag on the wire */
1578 				*reftag = 0xDEADBEEF;
1579 				phba->lpfc_injerr_wref_cnt--;
1580 				if (phba->lpfc_injerr_wref_cnt == 0) {
1581 					phba->lpfc_injerr_nportid = 0;
1582 					phba->lpfc_injerr_lba =
1583 					LPFC_INJERR_LBA_OFF;
1584 					memset(&phba->lpfc_injerr_wwpn,
1585 						0, sizeof(struct lpfc_name));
1586 				}
1587 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1588 
1589 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1590 					"9078 BLKGRD: Injecting reftag error: "
1591 					"write lba x%lx\n", (unsigned long)lba);
1592 				break;
1593 			case SCSI_PROT_WRITE_STRIP:
1594 				/*
1595 				 * For WRITE_STRIP and WRITE_PASS,
1596 				 * force the error on data
1597 				 * being copied from SLI-Host to SLI-Port.
1598 				 */
1599 				*reftag = 0xDEADBEEF;
1600 				phba->lpfc_injerr_wref_cnt--;
1601 				if (phba->lpfc_injerr_wref_cnt == 0) {
1602 					phba->lpfc_injerr_nportid = 0;
1603 					phba->lpfc_injerr_lba =
1604 						LPFC_INJERR_LBA_OFF;
1605 					memset(&phba->lpfc_injerr_wwpn,
1606 						0, sizeof(struct lpfc_name));
1607 				}
1608 				rc = BG_ERR_INIT;
1609 
1610 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1611 					"9077 BLKGRD: Injecting reftag error: "
1612 					"write lba x%lx\n", (unsigned long)lba);
1613 				break;
1614 			}
1615 		}
1616 		if (phba->lpfc_injerr_rref_cnt) {
1617 			switch (op) {
1618 			case SCSI_PROT_READ_INSERT:
1619 			case SCSI_PROT_READ_STRIP:
1620 			case SCSI_PROT_READ_PASS:
1621 				/*
1622 				 * For READ_STRIP and READ_PASS, force the
1623 				 * error on data being read off the wire. It
1624 				 * should force an IO error to the driver.
1625 				 */
1626 				*reftag = 0xDEADBEEF;
1627 				phba->lpfc_injerr_rref_cnt--;
1628 				if (phba->lpfc_injerr_rref_cnt == 0) {
1629 					phba->lpfc_injerr_nportid = 0;
1630 					phba->lpfc_injerr_lba =
1631 						LPFC_INJERR_LBA_OFF;
1632 					memset(&phba->lpfc_injerr_wwpn,
1633 						0, sizeof(struct lpfc_name));
1634 				}
1635 				rc = BG_ERR_INIT;
1636 
1637 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1638 					"9079 BLKGRD: Injecting reftag error: "
1639 					"read lba x%lx\n", (unsigned long)lba);
1640 				break;
1641 			}
1642 		}
1643 	}
1644 
1645 	/* Should we change the Application Tag */
1646 	if (apptag) {
1647 		if (phba->lpfc_injerr_wapp_cnt) {
1648 			switch (op) {
1649 			case SCSI_PROT_WRITE_PASS:
1650 				if (src) {
1651 					/*
1652 					 * For WRITE_PASS, force the error
1653 					 * to be sent on the wire. It should
1654 					 * be detected by the Target.
1655 					 * If blockoff != 0, the error will
1656 					 * be inserted in the middle of the IO.
1657 					 */
1658 
1659 					lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1660 					"9080 BLKGRD: Injecting apptag error: "
1661 					"write lba x%lx + x%x oldappTag x%x\n",
1662 					(unsigned long)lba, blockoff,
1663 					be16_to_cpu(src->app_tag));
1664 
1665 					/*
1666 					 * Save the old app_tag so we can
1667 					 * restore it on completion.
1668 					 */
1669 					if (lpfc_cmd) {
1670 						lpfc_cmd->prot_data_type =
1671 							LPFC_INJERR_APPTAG;
1672 						lpfc_cmd->prot_data_segment =
1673 							src;
1674 						lpfc_cmd->prot_data =
1675 							src->app_tag;
1676 					}
1677 					src->app_tag = cpu_to_be16(0xDEAD);
1678 					phba->lpfc_injerr_wapp_cnt--;
1679 					if (phba->lpfc_injerr_wapp_cnt == 0) {
1680 						phba->lpfc_injerr_nportid = 0;
1681 						phba->lpfc_injerr_lba =
1682 							LPFC_INJERR_LBA_OFF;
1683 						memset(&phba->lpfc_injerr_wwpn,
1684 						  0, sizeof(struct lpfc_name));
1685 					}
1686 					rc = BG_ERR_TGT | BG_ERR_CHECK;
1687 					break;
1688 				}
1689 				/* Drop thru */
1690 			case SCSI_PROT_WRITE_INSERT:
1691 				/*
1692 				 * For WRITE_INSERT, force the
1693 				 * error to be sent on the wire. It should be
1694 				 * detected by the Target.
1695 				 */
1696 				/* DEAD will be the apptag on the wire */
1697 				*apptag = 0xDEAD;
1698 				phba->lpfc_injerr_wapp_cnt--;
1699 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1700 					phba->lpfc_injerr_nportid = 0;
1701 					phba->lpfc_injerr_lba =
1702 						LPFC_INJERR_LBA_OFF;
1703 					memset(&phba->lpfc_injerr_wwpn,
1704 						0, sizeof(struct lpfc_name));
1705 				}
1706 				rc = BG_ERR_TGT | BG_ERR_CHECK;
1707 
1708 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1709 					"0813 BLKGRD: Injecting apptag error: "
1710 					"write lba x%lx\n", (unsigned long)lba);
1711 				break;
1712 			case SCSI_PROT_WRITE_STRIP:
1713 				/*
1714 				 * For WRITE_STRIP and WRITE_PASS,
1715 				 * force the error on data
1716 				 * being copied from SLI-Host to SLI-Port.
1717 				 */
1718 				*apptag = 0xDEAD;
1719 				phba->lpfc_injerr_wapp_cnt--;
1720 				if (phba->lpfc_injerr_wapp_cnt == 0) {
1721 					phba->lpfc_injerr_nportid = 0;
1722 					phba->lpfc_injerr_lba =
1723 						LPFC_INJERR_LBA_OFF;
1724 					memset(&phba->lpfc_injerr_wwpn,
1725 						0, sizeof(struct lpfc_name));
1726 				}
1727 				rc = BG_ERR_INIT;
1728 
1729 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1730 					"0812 BLKGRD: Injecting apptag error: "
1731 					"write lba x%lx\n", (unsigned long)lba);
1732 				break;
1733 			}
1734 		}
1735 		if (phba->lpfc_injerr_rapp_cnt) {
1736 			switch (op) {
1737 			case SCSI_PROT_READ_INSERT:
1738 			case SCSI_PROT_READ_STRIP:
1739 			case SCSI_PROT_READ_PASS:
1740 				/*
1741 				 * For READ_STRIP and READ_PASS, force the
1742 				 * error on data being read off the wire. It
1743 				 * should force an IO error to the driver.
1744 				 */
1745 				*apptag = 0xDEAD;
1746 				phba->lpfc_injerr_rapp_cnt--;
1747 				if (phba->lpfc_injerr_rapp_cnt == 0) {
1748 					phba->lpfc_injerr_nportid = 0;
1749 					phba->lpfc_injerr_lba =
1750 						LPFC_INJERR_LBA_OFF;
1751 					memset(&phba->lpfc_injerr_wwpn,
1752 						0, sizeof(struct lpfc_name));
1753 				}
1754 				rc = BG_ERR_INIT;
1755 
1756 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1757 					"0814 BLKGRD: Injecting apptag error: "
1758 					"read lba x%lx\n", (unsigned long)lba);
1759 				break;
1760 			}
1761 		}
1762 	}
1763 
1764 
1765 	/* Should we change the Guard Tag */
1766 	if (new_guard) {
1767 		if (phba->lpfc_injerr_wgrd_cnt) {
1768 			switch (op) {
1769 			case SCSI_PROT_WRITE_PASS:
1770 				rc = BG_ERR_CHECK;
1771 				/* Drop thru */
1772 
1773 			case SCSI_PROT_WRITE_INSERT:
1774 				/*
1775 				 * For WRITE_INSERT, force the
1776 				 * error to be sent on the wire. It should be
1777 				 * detected by the Target.
1778 				 */
1779 				phba->lpfc_injerr_wgrd_cnt--;
1780 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1781 					phba->lpfc_injerr_nportid = 0;
1782 					phba->lpfc_injerr_lba =
1783 						LPFC_INJERR_LBA_OFF;
1784 					memset(&phba->lpfc_injerr_wwpn,
1785 						0, sizeof(struct lpfc_name));
1786 				}
1787 
1788 				rc |= BG_ERR_TGT | BG_ERR_SWAP;
1789 				/* Signals the caller to swap CRC->CSUM */
1790 
1791 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1792 					"0817 BLKGRD: Injecting guard error: "
1793 					"write lba x%lx\n", (unsigned long)lba);
1794 				break;
1795 			case SCSI_PROT_WRITE_STRIP:
1796 				/*
1797 				 * For WRITE_STRIP and WRITE_PASS,
1798 				 * force the error on data
1799 				 * being copied from SLI-Host to SLI-Port.
1800 				 */
1801 				phba->lpfc_injerr_wgrd_cnt--;
1802 				if (phba->lpfc_injerr_wgrd_cnt == 0) {
1803 					phba->lpfc_injerr_nportid = 0;
1804 					phba->lpfc_injerr_lba =
1805 						LPFC_INJERR_LBA_OFF;
1806 					memset(&phba->lpfc_injerr_wwpn,
1807 						0, sizeof(struct lpfc_name));
1808 				}
1809 
1810 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1811 				/* Signals the caller to swap CRC->CSUM */
1812 
1813 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1814 					"0816 BLKGRD: Injecting guard error: "
1815 					"write lba x%lx\n", (unsigned long)lba);
1816 				break;
1817 			}
1818 		}
1819 		if (phba->lpfc_injerr_rgrd_cnt) {
1820 			switch (op) {
1821 			case SCSI_PROT_READ_INSERT:
1822 			case SCSI_PROT_READ_STRIP:
1823 			case SCSI_PROT_READ_PASS:
1824 				/*
1825 				 * For READ_STRIP and READ_PASS, force the
1826 				 * error on data being read off the wire. It
1827 				 * should force an IO error to the driver.
1828 				 */
1829 				phba->lpfc_injerr_rgrd_cnt--;
1830 				if (phba->lpfc_injerr_rgrd_cnt == 0) {
1831 					phba->lpfc_injerr_nportid = 0;
1832 					phba->lpfc_injerr_lba =
1833 						LPFC_INJERR_LBA_OFF;
1834 					memset(&phba->lpfc_injerr_wwpn,
1835 						0, sizeof(struct lpfc_name));
1836 				}
1837 
1838 				rc = BG_ERR_INIT | BG_ERR_SWAP;
1839 				/* Signals the caller to swap CRC->CSUM */
1840 
1841 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1842 					"0818 BLKGRD: Injecting guard error: "
1843 					"read lba x%lx\n", (unsigned long)lba);
1844 			}
1845 		}
1846 	}
1847 
1848 	return rc;
1849 }
1850 #endif
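
/*
 * Editor's note (illustrative addition, not in the original source):
 * lpfc_bg_err_inject() returns a bitmask that its callers decode.
 * BG_ERR_SWAP asks the caller to swap the BlockGuard opcodes
 * (CRC <-> CSUM) via lpfc_bg_err_opcodes(), and BG_ERR_CHECK asks it
 * to disable guard/ref checking so the corrupted tag survives to the
 * wire. A minimal sketch of the caller-side pattern used by the setup
 * routines below:
 *
 *	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
 *	if (rc & BG_ERR_SWAP)
 *		lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
 *	if (rc & BG_ERR_CHECK)
 *		checking = 0;
 */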
1851 
1852 /**
1853  * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1854  * the specified SCSI command.
1855  * @phba: The Hba for which this call is being executed.
1856  * @sc: The SCSI command to examine
1857  * @txop: (out) BlockGuard operation for transmitted data
1858  * @rxop: (out) BlockGuard operation for received data
1859  *
1860  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1861  *
1862  **/
1863 static int
1864 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1865 		uint8_t *txop, uint8_t *rxop)
1866 {
1867 	uint8_t ret = 0;
1868 
1869 	if (lpfc_cmd_guard_csum(sc)) {
1870 		switch (scsi_get_prot_op(sc)) {
1871 		case SCSI_PROT_READ_INSERT:
1872 		case SCSI_PROT_WRITE_STRIP:
1873 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1874 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1875 			break;
1876 
1877 		case SCSI_PROT_READ_STRIP:
1878 		case SCSI_PROT_WRITE_INSERT:
1879 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1880 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1881 			break;
1882 
1883 		case SCSI_PROT_READ_PASS:
1884 		case SCSI_PROT_WRITE_PASS:
1885 			*rxop = BG_OP_IN_CRC_OUT_CSUM;
1886 			*txop = BG_OP_IN_CSUM_OUT_CRC;
1887 			break;
1888 
1889 		case SCSI_PROT_NORMAL:
1890 		default:
1891 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1892 				"9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1893 					scsi_get_prot_op(sc));
1894 			ret = 1;
1895 			break;
1896 
1897 		}
1898 	} else {
1899 		switch (scsi_get_prot_op(sc)) {
1900 		case SCSI_PROT_READ_STRIP:
1901 		case SCSI_PROT_WRITE_INSERT:
1902 			*rxop = BG_OP_IN_CRC_OUT_NODIF;
1903 			*txop = BG_OP_IN_NODIF_OUT_CRC;
1904 			break;
1905 
1906 		case SCSI_PROT_READ_PASS:
1907 		case SCSI_PROT_WRITE_PASS:
1908 			*rxop = BG_OP_IN_CRC_OUT_CRC;
1909 			*txop = BG_OP_IN_CRC_OUT_CRC;
1910 			break;
1911 
1912 		case SCSI_PROT_READ_INSERT:
1913 		case SCSI_PROT_WRITE_STRIP:
1914 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1915 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1916 			break;
1917 
1918 		case SCSI_PROT_NORMAL:
1919 		default:
1920 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1921 				"9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1922 					scsi_get_prot_op(sc));
1923 			ret = 1;
1924 			break;
1925 		}
1926 	}
1927 
1928 	return ret;
1929 }
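
/*
 * Illustrative sketch (editorial addition): resolving the opcodes for a
 * WRITE_PASS command with the routine above. The guard format configured
 * on the SCSI host decides which table is used:
 *
 *	uint8_t txop, rxop;
 *
 *	if (lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop))
 *		return 1;	(unsupported op/guard combination)
 *
 * With a CRC guard, txop == rxop == BG_OP_IN_CRC_OUT_CRC (no conversion
 * needed); with an IP-checksum guard, txop == BG_OP_IN_CSUM_OUT_CRC and
 * rxop == BG_OP_IN_CRC_OUT_CSUM, converting between the host's checksum
 * and the T10 CRC carried on the wire.
 */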
1930 
1931 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1932 /**
1933  * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1934  * the specified SCSI command in order to force a guard tag error.
1935  * @phba: The Hba for which this call is being executed.
1936  * @sc: The SCSI command to examine
1937  * @txop: (out) BlockGuard operation for transmitted data
1938  * @rxop: (out) BlockGuard operation for received data
1939  *
1940  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1941  *
1942  **/
1943 static int
1944 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1945 		uint8_t *txop, uint8_t *rxop)
1946 {
1947 	uint8_t ret = 0;
1948 
1949 	if (lpfc_cmd_guard_csum(sc)) {
1950 		switch (scsi_get_prot_op(sc)) {
1951 		case SCSI_PROT_READ_INSERT:
1952 		case SCSI_PROT_WRITE_STRIP:
1953 			*rxop = BG_OP_IN_NODIF_OUT_CRC;
1954 			*txop = BG_OP_IN_CRC_OUT_NODIF;
1955 			break;
1956 
1957 		case SCSI_PROT_READ_STRIP:
1958 		case SCSI_PROT_WRITE_INSERT:
1959 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1960 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1961 			break;
1962 
1963 		case SCSI_PROT_READ_PASS:
1964 		case SCSI_PROT_WRITE_PASS:
1965 			*rxop = BG_OP_IN_CSUM_OUT_CRC;
1966 			*txop = BG_OP_IN_CRC_OUT_CSUM;
1967 			break;
1968 
1969 		case SCSI_PROT_NORMAL:
1970 		default:
1971 			break;
1972 
1973 		}
1974 	} else {
1975 		switch (scsi_get_prot_op(sc)) {
1976 		case SCSI_PROT_READ_STRIP:
1977 		case SCSI_PROT_WRITE_INSERT:
1978 			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
1979 			*txop = BG_OP_IN_NODIF_OUT_CSUM;
1980 			break;
1981 
1982 		case SCSI_PROT_READ_PASS:
1983 		case SCSI_PROT_WRITE_PASS:
1984 			*rxop = BG_OP_IN_CSUM_OUT_CSUM;
1985 			*txop = BG_OP_IN_CSUM_OUT_CSUM;
1986 			break;
1987 
1988 		case SCSI_PROT_READ_INSERT:
1989 		case SCSI_PROT_WRITE_STRIP:
1990 			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
1991 			*txop = BG_OP_IN_CSUM_OUT_NODIF;
1992 			break;
1993 
1994 		case SCSI_PROT_NORMAL:
1995 		default:
1996 			break;
1997 		}
1998 	}
1999 
2000 	return ret;
2001 }
2002 #endif
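
/*
 * Editorial note: lpfc_bg_err_opcodes() is lpfc_sc_to_bg_opcodes() with
 * the CRC and CSUM guard formats exchanged. Programming the "wrong"
 * format makes the hardware compute a checksum where a CRC is expected
 * (or vice versa), which reliably manufactures a guard tag mismatch
 * without corrupting the data itself.
 */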
2003 
2004 /**
2005  * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
2006  * @phba: The Hba for which this call is being executed.
2007  * @sc: pointer to scsi command we're working on
2008  * @bpl: pointer to buffer list for protection groups
2009  * @datasegcnt: number of segments of data that have been dma mapped
2010  *
2011  * This function sets up BPL buffer list for protection groups of
2012  * type LPFC_PG_TYPE_NO_DIF
2013  *
2014  * This is usually used when the HBA is instructed to generate
2015  * DIFs and insert them into the data stream (or strip DIF from
2016  * the incoming data stream)
2017  *
2018  * The buffer list consists of just one protection group described
2019  * below:
2020  *                                +-------------------------+
2021  *   start of prot group  -->     |          PDE_5          |
2022  *                                +-------------------------+
2023  *                                |          PDE_6          |
2024  *                                +-------------------------+
2025  *                                |         Data BDE        |
2026  *                                +-------------------------+
2027  *                                |more Data BDE's ... (opt)|
2028  *                                +-------------------------+
2029  *
2030  *
2031  * Note: Data s/g buffers have been dma mapped
2032  *
2033  * Returns the number of BDEs added to the BPL.
2034  **/
2035 static int
2036 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2037 		struct ulp_bde64 *bpl, int datasegcnt)
2038 {
2039 	struct scatterlist *sgde = NULL; /* s/g data entry */
2040 	struct lpfc_pde5 *pde5 = NULL;
2041 	struct lpfc_pde6 *pde6 = NULL;
2042 	dma_addr_t physaddr;
2043 	int i = 0, num_bde = 0, status;
2044 	int datadir = sc->sc_data_direction;
2045 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2046 	uint32_t rc;
2047 #endif
2048 	uint32_t checking = 1;
2049 	uint32_t reftag;
2050 	unsigned blksize;
2051 	uint8_t txop, rxop;
2052 
2053 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2054 	if (status)
2055 		goto out;
2056 
2057 	/* extract some info from the scsi command for pde */
2058 	blksize = lpfc_cmd_blksize(sc);
2059 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2060 
2061 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2062 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2063 	if (rc) {
2064 		if (rc & BG_ERR_SWAP)
2065 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2066 		if (rc & BG_ERR_CHECK)
2067 			checking = 0;
2068 	}
2069 #endif
2070 
2071 	/* setup PDE5 with what we have */
2072 	pde5 = (struct lpfc_pde5 *) bpl;
2073 	memset(pde5, 0, sizeof(struct lpfc_pde5));
2074 	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2075 
2076 	/* Endianness conversion if necessary for PDE5 */
2077 	pde5->word0 = cpu_to_le32(pde5->word0);
2078 	pde5->reftag = cpu_to_le32(reftag);
2079 
2080 	/* advance bpl and increment bde count */
2081 	num_bde++;
2082 	bpl++;
2083 	pde6 = (struct lpfc_pde6 *) bpl;
2084 
2085 	/* setup PDE6 with the rest of the info */
2086 	memset(pde6, 0, sizeof(struct lpfc_pde6));
2087 	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2088 	bf_set(pde6_optx, pde6, txop);
2089 	bf_set(pde6_oprx, pde6, rxop);
2090 
2091 	/*
2092 	 * We only need to check the data on READs; for WRITEs,
2093 	 * protection data is automatically generated, not checked.
2094 	 */
2095 	if (datadir == DMA_FROM_DEVICE) {
2096 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2097 			bf_set(pde6_ce, pde6, checking);
2098 		else
2099 			bf_set(pde6_ce, pde6, 0);
2100 
2101 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2102 			bf_set(pde6_re, pde6, checking);
2103 		else
2104 			bf_set(pde6_re, pde6, 0);
2105 	}
2106 	bf_set(pde6_ai, pde6, 1);
2107 	bf_set(pde6_ae, pde6, 0);
2108 	bf_set(pde6_apptagval, pde6, 0);
2109 
2110 	/* Endianness conversion if necessary for PDE6 */
2111 	pde6->word0 = cpu_to_le32(pde6->word0);
2112 	pde6->word1 = cpu_to_le32(pde6->word1);
2113 	pde6->word2 = cpu_to_le32(pde6->word2);
2114 
2115 	/* advance bpl and increment bde count */
2116 	num_bde++;
2117 	bpl++;
2118 
2119 	/* assumption: caller has already run dma_map_sg on command data */
2120 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2121 		physaddr = sg_dma_address(sgde);
2122 		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
2123 		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2124 		bpl->tus.f.bdeSize = sg_dma_len(sgde);
2125 		if (datadir == DMA_TO_DEVICE)
2126 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2127 		else
2128 			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2129 		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2130 		bpl++;
2131 		num_bde++;
2132 	}
2133 
2134 out:
2135 	return num_bde;
2136 }
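
/*
 * Worked example (editorial addition): with datasegcnt == 2 the BPL built
 * above holds PDE5 + PDE6 + two data BDEs, so num_bde == 4. Because the
 * two PDEs are a fixed overhead, lpfc_bg_scsi_prep_dma_buf_s3() below
 * checks (seg_cnt + 2) <= cfg_total_seg_cnt before taking this path.
 */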
2137 
2138 /**
2139  * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
2140  * @phba: The Hba for which this call is being executed.
2141  * @sc: pointer to scsi command we're working on
2142  * @bpl: pointer to buffer list for protection groups
2143  * @datacnt: number of segments of data that have been dma mapped
2144  * @protcnt: number of segments of protection data that have been dma mapped
2145  *
2146  * This function sets up BPL buffer list for protection groups of
2147  * type LPFC_PG_TYPE_DIF
2148  *
2149  * This is usually used when DIFs are in their own buffers,
2150  * separate from the data. The HBA can then be instructed
2151  * to place the DIFs in the outgoing stream.  For read operations,
2152  * the HBA can extract the DIFs and place them in DIF buffers.
2153  *
2154  * The buffer list for this type consists of one or more of the
2155  * protection groups described below:
2156  *                                    +-------------------------+
2157  *   start of first prot group  -->   |          PDE_5          |
2158  *                                    +-------------------------+
2159  *                                    |          PDE_6          |
2160  *                                    +-------------------------+
2161  *                                    |      PDE_7 (Prot BDE)   |
2162  *                                    +-------------------------+
2163  *                                    |        Data BDE         |
2164  *                                    +-------------------------+
2165  *                                    |more Data BDE's ... (opt)|
2166  *                                    +-------------------------+
2167  *   start of new  prot group  -->    |          PDE_5          |
2168  *                                    +-------------------------+
2169  *                                    |          ...            |
2170  *                                    +-------------------------+
2171  *
2172  * Note: It is assumed that both data and protection s/g buffers have been
2173  *       mapped for DMA
2174  *
2175  * Returns the number of BDEs added to the BPL.
2176  **/
2177 static int
2178 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2179 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
2180 {
2181 	struct scatterlist *sgde = NULL; /* s/g data entry */
2182 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2183 	struct lpfc_pde5 *pde5 = NULL;
2184 	struct lpfc_pde6 *pde6 = NULL;
2185 	struct lpfc_pde7 *pde7 = NULL;
2186 	dma_addr_t dataphysaddr, protphysaddr;
2187 	unsigned short curr_data = 0, curr_prot = 0;
2188 	unsigned int split_offset;
2189 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2190 	unsigned int protgrp_blks, protgrp_bytes;
2191 	unsigned int remainder, subtotal;
2192 	int status;
2193 	int datadir = sc->sc_data_direction;
2194 	unsigned char pgdone = 0, alldone = 0;
2195 	unsigned blksize;
2196 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2197 	uint32_t rc;
2198 #endif
2199 	uint32_t checking = 1;
2200 	uint32_t reftag;
2201 	uint8_t txop, rxop;
2202 	int num_bde = 0;
2203 
2204 	sgpe = scsi_prot_sglist(sc);
2205 	sgde = scsi_sglist(sc);
2206 
2207 	if (!sgpe || !sgde) {
2208 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2209 				"9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
2210 				sgpe, sgde);
2211 		return 0;
2212 	}
2213 
2214 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2215 	if (status)
2216 		goto out;
2217 
2218 	/* extract some info from the scsi command */
2219 	blksize = lpfc_cmd_blksize(sc);
2220 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2221 
2222 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2223 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2224 	if (rc) {
2225 		if (rc & BG_ERR_SWAP)
2226 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2227 		if (rc & BG_ERR_CHECK)
2228 			checking = 0;
2229 	}
2230 #endif
2231 
2232 	split_offset = 0;
2233 	do {
2234 		/* Check to see if we ran out of space */
2235 		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
2236 			return num_bde + 3;
2237 
2238 		/* setup PDE5 with what we have */
2239 		pde5 = (struct lpfc_pde5 *) bpl;
2240 		memset(pde5, 0, sizeof(struct lpfc_pde5));
2241 		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
2242 
2243 		/* Endianness conversion if necessary for PDE5 */
2244 		pde5->word0 = cpu_to_le32(pde5->word0);
2245 		pde5->reftag = cpu_to_le32(reftag);
2246 
2247 		/* advance bpl and increment bde count */
2248 		num_bde++;
2249 		bpl++;
2250 		pde6 = (struct lpfc_pde6 *) bpl;
2251 
2252 		/* setup PDE6 with the rest of the info */
2253 		memset(pde6, 0, sizeof(struct lpfc_pde6));
2254 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
2255 		bf_set(pde6_optx, pde6, txop);
2256 		bf_set(pde6_oprx, pde6, rxop);
2257 
2258 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2259 			bf_set(pde6_ce, pde6, checking);
2260 		else
2261 			bf_set(pde6_ce, pde6, 0);
2262 
2263 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2264 			bf_set(pde6_re, pde6, checking);
2265 		else
2266 			bf_set(pde6_re, pde6, 0);
2267 
2268 		bf_set(pde6_ai, pde6, 1);
2269 		bf_set(pde6_ae, pde6, 0);
2270 		bf_set(pde6_apptagval, pde6, 0);
2271 
2272 		/* Endianness conversion if necessary for PDE6 */
2273 		pde6->word0 = cpu_to_le32(pde6->word0);
2274 		pde6->word1 = cpu_to_le32(pde6->word1);
2275 		pde6->word2 = cpu_to_le32(pde6->word2);
2276 
2277 		/* advance bpl and increment bde count */
2278 		num_bde++;
2279 		bpl++;
2280 
2281 		/* setup the first BDE that points to protection buffer */
2282 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2283 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2284 
2285 		/* must be integer multiple of the DIF block length */
2286 		BUG_ON(protgroup_len % 8);
2287 
2288 		pde7 = (struct lpfc_pde7 *) bpl;
2289 		memset(pde7, 0, sizeof(struct lpfc_pde7));
2290 		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
2291 
2292 		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
2293 		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
2294 
2295 		protgrp_blks = protgroup_len / 8;
2296 		protgrp_bytes = protgrp_blks * blksize;
2297 
2298 		/* check if this pde is crossing the 4K boundary; if so split */
2299 		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
2300 			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
2301 			protgroup_offset += protgroup_remainder;
2302 			protgrp_blks = protgroup_remainder / 8;
2303 			protgrp_bytes = protgrp_blks * blksize;
2304 		} else {
2305 			protgroup_offset = 0;
2306 			curr_prot++;
2307 		}
2308 
2309 		num_bde++;
2310 
2311 		/* setup BDE's for data blocks associated with DIF data */
2312 		pgdone = 0;
2313 		subtotal = 0; /* total bytes processed for current prot grp */
2314 		while (!pgdone) {
2315 			/* Check to see if we ran out of space */
2316 			if (num_bde >= phba->cfg_total_seg_cnt)
2317 				return num_bde + 1;
2318 
2319 			if (!sgde) {
2320 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2321 					"9065 BLKGRD:%s Invalid data segment\n",
2322 						__func__);
2323 				return 0;
2324 			}
2325 			bpl++;
2326 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2327 			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
2328 			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
2329 
2330 			remainder = sg_dma_len(sgde) - split_offset;
2331 
2332 			if ((subtotal + remainder) <= protgrp_bytes) {
2333 				/* we can use this whole buffer */
2334 				bpl->tus.f.bdeSize = remainder;
2335 				split_offset = 0;
2336 
2337 				if ((subtotal + remainder) == protgrp_bytes)
2338 					pgdone = 1;
2339 			} else {
2340 				/* must split this buffer with next prot grp */
2341 				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
2342 				split_offset += bpl->tus.f.bdeSize;
2343 			}
2344 
2345 			subtotal += bpl->tus.f.bdeSize;
2346 
2347 			if (datadir == DMA_TO_DEVICE)
2348 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2349 			else
2350 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2351 			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2352 
2353 			num_bde++;
2354 			curr_data++;
2355 
2356 			if (split_offset)
2357 				break;
2358 
2359 			/* Move to the next s/g segment if possible */
2360 			sgde = sg_next(sgde);
2361 
2362 		}
2363 
2364 		if (protgroup_offset) {
2365 			/* update the reference tag */
2366 			reftag += protgrp_blks;
2367 			bpl++;
2368 			continue;
2369 		}
2370 
2371 		/* are we done ? */
2372 		if (curr_prot == protcnt) {
2373 			alldone = 1;
2374 		} else if (curr_prot < protcnt) {
2375 			/* advance to next prot buffer */
2376 			sgpe = sg_next(sgpe);
2377 			bpl++;
2378 
2379 			/* update the reference tag */
2380 			reftag += protgrp_blks;
2381 		} else {
2382 			/* if we're here, we have a bug */
2383 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2384 				"9054 BLKGRD: bug in %s\n", __func__);
2385 		}
2386 
2387 	} while (!alldone);
2388 out:
2389 
2390 	return num_bde;
2391 }
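
/*
 * Worked example (editorial addition, assuming 512-byte blocks): a
 * protection segment with sg_dma_len(sgpe) == 256 holds 256 / 8 == 32
 * DIF tuples, so protgrp_blks == 32 and protgrp_bytes == 32 * 512 ==
 * 16384. Data BDEs are then emitted until those 16384 bytes are covered,
 * splitting a data s/g entry across protection groups when needed. If
 * the protection buffer itself straddles a 4K boundary, only the bytes
 * up to the boundary are consumed on this pass; protgroup_offset carries
 * the remainder into the next loop iteration.
 */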
2392 
2393 /**
2394  * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
2395  * @phba: The Hba for which this call is being executed.
2396  * @sc: pointer to scsi command we're working on
2397  * @sgl: pointer to buffer list for protection groups
2398  * @datasegcnt: number of segments of data that have been dma mapped
2399  *
2400  * This function sets up SGL buffer list for protection groups of
2401  * type LPFC_PG_TYPE_NO_DIF
2402  *
2403  * This is usually used when the HBA is instructed to generate
2404  * DIFs and insert them into the data stream (or strip DIF from
2405  * the incoming data stream)
2406  *
2407  * The buffer list consists of just one protection group described
2408  * below:
2409  *                                +-------------------------+
2410  *   start of prot group  -->     |         DI_SEED         |
2411  *                                +-------------------------+
2412  *                                |         Data SGE        |
2413  *                                +-------------------------+
2414  *                                |more Data SGE's ... (opt)|
2415  *                                +-------------------------+
2416  *
2417  *
2418  * Note: Data s/g buffers have been dma mapped
2419  *
2420  * Returns the number of SGEs added to the SGL.
2421  **/
2422 static int
2423 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2424 		struct sli4_sge *sgl, int datasegcnt)
2425 {
2426 	struct scatterlist *sgde = NULL; /* s/g data entry */
2427 	struct sli4_sge_diseed *diseed = NULL;
2428 	dma_addr_t physaddr;
2429 	int i = 0, num_sge = 0, status;
2430 	uint32_t reftag;
2431 	unsigned blksize;
2432 	uint8_t txop, rxop;
2433 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2434 	uint32_t rc;
2435 #endif
2436 	uint32_t checking = 1;
2437 	uint32_t dma_len;
2438 	uint32_t dma_offset = 0;
2439 
2440 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2441 	if (status)
2442 		goto out;
2443 
2444 	/* extract some info from the scsi command for pde */
2445 	blksize = lpfc_cmd_blksize(sc);
2446 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2447 
2448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2449 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2450 	if (rc) {
2451 		if (rc & BG_ERR_SWAP)
2452 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2453 		if (rc & BG_ERR_CHECK)
2454 			checking = 0;
2455 	}
2456 #endif
2457 
2458 	/* setup DISEED with what we have */
2459 	diseed = (struct sli4_sge_diseed *) sgl;
2460 	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2461 	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2462 
2463 	/* Endianness conversion if necessary */
2464 	diseed->ref_tag = cpu_to_le32(reftag);
2465 	diseed->ref_tag_tran = diseed->ref_tag;
2466 
2467 	/*
2468 	 * We only need to check the data on READs; for WRITEs,
2469 	 * protection data is automatically generated, not checked.
2470 	 */
2471 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2472 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
2473 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2474 		else
2475 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2476 
2477 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2478 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2479 		else
2480 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2481 	}
2482 
2483 	/* setup DISEED with the rest of the info */
2484 	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2485 	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2486 
2487 	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2488 	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2489 
2490 	/* Endianness conversion if necessary for DISEED */
2491 	diseed->word2 = cpu_to_le32(diseed->word2);
2492 	diseed->word3 = cpu_to_le32(diseed->word3);
2493 
2494 	/* advance bpl and increment sge count */
2495 	num_sge++;
2496 	sgl++;
2497 
2498 	/* assumption: caller has already run dma_map_sg on command data */
2499 	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
2500 		physaddr = sg_dma_address(sgde);
2501 		dma_len = sg_dma_len(sgde);
2502 		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2503 		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2504 		if ((i + 1) == datasegcnt)
2505 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2506 		else
2507 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2508 		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2509 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2510 
2511 		sgl->sge_len = cpu_to_le32(dma_len);
2512 		dma_offset += dma_len;
2513 
2514 		sgl++;
2515 		num_sge++;
2516 	}
2517 
2518 out:
2519 	return num_sge;
2520 }
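
/*
 * Editorial note: this is the SLI-4 counterpart of lpfc_bg_setup_bpl()
 * above. A single DISEED SGE carries what PDE5 (reference tag) and PDE6
 * (opcodes, check enables) carry on SLI-3, followed by plain data SGEs
 * with the final one marked via lpfc_sli4_sge_last.
 */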
2521 
2522 /**
2523  * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2524  * @phba: The Hba for which this call is being executed.
2525  * @sc: pointer to scsi command we're working on
2526  * @sgl: pointer to buffer list for protection groups
2527  * @datacnt: number of segments of data that have been dma mapped
2528  * @protcnt: number of segments of protection data that have been dma mapped
2529  *
2530  * This function sets up SGL buffer list for protection groups of
2531  * type LPFC_PG_TYPE_DIF
2532  *
2533  * This is usually used when DIFs are in their own buffers,
2534  * separate from the data. The HBA can then be instructed
2535  * to place the DIFs in the outgoing stream.  For read operations,
2536  * the HBA can extract the DIFs and place them in DIF buffers.
2537  *
2538  * The buffer list for this type consists of one or more of the
2539  * protection groups described below:
2540  *                                    +-------------------------+
2541  *   start of first prot group  -->   |         DISEED          |
2542  *                                    +-------------------------+
2543  *                                    |      DIF (Prot SGE)     |
2544  *                                    +-------------------------+
2545  *                                    |        Data SGE         |
2546  *                                    +-------------------------+
2547  *                                    |more Data SGE's ... (opt)|
2548  *                                    +-------------------------+
2549  *   start of new  prot group  -->    |         DISEED          |
2550  *                                    +-------------------------+
2551  *                                    |          ...            |
2552  *                                    +-------------------------+
2553  *
2554  * Note: It is assumed that both data and protection s/g buffers have been
2555  *       mapped for DMA
2556  *
2557  * Returns the number of SGEs added to the SGL.
2558  **/
2559 static int
2560 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2561 		struct sli4_sge *sgl, int datacnt, int protcnt)
2562 {
2563 	struct scatterlist *sgde = NULL; /* s/g data entry */
2564 	struct scatterlist *sgpe = NULL; /* s/g prot entry */
2565 	struct sli4_sge_diseed *diseed = NULL;
2566 	dma_addr_t dataphysaddr, protphysaddr;
2567 	unsigned short curr_data = 0, curr_prot = 0;
2568 	unsigned int split_offset;
2569 	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2570 	unsigned int protgrp_blks, protgrp_bytes;
2571 	unsigned int remainder, subtotal;
2572 	int status;
2573 	unsigned char pgdone = 0, alldone = 0;
2574 	unsigned blksize;
2575 	uint32_t reftag;
2576 	uint8_t txop, rxop;
2577 	uint32_t dma_len;
2578 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2579 	uint32_t rc;
2580 #endif
2581 	uint32_t checking = 1;
2582 	uint32_t dma_offset = 0;
2583 	int num_sge = 0;
2584 
2585 	sgpe = scsi_prot_sglist(sc);
2586 	sgde = scsi_sglist(sc);
2587 
2588 	if (!sgpe || !sgde) {
2589 		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2590 				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
2591 				sgpe, sgde);
2592 		return 0;
2593 	}
2594 
2595 	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2596 	if (status)
2597 		goto out;
2598 
2599 	/* extract some info from the scsi command */
2600 	blksize = lpfc_cmd_blksize(sc);
2601 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2602 
2603 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2604 	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2605 	if (rc) {
2606 		if (rc & BG_ERR_SWAP)
2607 			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2608 		if (rc & BG_ERR_CHECK)
2609 			checking = 0;
2610 	}
2611 #endif
2612 
2613 	split_offset = 0;
2614 	do {
2615 		/* Check to see if we ran out of space */
2616 		if (num_sge >= (phba->cfg_total_seg_cnt - 2))
2617 			return num_sge + 3;
2618 
2619 		/* setup DISEED with what we have */
2620 		diseed = (struct sli4_sge_diseed *) sgl;
2621 		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2622 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2623 
2624 		/* Endianness conversion if necessary */
2625 		diseed->ref_tag = cpu_to_le32(reftag);
2626 		diseed->ref_tag_tran = diseed->ref_tag;
2627 
2628 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2629 			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2630 
2631 		} else {
2632 			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2633 			/*
2634 			 * When in this mode, the hardware will replace
2635 			 * the guard tag from the host with a
2636 			 * newly generated good CRC for the wire.
2637 			 * Switch to raw mode here to avoid this
2638 			 * behavior. What the host sends gets put on the wire.
2639 			 */
2640 			if (txop == BG_OP_IN_CRC_OUT_CRC) {
2641 				txop = BG_OP_RAW_MODE;
2642 				rxop = BG_OP_RAW_MODE;
2643 			}
2644 		}
2645 
2646 
2647 		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2648 			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2649 		else
2650 			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2651 
2652 		/* setup DISEED with the rest of the info */
2653 		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2654 		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2655 
2656 		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2657 		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2658 
2659 		/* Endianness conversion if necessary for DISEED */
2660 		diseed->word2 = cpu_to_le32(diseed->word2);
2661 		diseed->word3 = cpu_to_le32(diseed->word3);
2662 
2663 		/* advance sgl and increment bde count */
2664 		num_sge++;
2665 		sgl++;
2666 
2667 		/* setup the first BDE that points to protection buffer */
2668 		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2669 		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2670 
2671 		/* must be integer multiple of the DIF block length */
2672 		BUG_ON(protgroup_len % 8);
2673 
2674 		/* Now setup DIF SGE */
2675 		sgl->word2 = 0;
2676 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2677 		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2678 		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2679 		sgl->word2 = cpu_to_le32(sgl->word2);
2680 
2681 		protgrp_blks = protgroup_len / 8;
2682 		protgrp_bytes = protgrp_blks * blksize;
2683 
2684 		/* check if DIF SGE is crossing the 4K boundary; if so split */
2685 		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2686 			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2687 			protgroup_offset += protgroup_remainder;
2688 			protgrp_blks = protgroup_remainder / 8;
2689 			protgrp_bytes = protgrp_blks * blksize;
2690 		} else {
2691 			protgroup_offset = 0;
2692 			curr_prot++;
2693 		}
2694 
2695 		num_sge++;
2696 
2697 		/* setup SGE's for data blocks associated with DIF data */
2698 		pgdone = 0;
2699 		subtotal = 0; /* total bytes processed for current prot grp */
2700 		while (!pgdone) {
2701 			/* Check to see if we ran out of space */
2702 			if (num_sge >= phba->cfg_total_seg_cnt)
2703 				return num_sge + 1;
2704 
2705 			if (!sgde) {
2706 				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2707 					"9086 BLKGRD:%s Invalid data segment\n",
2708 						__func__);
2709 				return 0;
2710 			}
2711 			sgl++;
2712 			dataphysaddr = sg_dma_address(sgde) + split_offset;
2713 
2714 			remainder = sg_dma_len(sgde) - split_offset;
2715 
2716 			if ((subtotal + remainder) <= protgrp_bytes) {
2717 				/* we can use this whole buffer */
2718 				dma_len = remainder;
2719 				split_offset = 0;
2720 
2721 				if ((subtotal + remainder) == protgrp_bytes)
2722 					pgdone = 1;
2723 			} else {
2724 				/* must split this buffer with next prot grp */
2725 				dma_len = protgrp_bytes - subtotal;
2726 				split_offset += dma_len;
2727 			}
2728 
2729 			subtotal += dma_len;
2730 
2731 			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
2732 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
2733 			bf_set(lpfc_sli4_sge_last, sgl, 0);
2734 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2735 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2736 
2737 			sgl->sge_len = cpu_to_le32(dma_len);
2738 			dma_offset += dma_len;
2739 
2740 			num_sge++;
2741 			curr_data++;
2742 
2743 			if (split_offset)
2744 				break;
2745 
2746 			/* Move to the next s/g segment if possible */
2747 			sgde = sg_next(sgde);
2748 		}
2749 
2750 		if (protgroup_offset) {
2751 			/* update the reference tag */
2752 			reftag += protgrp_blks;
2753 			sgl++;
2754 			continue;
2755 		}
2756 
2757 		/* are we done ? */
2758 		if (curr_prot == protcnt) {
2759 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2760 			alldone = 1;
2761 		} else if (curr_prot < protcnt) {
2762 			/* advance to next prot buffer */
2763 			sgpe = sg_next(sgpe);
2764 			sgl++;
2765 
2766 			/* update the reference tag */
2767 			reftag += protgrp_blks;
2768 		} else {
2769 			/* if we're here, we have a bug */
2770 			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2771 				"9085 BLKGRD: bug in %s\n", __func__);
2772 		}
2773 
2774 	} while (!alldone);
2775 
2776 out:
2777 
2778 	return num_sge;
2779 }
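
/*
 * Editorial note on the raw-mode switch above: with guard checking off
 * and txop == BG_OP_IN_CRC_OUT_CRC, SLI-4 hardware would regenerate a
 * good CRC on the way out, silently repairing any guard corruption the
 * host intended to send (the error-injection path depends on the bad
 * tag reaching the wire). BG_OP_RAW_MODE forwards the host's guard
 * bytes untouched.
 */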
2780 
2781 /**
2782  * lpfc_prot_group_type - Get protection group type of SCSI command
2783  * @phba: The Hba for which this call is being executed.
2784  * @sc: pointer to scsi command we're working on
2785  *
2786  * Given a SCSI command that supports DIF, determine composition of protection
2787  * groups involved in setting up buffer lists
2788  *
2789  * Returns: Protection group type (with or without DIF)
2790  *
2791  **/
2792 static int
2793 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2794 {
2795 	int ret = LPFC_PG_TYPE_INVALID;
2796 	unsigned char op = scsi_get_prot_op(sc);
2797 
2798 	switch (op) {
2799 	case SCSI_PROT_READ_STRIP:
2800 	case SCSI_PROT_WRITE_INSERT:
2801 		ret = LPFC_PG_TYPE_NO_DIF;
2802 		break;
2803 	case SCSI_PROT_READ_INSERT:
2804 	case SCSI_PROT_WRITE_STRIP:
2805 	case SCSI_PROT_READ_PASS:
2806 	case SCSI_PROT_WRITE_PASS:
2807 		ret = LPFC_PG_TYPE_DIF_BUF;
2808 		break;
2809 	default:
2810 		if (phba)
2811 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2812 					"9021 Unsupported protection op:%d\n",
2813 					op);
2814 		break;
2815 	}
2816 	return ret;
2817 }
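
/*
 * Editorial note: LPFC_PG_TYPE_NO_DIF means the HBA inserts or strips
 * the DIF itself, so the midlayer supplies no protection s/g list;
 * LPFC_PG_TYPE_DIF_BUF means protection tuples arrive in their own
 * buffers and must be DMA-mapped alongside the data, as done in
 * lpfc_bg_scsi_prep_dma_buf_s3() below.
 */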
2818 
2819 /**
2820  * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2821  * @phba: The Hba for which this call is being executed.
2822  * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2823  *
2824  * Adjust the data length to account for how much data
2825  * is actually on the wire.
2826  *
2827  * returns the adjusted data length
2828  **/
2829 static int
2830 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2831 		       struct lpfc_scsi_buf *lpfc_cmd)
2832 {
2833 	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2834 	int fcpdl;
2835 
2836 	fcpdl = scsi_bufflen(sc);
2837 
2838 	/* Check if there is protection data on the wire */
2839 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2840 		/* Read check for protection data */
2841 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_READ_INSERT)
2842 			return fcpdl;
2843 
2844 	} else {
2845 		/* Write check for protection data */
2846 		if (scsi_get_prot_op(sc) ==  SCSI_PROT_WRITE_STRIP)
2847 			return fcpdl;
2848 	}
2849 
2850 	/*
2851 	 * If we are in DIF Type 1 mode, every data block has an 8 byte
2852 	 * DIF (trailer) attached to it. Must adjust the FCP data length
2853 	 * to account for the protection data.
2854 	 */
2855 	fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2856 
2857 	return fcpdl;
2858 }
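
/*
 * Worked example (editorial addition, assuming 512-byte blocks): for a
 * 32768-byte WRITE_PASS transfer, 32768 / 512 == 64 blocks each carry an
 * 8-byte DIF trailer on the wire, so fcpdl becomes 32768 + 64 * 8 ==
 * 33280. READ_INSERT and WRITE_STRIP are exempt above because their
 * protection data exists only on the host side, never on the wire.
 */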
2859 
2860 /**
2861  * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2862  * @phba: The Hba for which this call is being executed.
2863  * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2864  *
2865  * This is the protection/DIF aware version of
2866  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2867  * two functions eventually, but for now, it's here
2868  **/
2869 static int
2870 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2871 		struct lpfc_scsi_buf *lpfc_cmd)
2872 {
2873 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2874 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2875 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
2876 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2877 	uint32_t num_bde = 0;
2878 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2879 	int prot_group_type = 0;
2880 	int fcpdl;
2881 
2882 	/*
2883 	 * Start the lpfc command prep by bumping the bpl beyond the fcp_cmnd
2884 	 * and fcp_rsp regions to the first data bde entry
2885 	 */
2886 	bpl += 2;
2887 	if (scsi_sg_count(scsi_cmnd)) {
2888 		/*
2889 		 * The driver stores the segment count returned from dma_map_sg
2890 		 * because this is a count of dma-mappings used to map the use_sg
2891 		 * pages.  They are not guaranteed to be the same for those
2892 		 * architectures that implement an IOMMU.
2893 		 */
2894 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
2895 					scsi_sglist(scsi_cmnd),
2896 					scsi_sg_count(scsi_cmnd), datadir);
2897 		if (unlikely(!datasegcnt))
2898 			return 1;
2899 
2900 		lpfc_cmd->seg_cnt = datasegcnt;
2901 
2902 		/* First check if data segment count from SCSI Layer is good */
2903 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
2904 			goto err;
2905 
2906 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2907 
2908 		switch (prot_group_type) {
2909 		case LPFC_PG_TYPE_NO_DIF:
2910 
2911 			/* Here we need to add a PDE5 and PDE6 to the count */
2912 			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt)
2913 				goto err;
2914 
2915 			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2916 					datasegcnt);
2917 			/* we should have 2 or more entries in buffer list */
2918 			if (num_bde < 2)
2919 				goto err;
2920 			break;
2921 
2922 		case LPFC_PG_TYPE_DIF_BUF:
2923 			/*
2924 			 * This type indicates that protection buffers are
2925 			 * passed to the driver, so they need to be prepared
2926 			 * for DMA
2927 			 */
2928 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
2929 					scsi_prot_sglist(scsi_cmnd),
2930 					scsi_prot_sg_count(scsi_cmnd), datadir);
2931 			if (unlikely(!protsegcnt)) {
2932 				scsi_dma_unmap(scsi_cmnd);
2933 				return 1;
2934 			}
2935 
2936 			lpfc_cmd->prot_seg_cnt = protsegcnt;
2937 
2938 			/*
2939 			 * There is a minimum of 4 BPLs used for every
2940 			 * protection data segment.
2941 			 */
2942 			if ((lpfc_cmd->prot_seg_cnt * 4) >
2943 			    (phba->cfg_total_seg_cnt - 2))
2944 				goto err;
2945 
2946 			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2947 					datasegcnt, protsegcnt);
2948 			/* we should have 3 or more entries in buffer list */
2949 			if ((num_bde < 3) ||
2950 			    (num_bde > phba->cfg_total_seg_cnt))
2951 				goto err;
2952 			break;
2953 
2954 		case LPFC_PG_TYPE_INVALID:
2955 		default:
2956 			scsi_dma_unmap(scsi_cmnd);
2957 			lpfc_cmd->seg_cnt = 0;
2958 
2959 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2960 					"9022 Unexpected protection group %i\n",
2961 					prot_group_type);
2962 			return 1;
2963 		}
2964 	}
2965 
2966 	/*
2967 	 * Finish initializing those IOCB fields that are dependent on the
2968 	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
2969 	 * reinitialized since all iocb memory resources are used many times
2970 	 * for transmit, receive, and continuation bpl's.
2971 	 */
2972 	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2973 	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2974 	iocb_cmd->ulpBdeCount = 1;
2975 	iocb_cmd->ulpLe = 1;
2976 
2977 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2978 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2979 
2980 	/*
2981 	 * Due to difference in data length between DIF/non-DIF paths,
2982 	 * we need to set word 4 of IOCB here
2983 	 */
2984 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2985 
2986 	return 0;
2987 err:
2988 	if (lpfc_cmd->seg_cnt)
2989 		scsi_dma_unmap(scsi_cmnd);
2990 	if (lpfc_cmd->prot_seg_cnt)
2991 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2992 			     scsi_prot_sg_count(scsi_cmnd),
2993 			     scsi_cmnd->sc_data_direction);
2994 
2995 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2996 			"9023 Cannot setup S/G List for HBA "
2997 			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2998 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2999 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3000 			prot_group_type, num_bde);
3001 
3002 	lpfc_cmd->seg_cnt = 0;
3003 	lpfc_cmd->prot_seg_cnt = 0;
3004 	return 1;
3005 }
3006 
3007 /*
3008  * This function calculates the T10 DIF guard tag
3009  * on the specified data using the CRC algorithm
3010  * provided by crc_t10dif.
3011  */
3012 uint16_t
3013 lpfc_bg_crc(uint8_t *data, int count)
3014 {
3015 	uint16_t crc = 0;
3016 	uint16_t x;
3017 
3018 	crc = crc_t10dif(data, count);
3019 	x = cpu_to_be16(crc);
3020 	return x;
3021 }
3022 
3023 /*
3024  * This function calculates the T10 DIF guard tag
3025  * on the specified data using the checksum algorithm
3026  * provided by ip_compute_csum.
3027  */
3028 uint16_t
3029 lpfc_bg_csum(uint8_t *data, int count)
3030 {
3031 	uint16_t ret;
3032 
3033 	ret = ip_compute_csum(data, count);
3034 	return ret;
3035 }
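
/*
 * Illustrative sketch (editorial addition): verifying one block's guard
 * tag with the helpers above, mirroring the loop in lpfc_calc_bg_err()
 * below. The guard format the SCSI host supplies selects the helper:
 *
 *	if (lpfc_cmd_guard_csum(cmd))
 *		sum = lpfc_bg_csum(data_src, blksize);
 *	else
 *		sum = lpfc_bg_crc(data_src, blksize);
 *
 * A mismatch between sum and the tuple's guard_tag is reported as
 * BGS_GUARD_ERR_MASK.
 */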
3036 
3037 /*
3038  * This function examines the protection data to try to determine
3039  * what type of T10-DIF error occurred.
3040  */
3041 void
3042 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3043 {
3044 	struct scatterlist *sgpe; /* s/g prot entry */
3045 	struct scatterlist *sgde; /* s/g data entry */
3046 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3047 	struct scsi_dif_tuple *src = NULL;
3048 	uint8_t *data_src = NULL;
3049 	uint16_t guard_tag, guard_type;
3050 	uint16_t start_app_tag, app_tag;
3051 	uint32_t start_ref_tag, ref_tag;
3052 	int prot, protsegcnt;
3053 	int err_type, len, data_len;
3054 	int chk_ref, chk_app, chk_guard;
3055 	uint16_t sum;
3056 	unsigned blksize;
3057 
3058 	err_type = BGS_GUARD_ERR_MASK;
3059 	sum = 0;
3060 	guard_tag = 0;
3061 
3062 	/* First check to see if there is protection data to examine */
3063 	prot = scsi_get_prot_op(cmd);
3064 	if ((prot == SCSI_PROT_READ_STRIP) ||
3065 	    (prot == SCSI_PROT_WRITE_INSERT) ||
3066 	    (prot == SCSI_PROT_NORMAL))
3067 		goto out;
3068 
3069 	/* Currently the driver just supports ref_tag and guard_tag checking */
3070 	chk_ref = 1;
3071 	chk_app = 0;
3072 	chk_guard = 0;
3073 
3074 	/* Setup a ptr to the protection data provided by the SCSI host */
3075 	sgpe = scsi_prot_sglist(cmd);
3076 	protsegcnt = lpfc_cmd->prot_seg_cnt;
3077 
3078 	if (sgpe && protsegcnt) {
3079 
3080 		/*
3081 		 * We will only try to verify guard tag if the segment
3082 		 * data length is a multiple of the blksize.
3083 		 */
3084 		sgde = scsi_sglist(cmd);
3085 		blksize = lpfc_cmd_blksize(cmd);
3086 		data_src = (uint8_t *)sg_virt(sgde);
3087 		data_len = sgde->length;
3088 		if ((data_len & (blksize - 1)) == 0)
3089 			chk_guard = 1;
3090 		guard_type = scsi_host_get_guard(cmd->device->host);
3091 
3092 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3093 		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
3094 		start_app_tag = src->app_tag;
3095 		len = sgpe->length;
3096 		while (src && protsegcnt) {
3097 			while (len) {
3098 
3099 				/*
3100 				 * First check to see if a protection data
3101 				 * check is valid
3102 				 */
3103 				if ((src->ref_tag == 0xffffffff) ||
3104 				    (src->app_tag == 0xffff)) {
3105 					start_ref_tag++;
3106 					goto skipit;
3107 				}
3108 
3109 				/* First Guard Tag checking */
3110 				if (chk_guard) {
3111 					guard_tag = src->guard_tag;
3112 					if (lpfc_cmd_guard_csum(cmd))
3113 						sum = lpfc_bg_csum(data_src,
3114 								   blksize);
3115 					else
3116 						sum = lpfc_bg_crc(data_src,
3117 								  blksize);
3118 					if ((guard_tag != sum)) {
3119 						err_type = BGS_GUARD_ERR_MASK;
3120 						goto out;
3121 					}
3122 				}
3123 
3124 				/* Reference Tag checking */
3125 				ref_tag = be32_to_cpu(src->ref_tag);
3126 				if (chk_ref && (ref_tag != start_ref_tag)) {
3127 					err_type = BGS_REFTAG_ERR_MASK;
3128 					goto out;
3129 				}
3130 				start_ref_tag++;
3131 
3132 				/* App Tag checking */
3133 				app_tag = src->app_tag;
3134 				if (chk_app && (app_tag != start_app_tag)) {
3135 					err_type = BGS_APPTAG_ERR_MASK;
3136 					goto out;
3137 				}
3138 skipit:
3139 				len -= sizeof(struct scsi_dif_tuple);
3140 				if (len < 0)
3141 					len = 0;
3142 				src++;
3143 
3144 				data_src += blksize;
3145 				data_len -= blksize;
3146 
3147 				/*
3148 				 * Are we at the end of the Data segment?
3149 				 * The data segment is only used for Guard
3150 				 * tag checking.
3151 				 */
3152 				if (chk_guard && (data_len == 0)) {
3153 					chk_guard = 0;
3154 					sgde = sg_next(sgde);
3155 					if (!sgde)
3156 						goto out;
3157 
3158 					data_src = (uint8_t *)sg_virt(sgde);
3159 					data_len = sgde->length;
3160 					if ((data_len & (blksize - 1)) == 0)
3161 						chk_guard = 1;
3162 				}
3163 			}
3164 
3165 			/* Go to the next protection data segment */
3166 			sgpe = sg_next(sgpe);
3167 			if (sgpe) {
3168 				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
3169 				len = sgpe->length;
3170 			} else {
3171 				src = NULL;
3172 			}
3173 			protsegcnt--;
3174 		}
3175 	}
3176 out:
3177 	if (err_type == BGS_GUARD_ERR_MASK) {
3178 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3179 					0x10, 0x1);
3180 		cmd->result = DRIVER_SENSE << 24
3181 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3182 		phba->bg_guard_err_cnt++;
3183 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3184 				"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
3185 				(unsigned long)scsi_get_lba(cmd),
3186 				sum, guard_tag);
3187 
3188 	} else if (err_type == BGS_REFTAG_ERR_MASK) {
3189 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3190 					0x10, 0x3);
3191 		cmd->result = DRIVER_SENSE << 24
3192 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3193 
3194 		phba->bg_reftag_err_cnt++;
3195 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3196 				"9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
3197 				(unsigned long)scsi_get_lba(cmd),
3198 				ref_tag, start_ref_tag);
3199 
3200 	} else if (err_type == BGS_APPTAG_ERR_MASK) {
3201 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3202 					0x10, 0x2);
3203 		cmd->result = DRIVER_SENSE << 24
3204 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3205 
3206 		phba->bg_apptag_err_cnt++;
3207 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3208 				"9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
3209 				(unsigned long)scsi_get_lba(cmd),
3210 				app_tag, start_app_tag);
3211 	}
3212 }
3213 
3214 
3215 /*
3216  * This function checks for BlockGuard errors detected by
3217  * the HBA.  In case of errors, the ASC/ASCQ fields in the
3218  * sense buffer will be set accordingly, paired with
3219  * ILLEGAL_REQUEST to signal to the kernel that the HBA
3220  * detected corruption.
3221  *
3222  * Returns:
3223  *  0 - No error found
3224  *  1 - BlockGuard error found
3225  * -1 - Internal error (bad profile, ...etc)
3226  */
3227 static int
3228 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
3229 			struct lpfc_iocbq *pIocbOut)
3230 {
3231 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
3232 	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
3233 	int ret = 0;
3234 	uint32_t bghm = bgf->bghm;
3235 	uint32_t bgstat = bgf->bgstat;
3236 	uint64_t failing_sector = 0;
3237 
3238 	spin_lock(&_dump_buf_lock);
3239 	if (!_dump_buf_done) {
3240 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,  "9070 BLKGRD: Saving"
3241 			" Data for %u blocks to debugfs\n",
3242 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3243 		lpfc_debug_save_data(phba, cmd);
3244 
3245 		/* If we have a prot sgl, save the DIF buffer */
3246 		if (lpfc_prot_group_type(phba, cmd) ==
3247 				LPFC_PG_TYPE_DIF_BUF) {
3248 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: "
3249 				"Saving DIF for %u blocks to debugfs\n",
3250 				(cmd->cmnd[7] << 8 | cmd->cmnd[8]));
3251 			lpfc_debug_save_dif(phba, cmd);
3252 		}
3253 
3254 		_dump_buf_done = 1;
3255 	}
3256 	spin_unlock(&_dump_buf_lock);
3257 
3258 	if (lpfc_bgs_get_invalid_prof(bgstat)) {
3259 		cmd->result = ScsiResult(DID_ERROR, 0);
3260 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3261 				"9072 BLKGRD: Invalid BG Profile in cmd"
3262 				" 0x%x lba 0x%llx blk cnt 0x%x "
3263 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3264 				(unsigned long long)scsi_get_lba(cmd),
3265 				blk_rq_sectors(cmd->request), bgstat, bghm);
3266 		ret = (-1);
3267 		goto out;
3268 	}
3269 
3270 	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
3271 		cmd->result = ScsiResult(DID_ERROR, 0);
3272 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3273 				"9073 BLKGRD: Invalid BG PDIF Block in cmd"
3274 				" 0x%x lba 0x%llx blk cnt 0x%x "
3275 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3276 				(unsigned long long)scsi_get_lba(cmd),
3277 				blk_rq_sectors(cmd->request), bgstat, bghm);
3278 		ret = (-1);
3279 		goto out;
3280 	}
3281 
3282 	if (lpfc_bgs_get_guard_err(bgstat)) {
3283 		ret = 1;
3284 
3285 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3286 				0x10, 0x1);
3287 		cmd->result = DRIVER_SENSE << 24
3288 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3289 		phba->bg_guard_err_cnt++;
3290 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3291 				"9055 BLKGRD: Guard Tag error in cmd"
3292 				" 0x%x lba 0x%llx blk cnt 0x%x "
3293 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3294 				(unsigned long long)scsi_get_lba(cmd),
3295 				blk_rq_sectors(cmd->request), bgstat, bghm);
3296 	}
3297 
3298 	if (lpfc_bgs_get_reftag_err(bgstat)) {
3299 		ret = 1;
3300 
3301 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3302 				0x10, 0x3);
3303 		cmd->result = DRIVER_SENSE << 24
3304 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3305 
3306 		phba->bg_reftag_err_cnt++;
3307 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3308 				"9056 BLKGRD: Ref Tag error in cmd"
3309 				" 0x%x lba 0x%llx blk cnt 0x%x "
3310 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3311 				(unsigned long long)scsi_get_lba(cmd),
3312 				blk_rq_sectors(cmd->request), bgstat, bghm);
3313 	}
3314 
3315 	if (lpfc_bgs_get_apptag_err(bgstat)) {
3316 		ret = 1;
3317 
3318 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
3319 				0x10, 0x2);
3320 		cmd->result = DRIVER_SENSE << 24
3321 			| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
3322 
3323 		phba->bg_apptag_err_cnt++;
3324 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3325 				"9061 BLKGRD: App Tag error in cmd"
3326 				" 0x%x lba 0x%llx blk cnt 0x%x "
3327 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3328 				(unsigned long long)scsi_get_lba(cmd),
3329 				blk_rq_sectors(cmd->request), bgstat, bghm);
3330 	}
3331 
3332 	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
3333 		/*
3334 		 * setup sense data descriptor 0 per SPC-4 as an information
3335 		 * field, and put the failing LBA in it.
3336 		 * This code assumes there was also a guard/app/ref tag error
3337 		 * indication.
3338 		 */
3339 		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
3340 		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
3341 		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
3342 		cmd->sense_buffer[10] = 0x80; /* Validity bit */
3343 
3344 		/* bghm is an "on the wire" FC frame based count */
3345 		switch (scsi_get_prot_op(cmd)) {
3346 		case SCSI_PROT_READ_INSERT:
3347 		case SCSI_PROT_WRITE_STRIP:
3348 			bghm /= cmd->device->sector_size;
3349 			break;
3350 		case SCSI_PROT_READ_STRIP:
3351 		case SCSI_PROT_WRITE_INSERT:
3352 		case SCSI_PROT_READ_PASS:
3353 		case SCSI_PROT_WRITE_PASS:
3354 			bghm /= (cmd->device->sector_size +
3355 				sizeof(struct scsi_dif_tuple));
3356 			break;
3357 		}
3358 
3359 		failing_sector = scsi_get_lba(cmd);
3360 		failing_sector += bghm;
3361 
3362 		/* Descriptor Information */
3363 		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3364 	}
3365 
3366 	if (!ret) {
3367 		/* No error was reported - problem in FW? */
3368 		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3369 				"9057 BLKGRD: Unknown error in cmd"
3370 				" 0x%x lba 0x%llx blk cnt 0x%x "
3371 				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3372 				(unsigned long long)scsi_get_lba(cmd),
3373 				blk_rq_sectors(cmd->request), bgstat, bghm);
3374 
3375 		/* Calcuate what type of error it was */
3376 		lpfc_calc_bg_err(phba, lpfc_cmd);
3377 	}
3378 out:
3379 	return ret;
3380 }
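
/*
 * Illustrative sketch only (not driver code): how a consumer could pull
 * the failing LBA back out of the descriptor-format sense data that
 * lpfc_parse_bg_err() builds above.  The offsets assume the SPC-4
 * information descriptor laid out there (descriptor type 0x0 at byte 8,
 * eight bytes of big-endian information at bytes 12..19).
 */
#if 0	/* example, intentionally not compiled */
static u64 example_bg_failing_lba(const u8 *sense)
{
	/* bytes 12..19 of the sense buffer hold the failing LBA */
	return get_unaligned_be64(&sense[12]);
}
#endif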
3381 
3382 /**
3383  * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3384  * @phba: The Hba for which this call is being executed.
3385  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3386  *
3387  * This routine does the pci dma mapping for the scatter-gather list of the
3388  * scsi cmnd field of @lpfc_cmd, for devices with the SLI-4 interface spec.
3389  *
3390  * Return codes:
3391  *	1 - Error
3392  *	0 - Success
3393  **/
3394 static int
3395 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3396 {
3397 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3398 	struct scatterlist *sgel = NULL;
3399 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3400 	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
3401 	struct sli4_sge *first_data_sgl;
3402 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3403 	dma_addr_t physaddr;
3404 	uint32_t num_bde = 0;
3405 	uint32_t dma_len;
3406 	uint32_t dma_offset = 0;
3407 	int nseg;
3408 	struct ulp_bde64 *bde;
3409 
3410 	/*
3411 	 * There are three possibilities here - use scatter-gather segment, use
3412 	 * the single mapping, or neither.  Start the lpfc command prep by
3413 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3414 	 * data bde entry.
3415 	 */
3416 	if (scsi_sg_count(scsi_cmnd)) {
3417 		/*
3418 		 * The driver stores the segment count returned from dma_map_sg
3419 		 * because this is a count of dma-mappings used to map the use_sg
3420 		 * pages.  They are not guaranteed to be the same for those
3421 		 * architectures that implement an IOMMU.
3422 		 */
3423 
3424 		nseg = scsi_dma_map(scsi_cmnd);
3425 		if (unlikely(!nseg))
3426 			return 1;
3427 		sgl += 1;
3428 		/* clear the last flag in the fcp_rsp map entry */
3429 		sgl->word2 = le32_to_cpu(sgl->word2);
3430 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3431 		sgl->word2 = cpu_to_le32(sgl->word2);
3432 		sgl += 1;
3433 		first_data_sgl = sgl;
3434 		lpfc_cmd->seg_cnt = nseg;
3435 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3436 			lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3437 				" %s: Too many sg segments from "
3438 				"dma_map_sg.  Config %d, seg_cnt %d\n",
3439 				__func__, phba->cfg_sg_seg_cnt,
3440 			       lpfc_cmd->seg_cnt);
3441 			lpfc_cmd->seg_cnt = 0;
3442 			scsi_dma_unmap(scsi_cmnd);
3443 			return 1;
3444 		}
3445 
3446 		/*
3447 		 * The driver established a maximum scatter-gather segment count
3448 		 * during probe that limits the number of sg elements in any
3449 		 * single scsi command.  Just run through the seg_cnt and format
3450 		 * the sge's.
3451 		 * When using SLI-3 the driver will try to fit all the BDEs into
3452 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
3453 		 * does for SLI-2 mode.
3454 		 */
3455 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
3456 			physaddr = sg_dma_address(sgel);
3457 			dma_len = sg_dma_len(sgel);
3458 			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
3459 			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
3460 			sgl->word2 = le32_to_cpu(sgl->word2);
3461 			if ((num_bde + 1) == nseg)
3462 				bf_set(lpfc_sli4_sge_last, sgl, 1);
3463 			else
3464 				bf_set(lpfc_sli4_sge_last, sgl, 0);
3465 			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3466 			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
3467 			sgl->word2 = cpu_to_le32(sgl->word2);
3468 			sgl->sge_len = cpu_to_le32(dma_len);
3469 			dma_offset += dma_len;
3470 			sgl++;
3471 		}
3472 		/* setup the performance hint (first data BDE) if enabled */
3473 		if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
3474 			bde = (struct ulp_bde64 *)
3475 					&(iocb_cmd->unsli3.sli3Words[5]);
3476 			bde->addrLow = first_data_sgl->addr_lo;
3477 			bde->addrHigh = first_data_sgl->addr_hi;
3478 			bde->tus.f.bdeSize =
3479 					le32_to_cpu(first_data_sgl->sge_len);
3480 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3481 			bde->tus.w = cpu_to_le32(bde->tus.w);
3482 		}
3483 	} else {
3484 		sgl += 1;
3485 		/* clear the last flag in the fcp_rsp map entry */
3486 		sgl->word2 = le32_to_cpu(sgl->word2);
3487 		bf_set(lpfc_sli4_sge_last, sgl, 1);
3488 		sgl->word2 = cpu_to_le32(sgl->word2);
3489 	}
3490 
3491 	/*
3492 	 * Finish initializing those IOCB fields that are dependent on the
3493 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
3494 	 * explicitly reinitialized since all iocb memory resources are
3495 	 * reused.
3496 	 */
3497 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3498 
3499 	/*
3500 	 * Due to difference in data length between DIF/non-DIF paths,
3501 	 * we need to set word 4 of IOCB here
3502 	 */
3503 	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3504 	return 0;
3505 }
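
/*
 * Minimal usage sketch of the scsi_dma_map()/scsi_dma_unmap() pairing the
 * routine above relies on.  "sc", "sg", "i" and setup_one_sge() are
 * hypothetical names used only for illustration.
 */
#if 0	/* example, intentionally not compiled */
	int nseg = scsi_dma_map(sc);	/* >0 segs, 0 no data, <0 on error */

	if (nseg <= 0)
		return nseg;
	scsi_for_each_sg(sc, sg, nseg, i)
		setup_one_sge(sg_dma_address(sg), sg_dma_len(sg),
			      /* last = */ i == nseg - 1);
	/* ...and scsi_dma_unmap(sc) in the completion or error path */
#endif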
3506 
3507 /**
3508  * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3509  * @phba: The Hba for which this call is being executed.
3510  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3511  *
3512  * This is the protection/DIF aware version of
3513  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3514  * two functions eventually, but for now, it's here
3515  **/
3516 static int
3517 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3518 		struct lpfc_scsi_buf *lpfc_cmd)
3519 {
3520 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3521 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3522 	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
3523 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3524 	uint32_t num_sge = 0;
3525 	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3526 	int prot_group_type = 0;
3527 	int fcpdl;
3528 
3529 	/*
3530 	 * Start the lpfc command prep by bumping the sgl beyond the fcp_cmnd
3531 	 * and fcp_rsp regions to the first data sge entry
3532 	 */
3533 	if (scsi_sg_count(scsi_cmnd)) {
3534 		/*
3535 		 * The driver stores the segment count returned from dma_map_sg
3536 		 * because this is a count of dma-mappings used to map the use_sg
3537 		 * pages.  They are not guaranteed to be the same for those
3538 		 * architectures that implement an IOMMU.
3539 		 */
3540 		datasegcnt = dma_map_sg(&phba->pcidev->dev,
3541 					scsi_sglist(scsi_cmnd),
3542 					scsi_sg_count(scsi_cmnd), datadir);
3543 		if (unlikely(!datasegcnt))
3544 			return 1;
3545 
3546 		sgl += 1;
3547 		/* clear the last flag in the fcp_rsp map entry */
3548 		sgl->word2 = le32_to_cpu(sgl->word2);
3549 		bf_set(lpfc_sli4_sge_last, sgl, 0);
3550 		sgl->word2 = cpu_to_le32(sgl->word2);
3551 
3552 		sgl += 1;
3553 		lpfc_cmd->seg_cnt = datasegcnt;
3554 
3555 		/* First check if data segment count from SCSI Layer is good */
3556 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt)
3557 			goto err;
3558 
3559 		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3560 
3561 		switch (prot_group_type) {
3562 		case LPFC_PG_TYPE_NO_DIF:
3563 			/* Here we need to add a DISEED to the count */
3564 			if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt)
3565 				goto err;
3566 
3567 			num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3568 					datasegcnt);
3569 
3570 			/* we should have 2 or more entries in buffer list */
3571 			if (num_sge < 2)
3572 				goto err;
3573 			break;
3574 
3575 		case LPFC_PG_TYPE_DIF_BUF:
3576 			/*
3577 			 * This type indicates that protection buffers are
3578 			 * passed to the driver, so that needs to be prepared
3579 			 * for DMA
3580 			 */
3581 			protsegcnt = dma_map_sg(&phba->pcidev->dev,
3582 					scsi_prot_sglist(scsi_cmnd),
3583 					scsi_prot_sg_count(scsi_cmnd), datadir);
3584 			if (unlikely(!protsegcnt)) {
3585 				scsi_dma_unmap(scsi_cmnd);
3586 				return 1;
3587 			}
3588 
3589 			lpfc_cmd->prot_seg_cnt = protsegcnt;
3590 			/*
3591 			 * There is a minimum of 3 SGEs used for every
3592 			 * protection data segment.
3593 			 */
3594 			if ((lpfc_cmd->prot_seg_cnt * 3) >
3595 			    (phba->cfg_total_seg_cnt - 2))
3596 				goto err;
3597 
3598 			num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3599 					datasegcnt, protsegcnt);
3600 
3601 			/* we should have 3 or more entries in buffer list */
3602 			if ((num_sge < 3) ||
3603 			    (num_sge > phba->cfg_total_seg_cnt))
3604 				goto err;
3605 			break;
3606 
3607 		case LPFC_PG_TYPE_INVALID:
3608 		default:
3609 			scsi_dma_unmap(scsi_cmnd);
3610 			lpfc_cmd->seg_cnt = 0;
3611 
3612 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3613 					"9083 Unexpected protection group %i\n",
3614 					prot_group_type);
3615 			return 1;
3616 		}
3617 	}
3618 
3619 	switch (scsi_get_prot_op(scsi_cmnd)) {
3620 	case SCSI_PROT_WRITE_STRIP:
3621 	case SCSI_PROT_READ_STRIP:
3622 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3623 		break;
3624 	case SCSI_PROT_WRITE_INSERT:
3625 	case SCSI_PROT_READ_INSERT:
3626 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3627 		break;
3628 	case SCSI_PROT_WRITE_PASS:
3629 	case SCSI_PROT_READ_PASS:
3630 		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3631 		break;
3632 	}
3633 
3634 	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3635 	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3636 
3637 	/*
3638 	 * Due to difference in data length between DIF/non-DIF paths,
3639 	 * we need to set word 4 of IOCB here
3640 	 */
3641 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3642 
3643 	return 0;
3644 err:
3645 	if (lpfc_cmd->seg_cnt)
3646 		scsi_dma_unmap(scsi_cmnd);
3647 	if (lpfc_cmd->prot_seg_cnt)
3648 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3649 			     scsi_prot_sg_count(scsi_cmnd),
3650 			     scsi_cmnd->sc_data_direction);
3651 
3652 	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3653 			"9084 Cannot setup S/G List for HBA"
3654 			"IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3655 			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3656 			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3657 			prot_group_type, num_sge);
3658 
3659 	lpfc_cmd->seg_cnt = 0;
3660 	lpfc_cmd->prot_seg_cnt = 0;
3661 	return 1;
3662 }
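
/*
 * Back-of-the-envelope sketch of the wire-length adjustment that
 * lpfc_bg_scsi_adjust_dl() performs for the routine above (assuming
 * 512-byte sectors): when protection data travels on the wire, every
 * sector grows by one 8-byte DIF tuple.
 */
#if 0	/* example, intentionally not compiled */
	u32 sectors = fcpdl / 512;

	fcpdl += sectors * sizeof(struct scsi_dif_tuple);	/* +8 per sector */
#endif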
3663 
3664 /**
3665  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3666  * @phba: The Hba for which this call is being executed.
3667  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3668  *
3669  * This routine wraps the actual DMA mapping function pointer from the
3670  * lpfc_hba struct.
3671  *
3672  * Return codes:
3673  *	1 - Error
3674  *	0 - Success
3675  **/
3676 static inline int
3677 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3678 {
3679 	return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3680 }
3681 
3682 /**
3683  * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3684  * using BlockGuard.
3685  * @phba: The Hba for which this call is being executed.
3686  * @lpfc_cmd: The scsi buffer which is going to be mapped.
3687  *
3688  * This routine wraps the actual DMA mapping function pointer from the
3689  * lpfc_hba struct.
3690  *
3691  * Return codes:
3692  *	1 - Error
3693  *	0 - Success
3694  **/
3695 static inline int
3696 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3697 {
3698 	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3699 }
3700 
3701 /**
3702  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3703  * @phba: Pointer to hba context object.
3704  * @vport: Pointer to vport object.
3705  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3706  * @rsp_iocb: Pointer to response iocb object which reported error.
3707  *
3708  * This function posts an event when there is a SCSI command reporting
3709  * error from the scsi device.
3710  **/
3711 static void
3712 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3713 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3714 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3715 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3716 	uint32_t resp_info = fcprsp->rspStatus2;
3717 	uint32_t scsi_status = fcprsp->rspStatus3;
3718 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3719 	struct lpfc_fast_path_event *fast_path_evt = NULL;
3720 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3721 	unsigned long flags;
3722 
3723 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3724 		return;
3725 
3726 	/* If there is a queue-full or busy condition, send a scsi event */
3727 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3728 		(cmnd->result == SAM_STAT_BUSY)) {
3729 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3730 		if (!fast_path_evt)
3731 			return;
3732 		fast_path_evt->un.scsi_evt.event_type =
3733 			FC_REG_SCSI_EVENT;
3734 		fast_path_evt->un.scsi_evt.subcategory =
3735 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3736 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3737 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3738 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3739 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3740 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3741 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3742 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3743 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3744 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3745 		if (!fast_path_evt)
3746 			return;
3747 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3748 			FC_REG_SCSI_EVENT;
3749 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3750 			LPFC_EVENT_CHECK_COND;
3751 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
3752 			cmnd->device->lun;
3753 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3754 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3755 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3756 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3757 		fast_path_evt->un.check_cond_evt.sense_key =
3758 			cmnd->sense_buffer[2] & 0xf;
3759 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3760 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3761 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3762 		     fcpi_parm &&
3763 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3764 			((scsi_status == SAM_STAT_GOOD) &&
3765 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
3766 		/*
3767 		 * If fcpi_parm is valid and either the resid does not match
3768 		 * it or the status is good, then there is a read_check error
3769 		 */
3770 		fast_path_evt = lpfc_alloc_fast_evt(phba);
3771 		if (!fast_path_evt)
3772 			return;
3773 		fast_path_evt->un.read_check_error.header.event_type =
3774 			FC_REG_FABRIC_EVENT;
3775 		fast_path_evt->un.read_check_error.header.subcategory =
3776 			LPFC_EVENT_FCPRDCHKERR;
3777 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3778 			&pnode->nlp_portname, sizeof(struct lpfc_name));
3779 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3780 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
3781 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3782 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3783 		fast_path_evt->un.read_check_error.fcpiparam =
3784 			fcpi_parm;
3785 	} else
3786 		return;
3787 
3788 	fast_path_evt->vport = vport;
3789 	spin_lock_irqsave(&phba->hbalock, flags);
3790 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3791 	spin_unlock_irqrestore(&phba->hbalock, flags);
3792 	lpfc_worker_wake_up(phba);
3793 	return;
3794 }
3795 
3796 /**
3797  * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3798  * @phba: The HBA for which this call is being executed.
3799  * @psb: The scsi buffer which is going to be un-mapped.
3800  *
3801  * This routine does DMA un-mapping of the scatter-gather list of the scsi
3802  * command held in @psb, including any protection-data scatter-gather list.
3803  **/
3804 static void
3805 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
3806 {
3807 	/*
3808 	 * There are only two special cases to consider.  (1) the scsi command
3809 	 * requested scatter-gather usage or (2) the scsi command allocated
3810 	 * a request buffer, but did not request use_sg.  There is a third
3811 	 * case, but it does not require resource deallocation.
3812 	 */
3813 	if (psb->seg_cnt > 0)
3814 		scsi_dma_unmap(psb->pCmd);
3815 	if (psb->prot_seg_cnt > 0)
3816 		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3817 				scsi_prot_sg_count(psb->pCmd),
3818 				psb->pCmd->sc_data_direction);
3819 }
3820 
3821 /**
3822  * lpfc_handle_fcp_err - FCP response handler
3823  * @vport: The virtual port for which this call is being executed.
3824  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
3825  * @rsp_iocb: The response IOCB which contains FCP error.
3826  *
3827  * This routine is called to process response IOCB with status field
3828  * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3829  * based upon SCSI and FCP error.
3830  **/
3831 static void
3832 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
3833 		    struct lpfc_iocbq *rsp_iocb)
3834 {
3835 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3836 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3837 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3838 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3839 	uint32_t resp_info = fcprsp->rspStatus2;
3840 	uint32_t scsi_status = fcprsp->rspStatus3;
3841 	uint32_t *lp;
3842 	uint32_t host_status = DID_OK;
3843 	uint32_t rsplen = 0;
3844 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3845 
3846 
3847 	/*
3848 	 *  If this is a task management command, there is no
3849 	 *  scsi packet associated with this lpfc_cmd.  The driver
3850 	 *  consumes it.
3851 	 */
3852 	if (fcpcmd->fcpCntl2) {
3853 		scsi_status = 0;
3854 		goto out;
3855 	}
3856 
3857 	if (resp_info & RSP_LEN_VALID) {
3858 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
3859 		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3860 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3861 				 "2719 Invalid response length: "
3862 				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
3863 				 cmnd->device->id,
3864 				 cmnd->device->lun, cmnd->cmnd[0],
3865 				 rsplen);
3866 			host_status = DID_ERROR;
3867 			goto out;
3868 		}
3869 		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3870 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3871 				 "2757 Protocol failure detected during "
3872 				 "processing of FCP I/O op: "
3873 				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
3874 				 cmnd->device->id,
3875 				 cmnd->device->lun, cmnd->cmnd[0],
3876 				 fcprsp->rspInfo3);
3877 			host_status = DID_ERROR;
3878 			goto out;
3879 		}
3880 	}
3881 
3882 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3883 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3884 		if (snslen > SCSI_SENSE_BUFFERSIZE)
3885 			snslen = SCSI_SENSE_BUFFERSIZE;
3886 
3887 		if (resp_info & RSP_LEN_VALID)
3888 			rsplen = be32_to_cpu(fcprsp->rspRspLen);
3889 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3890 	}
3891 	lp = (uint32_t *)cmnd->sense_buffer;
3892 
3893 	/* special handling for under run conditions */
3894 	if (!scsi_status && (resp_info & RESID_UNDER)) {
3895 		/* don't log underruns if LOG_FCP verbosity is set... */
3896 		if (vport->cfg_log_verbose & LOG_FCP)
3897 			logit = LOG_FCP_ERROR;
3898 		/* ...unless the operator asked for LOG_FCP_UNDER */
3899 		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3900 			logit = LOG_FCP_UNDER;
3901 	}
3902 
3903 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
3904 			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3905 			 "Data: x%x x%x x%x x%x x%x\n",
3906 			 cmnd->cmnd[0], scsi_status,
3907 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3908 			 be32_to_cpu(fcprsp->rspResId),
3909 			 be32_to_cpu(fcprsp->rspSnsLen),
3910 			 be32_to_cpu(fcprsp->rspRspLen),
3911 			 fcprsp->rspInfo3);
3912 
3913 	scsi_set_resid(cmnd, 0);
3914 	if (resp_info & RESID_UNDER) {
3915 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3916 
3917 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3918 				 "9025 FCP Read Underrun, expected %d, "
3919 				 "residual %d Data: x%x x%x x%x\n",
3920 				 be32_to_cpu(fcpcmd->fcpDl),
3921 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3922 				 cmnd->underflow);
3923 
3924 		/*
3925 		 * On an underrun, check whether the underrun reported by the
3926 		 * storage array matches the underrun reported by the HBA.
3927 		 * If they differ, a frame was dropped.
3928 		 */
3929 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3930 			fcpi_parm &&
3931 			(scsi_get_resid(cmnd) != fcpi_parm)) {
3932 			lpfc_printf_vlog(vport, KERN_WARNING,
3933 					 LOG_FCP | LOG_FCP_ERROR,
3934 					 "9026 FCP Read Check Error "
3935 					 "and Underrun Data: x%x x%x x%x x%x\n",
3936 					 be32_to_cpu(fcpcmd->fcpDl),
3937 					 scsi_get_resid(cmnd), fcpi_parm,
3938 					 cmnd->cmnd[0]);
3939 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3940 			host_status = DID_ERROR;
3941 		}
3942 		/*
3943 		 * The cmnd->underflow is the minimum number of bytes that must
3944 		 * be transferred for this command.  Provided a sense condition
3945 		 * is not present, make sure the actual amount transferred is at
3946 		 * least the underflow value or fail.
3947 		 */
3948 		if (!(resp_info & SNS_LEN_VALID) &&
3949 		    (scsi_status == SAM_STAT_GOOD) &&
3950 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3951 		     < cmnd->underflow)) {
3952 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3953 					 "9027 FCP command x%x residual "
3954 					 "underrun converted to error "
3955 					 "Data: x%x x%x x%x\n",
3956 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
3957 					 scsi_get_resid(cmnd), cmnd->underflow);
3958 			host_status = DID_ERROR;
3959 		}
3960 	} else if (resp_info & RESID_OVER) {
3961 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3962 				 "9028 FCP command x%x residual overrun error. "
3963 				 "Data: x%x x%x\n", cmnd->cmnd[0],
3964 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3965 		host_status = DID_ERROR;
3966 
3967 	/*
3968 	 * Check, via SLI validation, that the entire transfer was actually done
3969 	 * (fcpi_parm should be zero).
3970 	 */
3971 	} else if (fcpi_parm) {
3972 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3973 				 "9029 FCP Data Transfer Check Error: "
3974 				 "x%x x%x x%x x%x x%x\n",
3975 				 be32_to_cpu(fcpcmd->fcpDl),
3976 				 be32_to_cpu(fcprsp->rspResId),
3977 				 fcpi_parm, cmnd->cmnd[0], scsi_status);
3978 		switch (scsi_status) {
3979 		case SAM_STAT_GOOD:
3980 		case SAM_STAT_CHECK_CONDITION:
3981 			/* Fabric dropped a data frame. Fail any successful
3982 			 * command in which we detected dropped frames.
3983 			 * A status of good or some check conditions could
3984 			 * be considered a successful command.
3985 			 */
3986 			host_status = DID_ERROR;
3987 			break;
3988 		}
3989 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3990 	}
3991 
3992  out:
3993 	cmnd->result = ScsiResult(host_status, scsi_status);
3994 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3995 }
3996 
3997 /**
3998  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3999  * @phba: The Hba for which this call is being executed.
4000  * @pIocbIn: The command IOCBQ for the scsi cmnd.
4001  * @pIocbOut: The response IOCBQ for the scsi cmnd.
4002  *
4003  * This routine assigns scsi command result by looking into response IOCB
4004  * status field appropriately. This routine handles QUEUE FULL condition as
4005  * well by ramping down device queue depth.
4006  **/
4007 static void
4008 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4009 			struct lpfc_iocbq *pIocbOut)
4010 {
4011 	struct lpfc_scsi_buf *lpfc_cmd =
4012 		(struct lpfc_scsi_buf *) pIocbIn->context1;
4013 	struct lpfc_vport      *vport = pIocbIn->vport;
4014 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4015 	struct lpfc_nodelist *pnode = rdata->pnode;
4016 	struct scsi_cmnd *cmd;
4017 	int result;
4018 	struct scsi_device *tmp_sdev;
4019 	int depth;
4020 	unsigned long flags;
4021 	struct lpfc_fast_path_event *fast_path_evt;
4022 	struct Scsi_Host *shost;
4023 	uint32_t queue_depth, scsi_id;
4024 	uint32_t logit = LOG_FCP;
4025 
4026 	/* Sanity check on return of outstanding command */
4027 	if (!(lpfc_cmd->pCmd))
4028 		return;
4029 	cmd = lpfc_cmd->pCmd;
4030 	shost = cmd->device->host;
4031 
4032 	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4033 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4034 	/* pick up SLI4 exchange busy status from HBA */
4035 	lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
4036 
4037 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4038 	if (lpfc_cmd->prot_data_type) {
4039 		struct scsi_dif_tuple *src = NULL;
4040 
4041 		src =  (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4042 		/*
4043 		 * Used to restore any changes to protection
4044 		 * data for error injection.
4045 		 */
4046 		switch (lpfc_cmd->prot_data_type) {
4047 		case LPFC_INJERR_REFTAG:
4048 			src->ref_tag =
4049 				lpfc_cmd->prot_data;
4050 			break;
4051 		case LPFC_INJERR_APPTAG:
4052 			src->app_tag =
4053 				(uint16_t)lpfc_cmd->prot_data;
4054 			break;
4055 		case LPFC_INJERR_GUARD:
4056 			src->guard_tag =
4057 				(uint16_t)lpfc_cmd->prot_data;
4058 			break;
4059 		default:
4060 			break;
4061 		}
4062 
4063 		lpfc_cmd->prot_data = 0;
4064 		lpfc_cmd->prot_data_type = 0;
4065 		lpfc_cmd->prot_data_segment = NULL;
4066 	}
4067 #endif
4068 	if (pnode && NLP_CHK_NODE_ACT(pnode))
4069 		atomic_dec(&pnode->cmd_pending);
4070 
4071 	if (lpfc_cmd->status) {
4072 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4073 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
4074 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4075 		else if (lpfc_cmd->status >= IOSTAT_CNT)
4076 			lpfc_cmd->status = IOSTAT_DEFAULT;
4077 		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4078 		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
4079 		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4080 		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4081 			logit = 0;
4082 		else
4083 			logit = LOG_FCP | LOG_FCP_UNDER;
4084 		lpfc_printf_vlog(vport, KERN_WARNING, logit,
4085 			 "9030 FCP cmd x%x failed <%d/%d> "
4086 			 "status: x%x result: x%x "
4087 			 "sid: x%x did: x%x oxid: x%x "
4088 			 "Data: x%x x%x\n",
4089 			 cmd->cmnd[0],
4090 			 cmd->device ? cmd->device->id : 0xffff,
4091 			 cmd->device ? cmd->device->lun : 0xffff,
4092 			 lpfc_cmd->status, lpfc_cmd->result,
4093 			 vport->fc_myDID,
4094 			 (pnode) ? pnode->nlp_DID : 0,
4095 			 phba->sli_rev == LPFC_SLI_REV4 ?
4096 			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4097 			 pIocbOut->iocb.ulpContext,
4098 			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4099 
4100 		switch (lpfc_cmd->status) {
4101 		case IOSTAT_FCP_RSP_ERROR:
4102 			/* Call FCP RSP handler to determine result */
4103 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
4104 			break;
4105 		case IOSTAT_NPORT_BSY:
4106 		case IOSTAT_FABRIC_BSY:
4107 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
4108 			fast_path_evt = lpfc_alloc_fast_evt(phba);
4109 			if (!fast_path_evt)
4110 				break;
4111 			fast_path_evt->un.fabric_evt.event_type =
4112 				FC_REG_FABRIC_EVENT;
4113 			fast_path_evt->un.fabric_evt.subcategory =
4114 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4115 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4116 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4117 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4118 					&pnode->nlp_portname,
4119 					sizeof(struct lpfc_name));
4120 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4121 					&pnode->nlp_nodename,
4122 					sizeof(struct lpfc_name));
4123 			}
4124 			fast_path_evt->vport = vport;
4125 			fast_path_evt->work_evt.evt =
4126 				LPFC_EVT_FASTPATH_MGMT_EVT;
4127 			spin_lock_irqsave(&phba->hbalock, flags);
4128 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
4129 				&phba->work_list);
4130 			spin_unlock_irqrestore(&phba->hbalock, flags);
4131 			lpfc_worker_wake_up(phba);
4132 			break;
4133 		case IOSTAT_LOCAL_REJECT:
4134 		case IOSTAT_REMOTE_STOP:
4135 			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4136 			    lpfc_cmd->result ==
4137 					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4138 			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4139 			    lpfc_cmd->result ==
4140 					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4141 				cmd->result = ScsiResult(DID_NO_CONNECT, 0);
4142 				break;
4143 			}
4144 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4145 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
4146 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4147 			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4148 				cmd->result = ScsiResult(DID_REQUEUE, 0);
4149 				break;
4150 			}
4151 			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4152 			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4153 			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4154 				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4155 					/*
4156 					 * This is a response for a BG enabled
4157 					 * cmd. Parse BG error
4158 					 */
4159 					lpfc_parse_bg_err(phba, lpfc_cmd,
4160 							pIocbOut);
4161 					break;
4162 				} else {
4163 					lpfc_printf_vlog(vport, KERN_WARNING,
4164 							LOG_BG,
4165 							"9031 non-zero BGSTAT "
4166 							"on unprotected cmd\n");
4167 				}
4168 			}
4169 			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4170 				&& (phba->sli_rev == LPFC_SLI_REV4)
4171 				&& (pnode && NLP_CHK_NODE_ACT(pnode))) {
4172 				/* This IO was aborted by the target, we don't
4173 				 * know the rxid and because we did not send the
4174 				 * ABTS we cannot generate an RRQ.
4175 				 */
4176 				lpfc_set_rrq_active(phba, pnode,
4177 					lpfc_cmd->cur_iocbq.sli4_lxritag,
4178 					0, 0);
4179 			}
4180 		/* else: fall through */
4181 		default:
4182 			cmd->result = ScsiResult(DID_ERROR, 0);
4183 			break;
4184 		}
4185 
4186 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
4187 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4188 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
4189 						 SAM_STAT_BUSY);
4190 	} else
4191 		cmd->result = ScsiResult(DID_OK, 0);
4192 
4193 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4194 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4195 
4196 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4197 				 "0710 Iodone <%d/%d> cmd %p, error "
4198 				 "x%x SNS x%x x%x Data: x%x x%x\n",
4199 				 cmd->device->id, cmd->device->lun, cmd,
4200 				 cmd->result, *lp, *(lp + 3), cmd->retries,
4201 				 scsi_get_resid(cmd));
4202 	}
4203 
4204 	lpfc_update_stats(phba, lpfc_cmd);
4205 	result = cmd->result;
4206 	if (vport->cfg_max_scsicmpl_time &&
4207 	   time_after(jiffies, lpfc_cmd->start_time +
4208 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4209 		spin_lock_irqsave(shost->host_lock, flags);
4210 		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4211 			if (pnode->cmd_qdepth >
4212 				atomic_read(&pnode->cmd_pending) &&
4213 				(atomic_read(&pnode->cmd_pending) >
4214 				LPFC_MIN_TGT_QDEPTH) &&
4215 				((cmd->cmnd[0] == READ_10) ||
4216 				(cmd->cmnd[0] == WRITE_10)))
4217 				pnode->cmd_qdepth =
4218 					atomic_read(&pnode->cmd_pending);
4219 
4220 			pnode->last_change_time = jiffies;
4221 		}
4222 		spin_unlock_irqrestore(shost->host_lock, flags);
4223 	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4224 		if ((pnode->cmd_qdepth < vport->cfg_tgt_queue_depth) &&
4225 		   time_after(jiffies, pnode->last_change_time +
4226 			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
4227 			spin_lock_irqsave(shost->host_lock, flags);
4228 			depth = pnode->cmd_qdepth * LPFC_TGTQ_RAMPUP_PCENT
4229 				/ 100;
4230 			depth = depth ? depth : 1;
4231 			pnode->cmd_qdepth += depth;
4232 			if (pnode->cmd_qdepth > vport->cfg_tgt_queue_depth)
4233 				pnode->cmd_qdepth = vport->cfg_tgt_queue_depth;
4234 			pnode->last_change_time = jiffies;
4235 			spin_unlock_irqrestore(shost->host_lock, flags);
4236 		}
4237 	}
4238 
4239 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4240 
4241 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
4242 	queue_depth = cmd->device->queue_depth;
4243 	scsi_id = cmd->device->id;
4244 	cmd->scsi_done(cmd);
4245 
4246 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4247 		spin_lock_irqsave(&phba->hbalock, flags);
4248 		lpfc_cmd->pCmd = NULL;
4249 		spin_unlock_irqrestore(&phba->hbalock, flags);
4250 
4251 		/*
4252 		 * If there is a thread waiting for command completion
4253 		 * wake up the thread.
4254 		 */
4255 		spin_lock_irqsave(shost->host_lock, flags);
4256 		if (lpfc_cmd->waitq)
4257 			wake_up(lpfc_cmd->waitq);
4258 		spin_unlock_irqrestore(shost->host_lock, flags);
4259 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4260 		return;
4261 	}
4262 
4263 	if (!result)
4264 		lpfc_rampup_queue_depth(vport, queue_depth);
4265 
4266 	/*
4267 	 * Check for queue full.  If the lun is reporting queue full, then
4268 	 * back off the lun queue depth to prevent target overloads.
4269 	 */
4270 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
4271 	    NLP_CHK_NODE_ACT(pnode)) {
4272 		shost_for_each_device(tmp_sdev, shost) {
4273 			if (tmp_sdev->id != scsi_id)
4274 				continue;
4275 			depth = scsi_track_queue_full(tmp_sdev,
4276 						      tmp_sdev->queue_depth-1);
4277 			if (depth <= 0)
4278 				continue;
4279 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4280 					 "0711 detected queue full - lun queue "
4281 					 "depth adjusted to %d.\n", depth);
4282 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
4283 							       pnode,
4284 							       tmp_sdev->lun,
4285 							       depth+1, depth);
4286 		}
4287 	}
4288 
4289 	spin_lock_irqsave(&phba->hbalock, flags);
4290 	lpfc_cmd->pCmd = NULL;
4291 	spin_unlock_irqrestore(&phba->hbalock, flags);
4292 
4293 	/*
4294 	 * If there is a thread waiting for command completion
4295 	 * wake up the thread.
4296 	 */
4297 	spin_lock_irqsave(shost->host_lock, flags);
4298 	if (lpfc_cmd->waitq)
4299 		wake_up(lpfc_cmd->waitq);
4300 	spin_unlock_irqrestore(shost->host_lock, flags);
4301 
4302 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4303 }
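
/*
 * Worked example of the queue-depth ramp-up in the completion routine
 * above, assuming LPFC_TGTQ_RAMPUP_PCENT is 5 (percent): a cmd_qdepth
 * of 64 grows by max(64 * 5 / 100, 1) = 3 every LPFC_TGTQ_INTERVAL,
 * until it is clamped at vport->cfg_tgt_queue_depth.
 */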
4304 
4305 /**
4306  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4307  * @data: A pointer to the immediate command data portion of the IOCB.
4308  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4309  *
4310  * The routine copies the entire FCP command from @fcp_cmnd to @data while
4311  * byte swapping the data to big endian format for transmission on the wire.
4312  **/
4313 static void
4314 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4315 {
4316 	int i, j;
4317 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4318 	     i += sizeof(uint32_t), j++) {
4319 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4320 	}
4321 }
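
/*
 * Endianness sketch for the word-swap loop above.  cpu_to_be32() makes
 * the in-memory byte order identical on every host, which is what the
 * wire format requires.
 */
#if 0	/* example, intentionally not compiled */
	u32 host_word = 0x00112233;
	u32 wire_word = cpu_to_be32(host_word);
	/*
	 * wire_word is now stored as bytes 00 11 22 33 regardless of host
	 * endianness; on big-endian machines the conversion is a no-op.
	 */
#endif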
4322 
4323 /**
4324  * lpfc_scsi_prep_cmnd - Convert a scsi cmnd to an FCP information unit
4325  * @vport: The virtual port for which this call is being executed.
4326  * @lpfc_cmd: The scsi command which needs to be sent.
4327  * @pnode: Pointer to lpfc_nodelist.
4328  *
4329  * This routine initializes fcp_cmnd and iocb data structure from scsi command
4330  * to transfer for device with SLI3 interface spec.
4331  **/
4332 static void
4333 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
4334 		    struct lpfc_nodelist *pnode)
4335 {
4336 	struct lpfc_hba *phba = vport->phba;
4337 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4338 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4339 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4340 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4341 	int datadir = scsi_cmnd->sc_data_direction;
4342 	char tag[2];
4343 	uint8_t *ptr;
4344 	bool sli4;
4345 
4346 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4347 		return;
4348 
4349 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4350 	/* clear task management bits */
4351 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4352 
4353 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4354 			&lpfc_cmd->fcp_cmnd->fcp_lun);
4355 
4356 	ptr = &fcp_cmnd->fcpCdb[0];
4357 	memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4358 	if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4359 		ptr += scsi_cmnd->cmd_len;
4360 		memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4361 	}
4362 
4363 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
4364 		switch (tag[0]) {
4365 		case HEAD_OF_QUEUE_TAG:
4366 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
4367 			break;
4368 		case ORDERED_QUEUE_TAG:
4369 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
4370 			break;
4371 		default:
4372 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4373 			break;
4374 		}
4375 	} else
4376 		fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4377 
4378 	sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4379 
4380 	/*
4381 	 * There are three possibilities here - use scatter-gather segment, use
4382 	 * the single mapping, or neither.  Start the lpfc command prep by
4383 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4384 	 * data bde entry.
4385 	 */
4386 	if (scsi_sg_count(scsi_cmnd)) {
4387 		if (datadir == DMA_TO_DEVICE) {
4388 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4389 			if (sli4)
4390 				iocb_cmd->ulpPU = PARM_READ_CHECK;
4391 			else {
4392 				iocb_cmd->un.fcpi.fcpi_parm = 0;
4393 				iocb_cmd->ulpPU = 0;
4394 			}
4395 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
4396 			phba->fc4OutputRequests++;
4397 		} else {
4398 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4399 			iocb_cmd->ulpPU = PARM_READ_CHECK;
4400 			fcp_cmnd->fcpCntl3 = READ_DATA;
4401 			phba->fc4InputRequests++;
4402 		}
4403 	} else {
4404 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4405 		iocb_cmd->un.fcpi.fcpi_parm = 0;
4406 		iocb_cmd->ulpPU = 0;
4407 		fcp_cmnd->fcpCntl3 = 0;
4408 		phba->fc4ControlRequests++;
4409 	}
4410 	if (phba->sli_rev == 3 &&
4411 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4412 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4413 	/*
4414 	 * Finish initializing those IOCB fields that are independent
4415 	 * of the scsi_cmnd request_buffer
4416 	 */
4417 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
4418 	if (sli4)
4419 		piocbq->iocb.ulpContext =
4420 		  phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4421 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4422 		piocbq->iocb.ulpFCP2Rcvy = 1;
4423 	else
4424 		piocbq->iocb.ulpFCP2Rcvy = 0;
4425 
4426 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4427 	piocbq->context1  = lpfc_cmd;
4428 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4429 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4430 	piocbq->vport = vport;
4431 }
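
/*
 * Sketch of the CDB copy-and-pad performed above (assuming
 * LPFC_FCP_CDB_LEN is 16): a 10-byte CDB such as READ_10 fills bytes
 * 0..9 of fcpCdb and bytes 10..15 are zeroed, so no stale bytes from a
 * previous command ever reach the wire.
 */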
4432 
4433 /**
4434  * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4435  * @vport: The virtual port for which this call is being executed.
4436  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
4437  * @lun: Logical unit number.
4438  * @task_mgmt_cmd: SCSI task management command.
4439  *
4440  * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4441  * for device with SLI-3 interface spec.
4442  *
4443  * Return codes:
4444  *   0 - Error
4445  *   1 - Success
4446  **/
4447 static int
4448 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4449 			     struct lpfc_scsi_buf *lpfc_cmd,
4450 			     unsigned int lun,
4451 			     uint8_t task_mgmt_cmd)
4452 {
4453 	struct lpfc_iocbq *piocbq;
4454 	IOCB_t *piocb;
4455 	struct fcp_cmnd *fcp_cmnd;
4456 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4457 	struct lpfc_nodelist *ndlp = rdata->pnode;
4458 
4459 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4460 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4461 		return 0;
4462 
4463 	piocbq = &(lpfc_cmd->cur_iocbq);
4464 	piocbq->vport = vport;
4465 
4466 	piocb = &piocbq->iocb;
4467 
4468 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
4469 	/* Clear out any old data in the FCP command area */
4470 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4471 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4472 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4473 	if (vport->phba->sli_rev == 3 &&
4474 	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4475 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4476 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4477 	piocb->ulpContext = ndlp->nlp_rpi;
4478 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4479 		piocb->ulpContext =
4480 		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4481 	}
4482 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4483 		piocb->ulpFCP2Rcvy = 1;
4484 	}
4485 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4486 
4487 	/* ulpTimeout is only one byte */
4488 	if (lpfc_cmd->timeout > 0xff) {
4489 		/*
4490 		 * Do not timeout the command at the firmware level.
4491 		 * The driver will provide the timeout mechanism.
4492 		 */
4493 		piocb->ulpTimeout = 0;
4494 	} else
4495 		piocb->ulpTimeout = lpfc_cmd->timeout;
4496 
4497 	if (vport->phba->sli_rev == LPFC_SLI_REV4)
4498 		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4499 
4500 	return 1;
4501 }
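
/*
 * Hypothetical caller sketch (the real one, lpfc_send_taskmgmt(), appears
 * later in this file): build a LUN reset TMF and only issue it when the
 * node is mapped.
 */
#if 0	/* example, intentionally not compiled */
	if (!lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					  FCP_LUN_RESET)) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);	/* node not usable */
		return FAILED;
	}
#endif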
4502 
4503 /**
4504  * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4505  * @phba: The hba struct for which this call is being executed.
4506  * @dev_grp: The HBA PCI-Device group number.
4507  *
4508  * This routine sets up the SCSI interface API function jump table in @phba
4509  * struct.
4510  * Returns: 0 - success, -ENODEV - failure.
4511  **/
4512 int
4513 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4514 {
4515 
4516 	phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4517 	phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4518 
4519 	switch (dev_grp) {
4520 	case LPFC_PCI_DEV_LP:
4521 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
4522 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4523 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4524 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4525 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4526 		break;
4527 	case LPFC_PCI_DEV_OC:
4528 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
4529 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4530 		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4531 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4532 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4533 		break;
4534 	default:
4535 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4536 				"1418 Invalid HBA PCI-device group: 0x%x\n",
4537 				dev_grp);
4538 		return -ENODEV;
4539 		break;
4540 	}
4541 	phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4542 	phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4543 	return 0;
4544 }
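
/*
 * Probe-time wiring sketch for the jump table above; after this, callers
 * simply indirect through @phba and never test the SLI revision again.
 */
#if 0	/* example, intentionally not compiled */
	if (lpfc_scsi_api_table_setup(phba, LPFC_PCI_DEV_OC))
		return -ENODEV;	/* unknown PCI device group */
#endif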
4545 
4546 /**
4547  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
4548  * @phba: The Hba for which this call is being executed.
4549  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4550  * @rspiocbq: Pointer to lpfc_iocbq data structure.
4551  *
4552  * This routine is the IOCB completion routine for the device reset and
4553  * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
4554  **/
4555 static void
4556 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4557 			struct lpfc_iocbq *cmdiocbq,
4558 			struct lpfc_iocbq *rspiocbq)
4559 {
4560 	struct lpfc_scsi_buf *lpfc_cmd =
4561 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
4562 	if (lpfc_cmd)
4563 		lpfc_release_scsi_buf(phba, lpfc_cmd);
4564 	return;
4565 }
4566 
4567 /**
4568  * lpfc_info - Info entry point of scsi_host_template data structure
4569  * @host: The scsi host for which this call is being executed.
4570  *
4571  * This routine provides module information about hba.
4572  *
4573  * Return code:
4574  *   Pointer to char - Success.
4575  **/
4576 const char *
4577 lpfc_info(struct Scsi_Host *host)
4578 {
4579 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4580 	struct lpfc_hba   *phba = vport->phba;
4581 	int len, link_speed = 0;
4582 	static char  lpfcinfobuf[384];
4583 
4584 	memset(lpfcinfobuf, 0, 384);
4585 	if (phba && phba->pcidev) {
4586 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
4587 		len = strlen(lpfcinfobuf);
4588 		snprintf(lpfcinfobuf + len,
4589 			384-len,
4590 			" on PCI bus %02x device %02x irq %d",
4591 			phba->pcidev->bus->number,
4592 			phba->pcidev->devfn,
4593 			phba->pcidev->irq);
4594 		len = strlen(lpfcinfobuf);
4595 		if (phba->Port[0]) {
4596 			snprintf(lpfcinfobuf + len,
4597 				 384-len,
4598 				 " port %s",
4599 				 phba->Port);
4600 		}
4601 		len = strlen(lpfcinfobuf);
4602 		if (phba->sli_rev <= LPFC_SLI_REV3) {
4603 			link_speed = lpfc_sli_port_speed_get(phba);
4604 		} else {
4605 			if (phba->sli4_hba.link_state.logical_speed)
4606 				link_speed =
4607 				      phba->sli4_hba.link_state.logical_speed;
4608 			else
4609 				link_speed = phba->sli4_hba.link_state.speed;
4610 		}
4611 		if (link_speed != 0)
4612 			snprintf(lpfcinfobuf + len, 384-len,
4613 				 " Logical Link Speed: %d Mbps", link_speed);
4614 	}
4615 	return lpfcinfobuf;
4616 }
4617 
4618 /**
4619  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
4620  * @phba: The Hba for which this call is being executed.
4621  *
4622  * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
4623  * The default value of cfg_poll_tmo is 10 milliseconds.
4624  **/
4625 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
4626 {
4627 	unsigned long  poll_tmo_expires =
4628 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4629 
4630 	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
4631 		mod_timer(&phba->fcp_poll_timer,
4632 			  poll_tmo_expires);
4633 }
4634 
4635 /**
4636  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4637  * @phba: The Hba for which this call is being executed.
4638  *
4639  * This routine starts the fcp_poll_timer of @phba.
4640  **/
4641 void lpfc_poll_start_timer(struct lpfc_hba *phba)
4642 {
4643 	lpfc_poll_rearm_timer(phba);
4644 }
4645 
4646 /**
4647  * lpfc_poll_timeout - Restart polling timer
4648  * @ptr: Pointer to the lpfc_hba data structure, cast to unsigned long.
4649  *
4650  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4651  * and the FCP ring interrupt is disabled.
4652  **/
4653 
4654 void lpfc_poll_timeout(unsigned long ptr)
4655 {
4656 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
4657 
4658 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4659 		lpfc_sli_handle_fast_ring_event(phba,
4660 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4661 
4662 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4663 			lpfc_poll_rearm_timer(phba);
4664 	}
4665 }
4666 
4667 /**
4668  * lpfc_queuecommand - scsi_host_template queuecommand entry point
4669  * @shost: Pointer to Scsi_Host data structure.
4670  * @cmnd: Pointer to scsi_cmnd data structure.
4671  *
4672  * The driver registers this routine with the scsi midlayer to submit a @cmnd
4673  * for processing. This routine prepares an IOCB from the scsi command and
4674  * hands it to the firmware; cmnd->scsi_done is invoked once processing ends.
4675  *
4676  * Return value :
4677  *   0 - Success
4678  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4679  **/
4680 static int
4681 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4682 {
4683 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4684 	struct lpfc_hba   *phba = vport->phba;
4685 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
4686 	struct lpfc_nodelist *ndlp;
4687 	struct lpfc_scsi_buf *lpfc_cmd;
4688 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4689 	int err;
4690 
4691 	err = fc_remote_port_chkready(rport);
4692 	if (err) {
4693 		cmnd->result = err;
4694 		goto out_fail_command;
4695 	}
4696 	ndlp = rdata->pnode;
4697 
4698 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4699 		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4700 
4701 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4702 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4703 				" op:%02x str=%s without registering for"
4704 				" BlockGuard - Rejecting command\n",
4705 				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4706 				dif_op_str[scsi_get_prot_op(cmnd)]);
4707 		goto out_fail_command;
4708 	}
4709 
4710 	/*
4711 	 * Catch race where our node has transitioned, but the
4712 	 * transport is still transitioning.
4713 	 */
4714 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4715 		goto out_tgt_busy;
4716 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
4717 		goto out_tgt_busy;
4718 
4719 	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
4720 	if (lpfc_cmd == NULL) {
4721 		lpfc_rampdown_queue_depth(phba);
4722 
4723 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4724 				 "0707 driver's buffer pool is empty, "
4725 				 "IO busied\n");
4726 		goto out_host_busy;
4727 	}
4728 
4729 	/*
4730 	 * Store the midlayer's command structure for the completion phase
4731 	 * and complete the command initialization.
4732 	 */
4733 	lpfc_cmd->pCmd  = cmnd;
4734 	lpfc_cmd->rdata = rdata;
4735 	lpfc_cmd->timeout = 0;
4736 	lpfc_cmd->start_time = jiffies;
4737 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4738 
4739 	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4740 		if (vport->phba->cfg_enable_bg) {
4741 			lpfc_printf_vlog(vport,
4742 					 KERN_INFO, LOG_SCSI_CMD,
4743 					 "9033 BLKGRD: rcvd %s cmd:x%x "
4744 					 "sector x%llx cnt %u pt %x\n",
4745 					 dif_op_str[scsi_get_prot_op(cmnd)],
4746 					 cmnd->cmnd[0],
4747 					 (unsigned long long)scsi_get_lba(cmnd),
4748 					 blk_rq_sectors(cmnd->request),
4749 					 (cmnd->cmnd[1]>>5));
4750 		}
4751 		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4752 	} else {
4753 		if (vport->phba->cfg_enable_bg) {
4754 			lpfc_printf_vlog(vport,
4755 					 KERN_INFO, LOG_SCSI_CMD,
4756 					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4757 					 "x%x sector x%llx cnt %u pt %x\n",
4758 					 cmnd->cmnd[0],
4759 					 (unsigned long long)scsi_get_lba(cmnd),
4760 					 blk_rq_sectors(cmnd->request),
4761 					 (cmnd->cmnd[1]>>5));
4762 		}
4763 		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4764 	}
4765 
4766 	if (err)
4767 		goto out_host_busy_free_buf;
4768 
4769 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4770 
4771 	atomic_inc(&ndlp->cmd_pending);
4772 	err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4773 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4774 	if (err) {
4775 		atomic_dec(&ndlp->cmd_pending);
4776 		goto out_host_busy_free_buf;
4777 	}
4778 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4779 		lpfc_sli_handle_fast_ring_event(phba,
4780 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4781 
4782 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4783 			lpfc_poll_rearm_timer(phba);
4784 	}
4785 
4786 	return 0;
4787 
4788  out_host_busy_free_buf:
4789 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4790 	lpfc_release_scsi_buf(phba, lpfc_cmd);
4791  out_host_busy:
4792 	return SCSI_MLQUEUE_HOST_BUSY;
4793 
4794  out_tgt_busy:
4795 	return SCSI_MLQUEUE_TARGET_BUSY;
4796 
4797  out_fail_command:
4798 	cmnd->scsi_done(cmnd);
4799 	return 0;
4800 }
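
/*
 * Sketch (illustrative only; the driver's real template is defined
 * elsewhere in lpfc) of how the entry points in this file plug into a
 * scsi_host_template.
 */
#if 0	/* example, intentionally not compiled */
static struct scsi_host_template lpfc_template_example = {
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
};
#endif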
4801 
4802 
4803 /**
4804  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4805  * @cmnd: Pointer to scsi_cmnd data structure.
4806  *
4807  * This routine aborts @cmnd pending in base driver.
4808  *
4809  * Return code :
4810  *   0x2003 - Error
4811  *   0x2002 - Success
4812  **/
4813 static int
4814 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4815 {
4816 	struct Scsi_Host  *shost = cmnd->device->host;
4817 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4818 	struct lpfc_hba   *phba = vport->phba;
4819 	struct lpfc_iocbq *iocb;
4820 	struct lpfc_iocbq *abtsiocb;
4821 	struct lpfc_scsi_buf *lpfc_cmd;
4822 	IOCB_t *cmd, *icmd;
4823 	int ret = SUCCESS, status = 0;
4824 	unsigned long flags;
4825 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4826 
4827 	status = fc_block_scsi_eh(cmnd);
4828 	if (status != 0 && status != SUCCESS)
4829 		return status;
4830 
4831 	spin_lock_irqsave(&phba->hbalock, flags);
4832 	/* driver queued commands are in process of being flushed */
4833 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
4834 		spin_unlock_irqrestore(&phba->hbalock, flags);
4835 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4836 			"3168 SCSI Layer abort requested I/O has been "
4837 			"flushed by LLD.\n");
4838 		return FAILED;
4839 	}
4840 
4841 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
4842 	if (!lpfc_cmd || !lpfc_cmd->pCmd) {
4843 		spin_unlock_irqrestore(&phba->hbalock, flags);
4844 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4845 			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4846 			 "x%x ID %d LUN %d\n",
4847 			 SUCCESS, cmnd->device->id, cmnd->device->lun);
4848 		return SUCCESS;
4849 	}
4850 
4851 	iocb = &lpfc_cmd->cur_iocbq;
4852 	/* the command is in process of being cancelled */
4853 	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4854 		spin_unlock_irqrestore(&phba->hbalock, flags);
4855 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4856 			"3169 SCSI Layer abort requested I/O has been "
4857 			"cancelled by LLD.\n");
4858 		return FAILED;
4859 	}
4860 	/*
4861 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
4862 	 * points to a different SCSI command, then the driver has
4863 	 * already completed this command, but the midlayer did not
4864 	 * see the completion before the eh fired. Just return SUCCESS.
4865 	 */
4866 	if (lpfc_cmd->pCmd != cmnd) {
4867 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4868 			"3170 SCSI Layer abort requested I/O has been "
4869 			"completed by LLD.\n");
4870 		goto out_unlock;
4871 	}
4872 
4873 	BUG_ON(iocb->context1 != lpfc_cmd);
4874 
4875 	abtsiocb = __lpfc_sli_get_iocbq(phba);
4876 	if (abtsiocb == NULL) {
4877 		ret = FAILED;
4878 		goto out_unlock;
4879 	}
4880 
4881 	/*
4882 	 * The scsi command cannot be in the txq; it is in flight because
4883 	 * pCmd is still pointing at the SCSI command we have to abort. There
4884 	 * is no need to search the txcmplq. Just send an abort to the FW.
4885 	 */
4886 
4887 	cmd = &iocb->iocb;
4888 	icmd = &abtsiocb->iocb;
4889 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4890 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
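	/*
	 * SLI-4 identifies the exchange to abort by its XRI; SLI-3 uses the
	 * IOCB tag assigned when the command was issued.
	 */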
4891 	if (phba->sli_rev == LPFC_SLI_REV4)
4892 		icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4893 	else
4894 		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4895 
4896 	icmd->ulpLe = 1;
4897 	icmd->ulpClass = cmd->ulpClass;
4898 
4899 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
4900 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4901 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4902 
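	/*
	 * With the link up an ABTS can be sent on the wire; with the link
	 * down the exchange can only be closed out locally.
	 */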
4903 	if (lpfc_is_link_up(phba))
4904 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
4905 	else
4906 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4907 
4908 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4909 	abtsiocb->vport = vport;
4910 	/* no longer need the lock after this point */
4911 	spin_unlock_irqrestore(&phba->hbalock, flags);
4912 
4913 	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
4914 	    IOCB_ERROR) {
4915 		lpfc_sli_release_iocbq(phba, abtsiocb);
4916 		ret = FAILED;
4917 		goto out;
4918 	}
4919 
4920 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4921 		lpfc_sli_handle_fast_ring_event(phba,
4922 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
4923 
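	/*
	 * The abort completes asynchronously: the FCP command completion
	 * handler (lpfc_scsi_cmd_iocb_cmpl) clears pCmd and wakes this
	 * queue. Bound the wait at twice the devloss timeout so the eh
	 * thread cannot hang forever (e.g. 2 * 30 s with the default
	 * devloss_tmo of 30).
	 */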
4924 	lpfc_cmd->waitq = &waitq;
4925 	/* Wait for abort to complete */
4926 	wait_event_timeout(waitq,
4927 			  (lpfc_cmd->pCmd != cmnd),
4928 			   msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000));
4929 	lpfc_cmd->waitq = NULL;
4930 
4931 	if (lpfc_cmd->pCmd == cmnd) {
4932 		ret = FAILED;
4933 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4934 				 "0748 abort handler timed out waiting "
4935 				 "for aborting I/O (xri:x%x) to complete: "
4936 				 "ret %#x, ID %d, LUN %d\n",
4937 				 iocb->sli4_xritag, ret,
4938 				 cmnd->device->id, cmnd->device->lun);
4939 	}
4940 	goto out;
4941 
4942 out_unlock:
4943 	spin_unlock_irqrestore(&phba->hbalock, flags);
4944 out:
4945 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4946 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4947 			 "LUN %d\n", ret, cmnd->device->id,
4948 			 cmnd->device->lun);
4949 	return ret;
4950 }
4951 
4952 static char *
4953 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4954 {
4955 	switch (task_mgmt_cmd) {
4956 	case FCP_ABORT_TASK_SET:
4957 		return "FCP_ABORT_TASK_SET";
4958 	case FCP_CLEAR_TASK_SET:
4959 		return "FCP_CLEAR_TASK_SET";
4960 	case FCP_BUS_RESET:
4961 		return "FCP_BUS_RESET";
4962 	case FCP_LUN_RESET:
4963 		return "FCP_LUN_RESET";
4964 	case FCP_TARGET_RESET:
4965 		return "FCP_TARGET_RESET";
4966 	case FCP_CLEAR_ACA:
4967 		return "FCP_CLEAR_ACA";
4968 	case FCP_TERMINATE_TASK:
4969 		return "FCP_TERMINATE_TASK";
4970 	default:
4971 		return "unknown";
4972 	}
4973 }
4974 
4975 /**
4976  * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
4977  * @vport: The virtual port for which this call is being executed.
4978  * @rdata: Pointer to remote port local data
4979  * @tgt_id: Target ID of remote device.
4980  * @lun_id: Lun number for the TMF
4981  * @task_mgmt_cmd: type of TMF to send
4982  *
4983  * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
4984  * a remote port.
4985  *
4986  * Return Code:
4987  *   0x2003 - Error
4988  *   0x2002 - Success.
4989  **/
4990 static int
4991 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
4992 		    unsigned int tgt_id, unsigned int lun_id,
4993 		    uint8_t task_mgmt_cmd)
4994 {
4995 	struct lpfc_hba   *phba = vport->phba;
4996 	struct lpfc_scsi_buf *lpfc_cmd;
4997 	struct lpfc_iocbq *iocbq;
4998 	struct lpfc_iocbq *iocbqrsp;
4999 	struct lpfc_nodelist *pnode = rdata->pnode;
5000 	int ret;
5001 	int status;
5002 
5003 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5004 		return FAILED;
5005 
5006 	lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode);
5007 	if (lpfc_cmd == NULL)
5008 		return FAILED;
5009 	lpfc_cmd->timeout = 60;
5010 	lpfc_cmd->rdata = rdata;
5011 
5012 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5013 					   task_mgmt_cmd);
5014 	if (!status) {
5015 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5016 		return FAILED;
5017 	}
5018 
5019 	iocbq = &lpfc_cmd->cur_iocbq;
5020 	iocbqrsp = lpfc_sli_get_iocbq(phba);
5021 	if (iocbqrsp == NULL) {
5022 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5023 		return FAILED;
5024 	}
5025 
5026 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5027 			 "0702 Issue %s to TGT %d LUN %d "
5028 			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5029 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5030 			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5031 			 iocbq->iocb_flag);
5032 
5033 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5034 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
5035 	if (status != IOCB_SUCCESS) {
5036 		if (status == IOCB_TIMEDOUT) {
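			/*
			 * The TMF may still complete later; redirect the
			 * completion to the default handler, which will
			 * release the scsi buffer when the late response
			 * finally arrives.
			 */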
5037 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5038 			ret = TIMEOUT_ERROR;
5039 		} else
5040 			ret = FAILED;
5041 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
5042 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5043 			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
5044 			 "iocb_flag x%x\n",
5045 			 lpfc_taskmgmt_name(task_mgmt_cmd),
5046 			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
5047 			 iocbqrsp->iocb.un.ulpWord[4],
5048 			 iocbq->iocb_flag);
5049 	} else
5050 		ret = SUCCESS;
5053 
5054 	lpfc_sli_release_iocbq(phba, iocbqrsp);
5055 
5056 	if (ret != TIMEOUT_ERROR)
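	/*
	 * On a timeout the scsi buffer must not be released here; ownership
	 * has passed to lpfc_tskmgmt_def_cmpl, which frees it when the late
	 * response arrives.
	 */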
5057 		lpfc_release_scsi_buf(phba, lpfc_cmd);
5058 
5059 	return ret;
5060 }
5061 
5062 /**
5063  * lpfc_chk_tgt_mapped - Check whether the scsi target (rport) is mapped
5064  * @vport: The virtual port to check on
5065  * @cmnd: Pointer to scsi_cmnd data structure.
5066  *
5067  * This routine delays until the scsi target (aka rport) for the
5068  * command exists (is present and logged in) or we declare it non-existent.
5069  *
5070  * Return code :
5071  *  0x2003 - Error
5072  *  0x2002 - Success
5073  **/
5074 static int
5075 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5076 {
5077 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5078 	struct lpfc_nodelist *pnode;
5079 	unsigned long later;
5080 
5081 	if (!rdata) {
5082 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5083 			"0797 Tgt Map rport failure: rdata x%p\n", rdata);
5084 		return FAILED;
5085 	}
5086 	pnode = rdata->pnode;
5087 	/*
5088 	 * If target is not in a MAPPED state, delay until
5089 	 * target is rediscovered or devloss timeout expires.
5090 	 */
5091 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5092 	while (time_after(later, jiffies)) {
5093 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5094 			return FAILED;
5095 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5096 			return SUCCESS;
5097 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
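		/* Re-read rdata: the rport can be torn down while we slept */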
5098 		rdata = cmnd->device->hostdata;
5099 		if (!rdata)
5100 			return FAILED;
5101 		pnode = rdata->pnode;
5102 	}
5103 	if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5104 	    (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5105 		return FAILED;
5106 	return SUCCESS;
5107 }
5108 
5109 /**
5110  * lpfc_reset_flush_io_context - Flush outstanding I/O after a reset TMF
5111  * @vport: The virtual port (scsi_host) for the flush context
5112  * @tgt_id: If aborting by Target context - specifies the target id
5113  * @lun_id: If aborting by Lun context - specifies the lun id
5114  * @context: specifies the context level to flush at.
5115  *
5116  * After a reset condition via TMF, we need to flush orphaned i/o
5117  * contexts from the adapter. This routine aborts any outstanding
5118  * contexts, then waits for their completions. The wait is bounded
5119  * at twice the devloss timeout.
5120  *
5121  * Return code :
5122  *  0x2003 - Error
5123  *  0x2002 - Success
5124  **/
5125 static int
5126 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5127 			uint64_t lun_id, lpfc_ctx_cmd context)
5128 {
5129 	struct lpfc_hba   *phba = vport->phba;
5130 	unsigned long later;
5131 	int cnt;
5132 
5133 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5134 	if (cnt)
5135 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
5136 				    tgt_id, lun_id, context);
5137 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
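	/* Poll every 20 ms until the I/O drains or 2 * devloss_tmo expires */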
5138 	while (time_after(later, jiffies) && cnt) {
5139 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5140 		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5141 	}
5142 	if (cnt) {
5143 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5144 			"0724 I/O flush failure for context %s : cnt x%x\n",
5145 			((context == LPFC_CTX_LUN) ? "LUN" :
5146 			 ((context == LPFC_CTX_TGT) ? "TGT" :
5147 			  ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5148 			cnt);
5149 		return FAILED;
5150 	}
5151 	return SUCCESS;
5152 }
5153 
5154 /**
5155  * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5156  * @cmnd: Pointer to scsi_cmnd data structure.
5157  *
5158  * This routine does a device reset by sending a LUN_RESET task management
5159  * command.
5160  *
5161  * Return code :
5162  *  0x2003 - Error
5163  *  0x2002 - Success
5164  **/
5165 static int
5166 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5167 {
5168 	struct Scsi_Host  *shost = cmnd->device->host;
5169 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5170 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5171 	struct lpfc_nodelist *pnode;
5172 	unsigned int tgt_id = cmnd->device->id;
5173 	unsigned int lun_id = cmnd->device->lun;
5174 	struct lpfc_scsi_event_header scsi_event;
5175 	int status, ret = SUCCESS;
5176 
5177 	if (!rdata) {
5178 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5179 			"0798 Device Reset rport failure: rdata x%p\n", rdata);
5180 		return FAILED;
5181 	}
5182 	pnode = rdata->pnode;
5183 	status = fc_block_scsi_eh(cmnd);
5184 	if (status != 0 && status != SUCCESS)
5185 		return status;
5186 
5187 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5188 	if (status == FAILED) {
5189 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5190 			"0721 Device Reset rport failure: rdata x%p\n", rdata);
5191 		return FAILED;
5192 	}
5193 
5194 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5195 	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5196 	scsi_event.lun = lun_id;
5197 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5198 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5199 
5200 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5201 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5202 
5203 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5204 						FCP_LUN_RESET);
5205 
5206 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5207 			 "0713 SCSI layer issued Device Reset (%d, %d) "
5208 			 "return x%x\n", tgt_id, lun_id, status);
5209 
5210 	/*
5211 	 * We have to clean up the i/o either way: it may have been orphaned
5212 	 * by the TMF, or, if the TMF failed, it may be left in an
5213 	 * indeterminate state. So, continue on.
5214 	 * We will report success if all the i/o aborts successfully.
5215 	 */
5216 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5217 						LPFC_CTX_LUN);
5218 	return ret;
5219 }
5220 
5221 /**
5222  * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5223  * @cmnd: Pointer to scsi_cmnd data structure.
5224  *
5225  * This routine does a target reset by sending a TARGET_RESET task management
5226  * command.
5227  *
5228  * Return code :
5229  *  0x2003 - Error
5230  *  0x2002 - Success
5231  **/
5232 static int
5233 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5234 {
5235 	struct Scsi_Host  *shost = cmnd->device->host;
5236 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5237 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
5238 	struct lpfc_nodelist *pnode;
5239 	unsigned int tgt_id = cmnd->device->id;
5240 	unsigned int lun_id = cmnd->device->lun;
5241 	struct lpfc_scsi_event_header scsi_event;
5242 	int status, ret = SUCCESS;
5243 
5244 	if (!rdata) {
5245 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5246 			"0799 Target Reset rport failure: rdata x%p\n", rdata);
5247 		return FAILED;
5248 	}
5249 	pnode = rdata->pnode;
5250 	status = fc_block_scsi_eh(cmnd);
5251 	if (status != 0 && status != SUCCESS)
5252 		return status;
5253 
5254 	status = lpfc_chk_tgt_mapped(vport, cmnd);
5255 	if (status == FAILED) {
5256 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5257 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
5258 		return FAILED;
5259 	}
5260 
5261 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5262 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5263 	scsi_event.lun = 0;
5264 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5265 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5266 
5267 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5268 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5269 
5270 	status = lpfc_send_taskmgmt(vport, rdata, tgt_id, lun_id,
5271 					FCP_TARGET_RESET);
5272 
5273 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5274 			 "0723 SCSI layer issued Target Reset (%d, %d) "
5275 			 "return x%x\n", tgt_id, lun_id, status);
5276 
5277 	/*
5278 	 * We have to clean up the i/o either way: it may have been orphaned
5279 	 * by the TMF, or, if the TMF failed, it may be left in an
5280 	 * indeterminate state. So, continue on.
5281 	 * We will report success if all the i/o aborts successfully.
5282 	 */
5283 	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5284 					  LPFC_CTX_TGT);
5285 	return ret;
5286 }
5287 
5288 /**
5289  * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5290  * @cmnd: Pointer to scsi_cmnd data structure.
5291  *
5292  * This routine does target reset to all targets on @cmnd->device->host.
5293  * This emulates Parallel SCSI Bus Reset Semantics.
5294  *
5295  * Return code :
5296  *  0x2003 - Error
5297  *  0x2002 - Success
5298  **/
5299 static int
5300 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5301 {
5302 	struct Scsi_Host  *shost = cmnd->device->host;
5303 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5304 	struct lpfc_nodelist *ndlp = NULL;
5305 	struct lpfc_scsi_event_header scsi_event;
5306 	int match;
5307 	int ret = SUCCESS, status, i;
5308 
5309 	scsi_event.event_type = FC_REG_SCSI_EVENT;
5310 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5311 	scsi_event.lun = 0;
5312 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5313 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5314 
5315 	fc_host_post_vendor_event(shost, fc_get_event_number(),
5316 		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5317 
5318 	status = fc_block_scsi_eh(cmnd);
5319 	if (status != 0 && status != SUCCESS)
5320 		return status;
5321 
5322 	/*
5323 	 * Since the driver manages a single bus device, reset all
5324 	 * targets known to the driver.  Should any target reset
5325 	 * fail, this routine returns failure to the midlayer.
5326 	 */
5327 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
5328 		/* Search for mapped node by target ID */
5329 		match = 0;
5330 		spin_lock_irq(shost->host_lock);
5331 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5332 			if (!NLP_CHK_NODE_ACT(ndlp))
5333 				continue;
5334 			if (vport->phba->cfg_fcp2_no_tgt_reset &&
5335 			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5336 				continue;
5337 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5338 			    ndlp->nlp_sid == i &&
5339 			    ndlp->rport) {
5340 				match = 1;
5341 				break;
5342 			}
5343 		}
5344 		spin_unlock_irq(shost->host_lock);
5345 		if (!match)
5346 			continue;
5347 
5348 		status = lpfc_send_taskmgmt(vport, ndlp->rport->dd_data,
5349 					i, 0, FCP_TARGET_RESET);
5350 
5351 		if (status != SUCCESS) {
5352 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5353 					 "0700 Bus Reset on target %d failed\n",
5354 					 i);
5355 			ret = FAILED;
5356 		}
5357 	}
5358 	/*
5359 	 * We have to clean up the i/o either way: it may have been orphaned
5360 	 * by the TMFs above, or, if any of the TMFs failed, it may be left
5361 	 * in an indeterminate state.
5362 	 * We will report success if all the i/o aborts successfully.
5363 	 */
5364 
5365 	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5366 	if (status != SUCCESS)
5367 		ret = FAILED;
5368 
5369 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5370 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5371 	return ret;
5372 }
5373 
5374 /**
5375  * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5376  * @cmnd: Pointer to scsi_cmnd data structure.
5377  *
5378  * This routine does a host reset of the adapter port. It brings the HBA
5379  * offline, performs a board restart, and then brings the board back online.
5380  * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
5381  * all outstanding SCSI commands on the host; an error is returned for each
5382  * back to the SCSI mid-layer. As this is the SCSI mid-layer's last resort
5383  * for error handling, the routine returns error only if the adapter reset
5384  * fails; in all other cases it returns success.
5385  *
5386  * Return code :
5387  *  0x2003 - Error
5388  *  0x2002 - Success
5389  **/
5390 static int
5391 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5392 {
5393 	struct Scsi_Host *shost = cmnd->device->host;
5394 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5395 	struct lpfc_hba *phba = vport->phba;
5396 	int rc, ret = SUCCESS;
5397 
5398 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5399 			 "3172 SCSI layer issued Host Reset\n");
5400 
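	/*
	 * Reset sequence: quiesce via lpfc_offline_prep, take the port
	 * offline, restart the board, then bring it back online. If any
	 * step fails, the reset is reported FAILED and the port is taken
	 * offline for good below.
	 */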
5401 	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5402 	lpfc_offline(phba);
5403 	rc = lpfc_sli_brdrestart(phba);
5404 	if (rc)
5405 		ret = FAILED;
5406 	rc = lpfc_online(phba);
5407 	if (rc)
5408 		ret = FAILED;
5409 	lpfc_unblock_mgmt_io(phba);
5410 
5411 	if (ret == FAILED) {
5412 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5413 				 "3323 Failed host reset, bring it offline\n");
5414 		lpfc_sli4_offline_eratt(phba);
5415 	}
5416 	return ret;
5417 }
5418 
5419 /**
5420  * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5421  * @sdev: Pointer to scsi_device.
5422  *
5423  * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
5424  * globally available list of scsi buffers. It also makes sure no more
5425  * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
5426  * This list of scsi buffers exists for the lifetime of the driver.
5427  *
5428  * Return codes:
5429  *   non-0 - Error
5430  *   0 - Success
5431  **/
5432 static int
5433 lpfc_slave_alloc(struct scsi_device *sdev)
5434 {
5435 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5436 	struct lpfc_hba   *phba = vport->phba;
5437 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5438 	uint32_t total = 0;
5439 	uint32_t num_to_alloc = 0;
5440 	int num_allocated = 0;
5441 	uint32_t sdev_cnt;
5442 
5443 	if (!rport || fc_remote_port_chkready(rport))
5444 		return -ENXIO;
5445 
5446 	sdev->hostdata = rport->dd_data;
5447 	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5448 
5449 	/*
5450 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5451 	 * available list of scsi buffers.  Don't allocate more than the
5452 	 * HBA limit conveyed to the midlayer via the host structure.  The
5453 	 * formula accounts for the lun_queue_depth + error handlers + 1
5454 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
5455 	 */
5456 	total = phba->total_scsi_bufs;
5457 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
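	/*
	 * Example (with the default lun_queue_depth of 30): each new
	 * scsi_device asks for 30 + 2 = 32 buffers, and nothing is
	 * allocated while sdev_cnt * 32 still fits in what has already
	 * been preallocated.
	 */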
5458 
5459 	/* If allocated buffers are enough do nothing */
5460 	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5461 		return 0;
5462 
5463 	/* Allow some exchanges to be available always to complete discovery */
5464 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5465 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5466 				 "0704 At limitation of %d preallocated "
5467 				 "command buffers\n", total);
5468 		return 0;
5470 	} else if (total + num_to_alloc >
5471 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
5472 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5473 				 "0705 Allocation request of %d "
5474 				 "command buffers will exceed max of %d.  "
5475 				 "Reducing allocation request to %d.\n",
5476 				 num_to_alloc, phba->cfg_hba_queue_depth,
5477 				 (phba->cfg_hba_queue_depth - total));
5478 		num_to_alloc = phba->cfg_hba_queue_depth - total;
5479 	}
5480 	num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
5481 	if (num_to_alloc != num_allocated) {
5482 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5483 				 "0708 Allocation request of %d "
5484 				 "command buffers did not succeed.  "
5485 				 "Allocated %d buffers.\n",
5486 				 num_to_alloc, num_allocated);
5487 	}
5488 	if (num_allocated > 0)
5489 		phba->total_scsi_bufs += num_allocated;
5490 	return 0;
5491 }
5492 
5493 /**
5494  * lpfc_slave_configure - scsi_host_template slave_configure entry point
5495  * @sdev: Pointer to scsi_device.
5496  *
5497  * This routine configures the following items:
5498  *   - Tag command queuing support for @sdev if supported.
5499  *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5500  *
5501  * Return codes:
5502  *   0 - Success
5503  **/
5504 static int
5505 lpfc_slave_configure(struct scsi_device *sdev)
5506 {
5507 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5508 	struct lpfc_hba   *phba = vport->phba;
5509 
5510 	if (sdev->tagged_supported)
5511 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
5512 	else
5513 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
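	/* Both paths apply cfg_lun_queue_depth; only the tagging mode differs */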
5514 
5515 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5516 		lpfc_sli_handle_fast_ring_event(phba,
5517 			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
5518 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5519 			lpfc_poll_rearm_timer(phba);
5520 	}
5521 
5522 	return 0;
5523 }
5524 
5525 /**
5526  * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5527  * @sdev: Pointer to scsi_device.
5528  *
5529  * This routine sets the @sdev hostdata field to null.
5530  **/
5531 static void
5532 lpfc_slave_destroy(struct scsi_device *sdev)
5533 {
5534 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5535 	struct lpfc_hba   *phba = vport->phba;
5536 	atomic_dec(&phba->sdev_cnt);
5537 	sdev->hostdata = NULL;
5538 	return;
5539 }
5540 
5541 
5542 struct scsi_host_template lpfc_template = {
5543 	.module			= THIS_MODULE,
5544 	.name			= LPFC_DRIVER_NAME,
5545 	.info			= lpfc_info,
5546 	.queuecommand		= lpfc_queuecommand,
5547 	.eh_abort_handler	= lpfc_abort_handler,
5548 	.eh_device_reset_handler = lpfc_device_reset_handler,
5549 	.eh_target_reset_handler = lpfc_target_reset_handler,
5550 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5551 	.eh_host_reset_handler  = lpfc_host_reset_handler,
5552 	.slave_alloc		= lpfc_slave_alloc,
5553 	.slave_configure	= lpfc_slave_configure,
5554 	.slave_destroy		= lpfc_slave_destroy,
5555 	.scan_finished		= lpfc_scan_finished,
5556 	.this_id		= -1,
5557 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5558 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5559 	.use_clustering		= ENABLE_CLUSTERING,
5560 	.shost_attrs		= lpfc_hba_attrs,
5561 	.max_sectors		= 0xFFFF,
5562 	.vendor_id		= LPFC_NL_VENDOR_ID,
5563 	.change_queue_depth	= lpfc_change_queue_depth,
5564 	.change_queue_type	= lpfc_change_queue_type,
5565 };
5566 
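/*
 * The vport template differs from lpfc_template in that it omits
 * eh_host_reset_handler (a vport cannot reset the physical HBA) and
 * exposes the vport attribute group instead of the hba attributes.
 */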
5567 struct scsi_host_template lpfc_vport_template = {
5568 	.module			= THIS_MODULE,
5569 	.name			= LPFC_DRIVER_NAME,
5570 	.info			= lpfc_info,
5571 	.queuecommand		= lpfc_queuecommand,
5572 	.eh_abort_handler	= lpfc_abort_handler,
5573 	.eh_device_reset_handler = lpfc_device_reset_handler,
5574 	.eh_target_reset_handler = lpfc_target_reset_handler,
5575 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
5576 	.slave_alloc		= lpfc_slave_alloc,
5577 	.slave_configure	= lpfc_slave_configure,
5578 	.slave_destroy		= lpfc_slave_destroy,
5579 	.scan_finished		= lpfc_scan_finished,
5580 	.this_id		= -1,
5581 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
5582 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
5583 	.use_clustering		= ENABLE_CLUSTERING,
5584 	.shost_attrs		= lpfc_vport_attrs,
5585 	.max_sectors		= 0xFFFF,
5586 	.change_queue_depth	= lpfc_change_queue_depth,
5587 	.change_queue_type	= lpfc_change_queue_type,
5588 };
5589