xref: /linux/drivers/scsi/lpfc/lpfc_scsi.c (revision 367b8112fe2ea5c39a7bb4d263dcdd9b612fae18)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31 
32 #include "lpfc_version.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_nl.h"
36 #include "lpfc_disc.h"
37 #include "lpfc_scsi.h"
38 #include "lpfc.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_crtn.h"
41 #include "lpfc_vport.h"
42 
43 #define LPFC_RESET_WAIT  2
44 #define LPFC_ABORT_WAIT  2
45 
46 /**
47  * lpfc_update_stats: Update statistical data for the command completion.
48  * @phba: Pointer to HBA object.
49  * @lpfc_cmd: lpfc scsi command object pointer.
50  *
51  * This function is called on command completion and updates the latency
52  * statistics (response-time bucket counts) for the completed command.
53  **/
54 static void
55 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
56 {
57 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
58 	struct lpfc_nodelist *pnode = rdata->pnode;
59 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
60 	unsigned long flags;
61 	struct Scsi_Host  *shost = cmd->device->host;
62 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
63 	unsigned long latency;
64 	int i;
65 
66 	if (cmd->result)
67 		return;
68 
69 	spin_lock_irqsave(shost->host_lock, flags);
70 	if (!vport->stat_data_enabled ||
71 		vport->stat_data_blocked ||
72 		!pnode->lat_data ||
73 		(phba->bucket_type == LPFC_NO_BUCKET)) {
74 		spin_unlock_irqrestore(shost->host_lock, flags);
75 		return;
76 	}
77 	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);
78 
79 	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
80 		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
81 			phba->bucket_step;
82 		if (i >= LPFC_MAX_BUCKET_COUNT)
83 			i = LPFC_MAX_BUCKET_COUNT - 1;
84 	} else {
85 		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
86 			if (latency <= (phba->bucket_base +
87 				((1<<i)*phba->bucket_step)))
88 				break;
89 	}
90 
91 	pnode->lat_data[i].cmd_count++;
92 	spin_unlock_irqrestore(shost->host_lock, flags);
93 }
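/*
 * Worked example (hypothetical values, for illustration only): with
 * bucket_base = 0 and bucket_step = 50 (ms), a 120 ms completion falls
 * into linear bucket i = (120 + 50 - 1 - 0) / 50 = 3.  With the
 * exponential bucket type, the same completion selects the first i for
 * which 120 <= bucket_base + (1 << i) * bucket_step, i.e. i = 2.
 */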
94 
95 
96 /**
97  * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
98  *                   event.
99  * @phba: Pointer to HBA context object.
100  * @vport: Pointer to vport object.
101  * @ndlp: Pointer to FC node associated with the target.
102  * @lun: Lun number of the scsi device.
103  * @old_val: Old value of the queue depth.
104  * @new_val: New value of the queue depth.
105  *
106  * This function sends an event to the mgmt application indicating
107  * there is a change in the scsi device queue depth.
108  **/
109 static void
110 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
111 		struct lpfc_vport  *vport,
112 		struct lpfc_nodelist *ndlp,
113 		uint32_t lun,
114 		uint32_t old_val,
115 		uint32_t new_val)
116 {
117 	struct lpfc_fast_path_event *fast_path_evt;
118 	unsigned long flags;
119 
120 	fast_path_evt = lpfc_alloc_fast_evt(phba);
121 	if (!fast_path_evt)
122 		return;
123 
124 	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
125 		FC_REG_SCSI_EVENT;
126 	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
127 		LPFC_EVENT_VARQUEDEPTH;
128 
129 	/* Report all luns with change in queue depth */
130 	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
131 	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
132 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
133 			&ndlp->nlp_portname, sizeof(struct lpfc_name));
134 		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
135 			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
136 	}
137 
138 	fast_path_evt->un.queue_depth_evt.oldval = old_val;
139 	fast_path_evt->un.queue_depth_evt.newval = new_val;
140 	fast_path_evt->vport = vport;
141 
142 	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
143 	spin_lock_irqsave(&phba->hbalock, flags);
144 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
145 	spin_unlock_irqrestore(&phba->hbalock, flags);
146 	lpfc_worker_wake_up(phba);
147 
148 	return;
149 }
150 
151 /*
152  * This function is called with no lock held when there is a resource
153  * error in the driver or in the firmware.
154  */
155 void
156 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
157 {
158 	unsigned long flags;
159 	uint32_t evt_posted;
160 
161 	spin_lock_irqsave(&phba->hbalock, flags);
162 	atomic_inc(&phba->num_rsrc_err);
163 	phba->last_rsrc_error_time = jiffies;
164 
165 	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
166 		spin_unlock_irqrestore(&phba->hbalock, flags);
167 		return;
168 	}
169 
170 	phba->last_ramp_down_time = jiffies;
171 
172 	spin_unlock_irqrestore(&phba->hbalock, flags);
173 
174 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
175 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
176 	if (!evt_posted)
177 		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
178 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
179 
180 	if (!evt_posted)
181 		lpfc_worker_wake_up(phba);
182 	return;
183 }
184 
185 /*
186  * This function is called with no lock held when there is a successful
187  * SCSI command completion.
188  */
189 static inline void
190 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
191 			struct scsi_device *sdev)
192 {
193 	unsigned long flags;
194 	struct lpfc_hba *phba = vport->phba;
195 	uint32_t evt_posted;
196 	atomic_inc(&phba->num_cmd_success);
197 
198 	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
199 		return;
200 	spin_lock_irqsave(&phba->hbalock, flags);
201 	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
202 	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
203 		spin_unlock_irqrestore(&phba->hbalock, flags);
204 		return;
205 	}
206 	phba->last_ramp_up_time = jiffies;
207 	spin_unlock_irqrestore(&phba->hbalock, flags);
208 
209 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
210 	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
211 	if (!evt_posted)
212 		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
213 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
214 
215 	if (!evt_posted)
216 		lpfc_worker_wake_up(phba);
217 	return;
218 }
219 
220 void
221 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
222 {
223 	struct lpfc_vport **vports;
224 	struct Scsi_Host  *shost;
225 	struct scsi_device *sdev;
226 	unsigned long new_queue_depth, old_queue_depth;
227 	unsigned long num_rsrc_err, num_cmd_success;
228 	int i;
229 	struct lpfc_rport_data *rdata;
230 
231 	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
232 	num_cmd_success = atomic_read(&phba->num_cmd_success);
233 
234 	vports = lpfc_create_vport_work_array(phba);
235 	if (vports != NULL)
236 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
237 			shost = lpfc_shost_from_vport(vports[i]);
238 			shost_for_each_device(sdev, shost) {
239 				new_queue_depth =
240 					sdev->queue_depth * num_rsrc_err /
241 					(num_rsrc_err + num_cmd_success);
242 				if (!new_queue_depth)
243 					new_queue_depth = sdev->queue_depth - 1;
244 				else
245 					new_queue_depth = sdev->queue_depth -
246 								new_queue_depth;
247 				old_queue_depth = sdev->queue_depth;
248 				if (sdev->ordered_tags)
249 					scsi_adjust_queue_depth(sdev,
250 							MSG_ORDERED_TAG,
251 							new_queue_depth);
252 				else
253 					scsi_adjust_queue_depth(sdev,
254 							MSG_SIMPLE_TAG,
255 							new_queue_depth);
256 				rdata = sdev->hostdata;
257 				if (rdata)
258 					lpfc_send_sdev_queuedepth_change_event(
259 						phba, vports[i],
260 						rdata->pnode,
261 						sdev->lun, old_queue_depth,
262 						new_queue_depth);
263 			}
264 		}
265 	lpfc_destroy_vport_work_array(phba, vports);
266 	atomic_set(&phba->num_rsrc_err, 0);
267 	atomic_set(&phba->num_cmd_success, 0);
268 }
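/*
 * Worked example (hypothetical counters, for illustration only): with
 * num_rsrc_err = 2, num_cmd_success = 8 and a current queue_depth of 30,
 * new_queue_depth = 30 * 2 / 10 = 6, so the device queue depth is lowered
 * from 30 to 24.  If the scaled value truncates to zero, the depth is
 * simply reduced by one.
 */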
269 
270 void
271 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
272 {
273 	struct lpfc_vport **vports;
274 	struct Scsi_Host  *shost;
275 	struct scsi_device *sdev;
276 	int i;
277 	struct lpfc_rport_data *rdata;
278 
279 	vports = lpfc_create_vport_work_array(phba);
280 	if (vports != NULL)
281 		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
282 			shost = lpfc_shost_from_vport(vports[i]);
283 			shost_for_each_device(sdev, shost) {
284 				if (vports[i]->cfg_lun_queue_depth <=
285 				    sdev->queue_depth)
286 					continue;
287 				if (sdev->ordered_tags)
288 					scsi_adjust_queue_depth(sdev,
289 							MSG_ORDERED_TAG,
290 							sdev->queue_depth+1);
291 				else
292 					scsi_adjust_queue_depth(sdev,
293 							MSG_SIMPLE_TAG,
294 							sdev->queue_depth+1);
295 				rdata = sdev->hostdata;
296 				if (rdata)
297 					lpfc_send_sdev_queuedepth_change_event(
298 						phba, vports[i],
299 						rdata->pnode,
300 						sdev->lun,
301 						sdev->queue_depth - 1,
302 						sdev->queue_depth);
303 			}
304 		}
305 	lpfc_destroy_vport_work_array(phba, vports);
306 	atomic_set(&phba->num_rsrc_err, 0);
307 	atomic_set(&phba->num_cmd_success, 0);
308 }
309 
310 /**
311  * lpfc_scsi_dev_block: set all scsi hosts to block state.
312  * @phba: Pointer to HBA context object.
313  *
314  * This function walks the vport list and sets each SCSI host to the block
315  * state by invoking the fc_remote_port_delete() routine. It is invoked by
316  * EEH when the device's PCI slot has been permanently disabled.
317  **/
318 void
319 lpfc_scsi_dev_block(struct lpfc_hba *phba)
320 {
321 	struct lpfc_vport **vports;
322 	struct Scsi_Host  *shost;
323 	struct scsi_device *sdev;
324 	struct fc_rport *rport;
325 	int i;
326 
327 	vports = lpfc_create_vport_work_array(phba);
328 	if (vports != NULL)
329 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
330 			shost = lpfc_shost_from_vport(vports[i]);
331 			shost_for_each_device(sdev, shost) {
332 				rport = starget_to_rport(scsi_target(sdev));
333 				fc_remote_port_delete(rport);
334 			}
335 		}
336 	lpfc_destroy_vport_work_array(phba, vports);
337 }
338 
339 /*
340  * This routine allocates a scsi buffer, which contains all the necessary
341  * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
342  * contains information to build the IOCB.  The DMAable region contains
343  * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
344  * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
345  * and the BPL BDE is set up in the IOCB.
346  */
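/*
 * For reference, the single DMA buffer allocated below is laid out as:
 *
 *   psb->data (dma_handle)
 *   +-----------+----------+--------------------------------------+
 *   | fcp_cmnd  | fcp_rsp  | BPL: cmnd BDE, rsp BDE, then up to   |
 *   |           |          | cfg_sg_seg_cnt data BDEs filled in   |
 *   |           |          | later by lpfc_scsi_prep_dma_buf()    |
 *   +-----------+----------+--------------------------------------+
 */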
347 static struct lpfc_scsi_buf *
348 lpfc_new_scsi_buf(struct lpfc_vport *vport)
349 {
350 	struct lpfc_hba *phba = vport->phba;
351 	struct lpfc_scsi_buf *psb;
352 	struct ulp_bde64 *bpl;
353 	IOCB_t *iocb;
354 	dma_addr_t pdma_phys_fcp_cmd;
355 	dma_addr_t pdma_phys_fcp_rsp;
356 	dma_addr_t pdma_phys_bpl;
357 	uint16_t iotag;
358 
359 	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
360 	if (!psb)
361 		return NULL;
362 
363 	/*
364 	 * Get memory from the pci pool to map the virt space to pci bus space
365 	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
366 	 * struct fcp_rsp and the number of bde's necessary to support the
367 	 * sg_tablesize.
368 	 */
369 	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
370 							&psb->dma_handle);
371 	if (!psb->data) {
372 		kfree(psb);
373 		return NULL;
374 	}
375 
376 	/* Initialize virtual ptrs to dma_buf region. */
377 	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
378 
379 	/* Allocate iotag for psb->cur_iocbq. */
380 	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
381 	if (iotag == 0) {
382 		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
383 			      psb->data, psb->dma_handle);
384 		kfree (psb);
385 		return NULL;
386 	}
387 	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
388 
389 	psb->fcp_cmnd = psb->data;
390 	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
391 	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
392 							sizeof(struct fcp_rsp);
393 
394 	/* Initialize local short-hand pointers. */
395 	bpl = psb->fcp_bpl;
396 	pdma_phys_fcp_cmd = psb->dma_handle;
397 	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
398 	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
399 			sizeof(struct fcp_rsp);
400 
401 	/*
402 	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
403 	 * list bdes.  Initialize the first two and leave the rest for
404 	 * queuecommand.
405 	 */
406 	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
407 	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
408 	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
409 	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
410 	bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
411 
412 	/* Setup the physical region for the FCP RSP */
413 	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
414 	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
415 	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
416 	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
417 	bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
418 
419 	/*
420 	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
421 	 * initialize it with all known data now.
422 	 */
423 	iocb = &psb->cur_iocbq.iocb;
424 	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
425 	if (phba->sli_rev == 3) {
426 		/* fill in immediate fcp command BDE */
427 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
428 		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
429 		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
430 						       unsli3.fcp_ext.icd);
431 		iocb->un.fcpi64.bdl.addrHigh = 0;
432 		iocb->ulpBdeCount = 0;
433 		iocb->ulpLe = 0;
434 		/* fill in response BDE */
435 		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
436 		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
437 						sizeof(struct fcp_rsp);
438 		iocb->unsli3.fcp_ext.rbde.addrLow =
439 						putPaddrLow(pdma_phys_fcp_rsp);
440 		iocb->unsli3.fcp_ext.rbde.addrHigh =
441 						putPaddrHigh(pdma_phys_fcp_rsp);
442 	} else {
443 		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
444 		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
445 		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
446 		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
447 		iocb->ulpBdeCount = 1;
448 		iocb->ulpLe = 1;
449 	}
450 	iocb->ulpClass = CLASS3;
451 
452 	return psb;
453 }
454 
455 static struct lpfc_scsi_buf*
456 lpfc_get_scsi_buf(struct lpfc_hba * phba)
457 {
458 	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
459 	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
460 	unsigned long iflag = 0;
461 
462 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
463 	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
464 	if (lpfc_cmd) {
465 		lpfc_cmd->seg_cnt = 0;
466 		lpfc_cmd->nonsg_phys = 0;
467 	}
468 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
469 	return  lpfc_cmd;
470 }
471 
472 static void
473 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
474 {
475 	unsigned long iflag = 0;
476 
477 	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
478 	psb->pCmd = NULL;
479 	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
480 	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
481 }
482 
483 static int
484 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
485 {
486 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
487 	struct scatterlist *sgel = NULL;
488 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
489 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
490 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
491 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
492 	dma_addr_t physaddr;
493 	uint32_t num_bde = 0;
494 	int nseg, datadir = scsi_cmnd->sc_data_direction;
495 
496 	/*
497 	 * There are three possibilities here - use scatter-gather segment, use
498 	 * the single mapping, or neither.  Start the lpfc command prep by
499 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
500 	 * data bde entry.
501 	 */
502 	bpl += 2;
503 	if (scsi_sg_count(scsi_cmnd)) {
504 		/*
505 		 * The driver stores the segment count returned from dma_map_sg
506 		 * because this is a count of dma-mappings used to map the use_sg
507 		 * pages.  They are not guaranteed to be the same for those
508 		 * architectures that implement an IOMMU.
509 		 */
510 
511 		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
512 				  scsi_sg_count(scsi_cmnd), datadir);
513 		if (unlikely(!nseg))
514 			return 1;
515 
516 		lpfc_cmd->seg_cnt = nseg;
517 		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
518 			printk(KERN_ERR "%s: Too many sg segments from "
519 			       "dma_map_sg.  Config %d, seg_cnt %d\n",
520 			       __func__, phba->cfg_sg_seg_cnt,
521 			       lpfc_cmd->seg_cnt);
522 			scsi_dma_unmap(scsi_cmnd);
523 			return 1;
524 		}
525 
526 		/*
527 		 * The driver established a maximum scatter-gather segment count
528 		 * during probe that limits the number of sg elements in any
529 		 * single scsi command.  Just run through the seg_cnt and format
530 		 * the bde's.
531 		 * When using SLI-3 the driver will try to fit all the BDEs into
532 		 * the IOCB. If it can't then the BDEs get added to a BPL as it
533 		 * does for SLI-2 mode.
534 		 */
535 		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
536 			physaddr = sg_dma_address(sgel);
537 			if (phba->sli_rev == 3 &&
538 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
539 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
540 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
541 				data_bde->addrLow = putPaddrLow(physaddr);
542 				data_bde->addrHigh = putPaddrHigh(physaddr);
543 				data_bde++;
544 			} else {
545 				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
546 				bpl->tus.f.bdeSize = sg_dma_len(sgel);
547 				bpl->tus.w = le32_to_cpu(bpl->tus.w);
548 				bpl->addrLow =
549 					le32_to_cpu(putPaddrLow(physaddr));
550 				bpl->addrHigh =
551 					le32_to_cpu(putPaddrHigh(physaddr));
552 				bpl++;
553 			}
554 		}
555 	}
556 
557 	/*
558 	 * Finish initializing those IOCB fields that are dependent on the
559 	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
560 	 * explicitly reinitialized and for SLI-3 the extended bde count is
561 	 * explicitly reinitialized since all iocb memory resources are reused.
562 	 */
563 	if (phba->sli_rev == 3) {
564 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
565 			/*
566 			 * The extended IOCB format can only fit 3 BDE or a BPL.
567 			 * This I/O has more than 3 BDE so the 1st data bde will
568 			 * be a BPL that is filled in here.
569 			 */
570 			physaddr = lpfc_cmd->dma_handle;
571 			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
572 			data_bde->tus.f.bdeSize = (num_bde *
573 						   sizeof(struct ulp_bde64));
574 			physaddr += (sizeof(struct fcp_cmnd) +
575 				     sizeof(struct fcp_rsp) +
576 				     (2 * sizeof(struct ulp_bde64)));
577 			data_bde->addrHigh = putPaddrHigh(physaddr);
578 			data_bde->addrLow = putPaddrLow(physaddr);
579 			/* ebde count includes the response bde and data bpl */
580 			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
581 		} else {
582 			/* ebde count includes the response bde and data bdes */
583 			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
584 		}
585 	} else {
586 		iocb_cmd->un.fcpi64.bdl.bdeSize =
587 			((num_bde + 2) * sizeof(struct ulp_bde64));
588 	}
589 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
590 	return 0;
591 }
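/*
 * Illustration (hypothetical segment counts; LPFC_EXT_DATA_BDE_COUNT is
 * assumed to be 3 here): on SLI-3, an I/O that maps to 2 data segments
 * fits entirely in the extended IOCB, so ebde_count = 2 + 1 = 3 (the
 * response BDE plus the data BDEs).  An I/O that maps to 5 segments does
 * not fit, so the first data BDE becomes a BPL pointer and ebde_count is
 * 2.  On SLI-2, bdl.bdeSize is always (num_bde + 2) * sizeof(struct
 * ulp_bde64), covering the FCP_CMD and FCP_RSP entries as well.
 */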
592 
593 /**
594  * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
595  * @phba: Pointer to hba context object.
596  * @vport: Pointer to vport object.
597  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
598  * @rsp_iocb: Pointer to response iocb object which reported error.
599  *
600  * This function posts an event when a SCSI command reports an error
601  * from the scsi device.
602  **/
603 static void
604 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
605 		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
606 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
607 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
608 	uint32_t resp_info = fcprsp->rspStatus2;
609 	uint32_t scsi_status = fcprsp->rspStatus3;
610 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
611 	struct lpfc_fast_path_event *fast_path_evt = NULL;
612 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
613 	unsigned long flags;
614 
615 	/* If there is a queue full or busy condition, send a scsi event */
616 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
617 		(cmnd->result == SAM_STAT_BUSY)) {
618 		fast_path_evt = lpfc_alloc_fast_evt(phba);
619 		if (!fast_path_evt)
620 			return;
621 		fast_path_evt->un.scsi_evt.event_type =
622 			FC_REG_SCSI_EVENT;
623 		fast_path_evt->un.scsi_evt.subcategory =
624 		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
625 		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
626 		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
627 		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
628 			&pnode->nlp_portname, sizeof(struct lpfc_name));
629 		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
630 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
631 	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
632 		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
633 		fast_path_evt = lpfc_alloc_fast_evt(phba);
634 		if (!fast_path_evt)
635 			return;
636 		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
637 			FC_REG_SCSI_EVENT;
638 		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
639 			LPFC_EVENT_CHECK_COND;
640 		fast_path_evt->un.check_cond_evt.scsi_event.lun =
641 			cmnd->device->lun;
642 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
643 			&pnode->nlp_portname, sizeof(struct lpfc_name));
644 		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
645 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
646 		fast_path_evt->un.check_cond_evt.sense_key =
647 			cmnd->sense_buffer[2] & 0xf;
648 		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
649 		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
650 	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
651 		     fcpi_parm &&
652 		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
653 			((scsi_status == SAM_STAT_GOOD) &&
654 			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
655 		/*
656 		 * If fcpi_parm is valid and either the resid does not match it or the
657 		 * status is good with no residual reported, then there is a read_check error
658 		 */
659 		fast_path_evt = lpfc_alloc_fast_evt(phba);
660 		if (!fast_path_evt)
661 			return;
662 		fast_path_evt->un.read_check_error.header.event_type =
663 			FC_REG_FABRIC_EVENT;
664 		fast_path_evt->un.read_check_error.header.subcategory =
665 			LPFC_EVENT_FCPRDCHKERR;
666 		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
667 			&pnode->nlp_portname, sizeof(struct lpfc_name));
668 		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
669 			&pnode->nlp_nodename, sizeof(struct lpfc_name));
670 		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
671 		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
672 		fast_path_evt->un.read_check_error.fcpiparam =
673 			fcpi_parm;
674 	} else
675 		return;
676 
677 	fast_path_evt->vport = vport;
678 	spin_lock_irqsave(&phba->hbalock, flags);
679 	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
680 	spin_unlock_irqrestore(&phba->hbalock, flags);
681 	lpfc_worker_wake_up(phba);
682 	return;
683 }
684 static void
685 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
686 {
687 	/*
688 	 * There are only two special cases to consider.  (1) the scsi command
689 	 * requested scatter-gather usage or (2) the scsi command allocated
690 	 * a request buffer, but did not request use_sg.  There is a third
691 	 * case, but it does not require resource deallocation.
692 	 */
693 	if (psb->seg_cnt > 0)
694 		scsi_dma_unmap(psb->pCmd);
695 }
696 
697 static void
698 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
699 		    struct lpfc_iocbq *rsp_iocb)
700 {
701 	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
702 	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
703 	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
704 	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
705 	uint32_t resp_info = fcprsp->rspStatus2;
706 	uint32_t scsi_status = fcprsp->rspStatus3;
707 	uint32_t *lp;
708 	uint32_t host_status = DID_OK;
709 	uint32_t rsplen = 0;
710 	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
711 
712 
713 	/*
714 	 *  If this is a task management command, there is no
715 	 *  scsi packet associated with this lpfc_cmd.  The driver
716 	 *  consumes it.
717 	 */
718 	if (fcpcmd->fcpCntl2) {
719 		scsi_status = 0;
720 		goto out;
721 	}
722 
723 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
724 		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
725 		if (snslen > SCSI_SENSE_BUFFERSIZE)
726 			snslen = SCSI_SENSE_BUFFERSIZE;
727 
728 		if (resp_info & RSP_LEN_VALID)
729 			rsplen = be32_to_cpu(fcprsp->rspRspLen);
730 		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
731 	}
732 	lp = (uint32_t *)cmnd->sense_buffer;
733 
734 	if (!scsi_status && (resp_info & RESID_UNDER))
735 		logit = LOG_FCP;
736 
737 	lpfc_printf_vlog(vport, KERN_WARNING, logit,
738 			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
739 			 "Data: x%x x%x x%x x%x x%x\n",
740 			 cmnd->cmnd[0], scsi_status,
741 			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
742 			 be32_to_cpu(fcprsp->rspResId),
743 			 be32_to_cpu(fcprsp->rspSnsLen),
744 			 be32_to_cpu(fcprsp->rspRspLen),
745 			 fcprsp->rspInfo3);
746 
747 	if (resp_info & RSP_LEN_VALID) {
748 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
749 		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
750 		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
751 			host_status = DID_ERROR;
752 			goto out;
753 		}
754 	}
755 
756 	scsi_set_resid(cmnd, 0);
757 	if (resp_info & RESID_UNDER) {
758 		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
759 
760 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
761 				 "0716 FCP Read Underrun, expected %d, "
762 				 "residual %d Data: x%x x%x x%x\n",
763 				 be32_to_cpu(fcpcmd->fcpDl),
764 				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
765 				 cmnd->underflow);
766 
767 		/*
768 		 * If there is an underrun, check whether the underrun reported by
769 		 * the storage array matches the underrun reported by the HBA.
770 		 * If they do not match, a frame was dropped.
771 		 */
772 		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
773 			fcpi_parm &&
774 			(scsi_get_resid(cmnd) != fcpi_parm)) {
775 			lpfc_printf_vlog(vport, KERN_WARNING,
776 					 LOG_FCP | LOG_FCP_ERROR,
777 					 "0735 FCP Read Check Error "
778 					 "and Underrun Data: x%x x%x x%x x%x\n",
779 					 be32_to_cpu(fcpcmd->fcpDl),
780 					 scsi_get_resid(cmnd), fcpi_parm,
781 					 cmnd->cmnd[0]);
782 			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
783 			host_status = DID_ERROR;
784 		}
785 		/*
786 		 * The cmnd->underflow is the minimum number of bytes that must
787 		 * be transferred for this command.  Provided a sense condition
788 		 * is not present, make sure the actual amount transferred is at
789 		 * least the underflow value or fail.
790 		 */
791 		if (!(resp_info & SNS_LEN_VALID) &&
792 		    (scsi_status == SAM_STAT_GOOD) &&
793 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
794 		     < cmnd->underflow)) {
795 			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
796 					 "0717 FCP command x%x residual "
797 					 "underrun converted to error "
798 					 "Data: x%x x%x x%x\n",
799 					 cmnd->cmnd[0], scsi_bufflen(cmnd),
800 					 scsi_get_resid(cmnd), cmnd->underflow);
801 			host_status = DID_ERROR;
802 		}
803 	} else if (resp_info & RESID_OVER) {
804 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
805 				 "0720 FCP command x%x residual overrun error. "
806 				 "Data: x%x x%x\n", cmnd->cmnd[0],
807 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
808 		host_status = DID_ERROR;
809 
810 	/*
811 	 * Check the SLI validation that the entire transfer was actually done
812 	 * (fcpi_parm should be zero).  This check applies only to reads.
813 	 */
814 	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
815 			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
816 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
817 				 "0734 FCP Read Check Error Data: "
818 				 "x%x x%x x%x x%x\n",
819 				 be32_to_cpu(fcpcmd->fcpDl),
820 				 be32_to_cpu(fcprsp->rspResId),
821 				 fcpi_parm, cmnd->cmnd[0]);
822 		host_status = DID_ERROR;
823 		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
824 	}
825 
826  out:
827 	cmnd->result = ScsiResult(host_status, scsi_status);
828 	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
829 }
830 
831 static void
832 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
833 			struct lpfc_iocbq *pIocbOut)
834 {
835 	struct lpfc_scsi_buf *lpfc_cmd =
836 		(struct lpfc_scsi_buf *) pIocbIn->context1;
837 	struct lpfc_vport      *vport = pIocbIn->vport;
838 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
839 	struct lpfc_nodelist *pnode = rdata->pnode;
840 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
841 	int result;
842 	struct scsi_device *sdev, *tmp_sdev;
843 	int depth = 0;
844 	unsigned long flags;
845 	struct lpfc_fast_path_event *fast_path_evt;
846 
847 	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
848 	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
849 	atomic_dec(&pnode->cmd_pending);
850 
851 	if (lpfc_cmd->status) {
852 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
853 		    (lpfc_cmd->result & IOERR_DRVR_MASK))
854 			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
855 		else if (lpfc_cmd->status >= IOSTAT_CNT)
856 			lpfc_cmd->status = IOSTAT_DEFAULT;
857 
858 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
859 				 "0729 FCP cmd x%x failed <%d/%d> "
860 				 "status: x%x result: x%x Data: x%x x%x\n",
861 				 cmd->cmnd[0],
862 				 cmd->device ? cmd->device->id : 0xffff,
863 				 cmd->device ? cmd->device->lun : 0xffff,
864 				 lpfc_cmd->status, lpfc_cmd->result,
865 				 pIocbOut->iocb.ulpContext,
866 				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
867 
868 		switch (lpfc_cmd->status) {
869 		case IOSTAT_FCP_RSP_ERROR:
870 			/* Call FCP RSP handler to determine result */
871 			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
872 			break;
873 		case IOSTAT_NPORT_BSY:
874 		case IOSTAT_FABRIC_BSY:
875 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
876 			fast_path_evt = lpfc_alloc_fast_evt(phba);
877 			if (!fast_path_evt)
878 				break;
879 			fast_path_evt->un.fabric_evt.event_type =
880 				FC_REG_FABRIC_EVENT;
881 			fast_path_evt->un.fabric_evt.subcategory =
882 				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
883 				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
884 			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
885 				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
886 					&pnode->nlp_portname,
887 					sizeof(struct lpfc_name));
888 				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
889 					&pnode->nlp_nodename,
890 					sizeof(struct lpfc_name));
891 			}
892 			fast_path_evt->vport = vport;
893 			fast_path_evt->work_evt.evt =
894 				LPFC_EVT_FASTPATH_MGMT_EVT;
895 			spin_lock_irqsave(&phba->hbalock, flags);
896 			list_add_tail(&fast_path_evt->work_evt.evt_listp,
897 				&phba->work_list);
898 			spin_unlock_irqrestore(&phba->hbalock, flags);
899 			lpfc_worker_wake_up(phba);
900 			break;
901 		case IOSTAT_LOCAL_REJECT:
902 			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
903 			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
904 			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
905 				cmd->result = ScsiResult(DID_REQUEUE, 0);
906 				break;
907 			} /* else: fall through */
908 		default:
909 			cmd->result = ScsiResult(DID_ERROR, 0);
910 			break;
911 		}
912 
913 		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
914 		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
915 			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
916 						 SAM_STAT_BUSY);
917 	} else {
918 		cmd->result = ScsiResult(DID_OK, 0);
919 	}
920 
921 	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
922 		uint32_t *lp = (uint32_t *)cmd->sense_buffer;
923 
924 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
925 				 "0710 Iodone <%d/%d> cmd %p, error "
926 				 "x%x SNS x%x x%x Data: x%x x%x\n",
927 				 cmd->device->id, cmd->device->lun, cmd,
928 				 cmd->result, *lp, *(lp + 3), cmd->retries,
929 				 scsi_get_resid(cmd));
930 	}
931 
932 	lpfc_update_stats(phba, lpfc_cmd);
933 	result = cmd->result;
934 	sdev = cmd->device;
935 	if (vport->cfg_max_scsicmpl_time &&
936 	   time_after(jiffies, lpfc_cmd->start_time +
937 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
938 		spin_lock_irqsave(sdev->host->host_lock, flags);
939 		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
940 		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
941 		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
942 			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);
943 
944 		pnode->last_change_time = jiffies;
945 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
946 	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
947 		   time_after(jiffies, pnode->last_change_time +
948 			msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
949 		spin_lock_irqsave(sdev->host->host_lock, flags);
950 		pnode->cmd_qdepth += pnode->cmd_qdepth *
951 			LPFC_TGTQ_RAMPUP_PCENT / 100;
952 		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
953 			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
954 		pnode->last_change_time = jiffies;
955 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
956 	}
957 
958 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
959 	cmd->scsi_done(cmd);
960 
961 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
962 		/*
963 		 * If there is a thread waiting for command completion,
964 		 * wake up the thread.
965 		 */
966 		spin_lock_irqsave(sdev->host->host_lock, flags);
967 		lpfc_cmd->pCmd = NULL;
968 		if (lpfc_cmd->waitq)
969 			wake_up(lpfc_cmd->waitq);
970 		spin_unlock_irqrestore(sdev->host->host_lock, flags);
971 		lpfc_release_scsi_buf(phba, lpfc_cmd);
972 		return;
973 	}
974 
975 
976 	if (!result)
977 		lpfc_rampup_queue_depth(vport, sdev);
978 
979 	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
980 	   ((jiffies - pnode->last_ramp_up_time) >
981 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
982 	   ((jiffies - pnode->last_q_full_time) >
983 		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
984 	   (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
985 		shost_for_each_device(tmp_sdev, sdev->host) {
986 			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
987 				if (tmp_sdev->id != sdev->id)
988 					continue;
989 				if (tmp_sdev->ordered_tags)
990 					scsi_adjust_queue_depth(tmp_sdev,
991 						MSG_ORDERED_TAG,
992 						tmp_sdev->queue_depth+1);
993 				else
994 					scsi_adjust_queue_depth(tmp_sdev,
995 						MSG_SIMPLE_TAG,
996 						tmp_sdev->queue_depth+1);
997 
998 				pnode->last_ramp_up_time = jiffies;
999 			}
1000 		}
1001 		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1002 			0xFFFFFFFF,
1003 			sdev->queue_depth - 1, sdev->queue_depth);
1004 	}
1005 
1006 	/*
1007 	 * Check for queue full.  If the lun is reporting queue full, then
1008 	 * back off the lun queue depth to prevent target overloads.
1009 	 */
1010 	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1011 	    NLP_CHK_NODE_ACT(pnode)) {
1012 		pnode->last_q_full_time = jiffies;
1013 
1014 		shost_for_each_device(tmp_sdev, sdev->host) {
1015 			if (tmp_sdev->id != sdev->id)
1016 				continue;
1017 			depth = scsi_track_queue_full(tmp_sdev,
1018 					tmp_sdev->queue_depth - 1);
1019 		}
1020 		/*
1021 		 * The queue depth cannot be lowered any more.
1022 		 * Modify the returned error code to store
1023 		 * the final depth value set by
1024 		 * scsi_track_queue_full.
1025 		 */
1026 		if (depth == -1)
1027 			depth = sdev->host->cmd_per_lun;
1028 
1029 		if (depth) {
1030 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1031 					 "0711 detected queue full - lun queue "
1032 					 "depth adjusted to %d.\n", depth);
1033 			lpfc_send_sdev_queuedepth_change_event(phba, vport,
1034 				pnode, 0xFFFFFFFF,
1035 				depth+1, depth);
1036 		}
1037 	}
1038 
1039 	/*
1040 	 * If there is a thread waiting for command completion,
1041 	 * wake up the thread.
1042 	 */
1043 	spin_lock_irqsave(sdev->host->host_lock, flags);
1044 	lpfc_cmd->pCmd = NULL;
1045 	if (lpfc_cmd->waitq)
1046 		wake_up(lpfc_cmd->waitq);
1047 	spin_unlock_irqrestore(sdev->host->host_lock, flags);
1048 
1049 	lpfc_release_scsi_buf(phba, lpfc_cmd);
1050 }
1051 
1052 /**
1053  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
1054  * @data: A pointer to the immediate command data portion of the IOCB.
1055  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1056  *
1057  * The routine copies the entire FCP command from @fcp_cmnd to @data while
1058  * byte swapping the data to big endian format for transmission on the wire.
1059  **/
1060 static void
1061 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1062 {
1063 	int i, j;
1064 	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1065 	     i += sizeof(uint32_t), j++) {
1066 		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1067 	}
1068 }
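/*
 * Example (little-endian host, hypothetical word value, for illustration
 * only): if the first 32-bit word of the FCP command is 0x00002833 in
 * host order, the loop above stores it into the IOCB immediate data as
 * 0x33280000, so the payload leaves the host in big-endian byte order.
 */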
1069 
1070 static void
1071 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1072 		    struct lpfc_nodelist *pnode)
1073 {
1074 	struct lpfc_hba *phba = vport->phba;
1075 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1076 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1077 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1078 	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1079 	int datadir = scsi_cmnd->sc_data_direction;
1080 	char tag[2];
1081 
1082 	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1083 		return;
1084 
1085 	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
1086 	/* clear task management bits */
1087 	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
1088 
1089 	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1090 			&lpfc_cmd->fcp_cmnd->fcp_lun);
1091 
1092 	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1093 
1094 	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1095 		switch (tag[0]) {
1096 		case HEAD_OF_QUEUE_TAG:
1097 			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1098 			break;
1099 		case ORDERED_QUEUE_TAG:
1100 			fcp_cmnd->fcpCntl1 = ORDERED_Q;
1101 			break;
1102 		default:
1103 			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
1104 			break;
1105 		}
1106 	} else
1107 		fcp_cmnd->fcpCntl1 = 0;
1108 
1109 	/*
1110 	 * There are three possibilities here - use scatter-gather segment, use
1111 	 * the single mapping, or neither.  Start the lpfc command prep by
1112 	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
1113 	 * data bde entry.
1114 	 */
1115 	if (scsi_sg_count(scsi_cmnd)) {
1116 		if (datadir == DMA_TO_DEVICE) {
1117 			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
1118 			iocb_cmd->un.fcpi.fcpi_parm = 0;
1119 			iocb_cmd->ulpPU = 0;
1120 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
1121 			phba->fc4OutputRequests++;
1122 		} else {
1123 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
1124 			iocb_cmd->ulpPU = PARM_READ_CHECK;
1125 			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
1126 			fcp_cmnd->fcpCntl3 = READ_DATA;
1127 			phba->fc4InputRequests++;
1128 		}
1129 	} else {
1130 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
1131 		iocb_cmd->un.fcpi.fcpi_parm = 0;
1132 		iocb_cmd->ulpPU = 0;
1133 		fcp_cmnd->fcpCntl3 = 0;
1134 		phba->fc4ControlRequests++;
1135 	}
1136 	if (phba->sli_rev == 3)
1137 		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
1138 	/*
1139 	 * Finish initializing those IOCB fields that are independent
1140 	 * of the scsi_cmnd request_buffer
1141 	 */
1142 	piocbq->iocb.ulpContext = pnode->nlp_rpi;
1143 	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
1144 		piocbq->iocb.ulpFCP2Rcvy = 1;
1145 	else
1146 		piocbq->iocb.ulpFCP2Rcvy = 0;
1147 
1148 	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
1149 	piocbq->context1  = lpfc_cmd;
1150 	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
1151 	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
1152 	piocbq->vport = vport;
1153 }
1154 
1155 static int
1156 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
1157 			     struct lpfc_scsi_buf *lpfc_cmd,
1158 			     unsigned int lun,
1159 			     uint8_t task_mgmt_cmd)
1160 {
1161 	struct lpfc_iocbq *piocbq;
1162 	IOCB_t *piocb;
1163 	struct fcp_cmnd *fcp_cmnd;
1164 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
1165 	struct lpfc_nodelist *ndlp = rdata->pnode;
1166 
1167 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1168 	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
1169 		return 0;
1170 
1171 	piocbq = &(lpfc_cmd->cur_iocbq);
1172 	piocbq->vport = vport;
1173 
1174 	piocb = &piocbq->iocb;
1175 
1176 	fcp_cmnd = lpfc_cmd->fcp_cmnd;
1177 	/* Clear out any old data in the FCP command area */
1178 	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
1179 	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
1180 	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
1181 	if (vport->phba->sli_rev == 3)
1182 		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
1183 	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
1184 	piocb->ulpContext = ndlp->nlp_rpi;
1185 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
1186 		piocb->ulpFCP2Rcvy = 1;
1187 	}
1188 	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
1189 
1190 	/* ulpTimeout is only one byte */
1191 	if (lpfc_cmd->timeout > 0xff) {
1192 		/*
1193 		 * Do not timeout the command at the firmware level.
1194 		 * The driver will provide the timeout mechanism.
1195 		 */
1196 		piocb->ulpTimeout = 0;
1197 	} else {
1198 		piocb->ulpTimeout = lpfc_cmd->timeout;
1199 	}
1200 
1201 	return 1;
1202 }
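/*
 * Example (hypothetical timeouts, for illustration only): a task
 * management command with lpfc_cmd->timeout = 60 fits in the one-byte
 * ulpTimeout field, so the firmware enforces the timeout; a timeout of
 * 300 exceeds 0xff, so ulpTimeout is set to 0 and the driver enforces
 * the timeout itself.
 */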
1203 
1204 static void
1205 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
1206 			struct lpfc_iocbq *cmdiocbq,
1207 			struct lpfc_iocbq *rspiocbq)
1208 {
1209 	struct lpfc_scsi_buf *lpfc_cmd =
1210 		(struct lpfc_scsi_buf *) cmdiocbq->context1;
1211 	if (lpfc_cmd)
1212 		lpfc_release_scsi_buf(phba, lpfc_cmd);
1213 	return;
1214 }
1215 
1216 static int
1217 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
1218 		    unsigned  tgt_id, unsigned int lun,
1219 		    struct lpfc_rport_data *rdata)
1220 {
1221 	struct lpfc_hba   *phba = vport->phba;
1222 	struct lpfc_iocbq *iocbq;
1223 	struct lpfc_iocbq *iocbqrsp;
1224 	int ret;
1225 	int status;
1226 
1227 	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
1228 		return FAILED;
1229 
1230 	lpfc_cmd->rdata = rdata;
1231 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
1232 					   FCP_TARGET_RESET);
1233 	if (!status)
1234 		return FAILED;
1235 
1236 	iocbq = &lpfc_cmd->cur_iocbq;
1237 	iocbqrsp = lpfc_sli_get_iocbq(phba);
1238 
1239 	if (!iocbqrsp)
1240 		return FAILED;
1241 
1242 	/* Issue Target Reset to TGT <num> */
1243 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1244 			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
1245 			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
1246 	status = lpfc_sli_issue_iocb_wait(phba,
1247 				       &phba->sli.ring[phba->sli.fcp_ring],
1248 				       iocbq, iocbqrsp, lpfc_cmd->timeout);
1249 	if (status != IOCB_SUCCESS) {
1250 		if (status == IOCB_TIMEDOUT) {
1251 			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1252 			ret = TIMEOUT_ERROR;
1253 		} else
1254 			ret = FAILED;
1255 		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1256 	} else {
1257 		ret = SUCCESS;
1258 		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
1259 		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
1260 		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1261 			(lpfc_cmd->result & IOERR_DRVR_MASK))
1262 				lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1263 	}
1264 
1265 	lpfc_sli_release_iocbq(phba, iocbqrsp);
1266 	return ret;
1267 }
1268 
1269 const char *
1270 lpfc_info(struct Scsi_Host *host)
1271 {
1272 	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
1273 	struct lpfc_hba   *phba = vport->phba;
1274 	int len;
1275 	static char  lpfcinfobuf[384];
1276 
1277 	memset(lpfcinfobuf,0,384);
1278 	if (phba && phba->pcidev){
1279 		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
1280 		len = strlen(lpfcinfobuf);
1281 		snprintf(lpfcinfobuf + len,
1282 			384-len,
1283 			" on PCI bus %02x device %02x irq %d",
1284 			phba->pcidev->bus->number,
1285 			phba->pcidev->devfn,
1286 			phba->pcidev->irq);
1287 		len = strlen(lpfcinfobuf);
1288 		if (phba->Port[0]) {
1289 			snprintf(lpfcinfobuf + len,
1290 				 384-len,
1291 				 " port %s",
1292 				 phba->Port);
1293 		}
1294 	}
1295 	return lpfcinfobuf;
1296 }
1297 
1298 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
1299 {
1300 	unsigned long  poll_tmo_expires =
1301 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
1302 
1303 	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
1304 		mod_timer(&phba->fcp_poll_timer,
1305 			  poll_tmo_expires);
1306 }
1307 
1308 void lpfc_poll_start_timer(struct lpfc_hba * phba)
1309 {
1310 	lpfc_poll_rearm_timer(phba);
1311 }
1312 
1313 void lpfc_poll_timeout(unsigned long ptr)
1314 {
1315 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1316 
1317 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1318 		lpfc_sli_poll_fcp_ring (phba);
1319 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1320 			lpfc_poll_rearm_timer(phba);
1321 	}
1322 }
1323 
1324 static int
1325 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1326 {
1327 	struct Scsi_Host  *shost = cmnd->device->host;
1328 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1329 	struct lpfc_hba   *phba = vport->phba;
1330 	struct lpfc_sli   *psli = &phba->sli;
1331 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1332 	struct lpfc_nodelist *ndlp = rdata->pnode;
1333 	struct lpfc_scsi_buf *lpfc_cmd;
1334 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1335 	int err;
1336 
1337 	err = fc_remote_port_chkready(rport);
1338 	if (err) {
1339 		cmnd->result = err;
1340 		goto out_fail_command;
1341 	}
1342 
1343 	/*
1344 	 * Catch race where our node has transitioned, but the
1345 	 * transport is still transitioning.
1346 	 */
1347 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1348 		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1349 		goto out_fail_command;
1350 	}
1351 	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
1352 		goto out_host_busy;
1353 
1354 	lpfc_cmd = lpfc_get_scsi_buf(phba);
1355 	if (lpfc_cmd == NULL) {
1356 		lpfc_adjust_queue_depth(phba);
1357 
1358 		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1359 				 "0707 driver's buffer pool is empty, "
1360 				 "IO busied\n");
1361 		goto out_host_busy;
1362 	}
1363 
1364 	lpfc_cmd->start_time = jiffies;
1365 	/*
1366 	 * Store the midlayer's command structure for the completion phase
1367 	 * and complete the command initialization.
1368 	 */
1369 	lpfc_cmd->pCmd  = cmnd;
1370 	lpfc_cmd->rdata = rdata;
1371 	lpfc_cmd->timeout = 0;
1372 	lpfc_cmd->start_time = jiffies;
1373 	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1374 	cmnd->scsi_done = done;
1375 
1376 	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1377 	if (err)
1378 		goto out_host_busy_free_buf;
1379 
1380 	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
1381 
1382 	atomic_inc(&ndlp->cmd_pending);
1383 	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1384 				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1385 	if (err)
1386 		goto out_host_busy_free_buf;
1387 
1388 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1389 		lpfc_sli_poll_fcp_ring(phba);
1390 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1391 			lpfc_poll_rearm_timer(phba);
1392 	}
1393 
1394 	return 0;
1395 
1396  out_host_busy_free_buf:
1397 	atomic_dec(&ndlp->cmd_pending);
1398 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1399 	lpfc_release_scsi_buf(phba, lpfc_cmd);
1400  out_host_busy:
1401 	return SCSI_MLQUEUE_HOST_BUSY;
1402 
1403  out_fail_command:
1404 	done(cmnd);
1405 	return 0;
1406 }
1407 
1408 static void
1409 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1410 {
1411 	struct Scsi_Host *shost = cmnd->device->host;
1412 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1413 
1414 	spin_lock_irq(shost->host_lock);
1415 	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1416 		spin_unlock_irq(shost->host_lock);
1417 		msleep(1000);
1418 		spin_lock_irq(shost->host_lock);
1419 	}
1420 	spin_unlock_irq(shost->host_lock);
1421 	return;
1422 }
1423 
1424 static int
1425 lpfc_abort_handler(struct scsi_cmnd *cmnd)
1426 {
1427 	struct Scsi_Host  *shost = cmnd->device->host;
1428 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1429 	struct lpfc_hba   *phba = vport->phba;
1430 	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
1431 	struct lpfc_iocbq *iocb;
1432 	struct lpfc_iocbq *abtsiocb;
1433 	struct lpfc_scsi_buf *lpfc_cmd;
1434 	IOCB_t *cmd, *icmd;
1435 	int ret = SUCCESS;
1436 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
1437 
1438 	lpfc_block_error_handler(cmnd);
1439 	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
1440 	BUG_ON(!lpfc_cmd);
1441 
1442 	/*
1443 	 * If pCmd field of the corresponding lpfc_scsi_buf structure
1444 	 * points to a different SCSI command, then the driver has
1445 	 * already completed this command, but the midlayer did not
1446 	 * see the completion before the eh fired.  Just return
1447 	 * SUCCESS.
1448 	 */
1449 	iocb = &lpfc_cmd->cur_iocbq;
1450 	if (lpfc_cmd->pCmd != cmnd)
1451 		goto out;
1452 
1453 	BUG_ON(iocb->context1 != lpfc_cmd);
1454 
1455 	abtsiocb = lpfc_sli_get_iocbq(phba);
1456 	if (abtsiocb == NULL) {
1457 		ret = FAILED;
1458 		goto out;
1459 	}
1460 
1461 	/*
1462 	 * The scsi command cannot be in the txq, and it is in flight because the
1463 	 * pCmd is still pointing at the SCSI command we have to abort. There
1464 	 * is no need to search the txcmplq. Just send an abort to the FW.
1465 	 */
1466 
1467 	cmd = &iocb->iocb;
1468 	icmd = &abtsiocb->iocb;
1469 	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
1470 	icmd->un.acxri.abortContextTag = cmd->ulpContext;
1471 	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
1472 
1473 	icmd->ulpLe = 1;
1474 	icmd->ulpClass = cmd->ulpClass;
1475 	if (lpfc_is_link_up(phba))
1476 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
1477 	else
1478 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
1479 
1480 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
1481 	abtsiocb->vport = vport;
1482 	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
1483 		lpfc_sli_release_iocbq(phba, abtsiocb);
1484 		ret = FAILED;
1485 		goto out;
1486 	}
1487 
1488 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1489 		lpfc_sli_poll_fcp_ring (phba);
1490 
1491 	lpfc_cmd->waitq = &waitq;
1492 	/* Wait for abort to complete */
1493 	wait_event_timeout(waitq,
1494 			  (lpfc_cmd->pCmd != cmnd),
1495 			   (2*vport->cfg_devloss_tmo*HZ));
1496 
1497 	spin_lock_irq(shost->host_lock);
1498 	lpfc_cmd->waitq = NULL;
1499 	spin_unlock_irq(shost->host_lock);
1500 
1501 	if (lpfc_cmd->pCmd == cmnd) {
1502 		ret = FAILED;
1503 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1504 				 "0748 abort handler timed out waiting "
1505 				 "for abort to complete: ret %#x, ID %d, "
1506 				 "LUN %d, snum %#lx\n",
1507 				 ret, cmnd->device->id, cmnd->device->lun,
1508 				 cmnd->serial_number);
1509 	}
1510 
1511  out:
1512 	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1513 			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
1514 			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
1515 			 cmnd->device->lun, cmnd->serial_number);
1516 	return ret;
1517 }
1518 
1519 static int
1520 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1521 {
1522 	struct Scsi_Host  *shost = cmnd->device->host;
1523 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1524 	struct lpfc_hba   *phba = vport->phba;
1525 	struct lpfc_scsi_buf *lpfc_cmd;
1526 	struct lpfc_iocbq *iocbq, *iocbqrsp;
1527 	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1528 	struct lpfc_nodelist *pnode = rdata->pnode;
1529 	unsigned long later;
1530 	int ret = SUCCESS;
1531 	int status;
1532 	int cnt;
1533 	struct lpfc_scsi_event_header scsi_event;
1534 
1535 	lpfc_block_error_handler(cmnd);
1536 	/*
1537 	 * If the target is not in a MAPPED state, delay the reset until the
1538 	 * target is rediscovered or the devloss timeout expires.
1539 	 */
1540 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1541 	while (time_after(later, jiffies)) {
1542 		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1543 			return FAILED;
1544 		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1545 			break;
1546 		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1547 		rdata = cmnd->device->hostdata;
1548 		if (!rdata)
1549 			break;
1550 		pnode = rdata->pnode;
1551 	}
1552 
1553 	scsi_event.event_type = FC_REG_SCSI_EVENT;
1554 	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
1555 	scsi_event.lun = 0;
1556 	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
1557 	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
1558 
1559 	fc_host_post_vendor_event(shost,
1560 		fc_get_event_number(),
1561 		sizeof(scsi_event),
1562 		(char *)&scsi_event,
1563 		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1564 
1565 	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1566 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1567 				 "0721 LUN Reset rport "
1568 				 "failure: msec x%x rdata x%p\n",
1569 				 jiffies_to_msecs(jiffies - later), rdata);
1570 		return FAILED;
1571 	}
1572 	lpfc_cmd = lpfc_get_scsi_buf(phba);
1573 	if (lpfc_cmd == NULL)
1574 		return FAILED;
1575 	lpfc_cmd->timeout = 60;
1576 	lpfc_cmd->rdata = rdata;
1577 
1578 	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
1579 					      cmnd->device->lun,
1580 					      FCP_TARGET_RESET);
1581 	if (!status) {
1582 		lpfc_release_scsi_buf(phba, lpfc_cmd);
1583 		return FAILED;
1584 	}
1585 	iocbq = &lpfc_cmd->cur_iocbq;
1586 
1587 	/* get a buffer for this IOCB command response */
1588 	iocbqrsp = lpfc_sli_get_iocbq(phba);
1589 	if (iocbqrsp == NULL) {
1590 		lpfc_release_scsi_buf(phba, lpfc_cmd);
1591 		return FAILED;
1592 	}
1593 	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1594 			 "0703 Issue target reset to TGT %d LUN %d "
1595 			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1596 			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1597 	status = lpfc_sli_issue_iocb_wait(phba,
1598 					  &phba->sli.ring[phba->sli.fcp_ring],
1599 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
1600 	if (status == IOCB_TIMEDOUT) {
1601 		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1602 		ret = TIMEOUT_ERROR;
1603 	} else {
1604 		if (status != IOCB_SUCCESS)
1605 			ret = FAILED;
1606 		lpfc_release_scsi_buf(phba, lpfc_cmd);
1607 	}
1608 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1609 			 "0713 SCSI layer issued device reset (%d, %d) "
1610 			 "return x%x status x%x result x%x\n",
1611 			 cmnd->device->id, cmnd->device->lun, ret,
1612 			 iocbqrsp->iocb.ulpStatus,
1613 			 iocbqrsp->iocb.un.ulpWord[4]);
1614 	lpfc_sli_release_iocbq(phba, iocbqrsp);
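	/*
	 * Abort any I/O still outstanding to this target and wait up to
	 * twice the devloss timeout for the outstanding count to drain.
	 */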
1615 	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
1616 				LPFC_CTX_TGT);
1617 	if (cnt)
1618 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1619 				    cmnd->device->id, cmnd->device->lun,
1620 				    LPFC_CTX_TGT);
1621 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1622 	while (time_after(later, jiffies) && cnt) {
1623 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1624 		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
1625 					cmnd->device->lun, LPFC_CTX_TGT);
1626 	}
1627 	if (cnt) {
1628 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1629 				 "0719 device reset I/O flush failure: "
1630 				 "cnt x%x\n", cnt);
1631 		ret = FAILED;
1632 	}
1633 	return ret;
1634 }
1635 
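/**
 * lpfc_bus_reset_handler: Eh_bus_reset_handler entry point of scsi_host_template.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine is the bus reset entry point called by the SCSI midlayer.
 * Because the driver presents a single bus, it posts a vendor-unique bus
 * reset event and then issues a target reset to every mapped target known
 * to the vport, finally aborting and flushing any I/O still outstanding on
 * the host.
 *
 * Return code :
 *  SUCCESS - every target reset and the final I/O flush completed
 *  FAILED - a target reset failed or outstanding I/O could not be flushed
 **/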
1636 static int
1637 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1638 {
1639 	struct Scsi_Host  *shost = cmnd->device->host;
1640 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1641 	struct lpfc_hba   *phba = vport->phba;
1642 	struct lpfc_nodelist *ndlp = NULL;
1643 	int match;
1644 	int ret = SUCCESS, status = SUCCESS, i;
1645 	int cnt;
1646 	struct lpfc_scsi_buf * lpfc_cmd;
1647 	unsigned long later;
1648 	struct lpfc_scsi_event_header scsi_event;
1649 
1650 	scsi_event.event_type = FC_REG_SCSI_EVENT;
1651 	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
1652 	scsi_event.lun = 0;
1653 	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
1654 	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
1655 
1656 	fc_host_post_vendor_event(shost,
1657 		fc_get_event_number(),
1658 		sizeof(scsi_event),
1659 		(char *)&scsi_event,
1660 		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1661 
1662 	lpfc_block_error_handler(cmnd);
1663 	/*
1664 	 * Since the driver manages a single bus device, reset all
1665 	 * targets known to the driver.  Should any target reset
1666 	 * fail, this routine returns failure to the midlayer.
1667 	 */
1668 	for (i = 0; i < LPFC_MAX_TARGET; i++) {
1669 		/* Search for mapped node by target ID */
1670 		match = 0;
1671 		spin_lock_irq(shost->host_lock);
1672 		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1673 			if (!NLP_CHK_NODE_ACT(ndlp))
1674 				continue;
1675 			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1676 			    ndlp->nlp_sid == i &&
1677 			    ndlp->rport) {
1678 				match = 1;
1679 				break;
1680 			}
1681 		}
1682 		spin_unlock_irq(shost->host_lock);
1683 		if (!match)
1684 			continue;
1685 		lpfc_cmd = lpfc_get_scsi_buf(phba);
1686 		if (lpfc_cmd) {
1687 			lpfc_cmd->timeout = 60;
1688 			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1689 						     cmnd->device->lun,
1690 						     ndlp->rport->dd_data);
1691 			if (status != TIMEOUT_ERROR)
1692 				lpfc_release_scsi_buf(phba, lpfc_cmd);
1693 		}
1694 		if (!lpfc_cmd || status != SUCCESS) {
1695 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1696 					 "0700 Bus Reset on target %d failed\n",
1697 					 i);
1698 			ret = FAILED;
1699 		}
1700 	}
1701 	/*
1702 	 * All outstanding txcmplq I/Os should have been aborted by
1703 	 * the targets.  Unfortunately, some targets do not abide by
1704 	 * this forcing the driver to double check.
1705 	 */
1706 	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1707 	if (cnt)
1708 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1709 				    0, 0, LPFC_CTX_HOST);
1710 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1711 	while (time_after(later, jiffies) && cnt) {
1712 		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1713 		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1714 	}
1715 	if (cnt) {
1716 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1717 				 "0715 Bus Reset I/O flush failure: "
1718 				 "cnt x%x left x%x\n", cnt, i);
1719 		ret = FAILED;
1720 	}
1721 	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1722 			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
1723 	return ret;
1724 }
1725 
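/**
 * lpfc_slave_alloc: slave_alloc entry point of scsi_host_template.
 * @sdev: Pointer to scsi_device object.
 *
 * This routine is called when a scsi device is added to the host. It
 * verifies the associated remote port is usable and pre-allocates a batch
 * of scsi buffers (lun_queue_depth + 2) onto the HBA's global buffer list,
 * without exceeding the HBA queue depth less the exchanges reserved for
 * discovery.
 *
 * Return code :
 *  -ENXIO - the remote port is not present or not ready
 *  0 - success
 **/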
1726 static int
1727 lpfc_slave_alloc(struct scsi_device *sdev)
1728 {
1729 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1730 	struct lpfc_hba   *phba = vport->phba;
1731 	struct lpfc_scsi_buf *scsi_buf = NULL;
1732 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1733 	uint32_t total = 0, i;
1734 	uint32_t num_to_alloc = 0;
1735 	unsigned long flags;
1736 
1737 	if (!rport || fc_remote_port_chkready(rport))
1738 		return -ENXIO;
1739 
1740 	sdev->hostdata = rport->dd_data;
1741 
1742 	/*
1743 	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
1744 	 * available list of scsi buffers.  Don't allocate more than the
1745 	 * HBA limit conveyed to the midlayer via the host structure.  The
1746 	 * formula accounts for the lun_queue_depth + error handlers + 1
1747 	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
1748 	 */
1749 	total = phba->total_scsi_bufs;
1750 	num_to_alloc = vport->cfg_lun_queue_depth + 2;
1751 
1752 	/* Allow some exchanges to be available always to complete discovery */
1753 	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
1754 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1755 				 "0704 At limitation of %d preallocated "
1756 				 "command buffers\n", total);
1757 		return 0;
1759 	} else if (total + num_to_alloc >
1760 		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
1761 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1762 				 "0705 Allocation request of %d "
1763 				 "command buffers will exceed max of %d.  "
1764 				 "Reducing allocation request to %d.\n",
1765 				 num_to_alloc, phba->cfg_hba_queue_depth,
1766 				 (phba->cfg_hba_queue_depth - total));
1767 		num_to_alloc = phba->cfg_hba_queue_depth - total;
1768 	}
1769 
1770 	for (i = 0; i < num_to_alloc; i++) {
1771 		scsi_buf = lpfc_new_scsi_buf(vport);
1772 		if (!scsi_buf) {
1773 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1774 					 "0706 Failed to allocate "
1775 					 "command buffer\n");
1776 			break;
1777 		}
1778 
1779 		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
1780 		phba->total_scsi_bufs++;
1781 		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
1782 		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
1783 	}
1784 	return 0;
1785 }
1786 
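/**
 * lpfc_slave_configure: slave_configure entry point of scsi_host_template.
 * @sdev: Pointer to scsi_device object.
 *
 * This routine configures a scsi device after it is added to the midlayer.
 * It sets the queue depth from the lun_queue_depth configuration (tagged or
 * untagged), copies the devloss timeout to the remote port, and, when FCP
 * ring polling is enabled, services the ring and rearms the poll timer.
 *
 * Return code : 0 always.
 **/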
1787 static int
1788 lpfc_slave_configure(struct scsi_device *sdev)
1789 {
1790 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1791 	struct lpfc_hba   *phba = vport->phba;
1792 	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);
1793 
1794 	if (sdev->tagged_supported)
1795 		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
1796 	else
1797 		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
1798 
1799 	/*
1800 	 * Initialize the fc transport attributes for the target
1801 	 * containing this scsi device.  Also note that the driver's
1802 	 * target pointer is stored in the starget_data for the
1803 	 * driver's sysfs entry point functions.
1804 	 */
1805 	rport->dev_loss_tmo = vport->cfg_devloss_tmo;
1806 
1807 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1808 		lpfc_sli_poll_fcp_ring(phba);
1809 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1810 			lpfc_poll_rearm_timer(phba);
1811 	}
1812 
1813 	return 0;
1814 }
1815 
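/**
 * lpfc_slave_destroy: slave_destroy entry point of scsi_host_template.
 * @sdev: Pointer to scsi_device object.
 *
 * This routine is called when a scsi device is removed from the host.
 * It clears the remote port data cached in sdev->hostdata.
 **/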
1816 static void
1817 lpfc_slave_destroy(struct scsi_device *sdev)
1818 {
1819 	sdev->hostdata = NULL;
1820 	return;
1821 }
1822 
1823 
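/* scsi_host_template used for physical port (HBA) SCSI hosts */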
1824 struct scsi_host_template lpfc_template = {
1825 	.module			= THIS_MODULE,
1826 	.name			= LPFC_DRIVER_NAME,
1827 	.info			= lpfc_info,
1828 	.queuecommand		= lpfc_queuecommand,
1829 	.eh_abort_handler	= lpfc_abort_handler,
1830 	.eh_device_reset_handler= lpfc_device_reset_handler,
1831 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
1832 	.slave_alloc		= lpfc_slave_alloc,
1833 	.slave_configure	= lpfc_slave_configure,
1834 	.slave_destroy		= lpfc_slave_destroy,
1835 	.scan_finished		= lpfc_scan_finished,
1836 	.this_id		= -1,
1837 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
1838 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
1839 	.use_clustering		= ENABLE_CLUSTERING,
1840 	.shost_attrs		= lpfc_hba_attrs,
1841 	.max_sectors		= 0xFFFF,
1842 };
1843 
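/*
 * scsi_host_template used for NPIV vport SCSI hosts; identical to
 * lpfc_template except for the sysfs attribute set (shost_attrs).
 */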
1844 struct scsi_host_template lpfc_vport_template = {
1845 	.module			= THIS_MODULE,
1846 	.name			= LPFC_DRIVER_NAME,
1847 	.info			= lpfc_info,
1848 	.queuecommand		= lpfc_queuecommand,
1849 	.eh_abort_handler	= lpfc_abort_handler,
1850 	.eh_device_reset_handler= lpfc_device_reset_handler,
1851 	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
1852 	.slave_alloc		= lpfc_slave_alloc,
1853 	.slave_configure	= lpfc_slave_configure,
1854 	.slave_destroy		= lpfc_slave_destroy,
1855 	.scan_finished		= lpfc_scan_finished,
1856 	.this_id		= -1,
1857 	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
1858 	.cmd_per_lun		= LPFC_CMD_PER_LUN,
1859 	.use_clustering		= ENABLE_CLUSTERING,
1860 	.shost_attrs		= lpfc_vport_attrs,
1861 	.max_sectors		= 0xFFFF,
1862 };
1863