xref: /linux/drivers/scsi/fnic/fnic_scsi.c (revision 88e45067a30918ebb4942120892963e2311330af)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
4  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
5  */
6 #include <linux/mempool.h>
7 #include <linux/errno.h>
8 #include <linux/init.h>
9 #include <linux/workqueue.h>
10 #include <linux/pci.h>
11 #include <linux/scatterlist.h>
12 #include <linux/skbuff.h>
13 #include <linux/spinlock.h>
14 #include <linux/etherdevice.h>
15 #include <linux/if_ether.h>
16 #include <linux/if_vlan.h>
17 #include <linux/delay.h>
18 #include <linux/gfp.h>
19 #include <scsi/scsi.h>
20 #include <scsi/scsi_host.h>
21 #include <scsi/scsi_device.h>
22 #include <scsi/scsi_cmnd.h>
23 #include <scsi/scsi_tcq.h>
24 #include <scsi/fc/fc_els.h>
25 #include <scsi/fc/fc_fcoe.h>
26 #include <scsi/fc_frame.h>
27 #include <scsi/scsi_transport_fc.h>
28 #include "fnic_io.h"
29 #include "fnic.h"
30 
31 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
32 
33 const char *fnic_state_str[] = {
34 	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
35 	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
36 	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
37 	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
38 };
39 
40 static const char *fnic_ioreq_state_str[] = {
41 	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
42 	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
43 	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
44 	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
45 	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
46 };
47 
48 static const char *fcpio_status_str[] =  {
49 	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
50 	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
51 	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
52 	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
53 	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
54 	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
55 	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
56 	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
57 	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
58 	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
59 	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
60 	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
61 	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
62 	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
63 	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
64 	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
65 	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
66 	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
67 	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
68 };
69 
70 enum terminate_io_return {
71 	TERM_SUCCESS = 0,
72 	TERM_NO_SC = 1,
73 	TERM_IO_REQ_NOT_FOUND,
74 	TERM_ANOTHER_PORT,
75 	TERM_GSTATE,
76 	TERM_IO_BLOCKED,
77 	TERM_OUT_OF_WQ_DESC,
78 	TERM_TIMED_OUT,
79 	TERM_MISC,
80 };
81 
82 const char *fnic_state_to_str(unsigned int state)
83 {
84 	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
85 		return "unknown";
86 
87 	return fnic_state_str[state];
88 }
89 
90 static const char *fnic_ioreq_state_to_str(unsigned int state)
91 {
92 	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
93 	    !fnic_ioreq_state_str[state])
94 		return "unknown";
95 
96 	return fnic_ioreq_state_str[state];
97 }
98 
99 static const char *fnic_fcpio_status_to_str(unsigned int status)
100 {
101 	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
102 		return "unknown";
103 
104 	return fcpio_status_str[status];
105 }
106 
107 /*
108  * Unmap the data buffer and sense buffer for an io_req,
109  * also unmap and free the device-private scatter/gather list.
110  */
111 static void fnic_release_ioreq_buf(struct fnic *fnic,
112 				   struct fnic_io_req *io_req,
113 				   struct scsi_cmnd *sc)
114 {
115 	if (io_req->sgl_list_pa)
116 		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
117 				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
118 				 DMA_TO_DEVICE);
119 	scsi_dma_unmap(sc);
120 
121 	if (io_req->sgl_cnt)
122 		mempool_free(io_req->sgl_list_alloc,
123 			     fnic->io_sgl_pool[io_req->sgl_type]);
124 	if (io_req->sense_buf_pa)
125 		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
126 				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
127 }
128 
129 static bool
130 fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
131 				void *data1, void *data2)
132 {
133 	u32 *portid = data1;
134 	unsigned int *count = data2;
135 	struct fnic_io_req *io_req = fnic_priv(sc)->io_req;
136 
137 	if (!io_req || (*portid && (io_req->port_id != *portid)))
138 		return true;
139 
140 	*count += 1;
141 	return true;
142 }
143 
144 unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid)
145 {
146 	unsigned int count = 0;
147 
148 	fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter,
149 				&portid, &count);
150 
151 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
152 		      "portid = 0x%x count = %u\n", portid, count);
153 	return count;
154 }
155 
156 unsigned int fnic_count_all_ioreqs(struct fnic *fnic)
157 {
158 	return fnic_count_ioreqs(fnic, 0);
159 }
160 
161 static bool
162 fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc,
163 				void *data1, void *data2)
164 {
165 	struct scsi_device *scsi_device = data1;
166 	unsigned int *count = data2;
167 
168 	if (sc->device != scsi_device || !fnic_priv(sc)->io_req)
169 		return true;
170 
171 	*count += 1;
172 	return true;
173 }
174 
175 unsigned int
176 fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device)
177 {
178 	unsigned int count = 0;
179 
180 	fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter,
181 				scsi_device, &count);
182 
183 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
184 		      "lun = %p count = %u\n", scsi_device, count);
185 	return count;
186 }
187 
188 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
189 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq)
190 {
191 	/* if no Ack received from firmware, then nothing to clean */
192 	if (!fnic->fw_ack_recd[hwq])
193 		return 1;
194 
195 	/*
196 	 * Update desc_available count based on number of freed descriptors
197 	 * Account for wraparound
198 	 */
199 	if (wq->to_clean_index <= fnic->fw_ack_index[hwq])
200 		wq->ring.desc_avail += (fnic->fw_ack_index[hwq]
201 					- wq->to_clean_index + 1);
202 	else
203 		wq->ring.desc_avail += (wq->ring.desc_count
204 					- wq->to_clean_index
205 					+ fnic->fw_ack_index[hwq] + 1);
206 
207 	/*
208 	 * just bump clean index to ack_index+1 accounting for wraparound
209 	 * this will essentially free up all descriptors between
210 	 * to_clean_index and fw_ack_index, both inclusive
211 	 */
212 	wq->to_clean_index =
213 		(fnic->fw_ack_index[hwq] + 1) % wq->ring.desc_count;
214 
215 	/* we have processed the acks received so far */
216 	fnic->fw_ack_recd[hwq] = 0;
217 	return 0;
218 }
219 
220 
221 /*
222  * __fnic_set_state_flags
223  * Sets/Clears bits in fnic's state_flags
224  **/
225 void
226 __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
227 			unsigned long clearbits)
228 {
229 	unsigned long flags = 0;
230 
231 	spin_lock_irqsave(&fnic->fnic_lock, flags);
232 
233 	if (clearbits)
234 		fnic->state_flags &= ~st_flags;
235 	else
236 		fnic->state_flags |= st_flags;
237 
238 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
239 
240 	return;
241 }
242 
243 
244 /*
245  * fnic_fw_reset_handler
246  * Routine to send reset msg to fw
247  */
248 int fnic_fw_reset_handler(struct fnic *fnic)
249 {
250 	struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
251 	int ret = 0;
252 	unsigned long flags;
253 	unsigned int ioreq_count;
254 
255 	/* indicate fwreset to io path */
256 	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
257 	ioreq_count = fnic_count_all_ioreqs(fnic);
258 
259 	/* wait for io cmpl */
260 	while (atomic_read(&fnic->in_flight))
261 		schedule_timeout(msecs_to_jiffies(1));
262 
263 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
264 
265 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
266 		free_wq_copy_descs(fnic, wq, 0);
267 
268 	if (!vnic_wq_copy_desc_avail(wq))
269 		ret = -EAGAIN;
270 	else {
271 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
272 			  "ioreq_count: %u\n", ioreq_count);
273 		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
274 		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
275 		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
276 			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
277 			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
278 				atomic64_read(
279 				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
280 	}
281 
282 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
283 
284 	if (!ret) {
285 		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
286 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
287 				"Issued fw reset\n");
288 	} else {
289 		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
290 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
291 				"Failed to issue fw reset\n");
292 	}
293 
294 	return ret;
295 }
296 
297 
298 /*
299  * fnic_flogi_reg_handler
300  * Routine to send flogi register msg to fw
301  */
302 int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
303 {
304 	struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0];
305 	enum fcpio_flogi_reg_format_type format;
306 	u8 gw_mac[ETH_ALEN];
307 	int ret = 0;
308 	unsigned long flags;
309 	struct fnic_iport_s *iport = &fnic->iport;
310 
311 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
312 
313 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
314 		free_wq_copy_descs(fnic, wq, 0);
315 
316 	if (!vnic_wq_copy_desc_avail(wq)) {
317 		ret = -EAGAIN;
318 		goto flogi_reg_ioreq_end;
319 	}
320 
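	/* Default to gateway-destined registration, using the FCF MAC as the destination */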
321 	memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN);
322 	format = FCPIO_FLOGI_REG_GW_DEST;
323 
324 	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
325 		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
326 						fc_id, gw_mac,
327 						fnic->iport.fpma,
328 						iport->r_a_tov, iport->e_d_tov);
329 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
330 			      "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n",
331 				  fc_id, fnic->iport.fpma, gw_mac);
332 	} else {
333 		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
334 						  format, fc_id, gw_mac);
335 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
336 			"FLOGI reg issued fcid 0x%x dest %p\n",
337 			fc_id, gw_mac);
338 	}
339 
340 	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
341 	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
342 		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
343 		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
344 		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
345 
346 flogi_reg_ioreq_end:
347 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
348 	return ret;
349 }
350 
351 /*
352  * fnic_queue_wq_copy_desc
353  * Routine to enqueue a wq copy desc
354  */
355 static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
356 					  struct vnic_wq_copy *wq,
357 					  struct fnic_io_req *io_req,
358 					  struct scsi_cmnd *sc,
359 					  int sg_count,
360 					  uint32_t mqtag,
361 					  uint16_t hwq)
362 {
363 	struct scatterlist *sg;
364 	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
365 	struct host_sg_desc *desc;
366 	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
367 	unsigned int i;
368 	int flags;
369 	u8 exch_flags;
370 	struct scsi_lun fc_lun;
371 	struct fnic_tport_s *tport;
372 	struct rport_dd_data_s *rdd_data;
373 
374 	rdd_data = rport->dd_data;
375 	tport = rdd_data->tport;
376 
377 	if (sg_count) {
378 		/* For each SGE, create a device desc entry */
379 		desc = io_req->sgl_list;
380 		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
381 			desc->addr = cpu_to_le64(sg_dma_address(sg));
382 			desc->len = cpu_to_le32(sg_dma_len(sg));
383 			desc->_resvd = 0;
384 			desc++;
385 		}
386 
387 		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
388 				io_req->sgl_list,
389 				sizeof(io_req->sgl_list[0]) * sg_count,
390 				DMA_TO_DEVICE);
391 		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
392 			printk(KERN_ERR "DMA mapping failed\n");
393 			return SCSI_MLQUEUE_HOST_BUSY;
394 		}
395 	}
396 
397 	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
398 					      sc->sense_buffer,
399 					      SCSI_SENSE_BUFFERSIZE,
400 					      DMA_FROM_DEVICE);
401 	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
402 		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
403 				sizeof(io_req->sgl_list[0]) * sg_count,
404 				DMA_TO_DEVICE);
405 		printk(KERN_ERR "DMA mapping failed\n");
406 		return SCSI_MLQUEUE_HOST_BUSY;
407 	}
408 
409 	int_to_scsilun(sc->device->lun, &fc_lun);
410 
411 	/* Enqueue the descriptor in the Copy WQ */
412 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
413 		free_wq_copy_descs(fnic, wq, hwq);
414 
415 	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
416 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
417 			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
418 		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
419 		return SCSI_MLQUEUE_HOST_BUSY;
420 	}
421 
422 	flags = 0;
423 	if (sc->sc_data_direction == DMA_FROM_DEVICE)
424 		flags = FCPIO_ICMND_RDDATA;
425 	else if (sc->sc_data_direction == DMA_TO_DEVICE)
426 		flags = FCPIO_ICMND_WRDATA;
427 
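	/* Set the exchange retry flag only when both the adapter config and the target port advertise retry support */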
428 	exch_flags = 0;
429 	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
430 		(tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY))
431 		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
432 
433 	fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag,
434 					 0, exch_flags, io_req->sgl_cnt,
435 					 SCSI_SENSE_BUFFERSIZE,
436 					 io_req->sgl_list_pa,
437 					 io_req->sense_buf_pa,
438 					 0, /* scsi cmd ref, always 0 */
439 					 FCPIO_ICMND_PTA_SIMPLE,
440 					 	/* scsi pri and tag */
441 					 flags,	/* command flags */
442 					 sc->cmnd, sc->cmd_len,
443 					 scsi_bufflen(sc),
444 					 fc_lun.scsi_lun, io_req->port_id,
445 					 tport->max_payload_size,
446 					 tport->r_a_tov, tport->e_d_tov);
447 
448 	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
449 	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
450 		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
451 		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
452 		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
453 
454 	return 0;
455 }
456 
457 int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
458 {
459 	struct request *const rq = scsi_cmd_to_rq(sc);
460 	uint32_t mqtag = 0;
461 	void (*done)(struct scsi_cmnd *) = scsi_done;
462 	struct fc_rport *rport;
463 	struct fnic_io_req *io_req = NULL;
464 	struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host));
465 	struct fnic_iport_s *iport = NULL;
466 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
467 	struct vnic_wq_copy *wq;
468 	int ret = 1;
469 	u64 cmd_trace;
470 	int sg_count = 0;
471 	unsigned long flags = 0;
472 	unsigned long ptr;
473 	int io_lock_acquired = 0;
474 	uint16_t hwq = 0;
475 	struct fnic_tport_s *tport = NULL;
476 	struct rport_dd_data_s *rdd_data;
477 	uint16_t lun0_delay = 0;
478 
479 	rport = starget_to_rport(scsi_target(sc->device));
480 	if (!rport) {
481 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
482 				"returning DID_NO_CONNECT for IO as rport is NULL\n");
483 		sc->result = DID_NO_CONNECT << 16;
484 		done(sc);
485 		return 0;
486 	}
487 
488 	ret = fc_remote_port_chkready(rport);
489 	if (ret) {
490 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
491 				"rport is not ready\n");
492 		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
493 		sc->result = ret;
494 		done(sc);
495 		return 0;
496 	}
497 
498 	mqtag = blk_mq_unique_tag(rq);
499 	spin_lock_irqsave(&fnic->fnic_lock, flags);
500 	iport = &fnic->iport;
501 
502 	if (iport->state != FNIC_IPORT_STATE_READY) {
503 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
504 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
505 					  "returning DID_NO_CONNECT for IO as iport state: %d\n",
506 					  iport->state);
507 		sc->result = DID_NO_CONNECT << 16;
508 		done(sc);
509 		return 0;
510 	}
511 
512 	/* fc_remote_port_add() may have added the tport to
513 	 * fc_transport but dd_data not yet set
514 	 */
515 	rdd_data = rport->dd_data;
516 	tport = rdd_data->tport;
517 	if (!tport || (rdd_data->iport != iport)) {
518 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
519 					  "dd_data not yet set in SCSI for rport portid: 0x%x\n",
520 					  rport->port_id);
521 		tport = fnic_find_tport_by_fcid(iport, rport->port_id);
522 		if (!tport) {
523 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
524 			FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
525 						  "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n",
526 						  rport->port_id);
527 			sc->result = DID_BUS_BUSY << 16;
528 			done(sc);
529 			return 0;
530 		}
531 
532 		/* Re-assign same params as in fnic_fdls_add_tport */
533 		rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN;
534 		rport->supported_classes =
535 			FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET;
536 		/* the dd_data is allocated by fctransport of size dd_fcrport_size */
537 		rdd_data = rport->dd_data;
538 		rdd_data->tport = tport;
539 		rdd_data->iport = iport;
540 		tport->rport = rport;
541 		tport->flags |= FNIC_FDLS_SCSI_REGISTERED;
542 	}
543 
544 	if ((tport->state != FDLS_TGT_STATE_READY)
545 		&& (tport->state != FDLS_TGT_STATE_ADISC)) {
546 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
547 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
548 					  "returning DID_NO_CONNECT for IO as tport state: %d\n",
549 					  tport->state);
550 		sc->result = DID_NO_CONNECT << 16;
551 		done(sc);
552 		return 0;
553 	}
554 
555 	atomic_inc(&fnic->in_flight);
556 	atomic_inc(&tport->in_flight);
557 
558 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
559 		atomic_dec(&fnic->in_flight);
560 		atomic_dec(&tport->in_flight);
561 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
562 		return SCSI_MLQUEUE_HOST_BUSY;
563 	}
564 
565 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) {
566 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
567 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
568 		  "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n",
569 		  fnic->state_flags);
570 		return SCSI_MLQUEUE_HOST_BUSY;
571 	}
572 
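	/* Arm a one-time delay for the first command sent to this tport (applied before returning, below) */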
573 	if (!tport->lun0_delay) {
574 		lun0_delay = 1;
575 		tport->lun0_delay++;
576 	}
577 
578 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
579 
580 	fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
581 	fnic_priv(sc)->flags = FNIC_NO_FLAGS;
582 
583 	/* Get a new io_req for this SCSI IO */
584 	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
585 	if (!io_req) {
586 		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
587 		ret = SCSI_MLQUEUE_HOST_BUSY;
588 		goto out;
589 	}
590 	memset(io_req, 0, sizeof(*io_req));
591 
592 	/* Map the data buffer */
593 	sg_count = scsi_dma_map(sc);
594 	if (sg_count < 0) {
595 		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
596 			  mqtag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
597 		mempool_free(io_req, fnic->io_req_pool);
598 		goto out;
599 	}
600 
601 	io_req->tport = tport;
602 	/* Determine the type of scatter/gather list we need */
603 	io_req->sgl_cnt = sg_count;
604 	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
605 	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
606 		io_req->sgl_type = FNIC_SGL_CACHE_MAX;
607 
608 	if (sg_count) {
609 		io_req->sgl_list =
610 			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
611 				      GFP_ATOMIC);
612 		if (!io_req->sgl_list) {
613 			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
614 			ret = SCSI_MLQUEUE_HOST_BUSY;
615 			scsi_dma_unmap(sc);
616 			mempool_free(io_req, fnic->io_req_pool);
617 			goto out;
618 		}
619 
620 		/* Cache sgl list allocated address before alignment */
621 		io_req->sgl_list_alloc = io_req->sgl_list;
622 		ptr = (unsigned long) io_req->sgl_list;
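		/* Round the SGL pointer up to the next FNIC_SG_DESC_ALIGN boundary if it is misaligned */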
623 		if (ptr % FNIC_SG_DESC_ALIGN) {
624 			io_req->sgl_list = (struct host_sg_desc *)
625 				(((unsigned long) ptr
626 				  + FNIC_SG_DESC_ALIGN - 1)
627 				 & ~(FNIC_SG_DESC_ALIGN - 1));
628 		}
629 	}
630 
631 	/*
632 	* Will acquire lock before setting to IO initialized.
633 	*/
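	/* The blk-mq unique tag encodes the hw queue in its upper bits and the per-queue tag in its lower bits */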
634 	hwq = blk_mq_unique_tag_to_hwq(mqtag);
635 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
636 
637 	/* initialize rest of io_req */
638 	io_lock_acquired = 1;
639 	io_req->port_id = rport->port_id;
640 	io_req->start_time = jiffies;
641 	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
642 	fnic_priv(sc)->io_req = io_req;
643 	fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
644 	io_req->sc = sc;
645 
646 	if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL) {
647 		WARN(1, "fnic<%d>: %s: hwq: %d tag 0x%x already exists\n",
648 				fnic->fnic_num, __func__, hwq, blk_mq_unique_tag_to_tag(mqtag));
649 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
650 		return SCSI_MLQUEUE_HOST_BUSY;
651 	}
652 
653 	fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = io_req;
654 	io_req->tag = mqtag;
655 
656 	/* create copy wq desc and enqueue it */
657 	wq = &fnic->hw_copy_wq[hwq];
658 	atomic64_inc(&fnic_stats->io_stats.ios[hwq]);
659 	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count, mqtag, hwq);
660 	if (ret) {
661 		/*
662 		 * In case another thread cancelled the request,
663 		 * refetch the pointer under the lock.
664 		 */
665 		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
666 			  mqtag, sc, 0, 0, 0, fnic_flags_and_state(sc));
667 		io_req = fnic_priv(sc)->io_req;
668 		fnic_priv(sc)->io_req = NULL;
669 		if (io_req)
670 			fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
671 		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
672 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
673 		if (io_req) {
674 			fnic_release_ioreq_buf(fnic, io_req, sc);
675 			mempool_free(io_req, fnic->io_req_pool);
676 		}
677 		atomic_dec(&fnic->in_flight);
678 		atomic_dec(&tport->in_flight);
679 		return ret;
680 	} else {
681 		atomic64_inc(&fnic_stats->io_stats.active_ios);
682 		atomic64_inc(&fnic_stats->io_stats.num_ios);
683 		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
684 			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
685 			atomic64_set(&fnic_stats->io_stats.max_active_ios,
686 			     atomic64_read(&fnic_stats->io_stats.active_ios));
687 
688 		/* REVISIT: Use per IO lock in the final code */
689 		fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
690 	}
691 out:
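	/* Pack the SCSI opcode and selected CDB bytes into one 64-bit word for tracing */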
692 	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
693 			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
694 			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
695 			sc->cmnd[5]);
696 
697 	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
698 		   mqtag, sc, io_req, sg_count, cmd_trace,
699 		   fnic_flags_and_state(sc));
700 
701 	/* only if we issued the IO do we hold the io lock */
702 	if (io_lock_acquired)
703 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
704 
705 	atomic_dec(&fnic->in_flight);
706 	atomic_dec(&tport->in_flight);
707 
708 	if (lun0_delay) {
709 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
710 					  "LUN0 delay\n");
711 		mdelay(LUN0_DELAY_TIME);
712 	}
713 
714 	return ret;
715 }
716 
717 
718 /*
719  * fnic_fcpio_fw_reset_cmpl_handler
720  * Routine to handle fw reset completion
721  */
722 static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
723 					    struct fcpio_fw_req *desc)
724 {
725 	u8 type;
726 	u8 hdr_status;
727 	struct fcpio_tag tag;
728 	int ret = 0;
729 	unsigned long flags;
730 	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
731 
732 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
733 
734 	atomic64_inc(&reset_stats->fw_reset_completions);
735 
736 	/* Clean up all outstanding io requests */
737 	fnic_cleanup_io(fnic, SCSI_NO_TAG);
738 
739 	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
740 	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
741 	atomic64_set(&fnic->io_cmpl_skip, 0);
742 
743 	spin_lock_irqsave(&fnic->fnic_lock, flags);
744 
745 	/* fnic should be in FC_TRANS_ETH_MODE */
746 	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
747 		/* Check status of reset completion */
748 		if (!hdr_status) {
749 			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
750 					"reset cmpl success\n");
751 			/* Ready to send flogi out */
752 			fnic->state = FNIC_IN_ETH_MODE;
753 		} else {
754 			FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
755 				"reset failed with header status: %s\n",
756 				fnic_fcpio_status_to_str(hdr_status));
757 
758 			fnic->state = FNIC_IN_FC_MODE;
759 			atomic64_inc(&reset_stats->fw_reset_failures);
760 			ret = -1;
761 		}
762 	} else {
763 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
764 			"Unexpected state while processing reset completion: %s\n",
765 			fnic_state_to_str(fnic->state));
766 		atomic64_inc(&reset_stats->fw_reset_failures);
767 		ret = -1;
768 	}
769 
770 	if (fnic->fw_reset_done)
771 		complete(fnic->fw_reset_done);
772 
773 	/*
774 	 * If fnic is being removed, or fw reset failed
775 	 * free the flogi frame. Else, send it out
776 	 */
777 	if (ret) {
778 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
779 		fnic_free_txq(&fnic->tx_queue);
780 		goto reset_cmpl_handler_end;
781 	}
782 
783 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
784 
785 	queue_work(fnic_event_queue, &fnic->flush_work);
786 
787  reset_cmpl_handler_end:
788 	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
789 
790 	return ret;
791 }
792 
793 /*
794  * fnic_fcpio_flogi_reg_cmpl_handler
795  * Routine to handle flogi register completion
796  */
797 static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
798 					     struct fcpio_fw_req *desc)
799 {
800 	u8 type;
801 	u8 hdr_status;
802 	struct fcpio_tag tag;
803 	int ret = 0;
804 	unsigned long flags;
805 
806 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
807 
808 	/* Update fnic state based on status of flogi reg completion */
809 	spin_lock_irqsave(&fnic->fnic_lock, flags);
810 
811 	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
812 
813 		/* Check flogi registration completion status */
814 		if (!hdr_status) {
815 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
816 				      "FLOGI reg succeeded\n");
817 			fnic->state = FNIC_IN_FC_MODE;
818 		} else {
819 			FNIC_SCSI_DBG(KERN_DEBUG,
820 				      fnic->host, fnic->fnic_num,
821 				      "fnic flogi reg failed: %s\n",
822 				      fnic_fcpio_status_to_str(hdr_status));
823 			fnic->state = FNIC_IN_ETH_MODE;
824 			ret = -1;
825 		}
826 	} else {
827 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
828 			      "Unexpected fnic state %s while"
829 			      " processing flogi reg completion\n",
830 			      fnic_state_to_str(fnic->state));
831 		ret = -1;
832 	}
833 
834 	if (!ret) {
835 		if (fnic->stop_rx_link_events) {
836 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
837 			goto reg_cmpl_handler_end;
838 		}
839 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
840 
841 		queue_work(fnic_event_queue, &fnic->flush_work);
842 		queue_work(fnic_event_queue, &fnic->frame_work);
843 	} else {
844 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
845 	}
846 
847 reg_cmpl_handler_end:
848 	return ret;
849 }
850 
851 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
852 					u16 request_out)
853 {
854 	if (wq->to_clean_index <= wq->to_use_index) {
855 		/* out of range, stale request_out index */
856 		if (request_out < wq->to_clean_index ||
857 		    request_out >= wq->to_use_index)
858 			return 0;
859 	} else {
860 		/* out of range, stale request_out index */
861 		if (request_out < wq->to_clean_index &&
862 		    request_out >= wq->to_use_index)
863 			return 0;
864 	}
865 	/* request_out index is in range */
866 	return 1;
867 }
868 
869 
870 /*
871  * Mark that an ack was received and store the ack index. If multiple acks
872  * are received before the Tx thread cleans up, the latest value is used,
873  * which is the correct behavior. This state should be in the copy WQ
874  * instead of in the fnic.
875  */
876 static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
877 					  unsigned int cq_index,
878 					  struct fcpio_fw_req *desc)
879 {
880 	struct vnic_wq_copy *wq;
881 	u16 request_out = desc->u.ack.request_out;
882 	unsigned long flags;
883 	u64 *ox_id_tag = (u64 *)(void *)desc;
884 	unsigned int wq_index = cq_index;
885 
886 	/* mark the ack state */
887 	wq = &fnic->hw_copy_wq[cq_index];
888 	spin_lock_irqsave(&fnic->wq_copy_lock[wq_index], flags);
889 
890 	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
891 	if (is_ack_index_in_range(wq, request_out)) {
892 		fnic->fw_ack_index[wq_index] = request_out;
893 		fnic->fw_ack_recd[wq_index] = 1;
894 	} else
895 		atomic64_inc(
896 			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
897 
898 	spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags);
899 	FNIC_TRACE(fnic_fcpio_ack_handler,
900 		  fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
901 		  ox_id_tag[4], ox_id_tag[5]);
902 }
903 
904 /*
905  * fnic_fcpio_icmnd_cmpl_handler
906  * Routine to handle icmnd completions
907  */
908 static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
909 					 struct fcpio_fw_req *desc)
910 {
911 	u8 type;
912 	u8 hdr_status;
913 	struct fcpio_tag ftag;
914 	u32 id;
915 	u64 xfer_len = 0;
916 	struct fcpio_icmnd_cmpl *icmnd_cmpl;
917 	struct fnic_io_req *io_req;
918 	struct scsi_cmnd *sc;
919 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
920 	unsigned long flags;
921 	u64 cmd_trace;
922 	unsigned long start_time;
923 	unsigned long io_duration_time;
924 	unsigned int hwq = 0;
925 	unsigned int mqtag = 0;
926 	unsigned int tag = 0;
927 
928 	/* Decode the cmpl description to get the io_req id */
929 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
930 	fcpio_tag_id_dec(&ftag, &id);
931 	icmnd_cmpl = &desc->u.icmnd_cmpl;
932 
933 	mqtag = id;
934 	tag = blk_mq_unique_tag_to_tag(mqtag);
935 	hwq = blk_mq_unique_tag_to_hwq(mqtag);
936 
937 	if (hwq != cq_index) {
938 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
939 			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
940 			hwq, mqtag, tag, cq_index);
941 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
942 			"hdr status: %s icmnd completion on the wrong queue\n",
943 			fnic_fcpio_status_to_str(hdr_status));
944 	}
945 
946 	if (tag >= fnic->fnic_max_tag_id) {
947 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
948 			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
949 			hwq, mqtag, tag, cq_index);
950 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
951 			"hdr status: %s Out of range tag\n",
952 			fnic_fcpio_status_to_str(hdr_status));
953 		return;
954 	}
955 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
956 
957 	sc = scsi_host_find_tag(fnic->host, id);
958 	WARN_ON_ONCE(!sc);
959 	if (!sc) {
960 		atomic64_inc(&fnic_stats->io_stats.sc_null);
961 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
962 		shost_printk(KERN_ERR, fnic->host,
963 			  "icmnd_cmpl sc is null - "
964 			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
965 			  fnic_fcpio_status_to_str(hdr_status), id, desc);
966 		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
967 			  fnic->host->host_no, id,
968 			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
969 			  (u64)icmnd_cmpl->_resvd0[0]),
970 			  ((u64)hdr_status << 16 |
971 			  (u64)icmnd_cmpl->scsi_status << 8 |
972 			  (u64)icmnd_cmpl->flags), desc,
973 			  (u64)icmnd_cmpl->residual, 0);
974 		return;
975 	}
976 
977 	io_req = fnic_priv(sc)->io_req;
978 	if (fnic->sw_copy_wq[hwq].io_req_table[tag] != io_req) {
979 		WARN(1, "%s: %d: hwq: %d mqtag: 0x%x tag: 0x%x io_req tag mismatch\n",
980 			__func__, __LINE__, hwq, mqtag, tag);
981 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
982 		return;
983 	}
984 
985 	WARN_ON_ONCE(!io_req);
986 	if (!io_req) {
987 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
988 		fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
989 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
990 		shost_printk(KERN_ERR, fnic->host,
991 			  "icmnd_cmpl io_req is null - "
992 			  "hdr status = %s tag = 0x%x sc 0x%p\n",
993 			  fnic_fcpio_status_to_str(hdr_status), id, sc);
994 		return;
995 	}
996 	start_time = io_req->start_time;
997 
998 	/* firmware completed the io */
999 	io_req->io_completed = 1;
1000 
1001 	/*
1002 	 *  if SCSI-ML has already issued abort on this command,
1003 	 *  set completion of the IO. The abts path will clean it up
1004 	 */
1005 	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1006 
1007 		/*
1008 		 * set the FNIC_IO_DONE so that this doesn't get
1009 		 * flagged as 'out of order' if it was not aborted
1010 		 */
1011 		fnic_priv(sc)->flags |= FNIC_IO_DONE;
1012 		fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
1013 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1014 		if (FCPIO_ABORTED == hdr_status)
1015 			fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
1016 
1017 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1018 			"icmnd_cmpl abts pending "
1019 			  "hdr status = %s tag = 0x%x sc = 0x%p "
1020 			  "scsi_status = %x residual = %d\n",
1021 			  fnic_fcpio_status_to_str(hdr_status),
1022 			  id, sc,
1023 			  icmnd_cmpl->scsi_status,
1024 			  icmnd_cmpl->residual);
1025 		return;
1026 	}
1027 
1028 	/* Mark the IO as complete */
1029 	fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
1030 
1031 	icmnd_cmpl = &desc->u.icmnd_cmpl;
1032 
1033 	switch (hdr_status) {
1034 	case FCPIO_SUCCESS:
1035 		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
1036 		xfer_len = scsi_bufflen(sc);
1037 
1038 		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
1039 			xfer_len -= icmnd_cmpl->residual;
1040 			scsi_set_resid(sc, icmnd_cmpl->residual);
1041 		}
1042 
1043 		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
1044 			atomic64_inc(&fnic_stats->misc_stats.check_condition);
1045 
1046 		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
1047 			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
1048 
1049 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1050 				"xfer_len: %llu", xfer_len);
1051 		break;
1052 
1053 	case FCPIO_TIMEOUT:          /* request was timed out */
1054 		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
1055 		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
1056 		break;
1057 
1058 	case FCPIO_ABORTED:          /* request was aborted */
1059 		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
1060 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1061 		break;
1062 
1063 	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
1064 		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
1065 		scsi_set_resid(sc, icmnd_cmpl->residual);
1066 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1067 		break;
1068 
1069 	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
1070 		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
1071 		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
1072 		break;
1073 
1074 	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
1075 		atomic64_inc(&fnic_stats->io_stats.io_not_found);
1076 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1077 		break;
1078 
1079 	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
1080 		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
1081 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1082 		break;
1083 
1084 	case FCPIO_FW_ERR:           /* request was terminated due to fw error */
1085 		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
1086 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1087 		break;
1088 
1089 	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
1090 		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
1091 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1092 		break;
1093 
1094 	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
1095 	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
1096 	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
1097 	default:
1098 		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
1099 		break;
1100 	}
1101 
1102 	/* Break link with the SCSI command */
1103 	fnic_priv(sc)->io_req = NULL;
1104 	io_req->sc = NULL;
1105 	fnic_priv(sc)->flags |= FNIC_IO_DONE;
1106 	fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
1107 
1108 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1109 
1110 	if (hdr_status != FCPIO_SUCCESS) {
1111 		atomic64_inc(&fnic_stats->io_stats.io_failures);
1112 		shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n",
1113 			     fnic_fcpio_status_to_str(hdr_status));
1114 	}
1115 
1116 	fnic_release_ioreq_buf(fnic, io_req, sc);
1117 
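	/* Pack completion status, flags, and CDB bytes into one 64-bit word for tracing */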
1118 	cmd_trace = ((u64)hdr_status << 56) |
1119 		  (u64)icmnd_cmpl->scsi_status << 48 |
1120 		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
1121 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1122 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
1123 
1124 	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
1125 		  sc->device->host->host_no, id, sc,
1126 		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
1127 		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
1128 		  jiffies_to_msecs(jiffies - start_time)),
1129 		  desc, cmd_trace, fnic_flags_and_state(sc));
1130 
1131 	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1132 		fnic_stats->host_stats.fcp_input_requests++;
1133 		fnic->fcp_input_bytes += xfer_len;
1134 	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
1135 		fnic_stats->host_stats.fcp_output_requests++;
1136 		fnic->fcp_output_bytes += xfer_len;
1137 	} else
1138 		fnic_stats->host_stats.fcp_control_requests++;
1139 
1140 	/* Call SCSI completion function to complete the IO */
1141 	scsi_done(sc);
1142 
1143 	mempool_free(io_req, fnic->io_req_pool);
1144 
1145 	atomic64_dec(&fnic_stats->io_stats.active_ios);
1146 	if (atomic64_read(&fnic->io_cmpl_skip))
1147 		atomic64_dec(&fnic->io_cmpl_skip);
1148 	else
1149 		atomic64_inc(&fnic_stats->io_stats.io_completions);
1150 
1151 
1152 	io_duration_time = jiffies_to_msecs(jiffies) -
1153 						jiffies_to_msecs(start_time);
1154 
1155 	if (io_duration_time <= 10)
1156 		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
1157 	else if (io_duration_time <= 100)
1158 		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
1159 	else if (io_duration_time <= 500)
1160 		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
1161 	else if (io_duration_time <= 5000)
1162 		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
1163 	else if (io_duration_time <= 10000)
1164 		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
1165 	else if (io_duration_time <= 30000)
1166 		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
1167 	else {
1168 		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
1169 
1170 		if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
1171 			atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
1172 	}
1173 }
1174 
1175 /* fnic_fcpio_itmf_cmpl_handler
1176  * Routine to handle itmf completions
1177  */
1178 static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_index,
1179 					struct fcpio_fw_req *desc)
1180 {
1181 	u8 type;
1182 	u8 hdr_status;
1183 	struct fcpio_tag ftag;
1184 	u32 id;
1185 	struct scsi_cmnd *sc = NULL;
1186 	struct fnic_io_req *io_req;
1187 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1188 	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
1189 	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1190 	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1191 	unsigned long flags;
1192 	unsigned long start_time;
1193 	unsigned int hwq = cq_index;
1194 	unsigned int mqtag;
1195 	unsigned int tag;
1196 
1197 	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
1198 	fcpio_tag_id_dec(&ftag, &id);
1199 
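	/* The upper tag bits carry the ITMF type (abort/device reset); the mask recovers the blk-mq tag */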
1200 	mqtag = id & FNIC_TAG_MASK;
1201 	tag = blk_mq_unique_tag_to_tag(id & FNIC_TAG_MASK);
1202 	hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK);
1203 
1204 	if (hwq != cq_index) {
1205 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1206 			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
1207 			hwq, mqtag, tag, cq_index);
1208 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1209 			"hdr status: %s ITMF completion on the wrong queue\n",
1210 			fnic_fcpio_status_to_str(hdr_status));
1211 	}
1212 
1213 	if (tag > fnic->fnic_max_tag_id) {
1214 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1215 			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
1216 			hwq, mqtag, tag, cq_index);
1217 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1218 			"hdr status: %s Tag out of range\n",
1219 			fnic_fcpio_status_to_str(hdr_status));
1220 		return;
1221 	} else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) {
1222 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1223 			"hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ",
1224 			hwq, mqtag, tag, cq_index);
1225 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1226 			"hdr status: %s Tag out of range\n",
1227 			fnic_fcpio_status_to_str(hdr_status));
1228 		return;
1229 	}
1230 
1231 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1232 
1233 	/* If the SC was allocated by sg3utils, the tag_id is
1234 	 * max_tag_id and the SC is retrieved from the io_req
1235 	 */
1236 	if ((mqtag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
1237 		io_req = fnic->sw_copy_wq[hwq].io_req_table[tag];
1238 		if (io_req)
1239 			sc = io_req->sc;
1240 	} else {
1241 		sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK);
1242 	}
1243 
1244 	WARN_ON_ONCE(!sc);
1245 	if (!sc) {
1246 		atomic64_inc(&fnic_stats->io_stats.sc_null);
1247 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1248 		shost_printk(KERN_ERR, fnic->host,
1249 			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
1250 			  fnic_fcpio_status_to_str(hdr_status), tag);
1251 		return;
1252 	}
1253 
1254 	io_req = fnic_priv(sc)->io_req;
1255 	WARN_ON_ONCE(!io_req);
1256 	if (!io_req) {
1257 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
1258 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1259 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
1260 		shost_printk(KERN_ERR, fnic->host,
1261 			  "itmf_cmpl io_req is null - "
1262 			  "hdr status = %s tag = 0x%x sc 0x%p\n",
1263 			  fnic_fcpio_status_to_str(hdr_status), tag, sc);
1264 		return;
1265 	}
1266 	start_time = io_req->start_time;
1267 
1268 	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
1269 		/* Abort and terminate completion of device reset req */
1270 		/* REVISIT : Add asserts about various flags */
1271 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1272 			"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n",
1273 			hwq, mqtag, tag,
1274 			fnic_fcpio_status_to_str(hdr_status));
1275 		fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
1276 		fnic_priv(sc)->abts_status = hdr_status;
1277 		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1278 		if (io_req->abts_done)
1279 			complete(io_req->abts_done);
1280 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1281 	} else if (id & FNIC_TAG_ABORT) {
1282 		/* Completion of abort cmd */
1283 		shost_printk(KERN_DEBUG, fnic->host,
1284 			"hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n",
1285 			hwq, mqtag, tag,
1286 			fnic_fcpio_status_to_str(hdr_status));
1287 		switch (hdr_status) {
1288 		case FCPIO_SUCCESS:
1289 			break;
1290 		case FCPIO_TIMEOUT:
1291 			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1292 				atomic64_inc(&abts_stats->abort_fw_timeouts);
1293 			else
1294 				atomic64_inc(
1295 					&term_stats->terminate_fw_timeouts);
1296 			break;
1297 		case FCPIO_ITMF_REJECTED:
1298 			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1299 				"abort reject recd. id %d\n",
1300 				(int)(id & FNIC_TAG_MASK));
1301 			break;
1302 		case FCPIO_IO_NOT_FOUND:
1303 			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1304 				atomic64_inc(&abts_stats->abort_io_not_found);
1305 			else
1306 				atomic64_inc(
1307 					&term_stats->terminate_io_not_found);
1308 			break;
1309 		default:
1310 			if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
1311 				atomic64_inc(&abts_stats->abort_failures);
1312 			else
1313 				atomic64_inc(
1314 					&term_stats->terminate_failures);
1315 			break;
1316 		}
1317 		if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
1318 			/* This is a late completion. Ignore it */
1319 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1320 			return;
1321 		}
1322 
1323 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
1324 		fnic_priv(sc)->abts_status = hdr_status;
1325 
1326 		/* If the status is IO not found consider it as success */
1327 		if (hdr_status == FCPIO_IO_NOT_FOUND)
1328 			fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
1329 
1330 		if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
1331 			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
1332 
1333 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1334 			      "abts cmpl recd. id %d status %s\n",
1335 			      (int)(id & FNIC_TAG_MASK),
1336 			      fnic_fcpio_status_to_str(hdr_status));
1337 
1338 		/*
1339 		 * If scsi_eh thread is blocked waiting for abts to complete,
1340 		 * signal completion to it. IO will be cleaned in the thread
1341 		 * else clean it in this context
1342 		 */
1343 		if (io_req->abts_done) {
1344 			complete(io_req->abts_done);
1345 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1346 			shost_printk(KERN_INFO, fnic->host,
1347 					"hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n",
1348 					hwq, mqtag, tag);
1349 		} else {
1350 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1351 				"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n",
1352 				hwq, mqtag,
1353 				tag, fnic_fcpio_status_to_str(hdr_status));
1354 			fnic_priv(sc)->io_req = NULL;
1355 			sc->result = (DID_ERROR << 16);
1356 			fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
1357 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1358 
1359 			fnic_release_ioreq_buf(fnic, io_req, sc);
1360 			mempool_free(io_req, fnic->io_req_pool);
1361 			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1362 				   sc->device->host->host_no, id,
1363 				   sc,
1364 				   jiffies_to_msecs(jiffies - start_time),
1365 				   desc,
1366 				   (((u64)hdr_status << 40) |
1367 				    (u64)sc->cmnd[0] << 32 |
1368 				    (u64)sc->cmnd[2] << 24 |
1369 				    (u64)sc->cmnd[3] << 16 |
1370 				    (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1371 				   fnic_flags_and_state(sc));
1372 			scsi_done(sc);
1373 			atomic64_dec(&fnic_stats->io_stats.active_ios);
1374 			if (atomic64_read(&fnic->io_cmpl_skip))
1375 				atomic64_dec(&fnic->io_cmpl_skip);
1376 			else
1377 				atomic64_inc(&fnic_stats->io_stats.io_completions);
1378 		}
1379 	} else if (id & FNIC_TAG_DEV_RST) {
1380 		/* Completion of device reset */
1381 		shost_printk(KERN_INFO, fnic->host,
1382 			"hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n",
1383 			hwq, mqtag,
1384 			tag, fnic_fcpio_status_to_str(hdr_status));
1385 		fnic_priv(sc)->lr_status = hdr_status;
1386 		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1387 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1388 			fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
1389 			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1390 				  sc->device->host->host_no, id, sc,
1391 				  jiffies_to_msecs(jiffies - start_time),
1392 				  desc, 0, fnic_flags_and_state(sc));
1393 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1394 				"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n",
1395 				hwq, mqtag,
1396 				tag, fnic_fcpio_status_to_str(hdr_status));
1397 			return;
1398 		}
1399 		if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
1400 			/* Need to wait for terminate completion */
1401 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1402 			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
1403 				  sc->device->host->host_no, id, sc,
1404 				  jiffies_to_msecs(jiffies - start_time),
1405 				  desc, 0, fnic_flags_and_state(sc));
1406 			FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1407 				"dev reset cmpl recd after time out. "
1408 				"id %d status %s\n",
1409 				(int)(id & FNIC_TAG_MASK),
1410 				fnic_fcpio_status_to_str(hdr_status));
1411 			return;
1412 		}
1413 		fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
1414 		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1415 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
1416 			"hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n",
1417 			hwq, mqtag,
1418 			tag, fnic_fcpio_status_to_str(hdr_status));
1419 		if (io_req->dr_done)
1420 			complete(io_req->dr_done);
1421 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1422 
1423 	} else {
1424 		shost_printk(KERN_ERR, fnic->host,
1425 			"%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n",
1426 			__func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
1427 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1428 	}
1429 
1430 }
1431 
1432 /*
1433  * fnic_fcpio_cmpl_handler
1434  * Routine to service the cq for wq_copy
1435  */
1436 static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
1437 				   unsigned int cq_index,
1438 				   struct fcpio_fw_req *desc)
1439 {
1440 	struct fnic *fnic = vnic_dev_priv(vdev);
1441 
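	/* Each completion descriptor (ACKs excluded) retires one outstanding firmware request */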
1442 	switch (desc->hdr.type) {
1443 	case FCPIO_ICMND_CMPL: /* fw completed a command */
1444 	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1445 	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1446 	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1447 	case FCPIO_RESET_CMPL: /* fw completed reset */
1448 		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1449 		break;
1450 	default:
1451 		break;
1452 	}
1453 
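	/* Convert the global CQ index into an index relative to the first copy WQ completion queue */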
1454 	cq_index -= fnic->copy_wq_base;
1455 
1456 	switch (desc->hdr.type) {
1457 	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
1458 		fnic_fcpio_ack_handler(fnic, cq_index, desc);
1459 		break;
1460 
1461 	case FCPIO_ICMND_CMPL: /* fw completed a command */
1462 		fnic_fcpio_icmnd_cmpl_handler(fnic, cq_index, desc);
1463 		break;
1464 
1465 	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
1466 		fnic_fcpio_itmf_cmpl_handler(fnic, cq_index, desc);
1467 		break;
1468 
1469 	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
1470 	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
1471 		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
1472 		break;
1473 
1474 	case FCPIO_RESET_CMPL: /* fw completed reset */
1475 		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
1476 		break;
1477 
1478 	default:
1479 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1480 			      "firmware completion type %d\n",
1481 			      desc->hdr.type);
1482 		break;
1483 	}
1484 
1485 	return 0;
1486 }
1487 
1488 /*
1489  * fnic_wq_copy_cmpl_handler
1490  * Routine to process wq copy
1491  */
1492 int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index)
1493 {
1494 	unsigned int cur_work_done;
1495 	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1496 	u64 start_jiffies = 0;
1497 	u64 end_jiffies = 0;
1498 	u64 delta_jiffies = 0;
1499 	u64 delta_ms = 0;
1500 
1501 	start_jiffies = jiffies;
1502 	cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
1503 					fnic_fcpio_cmpl_handler,
1504 					copy_work_to_do);
1505 	end_jiffies = jiffies;
1506 	delta_jiffies = end_jiffies - start_jiffies;
1507 	if (delta_jiffies > (u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
1508 		atomic64_set(&misc_stats->max_isr_jiffies, delta_jiffies);
1509 		delta_ms = jiffies_to_msecs(delta_jiffies);
1510 		atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
1511 		atomic64_set(&misc_stats->corr_work_done, cur_work_done);
1512 	}
1513 
1514 	return cur_work_done;
1515 }
1516 
1517 static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
1518 {
1519 	struct request *const rq = scsi_cmd_to_rq(sc);
1520 	struct fnic *fnic = data;
1521 	struct fnic_io_req *io_req;
1522 	unsigned long start_time = 0;
1523 	unsigned long flags;
1524 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1525 	uint16_t hwq = 0;
1526 	int tag;
1527 	int mqtag;
1528 
1529 	mqtag = blk_mq_unique_tag(rq);
1530 	hwq = blk_mq_unique_tag_to_hwq(mqtag);
1531 	tag = blk_mq_unique_tag_to_tag(mqtag);
1532 
1533 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1534 
1535 	fnic->sw_copy_wq[hwq].io_req_table[tag] = NULL;
1536 
1537 	io_req = fnic_priv(sc)->io_req;
1538 	if (!io_req) {
1539 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1540 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1541 			"hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n",
1542 			hwq, mqtag, tag, fnic_priv(sc)->flags);
1543 		return true;
1544 	}
1545 
1546 	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
1547 		!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
1548 		/*
1549 		 * We will be here only when FW completes reset
1550 		 * without sending completions for outstanding ios.
1551 		 */
1552 		fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1553 		if (io_req && io_req->dr_done)
1554 			complete(io_req->dr_done);
1555 		else if (io_req && io_req->abts_done)
1556 			complete(io_req->abts_done);
1557 
1558 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1559 		return true;
1560 	} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
1561 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1562 		return true;
1563 	}
1564 
1565 	fnic_priv(sc)->io_req = NULL;
1566 	io_req->sc = NULL;
1567 	start_time = io_req->start_time;
1568 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1569 
1570 	/*
1571 	 * If there is a scsi_cmnd associated with this io_req, then
1572 	 * free the corresponding state
1573 	 */
1574 	fnic_release_ioreq_buf(fnic, io_req, sc);
1575 	mempool_free(io_req, fnic->io_req_pool);
1576 
1577 	sc->result = DID_TRANSPORT_DISRUPTED << 16;
1578 	FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1579 	"mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
1580 		mqtag, tag, sc, (jiffies - start_time));
1581 
1582 	if (atomic64_read(&fnic->io_cmpl_skip))
1583 		atomic64_dec(&fnic->io_cmpl_skip);
1584 	else
1585 		atomic64_inc(&fnic_stats->io_stats.io_completions);
1586 
1587 	FNIC_TRACE(fnic_cleanup_io,
1588 			   sc->device->host->host_no, tag, sc,
1589 			   jiffies_to_msecs(jiffies - start_time),
1590 			   0, ((u64) sc->cmnd[0] << 32 |
1591 				   (u64) sc->cmnd[2] << 24 |
1592 				   (u64) sc->cmnd[3] << 16 |
1593 				   (u64) sc->cmnd[4] << 8 | sc->cmnd[5]),
1594 			   (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)->state));
1596 
1597 	/* Complete the command to SCSI */
1598 	scsi_done(sc);
1599 	return true;
1600 }
1601 
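/*
 * fnic_cleanup_io
 * Clean up all outstanding IOs after a firmware reset. Walks the busy
 * commands via scsi_host_busy_iter(), handles the reserved sg3utils
 * device-reset tag separately, and then waits for the outstanding
 * ioreq count to drain to zero.
 */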
1602 static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
1603 {
1604 	unsigned int io_count = 0;
1605 	unsigned long flags;
1606 	struct fnic_io_req *io_req = NULL;
1607 	struct scsi_cmnd *sc = NULL;
1608 
1609 	io_count = fnic_count_all_ioreqs(fnic);
1610 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1611 				  "Outstanding ioreq count: %d active io count: %lld Waiting\n",
1612 				  io_count,
1613 				  atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1614 
1615 	scsi_host_busy_iter(fnic->host,
1616 						fnic_cleanup_io_iter, fnic);
1617 
1618 	/* with sg3utils device reset, SC needs to be retrieved from ioreq */
1619 	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
1620 	io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id];
1621 	if (io_req) {
1622 		sc = io_req->sc;
1623 		if (sc) {
1624 			if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
1625 				&& !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
1626 				fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
1627 				if (io_req && io_req->dr_done)
1628 					complete(io_req->dr_done);
1629 			}
1630 		}
1631 	}
1632 	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
1633 
1634 	while ((io_count = fnic_count_all_ioreqs(fnic))) {
1635 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1636 		  "Outstanding ioreq count: %d active io count: %lld Waiting\n",
1637 		  io_count,
1638 		  atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1639 
1640 		schedule_timeout(msecs_to_jiffies(100));
1641 	}
1642 }
1643 
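/*
 * fnic_wq_copy_cleanup_handler
 * Called when a copy WQ descriptor is cleaned up (e.g. on firmware
 * reset). Looks up the SCSI command for the descriptor tag, frees its
 * io_req if one is still attached, and completes the command with
 * DID_NO_CONNECT.
 */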
1644 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
1645 				  struct fcpio_host_req *desc)
1646 {
1647 	u32 id;
1648 	struct fnic *fnic = vnic_dev_priv(wq->vdev);
1649 	struct fnic_io_req *io_req;
1650 	struct scsi_cmnd *sc;
1651 	unsigned long flags;
1652 	unsigned long start_time = 0;
1653 	uint16_t hwq;
1654 
1655 	/* get the tag reference */
1656 	fcpio_tag_id_dec(&desc->hdr.tag, &id);
1657 	id &= FNIC_TAG_MASK;
1658 
1659 	if (id >= fnic->fnic_max_tag_id)
1660 		return;
1661 
1662 	sc = scsi_host_find_tag(fnic->host, id);
1663 	if (!sc)
1664 		return;
1665 
1666 	hwq = blk_mq_unique_tag_to_hwq(id);
1667 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1668 
1669 	/* Get the IO context which this desc refers to */
1670 	io_req = fnic_priv(sc)->io_req;
1671 
1672 	/* fnic interrupts are turned off by now */
1673 
1674 	if (!io_req) {
1675 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1676 		goto wq_copy_cleanup_scsi_cmd;
1677 	}
1678 
1679 	fnic_priv(sc)->io_req = NULL;
1680 	io_req->sc = NULL;
1681 	fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(id)] = NULL;
1682 
1683 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1684 
1685 	start_time = io_req->start_time;
1686 	fnic_release_ioreq_buf(fnic, io_req, sc);
1687 	mempool_free(io_req, fnic->io_req_pool);
1688 
1689 wq_copy_cleanup_scsi_cmd:
1690 	sc->result = DID_NO_CONNECT << 16;
1691 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1692 		      "wq_copy_cleanup_handler: DID_NO_CONNECT\n");
1693 
1694 	FNIC_TRACE(fnic_wq_copy_cleanup_handler,
1695 		   sc->device->host->host_no, id, sc,
1696 		   jiffies_to_msecs(jiffies - start_time),
1697 		   0, ((u64)sc->cmnd[0] << 32 |
1698 		       (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
1699 		       (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
1700 		   fnic_flags_and_state(sc));
1701 
1702 	scsi_done(sc);
1703 }
1704 
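/*
 * fnic_queue_abort_io_req
 * Queue an abort or terminate task management (ITMF) request for the
 * given tag to the firmware copy WQ. Returns 0 on success, 1 if IO is
 * blocked or no WQ descriptors are available.
 */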
1705 static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
1706 					  u32 task_req, u8 *fc_lun,
1707 					  struct fnic_io_req *io_req,
1708 					  unsigned int hwq)
1709 {
1710 	struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq];
1711 	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
1712 	unsigned long flags;
1713 	struct fnic_tport_s *tport = io_req->tport;
1714 
1715 	spin_lock_irqsave(&fnic->fnic_lock, flags);
1716 	if (unlikely(fnic_chk_state_flags_locked(fnic,
1717 						FNIC_FLAGS_IO_BLOCKED))) {
1718 		atomic_dec(&fnic->in_flight);
1719 		atomic_dec(&tport->in_flight);
1720 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1721 		return 1;
1722 	} else
1723 		atomic_inc(&fnic->in_flight);
1724 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1725 
1726 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1727 
1728 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
1729 		free_wq_copy_descs(fnic, wq, hwq);
1730 
1731 	if (!vnic_wq_copy_desc_avail(wq)) {
1732 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1733 		atomic_dec(&fnic->in_flight);
1734 		atomic_dec(&tport->in_flight);
1735 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1736 			"fnic_queue_abort_io_req: failure: no descriptors\n");
1737 		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
1738 		return 1;
1739 	}
1740 	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
1741 				     0, task_req, tag, fc_lun, io_req->port_id,
1742 				     fnic->config.ra_tov, fnic->config.ed_tov);
1743 
1744 	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
1745 	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
1746 		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
1747 		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
1748 		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
1749 
1750 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1751 	atomic_dec(&fnic->in_flight);
1752 
1753 	return 0;
1754 }
1755 
1756 struct fnic_rport_abort_io_iter_data {
1757 	struct fnic *fnic;
1758 	u32 port_id;
1759 	int term_cnt;
1760 };
1761 
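/*
 * fnic_rport_abort_io_iter
 * Per-command iterator used by fnic_rport_exch_reset(). For each IO
 * still pending with firmware on the remote port being reset, mark it
 * ABTS_PENDING and queue a terminate (ABTS) request to firmware.
 */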
1762 static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
1763 {
1764 	struct request *const rq = scsi_cmd_to_rq(sc);
1765 	struct fnic_rport_abort_io_iter_data *iter_data = data;
1766 	struct fnic *fnic = iter_data->fnic;
1767 	int abt_tag = 0;
1768 	struct fnic_io_req *io_req;
1769 	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
1770 	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1771 	struct scsi_lun fc_lun;
1772 	enum fnic_ioreq_state old_ioreq_state;
1773 	uint16_t hwq = 0;
1774 	unsigned long flags;
1775 
1776 	abt_tag = blk_mq_unique_tag(rq);
1777 	hwq = blk_mq_unique_tag_to_hwq(abt_tag);
1778 
1779 	if (!sc) {
1780 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1781 					  "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq);
1782 		return true;
1783 	}
1784 
1785 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1786 	io_req = fnic_priv(sc)->io_req;
1787 	if (!io_req || io_req->port_id != iter_data->port_id) {
1788 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1789 		return true;
1790 	}
1791 
1792 	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
1793 	    !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
1794 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1795 			"hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n",
1796 			hwq, abt_tag, fnic_priv(sc)->flags);
1797 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1798 		return true;
1799 	}
1800 
1801 	/*
1802 	 * Found IO that is still pending with firmware and
1803 	 * belongs to rport that went away
1804 	 */
1805 	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
1806 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1807 		return true;
1808 	}
1809 
1810 	if (io_req->abts_done) {
1811 		shost_printk(KERN_ERR, fnic->host,
1812 			"fnic_rport_exch_reset: io_req->abts_done is set state is %s\n",
1813 			fnic_ioreq_state_to_str(fnic_priv(sc)->state));
1814 	}
1815 
1816 	if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
1817 		shost_printk(KERN_ERR, fnic->host,
1818 			"rport_exch_reset IO not yet issued %p abt_tag 0x%x",
1819 			sc, abt_tag);
1820 		shost_printk(KERN_ERR, fnic->host,
1821 			"flags %x state %d\n", fnic_priv(sc)->flags,
1822 			fnic_priv(sc)->state);
1823 	}
1824 	old_ioreq_state = fnic_priv(sc)->state;
1825 	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
1826 	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
1827 
1828 	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
1829 		atomic64_inc(&reset_stats->device_reset_terminates);
1830 		abt_tag |= FNIC_TAG_DEV_RST;
1831 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1832 					  "dev reset sc 0x%p\n", sc);
1833 	}
1834 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1835 		      "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc);
1836 	WARN_ON_ONCE(io_req->abts_done);
1837 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1838 		      "fnic_rport_exch_reset: Issuing abts\n");
1839 
1840 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1841 
1842 	/* Queue the abort command to firmware */
1843 	int_to_scsilun(sc->device->lun, &fc_lun);
1844 
1845 	if (fnic_queue_abort_io_req(fnic, abt_tag,
1846 				    FCPIO_ITMF_ABT_TASK_TERM,
1847 				    fc_lun.scsi_lun, io_req, hwq)) {
1848 		/*
1849 		 * Revert the cmd state back to old state, if
1850 		 * it hasn't changed in between. This cmd will get
1851 		 * aborted later by scsi_eh, or cleaned up during
1852 		 * lun reset
1853 		 */
1854 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1855 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
1856 			"hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n",
1857 			hwq, abt_tag, fnic_priv(sc)->flags);
1858 		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
1859 			fnic_priv(sc)->state = old_ioreq_state;
1860 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1861 	} else {
1862 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
1863 		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
1864 			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
1865 		else
1866 			fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
1867 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
1868 		atomic64_inc(&term_stats->terminates);
1869 		iter_data->term_cnt++;
1870 	}
1871 
1872 	return true;
1873 }
1874 
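/*
 * fnic_rport_exch_reset
 * Terminate all IOs outstanding to a remote port (e.g. when the rport
 * goes away), then wait for the per-port ioreq count to drain.
 */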
1875 void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
1876 {
1877 	unsigned int io_count = 0;
1878 	unsigned long flags;
1879 	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
1880 	struct fnic_rport_abort_io_iter_data iter_data = {
1881 		.fnic = fnic,
1882 		.port_id = port_id,
1883 		.term_cnt = 0,
1884 	};
1885 
1886 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1887 				  "fnic rport exchange reset for tport: 0x%06x\n",
1888 				  port_id);
1889 
1890 	if (fnic->in_remove)
1891 		return;
1892 
1893 	io_count = fnic_count_ioreqs(fnic, port_id);
1894 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1895 				  "Starting terminates: rport:0x%x  portid-io-count: %d active-io-count: %lld\n",
1896 				  port_id, io_count,
1897 				  atomic64_read(&fnic->fnic_stats.io_stats.active_ios));
1898 
1899 	spin_lock_irqsave(&fnic->fnic_lock, flags);
1900 	/* Bump in_flight counter to hold off fnic_fw_reset_handler. */
1901 	atomic_inc(&fnic->in_flight);
1902 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) {
1903 		atomic_dec(&fnic->in_flight);
1904 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1905 		return;
1906 	}
1907 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1908 
1909 	scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter,
1910 			    &iter_data);
1911 
1912 	if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
1913 		atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
1914 
1915 	atomic_dec(&fnic->in_flight);
1916 
1917 	while ((io_count = fnic_count_ioreqs(fnic, port_id)))
1918 		schedule_timeout(msecs_to_jiffies(1000));
1919 
1920 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
1921 				  "rport: 0x%x remaining portid-io-count: %d ",
1922 				  port_id, io_count);
1923 }
1924 
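/*
 * fnic_terminate_rport_io
 * FC transport callback invoked when a remote port is being torn down.
 * If the tport is still known, terminate its outstanding IOs via
 * fnic_rport_exch_reset().
 */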
1925 void fnic_terminate_rport_io(struct fc_rport *rport)
1926 {
1927 	struct fnic_tport_s *tport;
1928 	struct rport_dd_data_s *rdd_data;
1929 	struct fnic_iport_s *iport = NULL;
1930 	struct fnic *fnic = NULL;
1931 
1932 	if (!rport) {
1933 		pr_err("rport is NULL\n");
1934 		return;
1935 	}
1936 
1937 	rdd_data = rport->dd_data;
1938 	if (rdd_data) {
1939 		tport = rdd_data->tport;
1940 		if (!tport) {
1941 			pr_err(
1942 			"term rport io called after tport is deleted. Returning 0x%8x\n",
1943 		   rport->port_id);
1944 		} else {
1945 			pr_err(
1946 			   "term rport io called after tport is set 0x%8x\n",
1947 			   rport->port_id);
1948 			pr_err(
1949 			   "tport may be rediscovered\n");
1950 
1951 			iport = (struct fnic_iport_s *) tport->iport;
1952 			fnic = iport->fnic;
1953 			fnic_rport_exch_reset(fnic, rport->port_id);
1954 		}
1955 	}
1956 }
1957 
1958 /*
1959  * FCP-SCSI specific handling for module unload
1960  */
1962 void fnic_scsi_unload(struct fnic *fnic)
1963 {
1964 	unsigned long flags;
1965 
1966 	/*
1967 	 * Mark state so that the workqueue thread stops forwarding
1968 	 * received frames and link events to the local port. ISR and
1969 	 * other threads that can queue work items will also stop
1970 	 * creating work items on the fnic workqueue
1971 	 */
1972 	spin_lock_irqsave(&fnic->fnic_lock, flags);
1973 	fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT;
1974 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1975 
1976 	if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT)
1977 		fnic_scsi_fcpio_reset(fnic);
1978 
1979 	spin_lock_irqsave(&fnic->fnic_lock, flags);
1980 	fnic->in_remove = 1;
1981 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1982 
1983 	fnic_flush_tport_event_list(fnic);
1984 	fnic_delete_fcp_tports(fnic);
1985 }
1986 
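/*
 * Final SCSI-side cleanup on unload: remove the FC transport and SCSI
 * host, then free the per-hwq io_req lookup tables.
 */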
1987 void fnic_scsi_unload_cleanup(struct fnic *fnic)
1988 {
1989 	int hwq = 0;
1990 
1991 	fc_remove_host(fnic->host);
1992 	scsi_remove_host(fnic->host);
1993 	for (hwq = 0; hwq < fnic->wq_copy_count; hwq++)
1994 		kfree(fnic->sw_copy_wq[hwq].io_req_table);
1995 }
1996 
1997 /*
1998  * This function is exported to SCSI for sending abort cmnds.
1999  * A SCSI IO is represented by an io_req in the driver.
2000  * The io_req is linked to the SCSI cmd, and thus to the ULP's IO.
2001  */
2002 int fnic_abort_cmd(struct scsi_cmnd *sc)
2003 {
2004 	struct request *const rq = scsi_cmd_to_rq(sc);
2005 	struct fnic_iport_s *iport;
2006 	struct fnic_tport_s *tport;
2007 	struct fnic *fnic;
2008 	struct fnic_io_req *io_req = NULL;
2009 	struct fc_rport *rport;
2010 	struct rport_dd_data_s *rdd_data;
2011 	unsigned long flags;
2012 	unsigned long start_time = 0;
2013 	int ret = SUCCESS;
2014 	u32 task_req = 0;
2015 	struct scsi_lun fc_lun;
2016 	struct fnic_stats *fnic_stats;
2017 	struct abort_stats *abts_stats;
2018 	struct terminate_stats *term_stats;
2019 	enum fnic_ioreq_state old_ioreq_state;
2020 	int mqtag;
2021 	unsigned long abt_issued_time;
2022 	uint16_t hwq = 0;
2023 
2024 	DECLARE_COMPLETION_ONSTACK(tm_done);
2025 
2026 	/* Wait for rport to unblock */
2027 	fc_block_scsi_eh(sc);
2028 
2029 	/* Get local-port, check ready and link up */
2030 	fnic = *((struct fnic **) shost_priv(sc->device->host));
2031 
2032 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2033 	iport = &fnic->iport;
2034 
2035 	fnic_stats = &fnic->fnic_stats;
2036 	abts_stats = &fnic->fnic_stats.abts_stats;
2037 	term_stats = &fnic->fnic_stats.term_stats;
2038 
2039 	rport = starget_to_rport(scsi_target(sc->device));
2040 	mqtag = blk_mq_unique_tag(rq);
2041 	hwq = blk_mq_unique_tag_to_hwq(mqtag);
2042 
2043 	fnic_priv(sc)->flags = FNIC_NO_FLAGS;
2044 
2045 	rdd_data = rport->dd_data;
2046 	tport = rdd_data->tport;
2047 
2048 	if (!tport) {
2049 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2050 			  "Abort cmd called after tport delete! rport fcid: 0x%x",
2051 			  rport->port_id);
2052 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2053 			  "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n",
2054 			  sc->device->lun, hwq, mqtag,
2055 			  sc->cmnd[0], fnic_priv(sc)->flags);
2056 		ret = FAILED;
2057 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2058 		goto fnic_abort_cmd_end;
2059 	}
2060 
2061 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2062 	  "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x",
2063 	  rport->port_id, sc->device->lun, hwq, mqtag);
2064 
2065 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2066 				  "Op: 0x%x flags: 0x%x\n",
2067 				  sc->cmnd[0],
2068 				  fnic_priv(sc)->flags);
2069 
2070 	if (iport->state != FNIC_IPORT_STATE_READY) {
2071 		atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
2072 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2073 					  "iport NOT in READY state");
2074 		ret = FAILED;
2075 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2076 		goto fnic_abort_cmd_end;
2077 	}
2078 
2079 	if ((tport->state != FDLS_TGT_STATE_READY) &&
2080 		(tport->state != FDLS_TGT_STATE_ADISC)) {
2081 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2082 					  "tport state: %d\n", tport->state);
2083 		ret = FAILED;
2084 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2085 		goto fnic_abort_cmd_end;
2086 	}
2087 
2088 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2089 	/*
2090 	 * Avoid a race between SCSI issuing the abort and the device
2091 	 * completing the command.
2092 	 *
2093 	 * If the command is already completed by the fw cmpl code,
2094 	 * we just return SUCCESS from here. This means that the abort
2095 	 * succeeded. In the SCSI ML, since the timeout for the command has
2096 	 * already fired, the completion won't actually complete the command
2097 	 * and it will be considered an aborted command.
2098 	 *
2099 	 * .io_req will not be cleared except while holding the per-hwq wq_copy_lock.
2100 	 */
2101 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2102 	io_req = fnic_priv(sc)->io_req;
2103 	if (!io_req) {
2104 		ret = FAILED;
2105 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2106 		goto fnic_abort_cmd_end;
2107 	}
2108 
2109 	io_req->abts_done = &tm_done;
2110 
2111 	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
2112 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2113 		goto wait_pending;
2114 	}
2115 
2116 	abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
2117 	if (abt_issued_time <= 6000)
2118 		atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
2119 	else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
2120 		atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
2121 	else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
2122 		atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
2123 	else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
2124 		atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
2125 	else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
2126 		atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
2127 	else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
2128 		atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
2129 	else
2130 		atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
2131 
2132 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2133 		"CDB Opcode: 0x%02x Abort issued time: %lu msec\n",
2134 		sc->cmnd[0], abt_issued_time);
2135 	/*
2136 	 * Command is still pending, need to abort it
2137 	 * If the firmware completes the command after this point,
2138 	 * the completion won't be passed up to the mid-layer, since the
2139 	 * abort has already started.
2140 	 */
2141 	old_ioreq_state = fnic_priv(sc)->state;
2142 	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
2143 	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
2144 
2145 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2146 
2147 	/*
2148 	 * Check readiness of the remote port. If the path to remote
2149 	 * port is up, then send abts to the remote port to terminate
2150 	 * the IO. Else, just locally terminate the IO in the firmware
2151 	 */
2152 	if (fc_remote_port_chkready(rport) == 0)
2153 		task_req = FCPIO_ITMF_ABT_TASK;
2154 	else {
2155 		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
2156 		task_req = FCPIO_ITMF_ABT_TASK_TERM;
2157 	}
2158 
2159 	/* Now queue the abort command to firmware */
2160 	int_to_scsilun(sc->device->lun, &fc_lun);
2161 
2162 	if (fnic_queue_abort_io_req(fnic, mqtag, task_req, fc_lun.scsi_lun,
2163 				    io_req, hwq)) {
2164 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2165 		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
2166 			fnic_priv(sc)->state = old_ioreq_state;
2167 		io_req = fnic_priv(sc)->io_req;
2168 		if (io_req)
2169 			io_req->abts_done = NULL;
2170 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2171 		ret = FAILED;
2172 		goto fnic_abort_cmd_end;
2173 	}
2174 	if (task_req == FCPIO_ITMF_ABT_TASK) {
2175 		fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
2176 		atomic64_inc(&fnic_stats->abts_stats.aborts);
2177 	} else {
2178 		fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
2179 		atomic64_inc(&fnic_stats->term_stats.terminates);
2180 	}
2181 
2182 	/*
2183 	 * We queued an abort IO, wait for its completion.
2184 	 * Once the firmware completes the abort command, it will
2185 	 * wake up this thread.
2186 	 */
2187  wait_pending:
2188 	wait_for_completion_timeout(&tm_done,
2189 				    msecs_to_jiffies
2190 				    (2 * fnic->config.ra_tov +
2191 				     fnic->config.ed_tov));
2192 
2193 	/* Check the abort status */
2194 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2195 
2196 	io_req = fnic_priv(sc)->io_req;
2197 	if (!io_req) {
2198 		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
2199 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2200 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
2201 		ret = FAILED;
2202 		goto fnic_abort_cmd_end;
2203 	}
2204 	io_req->abts_done = NULL;
2205 
2206 	/* fw did not complete abort, timed out */
2207 	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
2208 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2209 		if (task_req == FCPIO_ITMF_ABT_TASK) {
2210 			atomic64_inc(&abts_stats->abort_drv_timeouts);
2211 		} else {
2212 			atomic64_inc(&term_stats->terminate_drv_timeouts);
2213 		}
2214 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
2215 		ret = FAILED;
2216 		goto fnic_abort_cmd_end;
2217 	}
2218 
2219 	/* IO out of order */
2220 
2221 	if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
2222 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2223 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2224 			      "Issuing host reset due to out of order IO\n");
2225 
2226 		ret = FAILED;
2227 		goto fnic_abort_cmd_end;
2228 	}
2229 
2230 	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
2231 
2232 	start_time = io_req->start_time;
2233 	/*
2234 	 * firmware completed the abort, check the status,
2235 	 * free the io_req if successful. If abort fails,
2236 	 * Device reset will clean the I/O.
2237 	 */
2238 	if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS ||
2239 		(fnic_priv(sc)->abts_status == FCPIO_ABORTED)) {
2240 		fnic_priv(sc)->io_req = NULL;
2241 		io_req->sc = NULL;
2242 	} else {
2243 		ret = FAILED;
2244 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2245 		goto fnic_abort_cmd_end;
2246 	}
2247 
2248 	fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] = NULL;
2249 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2250 
2251 	fnic_release_ioreq_buf(fnic, io_req, sc);
2252 	mempool_free(io_req, fnic->io_req_pool);
2253 
2254 	/* Call SCSI completion function to complete the IO */
2255 	sc->result = DID_ABORT << 16;
2256 	scsi_done(sc);
2257 	atomic64_dec(&fnic_stats->io_stats.active_ios);
2258 	if (atomic64_read(&fnic->io_cmpl_skip))
2259 		atomic64_dec(&fnic->io_cmpl_skip);
2260 	else
2261 		atomic64_inc(&fnic_stats->io_stats.io_completions);
2262 
2263 fnic_abort_cmd_end:
2264 	FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, mqtag, sc,
2265 		  jiffies_to_msecs(jiffies - start_time),
2266 		  0, ((u64)sc->cmnd[0] << 32 |
2267 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2268 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2269 		  fnic_flags_and_state(sc));
2270 
2271 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2272 		      "Returning from abort cmd type %x %s\n", task_req,
2273 		      (ret == SUCCESS) ?
2274 		      "SUCCESS" : "FAILED");
2275 	return ret;
2276 }
2277 
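/*
 * fnic_queue_dr_io_req
 * Queue a LUN reset (ITMF) request for the device-reset command to the
 * firmware copy WQ. Returns 0 on success, FAILED if IO is blocked, or
 * -EAGAIN if no WQ descriptors are available.
 */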
2278 static inline int fnic_queue_dr_io_req(struct fnic *fnic,
2279 				       struct scsi_cmnd *sc,
2280 				       struct fnic_io_req *io_req)
2281 {
2282 	struct vnic_wq_copy *wq;
2283 	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
2284 	struct scsi_lun fc_lun;
2285 	int ret = 0;
2286 	unsigned long flags;
2287 	uint16_t hwq = 0;
2288 	uint32_t tag = 0;
2289 	struct fnic_tport_s *tport = io_req->tport;
2290 
2291 	tag = io_req->tag;
2292 	hwq = blk_mq_unique_tag_to_hwq(tag);
2293 	wq = &fnic->hw_copy_wq[hwq];
2294 
2295 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2296 	if (unlikely(fnic_chk_state_flags_locked(fnic,
2297 						FNIC_FLAGS_IO_BLOCKED))) {
2298 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2299 		return FAILED;
2300 	} else {
2301 		atomic_inc(&fnic->in_flight);
2302 		atomic_inc(&tport->in_flight);
2303 	}
2304 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2305 
2306 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2307 
2308 	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[hwq])
2309 		free_wq_copy_descs(fnic, wq, hwq);
2310 
2311 	if (!vnic_wq_copy_desc_avail(wq)) {
2312 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2313 			  "queue_dr_io_req failure - no descriptors\n");
2314 		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
2315 		ret = -EAGAIN;
2316 		goto lr_io_req_end;
2317 	}
2318 
2319 	/* fill in the lun info */
2320 	int_to_scsilun(sc->device->lun, &fc_lun);
2321 
2322 	tag |= FNIC_TAG_DEV_RST;
2323 	fnic_queue_wq_copy_desc_itmf(wq, tag,
2324 				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
2325 				     fc_lun.scsi_lun, io_req->port_id,
2326 				     fnic->config.ra_tov, fnic->config.ed_tov);
2327 
2328 	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
2329 	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
2330 		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
2331 		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
2332 		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
2333 
2334 lr_io_req_end:
2335 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2336 	atomic_dec(&fnic->in_flight);
2337 	atomic_dec(&tport->in_flight);
2338 
2339 	return ret;
2340 }
2341 
2342 struct fnic_pending_aborts_iter_data {
2343 	struct fnic *fnic;
2344 	struct scsi_cmnd *lr_sc;
2345 	struct scsi_device *lun_dev;
2346 	int ret;
2347 };
2348 
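/*
 * fnic_pending_aborts_iter
 * Per-command iterator used by fnic_clean_pending_aborts(). For each IO
 * still pending on the LUN being reset (other than the reset command
 * itself), issue a terminate to firmware, wait for it to complete, and
 * return the command to the mid-layer with DID_RESET.
 */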
2349 static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
2350 {
2351 	struct request *const rq = scsi_cmd_to_rq(sc);
2352 	struct fnic_pending_aborts_iter_data *iter_data = data;
2353 	struct fnic *fnic = iter_data->fnic;
2354 	struct scsi_device *lun_dev = iter_data->lun_dev;
2355 	unsigned long abt_tag = 0;
2356 	uint16_t hwq = 0;
2357 	struct fnic_io_req *io_req;
2358 	unsigned long flags;
2359 	struct scsi_lun fc_lun;
2360 	DECLARE_COMPLETION_ONSTACK(tm_done);
2361 	enum fnic_ioreq_state old_ioreq_state;
2362 
2363 	if (sc == iter_data->lr_sc || sc->device != lun_dev)
2364 		return true;
2365 
2366 	abt_tag = blk_mq_unique_tag(rq);
2367 	hwq = blk_mq_unique_tag_to_hwq(abt_tag);
2368 
2369 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2370 	io_req = fnic_priv(sc)->io_req;
2371 	if (!io_req) {
2372 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2373 		return true;
2374 	}
2375 
2376 	/*
2377 	 * Found IO that is still pending with firmware and
2378 	 * belongs to the LUN that we are resetting
2379 	 */
2380 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2381 		      "Found IO in %s on lun\n",
2382 		      fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2383 
2384 	if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
2385 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2386 		return true;
2387 	}
2388 	if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
2389 	    (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
2390 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2391 			      "dev rst not pending sc 0x%p\n", sc);
2392 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2393 		return true;
2394 	}
2395 
2396 	if (io_req->abts_done)
2397 		shost_printk(KERN_ERR, fnic->host,
2398 			     "%s: io_req->abts_done is set state is %s\n",
2399 			     __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2400 	old_ioreq_state = fnic_priv(sc)->state;
2401 	/*
2402 	 * Any pending IO issued prior to reset is expected to be
2403 	 * in abts pending state; if not, we set FNIC_IOREQ_ABTS_PENDING
2404 	 * to indicate the IO has an abort pending.
2405 	 * When the IO completes, it is handed back and handled in this
2406 	 * function.
2407 	 */
2408 	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
2409 
2410 	BUG_ON(io_req->abts_done);
2411 
2412 	if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
2413 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2414 			      "dev rst sc 0x%p\n", sc);
2415 	}
2416 
2417 	fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
2418 	io_req->abts_done = &tm_done;
2419 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2420 
2421 	/* Now queue the abort command to firmware */
2422 	int_to_scsilun(sc->device->lun, &fc_lun);
2423 
2424 	if (fnic_queue_abort_io_req(fnic, abt_tag,
2425 				    FCPIO_ITMF_ABT_TASK_TERM,
2426 				    fc_lun.scsi_lun, io_req, hwq)) {
2427 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2428 		io_req = fnic_priv(sc)->io_req;
2429 		if (io_req)
2430 			io_req->abts_done = NULL;
2431 		if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
2432 			fnic_priv(sc)->state = old_ioreq_state;
2433 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2434 		iter_data->ret = FAILED;
2435 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2436 			"hwq: %d abt_tag: 0x%lx Abort could not be queued\n",
2437 			hwq, abt_tag);
2438 		return false;
2439 	} else {
2440 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2441 		if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
2442 			fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
2443 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2444 	}
2445 	fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
2446 
2447 	wait_for_completion_timeout(&tm_done, msecs_to_jiffies
2448 				    (fnic->config.ed_tov));
2449 
2450 	/* Recheck cmd state to check if it is now aborted */
2451 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2452 	io_req = fnic_priv(sc)->io_req;
2453 	if (!io_req) {
2454 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2455 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
2456 		return true;
2457 	}
2458 
2459 	io_req->abts_done = NULL;
2460 
2461 	/* if abort is still pending with fw, fail */
2462 	if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
2463 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2464 		fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
2465 		iter_data->ret = FAILED;
2466 		return false;
2467 	}
2468 	fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
2469 
2470 	/* original sc used for lr is handled by dev reset code */
2471 	if (sc != iter_data->lr_sc) {
2472 		fnic_priv(sc)->io_req = NULL;
2473 		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(abt_tag)] = NULL;
2474 	}
2475 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2476 
2477 	/* original sc used for lr is handled by dev reset code */
2478 	if (sc != iter_data->lr_sc) {
2479 		fnic_release_ioreq_buf(fnic, io_req, sc);
2480 		mempool_free(io_req, fnic->io_req_pool);
2481 	}
2482 
2483 	/*
2484 	 * Any IO returned during reset needs scsi_done() to be called so
2485 	 * that the scsi_cmnd is returned to the upper layer.
2486 	 */
2487 	/* Set result to let upper SCSI layer retry */
2488 	sc->result = DID_RESET << 16;
2489 	scsi_done(sc);
2490 
2491 	return true;
2492 }
2493 
2494 /*
2495  * Clean up any pending aborts on the lun
2496  * For each outstanding IO on this lun whose abort has not been completed
2497  * by the firmware, issue a local abort and wait for it to complete.
2498  * Return 0 if all commands were successfully aborted, 1 otherwise.
2499  */
2500 static int fnic_clean_pending_aborts(struct fnic *fnic,
2501 				     struct scsi_cmnd *lr_sc,
2502 				     bool new_sc)
2503 
2504 {
2505 	int ret = 0;
2506 	struct fnic_pending_aborts_iter_data iter_data = {
2507 		.fnic = fnic,
2508 		.lun_dev = lr_sc->device,
2509 		.ret = SUCCESS,
2510 	};
2511 
2512 	iter_data.lr_sc = lr_sc;
2513 
2514 	scsi_host_busy_iter(fnic->host,
2515 			    fnic_pending_aborts_iter, &iter_data);
2516 	if (iter_data.ret == FAILED) {
2517 		ret = iter_data.ret;
2518 		goto clean_pending_aborts_end;
2519 	}
2520 	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
2521 
2522 	/* walk again to check, if IOs are still pending in fw */
2523 	if (fnic_is_abts_pending(fnic, lr_sc))
2524 		ret = 1;
2525 
2526 clean_pending_aborts_end:
2527 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2528 			"exit status: %d\n", ret);
2529 	return ret;
2530 }
2531 
2532 /*
2533  * The SCSI EH thread issues a LUN reset when one or more commands on a LUN
2534  * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
2535  * on the LUN.
2536  */
2537 int fnic_device_reset(struct scsi_cmnd *sc)
2538 {
2539 	struct request *rq = scsi_cmd_to_rq(sc);
2540 	struct fnic *fnic;
2541 	struct fnic_io_req *io_req = NULL;
2542 	struct fc_rport *rport;
2543 	int status;
2544 	int count = 0;
2545 	int ret = FAILED;
2546 	unsigned long flags;
2547 	unsigned long start_time = 0;
2548 	struct scsi_lun fc_lun;
2549 	struct fnic_stats *fnic_stats;
2550 	struct reset_stats *reset_stats;
2551 	int mqtag = rq->tag;
2552 	DECLARE_COMPLETION_ONSTACK(tm_done);
2553 	bool new_sc = 0;
2554 	uint16_t hwq = 0;
2555 	struct fnic_iport_s *iport = NULL;
2556 	struct rport_dd_data_s *rdd_data;
2557 	struct fnic_tport_s *tport;
2558 	u32 old_soft_reset_count;
2559 	u32 old_link_down_cnt;
2560 	int exit_dr = 0;
2561 
2562 	/* Wait for rport to unblock */
2563 	fc_block_scsi_eh(sc);
2564 
2565 	/* Get local-port, check ready and link up */
2566 	fnic = *((struct fnic **) shost_priv(sc->device->host));
2567 	iport = &fnic->iport;
2568 
2569 	fnic_stats = &fnic->fnic_stats;
2570 	reset_stats = &fnic_stats->reset_stats;
2571 
2572 	atomic64_inc(&reset_stats->device_resets);
2573 
2574 	rport = starget_to_rport(scsi_target(sc->device));
2575 
2576 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2577 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2578 		"fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
2579 		rport->port_id, sc->device->lun, hwq, mqtag,
2580 		fnic_priv(sc)->flags);
2581 
2582 	rdd_data = rport->dd_data;
2583 	tport = rdd_data->tport;
2584 	if (!tport) {
2585 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2586 		  "Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
2587 		  rport->port_id, sc->device->lun);
2588 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2589 		goto fnic_device_reset_end;
2590 	}
2591 
2592 	if (iport->state != FNIC_IPORT_STATE_READY) {
2593 		atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
2594 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2595 					  "iport NOT in READY state");
2596 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2597 		goto fnic_device_reset_end;
2598 	}
2599 
2600 	if ((tport->state != FDLS_TGT_STATE_READY) &&
2601 		(tport->state != FDLS_TGT_STATE_ADISC)) {
2602 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2603 					  "tport state: %d\n", tport->state);
2604 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2605 		goto fnic_device_reset_end;
2606 	}
2607 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2608 
2609 	/* Check if remote port up */
2610 	if (fc_remote_port_chkready(rport)) {
2611 		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
2612 		goto fnic_device_reset_end;
2613 	}
2614 
2615 	fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
2616 
2617 	if (unlikely(mqtag < 0)) {
2618 		/*
2619 		 * For device reset issued through sg3utils, we let
2620 		 * only one LUN_RESET to go through and use a special
2621 		 * tag equal to max_tag_id so that we don't have to allocate
2622 		 * or free it. It won't interact with tags
2623 		 * allocated by mid layer.
2624 		 */
2625 		mutex_lock(&fnic->sgreset_mutex);
2626 		mqtag = fnic->fnic_max_tag_id;
2627 		new_sc = 1;
2628 	}  else {
2629 		mqtag = blk_mq_unique_tag(rq);
2630 		hwq = blk_mq_unique_tag_to_hwq(mqtag);
2631 	}
2632 
2633 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2634 	io_req = fnic_priv(sc)->io_req;
2635 
2636 	/*
2637 	 * If there is a io_req attached to this command, then use it,
2638 	 * else allocate a new one.
2639 	 */
2640 	if (!io_req) {
2641 		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
2642 		if (!io_req) {
2643 			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2644 			goto fnic_device_reset_end;
2645 		}
2646 		memset(io_req, 0, sizeof(*io_req));
2647 		io_req->port_id = rport->port_id;
2648 		io_req->tag = mqtag;
2649 		fnic_priv(sc)->io_req = io_req;
2650 		io_req->tport = tport;
2651 		io_req->sc = sc;
2652 
2653 		if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
2654 			WARN(1, "fnic<%d>: %s: tag 0x%x already exists\n",
2655 					fnic->fnic_num, __func__, blk_mq_unique_tag_to_tag(mqtag));
2656 
2657 		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] =
2658 				io_req;
2659 	}
2660 	io_req->dr_done = &tm_done;
2661 	fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
2662 	fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
2663 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2664 
2665 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);
2666 
2667 	/*
2668 	 * issue the device reset, if enqueue failed, clean up the ioreq
2669 	 * and break assoc with scsi cmd
2670 	 */
2671 	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
2672 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2673 		io_req = fnic_priv(sc)->io_req;
2674 		if (io_req)
2675 			io_req->dr_done = NULL;
2676 		goto fnic_device_reset_clean;
2677 	}
2678 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2679 	fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
2680 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2681 
2682 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2683 	old_link_down_cnt = iport->fnic->link_down_cnt;
2684 	old_soft_reset_count = fnic->soft_reset_count;
2685 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2686 
2687 	/*
2688 	 * Wait on the local completion for LUN reset.  The io_req may be
2689 	 * freed while we wait since we hold no lock.
2690 	 */
2691 	wait_for_completion_timeout(&tm_done,
2692 				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
2693 
2694 	/*
2695 	 * Wake up can be due to the following reasons:
2696 	 * 1) The device reset completed from target.
2697 	 * 2) Device reset timed out.
2698 	 * 3) A link-down/host_reset may have happened in between.
2699 	 * 4) The device reset was aborted and io_req->dr_done was called.
2700 	 */
2701 
2702 	exit_dr = 0;
2703 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2704 	if ((old_link_down_cnt != fnic->link_down_cnt) ||
2705 		(fnic->reset_in_progress) ||
2706 		(fnic->soft_reset_count != old_soft_reset_count) ||
2707 		(iport->state != FNIC_IPORT_STATE_READY))
2708 		exit_dr = 1;
2709 
2710 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2711 
2712 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2713 	io_req = fnic_priv(sc)->io_req;
2714 	if (!io_req) {
2715 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2716 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2717 				"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
2718 		goto fnic_device_reset_end;
2719 	}
2720 
2721 	if (exit_dr) {
2722 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2723 					  "Host reset called for fnic. Exit device reset\n");
2724 		io_req->dr_done = NULL;
2725 		goto fnic_device_reset_clean;
2726 	}
2727 	io_req->dr_done = NULL;
2728 
2729 	status = fnic_priv(sc)->lr_status;
2730 
2731 	/*
2732 	 * If lun reset not completed, bail out with failed. io_req
2733 	 * gets cleaned up during higher levels of EH
2734 	 */
2735 	if (status == FCPIO_INVALID_CODE) {
2736 		atomic64_inc(&reset_stats->device_reset_timeouts);
2737 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2738 			      "Device reset timed out\n");
2739 		fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
2740 		int_to_scsilun(sc->device->lun, &fc_lun);
2741 		goto fnic_device_reset_clean;
2742 	} else {
2743 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2744 	}
2745 
2746 	/* Completed, but not successful, clean up the io_req, return fail */
2747 	if (status != FCPIO_SUCCESS) {
2748 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2749 		FNIC_SCSI_DBG(KERN_DEBUG,
2750 			      fnic->host, fnic->fnic_num,
2751 			      "Device reset completed - failed\n");
2752 		io_req = fnic_priv(sc)->io_req;
2753 		goto fnic_device_reset_clean;
2754 	}
2755 
2756 	/*
2757 	 * Clean up any aborts on this lun that have still not
2758 	 * completed. If any of these fail, then LUN reset fails.
2759 	 * clean_pending_aborts cleans all cmds on this lun except
2760 	 * the lun reset cmd. If all cmds get cleaned, the lun reset
2761 	 * succeeds
2762 	 */
2763 	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
2764 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2765 		io_req = fnic_priv(sc)->io_req;
2766 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2767 					  "Device reset failed: Cannot abort all IOs\n");
2768 		goto fnic_device_reset_clean;
2769 	}
2770 
2771 	/* Clean lun reset command */
2772 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2773 	io_req = fnic_priv(sc)->io_req;
2774 	if (io_req)
2775 		/* Completed, and successful */
2776 		ret = SUCCESS;
2777 
2778 fnic_device_reset_clean:
2779 	if (io_req) {
2780 		fnic_priv(sc)->io_req = NULL;
2781 		io_req->sc = NULL;
2782 		fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(io_req->tag)] = NULL;
2783 	}
2784 
2785 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2786 
2787 	if (io_req) {
2788 		start_time = io_req->start_time;
2789 		fnic_release_ioreq_buf(fnic, io_req, sc);
2790 		mempool_free(io_req, fnic->io_req_pool);
2791 	}
2792 
2793 	/*
2794 	 * If link-event is seen while LUN reset is issued we need
2795 	 * to complete the LUN reset here
2796 	 */
2797 	if (!new_sc) {
2798 		sc->result = DID_RESET << 16;
2799 		scsi_done(sc);
2800 	}
2801 
2802 fnic_device_reset_end:
2803 	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
2804 		  jiffies_to_msecs(jiffies - start_time),
2805 		  0, ((u64)sc->cmnd[0] << 32 |
2806 		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
2807 		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
2808 		  fnic_flags_and_state(sc));
2809 
2810 	if (new_sc) {
2811 		fnic->sgreset_sc = NULL;
2812 		mutex_unlock(&fnic->sgreset_mutex);
2813 	}
2814 
2815 	while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
2816 		if (count >= 2) {
2817 			ret = FAILED;
2818 			break;
2819 		}
2820 		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
2821 					  "Cannot clean up all IOs for the LUN\n");
2822 		schedule_timeout(msecs_to_jiffies(1000));
2823 		count++;
2824 	}
2825 
2826 	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
2827 		      "Returning from device reset %s\n",
2828 		      (ret == SUCCESS) ?
2829 		      "SUCCESS" : "FAILED");
2830 
2831 	if (ret == FAILED)
2832 		atomic64_inc(&reset_stats->device_reset_failures);
2833 
2834 	return ret;
2835 }
2836 
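/*
 * Simulate a link flap: report link down to FDLS, then report link up
 * again if the physical link is still up.
 */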
2837 static void fnic_post_flogo_linkflap(struct fnic *fnic)
2838 {
2839 	unsigned long flags;
2840 
2841 	fnic_fdls_link_status_change(fnic, 0);
2842 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2843 
2844 	if (fnic->link_status) {
2845 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2846 		fnic_fdls_link_status_change(fnic, 1);
2847 		return;
2848 	}
2849 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2850 }
2851 
2852 /* Logout from all the targets and simulate link flap */
2853 void fnic_reset(struct Scsi_Host *shost)
2854 {
2855 	struct fnic *fnic;
2856 	struct reset_stats *reset_stats;
2857 
2858 	fnic = *((struct fnic **) shost_priv(shost));
2859 	reset_stats = &fnic->fnic_stats.reset_stats;
2860 
2861 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2862 				  "Issuing fnic reset\n");
2863 
2864 	atomic64_inc(&reset_stats->fnic_resets);
2865 	fnic_post_flogo_linkflap(fnic);
2866 
2867 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2868 				  "Returning from fnic reset");
2869 
2870 	atomic64_inc(&reset_stats->fnic_reset_completions);
2871 }
2872 
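/* fc_host LIP (issue_lip) entry point: implemented as a host reset */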
2873 int fnic_issue_fc_host_lip(struct Scsi_Host *shost)
2874 {
2875 	int ret = 0;
2876 	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
2877 
2878 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2879 				  "FC host lip issued");
2880 
2881 	ret = fnic_host_reset(shost);
2882 	return ret;
2883 }
2884 
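/*
 * fnic_host_reset
 * Serialize with any reset already in progress, perform fnic_reset(),
 * then wait up to FNIC_HOST_RESET_SETTLE_TIME for the link and iport to
 * become ready before returning to the SCSI mid-layer.
 */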
2885 int fnic_host_reset(struct Scsi_Host *shost)
2886 {
2887 	int ret = SUCCESS;
2888 	unsigned long wait_host_tmo;
2889 	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
2890 	unsigned long flags;
2891 	struct fnic_iport_s *iport = &fnic->iport;
2892 
2893 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2894 	if (fnic->reset_in_progress == NOT_IN_PROGRESS) {
2895 		fnic->reset_in_progress = IN_PROGRESS;
2896 	} else {
2897 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2898 		wait_for_completion_timeout(&fnic->reset_completion_wait,
2899 									msecs_to_jiffies(10000));
2900 
2901 		spin_lock_irqsave(&fnic->fnic_lock, flags);
2902 		if (fnic->reset_in_progress == IN_PROGRESS) {
2903 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2904 			FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num,
2905 			  "Firmware reset in progress. Skipping another host reset\n");
2906 			return SUCCESS;
2907 		}
2908 		fnic->reset_in_progress = IN_PROGRESS;
2909 	}
2910 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2911 
2912 	/*
2913 	 * If fnic_reset is successful, wait for the fabric login to complete.
2914 	 * scsi-ml tries to send a TUR to every device if host reset is
2915 	 * successful, so before returning to scsi, the fabric should be up.
2916 	 */
2917 	fnic_reset(shost);
2918 
2919 	spin_lock_irqsave(&fnic->fnic_lock, flags);
2920 	fnic->reset_in_progress = NOT_IN_PROGRESS;
2921 	complete(&fnic->reset_completion_wait);
2922 	fnic->soft_reset_count++;
2923 
2924 	/* wait till the link is up */
2925 	if (fnic->link_status) {
2926 		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
2927 		ret = FAILED;
2928 		while (time_before(jiffies, wait_host_tmo)) {
2929 			if (iport->state != FNIC_IPORT_STATE_READY
2930 				&& fnic->link_status) {
2931 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2932 				ssleep(1);
2933 				spin_lock_irqsave(&fnic->fnic_lock, flags);
2934 			} else {
2935 				ret = SUCCESS;
2936 				break;
2937 			}
2938 		}
2939 	}
2940 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
2941 
2942 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2943 				  "host reset return status: %d\n", ret);
2944 	return ret;
2945 }
2946 
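/*
 * fnic_abts_pending_iter
 * Per-command iterator used by fnic_is_abts_pending(). Sets ret and
 * stops iteration if any matching IO is still in ABTS_PENDING state.
 */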
2947 static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
2948 {
2949 	struct request *const rq = scsi_cmd_to_rq(sc);
2950 	struct fnic_pending_aborts_iter_data *iter_data = data;
2951 	struct fnic *fnic = iter_data->fnic;
2952 	int cmd_state;
2953 	struct fnic_io_req *io_req;
2954 	unsigned long flags;
2955 	uint16_t hwq = 0;
2956 	int tag;
2957 
2958 	tag = blk_mq_unique_tag(rq);
2959 	hwq = blk_mq_unique_tag_to_hwq(tag);
2960 
2961 	/*
2962 	 * ignore this lun reset cmd or cmds that do not belong to
2963 	 * this lun
2964 	 */
2965 	if (iter_data->lr_sc && sc == iter_data->lr_sc)
2966 		return true;
2967 	if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
2968 		return true;
2969 
2970 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
2971 
2972 	io_req = fnic_priv(sc)->io_req;
2973 	if (!io_req) {
2974 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2975 		return true;
2976 	}
2977 
2978 	/*
2979 	 * Found IO that is still pending with firmware and
2980 	 * belongs to the LUN that we are resetting
2981 	 */
2982 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
2983 		"hwq: %d tag: 0x%x Found IO in state: %s on lun\n",
2984 		hwq, tag,
2985 		fnic_ioreq_state_to_str(fnic_priv(sc)->state));
2986 	cmd_state = fnic_priv(sc)->state;
2987 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
2988 	if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
2989 		iter_data->ret = 1;
2990 
2991 	return iter_data->ret ? false : true;
2992 }
2993 
2994 /*
2995  * fnic_is_abts_pending() is a helper function that
2996  * walks the tag map to check if any IOs are pending; if there is one,
2997  * it returns 1 (true), otherwise 0 (false).
2998  * If @lr_sc is non-NULL, it checks IOs specific to that LUN;
2999  * otherwise, it checks all IOs.
3000  */
3001 int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
3002 {
3003 	struct fnic_pending_aborts_iter_data iter_data = {
3004 		.fnic = fnic,
3005 		.lun_dev = NULL,
3006 		.ret = 0,
3007 	};
3008 
3009 	if (lr_sc) {
3010 		iter_data.lun_dev = lr_sc->device;
3011 		iter_data.lr_sc = lr_sc;
3012 	}
3013 
3014 	/* walk again to check, if IOs are still pending in fw */
3015 	scsi_host_busy_iter(fnic->host,
3016 			    fnic_abts_pending_iter, &iter_data);
3017 
3018 	return iter_data.ret;
3019 }
3020 
3021 /*
3022  * SCSI Error handling calls driver's eh_host_reset if all prior
3023  * error handling levels return FAILED. If host reset completes
3024  * successfully, and if link is up, then Fabric login begins.
3025  *
3026  * Host Reset is the highest level of error recovery. If this fails, then
3027  * host is offlined by SCSI.
3028  *
3029  */
3030 int fnic_eh_host_reset_handler(struct scsi_cmnd *sc)
3031 {
3032 	int ret = 0;
3033 	struct Scsi_Host *shost = sc->device->host;
3034 	struct fnic *fnic = *((struct fnic **) shost_priv(shost));
3035 
3036 	FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
3037 				  "SCSI error handling: fnic host reset");
3038 
3039 	ret = fnic_host_reset(shost);
3040 	return ret;
3041 }
3042 
3043 
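/*
 * fnic_scsi_fcpio_reset
 * Issue a firmware reset, transitioning the fnic state to
 * FC_TRANS_ETH_MODE, and wait up to FNIC_FW_RESET_TIMEOUT for the
 * firmware to signal completion.
 */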
3044 void fnic_scsi_fcpio_reset(struct fnic *fnic)
3045 {
3046 	unsigned long flags;
3047 	enum fnic_state old_state;
3048 	struct fnic_iport_s *iport = &fnic->iport;
3049 	DECLARE_COMPLETION_ONSTACK(fw_reset_done);
3050 	int time_remain;
3051 
3052 	/* issue fw reset */
3053 	spin_lock_irqsave(&fnic->fnic_lock, flags);
3054 	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
3055 		/* fw reset is in progress, poll for its completion */
3056 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
3057 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
3058 			  "fnic is in unexpected state: %d for fw_reset\n",
3059 			  fnic->state);
3060 		return;
3061 	}
3062 
3063 	old_state = fnic->state;
3064 	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
3065 
3066 	fnic_update_mac_locked(fnic, iport->hwmac);
3067 	fnic->fw_reset_done = &fw_reset_done;
3068 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
3069 
3070 	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
3071 				  "Issuing fw reset\n");
3072 	if (fnic_fw_reset_handler(fnic)) {
3073 		spin_lock_irqsave(&fnic->fnic_lock, flags);
3074 		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
3075 			fnic->state = old_state;
3076 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
3077 	} else {
3078 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
3079 					  "Waiting for fw completion\n");
3080 		time_remain = wait_for_completion_timeout(&fw_reset_done,
3081 						  msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
3082 		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
3083 					  "Woken up after fw completion timeout\n");
3084 		if (time_remain == 0) {
3085 			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
3086 				  "FW reset completion timed out after %d ms\n",
3087 				  FNIC_FW_RESET_TIMEOUT);
3088 			atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
3089 		}
3090 	}
3091 	fnic->fw_reset_done = NULL;
3092 }
3093