xref: /linux/drivers/scsi/ibmvscsi/ibmvfc.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9 
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kthread.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/pm.h>
21 #include <linux/stringify.h>
22 #include <linux/bsg-lib.h>
23 #include <asm/firmware.h>
24 #include <asm/irq.h>
25 #include <asm/rtas.h>
26 #include <asm/vio.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_tcq.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/scsi_bsg_fc.h>
34 #include "ibmvfc.h"
35 
36 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
37 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
38 static u64 max_lun = IBMVFC_MAX_LUN;
39 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
40 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
41 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
42 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
43 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
44 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
45 static unsigned int mq_enabled = IBMVFC_MQ;
46 static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
47 static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
48 static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
49 static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
50 
51 static LIST_HEAD(ibmvfc_head);
52 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
53 static struct scsi_transport_template *ibmvfc_transport_template;
54 
55 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
56 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
59 
60 module_param_named(mq, mq_enabled, uint, S_IRUGO);
61 MODULE_PARM_DESC(mq, "Enable multiqueue support. "
62 		 "[Default=" __stringify(IBMVFC_MQ) "]");
63 module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
64 MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
65 		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
66 module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
67 MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
68 		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
69 module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
70 MODULE_PARM_DESC(mig_channels_only, "Prevent migration to a non-channelized system. "
71 		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
72 module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
73 MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
74 		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
75 
76 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
77 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
78 		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
79 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(default_timeout,
81 		 "Default timeout in seconds for initialization and EH commands. "
82 		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
83 module_param_named(max_requests, max_requests, uint, S_IRUGO);
84 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
85 		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
86 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
87 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
88 		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
89 module_param_named(max_targets, max_targets, uint, S_IRUGO);
90 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
91 		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
92 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
93 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
94 		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
95 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(debug, "Enable driver debug information. "
97 		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
98 module_param_named(log_level, log_level, uint, 0);
99 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
100 		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
101 module_param_named(cls3_error, cls3_error, uint, 0);
102 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
103 		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
104 
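/*
 * Table mapping each (status class, error) pair reported by the VIOS to
 * the SCSI result to return to the midlayer, whether the command may be
 * retried, whether the failure is worth logging, and a printable name.
 */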
105 static const struct {
106 	u16 status;
107 	u16 error;
108 	u8 result;
109 	u8 retry;
110 	int log;
111 	char *name;
112 } cmd_status [] = {
113 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
114 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
115 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
116 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
117 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
118 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
119 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
120 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
121 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
122 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
123 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
124 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
125 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
126 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
127 
128 	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
129 	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
130 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
131 	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
132 	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
133 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
134 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
135 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
136 	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
137 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
138 
139 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
140 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
141 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
142 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
143 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
144 	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
145 	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
146 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
147 	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
148 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
149 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
150 
151 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
152 	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
153 };
154 
155 static void ibmvfc_npiv_login(struct ibmvfc_host *);
156 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
157 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
158 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
159 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
160 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
161 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
162 
163 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *);
164 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *);
165 
166 static const char *unknown_error = "unknown error";
167 
168 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
169 			  unsigned long length, unsigned long *cookie,
170 			  unsigned long *irq)
171 {
172 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
173 	long rc;
174 
175 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
176 	*cookie = retbuf[0];
177 	*irq = retbuf[1];
178 
179 	return rc;
180 }
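
/*
 * Illustrative sketch only (variable names assumed, not taken from this
 * file): a sub-CRQ registration path passes the DMA token of a queue
 * page and receives back the cookie and hardware irq assigned by the
 * hypervisor:
 *
 *	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
 *			   &scrq->cookie, &scrq->hw_irq);
 */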
181 
182 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
183 {
184 	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
185 
186 	return (host_caps & cap_flags) ? 1 : 0;
187 }
188 
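/*
 * The layout of the FCP IU and response within struct ibmvfc_cmd depends
 * on whether the VIOS advertised IBMVFC_HANDLE_VF_WWPN at NPIV login;
 * these helpers select the v2 or v1 view accordingly.
 */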
189 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
190 						   struct ibmvfc_cmd *vfc_cmd)
191 {
192 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
193 		return &vfc_cmd->v2.iu;
194 	else
195 		return &vfc_cmd->v1.iu;
196 }
197 
198 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
199 						 struct ibmvfc_cmd *vfc_cmd)
200 {
201 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
202 		return &vfc_cmd->v2.rsp;
203 	else
204 		return &vfc_cmd->v1.rsp;
205 }
206 
207 #ifdef CONFIG_SCSI_IBMVFC_TRACE
208 /**
209  * ibmvfc_trc_start - Log a start trace entry
210  * @evt:		ibmvfc event struct
211  *
212  **/
213 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
214 {
215 	struct ibmvfc_host *vhost = evt->vhost;
216 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
217 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
218 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
219 	struct ibmvfc_trace_entry *entry;
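	/* The trace buffer is a power-of-two ring; masking the atomically
	 * incremented index wraps it without a modulo. */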
220 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
221 
222 	entry = &vhost->trace[index];
223 	entry->evt = evt;
224 	entry->time = jiffies;
225 	entry->fmt = evt->crq.format;
226 	entry->type = IBMVFC_TRC_START;
227 
228 	switch (entry->fmt) {
229 	case IBMVFC_CMD_FORMAT:
230 		entry->op_code = iu->cdb[0];
231 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
232 		entry->lun = scsilun_to_int(&iu->lun);
233 		entry->tmf_flags = iu->tmf_flags;
234 		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
235 		break;
236 	case IBMVFC_MAD_FORMAT:
237 		entry->op_code = be32_to_cpu(mad->opcode);
238 		break;
239 	default:
240 		break;
241 	}
242 }
243 
244 /**
245  * ibmvfc_trc_end - Log an end trace entry
246  * @evt:		ibmvfc event struct
247  *
248  **/
249 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
250 {
251 	struct ibmvfc_host *vhost = evt->vhost;
252 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
253 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
254 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
255 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
256 	struct ibmvfc_trace_entry *entry;
257 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
258 
259 	entry = &vhost->trace[index];
260 	entry->evt = evt;
261 	entry->time = jiffies;
262 	entry->fmt = evt->crq.format;
263 	entry->type = IBMVFC_TRC_END;
264 
265 	switch (entry->fmt) {
266 	case IBMVFC_CMD_FORMAT:
267 		entry->op_code = iu->cdb[0];
268 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
269 		entry->lun = scsilun_to_int(&iu->lun);
270 		entry->tmf_flags = iu->tmf_flags;
271 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
272 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
273 		entry->u.end.fcp_rsp_flags = rsp->flags;
274 		entry->u.end.rsp_code = rsp->data.info.rsp_code;
275 		entry->u.end.scsi_status = rsp->scsi_status;
276 		break;
277 	case IBMVFC_MAD_FORMAT:
278 		entry->op_code = be32_to_cpu(mad->opcode);
279 		entry->u.end.status = be16_to_cpu(mad->status);
280 		break;
281 	default:
282 		break;
283 
284 	}
285 }
286 
287 #else
288 #define ibmvfc_trc_start(evt) do { } while (0)
289 #define ibmvfc_trc_end(evt) do { } while (0)
290 #endif
291 
292 /**
293  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
294  * @status:		status / error class
295  * @error:		error
296  *
297  * Return value:
298  *	index into cmd_status / -EINVAL on failure
299  **/
300 static int ibmvfc_get_err_index(u16 status, u16 error)
301 {
302 	int i;
303 
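	/*
	 * The status field of each entry acts as a bit mask for the error
	 * class: an entry matches when all of its class bits are set in
	 * the reported status and the error code matches exactly.
	 */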
304 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
305 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
306 		    cmd_status[i].error == error)
307 			return i;
308 
309 	return -EINVAL;
310 }
311 
312 /**
313  * ibmvfc_get_cmd_error - Find the error description for the fcp response
314  * @status:		status / error class
315  * @error:		error
316  *
317  * Return value:
318  *	error description string
319  **/
320 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
321 {
322 	int rc = ibmvfc_get_err_index(status, error);
323 	if (rc >= 0)
324 		return cmd_status[rc].name;
325 	return unknown_error;
326 }
327 
328 /**
329  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
330  * @vhost:      ibmvfc host struct
331  * @vfc_cmd:	ibmvfc command struct
332  *
333  * Return value:
334  *	SCSI result value to return for completed command
335  **/
336 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
337 {
338 	int err;
339 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
340 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
341 
342 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
343 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
344 	     rsp->data.info.rsp_code))
345 		return DID_ERROR << 16;
346 
347 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
348 	if (err >= 0)
349 		return rsp->scsi_status | (cmd_status[err].result << 16);
350 	return rsp->scsi_status | (DID_ERROR << 16);
351 }
352 
353 /**
354  * ibmvfc_retry_cmd - Determine if error status is retryable
355  * @status:		status / error class
356  * @error:		error
357  *
358  * Return value:
359  *	1 if error should be retried / 0 if it should not
360  **/
361 static int ibmvfc_retry_cmd(u16 status, u16 error)
362 {
363 	int rc = ibmvfc_get_err_index(status, error);
364 
365 	if (rc >= 0)
366 		return cmd_status[rc].retry;
367 	return 1;
368 }
369 
370 static const char *unknown_fc_explain = "unknown fc explain";
371 
372 static const struct {
373 	u16 fc_explain;
374 	char *name;
375 } ls_explain [] = {
376 	{ 0x00, "no additional explanation" },
377 	{ 0x01, "service parameter error - options" },
378 	{ 0x03, "service parameter error - initiator control" },
379 	{ 0x05, "service parameter error - recipient control" },
380 	{ 0x07, "service parameter error - received data field size" },
381 	{ 0x09, "service parameter error - concurrent seq" },
382 	{ 0x0B, "service parameter error - credit" },
383 	{ 0x0D, "invalid N_Port/F_Port_Name" },
384 	{ 0x0E, "invalid node/Fabric Name" },
385 	{ 0x0F, "invalid common service parameters" },
386 	{ 0x11, "invalid association header" },
387 	{ 0x13, "association header required" },
388 	{ 0x15, "invalid originator S_ID" },
389 	{ 0x17, "invalid OX_ID-RX-ID combination" },
390 	{ 0x19, "command (request) already in progress" },
391 	{ 0x1E, "N_Port Login requested" },
392 	{ 0x1F, "Invalid N_Port_ID" },
393 };
394 
395 static const struct {
396 	u16 fc_explain;
397 	char *name;
398 } gs_explain [] = {
399 	{ 0x00, "no additional explanation" },
400 	{ 0x01, "port identifier not registered" },
401 	{ 0x02, "port name not registered" },
402 	{ 0x03, "node name not registered" },
403 	{ 0x04, "class of service not registered" },
404 	{ 0x06, "initial process associator not registered" },
405 	{ 0x07, "FC-4 TYPEs not registered" },
406 	{ 0x08, "symbolic port name not registered" },
407 	{ 0x09, "symbolic node name not registered" },
408 	{ 0x0A, "port type not registered" },
409 	{ 0xF0, "authorization exception" },
410 	{ 0xF1, "authentication exception" },
411 	{ 0xF2, "data base full" },
412 	{ 0xF3, "data base empty" },
413 	{ 0xF4, "processing request" },
414 	{ 0xF5, "unable to verify connection" },
415 	{ 0xF6, "devices not in a common zone" },
416 };
417 
418 /**
419  * ibmvfc_get_ls_explain - Return the FC Explain description text
420  * @status:	FC Explain status
421  *
422  * Returns:
423  *	error string
424  **/
425 static const char *ibmvfc_get_ls_explain(u16 status)
426 {
427 	int i;
428 
429 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
430 		if (ls_explain[i].fc_explain == status)
431 			return ls_explain[i].name;
432 
433 	return unknown_fc_explain;
434 }
435 
436 /**
437  * ibmvfc_get_gs_explain - Return the FC Explain description text
438  * @status:	FC Explain status
439  *
440  * Returns:
441  *	error string
442  **/
443 static const char *ibmvfc_get_gs_explain(u16 status)
444 {
445 	int i;
446 
447 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
448 		if (gs_explain[i].fc_explain == status)
449 			return gs_explain[i].name;
450 
451 	return unknown_fc_explain;
452 }
453 
454 static const struct {
455 	enum ibmvfc_fc_type fc_type;
456 	char *name;
457 } fc_type [] = {
458 	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
459 	{ IBMVFC_PORT_REJECT, "port reject" },
460 	{ IBMVFC_LS_REJECT, "ELS reject" },
461 	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
462 	{ IBMVFC_PORT_BUSY, "port busy" },
463 	{ IBMVFC_BASIC_REJECT, "basic reject" },
464 };
465 
466 static const char *unknown_fc_type = "unknown fc type";
467 
468 /**
469  * ibmvfc_get_fc_type - Return the FC Type description text
470  * @status:	FC Type error status
471  *
472  * Returns:
473  *	error string
474  **/
475 static const char *ibmvfc_get_fc_type(u16 status)
476 {
477 	int i;
478 
479 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
480 		if (fc_type[i].fc_type == status)
481 			return fc_type[i].name;
482 
483 	return unknown_fc_type;
484 }
485 
486 /**
487  * ibmvfc_set_tgt_action - Set the next init action for the target
488  * @tgt:		ibmvfc target struct
489  * @action:		action to perform
490  *
491  * Returns:
492  *	0 if action changed / non-zero if not changed
493  **/
494 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
495 				  enum ibmvfc_target_action action)
496 {
497 	int rc = -EINVAL;
498 
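	/*
	 * Transitions are only honored in the forward direction of the
	 * logout/delete state machine; anything that would move a target
	 * backwards (e.g. out of DELETED_RPORT) is rejected.
	 */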
499 	switch (tgt->action) {
500 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
501 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
502 		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
503 			tgt->action = action;
504 			rc = 0;
505 		}
506 		break;
507 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
508 		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
509 		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
510 			tgt->action = action;
511 			rc = 0;
512 		}
513 		break;
514 	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
515 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
516 			tgt->action = action;
517 			rc = 0;
518 		}
519 		break;
520 	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
521 		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
522 			tgt->action = action;
523 			rc = 0;
524 		}
525 		break;
526 	case IBMVFC_TGT_ACTION_DEL_RPORT:
527 		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
528 			tgt->action = action;
529 			rc = 0;
530 		}
531 		break;
532 	case IBMVFC_TGT_ACTION_DELETED_RPORT:
533 		break;
534 	default:
535 		tgt->action = action;
536 		rc = 0;
537 		break;
538 	}
539 
540 	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
541 		tgt->add_rport = 0;
542 
543 	return rc;
544 }
545 
546 /**
547  * ibmvfc_set_host_state - Set the state for the host
548  * @vhost:		ibmvfc host struct
549  * @state:		state to set host to
550  *
551  * Returns:
552  *	0 if state changed / non-zero if not changed
553  **/
554 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
555 				  enum ibmvfc_host_state state)
556 {
557 	int rc = 0;
558 
559 	switch (vhost->state) {
560 	case IBMVFC_HOST_OFFLINE:
561 		rc = -EINVAL;
562 		break;
563 	default:
564 		vhost->state = state;
565 		break;
566 	}
567 
568 	return rc;
569 }
570 
571 /**
572  * ibmvfc_set_host_action - Set the next init action for the host
573  * @vhost:		ibmvfc host struct
574  * @action:		action to perform
575  *
576  **/
577 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
578 				   enum ibmvfc_host_action action)
579 {
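	/*
	 * Most actions are only accepted from specific prior states.
	 * RESET and REENABLE are always accepted and, once set, are not
	 * clobbered by lower-priority actions (see the default case).
	 */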
580 	switch (action) {
581 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
582 		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
583 			vhost->action = action;
584 		break;
585 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
586 		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
587 			vhost->action = action;
588 		break;
589 	case IBMVFC_HOST_ACTION_INIT_WAIT:
590 		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
591 			vhost->action = action;
592 		break;
593 	case IBMVFC_HOST_ACTION_QUERY:
594 		switch (vhost->action) {
595 		case IBMVFC_HOST_ACTION_INIT_WAIT:
596 		case IBMVFC_HOST_ACTION_NONE:
597 		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
598 			vhost->action = action;
599 			break;
600 		default:
601 			break;
602 		}
603 		break;
604 	case IBMVFC_HOST_ACTION_TGT_INIT:
605 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
606 			vhost->action = action;
607 		break;
608 	case IBMVFC_HOST_ACTION_REENABLE:
609 	case IBMVFC_HOST_ACTION_RESET:
610 		vhost->action = action;
611 		break;
612 	case IBMVFC_HOST_ACTION_INIT:
613 	case IBMVFC_HOST_ACTION_TGT_DEL:
614 	case IBMVFC_HOST_ACTION_LOGO:
615 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
616 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
617 	case IBMVFC_HOST_ACTION_NONE:
618 	default:
619 		switch (vhost->action) {
620 		case IBMVFC_HOST_ACTION_RESET:
621 		case IBMVFC_HOST_ACTION_REENABLE:
622 			break;
623 		default:
624 			vhost->action = action;
625 			break;
626 		}
627 		break;
628 	}
629 }
630 
631 /**
632  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
633  * @vhost:		ibmvfc host struct
634  *
635  * Return value:
636  *	nothing
637  **/
638 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
639 {
640 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
641 	    vhost->state == IBMVFC_ACTIVE) {
642 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
643 			scsi_block_requests(vhost->host);
644 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
645 		}
646 	} else
647 		vhost->reinit = 1;
648 
649 	wake_up(&vhost->work_wait_q);
650 }
651 
652 /**
653  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
654  * @tgt:		ibmvfc target struct
655  **/
656 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
657 {
658 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
659 		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
660 		tgt->init_retries = 0;
661 	}
662 	wake_up(&tgt->vhost->work_wait_q);
663 }
664 
665 /**
666  * ibmvfc_link_down - Handle a link down event from the adapter
667  * @vhost:	ibmvfc host struct
668  * @state:	ibmvfc host state to enter
669  *
670  **/
671 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
672 			     enum ibmvfc_host_state state)
673 {
674 	struct ibmvfc_target *tgt;
675 
676 	ENTER;
677 	scsi_block_requests(vhost->host);
678 	list_for_each_entry(tgt, &vhost->targets, queue)
679 		ibmvfc_del_tgt(tgt);
680 	ibmvfc_set_host_state(vhost, state);
681 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
682 	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
683 	wake_up(&vhost->work_wait_q);
684 	LEAVE;
685 }
686 
687 /**
688  * ibmvfc_init_host - Start host initialization
689  * @vhost:		ibmvfc host struct
690  *
691  * Return value:
692  *	nothing
693  **/
694 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
695 {
696 	struct ibmvfc_target *tgt;
697 
698 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
699 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
700 			dev_err(vhost->dev,
701 				"Host initialization retries exceeded. Taking adapter offline\n");
702 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
703 			return;
704 		}
705 	}
706 
707 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
708 		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
709 		vhost->async_crq.cur = 0;
710 
711 		list_for_each_entry(tgt, &vhost->targets, queue) {
712 			if (vhost->client_migrated)
713 				tgt->need_login = 1;
714 			else
715 				ibmvfc_del_tgt(tgt);
716 		}
717 
718 		scsi_block_requests(vhost->host);
719 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
720 		vhost->job_step = ibmvfc_npiv_login;
721 		wake_up(&vhost->work_wait_q);
722 	}
723 }
724 
725 /**
726  * ibmvfc_send_crq - Send a CRQ
727  * @vhost:	ibmvfc host struct
728  * @word1:	the first 64 bits of the data
729  * @word2:	the second 64 bits of the data
730  *
731  * Return value:
732  *	0 on success / other on failure
733  **/
734 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
735 {
736 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
737 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
738 }
739 
740 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
741 			       u64 word2, u64 word3, u64 word4)
742 {
743 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
744 
745 	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
746 				  word1, word2, word3, word4);
747 }
748 
749 /**
750  * ibmvfc_send_crq_init - Send a CRQ init message
751  * @vhost:	ibmvfc host struct
752  *
753  * Return value:
754  *	0 on success / other on failure
755  **/
756 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
757 {
758 	ibmvfc_dbg(vhost, "Sending CRQ init\n");
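	/*
	 * Per the CRQ transport convention, 0xC0 in the first byte marks
	 * an initialization message; the second byte distinguishes init
	 * (0x01) from init complete (0x02, see
	 * ibmvfc_send_crq_init_complete() below).
	 */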
759 	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
760 }
761 
762 /**
763  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
764  * @vhost:	ibmvfc host struct
765  *
766  * Return value:
767  *	0 on success / other on failure
768  **/
769 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
770 {
771 	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
772 	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
773 }
774 
775 /**
776  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
777  * @vhost:	ibmvfc host that owns the event pool
778  * @queue:      ibmvfc queue struct
779  * @size:       pool size
780  *
781  * Returns zero on success.
782  **/
783 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
784 				  struct ibmvfc_queue *queue,
785 				  unsigned int size)
786 {
787 	int i;
788 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
789 
790 	ENTER;
791 	if (!size)
792 		return 0;
793 
794 	pool->size = size;
795 	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
796 	if (!pool->events)
797 		return -ENOMEM;
798 
799 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
800 					      size * sizeof(*pool->iu_storage),
801 					      &pool->iu_token, 0);
802 
803 	if (!pool->iu_storage) {
804 		kfree(pool->events);
805 		return -ENOMEM;
806 	}
807 
808 	INIT_LIST_HEAD(&queue->sent);
809 	INIT_LIST_HEAD(&queue->free);
810 	spin_lock_init(&queue->l_lock);
811 
812 	for (i = 0; i < size; ++i) {
813 		struct ibmvfc_event *evt = &pool->events[i];
814 
815 		/*
816 		 * evt->active states
817 		 *  1 = in flight
818 		 *  0 = being completed
819 		 * -1 = free/freed
820 		 */
821 		atomic_set(&evt->active, -1);
822 		atomic_set(&evt->free, 1);
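		/*
		 * 0x80 marks a valid command entry in the CRQ protocol,
		 * and the IOBA points the VIOS at this event's fixed slot
		 * in the coherent iu_storage array.
		 */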
823 		evt->crq.valid = 0x80;
824 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
825 		evt->xfer_iu = pool->iu_storage + i;
826 		evt->vhost = vhost;
827 		evt->queue = queue;
828 		evt->ext_list = NULL;
829 		list_add_tail(&evt->queue_list, &queue->free);
830 	}
831 
832 	LEAVE;
833 	return 0;
834 }
835 
836 /**
837  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
838  * @vhost:	ibmvfc host that owns the event pool
839  * @queue:      ibmvfc queue struct
840  *
841  **/
842 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
843 				   struct ibmvfc_queue *queue)
844 {
845 	int i;
846 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
847 
848 	ENTER;
849 	for (i = 0; i < pool->size; ++i) {
850 		list_del(&pool->events[i].queue_list);
851 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
852 		if (pool->events[i].ext_list)
853 			dma_pool_free(vhost->sg_pool,
854 				      pool->events[i].ext_list,
855 				      pool->events[i].ext_list_token);
856 	}
857 
858 	kfree(pool->events);
859 	dma_free_coherent(vhost->dev,
860 			  pool->size * sizeof(*pool->iu_storage),
861 			  pool->iu_storage, pool->iu_token);
862 	LEAVE;
863 }
864 
865 /**
866  * ibmvfc_free_queue - Deallocate queue
867  * @vhost:	ibmvfc host struct
868  * @queue:	ibmvfc queue struct
869  *
870  * Unmaps dma and deallocates page for messages
871  **/
872 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
873 			      struct ibmvfc_queue *queue)
874 {
875 	struct device *dev = vhost->dev;
876 
877 	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
878 	free_page((unsigned long)queue->msgs.handle);
879 	queue->msgs.handle = NULL;
880 
881 	ibmvfc_free_event_pool(vhost, queue);
882 }
883 
884 /**
885  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
886  * @vhost:	ibmvfc host struct
887  *
888  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
889  * the crq with the hypervisor.
890  **/
891 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
892 {
893 	long rc = 0;
894 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
895 	struct ibmvfc_queue *crq = &vhost->crq;
896 
897 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
898 	free_irq(vdev->irq, vhost);
899 	tasklet_kill(&vhost->tasklet);
900 	do {
901 		if (rc)
902 			msleep(100);
903 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
904 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
905 
906 	vhost->state = IBMVFC_NO_CRQ;
907 	vhost->logged_in = 0;
908 
909 	ibmvfc_free_queue(vhost, crq);
910 }
911 
912 /**
913  * ibmvfc_reenable_crq_queue - reenables the CRQ
914  * @vhost:	ibmvfc host struct
915  *
916  * Return value:
917  *	0 on success / other on failure
918  **/
919 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
920 {
921 	int rc = 0;
922 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
923 	unsigned long flags;
924 
925 	ibmvfc_dereg_sub_crqs(vhost);
926 
927 	/* Re-enable the CRQ */
928 	do {
929 		if (rc)
930 			msleep(100);
931 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
932 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
933 
934 	if (rc)
935 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
936 
937 	spin_lock_irqsave(vhost->host->host_lock, flags);
938 	spin_lock(vhost->crq.q_lock);
939 	vhost->do_enquiry = 1;
940 	vhost->using_channels = 0;
941 	spin_unlock(vhost->crq.q_lock);
942 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
943 
944 	ibmvfc_reg_sub_crqs(vhost);
945 
946 	return rc;
947 }
948 
949 /**
950  * ibmvfc_reset_crq - resets a crq after a failure
951  * @vhost:	ibmvfc host struct
952  *
953  * Return value:
954  *	0 on success / other on failure
955  **/
956 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
957 {
958 	int rc = 0;
959 	unsigned long flags;
960 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
961 	struct ibmvfc_queue *crq = &vhost->crq;
962 
963 	ibmvfc_dereg_sub_crqs(vhost);
964 
965 	/* Close the CRQ */
966 	do {
967 		if (rc)
968 			msleep(100);
969 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
970 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
971 
972 	spin_lock_irqsave(vhost->host->host_lock, flags);
973 	spin_lock(vhost->crq.q_lock);
974 	vhost->state = IBMVFC_NO_CRQ;
975 	vhost->logged_in = 0;
976 	vhost->do_enquiry = 1;
977 	vhost->using_channels = 0;
978 
979 	/* Clean out the queue */
980 	memset(crq->msgs.crq, 0, PAGE_SIZE);
981 	crq->cur = 0;
982 
983 	/* And re-open it again */
984 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
985 				crq->msg_token, PAGE_SIZE);
986 
987 	if (rc == H_CLOSED)
988 		/* Adapter is good, but other end is not ready */
989 		dev_warn(vhost->dev, "Partner adapter not ready\n");
990 	else if (rc != 0)
991 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
992 
993 	spin_unlock(vhost->crq.q_lock);
994 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
995 
996 	ibmvfc_reg_sub_crqs(vhost);
997 
998 	return rc;
999 }
1000 
1001 /**
1002  * ibmvfc_valid_event - Determines if event is valid.
1003  * @pool:	event_pool that contains the event
1004  * @evt:	ibmvfc event to be checked for validity
1005  *
1006  * Return value:
1007  *	1 if event is valid / 0 if event is not valid
1008  **/
1009 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
1010 			      struct ibmvfc_event *evt)
1011 {
1012 	int index = evt - pool->events;
1013 	if (index < 0 || index >= pool->size)	/* outside of bounds */
1014 		return 0;
1015 	if (evt != pool->events + index)	/* unaligned */
1016 		return 0;
1017 	return 1;
1018 }
1019 
1020 /**
1021  * ibmvfc_free_event - Free the specified event
1022  * @evt:	ibmvfc_event to be freed
1023  *
1024  **/
1025 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1026 {
1027 	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1028 	unsigned long flags;
1029 
1030 	BUG_ON(!ibmvfc_valid_event(pool, evt));
1031 	BUG_ON(atomic_inc_return(&evt->free) != 1);
1032 	BUG_ON(atomic_dec_and_test(&evt->active));
1033 
1034 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1035 	list_add_tail(&evt->queue_list, &evt->queue->free);
1036 	if (evt->eh_comp)
1037 		complete(evt->eh_comp);
1038 	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1039 }
1040 
1041 /**
1042  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1043  * @evt:	ibmvfc event struct
1044  *
1045  * This function does not set up any error status; that must be done
1046  * before it is called.
1047  **/
1048 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1049 {
1050 	struct scsi_cmnd *cmnd = evt->cmnd;
1051 
1052 	if (cmnd) {
1053 		scsi_dma_unmap(cmnd);
1054 		scsi_done(cmnd);
1055 	}
1056 
1057 	ibmvfc_free_event(evt);
1058 }
1059 
1060 /**
1061  * ibmvfc_complete_purge - Complete failed command list
1062  * @purge_list:		list head of failed commands
1063  *
1064  * This function runs completions on commands being failed as a result
1065  * of a host reset or platform migration.
1066  **/
1067 static void ibmvfc_complete_purge(struct list_head *purge_list)
1068 {
1069 	struct ibmvfc_event *evt, *pos;
1070 
1071 	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1072 		list_del(&evt->queue_list);
1073 		ibmvfc_trc_end(evt);
1074 		evt->done(evt);
1075 	}
1076 }
1077 
1078 /**
1079  * ibmvfc_fail_request - Fail request with specified error code
1080  * @evt:		ibmvfc event struct
1081  * @error_code:	error code to fail request with
1082  *
1083  * Return value:
1084  *	none
1085  **/
1086 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1087 {
1088 	/*
1089 	 * Anything we are failing should still be active. Otherwise, it
1090 	 * implies we already got a response for the command and are doing
1091 	 * something bad like double completing it.
1092 	 */
1093 	BUG_ON(!atomic_dec_and_test(&evt->active));
1094 	if (evt->cmnd) {
1095 		evt->cmnd->result = (error_code << 16);
1096 		evt->done = ibmvfc_scsi_eh_done;
1097 	} else
1098 		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1099 
1100 	del_timer(&evt->timer);
1101 }
1102 
1103 /**
1104  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1105  * @vhost:		ibmvfc host struct
1106  * @error_code:	error code to fail requests with
1107  *
1108  * Return value:
1109  *	none
1110  **/
1111 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1112 {
1113 	struct ibmvfc_event *evt, *pos;
1114 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1115 	unsigned long flags;
1116 	int hwqs = 0;
1117 	int i;
1118 
1119 	if (vhost->using_channels)
1120 		hwqs = vhost->scsi_scrqs.active_queues;
1121 
1122 	ibmvfc_dbg(vhost, "Purging all requests\n");
1123 	spin_lock_irqsave(&vhost->crq.l_lock, flags);
1124 	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1125 		ibmvfc_fail_request(evt, error_code);
1126 	list_splice_init(&vhost->crq.sent, &vhost->purge);
1127 	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1128 
1129 	for (i = 0; i < hwqs; i++) {
1130 		spin_lock_irqsave(queues[i].q_lock, flags);
1131 		spin_lock(&queues[i].l_lock);
1132 		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1133 			ibmvfc_fail_request(evt, error_code);
1134 		list_splice_init(&queues[i].sent, &vhost->purge);
1135 		spin_unlock(&queues[i].l_lock);
1136 		spin_unlock_irqrestore(queues[i].q_lock, flags);
1137 	}
1138 }
1139 
1140 /**
1141  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1142  * @vhost:	struct ibmvfc host to reset
1143  **/
1144 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1145 {
1146 	ibmvfc_purge_requests(vhost, DID_ERROR);
1147 	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1148 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1149 }
1150 
1151 /**
1152  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1153  * @vhost:	struct ibmvfc host to reset
1154  **/
1155 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1156 {
1157 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1158 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1159 		scsi_block_requests(vhost->host);
1160 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1161 		vhost->job_step = ibmvfc_npiv_logout;
1162 		wake_up(&vhost->work_wait_q);
1163 	} else
1164 		ibmvfc_hard_reset_host(vhost);
1165 }
1166 
1167 /**
1168  * ibmvfc_reset_host - Reset the connection to the server
1169  * @vhost:	ibmvfc host struct
1170  **/
1171 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1172 {
1173 	unsigned long flags;
1174 
1175 	spin_lock_irqsave(vhost->host->host_lock, flags);
1176 	__ibmvfc_reset_host(vhost);
1177 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1178 }
1179 
1180 /**
1181  * ibmvfc_retry_host_init - Retry host initialization if allowed
1182  * @vhost:	ibmvfc host struct
1183  *
1184  * Returns: 1 if init will be retried / 0 if not
1185  *
1186  **/
1187 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1188 {
1189 	int retry = 0;
1190 
1191 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1192 		vhost->delay_init = 1;
1193 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1194 			dev_err(vhost->dev,
1195 				"Host initialization retries exceeded. Taking adapter offline\n");
1196 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1197 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1198 			__ibmvfc_reset_host(vhost);
1199 		else {
1200 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1201 			retry = 1;
1202 		}
1203 	}
1204 
1205 	wake_up(&vhost->work_wait_q);
1206 	return retry;
1207 }
1208 
1209 /**
1210  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1211  * @starget:	scsi target struct
1212  *
1213  * Return value:
1214  *	ibmvfc_target struct / NULL if not found
1215  **/
1216 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1217 {
1218 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1219 	struct ibmvfc_host *vhost = shost_priv(shost);
1220 	struct ibmvfc_target *tgt;
1221 
1222 	list_for_each_entry(tgt, &vhost->targets, queue)
1223 		if (tgt->target_id == starget->id) {
1224 			kref_get(&tgt->kref);
1225 			return tgt;
1226 		}
1227 	return NULL;
1228 }
1229 
1230 /**
1231  * ibmvfc_get_target - Find the specified scsi_target
1232  * @starget:	scsi target struct
1233  *
1234  * Return value:
1235  *	ibmvfc_target struct / NULL if not found
1236  **/
1237 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1238 {
1239 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1240 	struct ibmvfc_target *tgt;
1241 	unsigned long flags;
1242 
1243 	spin_lock_irqsave(shost->host_lock, flags);
1244 	tgt = __ibmvfc_get_target(starget);
1245 	spin_unlock_irqrestore(shost->host_lock, flags);
1246 	return tgt;
1247 }
1248 
1249 /**
1250  * ibmvfc_get_host_speed - Get host port speed
1251  * @shost:		scsi host struct
1252  *
1253  * Return value:
1254  * 	none
1255  **/
1256 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1257 {
1258 	struct ibmvfc_host *vhost = shost_priv(shost);
1259 	unsigned long flags;
1260 
1261 	spin_lock_irqsave(shost->host_lock, flags);
1262 	if (vhost->state == IBMVFC_ACTIVE) {
1263 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1264 		case 1:
1265 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1266 			break;
1267 		case 2:
1268 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1269 			break;
1270 		case 4:
1271 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1272 			break;
1273 		case 8:
1274 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1275 			break;
1276 		case 10:
1277 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1278 			break;
1279 		case 16:
1280 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1281 			break;
1282 		default:
1283 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1284 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1285 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1286 			break;
1287 		}
1288 	} else
1289 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1290 	spin_unlock_irqrestore(shost->host_lock, flags);
1291 }
1292 
1293 /**
1294  * ibmvfc_get_host_port_state - Get host port state
1295  * @shost:		scsi host struct
1296  *
1297  * Return value:
1298  * 	none
1299  **/
1300 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1301 {
1302 	struct ibmvfc_host *vhost = shost_priv(shost);
1303 	unsigned long flags;
1304 
1305 	spin_lock_irqsave(shost->host_lock, flags);
1306 	switch (vhost->state) {
1307 	case IBMVFC_INITIALIZING:
1308 	case IBMVFC_ACTIVE:
1309 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1310 		break;
1311 	case IBMVFC_LINK_DOWN:
1312 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1313 		break;
1314 	case IBMVFC_LINK_DEAD:
1315 	case IBMVFC_HOST_OFFLINE:
1316 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1317 		break;
1318 	case IBMVFC_HALTED:
1319 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1320 		break;
1321 	case IBMVFC_NO_CRQ:
1322 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1323 		break;
1324 	default:
1325 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1326 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1327 		break;
1328 	}
1329 	spin_unlock_irqrestore(shost->host_lock, flags);
1330 }
1331 
1332 /**
1333  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1334  * @rport:		rport struct
1335  * @timeout:	timeout value
1336  *
1337  * Return value:
1338  * 	none
1339  **/
1340 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1341 {
1342 	if (timeout)
1343 		rport->dev_loss_tmo = timeout;
1344 	else
1345 		rport->dev_loss_tmo = 1;
1346 }
1347 
1348 /**
1349  * ibmvfc_release_tgt - Free memory allocated for a target
1350  * @kref:		kref struct
1351  *
1352  **/
1353 static void ibmvfc_release_tgt(struct kref *kref)
1354 {
1355 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1356 	kfree(tgt);
1357 }
1358 
1359 /**
1360  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1361  * @starget:	scsi target struct
1362  *
1363  * Return value:
1364  * 	none
1365  **/
1366 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1367 {
1368 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1369 	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1370 	if (tgt)
1371 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1372 }
1373 
1374 /**
1375  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1376  * @starget:	scsi target struct
1377  *
1378  * Return value:
1379  * 	none
1380  **/
1381 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1382 {
1383 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1384 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1385 	if (tgt)
1386 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1387 }
1388 
1389 /**
1390  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1391  * @starget:	scsi target struct
1392  *
1393  * Return value:
1394  * 	none
1395  **/
1396 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1397 {
1398 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1399 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1400 	if (tgt)
1401 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1402 }
1403 
1404 /**
1405  * ibmvfc_wait_while_resetting - Wait while the host resets
1406  * @vhost:		ibmvfc host struct
1407  *
1408  * Return value:
1409  * 	0 on success / other on failure
1410  **/
1411 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1412 {
1413 	long timeout = wait_event_timeout(vhost->init_wait_q,
1414 					  ((vhost->state == IBMVFC_ACTIVE ||
1415 					    vhost->state == IBMVFC_HOST_OFFLINE ||
1416 					    vhost->state == IBMVFC_LINK_DEAD) &&
1417 					   vhost->action == IBMVFC_HOST_ACTION_NONE),
1418 					  (init_timeout * HZ));
1419 
1420 	return timeout ? 0 : -EIO;
1421 }
1422 
1423 /**
1424  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1425  * @shost:		scsi host struct
1426  *
1427  * Return value:
1428  * 	0 on success / other on failure
1429  **/
1430 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1431 {
1432 	struct ibmvfc_host *vhost = shost_priv(shost);
1433 
1434 	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1435 	ibmvfc_reset_host(vhost);
1436 	return ibmvfc_wait_while_resetting(vhost);
1437 }
1438 
1439 /**
1440  * ibmvfc_gather_partition_info - Gather info about the LPAR
1441  * @vhost:      ibmvfc host struct
1442  *
1443  * Return value:
1444  *	none
1445  **/
1446 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1447 {
1448 	struct device_node *rootdn;
1449 	const char *name;
1450 	const unsigned int *num;
1451 
1452 	rootdn = of_find_node_by_path("/");
1453 	if (!rootdn)
1454 		return;
1455 
1456 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1457 	if (name)
1458 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1459 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1460 	if (num)
1461 		vhost->partition_number = *num;
1462 	of_node_put(rootdn);
1463 }
1464 
1465 /**
1466  * ibmvfc_set_login_info - Setup info for NPIV login
1467  * @vhost:	ibmvfc host struct
1468  *
1469  * Return value:
1470  *	none
1471  **/
1472 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1473 {
1474 	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1475 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
1476 	struct device_node *of_node = vhost->dev->of_node;
1477 	const char *location;
1478 
1479 	memset(login_info, 0, sizeof(*login_info));
1480 
1481 	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
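	/* The shift by 9 converts 512-byte sectors to a byte count. */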
1482 	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1483 	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1484 	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1485 	login_info->partition_num = cpu_to_be32(vhost->partition_number);
1486 	login_info->vfc_frame_version = cpu_to_be32(1);
1487 	login_info->fcp_version = cpu_to_be16(3);
1488 	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1489 	if (vhost->client_migrated)
1490 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1491 
1492 	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1493 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1494 
1495 	if (vhost->mq_enabled || vhost->using_channels)
1496 		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1497 
1498 	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1499 	login_info->async.len = cpu_to_be32(async_crq->size *
1500 					    sizeof(*async_crq->msgs.async));
1501 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1502 	strncpy(login_info->device_name,
1503 		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1504 
1505 	location = of_get_property(of_node, "ibm,loc-code", NULL);
1506 	location = location ? location : dev_name(vhost->dev);
1507 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1508 }
1509 
1510 /**
1511  * ibmvfc_get_event - Gets the next free event in pool
1512  * @queue:      ibmvfc queue struct
1513  *
1514  * Returns a free event from the pool.
1515  **/
1516 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1517 {
1518 	struct ibmvfc_event *evt;
1519 	unsigned long flags;
1520 
1521 	spin_lock_irqsave(&queue->l_lock, flags);
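	/*
	 * The pool is sized to cover every command the adapter will accept
	 * (see max_cmds in ibmvfc_set_login_info()), so the free list
	 * should never be empty here.
	 */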
1522 	BUG_ON(list_empty(&queue->free));
1523 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1524 	atomic_set(&evt->free, 0);
1525 	list_del(&evt->queue_list);
1526 	spin_unlock_irqrestore(&queue->l_lock, flags);
1527 	return evt;
1528 }
1529 
1530 /**
1531  * ibmvfc_locked_done - Calls evt completion with host_lock held
1532  * @evt:	ibmvfc evt to complete
1533  *
1534  * All non-scsi command completion callbacks have the expectation that the
1535  * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1536  * MAD evt with the host_lock.
1537  **/
1538 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1539 {
1540 	unsigned long flags;
1541 
1542 	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1543 	evt->_done(evt);
1544 	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1545 }
1546 
1547 /**
1548  * ibmvfc_init_event - Initialize fields in an event struct that are always
1549  *				required.
1550  * @evt:	The event
1551  * @done:	Routine to call when the event is responded to
1552  * @format:	CMD or MAD format
1553  **/
1554 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1555 			      void (*done) (struct ibmvfc_event *), u8 format)
1556 {
1557 	evt->cmnd = NULL;
1558 	evt->sync_iu = NULL;
1559 	evt->eh_comp = NULL;
1560 	evt->crq.format = format;
1561 	if (format == IBMVFC_CMD_FORMAT)
1562 		evt->done = done;
1563 	else {
1564 		evt->_done = done;
1565 		evt->done = ibmvfc_locked_done;
1566 	}
1567 	evt->hwq = 0;
1568 }
1569 
1570 /**
1571  * ibmvfc_map_sg_list - Initialize scatterlist
1572  * @scmd:	scsi command struct
1573  * @nseg:	number of scatterlist segments
1574  * @md:	memory descriptor list to initialize
1575  **/
1576 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1577 			       struct srp_direct_buf *md)
1578 {
1579 	int i;
1580 	struct scatterlist *sg;
1581 
1582 	scsi_for_each_sg(scmd, sg, nseg, i) {
1583 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1584 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1585 		md[i].key = 0;
1586 	}
1587 }
1588 
1589 /**
1590  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1591  * @scmd:		struct scsi_cmnd with the scatterlist
1592  * @evt:		ibmvfc event struct
1593  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1594  * @dev:		device for which to map dma memory
1595  *
1596  * Returns:
1597  *	0 on success / non-zero on failure
1598  **/
1599 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1600 			      struct ibmvfc_event *evt,
1601 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1602 {
1603 
1604 	int sg_mapped;
1605 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1606 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1607 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1608 
1609 	if (cls3_error)
1610 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1611 
1612 	sg_mapped = scsi_dma_map(scmd);
1613 	if (!sg_mapped) {
1614 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1615 		return 0;
1616 	} else if (unlikely(sg_mapped < 0)) {
1617 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1618 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1619 		return sg_mapped;
1620 	}
1621 
1622 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1623 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1624 		iu->add_cdb_len |= IBMVFC_WRDATA;
1625 	} else {
1626 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1627 		iu->add_cdb_len |= IBMVFC_RDDATA;
1628 	}
1629 
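	/*
	 * A single mapped segment fits in the command's inline direct
	 * buffer; longer scatterlists spill into an external descriptor
	 * list allocated from the host's DMA pool below.
	 */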
1630 	if (sg_mapped == 1) {
1631 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1632 		return 0;
1633 	}
1634 
1635 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1636 
1637 	if (!evt->ext_list) {
1638 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1639 					       &evt->ext_list_token);
1640 
1641 		if (!evt->ext_list) {
1642 			scsi_dma_unmap(scmd);
1643 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1644 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1645 			return -ENOMEM;
1646 		}
1647 	}
1648 
1649 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1650 
1651 	data->va = cpu_to_be64(evt->ext_list_token);
1652 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1653 	data->key = 0;
1654 	return 0;
1655 }
1656 
1657 /**
1658  * ibmvfc_timeout - Internal command timeout handler
1659  * @t:	struct ibmvfc_event that timed out
1660  *
1661  * Called when an internally generated command times out
1662  **/
1663 static void ibmvfc_timeout(struct timer_list *t)
1664 {
1665 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1666 	struct ibmvfc_host *vhost = evt->vhost;
1667 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1668 	ibmvfc_reset_host(vhost);
1669 }
1670 
1671 /**
1672  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1673  * @evt:		event to be sent
1674  * @vhost:		ibmvfc host struct
1675  * @timeout:	timeout in seconds - 0 means do not time command
1676  *
1677  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1678  **/
1679 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1680 			     struct ibmvfc_host *vhost, unsigned long timeout)
1681 {
1682 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
1683 	unsigned long flags;
1684 	int rc;
1685 
1686 	/* Copy the IU into the transfer area */
1687 	*evt->xfer_iu = evt->iu;
1688 	if (evt->crq.format == IBMVFC_CMD_FORMAT)
1689 		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1690 	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1691 		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1692 	else
1693 		BUG();
1694 
1695 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
1696 
1697 	if (timeout) {
1698 		evt->timer.expires = jiffies + (timeout * HZ);
1699 		add_timer(&evt->timer);
1700 	}
1701 
1702 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1703 	list_add_tail(&evt->queue_list, &evt->queue->sent);
1704 	atomic_set(&evt->active, 1);
1705 
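	/*
	 * Make sure the copied IU and the sent-list/active updates are
	 * globally visible before the request is handed to firmware.
	 */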
1706 	mb();
1707 
1708 	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1709 		rc = ibmvfc_send_sub_crq(vhost,
1710 					 evt->queue->vios_cookie,
1711 					 be64_to_cpu(crq_as_u64[0]),
1712 					 be64_to_cpu(crq_as_u64[1]),
1713 					 0, 0);
1714 	else
1715 		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1716 				     be64_to_cpu(crq_as_u64[1]));
1717 
1718 	if (rc) {
1719 		atomic_set(&evt->active, 0);
1720 		list_del(&evt->queue_list);
1721 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1722 		del_timer(&evt->timer);
1723 
1724 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1725 		 * Firmware will send a CRQ with a transport event (0xFF) to
1726 		 * tell this client what has happened to the transport. This
1727 		 * will be handled in ibmvfc_handle_crq()
1728 		 */
1729 		if (rc == H_CLOSED) {
1730 			if (printk_ratelimit())
1731 				dev_warn(vhost->dev, "Send failed, receive queue closed, will retry.\n");
1732 			if (evt->cmnd)
1733 				scsi_dma_unmap(evt->cmnd);
1734 			ibmvfc_free_event(evt);
1735 			return SCSI_MLQUEUE_HOST_BUSY;
1736 		}
1737 
1738 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1739 		if (evt->cmnd) {
1740 			evt->cmnd->result = DID_ERROR << 16;
1741 			evt->done = ibmvfc_scsi_eh_done;
1742 		} else
1743 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1744 
1745 		evt->done(evt);
1746 	} else {
1747 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1748 		ibmvfc_trc_start(evt);
1749 	}
1750 
1751 	return 0;
1752 }
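/*
 * Note: ibmvfc_send_event() adds the event to the queue's sent list and
 * marks it active before ibmvfc_send_crq()/ibmvfc_send_sub_crq() is
 * called; the mb() ensures those stores are visible before the VIOS can
 * post a response that the interrupt path would match against the sent
 * list. On a send failure the enqueue is unwound under the queue's
 * l_lock; a closed CRQ returns SCSI_MLQUEUE_HOST_BUSY for retry, while
 * any other failure completes the event with an error and still
 * returns zero.
 */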
1753 
1754 /**
1755  * ibmvfc_log_error - Log an error for the failed command if appropriate
1756  * @evt:	ibmvfc event to log
1757  *
1758  **/
1759 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1760 {
1761 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1762 	struct ibmvfc_host *vhost = evt->vhost;
1763 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1764 	struct scsi_cmnd *cmnd = evt->cmnd;
1765 	const char *err = unknown_error;
1766 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1767 	int logerr = 0;
1768 	int rsp_code = 0;
1769 
1770 	if (index >= 0) {
1771 		logerr = cmd_status[index].log;
1772 		err = cmd_status[index].name;
1773 	}
1774 
1775 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1776 		return;
1777 
1778 	if (rsp->flags & FCP_RSP_LEN_VALID)
1779 		rsp_code = rsp->data.info.rsp_code;
1780 
1781 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1782 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1783 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1784 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1785 }
1786 
1787 /**
1788  * ibmvfc_relogin - Log back into the specified device
1789  * @sdev:	scsi device struct
1790  *
1791  **/
1792 static void ibmvfc_relogin(struct scsi_device *sdev)
1793 {
1794 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1795 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1796 	struct ibmvfc_target *tgt;
1797 	unsigned long flags;
1798 
1799 	spin_lock_irqsave(vhost->host->host_lock, flags);
1800 	list_for_each_entry(tgt, &vhost->targets, queue) {
1801 		if (rport == tgt->rport) {
1802 			ibmvfc_del_tgt(tgt);
1803 			break;
1804 		}
1805 	}
1806 
1807 	ibmvfc_reinit_host(vhost);
1808 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1809 }
1810 
1811 /**
1812  * ibmvfc_scsi_done - Handle responses from commands
1813  * @evt:	ibmvfc event to be handled
1814  *
1815  * Used as a callback when sending scsi cmds.
1816  **/
1817 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1818 {
1819 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1820 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1821 	struct scsi_cmnd *cmnd = evt->cmnd;
1822 	u32 rsp_len = 0;
1823 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1824 
1825 	if (cmnd) {
1826 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1827 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1828 		else if (rsp->flags & FCP_RESID_UNDER)
1829 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1830 		else
1831 			scsi_set_resid(cmnd, 0);
1832 
1833 		if (vfc_cmd->status) {
1834 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1835 
1836 			if (rsp->flags & FCP_RSP_LEN_VALID)
1837 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1838 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1839 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1840 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1841 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1842 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1843 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1844 				ibmvfc_relogin(cmnd->device);
1845 
1846 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1847 				cmnd->result = (DID_ERROR << 16);
1848 
1849 			ibmvfc_log_error(evt);
1850 		}
1851 
1852 		if (!cmnd->result &&
1853 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1854 			cmnd->result = (DID_ERROR << 16);
1855 
1856 		scsi_dma_unmap(cmnd);
1857 		scsi_done(cmnd);
1858 	}
1859 
1860 	ibmvfc_free_event(evt);
1861 }
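/*
 * Note on the sense copy in ibmvfc_scsi_done(): when FCP_RSP_LEN_VALID
 * is set, the FCP_RSP_INFO bytes precede any sense data in the response
 * data buffer, so the sense bytes start at rsp->data.sense + rsp_len.
 * The rsp_len <= 8 test bounds the offset to the response-info lengths
 * FCP defines (0, 4 or 8 bytes).
 */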
1862 
1863 /**
1864  * ibmvfc_host_chkready - Check if the host can accept commands
1865  * @vhost:	 struct ibmvfc host
1866  *
1867  * Returns:
1868  *	0 if the host can accept commands / nonzero SCSI result (DID_* << 16) if not
1869  **/
1870 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1871 {
1872 	int result = 0;
1873 
1874 	switch (vhost->state) {
1875 	case IBMVFC_LINK_DEAD:
1876 	case IBMVFC_HOST_OFFLINE:
1877 		result = DID_NO_CONNECT << 16;
1878 		break;
1879 	case IBMVFC_NO_CRQ:
1880 	case IBMVFC_INITIALIZING:
1881 	case IBMVFC_HALTED:
1882 	case IBMVFC_LINK_DOWN:
1883 		result = DID_REQUEUE << 16;
1884 		break;
1885 	case IBMVFC_ACTIVE:
1886 		result = 0;
1887 		break;
1888 	}
1889 
1890 	return result;
1891 }
1892 
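/**
 * ibmvfc_init_vfc_cmd - Initialize an ibmvfc_cmd for a SCSI device
 * @evt:	ibmvfc event struct backing the command
 * @sdev:	scsi device the command will be sent to
 *
 * Zeroes the command and fills in the response descriptor, frame type,
 * payload/response lengths, cancel key, target SCSI ID and LUN. If the
 * VIOS advertises IBMVFC_HANDLE_VF_WWPN, the v2 response layout is used
 * and the target WWPN is supplied as well.
 *
 * Returns:
 *	pointer to the initialized struct ibmvfc_cmd
 **/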
1893 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1894 {
1895 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1896 	struct ibmvfc_host *vhost = evt->vhost;
1897 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1898 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1899 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1900 	size_t offset;
1901 
1902 	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1903 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1904 		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1905 		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1906 	} else
1907 		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1908 	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1909 	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1910 	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1911 	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1912 	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1913 	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1914 	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1915 	int_to_scsilun(sdev->lun, &iu->lun);
1916 
1917 	return vfc_cmd;
1918 }
1919 
1920 /**
1921  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1922  * @shost:	scsi host struct
1923  * @cmnd:	struct scsi_cmnd to be executed
1924  *
1925  * Returns:
1926  *	0 on success / other on failure
1927  **/
1928 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1929 {
1930 	struct ibmvfc_host *vhost = shost_priv(shost);
1931 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1932 	struct ibmvfc_cmd *vfc_cmd;
1933 	struct ibmvfc_fcp_cmd_iu *iu;
1934 	struct ibmvfc_event *evt;
1935 	u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
1936 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1937 	u16 scsi_channel;
1938 	int rc;
1939 
1940 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1941 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1942 		cmnd->result = rc;
1943 		scsi_done(cmnd);
1944 		return 0;
1945 	}
1946 
1947 	cmnd->result = (DID_OK << 16);
1948 	if (vhost->using_channels) {
1949 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1950 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1951 		evt->hwq = scsi_channel;
1952 	} else
1953 		evt = ibmvfc_get_event(&vhost->crq);
1954 
1955 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1956 	evt->cmnd = cmnd;
1957 
1958 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1959 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1960 
1961 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1962 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1963 
1964 	if (cmnd->flags & SCMD_TAGGED) {
1965 		vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
1966 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1967 	}
1968 
1969 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1970 
1971 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1972 		return ibmvfc_send_event(evt, vhost, 0);
1973 
1974 	ibmvfc_free_event(evt);
1975 	if (rc == -ENOMEM)
1976 		return SCSI_MLQUEUE_HOST_BUSY;
1977 
1978 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1979 		scmd_printk(KERN_ERR, cmnd,
1980 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1981 
1982 	cmnd->result = DID_ERROR << 16;
1983 	scsi_done(cmnd);
1984 	return 0;
1985 }
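/*
 * Note: with channels in use, ibmvfc_queuecommand() folds the block
 * layer's hardware queue index onto the active sub-CRQs round-robin
 * style. For example (illustrative values only): with 8 host submission
 * queues but 4 active channels, hwq 6 is sent on sub-CRQ 6 % 4 = 2.
 */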
1986 
1987 /**
1988  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1989  * @evt:	ibmvfc event struct
1990  *
1991  **/
1992 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1993 {
1994 	/* copy the response back */
1995 	if (evt->sync_iu)
1996 		*evt->sync_iu = *evt->xfer_iu;
1997 
1998 	complete(&evt->comp);
1999 }
2000 
2001 /**
2002  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
2003  * @evt:	struct ibmvfc_event
2004  *
2005  **/
2006 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
2007 {
2008 	struct ibmvfc_host *vhost = evt->vhost;
2009 
2010 	ibmvfc_free_event(evt);
2011 	vhost->aborting_passthru = 0;
2012 	dev_info(vhost->dev, "Passthru command cancelled\n");
2013 }
2014 
2015 /**
2016  * ibmvfc_bsg_timeout - Handle a BSG timeout
2017  * @job:	struct bsg_job that timed out
2018  *
2019  * Returns:
2020  *	0 on success / other on failure
2021  **/
2022 static int ibmvfc_bsg_timeout(struct bsg_job *job)
2023 {
2024 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2025 	unsigned long port_id = (unsigned long)job->dd_data;
2026 	struct ibmvfc_event *evt;
2027 	struct ibmvfc_tmf *tmf;
2028 	unsigned long flags;
2029 	int rc;
2030 
2031 	ENTER;
2032 	spin_lock_irqsave(vhost->host->host_lock, flags);
2033 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2034 		__ibmvfc_reset_host(vhost);
2035 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2036 		return 0;
2037 	}
2038 
2039 	vhost->aborting_passthru = 1;
2040 	evt = ibmvfc_get_event(&vhost->crq);
2041 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2042 
2043 	tmf = &evt->iu.tmf;
2044 	memset(tmf, 0, sizeof(*tmf));
2045 	tmf->common.version = cpu_to_be32(1);
2046 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2047 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2048 	tmf->scsi_id = cpu_to_be64(port_id);
2049 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2050 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2051 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2052 
2053 	if (rc != 0) {
2054 		vhost->aborting_passthru = 0;
2055 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2056 		rc = -EIO;
2057 	} else
2058 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2059 			 port_id);
2060 
2061 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2062 
2063 	LEAVE;
2064 	return rc;
2065 }
2066 
2067 /**
2068  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2069  * @vhost:		struct ibmvfc_host to send the command on
2070  * @port_id:	port ID to send the PLOGI to
2071  *
2072  * Returns:
2073  *	0 on success / other on failure
2074  **/
2075 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2076 {
2077 	struct ibmvfc_port_login *plogi;
2078 	struct ibmvfc_target *tgt;
2079 	struct ibmvfc_event *evt;
2080 	union ibmvfc_iu rsp_iu;
2081 	unsigned long flags;
2082 	int rc = 0, issue_login = 1;
2083 
2084 	ENTER;
2085 	spin_lock_irqsave(vhost->host->host_lock, flags);
2086 	list_for_each_entry(tgt, &vhost->targets, queue) {
2087 		if (tgt->scsi_id == port_id) {
2088 			issue_login = 0;
2089 			break;
2090 		}
2091 	}
2092 
2093 	if (!issue_login)
2094 		goto unlock_out;
2095 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2096 		goto unlock_out;
2097 
2098 	evt = ibmvfc_get_event(&vhost->crq);
2099 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2100 	plogi = &evt->iu.plogi;
2101 	memset(plogi, 0, sizeof(*plogi));
2102 	plogi->common.version = cpu_to_be32(1);
2103 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2104 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
2105 	plogi->scsi_id = cpu_to_be64(port_id);
2106 	evt->sync_iu = &rsp_iu;
2107 	init_completion(&evt->comp);
2108 
2109 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2110 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2111 
2112 	if (rc)
2113 		return -EIO;
2114 
2115 	wait_for_completion(&evt->comp);
2116 
2117 	if (rsp_iu.plogi.common.status)
2118 		rc = -EIO;
2119 
2120 	spin_lock_irqsave(vhost->host->host_lock, flags);
2121 	ibmvfc_free_event(evt);
2122 unlock_out:
2123 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2124 	LEAVE;
2125 	return rc;
2126 }
2127 
2128 /**
2129  * ibmvfc_bsg_request - Handle a BSG request
2130  * @job:	struct bsg_job to be executed
2131  *
2132  * Returns:
2133  *	0 on success / other on failure
2134  **/
2135 static int ibmvfc_bsg_request(struct bsg_job *job)
2136 {
2137 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2138 	struct fc_rport *rport = fc_bsg_to_rport(job);
2139 	struct ibmvfc_passthru_mad *mad;
2140 	struct ibmvfc_event *evt;
2141 	union ibmvfc_iu rsp_iu;
2142 	unsigned long flags, port_id = -1;
2143 	struct fc_bsg_request *bsg_request = job->request;
2144 	struct fc_bsg_reply *bsg_reply = job->reply;
2145 	unsigned int code = bsg_request->msgcode;
2146 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
2147 	u32 fc_flags, rsp_len;
2148 
2149 	ENTER;
2150 	bsg_reply->reply_payload_rcv_len = 0;
2151 	if (rport)
2152 		port_id = rport->port_id;
2153 
2154 	switch (code) {
2155 	case FC_BSG_HST_ELS_NOLOGIN:
2156 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2157 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
2158 			bsg_request->rqst_data.h_els.port_id[2];
2159 		fallthrough;
2160 	case FC_BSG_RPT_ELS:
2161 		fc_flags = IBMVFC_FC_ELS;
2162 		break;
2163 	case FC_BSG_HST_CT:
2164 		issue_login = 1;
2165 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2166 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2167 			bsg_request->rqst_data.h_ct.port_id[2];
2168 		fallthrough;
2169 	case FC_BSG_RPT_CT:
2170 		fc_flags = IBMVFC_FC_CT_IU;
2171 		break;
2172 	default:
2173 		return -ENOTSUPP;
2174 	}
2175 
2176 	if (port_id == -1)
2177 		return -EINVAL;
2178 	if (!mutex_trylock(&vhost->passthru_mutex))
2179 		return -EBUSY;
2180 
2181 	job->dd_data = (void *)port_id;
2182 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2183 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2184 
2185 	if (!req_seg) {
2186 		mutex_unlock(&vhost->passthru_mutex);
2187 		return -ENOMEM;
2188 	}
2189 
2190 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2191 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2192 
2193 	if (!rsp_seg) {
2194 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2195 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2196 		mutex_unlock(&vhost->passthru_mutex);
2197 		return -ENOMEM;
2198 	}
2199 
2200 	if (req_seg > 1 || rsp_seg > 1) {
2201 		rc = -EINVAL;
2202 		goto out;
2203 	}
2204 
2205 	if (issue_login)
2206 		rc = ibmvfc_bsg_plogi(vhost, port_id);
2207 
2208 	spin_lock_irqsave(vhost->host->host_lock, flags);
2209 
2210 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2211 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2212 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2213 		goto out;
2214 	}
2215 
2216 	evt = ibmvfc_get_event(&vhost->crq);
2217 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2218 	mad = &evt->iu.passthru;
2219 
2220 	memset(mad, 0, sizeof(*mad));
2221 	mad->common.version = cpu_to_be32(1);
2222 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2223 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2224 
2225 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2226 		offsetof(struct ibmvfc_passthru_mad, iu));
2227 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2228 
2229 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2230 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2231 	mad->iu.flags = cpu_to_be32(fc_flags);
2232 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2233 
2234 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2235 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2236 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2237 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2238 	mad->iu.scsi_id = cpu_to_be64(port_id);
2239 	mad->iu.tag = cpu_to_be64((u64)evt);
2240 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2241 
2242 	evt->sync_iu = &rsp_iu;
2243 	init_completion(&evt->comp);
2244 	rc = ibmvfc_send_event(evt, vhost, 0);
2245 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2246 
2247 	if (rc) {
2248 		rc = -EIO;
2249 		goto out;
2250 	}
2251 
2252 	wait_for_completion(&evt->comp);
2253 
2254 	if (rsp_iu.passthru.common.status)
2255 		rc = -EIO;
2256 	else
2257 		bsg_reply->reply_payload_rcv_len = rsp_len;
2258 
2259 	spin_lock_irqsave(vhost->host->host_lock, flags);
2260 	ibmvfc_free_event(evt);
2261 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2262 	bsg_reply->result = rc;
2263 	bsg_job_done(job, bsg_reply->result,
2264 		       bsg_reply->reply_payload_rcv_len);
2265 	rc = 0;
2266 out:
2267 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2268 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2269 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2270 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2271 	mutex_unlock(&vhost->passthru_mutex);
2272 	LEAVE;
2273 	return rc;
2274 }
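/*
 * Note: the passthru MAD carries exactly one DMA descriptor for the
 * command payload (mad->iu.cmd) and one for the response (mad->iu.rsp),
 * which is why ibmvfc_bsg_request() rejects any request whose payload
 * or reply maps to more than one scatter/gather element.
 */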
2275 
2276 /**
2277  * ibmvfc_reset_device - Reset the device with the specified reset type
2278  * @sdev:	scsi device to reset
2279  * @type:	reset type
2280  * @desc:	reset type description for log messages
2281  *
2282  * Returns:
2283  *	0 on success / other on failure
2284  **/
2285 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2286 {
2287 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2288 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2289 	struct ibmvfc_cmd *tmf;
2290 	struct ibmvfc_event *evt = NULL;
2291 	union ibmvfc_iu rsp_iu;
2292 	struct ibmvfc_fcp_cmd_iu *iu;
2293 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2294 	int rsp_rc = -EBUSY;
2295 	unsigned long flags;
2296 	int rsp_code = 0;
2297 
2298 	spin_lock_irqsave(vhost->host->host_lock, flags);
2299 	if (vhost->state == IBMVFC_ACTIVE) {
2300 		if (vhost->using_channels)
2301 			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2302 		else
2303 			evt = ibmvfc_get_event(&vhost->crq);
2304 
2305 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2306 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2307 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2308 
2309 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2310 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2311 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2312 		iu->tmf_flags = type;
2313 		evt->sync_iu = &rsp_iu;
2314 
2315 		init_completion(&evt->comp);
2316 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2317 	}
2318 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2319 
2320 	if (rsp_rc != 0) {
2321 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2322 			    desc, rsp_rc);
2323 		return -EIO;
2324 	}
2325 
2326 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2327 	wait_for_completion(&evt->comp);
2328 
2329 	if (rsp_iu.cmd.status)
2330 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2331 
2332 	if (rsp_code) {
2333 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2334 			rsp_code = fc_rsp->data.info.rsp_code;
2335 
2336 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2337 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2338 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2339 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2340 			    fc_rsp->scsi_status);
2341 		rsp_rc = -EIO;
2342 	} else
2343 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2344 
2345 	spin_lock_irqsave(vhost->host->host_lock, flags);
2346 	ibmvfc_free_event(evt);
2347 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2348 	return rsp_rc;
2349 }
2350 
2351 /**
2352  * ibmvfc_match_rport - Match function for specified remote port
2353  * @evt:	ibmvfc event struct
2354  * @rport:	device to match
2355  *
2356  * Returns:
2357  *	1 if event matches rport / 0 if event does not match rport
2358  **/
2359 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2360 {
2361 	struct fc_rport *cmd_rport;
2362 
2363 	if (evt->cmnd) {
2364 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2365 		if (cmd_rport == rport)
2366 			return 1;
2367 	}
2368 	return 0;
2369 }
2370 
2371 /**
2372  * ibmvfc_match_target - Match function for specified target
2373  * @evt:	ibmvfc event struct
2374  * @device:	device to match (starget)
2375  *
2376  * Returns:
2377  *	1 if event matches starget / 0 if event does not match starget
2378  **/
2379 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2380 {
2381 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2382 		return 1;
2383 	return 0;
2384 }
2385 
2386 /**
2387  * ibmvfc_match_lun - Match function for specified LUN
2388  * @evt:	ibmvfc event struct
2389  * @device:	device to match (sdev)
2390  *
2391  * Returns:
2392  *	1 if event matches sdev / 0 if event does not match sdev
2393  **/
2394 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2395 {
2396 	if (evt->cmnd && evt->cmnd->device == device)
2397 		return 1;
2398 	return 0;
2399 }
2400 
2401 /**
2402  * ibmvfc_event_is_free - Check if event is free or not
2403  * @evt:	ibmvfc event struct
2404  *
2405  * Returns:
2406  *	true if the event is on its queue's free list / false otherwise
2407  **/
2408 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2409 {
2410 	struct ibmvfc_event *loop_evt;
2411 
2412 	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2413 		if (loop_evt == evt)
2414 			return true;
2415 
2416 	return false;
2417 }
2418 
2419 /**
2420  * ibmvfc_wait_for_ops - Wait for ops to complete
2421  * @vhost:	ibmvfc host struct
2422  * @device:	device to match (starget or sdev)
2423  * @match:	match function
2424  *
2425  * Returns:
2426  *	SUCCESS / FAILED
2427  **/
2428 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2429 			       int (*match) (struct ibmvfc_event *, void *))
2430 {
2431 	struct ibmvfc_event *evt;
2432 	DECLARE_COMPLETION_ONSTACK(comp);
2433 	int wait, i, q_index, q_size;
2434 	unsigned long flags;
2435 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2436 	struct ibmvfc_queue *queues;
2437 
2438 	ENTER;
2439 	if (vhost->mq_enabled && vhost->using_channels) {
2440 		queues = vhost->scsi_scrqs.scrqs;
2441 		q_size = vhost->scsi_scrqs.active_queues;
2442 	} else {
2443 		queues = &vhost->crq;
2444 		q_size = 1;
2445 	}
2446 
2447 	do {
2448 		wait = 0;
2449 		spin_lock_irqsave(vhost->host->host_lock, flags);
2450 		for (q_index = 0; q_index < q_size; q_index++) {
2451 			spin_lock(&queues[q_index].l_lock);
2452 			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2453 				evt = &queues[q_index].evt_pool.events[i];
2454 				if (!ibmvfc_event_is_free(evt)) {
2455 					if (match(evt, device)) {
2456 						evt->eh_comp = &comp;
2457 						wait++;
2458 					}
2459 				}
2460 			}
2461 			spin_unlock(&queues[q_index].l_lock);
2462 		}
2463 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2464 
2465 		if (wait) {
2466 			timeout = wait_for_completion_timeout(&comp, timeout);
2467 
2468 			if (!timeout) {
2469 				wait = 0;
2470 				spin_lock_irqsave(vhost->host->host_lock, flags);
2471 				for (q_index = 0; q_index < q_size; q_index++) {
2472 					spin_lock(&queues[q_index].l_lock);
2473 					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2474 						evt = &queues[q_index].evt_pool.events[i];
2475 						if (!ibmvfc_event_is_free(evt)) {
2476 							if (match(evt, device)) {
2477 								evt->eh_comp = NULL;
2478 								wait++;
2479 							}
2480 						}
2481 					}
2482 					spin_unlock(&queues[q_index].l_lock);
2483 				}
2484 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2485 				if (wait)
2486 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2487 				LEAVE;
2488 				return wait ? FAILED : SUCCESS;
2489 			}
2490 		}
2491 	} while (wait);
2492 
2493 	LEAVE;
2494 	return SUCCESS;
2495 }
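/*
 * Note: ibmvfc_wait_for_ops() makes two passes over the event pools.
 * The first pass hooks every matching in-flight event to the on-stack
 * completion through evt->eh_comp; if the wait then times out, the
 * second pass unhooks any stragglers so a late completion cannot touch
 * the stack frame after this function has returned.
 */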
2496 
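/**
 * ibmvfc_init_tmf - Initialize a task management MAD event
 * @queue:	ibmvfc queue to allocate the event from
 * @sdev:	scsi device the TMF is targeting
 * @type:	TMF flags (IBMVFC_TMF_*)
 *
 * Returns:
 *	initialized struct ibmvfc_event with its completion initialized
 **/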
2497 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2498 					    struct scsi_device *sdev,
2499 					    int type)
2500 {
2501 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2502 	struct scsi_target *starget = scsi_target(sdev);
2503 	struct fc_rport *rport = starget_to_rport(starget);
2504 	struct ibmvfc_event *evt;
2505 	struct ibmvfc_tmf *tmf;
2506 
2507 	evt = ibmvfc_get_event(queue);
2508 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2509 
2510 	tmf = &evt->iu.tmf;
2511 	memset(tmf, 0, sizeof(*tmf));
2512 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2513 		tmf->common.version = cpu_to_be32(2);
2514 		tmf->target_wwpn = cpu_to_be64(rport->port_name);
2515 	} else {
2516 		tmf->common.version = cpu_to_be32(1);
2517 	}
2518 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2519 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2520 	tmf->scsi_id = cpu_to_be64(rport->port_id);
2521 	int_to_scsilun(sdev->lun, &tmf->lun);
2522 	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2523 		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2524 	if (vhost->state == IBMVFC_ACTIVE)
2525 		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2526 	else
2527 		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2528 	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2529 	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2530 
2531 	init_completion(&evt->comp);
2532 
2533 	return evt;
2534 }
2535 
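/**
 * ibmvfc_cancel_all_mq - Cancel all outstanding commands on every sub-CRQ
 * @sdev:	scsi device to cancel commands for
 * @type:	type of error recovery being performed
 *
 * Sends one cancel TMF per active hardware queue that has events
 * outstanding for @sdev, then waits for all of the cancels to complete.
 *
 * Returns:
 *	0 on success / -EIO on failure
 **/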
2536 static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
2537 {
2538 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2539 	struct ibmvfc_event *evt, *found_evt, *temp;
2540 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
2541 	unsigned long flags;
2542 	int num_hwq, i;
2543 	int fail = 0;
2544 	LIST_HEAD(cancelq);
2545 	u16 status;
2546 
2547 	ENTER;
2548 	spin_lock_irqsave(vhost->host->host_lock, flags);
2549 	num_hwq = vhost->scsi_scrqs.active_queues;
2550 	for (i = 0; i < num_hwq; i++) {
2551 		spin_lock(queues[i].q_lock);
2552 		spin_lock(&queues[i].l_lock);
2553 		found_evt = NULL;
2554 		list_for_each_entry(evt, &queues[i].sent, queue_list) {
2555 			if (evt->cmnd && evt->cmnd->device == sdev) {
2556 				found_evt = evt;
2557 				break;
2558 			}
2559 		}
2560 		spin_unlock(&queues[i].l_lock);
2561 
2562 		if (found_evt && vhost->logged_in) {
2563 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2564 			evt->sync_iu = &queues[i].cancel_rsp;
2565 			ibmvfc_send_event(evt, vhost, default_timeout);
2566 			list_add_tail(&evt->cancel, &cancelq);
2567 		}
2568 
2569 		spin_unlock(queues[i].q_lock);
2570 	}
2571 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2572 
2573 	if (list_empty(&cancelq)) {
2574 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2575 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2576 		return 0;
2577 	}
2578 
2579 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2580 
2581 	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2582 		wait_for_completion(&evt->comp);
2583 		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2584 		list_del(&evt->cancel);
2585 		ibmvfc_free_event(evt);
2586 
2587 		if (status != IBMVFC_MAD_SUCCESS) {
2588 			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2589 			switch (status) {
2590 			case IBMVFC_MAD_DRIVER_FAILED:
2591 			case IBMVFC_MAD_CRQ_ERROR:
2592 			/* Host adapter is most likely going through reset; return success
2593 			 * so the caller will wait for the command being cancelled to be
2594 			 * returned. */
2595 				break;
2596 			default:
2597 				fail = 1;
2598 				break;
2599 			}
2600 		}
2601 	}
2602 
2603 	if (fail)
2604 		return -EIO;
2605 
2606 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2607 	LEAVE;
2608 	return 0;
2609 }
2610 
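/**
 * ibmvfc_cancel_all_sq - Cancel all outstanding commands on the base CRQ
 * @sdev:	scsi device to cancel commands for
 * @type:	type of error recovery being performed
 *
 * Returns:
 *	0 on success / -EIO on failure
 **/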
2611 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2612 {
2613 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2614 	struct ibmvfc_event *evt, *found_evt;
2615 	union ibmvfc_iu rsp;
2616 	int rsp_rc = -EBUSY;
2617 	unsigned long flags;
2618 	u16 status;
2619 
2620 	ENTER;
2621 	found_evt = NULL;
2622 	spin_lock_irqsave(vhost->host->host_lock, flags);
2623 	spin_lock(&vhost->crq.l_lock);
2624 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2625 		if (evt->cmnd && evt->cmnd->device == sdev) {
2626 			found_evt = evt;
2627 			break;
2628 		}
2629 	}
2630 	spin_unlock(&vhost->crq.l_lock);
2631 
2632 	if (!found_evt) {
2633 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2634 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2635 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2636 		return 0;
2637 	}
2638 
2639 	if (vhost->logged_in) {
2640 		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2641 		evt->sync_iu = &rsp;
2642 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2643 	}
2644 
2645 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2646 
2647 	if (rsp_rc != 0) {
2648 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2649 		/* If a failure is received, the host adapter is most likely going
2650 		 * through reset; return success so the caller will wait for the
2651 		 * command being cancelled to be returned. */
2652 		return 0;
2653 	}
2654 
2655 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2656 
2657 	wait_for_completion(&evt->comp);
2658 	status = be16_to_cpu(rsp.mad_common.status);
2659 	spin_lock_irqsave(vhost->host->host_lock, flags);
2660 	ibmvfc_free_event(evt);
2661 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2662 
2663 	if (status != IBMVFC_MAD_SUCCESS) {
2664 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2665 		switch (status) {
2666 		case IBMVFC_MAD_DRIVER_FAILED:
2667 		case IBMVFC_MAD_CRQ_ERROR:
2668 			/* Host adapter is most likely going through reset; return success
2669 			 * so the caller will wait for the command being cancelled to be returned. */
2670 			return 0;
2671 		default:
2672 			return -EIO;
2673 		}
2674 	}
2675 
2676 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2677 	return 0;
2678 }
2679 
2680 /**
2681  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2682  * @sdev:	scsi device to cancel commands
2683  * @type:	type of error recovery being performed
2684  *
2685  * This sends a cancel to the VIOS for the specified device. This does
2686  * NOT send any abort to the actual device. That must be done separately.
2687  *
2688  * Returns:
2689  *	0 on success / other on failure
2690  **/
2691 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2692 {
2693 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2694 
2695 	if (vhost->mq_enabled && vhost->using_channels)
2696 		return ibmvfc_cancel_all_mq(sdev, type);
2697 	else
2698 		return ibmvfc_cancel_all_sq(sdev, type);
2699 }
2700 
2701 /**
2702  * ibmvfc_match_key - Match function for specified cancel key
2703  * @evt:	ibmvfc event struct
2704  * @key:	cancel key to match
2705  *
2706  * Returns:
2707  *	1 if event matches key / 0 if event does not match key
2708  **/
2709 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2710 {
2711 	unsigned long cancel_key = (unsigned long)key;
2712 
2713 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2714 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2715 		return 1;
2716 	return 0;
2717 }
2718 
2719 /**
2720  * ibmvfc_match_evt - Match function for specified event
2721  * @evt:	ibmvfc event struct
2722  * @match:	event to match
2723  *
2724  * Returns:
2725  *	1 if event matches the specified event / 0 otherwise
2726  **/
2727 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2728 {
2729 	if (evt == match)
2730 		return 1;
2731 	return 0;
2732 }
2733 
2734 /**
2735  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2736  * @sdev:	scsi device to abort commands
2737  *
2738  * This sends an Abort Task Set to the VIOS for the specified device. This does
2739  * NOT send any cancel to the VIOS. That must be done separately.
2740  *
2741  * Returns:
2742  *	0 on success / other on failure
2743  **/
2744 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2745 {
2746 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2747 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2748 	struct ibmvfc_cmd *tmf;
2749 	struct ibmvfc_event *evt, *found_evt;
2750 	union ibmvfc_iu rsp_iu;
2751 	struct ibmvfc_fcp_cmd_iu *iu;
2752 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2753 	int rc, rsp_rc = -EBUSY;
2754 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2755 	int rsp_code = 0;
2756 
2757 	found_evt = NULL;
2758 	spin_lock_irqsave(vhost->host->host_lock, flags);
2759 	spin_lock(&vhost->crq.l_lock);
2760 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2761 		if (evt->cmnd && evt->cmnd->device == sdev) {
2762 			found_evt = evt;
2763 			break;
2764 		}
2765 	}
2766 	spin_unlock(&vhost->crq.l_lock);
2767 
2768 	if (!found_evt) {
2769 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2770 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2771 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2772 		return 0;
2773 	}
2774 
2775 	if (vhost->state == IBMVFC_ACTIVE) {
2776 		evt = ibmvfc_get_event(&vhost->crq);
2777 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2778 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2779 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2780 
2781 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2782 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2783 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2784 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2785 		evt->sync_iu = &rsp_iu;
2786 
2787 		tmf->correlation = cpu_to_be64((u64)evt);
2788 
2789 		init_completion(&evt->comp);
2790 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2791 	}
2792 
2793 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2794 
2795 	if (rsp_rc != 0) {
2796 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2797 		return -EIO;
2798 	}
2799 
2800 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2801 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2802 
2803 	if (!timeout) {
2804 		rc = ibmvfc_cancel_all(sdev, 0);
2805 		if (!rc) {
2806 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2807 			if (rc == SUCCESS)
2808 				rc = 0;
2809 		}
2810 
2811 		if (rc) {
2812 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2813 			ibmvfc_reset_host(vhost);
2814 			rsp_rc = -EIO;
2815 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2816 
2817 			if (rc == SUCCESS)
2818 				rsp_rc = 0;
2819 
2820 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2821 			if (rc != SUCCESS) {
2822 				spin_lock_irqsave(vhost->host->host_lock, flags);
2823 				ibmvfc_hard_reset_host(vhost);
2824 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2825 				rsp_rc = 0;
2826 			}
2827 
2828 			goto out;
2829 		}
2830 	}
2831 
2832 	if (rsp_iu.cmd.status)
2833 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2834 
2835 	if (rsp_code) {
2836 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2837 			rsp_code = fc_rsp->data.info.rsp_code;
2838 
2839 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2840 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2841 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2842 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2843 			    fc_rsp->scsi_status);
2844 		rsp_rc = -EIO;
2845 	} else
2846 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2847 
2848 out:
2849 	spin_lock_irqsave(vhost->host->host_lock, flags);
2850 	ibmvfc_free_event(evt);
2851 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2852 	return rsp_rc;
2853 }
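/*
 * Note: on timeout ibmvfc_abort_task_set() escalates - first a cancel
 * to the VIOS, then a host reset if the cancel fails, and finally a
 * hard reset if the aborted event itself never comes back. Each rung
 * waits for outstanding ops via ibmvfc_wait_for_ops() before escalating
 * further.
 */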
2854 
2855 /**
2856  * ibmvfc_eh_abort_handler - Abort a command
2857  * @cmd:	scsi command to abort
2858  *
2859  * Returns:
2860  *	SUCCESS / FAST_IO_FAIL / FAILED
2861  **/
2862 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2863 {
2864 	struct scsi_device *sdev = cmd->device;
2865 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2866 	int cancel_rc, block_rc;
2867 	int rc = FAILED;
2868 
2869 	ENTER;
2870 	block_rc = fc_block_scsi_eh(cmd);
2871 	ibmvfc_wait_while_resetting(vhost);
2872 	if (block_rc != FAST_IO_FAIL) {
2873 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2874 		ibmvfc_abort_task_set(sdev);
2875 	} else
2876 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2877 
2878 	if (!cancel_rc)
2879 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2880 
2881 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2882 		rc = FAST_IO_FAIL;
2883 
2884 	LEAVE;
2885 	return rc;
2886 }
2887 
2888 /**
2889  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2890  * @cmd:	scsi command struct
2891  *
2892  * Returns:
2893  *	SUCCESS / FAST_IO_FAIL / FAILED
2894  **/
2895 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2896 {
2897 	struct scsi_device *sdev = cmd->device;
2898 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2899 	int cancel_rc, block_rc, reset_rc = 0;
2900 	int rc = FAILED;
2901 
2902 	ENTER;
2903 	block_rc = fc_block_scsi_eh(cmd);
2904 	ibmvfc_wait_while_resetting(vhost);
2905 	if (block_rc != FAST_IO_FAIL) {
2906 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2907 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2908 	} else
2909 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2910 
2911 	if (!cancel_rc && !reset_rc)
2912 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2913 
2914 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2915 		rc = FAST_IO_FAIL;
2916 
2917 	LEAVE;
2918 	return rc;
2919 }
2920 
2921 /**
2922  * ibmvfc_dev_cancel_all_noreset - Per-device cancel-all iterator (suppress ABTS)
2923  * @sdev:	scsi device struct
2924  * @data:	return code
2925  *
2926  **/
2927 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2928 {
2929 	unsigned long *rc = data;
2930 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2931 }
2932 
2933 /**
2934  * ibmvfc_dev_cancel_all_reset - Per-device cancel-all iterator (target reset)
2935  * @sdev:	scsi device struct
2936  * @data:	return code
2937  *
2938  **/
2939 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2940 {
2941 	unsigned long *rc = data;
2942 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2943 }
2944 
2945 /**
2946  * ibmvfc_eh_target_reset_handler - Reset the target
2947  * @cmd:	scsi command struct
2948  *
2949  * Returns:
2950  *	SUCCESS / FAST_IO_FAIL / FAILED
2951  **/
2952 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2953 {
2954 	struct scsi_device *sdev = cmd->device;
2955 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2956 	struct scsi_target *starget = scsi_target(sdev);
2957 	int block_rc;
2958 	int reset_rc = 0;
2959 	int rc = FAILED;
2960 	unsigned long cancel_rc = 0;
2961 
2962 	ENTER;
2963 	block_rc = fc_block_scsi_eh(cmd);
2964 	ibmvfc_wait_while_resetting(vhost);
2965 	if (block_rc != FAST_IO_FAIL) {
2966 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2967 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2968 	} else
2969 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2970 
2971 	if (!cancel_rc && !reset_rc)
2972 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2973 
2974 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2975 		rc = FAST_IO_FAIL;
2976 
2977 	LEAVE;
2978 	return rc;
2979 }
2980 
2981 /**
2982  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2983  * @cmd:	struct scsi_cmnd having problems
2984  *
2985  **/
2986 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2987 {
2988 	int rc;
2989 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2990 
2991 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2992 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2993 
2994 	return rc ? FAILED : SUCCESS;
2995 }
2996 
2997 /**
2998  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2999  * @rport:		rport struct
3000  *
3001  * Return value:
3002  * 	none
3003  **/
3004 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
3005 {
3006 	struct Scsi_Host *shost = rport_to_shost(rport);
3007 	struct ibmvfc_host *vhost = shost_priv(shost);
3008 	struct fc_rport *dev_rport;
3009 	struct scsi_device *sdev;
3010 	struct ibmvfc_target *tgt;
3011 	unsigned long rc, flags;
3012 	unsigned int found;
3013 
3014 	ENTER;
3015 	shost_for_each_device(sdev, shost) {
3016 		dev_rport = starget_to_rport(scsi_target(sdev));
3017 		if (dev_rport != rport)
3018 			continue;
3019 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
3020 	}
3021 
3022 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
3023 
3024 	if (rc == FAILED)
3025 		ibmvfc_issue_fc_host_lip(shost);
3026 
3027 	spin_lock_irqsave(shost->host_lock, flags);
3028 	found = 0;
3029 	list_for_each_entry(tgt, &vhost->targets, queue) {
3030 		if (tgt->scsi_id == rport->port_id) {
3031 			found++;
3032 			break;
3033 		}
3034 	}
3035 
3036 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3037 		/*
3038 		 * If we get here, that means we previously attempted to send
3039 		 * an implicit logout to the target but it failed, most likely
3040 		 * due to I/O being pending, so we need to send it again
3041 		 */
3042 		ibmvfc_del_tgt(tgt);
3043 		ibmvfc_reinit_host(vhost);
3044 	}
3045 
3046 	spin_unlock_irqrestore(shost->host_lock, flags);
3047 	LEAVE;
3048 }
3049 
3050 static const struct ibmvfc_async_desc ae_desc[] = {
3051 	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3052 	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3053 	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3054 	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3055 	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3056 	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
3057 	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
3058 	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
3059 	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
3060 	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
3061 	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
3062 	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
3063 	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
3064 };
3065 
3066 static const struct ibmvfc_async_desc unknown_ae = {
3067 	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
3068 };
3069 
3070 /**
3071  * ibmvfc_get_ae_desc - Get text description for async event
3072  * @ae:	async event
3073  *
3074  **/
3075 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3076 {
3077 	int i;
3078 
3079 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3080 		if (ae_desc[i].ae == ae)
3081 			return &ae_desc[i];
3082 
3083 	return &unknown_ae;
3084 }
3085 
3086 static const struct {
3087 	enum ibmvfc_ae_link_state state;
3088 	const char *desc;
3089 } link_desc[] = {
3090 	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
3091 	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
3092 	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
3093 	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
3094 };
3095 
3096 /**
3097  * ibmvfc_get_link_state - Get text description for link state
3098  * @state:	link state
3099  *
3100  **/
3101 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3102 {
3103 	int i;
3104 
3105 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3106 		if (link_desc[i].state == state)
3107 			return link_desc[i].desc;
3108 
3109 	return "";
3110 }
3111 
3112 /**
3113  * ibmvfc_handle_async - Handle an async event from the adapter
3114  * @crq:	crq to process
3115  * @vhost:	ibmvfc host struct
3116  *
3117  **/
3118 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3119 				struct ibmvfc_host *vhost)
3120 {
3121 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3122 	struct ibmvfc_target *tgt;
3123 
3124 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3125 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3126 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3127 		   ibmvfc_get_link_state(crq->link_state));
3128 
3129 	switch (be64_to_cpu(crq->event)) {
3130 	case IBMVFC_AE_RESUME:
3131 		switch (crq->link_state) {
3132 		case IBMVFC_AE_LS_LINK_DOWN:
3133 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3134 			break;
3135 		case IBMVFC_AE_LS_LINK_DEAD:
3136 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3137 			break;
3138 		case IBMVFC_AE_LS_LINK_UP:
3139 		case IBMVFC_AE_LS_LINK_BOUNCED:
3140 		default:
3141 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
3142 			vhost->delay_init = 1;
3143 			__ibmvfc_reset_host(vhost);
3144 			break;
3145 		}
3146 
3147 		break;
3148 	case IBMVFC_AE_LINK_UP:
3149 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
3150 		vhost->delay_init = 1;
3151 		__ibmvfc_reset_host(vhost);
3152 		break;
3153 	case IBMVFC_AE_SCN_FABRIC:
3154 	case IBMVFC_AE_SCN_DOMAIN:
3155 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3156 		if (vhost->state < IBMVFC_HALTED) {
3157 			vhost->delay_init = 1;
3158 			__ibmvfc_reset_host(vhost);
3159 		}
3160 		break;
3161 	case IBMVFC_AE_SCN_NPORT:
3162 	case IBMVFC_AE_SCN_GROUP:
3163 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3164 		ibmvfc_reinit_host(vhost);
3165 		break;
3166 	case IBMVFC_AE_ELS_LOGO:
3167 	case IBMVFC_AE_ELS_PRLO:
3168 	case IBMVFC_AE_ELS_PLOGI:
3169 		list_for_each_entry(tgt, &vhost->targets, queue) {
3170 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3171 				break;
3172 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3173 				continue;
3174 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3175 				continue;
3176 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3177 				continue;
3178 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3179 				tgt->logo_rcvd = 1;
3180 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3181 				ibmvfc_del_tgt(tgt);
3182 				ibmvfc_reinit_host(vhost);
3183 			}
3184 		}
3185 		break;
3186 	case IBMVFC_AE_LINK_DOWN:
3187 	case IBMVFC_AE_ADAPTER_FAILED:
3188 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3189 		break;
3190 	case IBMVFC_AE_LINK_DEAD:
3191 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3192 		break;
3193 	case IBMVFC_AE_HALT:
3194 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
3195 		break;
3196 	default:
3197 		dev_err(vhost->dev, "Unknown async event received: %lld\n", be64_to_cpu(crq->event));
3198 		break;
3199 	}
3200 }
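/*
 * Note: for the ELS events above, each nonzero identifier in the async
 * CRQ (scsi_id, wwpn, node_name) narrows the target match, a zero field
 * is ignored, and an event carrying all three as zero matches no target
 * at all.
 */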
3201 
3202 /**
3203  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3204  * @crq:	Command/Response queue
3205  * @vhost:	ibmvfc host struct
3206  * @evt_doneq:	Event done queue
3207  *
3208  **/
3209 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3210 			      struct list_head *evt_doneq)
3211 {
3212 	long rc;
3213 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3214 
3215 	switch (crq->valid) {
3216 	case IBMVFC_CRQ_INIT_RSP:
3217 		switch (crq->format) {
3218 		case IBMVFC_CRQ_INIT:
3219 			dev_info(vhost->dev, "Partner initialized\n");
3220 			/* Send back a response */
3221 			rc = ibmvfc_send_crq_init_complete(vhost);
3222 			if (rc == 0)
3223 				ibmvfc_init_host(vhost);
3224 			else
3225 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3226 			break;
3227 		case IBMVFC_CRQ_INIT_COMPLETE:
3228 			dev_info(vhost->dev, "Partner initialization complete\n");
3229 			ibmvfc_init_host(vhost);
3230 			break;
3231 		default:
3232 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3233 		}
3234 		return;
3235 	case IBMVFC_CRQ_XPORT_EVENT:
3236 		vhost->state = IBMVFC_NO_CRQ;
3237 		vhost->logged_in = 0;
3238 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3239 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3240 			/* We need to re-setup the interpartition connection */
3241 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3242 			vhost->client_migrated = 1;
3243 
3244 			scsi_block_requests(vhost->host);
3245 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
3246 			ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
3247 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3248 			wake_up(&vhost->work_wait_q);
3249 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3250 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3251 			ibmvfc_purge_requests(vhost, DID_ERROR);
3252 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3253 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3254 		} else {
3255 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3256 		}
3257 		return;
3258 	case IBMVFC_CRQ_CMD_RSP:
3259 		break;
3260 	default:
3261 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3262 		return;
3263 	}
3264 
3265 	if (crq->format == IBMVFC_ASYNC_EVENT)
3266 		return;
3267 
3268 	/* The only kind of payload CRQs we should get are responses to
3269 	 * things we send. Make sure this response is to something we
3270 	 * actually sent
3271 	 */
3272 	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3273 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3274 			crq->ioba);
3275 		return;
3276 	}
3277 
3278 	if (unlikely(atomic_dec_if_positive(&evt->active))) {
3279 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3280 			crq->ioba);
3281 		return;
3282 	}
3283 
3284 	spin_lock(&evt->queue->l_lock);
3285 	list_move_tail(&evt->queue_list, evt_doneq);
3286 	spin_unlock(&evt->queue->l_lock);
3287 }
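/*
 * Note: the response's ioba field round-trips the event pointer that
 * ibmvfc_send_event() supplied as the command tag, so the
 * ibmvfc_valid_event() check and the atomic_dec_if_positive() on
 * evt->active guard against corrupt and duplicate correlation tokens
 * before the event is moved to the done queue.
 */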
3288 
3289 /**
3290  * ibmvfc_scan_finished - Check if the device scan is done.
3291  * @shost:	scsi host struct
3292  * @time:	current elapsed time
3293  *
3294  * Returns:
3295  *	0 if scan is not done / 1 if scan is done
3296  **/
3297 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3298 {
3299 	unsigned long flags;
3300 	struct ibmvfc_host *vhost = shost_priv(shost);
3301 	int done = 0;
3302 
3303 	spin_lock_irqsave(shost->host_lock, flags);
3304 	if (!vhost->scan_timeout)
3305 		done = 1;
3306 	else if (time >= (vhost->scan_timeout * HZ)) {
3307 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3308 			 "continuing initialization\n", vhost->scan_timeout);
3309 		done = 1;
3310 	}
3311 
3312 	if (vhost->scan_complete) {
3313 		vhost->scan_timeout = init_timeout;
3314 		done = 1;
3315 	}
3316 	spin_unlock_irqrestore(shost->host_lock, flags);
3317 	return done;
3318 }
3319 
3320 /**
3321  * ibmvfc_slave_alloc - Setup the device's task set value
3322  * @sdev:	struct scsi_device device to configure
3323  *
3324  * Set the device's task set value so that error handling works as
3325  * expected.
3326  *
3327  * Returns:
3328  *	0 on success / -ENXIO if device does not exist
3329  **/
3330 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3331 {
3332 	struct Scsi_Host *shost = sdev->host;
3333 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3334 	struct ibmvfc_host *vhost = shost_priv(shost);
3335 	unsigned long flags = 0;
3336 
3337 	if (!rport || fc_remote_port_chkready(rport))
3338 		return -ENXIO;
3339 
3340 	spin_lock_irqsave(shost->host_lock, flags);
3341 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3342 	spin_unlock_irqrestore(shost->host_lock, flags);
3343 	return 0;
3344 }
3345 
3346 /**
3347  * ibmvfc_target_alloc - Setup the target's task set value
3348  * @starget:	struct scsi_target
3349  *
3350  * Set the target's task set value so that error handling works as
3351  * expected.
3352  *
3353  * Returns:
3354  *	0 on success / -ENXIO if device does not exist
3355  **/
3356 static int ibmvfc_target_alloc(struct scsi_target *starget)
3357 {
3358 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3359 	struct ibmvfc_host *vhost = shost_priv(shost);
3360 	unsigned long flags = 0;
3361 
3362 	spin_lock_irqsave(shost->host_lock, flags);
3363 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3364 	spin_unlock_irqrestore(shost->host_lock, flags);
3365 	return 0;
3366 }
3367 
3368 /**
3369  * ibmvfc_slave_configure - Configure the device
3370  * @sdev:	struct scsi_device device to configure
3371  *
3372  * Enable allow_restart for a device if it is a disk. Adjust the
3373  * queue_depth here also.
3374  *
3375  * Returns:
3376  *	0
3377  **/
3378 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3379 {
3380 	struct Scsi_Host *shost = sdev->host;
3381 	unsigned long flags = 0;
3382 
3383 	spin_lock_irqsave(shost->host_lock, flags);
3384 	if (sdev->type == TYPE_DISK) {
3385 		sdev->allow_restart = 1;
3386 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3387 	}
3388 	spin_unlock_irqrestore(shost->host_lock, flags);
3389 	return 0;
3390 }
3391 
3392 /**
3393  * ibmvfc_change_queue_depth - Change the device's queue depth
3394  * @sdev:	scsi device struct
3395  * @qdepth:	depth to set
3396  *
3397  * Return value:
3398  * 	actual depth set
3399  **/
3400 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3401 {
3402 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3403 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3404 
3405 	return scsi_change_queue_depth(sdev, qdepth);
3406 }
3407 
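/*
 * The sysfs show routines below expose fields from the NPIV login
 * response buffer (partition name, device name, location code, DRC
 * name, NPIV version and capabilities) read-only to userspace.
 */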
3408 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3409 						 struct device_attribute *attr, char *buf)
3410 {
3411 	struct Scsi_Host *shost = class_to_shost(dev);
3412 	struct ibmvfc_host *vhost = shost_priv(shost);
3413 
3414 	return snprintf(buf, PAGE_SIZE, "%s\n",
3415 			vhost->login_buf->resp.partition_name);
3416 }
3417 
3418 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3419 					    struct device_attribute *attr, char *buf)
3420 {
3421 	struct Scsi_Host *shost = class_to_shost(dev);
3422 	struct ibmvfc_host *vhost = shost_priv(shost);
3423 
3424 	return snprintf(buf, PAGE_SIZE, "%s\n",
3425 			vhost->login_buf->resp.device_name);
3426 }
3427 
3428 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3429 					 struct device_attribute *attr, char *buf)
3430 {
3431 	struct Scsi_Host *shost = class_to_shost(dev);
3432 	struct ibmvfc_host *vhost = shost_priv(shost);
3433 
3434 	return snprintf(buf, PAGE_SIZE, "%s\n",
3435 			vhost->login_buf->resp.port_loc_code);
3436 }
3437 
3438 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3439 					 struct device_attribute *attr, char *buf)
3440 {
3441 	struct Scsi_Host *shost = class_to_shost(dev);
3442 	struct ibmvfc_host *vhost = shost_priv(shost);
3443 
3444 	return snprintf(buf, PAGE_SIZE, "%s\n",
3445 			vhost->login_buf->resp.drc_name);
3446 }
3447 
3448 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3449 					     struct device_attribute *attr, char *buf)
3450 {
3451 	struct Scsi_Host *shost = class_to_shost(dev);
3452 	struct ibmvfc_host *vhost = shost_priv(shost);
3453 	return snprintf(buf, PAGE_SIZE, "%u\n", be32_to_cpu(vhost->login_buf->resp.version));
3454 }
3455 
3456 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3457 					     struct device_attribute *attr, char *buf)
3458 {
3459 	struct Scsi_Host *shost = class_to_shost(dev);
3460 	struct ibmvfc_host *vhost = shost_priv(shost);
3461 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3462 }
3463 
3464 /**
3465  * ibmvfc_show_log_level - Show the adapter's error logging level
3466  * @dev:	class device struct
3467  * @attr:	unused
3468  * @buf:	buffer
3469  *
3470  * Return value:
3471  * 	number of bytes printed to buffer
3472  **/
3473 static ssize_t ibmvfc_show_log_level(struct device *dev,
3474 				     struct device_attribute *attr, char *buf)
3475 {
3476 	struct Scsi_Host *shost = class_to_shost(dev);
3477 	struct ibmvfc_host *vhost = shost_priv(shost);
3478 	unsigned long flags = 0;
3479 	int len;
3480 
3481 	spin_lock_irqsave(shost->host_lock, flags);
3482 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3483 	spin_unlock_irqrestore(shost->host_lock, flags);
3484 	return len;
3485 }
3486 
3487 /**
3488  * ibmvfc_store_log_level - Change the adapter's error logging level
3489  * @dev:	class device struct
3490  * @attr:	unused
3491  * @buf:	buffer
3492  * @count:      buffer size
3493  *
3494  * Return value:
3495  * 	number of bytes consumed from the buffer
3496  **/
3497 static ssize_t ibmvfc_store_log_level(struct device *dev,
3498 				      struct device_attribute *attr,
3499 				      const char *buf, size_t count)
3500 {
3501 	struct Scsi_Host *shost = class_to_shost(dev);
3502 	struct ibmvfc_host *vhost = shost_priv(shost);
3503 	unsigned long flags = 0;
3504 
3505 	spin_lock_irqsave(shost->host_lock, flags);
3506 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3507 	spin_unlock_irqrestore(shost->host_lock, flags);
3508 	return strlen(buf);
3509 }
3510 
3511 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3512 					 struct device_attribute *attr, char *buf)
3513 {
3514 	struct Scsi_Host *shost = class_to_shost(dev);
3515 	struct ibmvfc_host *vhost = shost_priv(shost);
3516 	unsigned long flags = 0;
3517 	int len;
3518 
3519 	spin_lock_irqsave(shost->host_lock, flags);
3520 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3521 	spin_unlock_irqrestore(shost->host_lock, flags);
3522 	return len;
3523 }
3524 
3525 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3526 					 struct device_attribute *attr,
3527 					 const char *buf, size_t count)
3528 {
3529 	struct Scsi_Host *shost = class_to_shost(dev);
3530 	struct ibmvfc_host *vhost = shost_priv(shost);
3531 	unsigned long flags = 0;
3532 	unsigned int channels;
3533 
3534 	spin_lock_irqsave(shost->host_lock, flags);
3535 	channels = simple_strtoul(buf, NULL, 10);
3536 	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3537 	ibmvfc_hard_reset_host(vhost);
3538 	spin_unlock_irqrestore(shost->host_lock, flags);
3539 	return strlen(buf);
3540 }
3541 
3542 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3543 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3544 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3545 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3546 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3547 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3548 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3549 		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3550 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3551 		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
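/*
 * These attributes appear in sysfs under the Scsi_Host device, e.g.
 * (path illustrative) /sys/class/scsi_host/hostN/nr_scsi_channels.
 * Reading nr_scsi_channels reports the current client channel count;
 * writing it clamps the value to the number of SCSI hardware queues and
 * triggers a hard reset of the host so the new channel configuration
 * can be renegotiated.
 */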
3552 
3553 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3554 /**
3555  * ibmvfc_read_trace - Dump the adapter trace
3556  * @filp:		open sysfs file
3557  * @kobj:		kobject struct
3558  * @bin_attr:	bin_attribute struct
3559  * @buf:		buffer
3560  * @off:		offset
3561  * @count:		buffer size
3562  *
3563  * Return value:
3564  *	number of bytes printed to buffer
3565  **/
3566 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3567 				 struct bin_attribute *bin_attr,
3568 				 char *buf, loff_t off, size_t count)
3569 {
3570 	struct device *dev = kobj_to_dev(kobj);
3571 	struct Scsi_Host *shost = class_to_shost(dev);
3572 	struct ibmvfc_host *vhost = shost_priv(shost);
3573 	unsigned long flags = 0;
3574 	int size = IBMVFC_TRACE_SIZE;
3575 	char *src = (char *)vhost->trace;
3576 
3577 	if (off > size)
3578 		return 0;
3579 	if (off + count > size) {
3580 		size -= off;
3581 		count = size;
3582 	}
3583 
3584 	spin_lock_irqsave(shost->host_lock, flags);
3585 	memcpy(buf, &src[off], count);
3586 	spin_unlock_irqrestore(shost->host_lock, flags);
3587 	return count;
3588 }
3589 
3590 static struct bin_attribute ibmvfc_trace_attr = {
3591 	.attr =	{
3592 		.name = "trace",
3593 		.mode = S_IRUGO,
3594 	},
3595 	.size = 0,
3596 	.read = ibmvfc_read_trace,
3597 };
3598 #endif
3599 
3600 static struct attribute *ibmvfc_host_attrs[] = {
3601 	&dev_attr_partition_name.attr,
3602 	&dev_attr_device_name.attr,
3603 	&dev_attr_port_loc_code.attr,
3604 	&dev_attr_drc_name.attr,
3605 	&dev_attr_npiv_version.attr,
3606 	&dev_attr_capabilities.attr,
3607 	&dev_attr_log_level.attr,
3608 	&dev_attr_nr_scsi_channels.attr,
3609 	NULL
3610 };
3611 
3612 ATTRIBUTE_GROUPS(ibmvfc_host);
3613 
3614 static const struct scsi_host_template driver_template = {
3615 	.module = THIS_MODULE,
3616 	.name = "IBM POWER Virtual FC Adapter",
3617 	.proc_name = IBMVFC_NAME,
3618 	.queuecommand = ibmvfc_queuecommand,
3619 	.eh_timed_out = fc_eh_timed_out,
3620 	.eh_abort_handler = ibmvfc_eh_abort_handler,
3621 	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3622 	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3623 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3624 	.slave_alloc = ibmvfc_slave_alloc,
3625 	.slave_configure = ibmvfc_slave_configure,
3626 	.target_alloc = ibmvfc_target_alloc,
3627 	.scan_finished = ibmvfc_scan_finished,
3628 	.change_queue_depth = ibmvfc_change_queue_depth,
3629 	.cmd_per_lun = 16,
3630 	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3631 	.this_id = -1,
3632 	.sg_tablesize = SG_ALL,
3633 	.max_sectors = IBMVFC_MAX_SECTORS,
3634 	.shost_groups = ibmvfc_host_groups,
3635 	.track_queue_depth = 1,
3636 	.host_tagset = 1,
3637 };
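/*
 * Note: host_tagset = 1 shares a single blk-mq tag space across all
 * hardware queues, so can_queue bounds the total commands outstanding
 * on the host rather than per queue.
 */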
3638 
3639 /**
3640  * ibmvfc_next_async_crq - Returns the next entry in async queue
3641  * @vhost:	ibmvfc host struct
3642  *
3643  * Returns:
3644  *	Pointer to next entry in queue / NULL if empty
3645  **/
3646 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3647 {
3648 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
3649 	struct ibmvfc_async_crq *crq;
3650 
3651 	crq = &async_crq->msgs.async[async_crq->cur];
3652 	if (crq->valid & 0x80) {
3653 		if (++async_crq->cur == async_crq->size)
3654 			async_crq->cur = 0;
3655 		rmb();
3656 	} else
3657 		crq = NULL;
3658 
3659 	return crq;
3660 }
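/*
 * Queue entry consumption convention shared by the CRQ, async CRQ and
 * sub-CRQs: an entry is valid when the top bit (0x80) of its valid byte
 * is set. The consumer advances the cursor, wrapping at the queue size,
 * and after processing an entry the caller writes valid = 0 followed by
 * a write barrier so the slot can be reused.
 */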
3661 
3662 /**
3663  * ibmvfc_next_crq - Returns the next entry in message queue
3664  * @vhost:	ibmvfc host struct
3665  *
3666  * Returns:
3667  *	Pointer to next entry in queue / NULL if empty
3668  **/
3669 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3670 {
3671 	struct ibmvfc_queue *queue = &vhost->crq;
3672 	struct ibmvfc_crq *crq;
3673 
3674 	crq = &queue->msgs.crq[queue->cur];
3675 	if (crq->valid & 0x80) {
3676 		if (++queue->cur == queue->size)
3677 			queue->cur = 0;
3678 		rmb();
3679 	} else
3680 		crq = NULL;
3681 
3682 	return crq;
3683 }
3684 
3685 /**
3686  * ibmvfc_interrupt - Interrupt handler
3687  * @irq:		number of irq to handle, not used
3688  * @dev_instance: ibmvfc_host that received interrupt
3689  *
3690  * Returns:
3691  *	IRQ_HANDLED
3692  **/
3693 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3694 {
3695 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3696 	unsigned long flags;
3697 
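	/*
	 * Mask further interrupts from the device and defer all queue
	 * processing to the tasklet, which re-enables interrupts once
	 * both queues have been drained.
	 */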
3698 	spin_lock_irqsave(vhost->host->host_lock, flags);
3699 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3700 	tasklet_schedule(&vhost->tasklet);
3701 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3702 	return IRQ_HANDLED;
3703 }
3704 
3705 /**
3706  * ibmvfc_tasklet - Interrupt handler tasklet
3707  * @data:		ibmvfc host struct
3708  *
3709  * Returns:
3710  *	Nothing
3711  **/
3712 static void ibmvfc_tasklet(void *data)
3713 {
3714 	struct ibmvfc_host *vhost = data;
3715 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3716 	struct ibmvfc_crq *crq;
3717 	struct ibmvfc_async_crq *async;
3718 	struct ibmvfc_event *evt, *temp;
3719 	unsigned long flags;
3720 	int done = 0;
3721 	LIST_HEAD(evt_doneq);
3722 
3723 	spin_lock_irqsave(vhost->host->host_lock, flags);
3724 	spin_lock(vhost->crq.q_lock);
3725 	while (!done) {
3726 		/* Pull all the valid messages off the async CRQ */
3727 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3728 			ibmvfc_handle_async(async, vhost);
3729 			async->valid = 0;
3730 			wmb();
3731 		}
3732 
3733 		/* Pull all the valid messages off the CRQ */
3734 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3735 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3736 			crq->valid = 0;
3737 			wmb();
3738 		}
3739 
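		/*
		 * Re-enable interrupts, then look at both queues one more
		 * time. This closes the race where an entry arrives after
		 * the queues were drained but before interrupts were
		 * re-enabled; if anything raced in, disable interrupts and
		 * go around again.
		 */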
3740 		vio_enable_interrupts(vdev);
3741 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3742 			vio_disable_interrupts(vdev);
3743 			ibmvfc_handle_async(async, vhost);
3744 			async->valid = 0;
3745 			wmb();
3746 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3747 			vio_disable_interrupts(vdev);
3748 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3749 			crq->valid = 0;
3750 			wmb();
3751 		} else
3752 			done = 1;
3753 	}
3754 
3755 	spin_unlock(vhost->crq.q_lock);
3756 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3757 
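	/*
	 * Run the collected completions only after dropping both locks,
	 * so lengthy done handlers do not extend lock hold times and are
	 * free to take the host lock themselves.
	 */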
3758 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3759 		del_timer(&evt->timer);
3760 		list_del(&evt->queue_list);
3761 		ibmvfc_trc_end(evt);
3762 		evt->done(evt);
3763 	}
3764 }
3765 
3766 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3767 {
3768 	struct device *dev = scrq->vhost->dev;
3769 	struct vio_dev *vdev = to_vio_dev(dev);
3770 	unsigned long rc;
3771 	int irq_action = H_ENABLE_VIO_INTERRUPT;
3772 
3773 	if (!enable)
3774 		irq_action = H_DISABLE_VIO_INTERRUPT;
3775 
3776 	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3777 				scrq->hw_irq, 0, 0);
3778 
3779 	if (rc)
3780 		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3781 			enable ? "enable" : "disable", scrq->hwq_id, rc);
3782 
3783 	return rc;
3784 }
3785 
3786 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3787 			       struct list_head *evt_doneq)
3788 {
3789 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3790 
3791 	switch (crq->valid) {
3792 	case IBMVFC_CRQ_CMD_RSP:
3793 		break;
3794 	case IBMVFC_CRQ_XPORT_EVENT:
3795 		return;
3796 	default:
3797 		dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3798 		return;
3799 	}
3800 
3801 	/* The only kind of payload CRQs we should get are responses to
3802 	 * things we send. Make sure this response is to something we
3803 	 * actually sent
3804 	 */
3805 	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3806 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3807 			crq->ioba);
3808 		return;
3809 	}
3810 
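	/*
	 * evt->active is 1 while the command is outstanding, so the
	 * atomic_dec_if_positive() succeeds (returns 0) exactly once per
	 * event; a second response carrying the same correlation token
	 * fails the decrement and is flagged as a duplicate.
	 */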
3811 	if (unlikely(atomic_dec_if_positive(&evt->active))) {
3812 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3813 			crq->ioba);
3814 		return;
3815 	}
3816 
3817 	spin_lock(&evt->queue->l_lock);
3818 	list_move_tail(&evt->queue_list, evt_doneq);
3819 	spin_unlock(&evt->queue->l_lock);
3820 }
3821 
3822 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3823 {
3824 	struct ibmvfc_crq *crq;
3825 
3826 	crq = &scrq->msgs.scrq[scrq->cur].crq;
3827 	if (crq->valid & 0x80) {
3828 		if (++scrq->cur == scrq->size)
3829 			scrq->cur = 0;
3830 		rmb();
3831 	} else
3832 		crq = NULL;
3833 
3834 	return crq;
3835 }
3836 
3837 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3838 {
3839 	struct ibmvfc_crq *crq;
3840 	struct ibmvfc_event *evt, *temp;
3841 	unsigned long flags;
3842 	int done = 0;
3843 	LIST_HEAD(evt_doneq);
3844 
3845 	spin_lock_irqsave(scrq->q_lock, flags);
3846 	while (!done) {
3847 		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3848 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3849 			crq->valid = 0;
3850 			wmb();
3851 		}
3852 
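		/*
		 * Same race avoidance as in ibmvfc_tasklet(): re-enable the
		 * irq, then poll the queue once more in case an entry
		 * arrived in the window before the irq was re-enabled.
		 */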
3853 		ibmvfc_toggle_scrq_irq(scrq, 1);
3854 		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3855 			ibmvfc_toggle_scrq_irq(scrq, 0);
3856 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3857 			crq->valid = 0;
3858 			wmb();
3859 		} else
3860 			done = 1;
3861 	}
3862 	spin_unlock_irqrestore(scrq->q_lock, flags);
3863 
3864 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3865 		del_timer(&evt->timer);
3866 		list_del(&evt->queue_list);
3867 		ibmvfc_trc_end(evt);
3868 		evt->done(evt);
3869 	}
3870 }
3871 
3872 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3873 {
3874 	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3875 
3876 	ibmvfc_toggle_scrq_irq(scrq, 0);
3877 	ibmvfc_drain_sub_crq(scrq);
3878 
3879 	return IRQ_HANDLED;
3880 }
3881 
3882 /**
3883  * ibmvfc_init_tgt - Set the next init job step for the target
3884  * @tgt:		ibmvfc target struct
3885  * @job_step:	job step to perform
3886  *
3887  **/
3888 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3889 			    void (*job_step) (struct ibmvfc_target *))
3890 {
3891 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3892 		tgt->job_step = job_step;
3893 	wake_up(&tgt->vhost->work_wait_q);
3894 }
3895 
3896 /**
3897  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3898  * @tgt:		ibmvfc target struct
3899  * @job_step:	initialization job step
3900  *
3901  * Returns: 1 if step will be retried / 0 if not
3902  *
3903  **/
3904 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3905 				  void (*job_step) (struct ibmvfc_target *))
3906 {
3907 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3908 		ibmvfc_del_tgt(tgt);
3909 		wake_up(&tgt->vhost->work_wait_q);
3910 		return 0;
3911 	} else
3912 		ibmvfc_init_tgt(tgt, job_step);
3913 	return 1;
3914 }
3915 
3916 /* Defined in FC-LS */
3917 static const struct {
3918 	int code;
3919 	int retry;
3920 	int logged_in;
3921 } prli_rsp [] = {
3922 	{ 0, 1, 0 },
3923 	{ 1, 0, 1 },
3924 	{ 2, 1, 0 },
3925 	{ 3, 1, 0 },
3926 	{ 4, 0, 0 },
3927 	{ 5, 0, 0 },
3928 	{ 6, 0, 1 },
3929 	{ 7, 0, 0 },
3930 	{ 8, 1, 0 },
3931 };
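/*
 * ibmvfc_get_prli_rsp() extracts the 4-bit PRLI accept response code
 * (bits 11:8 of the service parameter page flags) and looks it up in
 * the table above: .retry says whether the PRLI should be retried,
 * .logged_in whether the response indicates an established login.
 */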
3932 
3933 /**
3934  * ibmvfc_get_prli_rsp - Find PRLI response index
3935  * @flags:	PRLI response flags
3936  *
3937  **/
3938 static int ibmvfc_get_prli_rsp(u16 flags)
3939 {
3940 	int i;
3941 	int code = (flags & 0x0f00) >> 8;
3942 
3943 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3944 		if (prli_rsp[i].code == code)
3945 			return i;
3946 
3947 	return 0;
3948 }
3949 
3950 /**
3951  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3952  * @evt:	ibmvfc event struct
3953  *
3954  **/
3955 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3956 {
3957 	struct ibmvfc_target *tgt = evt->tgt;
3958 	struct ibmvfc_host *vhost = evt->vhost;
3959 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3960 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3961 	u32 status = be16_to_cpu(rsp->common.status);
3962 	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3963 
3964 	vhost->discovery_threads--;
3965 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3966 	switch (status) {
3967 	case IBMVFC_MAD_SUCCESS:
3968 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3969 			parms->type, parms->flags, parms->service_parms);
3970 
3971 		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3972 			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3973 			if (prli_rsp[index].logged_in) {
3974 				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3975 					tgt->need_login = 0;
3976 					tgt->ids.roles = 0;
3977 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3978 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3979 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3980 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3981 					tgt->add_rport = 1;
3982 				} else
3983 					ibmvfc_del_tgt(tgt);
3984 			} else if (prli_rsp[index].retry)
3985 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3986 			else
3987 				ibmvfc_del_tgt(tgt);
3988 		} else
3989 			ibmvfc_del_tgt(tgt);
3990 		break;
3991 	case IBMVFC_MAD_DRIVER_FAILED:
3992 		break;
3993 	case IBMVFC_MAD_CRQ_ERROR:
3994 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3995 		break;
3996 	case IBMVFC_MAD_FAILED:
3997 	default:
3998 		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3999 		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
4000 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4001 		else if (tgt->logo_rcvd)
4002 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4003 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4004 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4005 		else
4006 			ibmvfc_del_tgt(tgt);
4007 
4008 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
4009 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4010 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
4011 		break;
4012 	}
4013 
4014 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4015 	ibmvfc_free_event(evt);
4016 	wake_up(&vhost->work_wait_q);
4017 }
4018 
4019 /**
4020  * ibmvfc_tgt_send_prli - Send a process login
4021  * @tgt:	ibmvfc target struct
4022  *
4023  **/
4024 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4025 {
4026 	struct ibmvfc_process_login *prli;
4027 	struct ibmvfc_host *vhost = tgt->vhost;
4028 	struct ibmvfc_event *evt;
4029 
4030 	if (vhost->discovery_threads >= disc_threads)
4031 		return;
4032 
4033 	kref_get(&tgt->kref);
4034 	evt = ibmvfc_get_event(&vhost->crq);
4035 	vhost->discovery_threads++;
4036 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4037 	evt->tgt = tgt;
4038 	prli = &evt->iu.prli;
4039 	memset(prli, 0, sizeof(*prli));
4040 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4041 		prli->common.version = cpu_to_be32(2);
4042 		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4043 	} else {
4044 		prli->common.version = cpu_to_be32(1);
4045 	}
4046 	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4047 	prli->common.length = cpu_to_be16(sizeof(*prli));
4048 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4049 
4050 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4051 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4052 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4053 	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4054 
4055 	if (cls3_error)
4056 		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4057 
4058 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4059 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4060 		vhost->discovery_threads--;
4061 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4062 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4063 	} else
4064 		tgt_dbg(tgt, "Sent process login\n");
4065 }
4066 
4067 /**
4068  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4069  * @evt:	ibmvfc event struct
4070  *
4071  **/
4072 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4073 {
4074 	struct ibmvfc_target *tgt = evt->tgt;
4075 	struct ibmvfc_host *vhost = evt->vhost;
4076 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4077 	u32 status = be16_to_cpu(rsp->common.status);
4078 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4079 
4080 	vhost->discovery_threads--;
4081 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4082 	switch (status) {
4083 	case IBMVFC_MAD_SUCCESS:
4084 		tgt_dbg(tgt, "Port Login succeeded\n");
4085 		if (tgt->ids.port_name &&
4086 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4087 			vhost->reinit = 1;
4088 			tgt_dbg(tgt, "Port re-init required\n");
4089 			break;
4090 		}
4091 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4092 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4093 		tgt->ids.port_id = tgt->scsi_id;
4094 		memcpy(&tgt->service_parms, &rsp->service_parms,
4095 		       sizeof(tgt->service_parms));
4096 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4097 		       sizeof(tgt->service_parms_change));
4098 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4099 		break;
4100 	case IBMVFC_MAD_DRIVER_FAILED:
4101 		break;
4102 	case IBMVFC_MAD_CRQ_ERROR:
4103 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4104 		break;
4105 	case IBMVFC_MAD_FAILED:
4106 	default:
4107 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4108 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4109 		else
4110 			ibmvfc_del_tgt(tgt);
4111 
4112 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4113 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4114 					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4115 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4116 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4117 		break;
4118 	}
4119 
4120 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4121 	ibmvfc_free_event(evt);
4122 	wake_up(&vhost->work_wait_q);
4123 }
4124 
4125 /**
4126  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4127  * @tgt:	ibmvfc target struct
4128  *
4129  **/
4130 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4131 {
4132 	struct ibmvfc_port_login *plogi;
4133 	struct ibmvfc_host *vhost = tgt->vhost;
4134 	struct ibmvfc_event *evt;
4135 
4136 	if (vhost->discovery_threads >= disc_threads)
4137 		return;
4138 
4139 	kref_get(&tgt->kref);
4140 	tgt->logo_rcvd = 0;
4141 	evt = ibmvfc_get_event(&vhost->crq);
4142 	vhost->discovery_threads++;
4143 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4144 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4145 	evt->tgt = tgt;
4146 	plogi = &evt->iu.plogi;
4147 	memset(plogi, 0, sizeof(*plogi));
4148 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4149 		plogi->common.version = cpu_to_be32(2);
4150 		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4151 	} else {
4152 		plogi->common.version = cpu_to_be32(1);
4153 	}
4154 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4155 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
4156 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4157 
4158 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4159 		vhost->discovery_threads--;
4160 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4161 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4162 	} else
4163 		tgt_dbg(tgt, "Sent port login\n");
4164 }
4165 
4166 /**
4167  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4168  * @evt:	ibmvfc event struct
4169  *
4170  **/
4171 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4172 {
4173 	struct ibmvfc_target *tgt = evt->tgt;
4174 	struct ibmvfc_host *vhost = evt->vhost;
4175 	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4176 	u32 status = be16_to_cpu(rsp->common.status);
4177 
4178 	vhost->discovery_threads--;
4179 	ibmvfc_free_event(evt);
4180 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4181 
4182 	switch (status) {
4183 	case IBMVFC_MAD_SUCCESS:
4184 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
4185 		break;
4186 	case IBMVFC_MAD_DRIVER_FAILED:
4187 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4188 		wake_up(&vhost->work_wait_q);
4189 		return;
4190 	case IBMVFC_MAD_FAILED:
4191 	default:
4192 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4193 		break;
4194 	}
4195 
4196 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4197 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4198 	wake_up(&vhost->work_wait_q);
4199 }
4200 
4201 /**
4202  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4203  * @tgt:		ibmvfc target struct
4204  * @done:		Routine to call when the event is responded to
4205  *
4206  * Returns:
4207  *	Allocated and initialized ibmvfc_event struct
4208  **/
4209 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4210 								 void (*done) (struct ibmvfc_event *))
4211 {
4212 	struct ibmvfc_implicit_logout *mad;
4213 	struct ibmvfc_host *vhost = tgt->vhost;
4214 	struct ibmvfc_event *evt;
4215 
4216 	kref_get(&tgt->kref);
4217 	evt = ibmvfc_get_event(&vhost->crq);
4218 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4219 	evt->tgt = tgt;
4220 	mad = &evt->iu.implicit_logout;
4221 	memset(mad, 0, sizeof(*mad));
4222 	mad->common.version = cpu_to_be32(1);
4223 	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4224 	mad->common.length = cpu_to_be16(sizeof(*mad));
4225 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4226 	return evt;
4227 }
4228 
4229 /**
4230  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4231  * @tgt:		ibmvfc target struct
4232  *
4233  **/
4234 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4235 {
4236 	struct ibmvfc_host *vhost = tgt->vhost;
4237 	struct ibmvfc_event *evt;
4238 
4239 	if (vhost->discovery_threads >= disc_threads)
4240 		return;
4241 
4242 	vhost->discovery_threads++;
4243 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4244 						   ibmvfc_tgt_implicit_logout_done);
4245 
4246 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4247 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4248 		vhost->discovery_threads--;
4249 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4250 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4251 	} else
4252 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4253 }
4254 
4255 /**
4256  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4257  * @evt:	ibmvfc event struct
4258  *
4259  **/
4260 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4261 {
4262 	struct ibmvfc_target *tgt = evt->tgt;
4263 	struct ibmvfc_host *vhost = evt->vhost;
4264 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4265 	u32 status = be16_to_cpu(mad->common.status);
4266 
4267 	vhost->discovery_threads--;
4268 	ibmvfc_free_event(evt);
4269 
4270 	/*
4271 	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4272 	 * driver in which case we need to free up all the targets. If we are
4273 	 * not unloading, we will still go through a hard reset to get out of
4274 	 * offline state, so there is no need to track the old targets in that
4275 	 * case.
4276 	 */
4277 	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4278 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4279 	else
4280 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4281 
4282 	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4283 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4284 	wake_up(&vhost->work_wait_q);
4285 }
4286 
4287 /**
4288  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4289  * @tgt:		ibmvfc target struct
4290  *
4291  **/
4292 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4293 {
4294 	struct ibmvfc_host *vhost = tgt->vhost;
4295 	struct ibmvfc_event *evt;
4296 
4297 	if (!vhost->logged_in) {
4298 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4299 		return;
4300 	}
4301 
4302 	if (vhost->discovery_threads >= disc_threads)
4303 		return;
4304 
4305 	vhost->discovery_threads++;
4306 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4307 						   ibmvfc_tgt_implicit_logout_and_del_done);
4308 
4309 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4310 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4311 		vhost->discovery_threads--;
4312 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4313 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4314 	} else
4315 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4316 }
4317 
4318 /**
4319  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4320  * @evt:	ibmvfc event struct
4321  *
4322  **/
4323 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4324 {
4325 	struct ibmvfc_target *tgt = evt->tgt;
4326 	struct ibmvfc_host *vhost = evt->vhost;
4327 	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4328 	u32 status = be16_to_cpu(rsp->common.status);
4329 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4330 
4331 	vhost->discovery_threads--;
4332 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4333 	switch (status) {
4334 	case IBMVFC_MAD_SUCCESS:
4335 		tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
4336 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4337 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4338 		tgt->scsi_id = tgt->new_scsi_id;
4339 		tgt->ids.port_id = tgt->scsi_id;
4340 		memcpy(&tgt->service_parms, &rsp->service_parms,
4341 		       sizeof(tgt->service_parms));
4342 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4343 		       sizeof(tgt->service_parms_change));
4344 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4345 		break;
4346 	case IBMVFC_MAD_DRIVER_FAILED:
4347 		break;
4348 	case IBMVFC_MAD_CRQ_ERROR:
4349 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4350 		break;
4351 	case IBMVFC_MAD_FAILED:
4352 	default:
4353 		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4354 
4355 		tgt_log(tgt, level,
4356 			"Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4357 			tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4358 			status);
4359 		break;
4360 	}
4361 
4362 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4363 	ibmvfc_free_event(evt);
4364 	wake_up(&vhost->work_wait_q);
4365 }
4366 
4367 
4368 /**
4369  * ibmvfc_tgt_move_login - Initiate a move login for specified target
4370  * @tgt:		ibmvfc target struct
4371  *
4372  **/
4373 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4374 {
4375 	struct ibmvfc_host *vhost = tgt->vhost;
4376 	struct ibmvfc_move_login *move;
4377 	struct ibmvfc_event *evt;
4378 
4379 	if (vhost->discovery_threads >= disc_threads)
4380 		return;
4381 
4382 	kref_get(&tgt->kref);
4383 	evt = ibmvfc_get_event(&vhost->crq);
4384 	vhost->discovery_threads++;
4385 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4386 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4387 	evt->tgt = tgt;
4388 	move = &evt->iu.move_login;
4389 	memset(move, 0, sizeof(*move));
4390 	move->common.version = cpu_to_be32(1);
4391 	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4392 	move->common.length = cpu_to_be16(sizeof(*move));
4393 
4394 	move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4395 	move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4396 	move->wwpn = cpu_to_be64(tgt->wwpn);
4397 	move->node_name = cpu_to_be64(tgt->ids.node_name);
4398 
4399 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4400 		vhost->discovery_threads--;
4401 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4402 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4403 	} else
4404 		tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4405 }
4406 
4407 /**
4408  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4409  * @mad:	ibmvfc passthru mad struct
4410  * @tgt:	ibmvfc target struct
4411  *
4412  * Returns:
4413  *	1 if PLOGI needed / 0 if PLOGI not needed
4414  **/
4415 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4416 				    struct ibmvfc_target *tgt)
4417 {
4418 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4419 		return 1;
4420 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4421 		return 1;
4422 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4423 		return 1;
4424 	return 0;
4425 }
4426 
4427 /**
4428  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4429  * @evt:	ibmvfc event struct
4430  *
4431  **/
4432 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4433 {
4434 	struct ibmvfc_target *tgt = evt->tgt;
4435 	struct ibmvfc_host *vhost = evt->vhost;
4436 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4437 	u32 status = be16_to_cpu(mad->common.status);
4438 	u8 fc_reason, fc_explain;
4439 
4440 	vhost->discovery_threads--;
4441 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4442 	del_timer(&tgt->timer);
4443 
4444 	switch (status) {
4445 	case IBMVFC_MAD_SUCCESS:
4446 		tgt_dbg(tgt, "ADISC succeeded\n");
4447 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
4448 			ibmvfc_del_tgt(tgt);
4449 		break;
4450 	case IBMVFC_MAD_DRIVER_FAILED:
4451 		break;
4452 	case IBMVFC_MAD_FAILED:
4453 	default:
4454 		ibmvfc_del_tgt(tgt);
4455 		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4456 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4457 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4458 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4459 			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4460 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
4461 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4462 		break;
4463 	}
4464 
4465 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4466 	ibmvfc_free_event(evt);
4467 	wake_up(&vhost->work_wait_q);
4468 }
4469 
4470 /**
4471  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4472  * @evt:		ibmvfc event struct
4473  *
4474  **/
4475 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4476 {
4477 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4478 
4479 	memset(mad, 0, sizeof(*mad));
4480 	mad->common.version = cpu_to_be32(1);
4481 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4482 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
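	/*
	 * The descriptors below all reference memory embedded in this same
	 * event, expressed as the event's DMA address (crq.ioba) plus the
	 * offset of the field: cmd_ioba points at the passthru IU, whose
	 * cmd/rsp descriptors in turn point at the FC IU payload and
	 * response buffers.
	 */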
4483 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4484 		offsetof(struct ibmvfc_passthru_mad, iu));
4485 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4486 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4487 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4488 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4489 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4490 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
4491 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4492 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4493 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4494 		offsetof(struct ibmvfc_passthru_fc_iu, response));
4495 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4496 }
4497 
4498 /**
4499  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4500  * @evt:		ibmvfc event struct
4501  *
4502  * Just cleanup this event struct. Everything else is handled by
4503  * the ADISC completion handler. If the ADISC never actually comes
4504  * back, we still have the timer running on the ADISC event struct
4505  * which will fire and cause the CRQ to get reset.
4506  *
4507  **/
4508 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4509 {
4510 	struct ibmvfc_host *vhost = evt->vhost;
4511 	struct ibmvfc_target *tgt = evt->tgt;
4512 
4513 	tgt_dbg(tgt, "ADISC cancel complete\n");
4514 	vhost->abort_threads--;
4515 	ibmvfc_free_event(evt);
4516 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4517 	wake_up(&vhost->work_wait_q);
4518 }
4519 
4520 /**
4521  * ibmvfc_adisc_timeout - Handle an ADISC timeout
4522  * @t:		timer embedded in the ibmvfc target struct
4523  *
4524  * If an ADISC times out, send a cancel. If the cancel times
4525  * out, reset the CRQ. When the ADISC comes back as cancelled,
4526  * log back into the target.
4527  **/
4528 static void ibmvfc_adisc_timeout(struct timer_list *t)
4529 {
4530 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4531 	struct ibmvfc_host *vhost = tgt->vhost;
4532 	struct ibmvfc_event *evt;
4533 	struct ibmvfc_tmf *tmf;
4534 	unsigned long flags;
4535 	int rc;
4536 
4537 	tgt_dbg(tgt, "ADISC timeout\n");
4538 	spin_lock_irqsave(vhost->host->host_lock, flags);
4539 	if (vhost->abort_threads >= disc_threads ||
4540 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4541 	    vhost->state != IBMVFC_INITIALIZING ||
4542 	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4543 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4544 		return;
4545 	}
4546 
4547 	vhost->abort_threads++;
4548 	kref_get(&tgt->kref);
4549 	evt = ibmvfc_get_event(&vhost->crq);
4550 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4551 
4552 	evt->tgt = tgt;
4553 	tmf = &evt->iu.tmf;
4554 	memset(tmf, 0, sizeof(*tmf));
4555 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4556 		tmf->common.version = cpu_to_be32(2);
4557 		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4558 	} else {
4559 		tmf->common.version = cpu_to_be32(1);
4560 	}
4561 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4562 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
4563 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4564 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4565 
4566 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
4567 
4568 	if (rc) {
4569 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4570 		vhost->abort_threads--;
4571 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4572 		__ibmvfc_reset_host(vhost);
4573 	} else
4574 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4575 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4576 }
4577 
4578 /**
4579  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4580  * @tgt:		ibmvfc target struct
4581  *
4582  * When sending an ADISC we end up with two timers running. The
4583  * first timer is the timer in the ibmvfc target struct. If this
4584  * fires, we send a cancel to the target. The second timer is the
4585  * timer on the ibmvfc event for the ADISC, which is longer. If that
4586  * fires, it means the ADISC timed out and our attempt to cancel it
4587  * also failed, so we need to reset the CRQ.
4588  **/
4589 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4590 {
4591 	struct ibmvfc_passthru_mad *mad;
4592 	struct ibmvfc_host *vhost = tgt->vhost;
4593 	struct ibmvfc_event *evt;
4594 
4595 	if (vhost->discovery_threads >= disc_threads)
4596 		return;
4597 
4598 	kref_get(&tgt->kref);
4599 	evt = ibmvfc_get_event(&vhost->crq);
4600 	vhost->discovery_threads++;
4601 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4602 	evt->tgt = tgt;
4603 
4604 	ibmvfc_init_passthru(evt);
4605 	mad = &evt->iu.passthru;
4606 	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4607 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4608 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4609 
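	/*
	 * Build the ADISC ELS payload: word 0 is the command code, words
	 * 2-3 and 4-5 carry our port and node names, and word 6 our
	 * N_Port ID (low 24 bits of the SCSI ID). The response is checked
	 * against the target's cached identity in ibmvfc_adisc_needs_plogi().
	 */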
4610 	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4611 	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4612 	       sizeof(vhost->login_buf->resp.port_name));
4613 	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4614 	       sizeof(vhost->login_buf->resp.node_name));
4615 	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4616 
4617 	if (timer_pending(&tgt->timer))
4618 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4619 	else {
4620 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4621 		add_timer(&tgt->timer);
4622 	}
4623 
4624 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4625 	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4626 		vhost->discovery_threads--;
4627 		del_timer(&tgt->timer);
4628 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4629 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4630 	} else
4631 		tgt_dbg(tgt, "Sent ADISC\n");
4632 }
4633 
4634 /**
4635  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4636  * @evt:	ibmvfc event struct
4637  *
4638  **/
4639 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4640 {
4641 	struct ibmvfc_target *tgt = evt->tgt;
4642 	struct ibmvfc_host *vhost = evt->vhost;
4643 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4644 	u32 status = be16_to_cpu(rsp->common.status);
4645 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4646 
4647 	vhost->discovery_threads--;
4648 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4649 	switch (status) {
4650 	case IBMVFC_MAD_SUCCESS:
4651 		tgt_dbg(tgt, "Query Target succeeded\n");
4652 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4653 			ibmvfc_del_tgt(tgt);
4654 		else
4655 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4656 		break;
4657 	case IBMVFC_MAD_DRIVER_FAILED:
4658 		break;
4659 	case IBMVFC_MAD_CRQ_ERROR:
4660 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4661 		break;
4662 	case IBMVFC_MAD_FAILED:
4663 	default:
4664 		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4665 		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4666 		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4667 			ibmvfc_del_tgt(tgt);
4668 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4669 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4670 		else
4671 			ibmvfc_del_tgt(tgt);
4672 
4673 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4674 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4675 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4676 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4677 			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4678 			status);
4679 		break;
4680 	}
4681 
4682 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4683 	ibmvfc_free_event(evt);
4684 	wake_up(&vhost->work_wait_q);
4685 }
4686 
4687 /**
4688  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4689  * @tgt:	ibmvfc target struct
4690  *
4691  **/
4692 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4693 {
4694 	struct ibmvfc_query_tgt *query_tgt;
4695 	struct ibmvfc_host *vhost = tgt->vhost;
4696 	struct ibmvfc_event *evt;
4697 
4698 	if (vhost->discovery_threads >= disc_threads)
4699 		return;
4700 
4701 	kref_get(&tgt->kref);
4702 	evt = ibmvfc_get_event(&vhost->crq);
4703 	vhost->discovery_threads++;
4704 	evt->tgt = tgt;
4705 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4706 	query_tgt = &evt->iu.query_tgt;
4707 	memset(query_tgt, 0, sizeof(*query_tgt));
4708 	query_tgt->common.version = cpu_to_be32(1);
4709 	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4710 	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4711 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4712 
4713 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4714 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4715 		vhost->discovery_threads--;
4716 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4717 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4718 	} else
4719 		tgt_dbg(tgt, "Sent Query Target\n");
4720 }
4721 
4722 /**
4723  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4724  * @vhost:		ibmvfc host struct
4725  * @target:		Holds the SCSI ID and WWPN to allocate the target for
4726  *
4727  * Returns:
4728  *	0 on success / other on failure
4729  **/
4730 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4731 			       struct ibmvfc_discover_targets_entry *target)
4732 {
4733 	struct ibmvfc_target *stgt = NULL;
4734 	struct ibmvfc_target *wtgt = NULL;
4735 	struct ibmvfc_target *tgt;
4736 	unsigned long flags;
4737 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4738 	u64 wwpn = be64_to_cpu(target->wwpn);
4739 
4740 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4741 	spin_lock_irqsave(vhost->host->host_lock, flags);
4742 	list_for_each_entry(tgt, &vhost->targets, queue) {
4743 		if (tgt->wwpn == wwpn) {
4744 			wtgt = tgt;
4745 			break;
4746 		}
4747 	}
4748 
4749 	list_for_each_entry(tgt, &vhost->targets, queue) {
4750 		if (tgt->scsi_id == scsi_id) {
4751 			stgt = tgt;
4752 			break;
4753 		}
4754 	}
4755 
4756 	if (wtgt && !stgt) {
4757 		/*
4758 		 * A WWPN target has moved and we still are tracking the old
4759 		 * SCSI ID.  The only way we should be able to get here is if
4760 		 * we attempted to send an implicit logout for the old SCSI ID
4761 		 * and it failed for some reason, such as there being I/O
4762 		 * pending to the target. In this case, we will have already
4763 		 * deleted the rport from the FC transport so we do a move
4764 		 * login, which works even with I/O pending, however, if
4765 		 * there is still I/O pending, it will stay outstanding, so
4766 		 * we only do this if fast fail is disabled for the rport,
4767 		 * otherwise we let terminate_rport_io clean up the port
4768 		 * before we login at the new location.
4769 		 */
4770 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4771 			if (wtgt->move_login) {
4772 				/*
4773 				 * Do a move login here. The old target is no longer
4774 				 * known to the transport layer. We don't use the
4775 				 * normal ibmvfc_set_tgt_action to set this, as we
4776 				 * don't normally want to allow this state change.
4777 				 */
4778 				wtgt->new_scsi_id = scsi_id;
4779 				wtgt->action = IBMVFC_TGT_ACTION_INIT;
4780 				wtgt->init_retries = 0;
4781 				ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4782 			}
4783 			goto unlock_out;
4784 		} else {
4785 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4786 				wtgt->action, wtgt->rport);
4787 		}
4788 	} else if (stgt) {
4789 		if (stgt->need_login)
4790 			ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
4791 		goto unlock_out;
4792 	}
4793 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4794 
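	/*
	 * mempool_alloc() with a gfp mask that allows blocking (GFP_NOIO)
	 * never fails, so the allocation needs no NULL check.
	 */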
4795 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4796 	memset(tgt, 0, sizeof(*tgt));
4797 	tgt->scsi_id = scsi_id;
4798 	tgt->wwpn = wwpn;
4799 	tgt->vhost = vhost;
4800 	tgt->need_login = 1;
4801 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4802 	kref_init(&tgt->kref);
4803 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4804 	spin_lock_irqsave(vhost->host->host_lock, flags);
4805 	tgt->cancel_key = vhost->task_set++;
4806 	list_add_tail(&tgt->queue, &vhost->targets);
4807 
4808 unlock_out:
4809 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4810 	return 0;
4811 }
4812 
4813 /**
4814  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4815  * @vhost:		ibmvfc host struct
4816  *
4817  * Returns:
4818  *	0 on success / other on failure
4819  **/
4820 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4821 {
4822 	int i, rc;
4823 
4824 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4825 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4826 
4827 	return rc;
4828 }
4829 
4830 /**
4831  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4832  * @evt:	ibmvfc event struct
4833  *
4834  **/
4835 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4836 {
4837 	struct ibmvfc_host *vhost = evt->vhost;
4838 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4839 	u32 mad_status = be16_to_cpu(rsp->common.status);
4840 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4841 
4842 	switch (mad_status) {
4843 	case IBMVFC_MAD_SUCCESS:
4844 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4845 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4846 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4847 		break;
4848 	case IBMVFC_MAD_FAILED:
4849 		level += ibmvfc_retry_host_init(vhost);
4850 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4851 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4852 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4853 		break;
4854 	case IBMVFC_MAD_DRIVER_FAILED:
4855 		break;
4856 	default:
4857 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4858 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4859 		break;
4860 	}
4861 
4862 	ibmvfc_free_event(evt);
4863 	wake_up(&vhost->work_wait_q);
4864 }
4865 
4866 /**
4867  * ibmvfc_discover_targets - Send Discover Targets MAD
4868  * @vhost:	ibmvfc host struct
4869  *
4870  **/
4871 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4872 {
4873 	struct ibmvfc_discover_targets *mad;
4874 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4875 
4876 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4877 	mad = &evt->iu.discover_targets;
4878 	memset(mad, 0, sizeof(*mad));
4879 	mad->common.version = cpu_to_be32(1);
4880 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4881 	mad->common.length = cpu_to_be16(sizeof(*mad));
4882 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4883 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4884 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4885 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4886 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4887 
4888 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4889 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4890 	else
4891 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4892 }
4893 
4894 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
4895 {
4896 	struct ibmvfc_host *vhost = evt->vhost;
4897 	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
4898 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4899 	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
4900 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4901 	int flags, active_queues, i;
4902 
4903 	ibmvfc_free_event(evt);
4904 
4905 	switch (mad_status) {
4906 	case IBMVFC_MAD_SUCCESS:
4907 		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
4908 		flags = be32_to_cpu(setup->flags);
4909 		vhost->do_enquiry = 0;
4910 		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
4911 		scrqs->active_queues = active_queues;
4912 
4913 		if (flags & IBMVFC_CHANNELS_CANCELED) {
4914 			ibmvfc_dbg(vhost, "Channels Canceled\n");
4915 			vhost->using_channels = 0;
4916 		} else {
4917 			if (active_queues)
4918 				vhost->using_channels = 1;
4919 			for (i = 0; i < active_queues; i++)
4920 				scrqs->scrqs[i].vios_cookie =
4921 					be64_to_cpu(setup->channel_handles[i]);
4922 
4923 			ibmvfc_dbg(vhost, "Using %u channels\n",
4924 				   vhost->scsi_scrqs.active_queues);
4925 		}
4926 		break;
4927 	case IBMVFC_MAD_FAILED:
4928 		level += ibmvfc_retry_host_init(vhost);
4929 		ibmvfc_log(vhost, level, "Channel Setup failed\n");
4930 		fallthrough;
4931 	case IBMVFC_MAD_DRIVER_FAILED:
4932 		return;
4933 	default:
4934 		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
4935 			mad_status);
4936 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4937 		return;
4938 	}
4939 
4940 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4941 	wake_up(&vhost->work_wait_q);
4942 }
4943 
4944 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
4945 {
4946 	struct ibmvfc_channel_setup_mad *mad;
4947 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
4948 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4949 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4950 	unsigned int num_channels =
4951 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
4952 	int i;
4953 
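	/*
	 * Hand the VIOS one cookie per sub-CRQ we want used as a channel
	 * (requesting zero channels asks the VIOS to cancel channelization).
	 * On success the completion handler records the VIOS-side handle
	 * for each active queue in scrqs->scrqs[i].vios_cookie.
	 */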
4954 	memset(setup_buf, 0, sizeof(*setup_buf));
4955 	if (num_channels == 0)
4956 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
4957 	else {
4958 		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
4959 		for (i = 0; i < num_channels; i++)
4960 			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
4961 	}
4962 
4963 	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
4964 	mad = &evt->iu.channel_setup;
4965 	memset(mad, 0, sizeof(*mad));
4966 	mad->common.version = cpu_to_be32(1);
4967 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
4968 	mad->common.length = cpu_to_be16(sizeof(*mad));
4969 	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
4970 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
4971 
4972 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4973 
4974 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4975 		ibmvfc_dbg(vhost, "Sent channel setup\n");
4976 	else
4977 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
4978 }
4979 
4980 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
4981 {
4982 	struct ibmvfc_host *vhost = evt->vhost;
4983 	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
4984 	u32 mad_status = be16_to_cpu(rsp->common.status);
4985 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4986 
4987 	switch (mad_status) {
4988 	case IBMVFC_MAD_SUCCESS:
4989 		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
4990 		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
4991 		ibmvfc_free_event(evt);
4992 		break;
4993 	case IBMVFC_MAD_FAILED:
4994 		level += ibmvfc_retry_host_init(vhost);
4995 		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
4996 		fallthrough;
4997 	case IBMVFC_MAD_DRIVER_FAILED:
4998 		ibmvfc_free_event(evt);
4999 		return;
5000 	default:
5001 		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
5002 			mad_status);
5003 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5004 		ibmvfc_free_event(evt);
5005 		return;
5006 	}
5007 
5008 	ibmvfc_channel_setup(vhost);
5009 }
5010 
5011 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
5012 {
5013 	struct ibmvfc_channel_enquiry *mad;
5014 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5015 
5016 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5017 	mad = &evt->iu.channel_enquiry;
5018 	memset(mad, 0, sizeof(*mad));
5019 	mad->common.version = cpu_to_be32(1);
5020 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
5021 	mad->common.length = cpu_to_be16(sizeof(*mad));
5022 
5023 	if (mig_channels_only)
5024 		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
5025 	if (mig_no_less_channels)
5026 		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
5027 
5028 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5029 
5030 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5031 		ibmvfc_dbg(vhost, "Send channel enquiry\n");
5032 	else
5033 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5034 }
5035 
5036 /**
5037  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5038  * @evt:	ibmvfc event struct
5039  *
5040  **/
5041 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5042 {
5043 	struct ibmvfc_host *vhost = evt->vhost;
5044 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5045 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5046 	unsigned int npiv_max_sectors;
5047 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
5048 
5049 	switch (mad_status) {
5050 	case IBMVFC_MAD_SUCCESS:
5051 		ibmvfc_free_event(evt);
5052 		break;
5053 	case IBMVFC_MAD_FAILED:
5054 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5055 			level += ibmvfc_retry_host_init(vhost);
5056 		else
5057 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5058 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5059 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5060 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5061 		ibmvfc_free_event(evt);
5062 		return;
5063 	case IBMVFC_MAD_CRQ_ERROR:
5064 		ibmvfc_retry_host_init(vhost);
5065 		fallthrough;
5066 	case IBMVFC_MAD_DRIVER_FAILED:
5067 		ibmvfc_free_event(evt);
5068 		return;
5069 	default:
5070 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5071 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5072 		ibmvfc_free_event(evt);
5073 		return;
5074 	}
5075 
5076 	vhost->client_migrated = 0;
5077 
5078 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5079 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5080 			be32_to_cpu(rsp->flags));
5081 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5082 		wake_up(&vhost->work_wait_q);
5083 		return;
5084 	}
5085 
5086 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5087 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5088 			be32_to_cpu(rsp->max_cmds));
5089 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5090 		wake_up(&vhost->work_wait_q);
5091 		return;
5092 	}
5093 
5094 	vhost->logged_in = 1;
5095 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5096 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5097 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5098 		 rsp->drc_name, npiv_max_sectors);
5099 
5100 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5101 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5102 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5103 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5104 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5105 	fc_host_supported_classes(vhost->host) = 0;
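	/*
	 * Bit 31 of word 0 in each class service parameter block
	 * indicates whether that class of service is valid.
	 */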
5106 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5107 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5108 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5109 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5110 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5111 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5112 	fc_host_maxframe_size(vhost->host) =
5113 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5114 
5115 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5116 	vhost->host->max_sectors = npiv_max_sectors;
5117 
5118 	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5119 		ibmvfc_channel_enquiry(vhost);
5120 	} else {
5121 		vhost->do_enquiry = 0;
5122 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5123 		wake_up(&vhost->work_wait_q);
5124 	}
5125 }
5126 
5127 /**
5128  * ibmvfc_npiv_login - Sends NPIV login
5129  * @vhost:	ibmvfc host struct
5130  *
5131  **/
5132 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5133 {
5134 	struct ibmvfc_npiv_login_mad *mad;
5135 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5136 
5137 	ibmvfc_gather_partition_info(vhost);
5138 	ibmvfc_set_login_info(vhost);
5139 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5140 
5141 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5142 	mad = &evt->iu.npiv_login;
5143 	memset(mad, 0, sizeof(*mad));
5144 	mad->common.version = cpu_to_be32(1);
5145 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5146 	mad->common.length = cpu_to_be16(sizeof(*mad));
5147 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5148 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5149 
5150 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5151 
5152 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5153 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
5154 	else
5155 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5156 }
5157 
5158 /**
5159  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5160  * @evt:		ibmvfc event struct
5161  *
5162  **/
5163 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5164 {
5165 	struct ibmvfc_host *vhost = evt->vhost;
5166 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5167 
5168 	ibmvfc_free_event(evt);
5169 
5170 	switch (mad_status) {
5171 	case IBMVFC_MAD_SUCCESS:
5172 		if (list_empty(&vhost->crq.sent) &&
5173 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5174 			ibmvfc_init_host(vhost);
5175 			return;
5176 		}
5177 		break;
5178 	case IBMVFC_MAD_FAILED:
5179 	case IBMVFC_MAD_NOT_SUPPORTED:
5180 	case IBMVFC_MAD_CRQ_ERROR:
5181 	case IBMVFC_MAD_DRIVER_FAILED:
5182 	default:
5183 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5184 		break;
5185 	}
5186 
5187 	ibmvfc_hard_reset_host(vhost);
5188 }
5189 
5190 /**
5191  * ibmvfc_npiv_logout - Issue an NPIV Logout
5192  * @vhost:		ibmvfc host struct
5193  *
5194  **/
5195 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5196 {
5197 	struct ibmvfc_npiv_logout_mad *mad;
5198 	struct ibmvfc_event *evt;
5199 
5200 	evt = ibmvfc_get_event(&vhost->crq);
5201 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5202 
5203 	mad = &evt->iu.npiv_logout;
5204 	memset(mad, 0, sizeof(*mad));
5205 	mad->common.version = cpu_to_be32(1);
5206 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5207 	mad->common.length = cpu_to_be16(sizeof(*mad));
5208 
5209 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5210 
5211 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5212 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5213 	else
5214 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5215 }
5216 
5217 /**
5218  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5219  * @vhost:		ibmvfc host struct
5220  *
5221  * Returns:
5222  *	1 if work to do / 0 if not
5223  **/
5224 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5225 {
5226 	struct ibmvfc_target *tgt;
5227 
5228 	list_for_each_entry(tgt, &vhost->targets, queue) {
5229 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5230 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5231 			return 1;
5232 	}
5233 
5234 	return 0;
5235 }
5236 
5237 /**
5238  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5239  * @vhost:		ibmvfc host struct
5240  *
5241  * Returns:
5242  *	1 if work to do / 0 if not
5243  **/
5244 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5245 {
5246 	struct ibmvfc_target *tgt;
5247 
5248 	list_for_each_entry(tgt, &vhost->targets, queue) {
5249 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5250 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5251 			return 1;
5252 	}
5253 	return 0;
5254 }
5255 
5256 /**
5257  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5258  * @vhost:		ibmvfc host struct
5259  *
5260  * Returns:
5261  *	1 if work to do / 0 if not
5262  **/
5263 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5264 {
5265 	struct ibmvfc_target *tgt;
5266 
5267 	if (kthread_should_stop())
5268 		return 1;
5269 	switch (vhost->action) {
5270 	case IBMVFC_HOST_ACTION_NONE:
5271 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5272 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5273 		return 0;
5274 	case IBMVFC_HOST_ACTION_TGT_INIT:
5275 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5276 		if (vhost->discovery_threads == disc_threads)
5277 			return 0;
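		/*
		 * There is work to do if any target still needs to start
		 * init; if none do but some are waiting on init responses,
		 * there is nothing to do yet. Otherwise every target is
		 * done and the host action can advance.
		 */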
5278 		list_for_each_entry(tgt, &vhost->targets, queue)
5279 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5280 				return 1;
5281 		list_for_each_entry(tgt, &vhost->targets, queue)
5282 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5283 				return 0;
5284 		return 1;
5285 	case IBMVFC_HOST_ACTION_TGT_DEL:
5286 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5287 		if (vhost->discovery_threads == disc_threads)
5288 			return 0;
5289 		list_for_each_entry(tgt, &vhost->targets, queue)
5290 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5291 				return 1;
5292 		list_for_each_entry(tgt, &vhost->targets, queue)
5293 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5294 				return 0;
5295 		return 1;
5296 	case IBMVFC_HOST_ACTION_LOGO:
5297 	case IBMVFC_HOST_ACTION_INIT:
5298 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5299 	case IBMVFC_HOST_ACTION_QUERY:
5300 	case IBMVFC_HOST_ACTION_RESET:
5301 	case IBMVFC_HOST_ACTION_REENABLE:
5302 	default:
5303 		break;
5304 	}
5305 
5306 	return 1;
5307 }
5308 
5309 /**
5310  * ibmvfc_work_to_do - Is there task level work to do?
5311  * @vhost:		ibmvfc host struct
5312  *
5313  * Returns:
5314  *	1 if work to do / 0 if not
5315  **/
5316 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5317 {
5318 	unsigned long flags;
5319 	int rc;
5320 
5321 	spin_lock_irqsave(vhost->host->host_lock, flags);
5322 	rc = __ibmvfc_work_to_do(vhost);
5323 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5324 	return rc;
5325 }
5326 
5327 /**
5328  * ibmvfc_log_ae - Log async events if necessary
5329  * @vhost:		ibmvfc host struct
5330  * @events:		events to log
5331  *
5332  **/
5333 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5334 {
5335 	if (events & IBMVFC_AE_RSCN)
5336 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5337 	if ((events & IBMVFC_AE_LINKDOWN) &&
5338 	    vhost->state >= IBMVFC_HALTED)
5339 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5340 	if ((events & IBMVFC_AE_LINKUP) &&
5341 	    vhost->state == IBMVFC_INITIALIZING)
5342 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5343 }
5344 
5345 /**
5346  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5347  * @tgt:		ibmvfc target struct
5348  *
5349  **/
5350 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5351 {
5352 	struct ibmvfc_host *vhost = tgt->vhost;
5353 	struct fc_rport *rport;
5354 	unsigned long flags;
5355 
5356 	tgt_dbg(tgt, "Adding rport\n");
5357 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5358 	spin_lock_irqsave(vhost->host->host_lock, flags);
5359 
5360 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5361 		tgt_dbg(tgt, "Deleting rport\n");
5362 		list_del(&tgt->queue);
5363 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5364 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5365 		fc_remote_port_delete(rport);
5366 		del_timer_sync(&tgt->timer);
5367 		kref_put(&tgt->kref, ibmvfc_release_tgt);
5368 		return;
5369 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5370 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5371 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5372 		tgt->rport = NULL;
5373 		tgt->init_retries = 0;
5374 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5375 		fc_remote_port_delete(rport);
5376 		return;
5377 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5378 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5379 		return;
5380 	}
5381 
5382 	if (rport) {
5383 		tgt_dbg(tgt, "rport add succeeded\n");
5384 		tgt->rport = rport;
5385 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5386 		rport->supported_classes = 0;
5387 		tgt->target_id = rport->scsi_target_id;
5388 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5389 			rport->supported_classes |= FC_COS_CLASS1;
5390 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5391 			rport->supported_classes |= FC_COS_CLASS2;
5392 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5393 			rport->supported_classes |= FC_COS_CLASS3;
5394 		if (rport->rqst_q)
5395 			blk_queue_max_segments(rport->rqst_q, 1);
5396 	} else
5397 		tgt_dbg(tgt, "rport add failed\n");
5398 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5399 }
5400 
5401 /**
5402  * ibmvfc_do_work - Do task level work
5403  * @vhost:		ibmvfc host struct
5404  *
5405  **/
5406 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5407 {
5408 	struct ibmvfc_target *tgt;
5409 	unsigned long flags;
5410 	struct fc_rport *rport;
5411 	LIST_HEAD(purge);
5412 	int rc;
5413 
5414 	ibmvfc_log_ae(vhost, vhost->events_to_log);
5415 	spin_lock_irqsave(vhost->host->host_lock, flags);
5416 	vhost->events_to_log = 0;
5417 	switch (vhost->action) {
5418 	case IBMVFC_HOST_ACTION_NONE:
5419 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5420 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5421 		break;
5422 	case IBMVFC_HOST_ACTION_RESET:
5423 		list_splice_init(&vhost->purge, &purge);
5424 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5425 		ibmvfc_complete_purge(&purge);
5426 		rc = ibmvfc_reset_crq(vhost);
5427 
5428 		spin_lock_irqsave(vhost->host->host_lock, flags);
5429 		if (!rc || rc == H_CLOSED)
5430 			vio_enable_interrupts(to_vio_dev(vhost->dev));
5431 		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5432 			/*
5433 			 * The only action we could have changed to would have
5434 			 * been re-enable, in which case we skip the rest of
5435 			 * this path and wait until we've done the re-enable
5436 			 * before sending the crq init.
5437 			 */
5438 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5439 
5440 			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5441 			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5442 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5443 				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5444 			}
5445 		}
5446 		break;
5447 	case IBMVFC_HOST_ACTION_REENABLE:
5448 		list_splice_init(&vhost->purge, &purge);
5449 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5450 		ibmvfc_complete_purge(&purge);
5451 		rc = ibmvfc_reenable_crq_queue(vhost);
5452 
5453 		spin_lock_irqsave(vhost->host->host_lock, flags);
5454 		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5455 			/*
5456 			 * The only action we could have changed to would have
5457 			 * been reset, in which case we skip the rest of this
5458 			 * path and wait until we've done the reset before
5459 			 * sending the crq init.
5460 			 */
5461 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5462 			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5463 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5464 				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5465 			}
5466 		}
5467 		break;
5468 	case IBMVFC_HOST_ACTION_LOGO:
5469 		vhost->job_step(vhost);
5470 		break;
5471 	case IBMVFC_HOST_ACTION_INIT:
5472 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5473 		if (vhost->delay_init) {
5474 			vhost->delay_init = 0;
5475 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5476 			ssleep(15);
5477 			return;
5478 		} else
5479 			vhost->job_step(vhost);
5480 		break;
5481 	case IBMVFC_HOST_ACTION_QUERY:
5482 		list_for_each_entry(tgt, &vhost->targets, queue)
5483 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5484 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5485 		break;
5486 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5487 		list_for_each_entry(tgt, &vhost->targets, queue) {
5488 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5489 				tgt->job_step(tgt);
5490 				break;
5491 			}
5492 		}
5493 
5494 		if (!ibmvfc_dev_init_to_do(vhost))
5495 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5496 		break;
5497 	case IBMVFC_HOST_ACTION_TGT_DEL:
5498 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5499 		list_for_each_entry(tgt, &vhost->targets, queue) {
5500 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5501 				tgt->job_step(tgt);
5502 				break;
5503 			}
5504 		}
5505 
5506 		if (ibmvfc_dev_logo_to_do(vhost)) {
5507 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5508 			return;
5509 		}
5510 
5511 		list_for_each_entry(tgt, &vhost->targets, queue) {
5512 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5513 				tgt_dbg(tgt, "Deleting rport\n");
5514 				rport = tgt->rport;
5515 				tgt->rport = NULL;
5516 				list_del(&tgt->queue);
5517 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5518 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5519 				if (rport)
5520 					fc_remote_port_delete(rport);
5521 				del_timer_sync(&tgt->timer);
5522 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5523 				return;
5524 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5525 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5526 				rport = tgt->rport;
5527 				tgt->rport = NULL;
5528 				tgt->init_retries = 0;
5529 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5530 
5531 				/*
5532 				 * If fast fail is enabled, we wait for it to fire and then clean up
5533 				 * the old port, since we expect the fast fail timer to clean up the
5534 				 * outstanding I/O faster than waiting for normal command timeouts.
5535 				 * However, if fast fail is disabled, any I/O outstanding to the
5536 				 * rport LUNs will stay outstanding indefinitely, since the EH handlers
5537 				 * won't get invoked for I/Os timing out. If this is an NPIV failover
5538 				 * scenario, the better alternative is to use the move login.
5539 				 */
5540 				if (rport && rport->fast_io_fail_tmo == -1)
5541 					tgt->move_login = 1;
5542 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5543 				if (rport)
5544 					fc_remote_port_delete(rport);
5545 				return;
5546 			}
5547 		}
5548 
5549 		if (vhost->state == IBMVFC_INITIALIZING) {
5550 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5551 				if (vhost->reinit) {
5552 					vhost->reinit = 0;
5553 					scsi_block_requests(vhost->host);
5554 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5555 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5556 				} else {
5557 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5558 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5559 					wake_up(&vhost->init_wait_q);
5560 					schedule_work(&vhost->rport_add_work_q);
5561 					vhost->init_retries = 0;
5562 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5563 					scsi_unblock_requests(vhost->host);
5564 				}
5565 
5566 				return;
5567 			} else {
5568 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5569 				vhost->job_step = ibmvfc_discover_targets;
5570 			}
5571 		} else {
5572 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5573 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5574 			scsi_unblock_requests(vhost->host);
5575 			wake_up(&vhost->init_wait_q);
5576 			return;
5577 		}
5578 		break;
5579 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5580 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5581 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5582 		ibmvfc_alloc_targets(vhost);
5583 		spin_lock_irqsave(vhost->host->host_lock, flags);
5584 		break;
5585 	case IBMVFC_HOST_ACTION_TGT_INIT:
5586 		list_for_each_entry(tgt, &vhost->targets, queue) {
5587 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5588 				tgt->job_step(tgt);
5589 				break;
5590 			}
5591 		}
5592 
5593 		if (!ibmvfc_dev_init_to_do(vhost))
5594 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5595 		break;
5596 	default:
5597 		break;
5598 	}
5599 
5600 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5601 }
5602 
5603 /**
5604  * ibmvfc_work - Do task level work
5605  * @data:		ibmvfc host struct
5606  *
5607  * Returns:
5608  *	zero
5609  **/
5610 static int ibmvfc_work(void *data)
5611 {
5612 	struct ibmvfc_host *vhost = data;
5613 	int rc;
5614 
5615 	set_user_nice(current, MIN_NICE);
5616 
5617 	while (1) {
5618 		rc = wait_event_interruptible(vhost->work_wait_q,
5619 					      ibmvfc_work_to_do(vhost));
5620 
5621 		BUG_ON(rc);
5622 
5623 		if (kthread_should_stop())
5624 			break;
5625 
5626 		ibmvfc_do_work(vhost);
5627 	}
5628 
5629 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5630 	return 0;
5631 }
5632 
5633 /**
5634  * ibmvfc_alloc_queue - Allocate queue
5635  * @vhost:	ibmvfc host struct
5636  * @queue:	ibmvfc queue to allocate
5637  * @fmt:	queue format to allocate
5638  *
5639  * Returns:
5640  *	0 on success / non-zero on failure
5641  **/
5642 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5643 			      struct ibmvfc_queue *queue,
5644 			      enum ibmvfc_msg_fmt fmt)
5645 {
5646 	struct device *dev = vhost->dev;
5647 	size_t fmt_size;
5648 	unsigned int pool_size = 0;
5649 
5650 	ENTER;
5651 	spin_lock_init(&queue->_lock);
5652 	queue->q_lock = &queue->_lock;
5653 
5654 	switch (fmt) {
5655 	case IBMVFC_CRQ_FMT:
5656 		fmt_size = sizeof(*queue->msgs.crq);
5657 		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
5658 		break;
5659 	case IBMVFC_ASYNC_FMT:
5660 		fmt_size = sizeof(*queue->msgs.async);
5661 		break;
5662 	case IBMVFC_SUB_CRQ_FMT:
5663 		fmt_size = sizeof(*queue->msgs.scrq);
5664 		/* We need one extra event for Cancel Commands */
5665 		pool_size = max_requests + 1;
5666 		break;
5667 	default:
5668 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5669 		return -EINVAL;
5670 	}
5671 
5672 	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5673 		dev_err(dev, "Couldn't initialize event pool.\n");
5674 		return -ENOMEM;
5675 	}
5676 
5677 	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5678 	if (!queue->msgs.handle)
5679 		return -ENOMEM;
5680 
5681 	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5682 					  DMA_BIDIRECTIONAL);
5683 
5684 	if (dma_mapping_error(dev, queue->msg_token)) {
5685 		free_page((unsigned long)queue->msgs.handle);
5686 		queue->msgs.handle = NULL;
5687 		return -ENOMEM;
5688 	}
5689 
5690 	queue->cur = 0;
5691 	queue->fmt = fmt;
5692 	queue->size = PAGE_SIZE / fmt_size;
5693 
5694 	queue->vhost = vhost;
5695 	return 0;
5696 }
5697 
5698 /**
5699  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5700  * @vhost:	ibmvfc host struct
5701  *
5702  * Allocates a page for messages, maps it for dma, and registers
5703  * the crq with the hypervisor.
5704  *
5705  * Return value:
5706  *	zero on success / other on failure
5707  **/
5708 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5709 {
5710 	int rc, retrc = -ENOMEM;
5711 	struct device *dev = vhost->dev;
5712 	struct vio_dev *vdev = to_vio_dev(dev);
5713 	struct ibmvfc_queue *crq = &vhost->crq;
5714 
5715 	ENTER;
5716 	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5717 		return -ENOMEM;
5718 
5719 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5720 					crq->msg_token, PAGE_SIZE);
5721 
5722 	if (rc == H_RESOURCE)
5723 		/* maybe kexecing and resource is busy. try a reset */
5724 		retrc = rc = ibmvfc_reset_crq(vhost);
5725 
5726 	if (rc == H_CLOSED)
5727 		dev_warn(dev, "Partner adapter not ready\n");
5728 	else if (rc) {
5729 		dev_warn(dev, "Error %d opening adapter\n", rc);
5730 		goto reg_crq_failed;
5731 	}
5732 
5733 	retrc = 0;
5734 
5735 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5736 
5737 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5738 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5739 		goto req_irq_failed;
5740 	}
5741 
5742 	if ((rc = vio_enable_interrupts(vdev))) {
5743 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5744 		goto req_irq_failed;
5745 	}
5746 
5747 	LEAVE;
5748 	return retrc;
5749 
5750 req_irq_failed:
5751 	tasklet_kill(&vhost->tasklet);
5752 	do {
5753 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5754 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5755 reg_crq_failed:
5756 	ibmvfc_free_queue(vhost, crq);
5757 	return retrc;
5758 }
5759 
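/**
 * ibmvfc_register_scsi_channel - Register a SCSI sub-CRQ with the hypervisor
 * @vhost:	ibmvfc host struct
 * @index:	sub-CRQ index to register
 *
 * Return value:
 *	0 on success / other on failure
 **/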
5760 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5761 				  int index)
5762 {
5763 	struct device *dev = vhost->dev;
5764 	struct vio_dev *vdev = to_vio_dev(dev);
5765 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5766 	int rc = -ENOMEM;
5767 
5768 	ENTER;
5769 
5770 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5771 			   &scrq->cookie, &scrq->hw_irq);
5772 
5773 	/* H_CLOSED indicates successful register, but no CRQ partner */
5774 	if (rc && rc != H_CLOSED) {
5775 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5776 		if (rc == H_PARAMETER)
5777 			dev_warn_once(dev, "Firmware may not support MQ\n");
5778 		goto reg_failed;
5779 	}
5780 
5781 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5782 
5783 	if (!scrq->irq) {
5784 		rc = -EINVAL;
5785 		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5786 		goto irq_failed;
5787 	}
5788 
5789 	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5790 		 vdev->unit_address, index);
5791 	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5792 
5793 	if (rc) {
5794 		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5795 		irq_dispose_mapping(scrq->irq);
5796 		goto irq_failed;
5797 	}
5798 
5799 	scrq->hwq_id = index;
5800 
5801 	LEAVE;
5802 	return 0;
5803 
5804 irq_failed:
5805 	do {
5806 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5807 	} while (rtas_busy_delay(rc));
5808 reg_failed:
5809 	LEAVE;
5810 	return rc;
5811 }
5812 
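/**
 * ibmvfc_deregister_scsi_channel - Deregister a SCSI sub-CRQ from the hypervisor
 * @vhost:	ibmvfc host struct
 * @index:	sub-CRQ index to deregister
 *
 **/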
5813 static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
5814 {
5815 	struct device *dev = vhost->dev;
5816 	struct vio_dev *vdev = to_vio_dev(dev);
5817 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5818 	long rc;
5819 
5820 	ENTER;
5821 
5822 	free_irq(scrq->irq, scrq);
5823 	irq_dispose_mapping(scrq->irq);
5824 	scrq->irq = 0;
5825 
5826 	do {
5827 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5828 					scrq->cookie);
5829 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5830 
5831 	if (rc)
5832 		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
5833 
5834 	/* Clean out the queue */
5835 	memset(scrq->msgs.crq, 0, PAGE_SIZE);
5836 	scrq->cur = 0;
5837 
5838 	LEAVE;
5839 }
5840 
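/**
 * ibmvfc_reg_sub_crqs - Register all allocated SCSI sub-CRQs
 * @vhost:	ibmvfc host struct
 *
 * If any channel fails to register, all previously registered channels
 * are deregistered and channel enquiry is disabled.
 **/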
5841 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost)
5842 {
5843 	int i, j;
5844 
5845 	ENTER;
5846 	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
5847 		return;
5848 
5849 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5850 		if (ibmvfc_register_scsi_channel(vhost, i)) {
5851 			for (j = i; j > 0; j--)
5852 				ibmvfc_deregister_scsi_channel(vhost, j - 1);
5853 			vhost->do_enquiry = 0;
5854 			return;
5855 		}
5856 	}
5857 
5858 	LEAVE;
5859 }
5860 
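/**
 * ibmvfc_dereg_sub_crqs - Deregister all SCSI sub-CRQs
 * @vhost:	ibmvfc host struct
 *
 **/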
5861 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost)
5862 {
5863 	int i;
5864 
5865 	ENTER;
5866 	if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs)
5867 		return;
5868 
5869 	for (i = 0; i < nr_scsi_hw_queues; i++)
5870 		ibmvfc_deregister_scsi_channel(vhost, i);
5871 
5872 	LEAVE;
5873 }
5874 
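/**
 * ibmvfc_init_sub_crqs - Allocate and register SCSI sub-CRQs
 * @vhost:	ibmvfc host struct
 *
 * On allocation failure, any queues allocated so far are freed and the
 * host falls back to single queue (non-MQ) operation.
 **/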
5875 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
5876 {
5877 	struct ibmvfc_queue *scrq;
5878 	int i, j;
5879 
5880 	ENTER;
5881 	if (!vhost->mq_enabled)
5882 		return;
5883 
5884 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
5885 					  sizeof(*vhost->scsi_scrqs.scrqs),
5886 					  GFP_KERNEL);
5887 	if (!vhost->scsi_scrqs.scrqs) {
5888 		vhost->do_enquiry = 0;
5889 		return;
5890 	}
5891 
5892 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5893 		scrq = &vhost->scsi_scrqs.scrqs[i];
5894 		if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) {
5895 			for (j = i; j > 0; j--) {
5896 				scrq = &vhost->scsi_scrqs.scrqs[j - 1];
5897 				ibmvfc_free_queue(vhost, scrq);
5898 			}
5899 			kfree(vhost->scsi_scrqs.scrqs);
5900 			vhost->scsi_scrqs.scrqs = NULL;
5901 			vhost->scsi_scrqs.active_queues = 0;
5902 			vhost->do_enquiry = 0;
5903 			vhost->mq_enabled = 0;
5904 			return;
5905 		}
5906 	}
5907 
5908 	ibmvfc_reg_sub_crqs(vhost);
5909 
5910 	LEAVE;
5911 }
5912 
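/**
 * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQs
 * @vhost:	ibmvfc host struct
 *
 **/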
5913 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
5914 {
5915 	struct ibmvfc_queue *scrq;
5916 	int i;
5917 
5918 	ENTER;
5919 	if (!vhost->scsi_scrqs.scrqs)
5920 		return;
5921 
5922 	ibmvfc_dereg_sub_crqs(vhost);
5923 
5924 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5925 		scrq = &vhost->scsi_scrqs.scrqs[i];
5926 		ibmvfc_free_queue(vhost, scrq);
5927 	}
5928 
5929 	kfree(vhost->scsi_scrqs.scrqs);
5930 	vhost->scsi_scrqs.scrqs = NULL;
5931 	vhost->scsi_scrqs.active_queues = 0;
5932 	LEAVE;
5933 }
5934 
5935 /**
5936  * ibmvfc_free_mem - Free memory for vhost
5937  * @vhost:	ibmvfc host struct
5938  *
5939  * Return value:
5940  * 	none
5941  **/
5942 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
5943 {
5944 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5945 
5946 	ENTER;
5947 	mempool_destroy(vhost->tgt_pool);
5948 	kfree(vhost->trace);
5949 	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
5950 			  vhost->disc_buf_dma);
5951 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
5952 			  vhost->login_buf, vhost->login_buf_dma);
5953 	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
5954 			  vhost->channel_setup_buf, vhost->channel_setup_dma);
5955 	dma_pool_destroy(vhost->sg_pool);
5956 	ibmvfc_free_queue(vhost, async_q);
5957 	LEAVE;
5958 }
5959 
5960 /**
5961  * ibmvfc_alloc_mem - Allocate memory for vhost
5962  * @vhost:	ibmvfc host struct
5963  *
5964  * Return value:
5965  * 	0 on success / non-zero on failure
5966  **/
5967 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5968 {
5969 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5970 	struct device *dev = vhost->dev;
5971 
5972 	ENTER;
5973 	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
5974 		dev_err(dev, "Couldn't allocate/map async queue.\n");
5975 		goto nomem;
5976 	}
5977 
5978 	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5979 					 SG_ALL * sizeof(struct srp_direct_buf),
5980 					 sizeof(struct srp_direct_buf), 0);
5981 
5982 	if (!vhost->sg_pool) {
5983 		dev_err(dev, "Failed to allocate sg pool\n");
5984 		goto unmap_async_crq;
5985 	}
5986 
5987 	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5988 					      &vhost->login_buf_dma, GFP_KERNEL);
5989 
5990 	if (!vhost->login_buf) {
5991 		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5992 		goto free_sg_pool;
5993 	}
5994 
5995 	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5996 	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5997 					     &vhost->disc_buf_dma, GFP_KERNEL);
5998 
5999 	if (!vhost->disc_buf) {
6000 		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
6001 		goto free_login_buffer;
6002 	}
6003 
6004 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
6005 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
6006 	atomic_set(&vhost->trace_index, -1);
6007 
6008 	if (!vhost->trace)
6009 		goto free_disc_buffer;
6010 
6011 	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
6012 						      sizeof(struct ibmvfc_target));
6013 
6014 	if (!vhost->tgt_pool) {
6015 		dev_err(dev, "Couldn't allocate target memory pool\n");
6016 		goto free_trace;
6017 	}
6018 
6019 	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
6020 						      &vhost->channel_setup_dma,
6021 						      GFP_KERNEL);
6022 
6023 	if (!vhost->channel_setup_buf) {
6024 		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
6025 		goto free_tgt_pool;
6026 	}
6027 
6028 	LEAVE;
6029 	return 0;
6030 
6031 free_tgt_pool:
6032 	mempool_destroy(vhost->tgt_pool);
6033 free_trace:
6034 	kfree(vhost->trace);
6035 free_disc_buffer:
6036 	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
6037 			  vhost->disc_buf_dma);
6038 free_login_buffer:
6039 	dma_free_coherent(dev, sizeof(*vhost->login_buf),
6040 			  vhost->login_buf, vhost->login_buf_dma);
6041 free_sg_pool:
6042 	dma_pool_destroy(vhost->sg_pool);
6043 unmap_async_crq:
6044 	ibmvfc_free_queue(vhost, async_q);
6045 nomem:
6046 	LEAVE;
6047 	return -ENOMEM;
6048 }
6049 
6050 /**
6051  * ibmvfc_rport_add_thread - Worker thread for rport adds
6052  * @work:	work struct
6053  *
6054  **/
6055 static void ibmvfc_rport_add_thread(struct work_struct *work)
6056 {
6057 	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
6058 						 rport_add_work_q);
6059 	struct ibmvfc_target *tgt;
6060 	struct fc_rport *rport;
6061 	unsigned long flags;
6062 	int did_work;
6063 
6064 	ENTER;
6065 	spin_lock_irqsave(vhost->host->host_lock, flags);
6066 	do {
6067 		did_work = 0;
6068 		if (vhost->state != IBMVFC_ACTIVE)
6069 			break;
6070 
6071 		list_for_each_entry(tgt, &vhost->targets, queue) {
6072 			if (tgt->add_rport) {
6073 				did_work = 1;
6074 				tgt->add_rport = 0;
6075 				kref_get(&tgt->kref);
6076 				rport = tgt->rport;
6077 				if (!rport) {
6078 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
6079 					ibmvfc_tgt_add_rport(tgt);
6080 				} else if (get_device(&rport->dev)) {
6081 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
6082 					tgt_dbg(tgt, "Setting rport roles\n");
6083 					fc_remote_port_rolechg(rport, tgt->ids.roles);
6084 					put_device(&rport->dev);
6085 				} else {
6086 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
6087 				}
6088 
6089 				kref_put(&tgt->kref, ibmvfc_release_tgt);
6090 				spin_lock_irqsave(vhost->host->host_lock, flags);
6091 				break;
6092 			}
6093 		}
6094 	} while (did_work);
6095 
6096 	if (vhost->state == IBMVFC_ACTIVE)
6097 		vhost->scan_complete = 1;
6098 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6099 	LEAVE;
6100 }
6101 
6102 /**
6103  * ibmvfc_probe - Adapter hot plug add entry point
6104  * @vdev:	vio device struct
6105  * @id:	vio device id struct
6106  *
6107  * Return value:
6108  * 	0 on success / non-zero on failure
6109  **/
6110 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6111 {
6112 	struct ibmvfc_host *vhost;
6113 	struct Scsi_Host *shost;
6114 	struct device *dev = &vdev->dev;
6115 	int rc = -ENOMEM;
6116 	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
6117 
6118 	ENTER;
6119 	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6120 	if (!shost) {
6121 		dev_err(dev, "Couldn't allocate host data\n");
6122 		goto out;
6123 	}
6124 
6125 	shost->transportt = ibmvfc_transport_template;
6126 	shost->can_queue = max_requests;
6127 	shost->max_lun = max_lun;
6128 	shost->max_id = max_targets;
6129 	shost->max_sectors = IBMVFC_MAX_SECTORS;
6130 	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6131 	shost->unique_id = shost->host_no;
6132 	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6133 
6134 	vhost = shost_priv(shost);
6135 	INIT_LIST_HEAD(&vhost->targets);
6136 	INIT_LIST_HEAD(&vhost->purge);
6137 	sprintf(vhost->name, IBMVFC_NAME);
6138 	vhost->host = shost;
6139 	vhost->dev = dev;
6140 	vhost->partition_number = -1;
6141 	vhost->log_level = log_level;
6142 	vhost->task_set = 1;
6143 
6144 	vhost->mq_enabled = mq_enabled;
6145 	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
6146 	vhost->using_channels = 0;
6147 	vhost->do_enquiry = 1;
6148 	vhost->scan_timeout = 0;
6149 
6150 	strcpy(vhost->partition_name, "UNKNOWN");
6151 	init_waitqueue_head(&vhost->work_wait_q);
6152 	init_waitqueue_head(&vhost->init_wait_q);
6153 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6154 	mutex_init(&vhost->passthru_mutex);
6155 
6156 	if ((rc = ibmvfc_alloc_mem(vhost)))
6157 		goto free_scsi_host;
6158 
6159 	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6160 					 shost->host_no);
6161 
6162 	if (IS_ERR(vhost->work_thread)) {
6163 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
6164 			PTR_ERR(vhost->work_thread));
6165 		rc = PTR_ERR(vhost->work_thread);
6166 		goto free_host_mem;
6167 	}
6168 
6169 	if ((rc = ibmvfc_init_crq(vhost))) {
6170 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6171 		goto kill_kthread;
6172 	}
6173 
6174 	if ((rc = scsi_add_host(shost, dev)))
6175 		goto release_crq;
6176 
6177 	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6178 
6179 	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6180 					   &ibmvfc_trace_attr))) {
6181 		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6182 		goto remove_shost;
6183 	}
6184 
6185 	ibmvfc_init_sub_crqs(vhost);
6186 
6187 	if (shost_to_fc_host(shost)->rqst_q)
6188 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
6189 	dev_set_drvdata(dev, vhost);
6190 	spin_lock(&ibmvfc_driver_lock);
6191 	list_add_tail(&vhost->queue, &ibmvfc_head);
6192 	spin_unlock(&ibmvfc_driver_lock);
6193 
6194 	ibmvfc_send_crq_init(vhost);
6195 	scsi_scan_host(shost);
6196 	return 0;
6197 
6198 remove_shost:
6199 	scsi_remove_host(shost);
6200 release_crq:
6201 	ibmvfc_release_crq_queue(vhost);
6202 kill_kthread:
6203 	kthread_stop(vhost->work_thread);
6204 free_host_mem:
6205 	ibmvfc_free_mem(vhost);
6206 free_scsi_host:
6207 	scsi_host_put(shost);
6208 out:
6209 	LEAVE;
6210 	return rc;
6211 }
6212 
6213 /**
6214  * ibmvfc_remove - Adapter hot plug remove entry point
6215  * @vdev:	vio device struct
6216  *
6217  * Return value:
6218  * 	none
6219  **/
6220 static void ibmvfc_remove(struct vio_dev *vdev)
6221 {
6222 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6223 	LIST_HEAD(purge);
6224 	unsigned long flags;
6225 
6226 	ENTER;
6227 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6228 
6229 	spin_lock_irqsave(vhost->host->host_lock, flags);
6230 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6231 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6232 
6233 	ibmvfc_wait_while_resetting(vhost);
6234 	kthread_stop(vhost->work_thread);
6235 	fc_remove_host(vhost->host);
6236 	scsi_remove_host(vhost->host);
6237 
6238 	spin_lock_irqsave(vhost->host->host_lock, flags);
6239 	ibmvfc_purge_requests(vhost, DID_ERROR);
6240 	list_splice_init(&vhost->purge, &purge);
6241 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6242 	ibmvfc_complete_purge(&purge);
6243 	ibmvfc_release_sub_crqs(vhost);
6244 	ibmvfc_release_crq_queue(vhost);
6245 
6246 	ibmvfc_free_mem(vhost);
6247 	spin_lock(&ibmvfc_driver_lock);
6248 	list_del(&vhost->queue);
6249 	spin_unlock(&ibmvfc_driver_lock);
6250 	scsi_host_put(vhost->host);
6251 	LEAVE;
6252 }
6253 
6254 /**
6255  * ibmvfc_resume - Resume from suspend
6256  * @dev:	device struct
6257  *
6258  * We may have lost an interrupt across suspend/resume, so kick the
6259  * interrupt handler
6260  *
6261  */
6262 static int ibmvfc_resume(struct device *dev)
6263 {
6264 	unsigned long flags;
6265 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6266 	struct vio_dev *vdev = to_vio_dev(dev);
6267 
6268 	spin_lock_irqsave(vhost->host->host_lock, flags);
6269 	vio_disable_interrupts(vdev);
6270 	tasklet_schedule(&vhost->tasklet);
6271 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6272 	return 0;
6273 }
6274 
6275 /**
6276  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6277  * @vdev:	vio device struct
6278  *
6279  * Return value:
6280  *	Number of bytes the driver will need to DMA map at the same time in
6281  *	order to perform well.
6282  */
6283 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6284 {
6285 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
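	/* Event pool IUs plus an estimated 512KB of payload per cmd_per_lun command */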
6286 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6287 }
6288 
6289 static const struct vio_device_id ibmvfc_device_table[] = {
6290 	{"fcp", "IBM,vfc-client"},
6291 	{ "", "" }
6292 };
6293 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6294 
6295 static const struct dev_pm_ops ibmvfc_pm_ops = {
6296 	.resume = ibmvfc_resume
6297 };
6298 
6299 static struct vio_driver ibmvfc_driver = {
6300 	.id_table = ibmvfc_device_table,
6301 	.probe = ibmvfc_probe,
6302 	.remove = ibmvfc_remove,
6303 	.get_desired_dma = ibmvfc_get_desired_dma,
6304 	.name = IBMVFC_NAME,
6305 	.pm = &ibmvfc_pm_ops,
6306 };
6307 
6308 static struct fc_function_template ibmvfc_transport_functions = {
6309 	.show_host_fabric_name = 1,
6310 	.show_host_node_name = 1,
6311 	.show_host_port_name = 1,
6312 	.show_host_supported_classes = 1,
6313 	.show_host_port_type = 1,
6314 	.show_host_port_id = 1,
6315 	.show_host_maxframe_size = 1,
6316 
6317 	.get_host_port_state = ibmvfc_get_host_port_state,
6318 	.show_host_port_state = 1,
6319 
6320 	.get_host_speed = ibmvfc_get_host_speed,
6321 	.show_host_speed = 1,
6322 
6323 	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6324 	.terminate_rport_io = ibmvfc_terminate_rport_io,
6325 
6326 	.show_rport_maxframe_size = 1,
6327 	.show_rport_supported_classes = 1,
6328 
6329 	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6330 	.show_rport_dev_loss_tmo = 1,
6331 
6332 	.get_starget_node_name = ibmvfc_get_starget_node_name,
6333 	.show_starget_node_name = 1,
6334 
6335 	.get_starget_port_name = ibmvfc_get_starget_port_name,
6336 	.show_starget_port_name = 1,
6337 
6338 	.get_starget_port_id = ibmvfc_get_starget_port_id,
6339 	.show_starget_port_id = 1,
6340 
6341 	.bsg_request = ibmvfc_bsg_request,
6342 	.bsg_timeout = ibmvfc_bsg_timeout,
6343 };
6344 
6345 /**
6346  * ibmvfc_module_init - Initialize the ibmvfc module
6347  *
6348  * Return value:
6349  * 	0 on success / other on failure
6350  **/
6351 static int __init ibmvfc_module_init(void)
6352 {
6353 	int rc;
6354 
6355 	if (!firmware_has_feature(FW_FEATURE_VIO))
6356 		return -ENODEV;
6357 
6358 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6359 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6360 
6361 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6362 	if (!ibmvfc_transport_template)
6363 		return -ENOMEM;
6364 
6365 	rc = vio_register_driver(&ibmvfc_driver);
6366 	if (rc)
6367 		fc_release_transport(ibmvfc_transport_template);
6368 	return rc;
6369 }
6370 
6371 /**
6372  * ibmvfc_module_exit - Teardown the ibmvfc module
6373  *
6374  * Return value:
6375  * 	nothing
6376  **/
6377 static void __exit ibmvfc_module_exit(void)
6378 {
6379 	vio_unregister_driver(&ibmvfc_driver);
6380 	fc_release_transport(ibmvfc_transport_template);
6381 }
6382 
6383 module_init(ibmvfc_module_init);
6384 module_exit(ibmvfc_module_exit);
6385