1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4 *
5 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) IBM Corporation, 2008
8 */
9
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kthread.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/pm.h>
21 #include <linux/stringify.h>
22 #include <linux/bsg-lib.h>
23 #include <asm/firmware.h>
24 #include <asm/irq.h>
25 #include <asm/vio.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/scsi_bsg_fc.h>
33 #include "ibmvfc.h"
34
35 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
36 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
37 static u64 max_lun = IBMVFC_MAX_LUN;
38 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
39 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
40 static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
41 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
42 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
43 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
44 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
45 static unsigned int mq_enabled = IBMVFC_MQ;
46 static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
47 static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
48 static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
49 static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
50
51 static LIST_HEAD(ibmvfc_head);
52 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
53 static struct scsi_transport_template *ibmvfc_transport_template;
54
55 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
56 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
59
60 module_param_named(mq, mq_enabled, uint, S_IRUGO);
61 MODULE_PARM_DESC(mq, "Enable multiqueue support. "
62 "[Default=" __stringify(IBMVFC_MQ) "]");
63 module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
64 MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
65 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
66 module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
67 MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
68 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
69 module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
70 MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
71 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
72 module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
73 MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with fewer channels. "
74 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
75
76 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
77 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
78 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
79 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(default_timeout,
81 "Default timeout in seconds for initialization and EH commands. "
82 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
83 module_param_named(max_requests, max_requests, uint, S_IRUGO);
84 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
85 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
86 module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
87 MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
88 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
89 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
90 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
91 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
92 module_param_named(max_targets, max_targets, uint, S_IRUGO);
93 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
94 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
95 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
96 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
97 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
98 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
99 MODULE_PARM_DESC(debug, "Enable driver debug information. "
100 "[Default=" __stringify(IBMVFC_DEBUG) "]");
101 module_param_named(log_level, log_level, uint, 0);
102 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
103 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
104 module_param_named(cls3_error, cls3_error, uint, 0);
105 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
106 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
107
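/*
 * Translation table for command completion status. Each entry maps a
 * (status class, error) pair returned by the VIOS to the SCSI result to
 * report, whether the command is worth retrying, and whether to log it.
 */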
108 static const struct {
109 u16 status;
110 u16 error;
111 u8 result;
112 u8 retry;
113 int log;
114 char *name;
115 } cmd_status [] = {
116 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
117 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
118 { IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
119 { IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
120 { IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
121 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
122 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
123 { IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
124 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
125 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
126 { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
127 { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
128 { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
129 { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
130
131 { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
132 { IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
133 { IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
134 { IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
135 { IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
136 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
137 { IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
138 { IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
139 { IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
140 { IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
141
142 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
143 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
144 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
145 { IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
146 { IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
147 { IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
148 { IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
149 { IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
150 { IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
151 { IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
152 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
153
154 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
155 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
156 };
157
158 static void ibmvfc_npiv_login(struct ibmvfc_host *);
159 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
160 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
161 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
162 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
163 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
164 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
165
166 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
167 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
168
169 static const char *unknown_error = "unknown error";
170
171 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
172 unsigned long length, unsigned long *cookie,
173 unsigned long *irq)
174 {
175 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
176 long rc;
177
178 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
179 *cookie = retbuf[0];
180 *irq = retbuf[1];
181
182 return rc;
183 }
184
185 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
186 {
187 u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
188
189 return (host_caps & cap_flags) ? 1 : 0;
190 }
191
192 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
193 struct ibmvfc_cmd *vfc_cmd)
194 {
195 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
196 return &vfc_cmd->v2.iu;
197 else
198 return &vfc_cmd->v1.iu;
199 }
200
201 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
202 struct ibmvfc_cmd *vfc_cmd)
203 {
204 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
205 return &vfc_cmd->v2.rsp;
206 else
207 return &vfc_cmd->v1.rsp;
208 }
209
210 #ifdef CONFIG_SCSI_IBMVFC_TRACE
211 /**
212 * ibmvfc_trc_start - Log a start trace entry
213 * @evt: ibmvfc event struct
214 *
215 **/
216 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
217 {
218 struct ibmvfc_host *vhost = evt->vhost;
219 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
220 struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
221 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
222 struct ibmvfc_trace_entry *entry;
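/* The trace log is a ring buffer; masking the incremented index wraps it around */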
223 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
224
225 entry = &vhost->trace[index];
226 entry->evt = evt;
227 entry->time = jiffies;
228 entry->fmt = evt->crq.format;
229 entry->type = IBMVFC_TRC_START;
230
231 switch (entry->fmt) {
232 case IBMVFC_CMD_FORMAT:
233 entry->op_code = iu->cdb[0];
234 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
235 entry->lun = scsilun_to_int(&iu->lun);
236 entry->tmf_flags = iu->tmf_flags;
237 entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
238 break;
239 case IBMVFC_MAD_FORMAT:
240 entry->op_code = be32_to_cpu(mad->opcode);
241 break;
242 default:
243 break;
244 }
245 }
246
247 /**
248 * ibmvfc_trc_end - Log an end trace entry
249 * @evt: ibmvfc event struct
250 *
251 **/
252 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
253 {
254 struct ibmvfc_host *vhost = evt->vhost;
255 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
256 struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
257 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
258 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
259 struct ibmvfc_trace_entry *entry;
260 int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
261
262 entry = &vhost->trace[index];
263 entry->evt = evt;
264 entry->time = jiffies;
265 entry->fmt = evt->crq.format;
266 entry->type = IBMVFC_TRC_END;
267
268 switch (entry->fmt) {
269 case IBMVFC_CMD_FORMAT:
270 entry->op_code = iu->cdb[0];
271 entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
272 entry->lun = scsilun_to_int(&iu->lun);
273 entry->tmf_flags = iu->tmf_flags;
274 entry->u.end.status = be16_to_cpu(vfc_cmd->status);
275 entry->u.end.error = be16_to_cpu(vfc_cmd->error);
276 entry->u.end.fcp_rsp_flags = rsp->flags;
277 entry->u.end.rsp_code = rsp->data.info.rsp_code;
278 entry->u.end.scsi_status = rsp->scsi_status;
279 break;
280 case IBMVFC_MAD_FORMAT:
281 entry->op_code = be32_to_cpu(mad->opcode);
282 entry->u.end.status = be16_to_cpu(mad->status);
283 break;
284 default:
285 break;
286
287 }
288 }
289
290 #else
291 #define ibmvfc_trc_start(evt) do { } while (0)
292 #define ibmvfc_trc_end(evt) do { } while (0)
293 #endif
294
295 /**
296 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
297 * @status: status / error class
298 * @error: error
299 *
300 * Return value:
301 * index into cmd_status / -EINVAL on failure
302 **/
303 static int ibmvfc_get_err_index(u16 status, u16 error)
304 {
305 int i;
306
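/* The status field may carry extra flag bits, so match only the table entry's class bits */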
307 for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
308 if ((cmd_status[i].status & status) == cmd_status[i].status &&
309 cmd_status[i].error == error)
310 return i;
311
312 return -EINVAL;
313 }
314
315 /**
316 * ibmvfc_get_cmd_error - Find the error description for the fcp response
317 * @status: status / error class
318 * @error: error
319 *
320 * Return value:
321 * error description string
322 **/
323 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
324 {
325 int rc = ibmvfc_get_err_index(status, error);
326 if (rc >= 0)
327 return cmd_status[rc].name;
328 return unknown_error;
329 }
330
331 /**
332 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
333 * @vhost: ibmvfc host struct
334 * @vfc_cmd: ibmvfc command struct
335 *
336 * Return value:
337 * SCSI result value to return for completed command
338 **/
339 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
340 {
341 int err;
342 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
343 int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
344
345 if ((rsp->flags & FCP_RSP_LEN_VALID) &&
346 ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
347 rsp->data.info.rsp_code))
348 return DID_ERROR << 16;
349
350 err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
351 if (err >= 0)
352 return rsp->scsi_status | (cmd_status[err].result << 16);
353 return rsp->scsi_status | (DID_ERROR << 16);
354 }
355
356 /**
357 * ibmvfc_retry_cmd - Determine if error status is retryable
358 * @status: status / error class
359 * @error: error
360 *
361 * Return value:
362 * 1 if error should be retried / 0 if it should not
363 **/
364 static int ibmvfc_retry_cmd(u16 status, u16 error)
365 {
366 int rc = ibmvfc_get_err_index(status, error);
367
368 if (rc >= 0)
369 return cmd_status[rc].retry;
370 return 1;
371 }
372
373 static const char *unknown_fc_explain = "unknown fc explain";
374
375 static const struct {
376 u16 fc_explain;
377 char *name;
378 } ls_explain [] = {
379 { 0x00, "no additional explanation" },
380 { 0x01, "service parameter error - options" },
381 { 0x03, "service parameter error - initiator control" },
382 { 0x05, "service parameter error - recipient control" },
383 { 0x07, "service parameter error - received data field size" },
384 { 0x09, "service parameter error - concurrent seq" },
385 { 0x0B, "service parameter error - credit" },
386 { 0x0D, "invalid N_Port/F_Port_Name" },
387 { 0x0E, "invalid node/Fabric Name" },
388 { 0x0F, "invalid common service parameters" },
389 { 0x11, "invalid association header" },
390 { 0x13, "association header required" },
391 { 0x15, "invalid originator S_ID" },
392 { 0x17, "invalid OX_ID-RX-ID combination" },
393 { 0x19, "command (request) already in progress" },
394 { 0x1E, "N_Port Login requested" },
395 { 0x1F, "Invalid N_Port_ID" },
396 };
397
398 static const struct {
399 u16 fc_explain;
400 char *name;
401 } gs_explain [] = {
402 { 0x00, "no additional explanation" },
403 { 0x01, "port identifier not registered" },
404 { 0x02, "port name not registered" },
405 { 0x03, "node name not registered" },
406 { 0x04, "class of service not registered" },
407 { 0x06, "initial process associator not registered" },
408 { 0x07, "FC-4 TYPEs not registered" },
409 { 0x08, "symbolic port name not registered" },
410 { 0x09, "symbolic node name not registered" },
411 { 0x0A, "port type not registered" },
412 { 0xF0, "authorization exception" },
413 { 0xF1, "authentication exception" },
414 { 0xF2, "data base full" },
415 { 0xF3, "data base empty" },
416 { 0xF4, "processing request" },
417 { 0xF5, "unable to verify connection" },
418 { 0xF6, "devices not in a common zone" },
419 };
420
421 /**
422 * ibmvfc_get_ls_explain - Return the FC Explain description text
423 * @status: FC Explain status
424 *
425 * Returns:
426 * error string
427 **/
428 static const char *ibmvfc_get_ls_explain(u16 status)
429 {
430 int i;
431
432 for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
433 if (ls_explain[i].fc_explain == status)
434 return ls_explain[i].name;
435
436 return unknown_fc_explain;
437 }
438
439 /**
440 * ibmvfc_get_gs_explain - Return the FC Explain description text
441 * @status: FC Explain status
442 *
443 * Returns:
444 * error string
445 **/
446 static const char *ibmvfc_get_gs_explain(u16 status)
447 {
448 int i;
449
450 for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
451 if (gs_explain[i].fc_explain == status)
452 return gs_explain[i].name;
453
454 return unknown_fc_explain;
455 }
456
457 static const struct {
458 enum ibmvfc_fc_type fc_type;
459 char *name;
460 } fc_type [] = {
461 { IBMVFC_FABRIC_REJECT, "fabric reject" },
462 { IBMVFC_PORT_REJECT, "port reject" },
463 { IBMVFC_LS_REJECT, "ELS reject" },
464 { IBMVFC_FABRIC_BUSY, "fabric busy" },
465 { IBMVFC_PORT_BUSY, "port busy" },
466 { IBMVFC_BASIC_REJECT, "basic reject" },
467 };
468
469 static const char *unknown_fc_type = "unknown fc type";
470
471 /**
472 * ibmvfc_get_fc_type - Return the FC Type description text
473 * @status: FC Type error status
474 *
475 * Returns:
476 * error string
477 **/
478 static const char *ibmvfc_get_fc_type(u16 status)
479 {
480 int i;
481
482 for (i = 0; i < ARRAY_SIZE(fc_type); i++)
483 if (fc_type[i].fc_type == status)
484 return fc_type[i].name;
485
486 return unknown_fc_type;
487 }
488
489 /**
490 * ibmvfc_set_tgt_action - Set the next init action for the target
491 * @tgt: ibmvfc target struct
492 * @action: action to perform
493 *
494 * Returns:
495 * 0 if action changed / non-zero if not changed
496 **/
497 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
498 enum ibmvfc_target_action action)
499 {
500 int rc = -EINVAL;
501
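/* Only legal forward transitions in the rport teardown state machine update tgt->action */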
502 switch (tgt->action) {
503 case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
504 if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
505 action == IBMVFC_TGT_ACTION_DEL_RPORT) {
506 tgt->action = action;
507 rc = 0;
508 }
509 break;
510 case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
511 if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
512 action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
513 tgt->action = action;
514 rc = 0;
515 }
516 break;
517 case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
518 if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
519 tgt->action = action;
520 rc = 0;
521 }
522 break;
523 case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
524 if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
525 tgt->action = action;
526 rc = 0;
527 }
528 break;
529 case IBMVFC_TGT_ACTION_DEL_RPORT:
530 if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
531 tgt->action = action;
532 rc = 0;
533 }
534 break;
535 case IBMVFC_TGT_ACTION_DELETED_RPORT:
536 break;
537 default:
538 tgt->action = action;
539 rc = 0;
540 break;
541 }
542
543 if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
544 tgt->add_rport = 0;
545
546 return rc;
547 }
548
549 /**
550 * ibmvfc_set_host_state - Set the state for the host
551 * @vhost: ibmvfc host struct
552 * @state: state to set host to
553 *
554 * Returns:
555 * 0 if state changed / non-zero if not changed
556 **/
557 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
558 enum ibmvfc_host_state state)
559 {
560 int rc = 0;
561
562 switch (vhost->state) {
563 case IBMVFC_HOST_OFFLINE:
564 rc = -EINVAL;
565 break;
566 default:
567 vhost->state = state;
568 break;
569 }
570
571 return rc;
572 }
573
574 /**
575 * ibmvfc_set_host_action - Set the next init action for the host
576 * @vhost: ibmvfc host struct
577 * @action: action to perform
578 *
579 **/
580 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
581 enum ibmvfc_host_action action)
582 {
583 switch (action) {
584 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
585 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
586 vhost->action = action;
587 break;
588 case IBMVFC_HOST_ACTION_LOGO_WAIT:
589 if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
590 vhost->action = action;
591 break;
592 case IBMVFC_HOST_ACTION_INIT_WAIT:
593 if (vhost->action == IBMVFC_HOST_ACTION_INIT)
594 vhost->action = action;
595 break;
596 case IBMVFC_HOST_ACTION_QUERY:
597 switch (vhost->action) {
598 case IBMVFC_HOST_ACTION_INIT_WAIT:
599 case IBMVFC_HOST_ACTION_NONE:
600 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
601 vhost->action = action;
602 break;
603 default:
604 break;
605 }
606 break;
607 case IBMVFC_HOST_ACTION_TGT_INIT:
608 if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
609 vhost->action = action;
610 break;
611 case IBMVFC_HOST_ACTION_REENABLE:
612 case IBMVFC_HOST_ACTION_RESET:
613 vhost->action = action;
614 break;
615 case IBMVFC_HOST_ACTION_INIT:
616 case IBMVFC_HOST_ACTION_TGT_DEL:
617 case IBMVFC_HOST_ACTION_LOGO:
618 case IBMVFC_HOST_ACTION_QUERY_TGTS:
619 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
620 case IBMVFC_HOST_ACTION_NONE:
621 default:
622 switch (vhost->action) {
623 case IBMVFC_HOST_ACTION_RESET:
624 case IBMVFC_HOST_ACTION_REENABLE:
625 break;
626 default:
627 vhost->action = action;
628 break;
629 }
630 break;
631 }
632 }
633
634 /**
635 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
636 * @vhost: ibmvfc host struct
637 *
638 * Return value:
639 * nothing
640 **/
641 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
642 {
643 if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
644 vhost->state == IBMVFC_ACTIVE) {
645 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
646 scsi_block_requests(vhost->host);
647 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
648 }
649 } else
650 vhost->reinit = 1;
651
652 wake_up(&vhost->work_wait_q);
653 }
654
655 /**
656 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
657 * @tgt: ibmvfc target struct
658 **/
659 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
660 {
661 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
662 tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
663 tgt->init_retries = 0;
664 }
665 wake_up(&tgt->vhost->work_wait_q);
666 }
667
668 /**
669 * ibmvfc_link_down - Handle a link down event from the adapter
670 * @vhost: ibmvfc host struct
671 * @state: ibmvfc host state to enter
672 *
673 **/
674 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
675 enum ibmvfc_host_state state)
676 {
677 struct ibmvfc_target *tgt;
678
679 ENTER;
680 scsi_block_requests(vhost->host);
681 list_for_each_entry(tgt, &vhost->targets, queue)
682 ibmvfc_del_tgt(tgt);
683 ibmvfc_set_host_state(vhost, state);
684 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
685 vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
686 wake_up(&vhost->work_wait_q);
687 LEAVE;
688 }
689
690 /**
691 * ibmvfc_init_host - Start host initialization
692 * @vhost: ibmvfc host struct
693 *
694 * Return value:
695 * nothing
696 **/
697 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
698 {
699 struct ibmvfc_target *tgt;
700
701 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
702 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
703 dev_err(vhost->dev,
704 "Host initialization retries exceeded. Taking adapter offline\n");
705 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
706 return;
707 }
708 }
709
710 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
711 memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
712 vhost->async_crq.cur = 0;
713
714 list_for_each_entry(tgt, &vhost->targets, queue) {
715 if (vhost->client_migrated)
716 tgt->need_login = 1;
717 else
718 ibmvfc_del_tgt(tgt);
719 }
720
721 scsi_block_requests(vhost->host);
722 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
723 vhost->job_step = ibmvfc_npiv_login;
724 wake_up(&vhost->work_wait_q);
725 }
726 }
727
728 /**
729 * ibmvfc_send_crq - Send a CRQ
730 * @vhost: ibmvfc host struct
731 * @word1: the first 64 bits of the data
732 * @word2: the second 64 bits of the data
733 *
734 * Return value:
735 * 0 on success / other on failure
736 **/
737 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
738 {
739 struct vio_dev *vdev = to_vio_dev(vhost->dev);
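/* H_SEND_CRQ hands the 16-byte CRQ element to the VIOS as two 64-bit words */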
740 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
741 }
742
743 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
744 u64 word2, u64 word3, u64 word4)
745 {
746 struct vio_dev *vdev = to_vio_dev(vhost->dev);
747
748 return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
749 word1, word2, word3, word4);
750 }
751
752 /**
753 * ibmvfc_send_crq_init - Send a CRQ init message
754 * @vhost: ibmvfc host struct
755 *
756 * Return value:
757 * 0 on success / other on failure
758 **/
759 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
760 {
761 ibmvfc_dbg(vhost, "Sending CRQ init\n");
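/* Per the PAPR CRQ convention, 0xC0 in the leading byte marks an initialization element; format 0x01 requests initialization */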
762 return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
763 }
764
765 /**
766 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
767 * @vhost: ibmvfc host struct
768 *
769 * Return value:
770 * 0 on success / other on failure
771 **/
772 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
773 {
774 ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
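/* Format 0x02 acknowledges the partner's init and completes the CRQ handshake */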
775 return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
776 }
777
778 /**
779 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
780 * @vhost: ibmvfc host that owns the event pool
781 * @queue: ibmvfc queue struct
782 *
783 * Returns zero on success.
784 **/
785 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
786 struct ibmvfc_queue *queue)
787 {
788 int i;
789 struct ibmvfc_event_pool *pool = &queue->evt_pool;
790
791 ENTER;
792 if (!queue->total_depth)
793 return 0;
794
795 pool->size = queue->total_depth;
796 pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
797 if (!pool->events)
798 return -ENOMEM;
799
800 pool->iu_storage = dma_alloc_coherent(vhost->dev,
801 pool->size * sizeof(*pool->iu_storage),
802 &pool->iu_token, 0);
803
804 if (!pool->iu_storage) {
805 kfree(pool->events);
806 return -ENOMEM;
807 }
808
809 INIT_LIST_HEAD(&queue->sent);
810 INIT_LIST_HEAD(&queue->free);
811 queue->evt_free = queue->evt_depth;
812 queue->reserved_free = queue->reserved_depth;
813 spin_lock_init(&queue->l_lock);
814
815 for (i = 0; i < pool->size; ++i) {
816 struct ibmvfc_event *evt = &pool->events[i];
817
818 /*
819 * evt->active states
820 * 1 = in flight
821 * 0 = being completed
822 * -1 = free/freed
823 */
824 atomic_set(&evt->active, -1);
825 atomic_set(&evt->free, 1);
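/* 0x80 in the valid byte marks this as a valid command element when the event is submitted */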
826 evt->crq.valid = 0x80;
827 evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
828 evt->xfer_iu = pool->iu_storage + i;
829 evt->vhost = vhost;
830 evt->queue = queue;
831 evt->ext_list = NULL;
832 list_add_tail(&evt->queue_list, &queue->free);
833 }
834
835 LEAVE;
836 return 0;
837 }
838
839 /**
840 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
841 * @vhost: ibmvfc host that owns the event pool
842 * @queue: ibmvfc queue struct
843 *
844 **/
845 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
846 struct ibmvfc_queue *queue)
847 {
848 int i;
849 struct ibmvfc_event_pool *pool = &queue->evt_pool;
850
851 ENTER;
852 for (i = 0; i < pool->size; ++i) {
853 list_del(&pool->events[i].queue_list);
854 BUG_ON(atomic_read(&pool->events[i].free) != 1);
855 if (pool->events[i].ext_list)
856 dma_pool_free(vhost->sg_pool,
857 pool->events[i].ext_list,
858 pool->events[i].ext_list_token);
859 }
860
861 kfree(pool->events);
862 dma_free_coherent(vhost->dev,
863 pool->size * sizeof(*pool->iu_storage),
864 pool->iu_storage, pool->iu_token);
865 LEAVE;
866 }
867
868 /**
869 * ibmvfc_free_queue - Deallocate queue
870 * @vhost: ibmvfc host struct
871 * @queue: ibmvfc queue struct
872 *
873 * Unmaps dma and deallocates page for messages
874 **/
875 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
876 struct ibmvfc_queue *queue)
877 {
878 struct device *dev = vhost->dev;
879
880 dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
881 free_page((unsigned long)queue->msgs.handle);
882 queue->msgs.handle = NULL;
883
884 ibmvfc_free_event_pool(vhost, queue);
885 }
886
887 /**
888 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
889 * @vhost: ibmvfc host struct
890 *
891 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
892 * the crq with the hypervisor.
893 **/
894 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
895 {
896 long rc = 0;
897 struct vio_dev *vdev = to_vio_dev(vhost->dev);
898 struct ibmvfc_queue *crq = &vhost->crq;
899
900 ibmvfc_dbg(vhost, "Releasing CRQ\n");
901 free_irq(vdev->irq, vhost);
902 tasklet_kill(&vhost->tasklet);
903 do {
904 if (rc)
905 msleep(100);
906 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
907 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
908
909 vhost->state = IBMVFC_NO_CRQ;
910 vhost->logged_in = 0;
911
912 ibmvfc_free_queue(vhost, crq);
913 }
914
915 /**
916 * ibmvfc_reenable_crq_queue - reenables the CRQ
917 * @vhost: ibmvfc host struct
918 *
919 * Return value:
920 * 0 on success / other on failure
921 **/
922 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
923 {
924 int rc = 0;
925 struct vio_dev *vdev = to_vio_dev(vhost->dev);
926 unsigned long flags;
927
928 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
929
930 /* Re-enable the CRQ */
931 do {
932 if (rc)
933 msleep(100);
934 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
935 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
936
937 if (rc)
938 dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
939
940 spin_lock_irqsave(vhost->host->host_lock, flags);
941 spin_lock(vhost->crq.q_lock);
942 vhost->do_enquiry = 1;
943 vhost->using_channels = 0;
944 spin_unlock(vhost->crq.q_lock);
945 spin_unlock_irqrestore(vhost->host->host_lock, flags);
946
947 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
948
949 return rc;
950 }
951
952 /**
953 * ibmvfc_reset_crq - resets a crq after a failure
954 * @vhost: ibmvfc host struct
955 *
956 * Return value:
957 * 0 on success / other on failure
958 **/
959 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
960 {
961 int rc = 0;
962 unsigned long flags;
963 struct vio_dev *vdev = to_vio_dev(vhost->dev);
964 struct ibmvfc_queue *crq = &vhost->crq;
965
966 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
967
968 /* Close the CRQ */
969 do {
970 if (rc)
971 msleep(100);
972 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
973 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
974
975 spin_lock_irqsave(vhost->host->host_lock, flags);
976 spin_lock(vhost->crq.q_lock);
977 vhost->state = IBMVFC_NO_CRQ;
978 vhost->logged_in = 0;
979 vhost->do_enquiry = 1;
980 vhost->using_channels = 0;
981
982 /* Clean out the queue */
983 memset(crq->msgs.crq, 0, PAGE_SIZE);
984 crq->cur = 0;
985
986 /* And re-open it again */
987 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
988 crq->msg_token, PAGE_SIZE);
989
990 if (rc == H_CLOSED)
991 /* Adapter is good, but other end is not ready */
992 dev_warn(vhost->dev, "Partner adapter not ready\n");
993 else if (rc != 0)
994 dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
995
996 spin_unlock(vhost->crq.q_lock);
997 spin_unlock_irqrestore(vhost->host->host_lock, flags);
998
999 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
1000
1001 return rc;
1002 }
1003
1004 /**
1005 * ibmvfc_valid_event - Determines if event is valid.
1006 * @pool: event_pool that contains the event
1007 * @evt: ibmvfc event to be checked for validity
1008 *
1009 * Return value:
1010 * 1 if event is valid / 0 if event is not valid
1011 **/
1012 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
1013 struct ibmvfc_event *evt)
1014 {
1015 int index = evt - pool->events;
1016 if (index < 0 || index >= pool->size) /* outside of bounds */
1017 return 0;
1018 if (evt != pool->events + index) /* unaligned */
1019 return 0;
1020 return 1;
1021 }
1022
1023 /**
1024 * ibmvfc_free_event - Free the specified event
1025 * @evt: ibmvfc_event to be freed
1026 *
1027 **/
1028 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1029 {
1030 struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1031 unsigned long flags;
1032
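/* Catch double completion/free: the event must belong to the pool, not already be free, and no longer be active */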
1033 BUG_ON(!ibmvfc_valid_event(pool, evt));
1034 BUG_ON(atomic_inc_return(&evt->free) != 1);
1035 BUG_ON(atomic_dec_and_test(&evt->active));
1036
1037 spin_lock_irqsave(&evt->queue->l_lock, flags);
1038 list_add_tail(&evt->queue_list, &evt->queue->free);
1039 if (evt->reserved) {
1040 evt->reserved = 0;
1041 evt->queue->reserved_free++;
1042 } else {
1043 evt->queue->evt_free++;
1044 }
1045 if (evt->eh_comp)
1046 complete(evt->eh_comp);
1047 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1048 }
1049
1050 /**
1051 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1052 * @evt: ibmvfc event struct
1053 *
1054 * This function does not setup any error status, that must be done
1055 * before this function gets called.
1056 **/
1057 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1058 {
1059 struct scsi_cmnd *cmnd = evt->cmnd;
1060
1061 if (cmnd) {
1062 scsi_dma_unmap(cmnd);
1063 scsi_done(cmnd);
1064 }
1065
1066 ibmvfc_free_event(evt);
1067 }
1068
1069 /**
1070 * ibmvfc_complete_purge - Complete failed command list
1071 * @purge_list: list head of failed commands
1072 *
1073 * This function runs completions on commands to fail as a result of a
1074 * host reset or platform migration.
1075 **/
1076 static void ibmvfc_complete_purge(struct list_head *purge_list)
1077 {
1078 struct ibmvfc_event *evt, *pos;
1079
1080 list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1081 list_del(&evt->queue_list);
1082 ibmvfc_trc_end(evt);
1083 evt->done(evt);
1084 }
1085 }
1086
1087 /**
1088 * ibmvfc_fail_request - Fail request with specified error code
1089 * @evt: ibmvfc event struct
1090 * @error_code: error code to fail request with
1091 *
1092 * Return value:
1093 * none
1094 **/
1095 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1096 {
1097 /*
1098 * Anything we are failing should still be active. Otherwise, it
1099 * implies we already got a response for the command and are doing
1100 * something bad like double completing it.
1101 */
1102 BUG_ON(!atomic_dec_and_test(&evt->active));
1103 if (evt->cmnd) {
1104 evt->cmnd->result = (error_code << 16);
1105 evt->done = ibmvfc_scsi_eh_done;
1106 } else
1107 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1108
1109 del_timer(&evt->timer);
1110 }
1111
1112 /**
1113 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1114 * @vhost: ibmvfc host struct
1115 * @error_code: error code to fail requests with
1116 *
1117 * Return value:
1118 * none
1119 **/
1120 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1121 {
1122 struct ibmvfc_event *evt, *pos;
1123 struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1124 unsigned long flags;
1125 int hwqs = 0;
1126 int i;
1127
1128 if (vhost->using_channels)
1129 hwqs = vhost->scsi_scrqs.active_queues;
1130
1131 ibmvfc_dbg(vhost, "Purging all requests\n");
1132 spin_lock_irqsave(&vhost->crq.l_lock, flags);
1133 list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1134 ibmvfc_fail_request(evt, error_code);
1135 list_splice_init(&vhost->crq.sent, &vhost->purge);
1136 spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1137
1138 for (i = 0; i < hwqs; i++) {
1139 spin_lock_irqsave(queues[i].q_lock, flags);
1140 spin_lock(&queues[i].l_lock);
1141 list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1142 ibmvfc_fail_request(evt, error_code);
1143 list_splice_init(&queues[i].sent, &vhost->purge);
1144 spin_unlock(&queues[i].l_lock);
1145 spin_unlock_irqrestore(queues[i].q_lock, flags);
1146 }
1147 }
1148
1149 /**
1150 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1151 * @vhost: struct ibmvfc host to reset
1152 **/
1153 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1154 {
1155 ibmvfc_purge_requests(vhost, DID_ERROR);
1156 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1157 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1158 }
1159
1160 /**
1161 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1162 * @vhost: struct ibmvfc host to reset
1163 **/
1164 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1165 {
1166 if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1167 !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1168 scsi_block_requests(vhost->host);
1169 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1170 vhost->job_step = ibmvfc_npiv_logout;
1171 wake_up(&vhost->work_wait_q);
1172 } else
1173 ibmvfc_hard_reset_host(vhost);
1174 }
1175
1176 /**
1177 * ibmvfc_reset_host - Reset the connection to the server
1178 * @vhost: ibmvfc host struct
1179 **/
1180 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1181 {
1182 unsigned long flags;
1183
1184 spin_lock_irqsave(vhost->host->host_lock, flags);
1185 __ibmvfc_reset_host(vhost);
1186 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1187 }
1188
1189 /**
1190 * ibmvfc_retry_host_init - Retry host initialization if allowed
1191 * @vhost: ibmvfc host struct
1192 *
1193 * Returns: 1 if init will be retried / 0 if not
1194 *
1195 **/
1196 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1197 {
1198 int retry = 0;
1199
1200 if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1201 vhost->delay_init = 1;
1202 if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1203 dev_err(vhost->dev,
1204 "Host initialization retries exceeded. Taking adapter offline\n");
1205 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1206 } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1207 __ibmvfc_reset_host(vhost);
1208 else {
1209 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1210 retry = 1;
1211 }
1212 }
1213
1214 wake_up(&vhost->work_wait_q);
1215 return retry;
1216 }
1217
1218 /**
1219 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1220 * @starget: scsi target struct
1221 *
1222 * Return value:
1223 * ibmvfc_target struct / NULL if not found
1224 **/
1225 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1226 {
1227 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1228 struct ibmvfc_host *vhost = shost_priv(shost);
1229 struct ibmvfc_target *tgt;
1230
1231 list_for_each_entry(tgt, &vhost->targets, queue)
1232 if (tgt->target_id == starget->id) {
1233 kref_get(&tgt->kref);
1234 return tgt;
1235 }
1236 return NULL;
1237 }
1238
1239 /**
1240 * ibmvfc_get_target - Find the specified scsi_target
1241 * @starget: scsi target struct
1242 *
1243 * Return value:
1244 * ibmvfc_target struct / NULL if not found
1245 **/
1246 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1247 {
1248 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1249 struct ibmvfc_target *tgt;
1250 unsigned long flags;
1251
1252 spin_lock_irqsave(shost->host_lock, flags);
1253 tgt = __ibmvfc_get_target(starget);
1254 spin_unlock_irqrestore(shost->host_lock, flags);
1255 return tgt;
1256 }
1257
1258 /**
1259 * ibmvfc_get_host_speed - Get host port speed
1260 * @shost: scsi host struct
1261 *
1262 * Return value:
1263 * none
1264 **/
1265 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1266 {
1267 struct ibmvfc_host *vhost = shost_priv(shost);
1268 unsigned long flags;
1269
1270 spin_lock_irqsave(shost->host_lock, flags);
1271 if (vhost->state == IBMVFC_ACTIVE) {
1272 switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1273 case 1:
1274 fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1275 break;
1276 case 2:
1277 fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1278 break;
1279 case 4:
1280 fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1281 break;
1282 case 8:
1283 fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1284 break;
1285 case 10:
1286 fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1287 break;
1288 case 16:
1289 fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1290 break;
1291 default:
1292 ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1293 be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1294 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1295 break;
1296 }
1297 } else
1298 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1299 spin_unlock_irqrestore(shost->host_lock, flags);
1300 }
1301
1302 /**
1303 * ibmvfc_get_host_port_state - Get host port state
1304 * @shost: scsi host struct
1305 *
1306 * Return value:
1307 * none
1308 **/
1309 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1310 {
1311 struct ibmvfc_host *vhost = shost_priv(shost);
1312 unsigned long flags;
1313
1314 spin_lock_irqsave(shost->host_lock, flags);
1315 switch (vhost->state) {
1316 case IBMVFC_INITIALIZING:
1317 case IBMVFC_ACTIVE:
1318 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1319 break;
1320 case IBMVFC_LINK_DOWN:
1321 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1322 break;
1323 case IBMVFC_LINK_DEAD:
1324 case IBMVFC_HOST_OFFLINE:
1325 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1326 break;
1327 case IBMVFC_HALTED:
1328 fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1329 break;
1330 case IBMVFC_NO_CRQ:
1331 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1332 break;
1333 default:
1334 ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1335 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1336 break;
1337 }
1338 spin_unlock_irqrestore(shost->host_lock, flags);
1339 }
1340
1341 /**
1342 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1343 * @rport: rport struct
1344 * @timeout: timeout value
1345 *
1346 * Return value:
1347 * none
1348 **/
1349 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1350 {
1351 if (timeout)
1352 rport->dev_loss_tmo = timeout;
1353 else
1354 rport->dev_loss_tmo = 1;
1355 }
1356
1357 /**
1358 * ibmvfc_release_tgt - Free memory allocated for a target
1359 * @kref: kref struct
1360 *
1361 **/
1362 static void ibmvfc_release_tgt(struct kref *kref)
1363 {
1364 struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1365 kfree(tgt);
1366 }
1367
1368 /**
1369 * ibmvfc_get_starget_node_name - Get SCSI target's node name
1370 * @starget: scsi target struct
1371 *
1372 * Return value:
1373 * none
1374 **/
1375 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1376 {
1377 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1378 fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1379 if (tgt)
1380 kref_put(&tgt->kref, ibmvfc_release_tgt);
1381 }
1382
1383 /**
1384 * ibmvfc_get_starget_port_name - Get SCSI target's port name
1385 * @starget: scsi target struct
1386 *
1387 * Return value:
1388 * none
1389 **/
1390 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1391 {
1392 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1393 fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1394 if (tgt)
1395 kref_put(&tgt->kref, ibmvfc_release_tgt);
1396 }
1397
1398 /**
1399 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1400 * @starget: scsi target struct
1401 *
1402 * Return value:
1403 * none
1404 **/
1405 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1406 {
1407 struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1408 fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1409 if (tgt)
1410 kref_put(&tgt->kref, ibmvfc_release_tgt);
1411 }
1412
1413 /**
1414 * ibmvfc_wait_while_resetting - Wait while the host resets
1415 * @vhost: ibmvfc host struct
1416 *
1417 * Return value:
1418 * 0 on success / other on failure
1419 **/
1420 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1421 {
1422 long timeout = wait_event_timeout(vhost->init_wait_q,
1423 ((vhost->state == IBMVFC_ACTIVE ||
1424 vhost->state == IBMVFC_HOST_OFFLINE ||
1425 vhost->state == IBMVFC_LINK_DEAD) &&
1426 vhost->action == IBMVFC_HOST_ACTION_NONE),
1427 (init_timeout * HZ));
1428
1429 return timeout ? 0 : -EIO;
1430 }
1431
1432 /**
1433 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1434 * @shost: scsi host struct
1435 *
1436 * Return value:
1437 * 0 on success / other on failure
1438 **/
1439 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1440 {
1441 struct ibmvfc_host *vhost = shost_priv(shost);
1442
1443 dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1444 ibmvfc_reset_host(vhost);
1445 return ibmvfc_wait_while_resetting(vhost);
1446 }
1447
1448 /**
1449 * ibmvfc_gather_partition_info - Gather info about the LPAR
1450 * @vhost: ibmvfc host struct
1451 *
1452 * Return value:
1453 * none
1454 **/
1455 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1456 {
1457 struct device_node *rootdn;
1458 const char *name;
1459 const unsigned int *num;
1460
1461 rootdn = of_find_node_by_path("/");
1462 if (!rootdn)
1463 return;
1464
1465 name = of_get_property(rootdn, "ibm,partition-name", NULL);
1466 if (name)
1467 strscpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1468 num = of_get_property(rootdn, "ibm,partition-no", NULL);
1469 if (num)
1470 vhost->partition_number = *num;
1471 of_node_put(rootdn);
1472 }
1473
1474 /**
1475 * ibmvfc_set_login_info - Setup info for NPIV login
1476 * @vhost: ibmvfc host struct
1477 *
1478 * Return value:
1479 * none
1480 **/
1481 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1482 {
1483 struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1484 struct ibmvfc_queue *async_crq = &vhost->async_crq;
1485 struct device_node *of_node = vhost->dev->of_node;
1486 const char *location;
1487 u16 max_cmds;
1488
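/* Size the login request for the base CRQ plus, when MQ is enabled, each desired sub-CRQ's share of commands */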
1489 max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
1490 if (mq_enabled)
1491 max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
1492 vhost->scsi_scrqs.desired_queues;
1493
1494 memset(login_info, 0, sizeof(*login_info));
1495
1496 login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1497 login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1498 login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1499 login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1500 login_info->partition_num = cpu_to_be32(vhost->partition_number);
1501 login_info->vfc_frame_version = cpu_to_be32(1);
1502 login_info->fcp_version = cpu_to_be16(3);
1503 login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1504 if (vhost->client_migrated)
1505 login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1506
1507 login_info->max_cmds = cpu_to_be32(max_cmds);
1508 login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1509
1510 if (vhost->mq_enabled || vhost->using_channels)
1511 login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1512
1513 login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1514 login_info->async.len = cpu_to_be32(async_crq->size *
1515 sizeof(*async_crq->msgs.async));
1516 strscpy(login_info->partition_name, vhost->partition_name,
1517 sizeof(login_info->partition_name));
1518
1519 strscpy(login_info->device_name,
1520 dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name));
1521
1522 location = of_get_property(of_node, "ibm,loc-code", NULL);
1523 location = location ? location : dev_name(vhost->dev);
1524 strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
1525 }
1526
1527 /**
1528 * __ibmvfc_get_event - Gets the next free event in pool
1529 * @queue: ibmvfc queue struct
1530 * @reserved: event is for a reserved management command
1531 *
1532 * Returns a free event from the pool.
1533 **/
1534 static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
1535 {
1536 struct ibmvfc_event *evt = NULL;
1537 unsigned long flags;
1538
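/* Reserved events are accounted separately so driver-internal commands cannot be starved by SCSI I/O */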
1539 spin_lock_irqsave(&queue->l_lock, flags);
1540 if (reserved && queue->reserved_free) {
1541 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1542 evt->reserved = 1;
1543 queue->reserved_free--;
1544 } else if (queue->evt_free) {
1545 evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1546 queue->evt_free--;
1547 } else {
1548 goto out;
1549 }
1550
1551 atomic_set(&evt->free, 0);
1552 list_del(&evt->queue_list);
1553 out:
1554 spin_unlock_irqrestore(&queue->l_lock, flags);
1555 return evt;
1556 }
1557
1558 #define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
1559 #define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)
1560
1561 /**
1562 * ibmvfc_locked_done - Calls evt completion with host_lock held
1563 * @evt: ibmvfc evt to complete
1564 *
1565 * All non-scsi command completion callbacks have the expectation that the
1566 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1567 * MAD evt with the host_lock.
1568 **/
1569 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1570 {
1571 unsigned long flags;
1572
1573 spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1574 evt->_done(evt);
1575 spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1576 }
1577
1578 /**
1579 * ibmvfc_init_event - Initialize fields in an event struct that are always
1580 * required.
1581 * @evt: The event
1582 * @done: Routine to call when the event is responded to
1583 * @format: SRP or MAD format
1584 **/
1585 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1586 void (*done) (struct ibmvfc_event *), u8 format)
1587 {
1588 evt->cmnd = NULL;
1589 evt->sync_iu = NULL;
1590 evt->eh_comp = NULL;
1591 evt->crq.format = format;
1592 if (format == IBMVFC_CMD_FORMAT)
1593 evt->done = done;
1594 else {
1595 evt->_done = done;
1596 evt->done = ibmvfc_locked_done;
1597 }
1598 evt->hwq = 0;
1599 }
1600
1601 /**
1602 * ibmvfc_map_sg_list - Initialize scatterlist
1603 * @scmd: scsi command struct
1604 * @nseg: number of scatterlist segments
1605 * @md: memory descriptor list to initialize
1606 **/
1607 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1608 struct srp_direct_buf *md)
1609 {
1610 int i;
1611 struct scatterlist *sg;
1612
1613 scsi_for_each_sg(scmd, sg, nseg, i) {
1614 md[i].va = cpu_to_be64(sg_dma_address(sg));
1615 md[i].len = cpu_to_be32(sg_dma_len(sg));
1616 md[i].key = 0;
1617 }
1618 }
1619
1620 /**
1621 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1622 * @scmd: struct scsi_cmnd with the scatterlist
1623 * @evt: ibmvfc event struct
1624 * @vfc_cmd: vfc_cmd that contains the memory descriptor
1625 * @dev: device for which to map dma memory
1626 *
1627 * Returns:
1628 * 0 on success / non-zero on failure
1629 **/
1630 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1631 struct ibmvfc_event *evt,
1632 struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1633 {
1634
1635 int sg_mapped;
1636 struct srp_direct_buf *data = &vfc_cmd->ioba;
1637 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1638 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1639
1640 if (cls3_error)
1641 vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1642
1643 sg_mapped = scsi_dma_map(scmd);
1644 if (!sg_mapped) {
1645 vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1646 return 0;
1647 } else if (unlikely(sg_mapped < 0)) {
1648 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1649 scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1650 return sg_mapped;
1651 }
1652
1653 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1654 vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1655 iu->add_cdb_len |= IBMVFC_WRDATA;
1656 } else {
1657 vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1658 iu->add_cdb_len |= IBMVFC_RDDATA;
1659 }
1660
1661 if (sg_mapped == 1) {
1662 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1663 return 0;
1664 }
1665
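/* More than one segment: build an indirect descriptor list and point the command's buffer descriptor at it */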
1666 vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1667
1668 if (!evt->ext_list) {
1669 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1670 &evt->ext_list_token);
1671
1672 if (!evt->ext_list) {
1673 scsi_dma_unmap(scmd);
1674 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1675 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1676 return -ENOMEM;
1677 }
1678 }
1679
1680 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1681
1682 data->va = cpu_to_be64(evt->ext_list_token);
1683 data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1684 data->key = 0;
1685 return 0;
1686 }
1687
1688 /**
1689 * ibmvfc_timeout - Internal command timeout handler
1690 * @t: struct ibmvfc_event that timed out
1691 *
1692 * Called when an internally generated command times out
1693 **/
1694 static void ibmvfc_timeout(struct timer_list *t)
1695 {
1696 struct ibmvfc_event *evt = from_timer(evt, t, timer);
1697 struct ibmvfc_host *vhost = evt->vhost;
1698 dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1699 ibmvfc_reset_host(vhost);
1700 }
1701
1702 /**
1703 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1704 * @evt: event to be sent
1705 * @vhost: ibmvfc host struct
1706 * @timeout: timeout in seconds - 0 means do not time command
1707 *
1708 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1709 **/
1710 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1711 struct ibmvfc_host *vhost, unsigned long timeout)
1712 {
1713 __be64 *crq_as_u64 = (__be64 *) &evt->crq;
1714 unsigned long flags;
1715 int rc;
1716
1717 /* Copy the IU into the transfer area */
1718 *evt->xfer_iu = evt->iu;
1719 if (evt->crq.format == IBMVFC_CMD_FORMAT)
1720 evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1721 else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1722 evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1723 else
1724 BUG();
1725
1726 timer_setup(&evt->timer, ibmvfc_timeout, 0);
1727
1728 if (timeout) {
1729 evt->timer.expires = jiffies + (timeout * HZ);
1730 add_timer(&evt->timer);
1731 }
1732
1733 spin_lock_irqsave(&evt->queue->l_lock, flags);
1734 list_add_tail(&evt->queue_list, &evt->queue->sent);
1735 atomic_set(&evt->active, 1);
1736
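/*
 * Make sure the event is on the sent list and marked active before the
 * request is handed to the hypervisor, so a completion can never race
 * ahead of this bookkeeping.
 */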
1737 mb();
1738
1739 if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1740 rc = ibmvfc_send_sub_crq(vhost,
1741 evt->queue->vios_cookie,
1742 be64_to_cpu(crq_as_u64[0]),
1743 be64_to_cpu(crq_as_u64[1]),
1744 0, 0);
1745 else
1746 rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1747 be64_to_cpu(crq_as_u64[1]));
1748
1749 if (rc) {
1750 atomic_set(&evt->active, 0);
1751 list_del(&evt->queue_list);
1752 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1753 del_timer(&evt->timer);
1754
1755 /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1756 * Firmware will send a CRQ with a transport event (0xFF) to
1757 * tell this client what has happened to the transport. This
1758 * will be handled in ibmvfc_handle_crq()
1759 */
1760 if (rc == H_CLOSED) {
1761 if (printk_ratelimit())
1762 dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1763 if (evt->cmnd)
1764 scsi_dma_unmap(evt->cmnd);
1765 ibmvfc_free_event(evt);
1766 return SCSI_MLQUEUE_HOST_BUSY;
1767 }
1768
1769 dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1770 if (evt->cmnd) {
1771 evt->cmnd->result = DID_ERROR << 16;
1772 evt->done = ibmvfc_scsi_eh_done;
1773 } else
1774 evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1775
1776 evt->done(evt);
1777 } else {
1778 spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1779 ibmvfc_trc_start(evt);
1780 }
1781
1782 return 0;
1783 }
1784
1785 /**
1786 * ibmvfc_log_error - Log an error for the failed command if appropriate
1787 * @evt: ibmvfc event to log
1788 *
1789 **/
1790 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1791 {
1792 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1793 struct ibmvfc_host *vhost = evt->vhost;
1794 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1795 struct scsi_cmnd *cmnd = evt->cmnd;
1796 const char *err = unknown_error;
1797 int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1798 int logerr = 0;
1799 int rsp_code = 0;
1800
1801 if (index >= 0) {
1802 logerr = cmd_status[index].log;
1803 err = cmd_status[index].name;
1804 }
1805
1806 if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1807 return;
1808
1809 if (rsp->flags & FCP_RSP_LEN_VALID)
1810 rsp_code = rsp->data.info.rsp_code;
1811
1812 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1813 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1814 cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1815 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1816 }
1817
1818 /**
1819 * ibmvfc_relogin - Log back into the specified device
1820 * @sdev: scsi device struct
1821 *
1822 **/
1823 static void ibmvfc_relogin(struct scsi_device *sdev)
1824 {
1825 struct ibmvfc_host *vhost = shost_priv(sdev->host);
1826 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1827 struct ibmvfc_target *tgt;
1828 unsigned long flags;
1829
1830 spin_lock_irqsave(vhost->host->host_lock, flags);
1831 list_for_each_entry(tgt, &vhost->targets, queue) {
1832 if (rport == tgt->rport) {
1833 ibmvfc_del_tgt(tgt);
1834 break;
1835 }
1836 }
1837
1838 ibmvfc_reinit_host(vhost);
1839 spin_unlock_irqrestore(vhost->host->host_lock, flags);
1840 }
1841
1842 /**
1843 * ibmvfc_scsi_done - Handle responses from commands
1844 * @evt: ibmvfc event to be handled
1845 *
1846 * Used as a callback when sending scsi cmds.
1847 **/
1848 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1849 {
1850 struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1851 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1852 struct scsi_cmnd *cmnd = evt->cmnd;
1853 u32 rsp_len = 0;
1854 u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1855
1856 if (cmnd) {
1857 if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1858 scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1859 else if (rsp->flags & FCP_RESID_UNDER)
1860 scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1861 else
1862 scsi_set_resid(cmnd, 0);
1863
1864 if (vfc_cmd->status) {
1865 cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1866
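/*
 * Any FCP response information precedes the sense data in the
 * payload, so skip rsp_len bytes of it and clamp the sense length
 * to what fits in the midlayer's sense buffer.
 */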
1867 if (rsp->flags & FCP_RSP_LEN_VALID)
1868 rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1869 if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1870 sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1871 if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1872 memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1873 if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1874 (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1875 ibmvfc_relogin(cmnd->device);
1876
1877 if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1878 cmnd->result = (DID_ERROR << 16);
1879
1880 ibmvfc_log_error(evt);
1881 }
1882
1883 if (!cmnd->result &&
1884 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1885 cmnd->result = (DID_ERROR << 16);
1886
1887 scsi_dma_unmap(cmnd);
1888 scsi_done(cmnd);
1889 }
1890
1891 ibmvfc_free_event(evt);
1892 }
1893
1894 /**
1895 * ibmvfc_host_chkready - Check if the host can accept commands
1896 * @vhost: struct ibmvfc host
1897 *
1898 * Returns:
1899 * 0 if host can accept command / SCSI result (DID_* << 16) if not
1900 **/
1901 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1902 {
1903 int result = 0;
1904
1905 switch (vhost->state) {
1906 case IBMVFC_LINK_DEAD:
1907 case IBMVFC_HOST_OFFLINE:
1908 result = DID_NO_CONNECT << 16;
1909 break;
1910 case IBMVFC_NO_CRQ:
1911 case IBMVFC_INITIALIZING:
1912 case IBMVFC_HALTED:
1913 case IBMVFC_LINK_DOWN:
1914 result = DID_REQUEUE << 16;
1915 break;
1916 case IBMVFC_ACTIVE:
1917 result = 0;
1918 break;
1919 }
1920
1921 return result;
1922 }
1923
1924 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1925 {
1926 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1927 struct ibmvfc_host *vhost = evt->vhost;
1928 struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1929 struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1930 struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1931 size_t offset;
1932
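/*
 * Adapters advertising IBMVFC_HANDLE_VF_WWPN use the v2 command layout,
 * which also carries the target WWPN; older adapters use v1.  Either
 * way, the response descriptor points at the rsp member of the layout
 * in use, expressed as an offset from the event's mapped buffer.
 */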
1933 memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1934 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1935 offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1936 vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1937 } else
1938 offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1939 vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1940 vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1941 vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1942 vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1943 vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1944 vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1945 vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1946 int_to_scsilun(sdev->lun, &iu->lun);
1947
1948 return vfc_cmd;
1949 }
1950
1951 /**
1952 * ibmvfc_queuecommand - The queuecommand function of the scsi template
1953 * @shost: scsi host struct
1954 * @cmnd: struct scsi_cmnd to be executed
1955 *
1956 * Returns:
1957 * 0 on success / other on failure
1958 **/
1959 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1960 {
1961 struct ibmvfc_host *vhost = shost_priv(shost);
1962 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1963 struct ibmvfc_cmd *vfc_cmd;
1964 struct ibmvfc_fcp_cmd_iu *iu;
1965 struct ibmvfc_event *evt;
1966 u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
1967 u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1968 u16 scsi_channel;
1969 int rc;
1970
1971 if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1972 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1973 cmnd->result = rc;
1974 scsi_done(cmnd);
1975 return 0;
1976 }
1977
1978 cmnd->result = (DID_OK << 16);
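/*
 * With sub-CRQ channels enabled, fold the block layer hardware queue
 * index onto the active channels and issue on that channel's queue;
 * otherwise all commands go through the single primary CRQ.
 */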
1979 if (vhost->using_channels) {
1980 scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1981 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1982 if (!evt)
1983 return SCSI_MLQUEUE_HOST_BUSY;
1984
1985 evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1986 } else {
1987 evt = ibmvfc_get_event(&vhost->crq);
1988 if (!evt)
1989 return SCSI_MLQUEUE_HOST_BUSY;
1990 }
1991
1992 ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1993 evt->cmnd = cmnd;
1994
1995 vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1996 iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1997
1998 iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1999 memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
2000
2001 if (cmnd->flags & SCMD_TAGGED) {
2002 vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
2003 iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
2004 }
2005
2006 vfc_cmd->correlation = cpu_to_be64((u64)evt);
2007
2008 if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
2009 return ibmvfc_send_event(evt, vhost, 0);
2010
2011 ibmvfc_free_event(evt);
2012 if (rc == -ENOMEM)
2013 return SCSI_MLQUEUE_HOST_BUSY;
2014
2015 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2016 scmd_printk(KERN_ERR, cmnd,
2017 "Failed to map DMA buffer for command. rc=%d\n", rc);
2018
2019 cmnd->result = DID_ERROR << 16;
2020 scsi_done(cmnd);
2021 return 0;
2022 }
2023
2024 /**
2025 * ibmvfc_sync_completion - Signal that a synchronous command has completed
2026 * @evt: ibmvfc event struct
2027 *
2028 **/
2029 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
2030 {
2031 /* copy the response back */
2032 if (evt->sync_iu)
2033 *evt->sync_iu = *evt->xfer_iu;
2034
2035 complete(&evt->comp);
2036 }
2037
2038 /**
2039 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
2040 * @evt: struct ibmvfc_event
2041 *
2042 **/
2043 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
2044 {
2045 struct ibmvfc_host *vhost = evt->vhost;
2046
2047 ibmvfc_free_event(evt);
2048 vhost->aborting_passthru = 0;
2049 dev_info(vhost->dev, "Passthru command cancelled\n");
2050 }
2051
2052 /**
2053 * ibmvfc_bsg_timeout - Handle a BSG timeout
2054 * @job: struct bsg_job that timed out
2055 *
2056 * Returns:
2057 * 0 on success / other on failure
2058 **/
2059 static int ibmvfc_bsg_timeout(struct bsg_job *job)
2060 {
2061 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2062 unsigned long port_id = (unsigned long)job->dd_data;
2063 struct ibmvfc_event *evt;
2064 struct ibmvfc_tmf *tmf;
2065 unsigned long flags;
2066 int rc;
2067
2068 ENTER;
2069 spin_lock_irqsave(vhost->host->host_lock, flags);
2070 if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2071 __ibmvfc_reset_host(vhost);
2072 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2073 return 0;
2074 }
2075
2076 vhost->aborting_passthru = 1;
2077 evt = ibmvfc_get_reserved_event(&vhost->crq);
2078 if (!evt) {
2079 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2080 return -ENOMEM;
2081 }
2082
2083 ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2084
2085 tmf = &evt->iu.tmf;
2086 memset(tmf, 0, sizeof(*tmf));
2087 tmf->common.version = cpu_to_be32(1);
2088 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2089 tmf->common.length = cpu_to_be16(sizeof(*tmf));
2090 tmf->scsi_id = cpu_to_be64(port_id);
2091 tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2092 tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2093 rc = ibmvfc_send_event(evt, vhost, default_timeout);
2094
2095 if (rc != 0) {
2096 vhost->aborting_passthru = 0;
2097 dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2098 rc = -EIO;
2099 } else
2100 dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2101 port_id);
2102
2103 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2104
2105 LEAVE;
2106 return rc;
2107 }
2108
2109 /**
2110 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2111 * @vhost: struct ibmvfc_host to send command
2112 * @port_id: port ID to send command
2113 *
2114 * Returns:
2115 * 0 on success / other on failure
2116 **/
2117 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2118 {
2119 struct ibmvfc_port_login *plogi;
2120 struct ibmvfc_target *tgt;
2121 struct ibmvfc_event *evt;
2122 union ibmvfc_iu rsp_iu;
2123 unsigned long flags;
2124 int rc = 0, issue_login = 1;
2125
2126 ENTER;
2127 spin_lock_irqsave(vhost->host->host_lock, flags);
2128 list_for_each_entry(tgt, &vhost->targets, queue) {
2129 if (tgt->scsi_id == port_id) {
2130 issue_login = 0;
2131 break;
2132 }
2133 }
2134
2135 if (!issue_login)
2136 goto unlock_out;
2137 if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2138 goto unlock_out;
2139
2140 evt = ibmvfc_get_reserved_event(&vhost->crq);
2141 if (!evt) {
2142 rc = -ENOMEM;
2143 goto unlock_out;
2144 }
2145 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2146 plogi = &evt->iu.plogi;
2147 memset(plogi, 0, sizeof(*plogi));
2148 plogi->common.version = cpu_to_be32(1);
2149 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2150 plogi->common.length = cpu_to_be16(sizeof(*plogi));
2151 plogi->scsi_id = cpu_to_be64(port_id);
2152 evt->sync_iu = &rsp_iu;
2153 init_completion(&evt->comp);
2154
2155 rc = ibmvfc_send_event(evt, vhost, default_timeout);
2156 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2157
2158 if (rc)
2159 return -EIO;
2160
2161 wait_for_completion(&evt->comp);
2162
2163 if (rsp_iu.plogi.common.status)
2164 rc = -EIO;
2165
2166 spin_lock_irqsave(vhost->host->host_lock, flags);
2167 ibmvfc_free_event(evt);
2168 unlock_out:
2169 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2170 LEAVE;
2171 return rc;
2172 }
2173
2174 /**
2175 * ibmvfc_bsg_request - Handle a BSG request
2176 * @job: struct bsg_job to be executed
2177 *
2178 * Returns:
2179 * 0 on success / other on failure
2180 **/
2181 static int ibmvfc_bsg_request(struct bsg_job *job)
2182 {
2183 struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2184 struct fc_rport *rport = fc_bsg_to_rport(job);
2185 struct ibmvfc_passthru_mad *mad;
2186 struct ibmvfc_event *evt;
2187 union ibmvfc_iu rsp_iu;
2188 unsigned long flags, port_id = -1;
2189 struct fc_bsg_request *bsg_request = job->request;
2190 struct fc_bsg_reply *bsg_reply = job->reply;
2191 unsigned int code = bsg_request->msgcode;
2192 int rc = 0, req_seg, rsp_seg, issue_login = 0;
2193 u32 fc_flags, rsp_len;
2194
2195 ENTER;
2196 bsg_reply->reply_payload_rcv_len = 0;
2197 if (rport)
2198 port_id = rport->port_id;
2199
2200 switch (code) {
2201 case FC_BSG_HST_ELS_NOLOGIN:
2202 port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2203 (bsg_request->rqst_data.h_els.port_id[1] << 8) |
2204 bsg_request->rqst_data.h_els.port_id[2];
2205 fallthrough;
2206 case FC_BSG_RPT_ELS:
2207 fc_flags = IBMVFC_FC_ELS;
2208 break;
2209 case FC_BSG_HST_CT:
2210 issue_login = 1;
2211 port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2212 (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2213 bsg_request->rqst_data.h_ct.port_id[2];
2214 fallthrough;
2215 case FC_BSG_RPT_CT:
2216 fc_flags = IBMVFC_FC_CT_IU;
2217 break;
2218 default:
2219 return -ENOTSUPP;
2220 }
2221
2222 if (port_id == -1)
2223 return -EINVAL;
2224 if (!mutex_trylock(&vhost->passthru_mutex))
2225 return -EBUSY;
2226
2227 job->dd_data = (void *)port_id;
2228 req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2229 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2230
2231 if (!req_seg) {
2232 mutex_unlock(&vhost->passthru_mutex);
2233 return -ENOMEM;
2234 }
2235
2236 rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2237 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2238
2239 if (!rsp_seg) {
2240 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2241 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2242 mutex_unlock(&vhost->passthru_mutex);
2243 return -ENOMEM;
2244 }
2245
2246 if (req_seg > 1 || rsp_seg > 1) {
2247 rc = -EINVAL;
2248 goto out;
2249 }
2250
2251 if (issue_login)
2252 rc = ibmvfc_bsg_plogi(vhost, port_id);
2253
2254 spin_lock_irqsave(vhost->host->host_lock, flags);
2255
2256 if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2257 unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2258 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2259 goto out;
2260 }
2261
2262 evt = ibmvfc_get_reserved_event(&vhost->crq);
2263 if (!evt) {
2264 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2265 rc = -ENOMEM;
2266 goto out;
2267 }
2268 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2269 mad = &evt->iu.passthru;
2270
2271 memset(mad, 0, sizeof(*mad));
2272 mad->common.version = cpu_to_be32(1);
2273 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2274 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2275
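/*
 * cmd_ioba describes the FC IU embedded in this MAD (the iu member),
 * expressed as an offset from the DMA address of the event's transfer
 * buffer.
 */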
2276 mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2277 offsetof(struct ibmvfc_passthru_mad, iu));
2278 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2279
2280 mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2281 mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2282 mad->iu.flags = cpu_to_be32(fc_flags);
2283 mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2284
2285 mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2286 mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2287 mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2288 mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2289 mad->iu.scsi_id = cpu_to_be64(port_id);
2290 mad->iu.tag = cpu_to_be64((u64)evt);
2291 rsp_len = be32_to_cpu(mad->iu.rsp.len);
2292
2293 evt->sync_iu = &rsp_iu;
2294 init_completion(&evt->comp);
2295 rc = ibmvfc_send_event(evt, vhost, 0);
2296 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2297
2298 if (rc) {
2299 rc = -EIO;
2300 goto out;
2301 }
2302
2303 wait_for_completion(&evt->comp);
2304
2305 if (rsp_iu.passthru.common.status)
2306 rc = -EIO;
2307 else
2308 bsg_reply->reply_payload_rcv_len = rsp_len;
2309
2310 spin_lock_irqsave(vhost->host->host_lock, flags);
2311 ibmvfc_free_event(evt);
2312 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2313 bsg_reply->result = rc;
2314 bsg_job_done(job, bsg_reply->result,
2315 bsg_reply->reply_payload_rcv_len);
2316 rc = 0;
2317 out:
2318 dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2319 job->request_payload.sg_cnt, DMA_TO_DEVICE);
2320 dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2321 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2322 mutex_unlock(&vhost->passthru_mutex);
2323 LEAVE;
2324 return rc;
2325 }
2326
2327 /**
2328 * ibmvfc_reset_device - Reset the device with the specified reset type
2329 * @sdev: scsi device to reset
2330 * @type: reset type
2331 * @desc: reset type description for log messages
2332 *
2333 * Returns:
2334 * 0 on success / other on failure
2335 **/
2336 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2337 {
2338 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2339 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2340 struct ibmvfc_cmd *tmf;
2341 struct ibmvfc_event *evt = NULL;
2342 union ibmvfc_iu rsp_iu;
2343 struct ibmvfc_fcp_cmd_iu *iu;
2344 struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2345 int rsp_rc = -EBUSY;
2346 unsigned long flags;
2347 int rsp_code = 0;
2348
2349 spin_lock_irqsave(vhost->host->host_lock, flags);
2350 if (vhost->state == IBMVFC_ACTIVE) {
2351 if (vhost->using_channels)
2352 evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2353 else
2354 evt = ibmvfc_get_event(&vhost->crq);
2355
2356 if (!evt) {
2357 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2358 return -ENOMEM;
2359 }
2360
2361 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2362 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2363 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2364
2365 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2366 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2367 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2368 iu->tmf_flags = type;
2369 evt->sync_iu = &rsp_iu;
2370
2371 init_completion(&evt->comp);
2372 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2373 }
2374 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2375
2376 if (rsp_rc != 0) {
2377 sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2378 desc, rsp_rc);
2379 return -EIO;
2380 }
2381
2382 sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2383 wait_for_completion(&evt->comp);
2384
2385 if (rsp_iu.cmd.status)
2386 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2387
2388 if (rsp_code) {
2389 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2390 rsp_code = fc_rsp->data.info.rsp_code;
2391
2392 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2393 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2394 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2395 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2396 fc_rsp->scsi_status);
2397 rsp_rc = -EIO;
2398 } else
2399 sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2400
2401 spin_lock_irqsave(vhost->host->host_lock, flags);
2402 ibmvfc_free_event(evt);
2403 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2404 return rsp_rc;
2405 }
2406
2407 /**
2408 * ibmvfc_match_rport - Match function for specified remote port
2409 * @evt: ibmvfc event struct
2410 * @rport: device to match
2411 *
2412 * Returns:
2413 * 1 if event matches rport / 0 if event does not match rport
2414 **/
2415 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2416 {
2417 struct fc_rport *cmd_rport;
2418
2419 if (evt->cmnd) {
2420 cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2421 if (cmd_rport == rport)
2422 return 1;
2423 }
2424 return 0;
2425 }
2426
2427 /**
2428 * ibmvfc_match_target - Match function for specified target
2429 * @evt: ibmvfc event struct
2430 * @device: device to match (starget)
2431 *
2432 * Returns:
2433 * 1 if event matches starget / 0 if event does not match starget
2434 **/
2435 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2436 {
2437 if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2438 return 1;
2439 return 0;
2440 }
2441
2442 /**
2443 * ibmvfc_match_lun - Match function for specified LUN
2444 * @evt: ibmvfc event struct
2445 * @device: device to match (sdev)
2446 *
2447 * Returns:
2448 * 1 if event matches sdev / 0 if event does not match sdev
2449 **/
2450 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2451 {
2452 if (evt->cmnd && evt->cmnd->device == device)
2453 return 1;
2454 return 0;
2455 }
2456
2457 /**
2458 * ibmvfc_event_is_free - Check if event is free or not
2459 * @evt: ibmvfc event struct
2460 *
2461 * Returns:
2462 * true / false
2463 **/
2464 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2465 {
2466 struct ibmvfc_event *loop_evt;
2467
2468 list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2469 if (loop_evt == evt)
2470 return true;
2471
2472 return false;
2473 }
2474
2475 /**
2476 * ibmvfc_wait_for_ops - Wait for ops to complete
2477 * @vhost: ibmvfc host struct
2478 * @device: device to match (starget or sdev)
2479 * @match: match function
2480 *
2481 * Returns:
2482 * SUCCESS / FAILED
2483 **/
2484 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2485 int (*match) (struct ibmvfc_event *, void *))
2486 {
2487 struct ibmvfc_event *evt;
2488 DECLARE_COMPLETION_ONSTACK(comp);
2489 int wait, i, q_index, q_size;
2490 unsigned long flags;
2491 signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2492 struct ibmvfc_queue *queues;
2493
2494 ENTER;
2495 if (vhost->mq_enabled && vhost->using_channels) {
2496 queues = vhost->scsi_scrqs.scrqs;
2497 q_size = vhost->scsi_scrqs.active_queues;
2498 } else {
2499 queues = &vhost->crq;
2500 q_size = 1;
2501 }
2502
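/*
 * Scan every event pool for outstanding events that match, attach a
 * completion to each, and wait for them all to finish.  On timeout,
 * detach the completions again so a late response cannot touch the
 * on-stack completion after this function returns.
 */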
2503 do {
2504 wait = 0;
2505 spin_lock_irqsave(vhost->host->host_lock, flags);
2506 for (q_index = 0; q_index < q_size; q_index++) {
2507 spin_lock(&queues[q_index].l_lock);
2508 for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2509 evt = &queues[q_index].evt_pool.events[i];
2510 if (!ibmvfc_event_is_free(evt)) {
2511 if (match(evt, device)) {
2512 evt->eh_comp = &comp;
2513 wait++;
2514 }
2515 }
2516 }
2517 spin_unlock(&queues[q_index].l_lock);
2518 }
2519 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2520
2521 if (wait) {
2522 timeout = wait_for_completion_timeout(&comp, timeout);
2523
2524 if (!timeout) {
2525 wait = 0;
2526 spin_lock_irqsave(vhost->host->host_lock, flags);
2527 for (q_index = 0; q_index < q_size; q_index++) {
2528 spin_lock(&queues[q_index].l_lock);
2529 for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2530 evt = &queues[q_index].evt_pool.events[i];
2531 if (!ibmvfc_event_is_free(evt)) {
2532 if (match(evt, device)) {
2533 evt->eh_comp = NULL;
2534 wait++;
2535 }
2536 }
2537 }
2538 spin_unlock(&queues[q_index].l_lock);
2539 }
2540 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2541 if (wait)
2542 dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2543 LEAVE;
2544 return wait ? FAILED : SUCCESS;
2545 }
2546 }
2547 } while (wait);
2548
2549 LEAVE;
2550 return SUCCESS;
2551 }
2552
2553 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2554 struct scsi_device *sdev,
2555 int type)
2556 {
2557 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2558 struct scsi_target *starget = scsi_target(sdev);
2559 struct fc_rport *rport = starget_to_rport(starget);
2560 struct ibmvfc_event *evt;
2561 struct ibmvfc_tmf *tmf;
2562
2563 evt = ibmvfc_get_reserved_event(queue);
2564 if (!evt)
2565 return NULL;
2566 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2567
2568 tmf = &evt->iu.tmf;
2569 memset(tmf, 0, sizeof(*tmf));
2570 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2571 tmf->common.version = cpu_to_be32(2);
2572 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2573 } else {
2574 tmf->common.version = cpu_to_be32(1);
2575 }
2576 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2577 tmf->common.length = cpu_to_be16(sizeof(*tmf));
2578 tmf->scsi_id = cpu_to_be64(rport->port_id);
2579 int_to_scsilun(sdev->lun, &tmf->lun);
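/*
 * Drop IBMVFC_TMF_SUPPRESS_ABTS if the adapter cannot honor it.  Once
 * the host is no longer active, only a suppress-ABTS request (plus the
 * LUA-valid flag) is still passed through.
 */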
2580 if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2581 type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2582 if (vhost->state == IBMVFC_ACTIVE)
2583 tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2584 else
2585 tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2586 tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2587 tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2588
2589 init_completion(&evt->comp);
2590
2591 return evt;
2592 }
2593
2594 static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
2595 {
2596 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2597 struct ibmvfc_event *evt, *found_evt, *temp;
2598 struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
2599 unsigned long flags;
2600 int num_hwq, i;
2601 int fail = 0;
2602 LIST_HEAD(cancelq);
2603 u16 status;
2604
2605 ENTER;
2606 spin_lock_irqsave(vhost->host->host_lock, flags);
2607 num_hwq = vhost->scsi_scrqs.active_queues;
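/*
 * Issue one cancel TMF per sub-CRQ that has an outstanding command for
 * this device, collect the cancel events on a local list, and wait for
 * each response after dropping the host lock.
 */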
2608 for (i = 0; i < num_hwq; i++) {
2609 spin_lock(queues[i].q_lock);
2610 spin_lock(&queues[i].l_lock);
2611 found_evt = NULL;
2612 list_for_each_entry(evt, &queues[i].sent, queue_list) {
2613 if (evt->cmnd && evt->cmnd->device == sdev) {
2614 found_evt = evt;
2615 break;
2616 }
2617 }
2618 spin_unlock(&queues[i].l_lock);
2619
2620 if (found_evt && vhost->logged_in) {
2621 evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2622 if (!evt) {
2623 spin_unlock(queues[i].q_lock);
2624 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2625 return -ENOMEM;
2626 }
2627 evt->sync_iu = &queues[i].cancel_rsp;
2628 ibmvfc_send_event(evt, vhost, default_timeout);
2629 list_add_tail(&evt->cancel, &cancelq);
2630 }
2631
2632 spin_unlock(queues[i].q_lock);
2633 }
2634 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2635
2636 if (list_empty(&cancelq)) {
2637 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2638 sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2639 return 0;
2640 }
2641
2642 sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2643
2644 list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2645 wait_for_completion(&evt->comp);
2646 status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2647 list_del(&evt->cancel);
2648 ibmvfc_free_event(evt);
2649
2650 if (status != IBMVFC_MAD_SUCCESS) {
2651 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2652 switch (status) {
2653 case IBMVFC_MAD_DRIVER_FAILED:
2654 case IBMVFC_MAD_CRQ_ERROR:
2655 /* Host adapter most likely going through reset, return success so
2656 * the caller will wait for the command being cancelled to get returned
2657 */
2658 break;
2659 default:
2660 fail = 1;
2661 break;
2662 }
2663 }
2664 }
2665
2666 if (fail)
2667 return -EIO;
2668
2669 sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2670 LEAVE;
2671 return 0;
2672 }
2673
2674 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2675 {
2676 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2677 struct ibmvfc_event *evt, *found_evt;
2678 union ibmvfc_iu rsp;
2679 int rsp_rc = -EBUSY;
2680 unsigned long flags;
2681 u16 status;
2682
2683 ENTER;
2684 found_evt = NULL;
2685 spin_lock_irqsave(vhost->host->host_lock, flags);
2686 spin_lock(&vhost->crq.l_lock);
2687 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2688 if (evt->cmnd && evt->cmnd->device == sdev) {
2689 found_evt = evt;
2690 break;
2691 }
2692 }
2693 spin_unlock(&vhost->crq.l_lock);
2694
2695 if (!found_evt) {
2696 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2697 sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2698 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2699 return 0;
2700 }
2701
2702 if (vhost->logged_in) {
2703 evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2704 evt->sync_iu = &rsp;
2705 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2706 }
2707
2708 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2709
2710 if (rsp_rc != 0) {
2711 sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2712 /* If failure is received, the host adapter is most likely going
2713 through reset, return success so the caller will wait for the command
2714 being cancelled to get returned */
2715 return 0;
2716 }
2717
2718 sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2719
2720 wait_for_completion(&evt->comp);
2721 status = be16_to_cpu(rsp.mad_common.status);
2722 spin_lock_irqsave(vhost->host->host_lock, flags);
2723 ibmvfc_free_event(evt);
2724 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2725
2726 if (status != IBMVFC_MAD_SUCCESS) {
2727 sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2728 switch (status) {
2729 case IBMVFC_MAD_DRIVER_FAILED:
2730 case IBMVFC_MAD_CRQ_ERROR:
2731 /* Host adapter most likely going through reset, return success so
2732 the caller will wait for the command being cancelled to get returned */
2733 return 0;
2734 default:
2735 return -EIO;
2736 }
2737 }
2738
2739 sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2740 return 0;
2741 }
2742
2743 /**
2744 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2745 * @sdev: scsi device to cancel commands
2746 * @type: type of error recovery being performed
2747 *
2748 * This sends a cancel to the VIOS for the specified device. This does
2749 * NOT send any abort to the actual device. That must be done separately.
2750 *
2751 * Returns:
2752 * 0 on success / other on failure
2753 **/
2754 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2755 {
2756 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2757
2758 if (vhost->mq_enabled && vhost->using_channels)
2759 return ibmvfc_cancel_all_mq(sdev, type);
2760 else
2761 return ibmvfc_cancel_all_sq(sdev, type);
2762 }
2763
2764 /**
2765 * ibmvfc_match_key - Match function for specified cancel key
2766 * @evt: ibmvfc event struct
2767 * @key: cancel key to match
2768 *
2769 * Returns:
2770 * 1 if event matches key / 0 if event does not match key
2771 **/
2772 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2773 {
2774 unsigned long cancel_key = (unsigned long)key;
2775
2776 if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2777 be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2778 return 1;
2779 return 0;
2780 }
2781
2782 /**
2783 * ibmvfc_match_evt - Match function for specified event
2784 * @evt: ibmvfc event struct
2785 * @match: event to match
2786 *
2787 * Returns:
2788 * 1 if event matches the specified event / 0 if it does not
2789 **/
2790 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2791 {
2792 if (evt == match)
2793 return 1;
2794 return 0;
2795 }
2796
2797 /**
2798 * ibmvfc_abort_task_set - Abort outstanding commands to the device
2799 * @sdev: scsi device to abort commands
2800 *
2801 * This sends an Abort Task Set to the VIOS for the specified device. This does
2802 * NOT send any cancel to the VIOS. That must be done separately.
2803 *
2804 * Returns:
2805 * 0 on success / other on failure
2806 **/
2807 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2808 {
2809 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2810 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2811 struct ibmvfc_cmd *tmf;
2812 struct ibmvfc_event *evt, *found_evt;
2813 union ibmvfc_iu rsp_iu;
2814 struct ibmvfc_fcp_cmd_iu *iu;
2815 struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2816 int rc, rsp_rc = -EBUSY;
2817 unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2818 int rsp_code = 0;
2819
2820 found_evt = NULL;
2821 spin_lock_irqsave(vhost->host->host_lock, flags);
2822 spin_lock(&vhost->crq.l_lock);
2823 list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2824 if (evt->cmnd && evt->cmnd->device == sdev) {
2825 found_evt = evt;
2826 break;
2827 }
2828 }
2829 spin_unlock(&vhost->crq.l_lock);
2830
2831 if (!found_evt) {
2832 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2833 sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2834 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2835 return 0;
2836 }
2837
2838 if (vhost->state == IBMVFC_ACTIVE) {
2839 evt = ibmvfc_get_event(&vhost->crq);
2840 if (!evt) {
2841 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2842 return -ENOMEM;
2843 }
2844 ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2845 tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2846 iu = ibmvfc_get_fcp_iu(vhost, tmf);
2847
2848 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2849 tmf->target_wwpn = cpu_to_be64(rport->port_name);
2850 iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2851 tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2852 evt->sync_iu = &rsp_iu;
2853
2854 tmf->correlation = cpu_to_be64((u64)evt);
2855
2856 init_completion(&evt->comp);
2857 rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2858 }
2859
2860 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2861
2862 if (rsp_rc != 0) {
2863 sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2864 return -EIO;
2865 }
2866
2867 sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2868 timeout = wait_for_completion_timeout(&evt->comp, timeout);
2869
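/*
 * If the abort itself times out, escalate: cancel everything queued for
 * the device, and if commands still refuse to come back, reset (and
 * finally hard reset) the host before giving up.
 */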
2870 if (!timeout) {
2871 rc = ibmvfc_cancel_all(sdev, 0);
2872 if (!rc) {
2873 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2874 if (rc == SUCCESS)
2875 rc = 0;
2876 }
2877
2878 if (rc) {
2879 sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2880 ibmvfc_reset_host(vhost);
2881 rsp_rc = -EIO;
2882 rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2883
2884 if (rc == SUCCESS)
2885 rsp_rc = 0;
2886
2887 rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2888 if (rc != SUCCESS) {
2889 spin_lock_irqsave(vhost->host->host_lock, flags);
2890 ibmvfc_hard_reset_host(vhost);
2891 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2892 rsp_rc = 0;
2893 }
2894
2895 goto out;
2896 }
2897 }
2898
2899 if (rsp_iu.cmd.status)
2900 rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2901
2902 if (rsp_code) {
2903 if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2904 rsp_code = fc_rsp->data.info.rsp_code;
2905
2906 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2907 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2908 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2909 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2910 fc_rsp->scsi_status);
2911 rsp_rc = -EIO;
2912 } else
2913 sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2914
2915 out:
2916 spin_lock_irqsave(vhost->host->host_lock, flags);
2917 ibmvfc_free_event(evt);
2918 spin_unlock_irqrestore(vhost->host->host_lock, flags);
2919 return rsp_rc;
2920 }
2921
2922 /**
2923 * ibmvfc_eh_abort_handler - Abort a command
2924 * @cmd: scsi command to abort
2925 *
2926 * Returns:
2927 * SUCCESS / FAST_IO_FAIL / FAILED
2928 **/
2929 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2930 {
2931 struct scsi_device *sdev = cmd->device;
2932 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2933 int cancel_rc, block_rc;
2934 int rc = FAILED;
2935
2936 ENTER;
2937 block_rc = fc_block_scsi_eh(cmd);
2938 ibmvfc_wait_while_resetting(vhost);
2939 if (block_rc != FAST_IO_FAIL) {
2940 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2941 ibmvfc_abort_task_set(sdev);
2942 } else
2943 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2944
2945 if (!cancel_rc)
2946 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2947
2948 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2949 rc = FAST_IO_FAIL;
2950
2951 LEAVE;
2952 return rc;
2953 }
2954
2955 /**
2956 * ibmvfc_eh_device_reset_handler - Reset a single LUN
2957 * @cmd: scsi command struct
2958 *
2959 * Returns:
2960 * SUCCESS / FAST_IO_FAIL / FAILED
2961 **/
2962 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2963 {
2964 struct scsi_device *sdev = cmd->device;
2965 struct ibmvfc_host *vhost = shost_priv(sdev->host);
2966 int cancel_rc, block_rc, reset_rc = 0;
2967 int rc = FAILED;
2968
2969 ENTER;
2970 block_rc = fc_block_scsi_eh(cmd);
2971 ibmvfc_wait_while_resetting(vhost);
2972 if (block_rc != FAST_IO_FAIL) {
2973 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2974 reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2975 } else
2976 cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2977
2978 if (!cancel_rc && !reset_rc)
2979 rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2980
2981 if (block_rc == FAST_IO_FAIL && rc != FAILED)
2982 rc = FAST_IO_FAIL;
2983
2984 LEAVE;
2985 return rc;
2986 }
2987
2988 /**
2989 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2990 * @sdev: scsi device struct
2991 * @data: return code
2992 *
2993 **/
2994 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2995 {
2996 unsigned long *rc = data;
2997 *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2998 }
2999
3000 /**
3001 * ibmvfc_eh_target_reset_handler - Reset the target
3002 * @cmd: scsi command struct
3003 *
3004 * Returns:
3005 * SUCCESS / FAST_IO_FAIL / FAILED
3006 **/
3007 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
3008 {
3009 struct scsi_target *starget = scsi_target(cmd->device);
3010 struct fc_rport *rport = starget_to_rport(starget);
3011 struct Scsi_Host *shost = rport_to_shost(rport);
3012 struct ibmvfc_host *vhost = shost_priv(shost);
3013 int block_rc;
3014 int reset_rc = 0;
3015 int rc = FAILED;
3016 unsigned long cancel_rc = 0;
3017 bool tgt_reset = false;
3018
3019 ENTER;
3020 block_rc = fc_block_rport(rport);
3021 ibmvfc_wait_while_resetting(vhost);
3022 if (block_rc != FAST_IO_FAIL) {
3023 struct scsi_device *sdev;
3024
3025 shost_for_each_device(sdev, shost) {
3026 if ((sdev->channel != starget->channel) ||
3027 (sdev->id != starget->id))
3028 continue;
3029
3030 cancel_rc |= ibmvfc_cancel_all(sdev,
3031 IBMVFC_TMF_TGT_RESET);
3032 if (!tgt_reset) {
3033 reset_rc = ibmvfc_reset_device(sdev,
3034 IBMVFC_TARGET_RESET, "target");
3035 tgt_reset = true;
3036 }
3037 }
3038 } else
3039 starget_for_each_device(starget, &cancel_rc,
3040 ibmvfc_dev_cancel_all_noreset);
3041
3042 if (!cancel_rc && !reset_rc)
3043 rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
3044
3045 if (block_rc == FAST_IO_FAIL && rc != FAILED)
3046 rc = FAST_IO_FAIL;
3047
3048 LEAVE;
3049 return rc;
3050 }
3051
3052 /**
3053 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
3054 * @cmd: struct scsi_cmnd having problems
3055 *
3056 **/
3057 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
3058 {
3059 int rc;
3060 struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
3061
3062 dev_err(vhost->dev, "Resetting connection due to error recovery\n");
3063 rc = ibmvfc_issue_fc_host_lip(vhost->host);
3064
3065 return rc ? FAILED : SUCCESS;
3066 }
3067
3068 /**
3069 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
3070 * @rport: rport struct
3071 *
3072 * Return value:
3073 * none
3074 **/
3075 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
3076 {
3077 struct Scsi_Host *shost = rport_to_shost(rport);
3078 struct ibmvfc_host *vhost = shost_priv(shost);
3079 struct fc_rport *dev_rport;
3080 struct scsi_device *sdev;
3081 struct ibmvfc_target *tgt;
3082 unsigned long rc, flags;
3083 unsigned int found;
3084
3085 ENTER;
3086 shost_for_each_device(sdev, shost) {
3087 dev_rport = starget_to_rport(scsi_target(sdev));
3088 if (dev_rport != rport)
3089 continue;
3090 ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
3091 }
3092
3093 rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
3094
3095 if (rc == FAILED)
3096 ibmvfc_issue_fc_host_lip(shost);
3097
3098 spin_lock_irqsave(shost->host_lock, flags);
3099 found = 0;
3100 list_for_each_entry(tgt, &vhost->targets, queue) {
3101 if (tgt->scsi_id == rport->port_id) {
3102 found++;
3103 break;
3104 }
3105 }
3106
3107 if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3108 /*
3109 * If we get here, that means we previously attempted to send
3110 * an implicit logout to the target but it failed, most likely
3111 * due to I/O being pending, so we need to send it again
3112 */
3113 ibmvfc_del_tgt(tgt);
3114 ibmvfc_reinit_host(vhost);
3115 }
3116
3117 spin_unlock_irqrestore(shost->host_lock, flags);
3118 LEAVE;
3119 }
3120
3121 static const struct ibmvfc_async_desc ae_desc [] = {
3122 { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3123 { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3124 { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3125 { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3126 { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3127 { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
3128 { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
3129 { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
3130 { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
3131 { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
3132 { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
3133 { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
3134 { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
3135 };
3136
3137 static const struct ibmvfc_async_desc unknown_ae = {
3138 "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
3139 };
3140
3141 /**
3142 * ibmvfc_get_ae_desc - Get text description for async event
3143 * @ae: async event
3144 *
3145 **/
3146 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3147 {
3148 int i;
3149
3150 for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3151 if (ae_desc[i].ae == ae)
3152 return &ae_desc[i];
3153
3154 return &unknown_ae;
3155 }
3156
3157 static const struct {
3158 enum ibmvfc_ae_link_state state;
3159 const char *desc;
3160 } link_desc [] = {
3161 { IBMVFC_AE_LS_LINK_UP, " link up" },
3162 { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
3163 { IBMVFC_AE_LS_LINK_DOWN, " link down" },
3164 { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
3165 };
3166
3167 /**
3168 * ibmvfc_get_link_state - Get text description for link state
3169 * @state: link state
3170 *
3171 **/
3172 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3173 {
3174 int i;
3175
3176 for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3177 if (link_desc[i].state == state)
3178 return link_desc[i].desc;
3179
3180 return "";
3181 }
3182
3183 /**
3184 * ibmvfc_handle_async - Handle an async event from the adapter
3185 * @crq: crq to process
3186 * @vhost: ibmvfc host struct
3187 *
3188 **/
3189 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3190 struct ibmvfc_host *vhost)
3191 {
3192 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3193 struct ibmvfc_target *tgt;
3194
3195 ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3196 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3197 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3198 ibmvfc_get_link_state(crq->link_state));
3199
3200 switch (be64_to_cpu(crq->event)) {
3201 case IBMVFC_AE_RESUME:
3202 switch (crq->link_state) {
3203 case IBMVFC_AE_LS_LINK_DOWN:
3204 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3205 break;
3206 case IBMVFC_AE_LS_LINK_DEAD:
3207 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3208 break;
3209 case IBMVFC_AE_LS_LINK_UP:
3210 case IBMVFC_AE_LS_LINK_BOUNCED:
3211 default:
3212 vhost->events_to_log |= IBMVFC_AE_LINKUP;
3213 vhost->delay_init = 1;
3214 __ibmvfc_reset_host(vhost);
3215 break;
3216 }
3217
3218 break;
3219 case IBMVFC_AE_LINK_UP:
3220 vhost->events_to_log |= IBMVFC_AE_LINKUP;
3221 vhost->delay_init = 1;
3222 __ibmvfc_reset_host(vhost);
3223 break;
3224 case IBMVFC_AE_SCN_FABRIC:
3225 case IBMVFC_AE_SCN_DOMAIN:
3226 vhost->events_to_log |= IBMVFC_AE_RSCN;
3227 if (vhost->state < IBMVFC_HALTED) {
3228 vhost->delay_init = 1;
3229 __ibmvfc_reset_host(vhost);
3230 }
3231 break;
3232 case IBMVFC_AE_SCN_NPORT:
3233 case IBMVFC_AE_SCN_GROUP:
3234 vhost->events_to_log |= IBMVFC_AE_RSCN;
3235 ibmvfc_reinit_host(vhost);
3236 break;
3237 case IBMVFC_AE_ELS_LOGO:
3238 case IBMVFC_AE_ELS_PRLO:
3239 case IBMVFC_AE_ELS_PLOGI:
3240 list_for_each_entry(tgt, &vhost->targets, queue) {
3241 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3242 break;
3243 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3244 continue;
3245 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3246 continue;
3247 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3248 continue;
3249 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3250 tgt->logo_rcvd = 1;
3251 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3252 ibmvfc_del_tgt(tgt);
3253 ibmvfc_reinit_host(vhost);
3254 }
3255 }
3256 break;
3257 case IBMVFC_AE_LINK_DOWN:
3258 case IBMVFC_AE_ADAPTER_FAILED:
3259 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3260 break;
3261 case IBMVFC_AE_LINK_DEAD:
3262 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3263 break;
3264 case IBMVFC_AE_HALT:
3265 ibmvfc_link_down(vhost, IBMVFC_HALTED);
3266 break;
3267 default:
3268 dev_err(vhost->dev, "Unknown async event received: %lld\n", be64_to_cpu(crq->event));
3269 break;
3270 }
3271 }
3272
3273 /**
3274 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3275 * @crq: Command/Response queue
3276 * @vhost: ibmvfc host struct
3277 * @evt_doneq: Event done queue
3278 *
3279 **/
3280 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3281 struct list_head *evt_doneq)
3282 {
3283 long rc;
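/*
 * For command responses, the CRQ ioba field is the correlation token,
 * which this driver sets to the event pointer when sending; recover it
 * here and validate it against the CRQ's event pool before trusting it.
 */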
3284 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3285
3286 switch (crq->valid) {
3287 case IBMVFC_CRQ_INIT_RSP:
3288 switch (crq->format) {
3289 case IBMVFC_CRQ_INIT:
3290 dev_info(vhost->dev, "Partner initialized\n");
3291 /* Send back a response */
3292 rc = ibmvfc_send_crq_init_complete(vhost);
3293 if (rc == 0)
3294 ibmvfc_init_host(vhost);
3295 else
3296 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3297 break;
3298 case IBMVFC_CRQ_INIT_COMPLETE:
3299 dev_info(vhost->dev, "Partner initialization complete\n");
3300 ibmvfc_init_host(vhost);
3301 break;
3302 default:
3303 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3304 }
3305 return;
3306 case IBMVFC_CRQ_XPORT_EVENT:
3307 vhost->state = IBMVFC_NO_CRQ;
3308 vhost->logged_in = 0;
3309 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3310 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3311 /* We need to re-setup the interpartition connection */
3312 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3313 vhost->client_migrated = 1;
3314
3315 scsi_block_requests(vhost->host);
3316 ibmvfc_purge_requests(vhost, DID_REQUEUE);
3317 ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
3318 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3319 wake_up(&vhost->work_wait_q);
3320 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3321 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3322 ibmvfc_purge_requests(vhost, DID_ERROR);
3323 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3324 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3325 } else {
3326 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3327 }
3328 return;
3329 case IBMVFC_CRQ_CMD_RSP:
3330 break;
3331 default:
3332 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3333 return;
3334 }
3335
3336 if (crq->format == IBMVFC_ASYNC_EVENT)
3337 return;
3338
3339 /* The only kind of payload CRQs we should get are responses to
3340 * things we send. Make sure this response is to something we
3341 * actually sent
3342 */
3343 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3344 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3345 crq->ioba);
3346 return;
3347 }
3348
3349 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3350 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3351 crq->ioba);
3352 return;
3353 }
3354
3355 spin_lock(&evt->queue->l_lock);
3356 list_move_tail(&evt->queue_list, evt_doneq);
3357 spin_unlock(&evt->queue->l_lock);
3358 }
3359
3360 /**
3361 * ibmvfc_scan_finished - Check if the device scan is done.
3362 * @shost: scsi host struct
3363 * @time: current elapsed time
3364 *
3365 * Returns:
3366 * 0 if scan is not done / 1 if scan is done
3367 **/
3368 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3369 {
3370 unsigned long flags;
3371 struct ibmvfc_host *vhost = shost_priv(shost);
3372 int done = 0;
3373
3374 spin_lock_irqsave(shost->host_lock, flags);
3375 if (!vhost->scan_timeout)
3376 done = 1;
3377 else if (time >= (vhost->scan_timeout * HZ)) {
3378 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3379 "continuing initialization\n", vhost->scan_timeout);
3380 done = 1;
3381 }
3382
3383 if (vhost->scan_complete) {
3384 vhost->scan_timeout = init_timeout;
3385 done = 1;
3386 }
3387 spin_unlock_irqrestore(shost->host_lock, flags);
3388 return done;
3389 }
3390
3391 /**
3392 * ibmvfc_slave_alloc - Setup the device's task set value
3393 * @sdev: struct scsi_device device to configure
3394 *
3395 * Set the device's task set value so that error handling works as
3396 * expected.
3397 *
3398 * Returns:
3399 * 0 on success / -ENXIO if device does not exist
3400 **/
3401 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3402 {
3403 struct Scsi_Host *shost = sdev->host;
3404 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3405 struct ibmvfc_host *vhost = shost_priv(shost);
3406 unsigned long flags = 0;
3407
3408 if (!rport || fc_remote_port_chkready(rport))
3409 return -ENXIO;
3410
3411 spin_lock_irqsave(shost->host_lock, flags);
3412 sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3413 spin_unlock_irqrestore(shost->host_lock, flags);
3414 return 0;
3415 }
3416
3417 /**
3418 * ibmvfc_target_alloc - Setup the target's task set value
3419 * @starget: struct scsi_target
3420 *
3421 * Set the target's task set value so that error handling works as
3422 * expected.
3423 *
3424 * Returns:
3425 * 0 on success / -ENXIO if device does not exist
3426 **/
3427 static int ibmvfc_target_alloc(struct scsi_target *starget)
3428 {
3429 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3430 struct ibmvfc_host *vhost = shost_priv(shost);
3431 unsigned long flags = 0;
3432
3433 spin_lock_irqsave(shost->host_lock, flags);
3434 starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3435 spin_unlock_irqrestore(shost->host_lock, flags);
3436 return 0;
3437 }
3438
3439 /**
3440 * ibmvfc_slave_configure - Configure the device
3441 * @sdev: struct scsi_device device to configure
3442 *
3443 * Enable allow_restart for a device if it is a disk and bump the
3444 * block layer request timeout for disk devices.
3445 *
3446 * Returns:
3447 * 0
3448 **/
3449 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3450 {
3451 struct Scsi_Host *shost = sdev->host;
3452 unsigned long flags = 0;
3453
3454 spin_lock_irqsave(shost->host_lock, flags);
3455 if (sdev->type == TYPE_DISK) {
3456 sdev->allow_restart = 1;
3457 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3458 }
3459 spin_unlock_irqrestore(shost->host_lock, flags);
3460 return 0;
3461 }
3462
3463 /**
3464 * ibmvfc_change_queue_depth - Change the device's queue depth
3465 * @sdev: scsi device struct
3466 * @qdepth: depth to set
3467 *
3468 * Return value:
3469 * actual depth set
3470 **/
3471 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3472 {
3473 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3474 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3475
3476 return scsi_change_queue_depth(sdev, qdepth);
3477 }
3478
3479 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3480 struct device_attribute *attr, char *buf)
3481 {
3482 struct Scsi_Host *shost = class_to_shost(dev);
3483 struct ibmvfc_host *vhost = shost_priv(shost);
3484
3485 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name);
3486 }
3487
3488 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3489 struct device_attribute *attr, char *buf)
3490 {
3491 struct Scsi_Host *shost = class_to_shost(dev);
3492 struct ibmvfc_host *vhost = shost_priv(shost);
3493
3494 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name);
3495 }
3496
3497 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3498 struct device_attribute *attr, char *buf)
3499 {
3500 struct Scsi_Host *shost = class_to_shost(dev);
3501 struct ibmvfc_host *vhost = shost_priv(shost);
3502
3503 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code);
3504 }
3505
3506 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3507 struct device_attribute *attr, char *buf)
3508 {
3509 struct Scsi_Host *shost = class_to_shost(dev);
3510 struct ibmvfc_host *vhost = shost_priv(shost);
3511
3512 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name);
3513 }
3514
3515 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3516 struct device_attribute *attr, char *buf)
3517 {
3518 struct Scsi_Host *shost = class_to_shost(dev);
3519 struct ibmvfc_host *vhost = shost_priv(shost);
3520 return sysfs_emit(buf, "%d\n",
3521 be32_to_cpu(vhost->login_buf->resp.version));
3522 }
3523
3524 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3525 struct device_attribute *attr, char *buf)
3526 {
3527 struct Scsi_Host *shost = class_to_shost(dev);
3528 struct ibmvfc_host *vhost = shost_priv(shost);
3529 return sysfs_emit(buf, "%llx\n",
3530 be64_to_cpu(vhost->login_buf->resp.capabilities));
3531 }
3532
3533 /**
3534 * ibmvfc_show_log_level - Show the adapter's error logging level
3535 * @dev: class device struct
3536 * @attr: unused
3537 * @buf: buffer
3538 *
3539 * Return value:
3540 * number of bytes printed to buffer
3541 **/
3542 static ssize_t ibmvfc_show_log_level(struct device *dev,
3543 struct device_attribute *attr, char *buf)
3544 {
3545 struct Scsi_Host *shost = class_to_shost(dev);
3546 struct ibmvfc_host *vhost = shost_priv(shost);
3547 unsigned long flags = 0;
3548 int len;
3549
3550 spin_lock_irqsave(shost->host_lock, flags);
3551 len = sysfs_emit(buf, "%d\n", vhost->log_level);
3552 spin_unlock_irqrestore(shost->host_lock, flags);
3553 return len;
3554 }
3555
3556 /**
3557 * ibmvfc_store_log_level - Change the adapter's error logging level
3558 * @dev: class device struct
3559 * @attr: unused
3560 * @buf: buffer
3561 * @count: buffer size
3562 *
3563 * Return value:
3564 * number of bytes printed to buffer
3565 **/
3566 static ssize_t ibmvfc_store_log_level(struct device *dev,
3567 struct device_attribute *attr,
3568 const char *buf, size_t count)
3569 {
3570 struct Scsi_Host *shost = class_to_shost(dev);
3571 struct ibmvfc_host *vhost = shost_priv(shost);
3572 unsigned long flags = 0;
3573
3574 spin_lock_irqsave(shost->host_lock, flags);
3575 vhost->log_level = simple_strtoul(buf, NULL, 10);
3576 spin_unlock_irqrestore(shost->host_lock, flags);
3577 return strlen(buf);
3578 }
3579
3580 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3581 struct device_attribute *attr, char *buf)
3582 {
3583 struct Scsi_Host *shost = class_to_shost(dev);
3584 struct ibmvfc_host *vhost = shost_priv(shost);
3585 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3586 unsigned long flags = 0;
3587 int len;
3588
3589 spin_lock_irqsave(shost->host_lock, flags);
3590 len = sysfs_emit(buf, "%d\n", scsi->desired_queues);
3591 spin_unlock_irqrestore(shost->host_lock, flags);
3592 return len;
3593 }
3594
3595 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3596 struct device_attribute *attr,
3597 const char *buf, size_t count)
3598 {
3599 struct Scsi_Host *shost = class_to_shost(dev);
3600 struct ibmvfc_host *vhost = shost_priv(shost);
3601 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3602 unsigned long flags = 0;
3603 unsigned int channels;
3604
3605 spin_lock_irqsave(shost->host_lock, flags);
3606 channels = simple_strtoul(buf, NULL, 10);
3607 scsi->desired_queues = min(channels, shost->nr_hw_queues);
3608 ibmvfc_hard_reset_host(vhost);
3609 spin_unlock_irqrestore(shost->host_lock, flags);
3610 return strlen(buf);
3611 }
3612
3613 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3614 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3615 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3616 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3617 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3618 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3619 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3620 ibmvfc_show_log_level, ibmvfc_store_log_level);
3621 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3622 ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
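/*
 * Example sysfs usage (a sketch; "host0" is a placeholder for whatever
 * SCSI host number this adapter is assigned at probe time):
 *
 *   cat /sys/class/scsi_host/host0/partition_name
 *   echo 3 > /sys/class/scsi_host/host0/log_level
 *   echo 2 > /sys/class/scsi_host/host0/nr_scsi_channels
 *
 * Note that writing nr_scsi_channels triggers a hard reset of the host so
 * the new channel count can be renegotiated with the VIOS.
 */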
3623
3624 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3625 /**
3626 * ibmvfc_read_trace - Dump the adapter trace
3627 * @filp: open sysfs file
3628 * @kobj: kobject struct
3629 * @bin_attr: bin_attribute struct
3630 * @buf: buffer
3631 * @off: offset
3632 * @count: buffer size
3633 *
3634 * Return value:
3635 * number of bytes printed to buffer
3636 **/
3637 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3638 struct bin_attribute *bin_attr,
3639 char *buf, loff_t off, size_t count)
3640 {
3641 struct device *dev = kobj_to_dev(kobj);
3642 struct Scsi_Host *shost = class_to_shost(dev);
3643 struct ibmvfc_host *vhost = shost_priv(shost);
3644 unsigned long flags = 0;
3645 int size = IBMVFC_TRACE_SIZE;
3646 char *src = (char *)vhost->trace;
3647
3648 if (off > size)
3649 return 0;
3650 if (off + count > size) {
3651 size -= off;
3652 count = size;
3653 }
3654
3655 spin_lock_irqsave(shost->host_lock, flags);
3656 memcpy(buf, &src[off], count);
3657 spin_unlock_irqrestore(shost->host_lock, flags);
3658 return count;
3659 }
3660
3661 static struct bin_attribute ibmvfc_trace_attr = {
3662 .attr = {
3663 .name = "trace",
3664 .mode = S_IRUGO,
3665 },
3666 .size = 0,
3667 .read = ibmvfc_read_trace,
3668 };
3669 #endif
3670
3671 static struct attribute *ibmvfc_host_attrs[] = {
3672 &dev_attr_partition_name.attr,
3673 &dev_attr_device_name.attr,
3674 &dev_attr_port_loc_code.attr,
3675 &dev_attr_drc_name.attr,
3676 &dev_attr_npiv_version.attr,
3677 &dev_attr_capabilities.attr,
3678 &dev_attr_log_level.attr,
3679 &dev_attr_nr_scsi_channels.attr,
3680 NULL
3681 };
3682
3683 ATTRIBUTE_GROUPS(ibmvfc_host);
3684
3685 static const struct scsi_host_template driver_template = {
3686 .module = THIS_MODULE,
3687 .name = "IBM POWER Virtual FC Adapter",
3688 .proc_name = IBMVFC_NAME,
3689 .queuecommand = ibmvfc_queuecommand,
3690 .eh_timed_out = fc_eh_timed_out,
3691 .eh_abort_handler = ibmvfc_eh_abort_handler,
3692 .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3693 .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3694 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3695 .slave_alloc = ibmvfc_slave_alloc,
3696 .slave_configure = ibmvfc_slave_configure,
3697 .target_alloc = ibmvfc_target_alloc,
3698 .scan_finished = ibmvfc_scan_finished,
3699 .change_queue_depth = ibmvfc_change_queue_depth,
3700 .cmd_per_lun = 16,
3701 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3702 .this_id = -1,
3703 .sg_tablesize = SG_ALL,
3704 .max_sectors = IBMVFC_MAX_SECTORS,
3705 .shost_groups = ibmvfc_host_groups,
3706 .track_queue_depth = 1,
3707 };
3708
3709 /**
3710 * ibmvfc_next_async_crq - Returns the next entry in async queue
3711 * @vhost: ibmvfc host struct
3712 *
3713 * Returns:
3714 * Pointer to next entry in queue / NULL if empty
3715 **/
3716 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3717 {
3718 struct ibmvfc_queue *async_crq = &vhost->async_crq;
3719 struct ibmvfc_async_crq *crq;
3720
3721 crq = &async_crq->msgs.async[async_crq->cur];
3722 if (crq->valid & 0x80) {
3723 if (++async_crq->cur == async_crq->size)
3724 async_crq->cur = 0;
3725 rmb();
3726 } else
3727 crq = NULL;
3728
3729 return crq;
3730 }
3731
3732 /**
3733 * ibmvfc_next_crq - Returns the next entry in message queue
3734 * @vhost: ibmvfc host struct
3735 *
3736 * Returns:
3737 * Pointer to next entry in queue / NULL if empty
3738 **/
3739 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3740 {
3741 struct ibmvfc_queue *queue = &vhost->crq;
3742 struct ibmvfc_crq *crq;
3743
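	/*
	 * The top bit of the valid byte indicates the entry has been posted
	 * and is ours to consume; the rmb() ensures the rest of the entry is
	 * not read until after that check.
	 */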
3744 crq = &queue->msgs.crq[queue->cur];
3745 if (crq->valid & 0x80) {
3746 if (++queue->cur == queue->size)
3747 queue->cur = 0;
3748 rmb();
3749 } else
3750 crq = NULL;
3751
3752 return crq;
3753 }
3754
3755 /**
3756 * ibmvfc_interrupt - Interrupt handler
3757 * @irq: number of irq to handle, not used
3758 * @dev_instance: ibmvfc_host that received interrupt
3759 *
3760 * Returns:
3761 * IRQ_HANDLED
3762 **/
3763 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3764 {
3765 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3766 unsigned long flags;
3767
3768 spin_lock_irqsave(vhost->host->host_lock, flags);
3769 vio_disable_interrupts(to_vio_dev(vhost->dev));
3770 tasklet_schedule(&vhost->tasklet);
3771 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3772 return IRQ_HANDLED;
3773 }
3774
3775 /**
3776 * ibmvfc_tasklet - Interrupt handler tasklet
3777 * @data: ibmvfc host struct
3778 *
3779 * Returns:
3780 * Nothing
3781 **/
3782 static void ibmvfc_tasklet(void *data)
3783 {
3784 struct ibmvfc_host *vhost = data;
3785 struct vio_dev *vdev = to_vio_dev(vhost->dev);
3786 struct ibmvfc_crq *crq;
3787 struct ibmvfc_async_crq *async;
3788 struct ibmvfc_event *evt, *temp;
3789 unsigned long flags;
3790 int done = 0;
3791 LIST_HEAD(evt_doneq);
3792
3793 spin_lock_irqsave(vhost->host->host_lock, flags);
3794 spin_lock(vhost->crq.q_lock);
3795 while (!done) {
3796 /* Pull all the valid messages off the async CRQ */
3797 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3798 ibmvfc_handle_async(async, vhost);
3799 async->valid = 0;
3800 wmb();
3801 }
3802
3803 /* Pull all the valid messages off the CRQ */
3804 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3805 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3806 crq->valid = 0;
3807 wmb();
3808 }
3809
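		/*
		 * Re-enable interrupts, then peek at both queues one more
		 * time. An entry that arrived while interrupts were disabled
		 * will not raise a new interrupt, so handle it here (with
		 * interrupts disabled again) and go around the loop.
		 */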
3810 vio_enable_interrupts(vdev);
3811 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3812 vio_disable_interrupts(vdev);
3813 ibmvfc_handle_async(async, vhost);
3814 async->valid = 0;
3815 wmb();
3816 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3817 vio_disable_interrupts(vdev);
3818 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3819 crq->valid = 0;
3820 wmb();
3821 } else
3822 done = 1;
3823 }
3824
3825 spin_unlock(vhost->crq.q_lock);
3826 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3827
3828 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3829 del_timer(&evt->timer);
3830 list_del(&evt->queue_list);
3831 ibmvfc_trc_end(evt);
3832 evt->done(evt);
3833 }
3834 }
3835
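/**
 * ibmvfc_toggle_scrq_irq - Enable or disable a sub-CRQ's interrupt source
 * @scrq: ibmvfc sub-CRQ queue struct
 * @enable: 1 to enable the interrupt / 0 to disable it
 *
 * Returns:
 *	0 on success / other on failure
 **/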
3836 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3837 {
3838 struct device *dev = scrq->vhost->dev;
3839 struct vio_dev *vdev = to_vio_dev(dev);
3840 unsigned long rc;
3841 int irq_action = H_ENABLE_VIO_INTERRUPT;
3842
3843 if (!enable)
3844 irq_action = H_DISABLE_VIO_INTERRUPT;
3845
3846 rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3847 scrq->hw_irq, 0, 0);
3848
3849 if (rc)
3850 dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3851 enable ? "enable" : "disable", scrq->hwq_id, rc);
3852
3853 return rc;
3854 }
3855
3856 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3857 struct list_head *evt_doneq)
3858 {
3859 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3860
3861 switch (crq->valid) {
3862 case IBMVFC_CRQ_CMD_RSP:
3863 break;
3864 case IBMVFC_CRQ_XPORT_EVENT:
3865 return;
3866 default:
3867 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3868 return;
3869 }
3870
3871 /* The only kind of payload CRQs we should get are responses to
3872 * things we send. Make sure this response is to something we
3873 * actually sent
3874 */
3875 if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3876 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3877 crq->ioba);
3878 return;
3879 }
3880
3881 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3882 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3883 crq->ioba);
3884 return;
3885 }
3886
3887 spin_lock(&evt->queue->l_lock);
3888 list_move_tail(&evt->queue_list, evt_doneq);
3889 spin_unlock(&evt->queue->l_lock);
3890 }
3891
3892 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3893 {
3894 struct ibmvfc_crq *crq;
3895
3896 crq = &scrq->msgs.scrq[scrq->cur].crq;
3897 if (crq->valid & 0x80) {
3898 if (++scrq->cur == scrq->size)
3899 scrq->cur = 0;
3900 rmb();
3901 } else
3902 crq = NULL;
3903
3904 return crq;
3905 }
3906
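/**
 * ibmvfc_drain_sub_crq - Process all pending entries on a sub-CRQ
 * @scrq: ibmvfc sub-CRQ queue struct
 *
 * Pull responses off the sub-CRQ until it is empty, re-enabling the
 * queue's interrupt before the final check to avoid losing entries that
 * arrive in that window, then run the completion handlers for the drained
 * events.
 **/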
3907 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3908 {
3909 struct ibmvfc_crq *crq;
3910 struct ibmvfc_event *evt, *temp;
3911 unsigned long flags;
3912 int done = 0;
3913 LIST_HEAD(evt_doneq);
3914
3915 spin_lock_irqsave(scrq->q_lock, flags);
3916 while (!done) {
3917 while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3918 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3919 crq->valid = 0;
3920 wmb();
3921 }
3922
3923 ibmvfc_toggle_scrq_irq(scrq, 1);
3924 if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3925 ibmvfc_toggle_scrq_irq(scrq, 0);
3926 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3927 crq->valid = 0;
3928 wmb();
3929 } else
3930 done = 1;
3931 }
3932 spin_unlock_irqrestore(scrq->q_lock, flags);
3933
3934 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3935 del_timer(&evt->timer);
3936 list_del(&evt->queue_list);
3937 ibmvfc_trc_end(evt);
3938 evt->done(evt);
3939 }
3940 }
3941
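/**
 * ibmvfc_interrupt_mq - Interrupt handler for a sub-CRQ (MQ mode)
 * @irq: number of irq to handle, not used
 * @scrq_instance: ibmvfc_queue that received the interrupt
 *
 * Returns:
 *	IRQ_HANDLED
 **/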
3942 static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
3943 {
3944 struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3945
3946 ibmvfc_toggle_scrq_irq(scrq, 0);
3947 ibmvfc_drain_sub_crq(scrq);
3948
3949 return IRQ_HANDLED;
3950 }
3951
3952 /**
3953 * ibmvfc_init_tgt - Set the next init job step for the target
3954 * @tgt: ibmvfc target struct
3955 * @job_step: job step to perform
3956 *
3957 **/
3958 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3959 void (*job_step) (struct ibmvfc_target *))
3960 {
3961 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3962 tgt->job_step = job_step;
3963 wake_up(&tgt->vhost->work_wait_q);
3964 }
3965
3966 /**
3967 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3968 * @tgt: ibmvfc target struct
3969 * @job_step: initialization job step
3970 *
3971 * Returns: 1 if step will be retried / 0 if not
3972 *
3973 **/
3974 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3975 void (*job_step) (struct ibmvfc_target *))
3976 {
3977 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3978 ibmvfc_del_tgt(tgt);
3979 wake_up(&tgt->vhost->work_wait_q);
3980 return 0;
3981 } else
3982 ibmvfc_init_tgt(tgt, job_step);
3983 return 1;
3984 }
3985
3986 /* Defined in FC-LS */
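/*
 * Each entry maps a PRLI accept response code (bits 8-11 of the service
 * parameter page flags word) to whether the PRLI should be retried and
 * whether the target considers us logged in.
 */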
3987 static const struct {
3988 int code;
3989 int retry;
3990 int logged_in;
3991 } prli_rsp [] = {
3992 { 0, 1, 0 },
3993 { 1, 0, 1 },
3994 { 2, 1, 0 },
3995 { 3, 1, 0 },
3996 { 4, 0, 0 },
3997 { 5, 0, 0 },
3998 { 6, 0, 1 },
3999 { 7, 0, 0 },
4000 { 8, 1, 0 },
4001 };
4002
4003 /**
4004 * ibmvfc_get_prli_rsp - Find PRLI response index
4005 * @flags: PRLI response flags
4006 *
4007 **/
4008 static int ibmvfc_get_prli_rsp(u16 flags)
4009 {
4010 int i;
4011 int code = (flags & 0x0f00) >> 8;
4012
4013 for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
4014 if (prli_rsp[i].code == code)
4015 return i;
4016
4017 return 0;
4018 }
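/*
 * For example, a flags word of 0x2100 carries response code 1 ("request
 * executed"), which maps to the prli_rsp[] entry with retry = 0 and
 * logged_in = 1.
 */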
4019
4020 /**
4021 * ibmvfc_tgt_prli_done - Completion handler for Process Login
4022 * @evt: ibmvfc event struct
4023 *
4024 **/
4025 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
4026 {
4027 struct ibmvfc_target *tgt = evt->tgt;
4028 struct ibmvfc_host *vhost = evt->vhost;
4029 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
4030 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
4031 u32 status = be16_to_cpu(rsp->common.status);
4032 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
4033
4034 vhost->discovery_threads--;
4035 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4036 switch (status) {
4037 case IBMVFC_MAD_SUCCESS:
4038 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
4039 parms->type, parms->flags, parms->service_parms);
4040
4041 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
4042 index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
4043 if (prli_rsp[index].logged_in) {
4044 if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
4045 tgt->need_login = 0;
4046 tgt->ids.roles = 0;
4047 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
4048 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4049 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
4050 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4051 tgt->add_rport = 1;
4052 } else
4053 ibmvfc_del_tgt(tgt);
4054 } else if (prli_rsp[index].retry)
4055 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4056 else
4057 ibmvfc_del_tgt(tgt);
4058 } else
4059 ibmvfc_del_tgt(tgt);
4060 break;
4061 case IBMVFC_MAD_DRIVER_FAILED:
4062 break;
4063 case IBMVFC_MAD_CRQ_ERROR:
4064 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4065 break;
4066 case IBMVFC_MAD_FAILED:
4067 default:
4068 if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
4069 be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
4070 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4071 else if (tgt->logo_rcvd)
4072 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4073 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4074 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4075 else
4076 ibmvfc_del_tgt(tgt);
4077
4078 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
4079 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4080 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
4081 break;
4082 }
4083
4084 kref_put(&tgt->kref, ibmvfc_release_tgt);
4085 ibmvfc_free_event(evt);
4086 wake_up(&vhost->work_wait_q);
4087 }
4088
4089 /**
4090 * ibmvfc_tgt_send_prli - Send a process login
4091 * @tgt: ibmvfc target struct
4092 *
4093 **/
4094 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4095 {
4096 struct ibmvfc_process_login *prli;
4097 struct ibmvfc_host *vhost = tgt->vhost;
4098 struct ibmvfc_event *evt;
4099
4100 if (vhost->discovery_threads >= disc_threads)
4101 return;
4102
4103 kref_get(&tgt->kref);
4104 evt = ibmvfc_get_reserved_event(&vhost->crq);
4105 if (!evt) {
4106 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4107 kref_put(&tgt->kref, ibmvfc_release_tgt);
4108 __ibmvfc_reset_host(vhost);
4109 return;
4110 }
4111 vhost->discovery_threads++;
4112 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4113 evt->tgt = tgt;
4114 prli = &evt->iu.prli;
4115 memset(prli, 0, sizeof(*prli));
4116 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4117 prli->common.version = cpu_to_be32(2);
4118 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4119 } else {
4120 prli->common.version = cpu_to_be32(1);
4121 }
4122 prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4123 prli->common.length = cpu_to_be16(sizeof(*prli));
4124 prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4125
4126 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4127 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4128 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4129 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4130
4131 if (cls3_error)
4132 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4133
4134 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4135 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4136 vhost->discovery_threads--;
4137 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4138 kref_put(&tgt->kref, ibmvfc_release_tgt);
4139 } else
4140 tgt_dbg(tgt, "Sent process login\n");
4141 }
4142
4143 /**
4144 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4145 * @evt: ibmvfc event struct
4146 *
4147 **/
4148 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4149 {
4150 struct ibmvfc_target *tgt = evt->tgt;
4151 struct ibmvfc_host *vhost = evt->vhost;
4152 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4153 u32 status = be16_to_cpu(rsp->common.status);
4154 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4155
4156 vhost->discovery_threads--;
4157 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4158 switch (status) {
4159 case IBMVFC_MAD_SUCCESS:
4160 tgt_dbg(tgt, "Port Login succeeded\n");
4161 if (tgt->ids.port_name &&
4162 tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4163 vhost->reinit = 1;
4164 tgt_dbg(tgt, "Port re-init required\n");
4165 break;
4166 }
4167 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4168 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4169 tgt->ids.port_id = tgt->scsi_id;
4170 memcpy(&tgt->service_parms, &rsp->service_parms,
4171 sizeof(tgt->service_parms));
4172 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4173 sizeof(tgt->service_parms_change));
4174 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4175 break;
4176 case IBMVFC_MAD_DRIVER_FAILED:
4177 break;
4178 case IBMVFC_MAD_CRQ_ERROR:
4179 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4180 break;
4181 case IBMVFC_MAD_FAILED:
4182 default:
4183 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4184 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4185 else
4186 ibmvfc_del_tgt(tgt);
4187
4188 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4189 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4190 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4191 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4192 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4193 break;
4194 }
4195
4196 kref_put(&tgt->kref, ibmvfc_release_tgt);
4197 ibmvfc_free_event(evt);
4198 wake_up(&vhost->work_wait_q);
4199 }
4200
4201 /**
4202 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4203 * @tgt: ibmvfc target struct
4204 *
4205 **/
4206 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4207 {
4208 struct ibmvfc_port_login *plogi;
4209 struct ibmvfc_host *vhost = tgt->vhost;
4210 struct ibmvfc_event *evt;
4211
4212 if (vhost->discovery_threads >= disc_threads)
4213 return;
4214
4215 kref_get(&tgt->kref);
4216 tgt->logo_rcvd = 0;
4217 evt = ibmvfc_get_reserved_event(&vhost->crq);
4218 if (!evt) {
4219 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4220 kref_put(&tgt->kref, ibmvfc_release_tgt);
4221 __ibmvfc_reset_host(vhost);
4222 return;
4223 }
4224 vhost->discovery_threads++;
4225 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4226 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4227 evt->tgt = tgt;
4228 plogi = &evt->iu.plogi;
4229 memset(plogi, 0, sizeof(*plogi));
4230 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4231 plogi->common.version = cpu_to_be32(2);
4232 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4233 } else {
4234 plogi->common.version = cpu_to_be32(1);
4235 }
4236 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4237 plogi->common.length = cpu_to_be16(sizeof(*plogi));
4238 plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4239
4240 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4241 vhost->discovery_threads--;
4242 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4243 kref_put(&tgt->kref, ibmvfc_release_tgt);
4244 } else
4245 tgt_dbg(tgt, "Sent port login\n");
4246 }
4247
4248 /**
4249 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4250 * @evt: ibmvfc event struct
4251 *
4252 **/
4253 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4254 {
4255 struct ibmvfc_target *tgt = evt->tgt;
4256 struct ibmvfc_host *vhost = evt->vhost;
4257 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4258 u32 status = be16_to_cpu(rsp->common.status);
4259
4260 vhost->discovery_threads--;
4261 ibmvfc_free_event(evt);
4262 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4263
4264 switch (status) {
4265 case IBMVFC_MAD_SUCCESS:
4266 tgt_dbg(tgt, "Implicit Logout succeeded\n");
4267 break;
4268 case IBMVFC_MAD_DRIVER_FAILED:
4269 kref_put(&tgt->kref, ibmvfc_release_tgt);
4270 wake_up(&vhost->work_wait_q);
4271 return;
4272 case IBMVFC_MAD_FAILED:
4273 default:
4274 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4275 break;
4276 }
4277
4278 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4279 kref_put(&tgt->kref, ibmvfc_release_tgt);
4280 wake_up(&vhost->work_wait_q);
4281 }
4282
4283 /**
4284 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4285 * @tgt: ibmvfc target struct
4286 * @done: Routine to call when the event is responded to
4287 *
4288 * Returns:
4289 * Allocated and initialized ibmvfc_event struct
4290 **/
4291 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4292 void (*done) (struct ibmvfc_event *))
4293 {
4294 struct ibmvfc_implicit_logout *mad;
4295 struct ibmvfc_host *vhost = tgt->vhost;
4296 struct ibmvfc_event *evt;
4297
4298 kref_get(&tgt->kref);
4299 evt = ibmvfc_get_reserved_event(&vhost->crq);
4300 if (!evt)
4301 return NULL;
4302 ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4303 evt->tgt = tgt;
4304 mad = &evt->iu.implicit_logout;
4305 memset(mad, 0, sizeof(*mad));
4306 mad->common.version = cpu_to_be32(1);
4307 mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4308 mad->common.length = cpu_to_be16(sizeof(*mad));
4309 mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4310 return evt;
4311 }
4312
4313 /**
4314 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4315 * @tgt: ibmvfc target struct
4316 *
4317 **/
4318 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4319 {
4320 struct ibmvfc_host *vhost = tgt->vhost;
4321 struct ibmvfc_event *evt;
4322
4323 if (vhost->discovery_threads >= disc_threads)
4324 return;
4325
4326 vhost->discovery_threads++;
4327 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4328 ibmvfc_tgt_implicit_logout_done);
4329 if (!evt) {
4330 vhost->discovery_threads--;
4331 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4332 kref_put(&tgt->kref, ibmvfc_release_tgt);
4333 __ibmvfc_reset_host(vhost);
4334 return;
4335 }
4336
4337 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4338 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4339 vhost->discovery_threads--;
4340 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4341 kref_put(&tgt->kref, ibmvfc_release_tgt);
4342 } else
4343 tgt_dbg(tgt, "Sent Implicit Logout\n");
4344 }
4345
4346 /**
4347 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4348 * @evt: ibmvfc event struct
4349 *
4350 **/
4351 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4352 {
4353 struct ibmvfc_target *tgt = evt->tgt;
4354 struct ibmvfc_host *vhost = evt->vhost;
4355 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4356 u32 status = be16_to_cpu(mad->common.status);
4357
4358 vhost->discovery_threads--;
4359 ibmvfc_free_event(evt);
4360
4361 /*
4362 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4363 * driver in which case we need to free up all the targets. If we are
4364 * not unloading, we will still go through a hard reset to get out of
4365 * offline state, so there is no need to track the old targets in that
4366 * case.
4367 */
4368 if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4369 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4370 else
4371 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4372
4373 tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4374 kref_put(&tgt->kref, ibmvfc_release_tgt);
4375 wake_up(&vhost->work_wait_q);
4376 }
4377
4378 /**
4379 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4380 * @tgt: ibmvfc target struct
4381 *
4382 **/
4383 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4384 {
4385 struct ibmvfc_host *vhost = tgt->vhost;
4386 struct ibmvfc_event *evt;
4387
4388 if (!vhost->logged_in) {
4389 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4390 return;
4391 }
4392
4393 if (vhost->discovery_threads >= disc_threads)
4394 return;
4395
4396 vhost->discovery_threads++;
4397 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4398 ibmvfc_tgt_implicit_logout_and_del_done);
4399
4400 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4401 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4402 vhost->discovery_threads--;
4403 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4404 kref_put(&tgt->kref, ibmvfc_release_tgt);
4405 } else
4406 tgt_dbg(tgt, "Sent Implicit Logout\n");
4407 }
4408
4409 /**
4410 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4411 * @evt: ibmvfc event struct
4412 *
4413 **/
4414 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4415 {
4416 struct ibmvfc_target *tgt = evt->tgt;
4417 struct ibmvfc_host *vhost = evt->vhost;
4418 struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4419 u32 status = be16_to_cpu(rsp->common.status);
4420 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4421
4422 vhost->discovery_threads--;
4423 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4424 switch (status) {
4425 case IBMVFC_MAD_SUCCESS:
4426 tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
4427 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4428 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4429 tgt->scsi_id = tgt->new_scsi_id;
4430 tgt->ids.port_id = tgt->scsi_id;
4431 memcpy(&tgt->service_parms, &rsp->service_parms,
4432 sizeof(tgt->service_parms));
4433 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4434 sizeof(tgt->service_parms_change));
4435 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4436 break;
4437 case IBMVFC_MAD_DRIVER_FAILED:
4438 break;
4439 case IBMVFC_MAD_CRQ_ERROR:
4440 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4441 break;
4442 case IBMVFC_MAD_FAILED:
4443 default:
4444 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4445
4446 tgt_log(tgt, level,
4447 "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4448 tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4449 status);
4450 break;
4451 }
4452
4453 kref_put(&tgt->kref, ibmvfc_release_tgt);
4454 ibmvfc_free_event(evt);
4455 wake_up(&vhost->work_wait_q);
4456 }
4457
4458
4459 /**
4460 * ibmvfc_tgt_move_login - Initiate a move login for specified target
4461 * @tgt: ibmvfc target struct
4462 *
4463 **/
4464 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4465 {
4466 struct ibmvfc_host *vhost = tgt->vhost;
4467 struct ibmvfc_move_login *move;
4468 struct ibmvfc_event *evt;
4469
4470 if (vhost->discovery_threads >= disc_threads)
4471 return;
4472
4473 kref_get(&tgt->kref);
4474 evt = ibmvfc_get_reserved_event(&vhost->crq);
4475 if (!evt) {
4476 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4477 kref_put(&tgt->kref, ibmvfc_release_tgt);
4478 __ibmvfc_reset_host(vhost);
4479 return;
4480 }
4481 vhost->discovery_threads++;
4482 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4483 ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4484 evt->tgt = tgt;
4485 move = &evt->iu.move_login;
4486 memset(move, 0, sizeof(*move));
4487 move->common.version = cpu_to_be32(1);
4488 move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4489 move->common.length = cpu_to_be16(sizeof(*move));
4490
4491 move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4492 move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4493 move->wwpn = cpu_to_be64(tgt->wwpn);
4494 move->node_name = cpu_to_be64(tgt->ids.node_name);
4495
4496 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4497 vhost->discovery_threads--;
4498 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4499 kref_put(&tgt->kref, ibmvfc_release_tgt);
4500 } else
4501 tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4502 }
4503
4504 /**
4505 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4506 * @mad: ibmvfc passthru mad struct
4507 * @tgt: ibmvfc target struct
4508 *
4509 * Returns:
4510 * 1 if PLOGI needed / 0 if PLOGI not needed
4511 **/
4512 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4513 struct ibmvfc_target *tgt)
4514 {
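	/*
	 * In the ADISC ACC payload, words 2-3 hold the responder's port name,
	 * words 4-5 the node name, and word 6 the N_Port ID; any mismatch
	 * with what we have cached means the port identity changed and a
	 * fresh PLOGI is required.
	 */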
4515 if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4516 return 1;
4517 if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4518 return 1;
4519 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4520 return 1;
4521 return 0;
4522 }
4523
4524 /**
4525 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4526 * @evt: ibmvfc event struct
4527 *
4528 **/
4529 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4530 {
4531 struct ibmvfc_target *tgt = evt->tgt;
4532 struct ibmvfc_host *vhost = evt->vhost;
4533 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4534 u32 status = be16_to_cpu(mad->common.status);
4535 u8 fc_reason, fc_explain;
4536
4537 vhost->discovery_threads--;
4538 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4539 del_timer(&tgt->timer);
4540
4541 switch (status) {
4542 case IBMVFC_MAD_SUCCESS:
4543 tgt_dbg(tgt, "ADISC succeeded\n");
4544 if (ibmvfc_adisc_needs_plogi(mad, tgt))
4545 ibmvfc_del_tgt(tgt);
4546 break;
4547 case IBMVFC_MAD_DRIVER_FAILED:
4548 break;
4549 case IBMVFC_MAD_FAILED:
4550 default:
4551 ibmvfc_del_tgt(tgt);
4552 fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4553 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4554 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4555 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4556 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4557 ibmvfc_get_fc_type(fc_reason), fc_reason,
4558 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4559 break;
4560 }
4561
4562 kref_put(&tgt->kref, ibmvfc_release_tgt);
4563 ibmvfc_free_event(evt);
4564 wake_up(&vhost->work_wait_q);
4565 }
4566
4567 /**
4568 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4569 * @evt: ibmvfc event struct
4570 *
4571 **/
4572 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4573 {
4574 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4575
4576 memset(mad, 0, sizeof(*mad));
4577 mad->common.version = cpu_to_be32(1);
4578 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4579 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
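	/*
	 * The passthru FC IU lives in the same DMA-mapped buffer as the MAD
	 * itself, so the command and response descriptors below are simply
	 * offsets from this event's mapped address (evt->crq.ioba).
	 */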
4580 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4581 offsetof(struct ibmvfc_passthru_mad, iu));
4582 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4583 mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4584 mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4585 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4586 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4587 offsetof(struct ibmvfc_passthru_fc_iu, payload));
4588 mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4589 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4590 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4591 offsetof(struct ibmvfc_passthru_fc_iu, response));
4592 mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4593 }
4594
4595 /**
4596 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4597 * @evt: ibmvfc event struct
4598 *
4599 * Just cleanup this event struct. Everything else is handled by
4600 * the ADISC completion handler. If the ADISC never actually comes
4601 * back, we still have the timer running on the ADISC event struct
4602 * which will fire and cause the CRQ to get reset.
4603 *
4604 **/
4605 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4606 {
4607 struct ibmvfc_host *vhost = evt->vhost;
4608 struct ibmvfc_target *tgt = evt->tgt;
4609
4610 tgt_dbg(tgt, "ADISC cancel complete\n");
4611 vhost->abort_threads--;
4612 ibmvfc_free_event(evt);
4613 kref_put(&tgt->kref, ibmvfc_release_tgt);
4614 wake_up(&vhost->work_wait_q);
4615 }
4616
4617 /**
4618 * ibmvfc_adisc_timeout - Handle an ADISC timeout
4619 * @t: ibmvfc target struct
4620 *
4621 * If an ADISC times out, send a cancel. If the cancel times
4622 * out, reset the CRQ. When the ADISC comes back as cancelled,
4623 * log back into the target.
4624 **/
4625 static void ibmvfc_adisc_timeout(struct timer_list *t)
4626 {
4627 struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4628 struct ibmvfc_host *vhost = tgt->vhost;
4629 struct ibmvfc_event *evt;
4630 struct ibmvfc_tmf *tmf;
4631 unsigned long flags;
4632 int rc;
4633
4634 tgt_dbg(tgt, "ADISC timeout\n");
4635 spin_lock_irqsave(vhost->host->host_lock, flags);
4636 if (vhost->abort_threads >= disc_threads ||
4637 tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4638 vhost->state != IBMVFC_INITIALIZING ||
4639 vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4640 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4641 return;
4642 }
4643
4644 vhost->abort_threads++;
4645 kref_get(&tgt->kref);
4646 evt = ibmvfc_get_reserved_event(&vhost->crq);
4647 if (!evt) {
4648 tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
4649 vhost->abort_threads--;
4650 kref_put(&tgt->kref, ibmvfc_release_tgt);
4651 __ibmvfc_reset_host(vhost);
4652 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4653 return;
4654 }
4655 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4656
4657 evt->tgt = tgt;
4658 tmf = &evt->iu.tmf;
4659 memset(tmf, 0, sizeof(*tmf));
4660 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4661 tmf->common.version = cpu_to_be32(2);
4662 tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4663 } else {
4664 tmf->common.version = cpu_to_be32(1);
4665 }
4666 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4667 tmf->common.length = cpu_to_be16(sizeof(*tmf));
4668 tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4669 tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4670
4671 rc = ibmvfc_send_event(evt, vhost, default_timeout);
4672
4673 if (rc) {
4674 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4675 vhost->abort_threads--;
4676 kref_put(&tgt->kref, ibmvfc_release_tgt);
4677 __ibmvfc_reset_host(vhost);
4678 } else
4679 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4680 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4681 }
4682
4683 /**
4684 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4685 * @tgt: ibmvfc target struct
4686 *
4687 * When sending an ADISC we end up with two timers running. The
4688 * first timer is the timer in the ibmvfc target struct. If this
4689 * fires, we send a cancel to the target. The second timer is the
4690 * timer on the ibmvfc event for the ADISC, which is longer. If that
4691 * fires, it means the ADISC timed out and our attempt to cancel it
4692 * also failed, so we need to reset the CRQ.
4693 **/
4694 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4695 {
4696 struct ibmvfc_passthru_mad *mad;
4697 struct ibmvfc_host *vhost = tgt->vhost;
4698 struct ibmvfc_event *evt;
4699
4700 if (vhost->discovery_threads >= disc_threads)
4701 return;
4702
4703 kref_get(&tgt->kref);
4704 evt = ibmvfc_get_reserved_event(&vhost->crq);
4705 if (!evt) {
4706 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4707 kref_put(&tgt->kref, ibmvfc_release_tgt);
4708 __ibmvfc_reset_host(vhost);
4709 return;
4710 }
4711 vhost->discovery_threads++;
4712 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4713 evt->tgt = tgt;
4714
4715 ibmvfc_init_passthru(evt);
4716 mad = &evt->iu.passthru;
4717 mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4718 mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4719 mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4720
4721 mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4722 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4723 sizeof(vhost->login_buf->resp.port_name));
4724 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4725 sizeof(vhost->login_buf->resp.node_name));
4726 mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4727
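	/*
	 * Arm the shorter per-target timer to cover the ADISC itself; the
	 * event is sent with the longer IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT so
	 * that a cancel attempt can still complete before the event timer
	 * forces a CRQ reset.
	 */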
4728 if (timer_pending(&tgt->timer))
4729 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4730 else {
4731 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4732 add_timer(&tgt->timer);
4733 }
4734
4735 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4736 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4737 vhost->discovery_threads--;
4738 del_timer(&tgt->timer);
4739 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4740 kref_put(&tgt->kref, ibmvfc_release_tgt);
4741 } else
4742 tgt_dbg(tgt, "Sent ADISC\n");
4743 }
4744
4745 /**
4746 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4747 * @evt: ibmvfc event struct
4748 *
4749 **/
4750 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4751 {
4752 struct ibmvfc_target *tgt = evt->tgt;
4753 struct ibmvfc_host *vhost = evt->vhost;
4754 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4755 u32 status = be16_to_cpu(rsp->common.status);
4756 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4757
4758 vhost->discovery_threads--;
4759 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4760 switch (status) {
4761 case IBMVFC_MAD_SUCCESS:
4762 tgt_dbg(tgt, "Query Target succeeded\n");
4763 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4764 ibmvfc_del_tgt(tgt);
4765 else
4766 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4767 break;
4768 case IBMVFC_MAD_DRIVER_FAILED:
4769 break;
4770 case IBMVFC_MAD_CRQ_ERROR:
4771 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4772 break;
4773 case IBMVFC_MAD_FAILED:
4774 default:
4775 if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4776 be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4777 be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4778 ibmvfc_del_tgt(tgt);
4779 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4780 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4781 else
4782 ibmvfc_del_tgt(tgt);
4783
4784 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4785 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4786 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4787 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4788 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4789 status);
4790 break;
4791 }
4792
4793 kref_put(&tgt->kref, ibmvfc_release_tgt);
4794 ibmvfc_free_event(evt);
4795 wake_up(&vhost->work_wait_q);
4796 }
4797
4798 /**
4799 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4800 * @tgt: ibmvfc target struct
4801 *
4802 **/
4803 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4804 {
4805 struct ibmvfc_query_tgt *query_tgt;
4806 struct ibmvfc_host *vhost = tgt->vhost;
4807 struct ibmvfc_event *evt;
4808
4809 if (vhost->discovery_threads >= disc_threads)
4810 return;
4811
4812 kref_get(&tgt->kref);
4813 evt = ibmvfc_get_reserved_event(&vhost->crq);
4814 if (!evt) {
4815 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4816 kref_put(&tgt->kref, ibmvfc_release_tgt);
4817 __ibmvfc_reset_host(vhost);
4818 return;
4819 }
4820 vhost->discovery_threads++;
4821 evt->tgt = tgt;
4822 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4823 query_tgt = &evt->iu.query_tgt;
4824 memset(query_tgt, 0, sizeof(*query_tgt));
4825 query_tgt->common.version = cpu_to_be32(1);
4826 query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4827 query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4828 query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4829
4830 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4831 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4832 vhost->discovery_threads--;
4833 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4834 kref_put(&tgt->kref, ibmvfc_release_tgt);
4835 } else
4836 tgt_dbg(tgt, "Sent Query Target\n");
4837 }
4838
4839 /**
4840 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4841 * @vhost: ibmvfc host struct
4842 * @target: Holds the SCSI ID and WWPN of the target to allocate
4843 *
4844 * Returns:
4845 * 0 on success / other on failure
4846 **/
4847 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4848 struct ibmvfc_discover_targets_entry *target)
4849 {
4850 struct ibmvfc_target *stgt = NULL;
4851 struct ibmvfc_target *wtgt = NULL;
4852 struct ibmvfc_target *tgt;
4853 unsigned long flags;
4854 u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4855 u64 wwpn = be64_to_cpu(target->wwpn);
4856
4857 /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4858 spin_lock_irqsave(vhost->host->host_lock, flags);
4859 list_for_each_entry(tgt, &vhost->targets, queue) {
4860 if (tgt->wwpn == wwpn) {
4861 wtgt = tgt;
4862 break;
4863 }
4864 }
4865
4866 list_for_each_entry(tgt, &vhost->targets, queue) {
4867 if (tgt->scsi_id == scsi_id) {
4868 stgt = tgt;
4869 break;
4870 }
4871 }
4872
4873 if (wtgt && !stgt) {
4874 /*
4875 * A WWPN target has moved and we still are tracking the old
4876 * SCSI ID. The only way we should be able to get here is if
4877 * we attempted to send an implicit logout for the old SCSI ID
4878 * and it failed for some reason, such as there being I/O
4879 * pending to the target. In this case, we will have already
4880 * deleted the rport from the FC transport so we do a move
4881 * login, which works even with I/O pending. However, if
4882 * there is still I/O pending, it will stay outstanding, so
4883 * we only do this if fast fail is disabled for the rport,
4884 * otherwise we let terminate_rport_io clean up the port
4885 * before we login at the new location.
4886 */
4887 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4888 if (wtgt->move_login) {
4889 /*
4890 * Do a move login here. The old target is no longer
4891 * known to the transport layer. We don't use the
4892 * normal ibmvfc_set_tgt_action to set this, as we
4893 * don't normally want to allow this state change.
4894 */
4895 wtgt->new_scsi_id = scsi_id;
4896 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4897 wtgt->init_retries = 0;
4898 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4899 }
4900 goto unlock_out;
4901 } else {
4902 tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4903 wtgt->action, wtgt->rport);
4904 }
4905 } else if (stgt) {
4906 if (tgt->need_login)
4907 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4908 goto unlock_out;
4909 }
4910 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4911
4912 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4913 memset(tgt, 0, sizeof(*tgt));
4914 tgt->scsi_id = scsi_id;
4915 tgt->wwpn = wwpn;
4916 tgt->vhost = vhost;
4917 tgt->need_login = 1;
4918 timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4919 kref_init(&tgt->kref);
4920 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4921 spin_lock_irqsave(vhost->host->host_lock, flags);
4922 tgt->cancel_key = vhost->task_set++;
4923 list_add_tail(&tgt->queue, &vhost->targets);
4924
4925 unlock_out:
4926 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4927 return 0;
4928 }
4929
4930 /**
4931 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4932 * @vhost: ibmvfc host struct
4933 *
4934 * Returns:
4935 * 0 on success / other on failure
4936 **/
4937 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4938 {
4939 int i, rc;
4940
4941 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4942 rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
4943
4944 return rc;
4945 }
4946
4947 /**
4948 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4949 * @evt: ibmvfc event struct
4950 *
4951 **/
4952 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4953 {
4954 struct ibmvfc_host *vhost = evt->vhost;
4955 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4956 u32 mad_status = be16_to_cpu(rsp->common.status);
4957 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4958
4959 switch (mad_status) {
4960 case IBMVFC_MAD_SUCCESS:
4961 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4962 vhost->num_targets = be32_to_cpu(rsp->num_written);
4963 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4964 break;
4965 case IBMVFC_MAD_FAILED:
4966 level += ibmvfc_retry_host_init(vhost);
4967 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4968 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4969 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4970 break;
4971 case IBMVFC_MAD_DRIVER_FAILED:
4972 break;
4973 default:
4974 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4975 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4976 break;
4977 }
4978
4979 ibmvfc_free_event(evt);
4980 wake_up(&vhost->work_wait_q);
4981 }
4982
4983 /**
4984 * ibmvfc_discover_targets - Send Discover Targets MAD
4985 * @vhost: ibmvfc host struct
4986 *
4987 **/
4988 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4989 {
4990 struct ibmvfc_discover_targets *mad;
4991 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
4992 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4993
4994 if (!evt) {
4995 ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
4996 ibmvfc_hard_reset_host(vhost);
4997 return;
4998 }
4999
5000 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
5001 mad = &evt->iu.discover_targets;
5002 memset(mad, 0, sizeof(*mad));
5003 mad->common.version = cpu_to_be32(1);
5004 mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
5005 mad->common.length = cpu_to_be16(sizeof(*mad));
5006 mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
5007 mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
5008 mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
5009 mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
5010 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5011
5012 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5013 ibmvfc_dbg(vhost, "Sent discover targets\n");
5014 else
5015 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5016 }
5017
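/**
 * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
 * @evt: ibmvfc event struct
 *
 **/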
5018 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
5019 {
5020 struct ibmvfc_host *vhost = evt->vhost;
5021 struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
5022 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5023 u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
5024 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5025 int flags, active_queues, i;
5026
5027 ibmvfc_free_event(evt);
5028
5029 switch (mad_status) {
5030 case IBMVFC_MAD_SUCCESS:
5031 ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
5032 flags = be32_to_cpu(setup->flags);
5033 vhost->do_enquiry = 0;
5034 active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
5035 scrqs->active_queues = active_queues;
5036
5037 if (flags & IBMVFC_CHANNELS_CANCELED) {
5038 ibmvfc_dbg(vhost, "Channels Canceled\n");
5039 vhost->using_channels = 0;
5040 } else {
5041 if (active_queues)
5042 vhost->using_channels = 1;
5043 for (i = 0; i < active_queues; i++)
5044 scrqs->scrqs[i].vios_cookie =
5045 be64_to_cpu(setup->channel_handles[i]);
5046
5047 ibmvfc_dbg(vhost, "Using %u channels\n",
5048 vhost->scsi_scrqs.active_queues);
5049 }
5050 break;
5051 case IBMVFC_MAD_FAILED:
5052 level += ibmvfc_retry_host_init(vhost);
5053 ibmvfc_log(vhost, level, "Channel Setup failed\n");
5054 fallthrough;
5055 case IBMVFC_MAD_DRIVER_FAILED:
5056 return;
5057 default:
5058 dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
5059 mad_status);
5060 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5061 return;
5062 }
5063
5064 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5065 wake_up(&vhost->work_wait_q);
5066 }
5067
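/**
 * ibmvfc_channel_setup - Send Channel Setup MAD
 * @vhost: ibmvfc host struct
 *
 **/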
5068 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
5069 {
5070 struct ibmvfc_channel_setup_mad *mad;
5071 struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
5072 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5073 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5074 unsigned int num_channels =
5075 min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
5076 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5077 int i;
5078
5079 if (!evt) {
5080 ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
5081 ibmvfc_hard_reset_host(vhost);
5082 return;
5083 }
5084
5085 memset(setup_buf, 0, sizeof(*setup_buf));
5086 if (num_channels == 0)
5087 setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
5088 else {
5089 setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
5090 for (i = 0; i < num_channels; i++)
5091 setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
5092 }
5093
5094 ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
5095 mad = &evt->iu.channel_setup;
5096 memset(mad, 0, sizeof(*mad));
5097 mad->common.version = cpu_to_be32(1);
5098 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
5099 mad->common.length = cpu_to_be16(sizeof(*mad));
5100 mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
5101 mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
5102
5103 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5104
5105 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5106 ibmvfc_dbg(vhost, "Sent channel setup\n");
5107 else
5108 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
5109 }
5110
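/**
 * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
 * @evt: ibmvfc event struct
 *
 * On success, saves the number of channels reported by the VIOS and
 * proceeds to Channel Setup.
 **/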
5111 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
5112 {
5113 struct ibmvfc_host *vhost = evt->vhost;
5114 struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
5115 u32 mad_status = be16_to_cpu(rsp->common.status);
5116 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5117
5118 switch (mad_status) {
5119 case IBMVFC_MAD_SUCCESS:
5120 ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
5121 vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
5122 ibmvfc_free_event(evt);
5123 break;
5124 case IBMVFC_MAD_FAILED:
5125 level += ibmvfc_retry_host_init(vhost);
5126 ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
5127 fallthrough;
5128 case IBMVFC_MAD_DRIVER_FAILED:
5129 ibmvfc_free_event(evt);
5130 return;
5131 default:
5132 dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
5133 mad_status);
5134 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5135 ibmvfc_free_event(evt);
5136 return;
5137 }
5138
5139 ibmvfc_channel_setup(vhost);
5140 }
5141
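/**
 * ibmvfc_channel_enquiry - Send Channel Enquiry MAD
 * @vhost: ibmvfc host struct
 *
 **/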
5142 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
5143 {
5144 struct ibmvfc_channel_enquiry *mad;
5145 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5146 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5147
5148 if (!evt) {
5149 ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
5150 ibmvfc_hard_reset_host(vhost);
5151 return;
5152 }
5153
5154 ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5155 mad = &evt->iu.channel_enquiry;
5156 memset(mad, 0, sizeof(*mad));
5157 mad->common.version = cpu_to_be32(1);
5158 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
5159 mad->common.length = cpu_to_be16(sizeof(*mad));
5160
5161 if (mig_channels_only)
5162 mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
5163 if (mig_no_less_channels)
5164 mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
5165
5166 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5167
5168 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5169 ibmvfc_dbg(vhost, "Send channel enquiry\n");
5170 else
5171 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5172 }
5173
5174 /**
5175 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5176 * @evt: ibmvfc event struct
5177 *
5178 **/
5179 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5180 {
5181 struct ibmvfc_host *vhost = evt->vhost;
5182 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5183 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5184 unsigned int npiv_max_sectors;
5185 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5186
5187 switch (mad_status) {
5188 case IBMVFC_MAD_SUCCESS:
5189 ibmvfc_free_event(evt);
5190 break;
5191 case IBMVFC_MAD_FAILED:
5192 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5193 level += ibmvfc_retry_host_init(vhost);
5194 else
5195 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5196 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5197 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5198 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5199 ibmvfc_free_event(evt);
5200 return;
5201 case IBMVFC_MAD_CRQ_ERROR:
5202 ibmvfc_retry_host_init(vhost);
5203 fallthrough;
5204 case IBMVFC_MAD_DRIVER_FAILED:
5205 ibmvfc_free_event(evt);
5206 return;
5207 default:
5208 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5209 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5210 ibmvfc_free_event(evt);
5211 return;
5212 }
5213
5214 vhost->client_migrated = 0;
5215
5216 if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5217 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5218 rsp->flags);
5219 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5220 wake_up(&vhost->work_wait_q);
5221 return;
5222 }
5223
5224 if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5225 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5226 rsp->max_cmds);
5227 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5228 wake_up(&vhost->work_wait_q);
5229 return;
5230 }
5231
5232 vhost->logged_in = 1;
5233 npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5234 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5235 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5236 rsp->drc_name, npiv_max_sectors);
5237
5238 fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5239 fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5240 fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5241 fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5242 fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5243 fc_host_supported_classes(vhost->host) = 0;
5244 if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5245 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5246 if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5247 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5248 if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5249 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5250 fc_host_maxframe_size(vhost->host) =
5251 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5252
5253 vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5254 vhost->host->max_sectors = npiv_max_sectors;
5255
5256 if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5257 ibmvfc_channel_enquiry(vhost);
5258 } else {
5259 vhost->do_enquiry = 0;
5260 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5261 wake_up(&vhost->work_wait_q);
5262 }
5263 }
5264
5265 /**
5266 * ibmvfc_npiv_login - Sends NPIV login
5267 * @vhost: ibmvfc host struct
5268 *
5269 **/
5270 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5271 {
5272 struct ibmvfc_npiv_login_mad *mad;
5273 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5274
5275 if (!evt) {
5276 ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
5277 ibmvfc_hard_reset_host(vhost);
5278 return;
5279 }
5280
5281 ibmvfc_gather_partition_info(vhost);
5282 ibmvfc_set_login_info(vhost);
5283 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5284
5285 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5286 mad = &evt->iu.npiv_login;
5287 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5288 mad->common.version = cpu_to_be32(1);
5289 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5290 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5291 mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5292 mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5293
5294 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5295
5296 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5297 ibmvfc_dbg(vhost, "Sent NPIV login\n");
5298 else
5299 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5300 }
5301
5302 /**
5303 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5304 * @evt: ibmvfc event struct
5305 *
5306 **/
5307 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5308 {
5309 struct ibmvfc_host *vhost = evt->vhost;
5310 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5311
5312 ibmvfc_free_event(evt);
5313
5314 switch (mad_status) {
5315 case IBMVFC_MAD_SUCCESS:
5316 if (list_empty(&vhost->crq.sent) &&
5317 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5318 ibmvfc_init_host(vhost);
5319 return;
5320 }
5321 break;
5322 case IBMVFC_MAD_FAILED:
5323 case IBMVFC_MAD_NOT_SUPPORTED:
5324 case IBMVFC_MAD_CRQ_ERROR:
5325 case IBMVFC_MAD_DRIVER_FAILED:
5326 default:
5327 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5328 break;
5329 }
5330
5331 ibmvfc_hard_reset_host(vhost);
5332 }
5333
5334 /**
5335 * ibmvfc_npiv_logout - Issue an NPIV Logout
5336 * @vhost: ibmvfc host struct
5337 *
5338 **/
5339 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5340 {
5341 struct ibmvfc_npiv_logout_mad *mad;
5342 struct ibmvfc_event *evt;
5343
5344 evt = ibmvfc_get_reserved_event(&vhost->crq);
5345 if (!evt) {
5346 ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
5347 ibmvfc_hard_reset_host(vhost);
5348 return;
5349 }
5350
5351 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5352
5353 mad = &evt->iu.npiv_logout;
5354 memset(mad, 0, sizeof(*mad));
5355 mad->common.version = cpu_to_be32(1);
5356 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5357 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5358
5359 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5360
5361 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5362 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5363 else
5364 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5365 }
5366
5367 /**
5368 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5369 * @vhost: ibmvfc host struct
5370 *
5371 * Returns:
5372 * 1 if work to do / 0 if not
5373 **/
5374 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5375 {
5376 struct ibmvfc_target *tgt;
5377
5378 list_for_each_entry(tgt, &vhost->targets, queue) {
5379 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5380 tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5381 return 1;
5382 }
5383
5384 return 0;
5385 }
5386
5387 /**
5388 * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5389 * @vhost: ibmvfc host struct
5390 *
5391 * Returns:
5392 * 1 if work to do / 0 if not
5393 **/
5394 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5395 {
5396 struct ibmvfc_target *tgt;
5397
5398 list_for_each_entry(tgt, &vhost->targets, queue) {
5399 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5400 tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5401 return 1;
5402 }
5403 return 0;
5404 }
5405
5406 /**
5407 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5408 * @vhost: ibmvfc host struct
5409 *
5410 * Returns:
5411 * 1 if work to do / 0 if not
5412 **/
5413 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5414 {
5415 struct ibmvfc_target *tgt;
5416
5417 if (kthread_should_stop())
5418 return 1;
5419 switch (vhost->action) {
5420 case IBMVFC_HOST_ACTION_NONE:
5421 case IBMVFC_HOST_ACTION_INIT_WAIT:
5422 case IBMVFC_HOST_ACTION_LOGO_WAIT:
5423 return 0;
5424 case IBMVFC_HOST_ACTION_TGT_INIT:
5425 case IBMVFC_HOST_ACTION_QUERY_TGTS:
5426 if (vhost->discovery_threads == disc_threads)
5427 return 0;
5428 list_for_each_entry(tgt, &vhost->targets, queue)
5429 if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5430 return 1;
5431 list_for_each_entry(tgt, &vhost->targets, queue)
5432 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5433 return 0;
5434 return 1;
5435 case IBMVFC_HOST_ACTION_TGT_DEL:
5436 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5437 if (vhost->discovery_threads == disc_threads)
5438 return 0;
5439 list_for_each_entry(tgt, &vhost->targets, queue)
5440 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5441 return 1;
5442 list_for_each_entry(tgt, &vhost->targets, queue)
5443 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5444 return 0;
5445 return 1;
5446 case IBMVFC_HOST_ACTION_LOGO:
5447 case IBMVFC_HOST_ACTION_INIT:
5448 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5449 case IBMVFC_HOST_ACTION_QUERY:
5450 case IBMVFC_HOST_ACTION_RESET:
5451 case IBMVFC_HOST_ACTION_REENABLE:
5452 default:
5453 break;
5454 }
5455
5456 return 1;
5457 }
5458
5459 /**
5460 * ibmvfc_work_to_do - Is there task level work to do?
5461 * @vhost: ibmvfc host struct
5462 *
5463 * Returns:
5464 * 1 if work to do / 0 if not
5465 **/
5466 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5467 {
5468 unsigned long flags;
5469 int rc;
5470
5471 spin_lock_irqsave(vhost->host->host_lock, flags);
5472 rc = __ibmvfc_work_to_do(vhost);
5473 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5474 return rc;
5475 }
5476
5477 /**
5478 * ibmvfc_log_ae - Log async events if necessary
5479 * @vhost: ibmvfc host struct
5480 * @events: events to log
5481 *
5482 **/
5483 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5484 {
5485 if (events & IBMVFC_AE_RSCN)
5486 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5487 if ((events & IBMVFC_AE_LINKDOWN) &&
5488 vhost->state >= IBMVFC_HALTED)
5489 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5490 if ((events & IBMVFC_AE_LINKUP) &&
5491 vhost->state == IBMVFC_INITIALIZING)
5492 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5493 }
5494
5495 /**
5496 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5497 * @tgt: ibmvfc target struct
5498 *
5499 **/
5500 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5501 {
5502 struct ibmvfc_host *vhost = tgt->vhost;
5503 struct fc_rport *rport;
5504 unsigned long flags;
5505
5506 tgt_dbg(tgt, "Adding rport\n");
5507 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5508 spin_lock_irqsave(vhost->host->host_lock, flags);
5509
5510 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5511 tgt_dbg(tgt, "Deleting rport\n");
5512 list_del(&tgt->queue);
5513 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5514 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5515 fc_remote_port_delete(rport);
5516 del_timer_sync(&tgt->timer);
5517 kref_put(&tgt->kref, ibmvfc_release_tgt);
5518 return;
5519 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5520 tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5521 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5522 tgt->rport = NULL;
5523 tgt->init_retries = 0;
5524 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5525 fc_remote_port_delete(rport);
5526 return;
5527 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5528 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5529 return;
5530 }
5531
5532 if (rport) {
5533 tgt_dbg(tgt, "rport add succeeded\n");
5534 tgt->rport = rport;
5535 rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5536 rport->supported_classes = 0;
5537 tgt->target_id = rport->scsi_target_id;
5538 if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5539 rport->supported_classes |= FC_COS_CLASS1;
5540 if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5541 rport->supported_classes |= FC_COS_CLASS2;
5542 if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5543 rport->supported_classes |= FC_COS_CLASS3;
5544 } else
5545 tgt_dbg(tgt, "rport add failed\n");
5546 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5547 }
5548
5549 /**
5550 * ibmvfc_do_work - Do task level work
5551 * @vhost: ibmvfc host struct
5552 *
5553 **/
5554 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5555 {
5556 struct ibmvfc_target *tgt;
5557 unsigned long flags;
5558 struct fc_rport *rport;
5559 LIST_HEAD(purge);
5560 int rc;
5561
5562 ibmvfc_log_ae(vhost, vhost->events_to_log);
5563 spin_lock_irqsave(vhost->host->host_lock, flags);
5564 vhost->events_to_log = 0;
5565 switch (vhost->action) {
5566 case IBMVFC_HOST_ACTION_NONE:
5567 case IBMVFC_HOST_ACTION_LOGO_WAIT:
5568 case IBMVFC_HOST_ACTION_INIT_WAIT:
5569 break;
5570 case IBMVFC_HOST_ACTION_RESET:
5571 list_splice_init(&vhost->purge, &purge);
5572 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5573 ibmvfc_complete_purge(&purge);
5574 rc = ibmvfc_reset_crq(vhost);
5575
5576 spin_lock_irqsave(vhost->host->host_lock, flags);
5577 if (!rc || rc == H_CLOSED)
5578 vio_enable_interrupts(to_vio_dev(vhost->dev));
5579 if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5580 /*
5581 * The only action we could have changed to would have
5582 * been reenable, in which case, we skip the rest of
5583 * this path and wait until we've done the re-enable
5584 * before sending the crq init.
5585 */
5586 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5587
5588 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5589 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5590 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5591 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5592 }
5593 }
5594 break;
5595 case IBMVFC_HOST_ACTION_REENABLE:
5596 list_splice_init(&vhost->purge, &purge);
5597 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5598 ibmvfc_complete_purge(&purge);
5599 rc = ibmvfc_reenable_crq_queue(vhost);
5600
5601 spin_lock_irqsave(vhost->host->host_lock, flags);
5602 if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5603 /*
5604 * The only action we could have changed to would have
5605 * been reset, in which case, we skip the rest of this
5606 * path and wait until we've done the reset before
5607 * sending the crq init.
5608 */
5609 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5610 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5611 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5612 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5613 }
5614 }
5615 break;
5616 case IBMVFC_HOST_ACTION_LOGO:
5617 vhost->job_step(vhost);
5618 break;
5619 case IBMVFC_HOST_ACTION_INIT:
5620 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5621 if (vhost->delay_init) {
5622 vhost->delay_init = 0;
5623 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5624 ssleep(15);
5625 return;
5626 } else
5627 vhost->job_step(vhost);
5628 break;
5629 case IBMVFC_HOST_ACTION_QUERY:
5630 list_for_each_entry(tgt, &vhost->targets, queue)
5631 ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5632 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5633 break;
5634 case IBMVFC_HOST_ACTION_QUERY_TGTS:
5635 list_for_each_entry(tgt, &vhost->targets, queue) {
5636 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5637 tgt->job_step(tgt);
5638 break;
5639 }
5640 }
5641
5642 if (!ibmvfc_dev_init_to_do(vhost))
5643 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5644 break;
5645 case IBMVFC_HOST_ACTION_TGT_DEL:
5646 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5647 list_for_each_entry(tgt, &vhost->targets, queue) {
5648 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5649 tgt->job_step(tgt);
5650 break;
5651 }
5652 }
5653
5654 if (ibmvfc_dev_logo_to_do(vhost)) {
5655 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5656 return;
5657 }
5658
5659 list_for_each_entry(tgt, &vhost->targets, queue) {
5660 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5661 tgt_dbg(tgt, "Deleting rport\n");
5662 rport = tgt->rport;
5663 tgt->rport = NULL;
5664 list_del(&tgt->queue);
5665 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5666 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5667 if (rport)
5668 fc_remote_port_delete(rport);
5669 del_timer_sync(&tgt->timer);
5670 kref_put(&tgt->kref, ibmvfc_release_tgt);
5671 return;
5672 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5673 tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5674 rport = tgt->rport;
5675 tgt->rport = NULL;
5676 tgt->init_retries = 0;
5677 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5678
5679 /*
5680 * If fast fail is enabled, we wait for it to fire and then clean up
5681 * the old port, since we expect the fast fail timer to clean up the
5682 * outstanding I/O faster than waiting for normal command timeouts.
5683 * However, if fast fail is disabled, any I/O outstanding to the
5684 * rport LUNs will stay outstanding indefinitely, since the EH handlers
5685 * won't get invoked for I/Os timing out. If this is an NPIV failover
5686 * scenario, the better alternative is to use the move login.
5687 */
5688 if (rport && rport->fast_io_fail_tmo == -1)
5689 tgt->move_login = 1;
5690 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5691 if (rport)
5692 fc_remote_port_delete(rport);
5693 return;
5694 }
5695 }
5696
5697 if (vhost->state == IBMVFC_INITIALIZING) {
5698 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5699 if (vhost->reinit) {
5700 vhost->reinit = 0;
5701 scsi_block_requests(vhost->host);
5702 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5703 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5704 } else {
5705 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5706 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5707 wake_up(&vhost->init_wait_q);
5708 schedule_work(&vhost->rport_add_work_q);
5709 vhost->init_retries = 0;
5710 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5711 scsi_unblock_requests(vhost->host);
5712 }
5713
5714 return;
5715 } else {
5716 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5717 vhost->job_step = ibmvfc_discover_targets;
5718 }
5719 } else {
5720 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5721 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5722 scsi_unblock_requests(vhost->host);
5723 wake_up(&vhost->init_wait_q);
5724 return;
5725 }
5726 break;
5727 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5728 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5729 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5730 ibmvfc_alloc_targets(vhost);
5731 spin_lock_irqsave(vhost->host->host_lock, flags);
5732 break;
5733 case IBMVFC_HOST_ACTION_TGT_INIT:
5734 list_for_each_entry(tgt, &vhost->targets, queue) {
5735 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5736 tgt->job_step(tgt);
5737 break;
5738 }
5739 }
5740
5741 if (!ibmvfc_dev_init_to_do(vhost))
5742 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5743 break;
5744 default:
5745 break;
5746 }
5747
5748 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5749 }
5750
5751 /**
5752 * ibmvfc_work - Do task level work
5753 * @data: ibmvfc host struct
5754 *
5755 * Returns:
5756 * zero
5757 **/
5758 static int ibmvfc_work(void *data)
5759 {
5760 struct ibmvfc_host *vhost = data;
5761 int rc;
5762
5763 set_user_nice(current, MIN_NICE);
5764
5765 while (1) {
5766 rc = wait_event_interruptible(vhost->work_wait_q,
5767 ibmvfc_work_to_do(vhost));
5768
5769 BUG_ON(rc);
5770
5771 if (kthread_should_stop())
5772 break;
5773
5774 ibmvfc_do_work(vhost);
5775 }
5776
5777 ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5778 return 0;
5779 }
5780
5781 /**
5782 * ibmvfc_alloc_queue - Allocate queue
5783 * @vhost: ibmvfc host struct
5784 * @queue: ibmvfc queue to allocate
5785 * @fmt: queue format to allocate
5786 *
5787 * Returns:
5788 * 0 on success / non-zero on failure
5789 **/
5790 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5791 struct ibmvfc_queue *queue,
5792 enum ibmvfc_msg_fmt fmt)
5793 {
5794 struct device *dev = vhost->dev;
5795 size_t fmt_size;
5796
5797 ENTER;
5798 spin_lock_init(&queue->_lock);
5799 queue->q_lock = &queue->_lock;
5800
5801 switch (fmt) {
5802 case IBMVFC_CRQ_FMT:
5803 fmt_size = sizeof(*queue->msgs.crq);
5804 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
5805 queue->evt_depth = scsi_qdepth;
5806 queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
5807 break;
5808 case IBMVFC_ASYNC_FMT:
5809 fmt_size = sizeof(*queue->msgs.async);
5810 break;
5811 case IBMVFC_SUB_CRQ_FMT:
5812 fmt_size = sizeof(*queue->msgs.scrq);
5813 /* We need one extra event for Cancel Commands */
5814 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5815 queue->evt_depth = scsi_qdepth;
5816 queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5817 break;
5818 default:
5819 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5820 return -EINVAL;
5821 }
5822
5823 queue->fmt = fmt;
5824 if (ibmvfc_init_event_pool(vhost, queue)) {
5825 dev_err(dev, "Couldn't initialize event pool.\n");
5826 return -ENOMEM;
5827 }
5828
5829 queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5830 if (!queue->msgs.handle)
5831 return -ENOMEM;
5832
5833 queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5834 DMA_BIDIRECTIONAL);
5835
5836 if (dma_mapping_error(dev, queue->msg_token)) {
5837 free_page((unsigned long)queue->msgs.handle);
5838 queue->msgs.handle = NULL;
5839 return -ENOMEM;
5840 }
5841
5842 queue->cur = 0;
5843 queue->size = PAGE_SIZE / fmt_size;
5844
5845 queue->vhost = vhost;
5846 return 0;
5847 }
5848
5849 /**
5850 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5851 * @vhost: ibmvfc host struct
5852 *
5853 * Allocates a page for messages, maps it for dma, and registers
5854 * the crq with the hypervisor.
5855 *
5856 * Return value:
5857 * zero on success / other on failure
5858 **/
5859 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5860 {
5861 int rc, retrc = -ENOMEM;
5862 struct device *dev = vhost->dev;
5863 struct vio_dev *vdev = to_vio_dev(dev);
5864 struct ibmvfc_queue *crq = &vhost->crq;
5865
5866 ENTER;
5867 if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5868 return -ENOMEM;
5869
5870 retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5871 crq->msg_token, PAGE_SIZE);
5872
5873 if (rc == H_RESOURCE)
5874 /* We may be kexecing and the resource is busy; try a reset */
5875 retrc = rc = ibmvfc_reset_crq(vhost);
5876
5877 if (rc == H_CLOSED)
5878 dev_warn(dev, "Partner adapter not ready\n");
5879 else if (rc) {
5880 dev_warn(dev, "Error %d opening adapter\n", rc);
5881 goto reg_crq_failed;
5882 }
5883
5884 retrc = 0;
5885
5886 tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5887
5888 if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5889 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5890 goto req_irq_failed;
5891 }
5892
5893 if ((rc = vio_enable_interrupts(vdev))) {
5894 dev_err(dev, "Error %d enabling interrupts\n", rc);
5895 goto req_irq_failed;
5896 }
5897
5898 LEAVE;
5899 return retrc;
5900
5901 req_irq_failed:
5902 tasklet_kill(&vhost->tasklet);
5903 do {
5904 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5905 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5906 reg_crq_failed:
5907 ibmvfc_free_queue(vhost, crq);
5908 return retrc;
5909 }
5910
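/**
 * ibmvfc_register_channel - Register a sub-CRQ channel with the hypervisor
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the channel to register
 *
 * Registers the sub-CRQ page and maps and requests its interrupt.
 *
 * Returns:
 * 0 on success / other on failure
 **/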
5911 static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
5912 struct ibmvfc_channels *channels,
5913 int index)
5914 {
5915 struct device *dev = vhost->dev;
5916 struct vio_dev *vdev = to_vio_dev(dev);
5917 struct ibmvfc_queue *scrq = &channels->scrqs[index];
5918 int rc = -ENOMEM;
5919
5920 ENTER;
5921
5922 rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5923 &scrq->cookie, &scrq->hw_irq);
5924
5925 /* H_CLOSED indicates successful register, but no CRQ partner */
5926 if (rc && rc != H_CLOSED) {
5927 dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5928 if (rc == H_PARAMETER)
5929 dev_warn_once(dev, "Firmware may not support MQ\n");
5930 goto reg_failed;
5931 }
5932
5933 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5934
5935 if (!scrq->irq) {
5936 rc = -EINVAL;
5937 dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5938 goto irq_failed;
5939 }
5940
5941 switch (channels->protocol) {
5942 case IBMVFC_PROTO_SCSI:
5943 snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5944 vdev->unit_address, index);
5945 scrq->handler = ibmvfc_interrupt_mq;
5946 break;
5947 case IBMVFC_PROTO_NVME:
5948 snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
5949 vdev->unit_address, index);
5950 scrq->handler = ibmvfc_interrupt_mq;
5951 break;
5952 default:
5953 dev_err(dev, "Unknown channel protocol (%d)\n",
5954 channels->protocol);
5955 goto irq_failed;
5956 }
5957
5958 rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
5959
5960 if (rc) {
5961 dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5962 irq_dispose_mapping(scrq->irq);
5963 goto irq_failed;
5964 }
5965
5966 scrq->hwq_id = index;
5967
5968 LEAVE;
5969 return 0;
5970
5971 irq_failed:
5972 do {
5973 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5974 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5975 reg_failed:
5976 LEAVE;
5977 return rc;
5978 }
5979
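/**
 * ibmvfc_deregister_channel - Deregister a sub-CRQ channel from the hypervisor
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the channel to deregister
 *
 * Frees the channel's interrupt, frees the sub-CRQ, and clears the queue.
 **/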
5980 static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
5981 struct ibmvfc_channels *channels,
5982 int index)
5983 {
5984 struct device *dev = vhost->dev;
5985 struct vio_dev *vdev = to_vio_dev(dev);
5986 struct ibmvfc_queue *scrq = &channels->scrqs[index];
5987 long rc;
5988
5989 ENTER;
5990
5991 free_irq(scrq->irq, scrq);
5992 irq_dispose_mapping(scrq->irq);
5993 scrq->irq = 0;
5994
5995 do {
5996 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5997 scrq->cookie);
5998 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5999
6000 if (rc)
6001 dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
6002
6003 /* Clean out the queue */
6004 memset(scrq->msgs.crq, 0, PAGE_SIZE);
6005 scrq->cur = 0;
6006
6007 LEAVE;
6008 }
6009
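/**
 * ibmvfc_reg_sub_crqs - Register all allocated sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * If registering any channel fails, all previously registered channels
 * are deregistered and the channel enquiry is skipped.
 **/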
6010 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
6011 struct ibmvfc_channels *channels)
6012 {
6013 int i, j;
6014
6015 ENTER;
6016 if (!vhost->mq_enabled || !channels->scrqs)
6017 return;
6018
6019 for (i = 0; i < channels->max_queues; i++) {
6020 if (ibmvfc_register_channel(vhost, channels, i)) {
6021 for (j = i; j > 0; j--)
6022 ibmvfc_deregister_channel(vhost, channels, j - 1);
6023 vhost->do_enquiry = 0;
6024 return;
6025 }
6026 }
6027
6028 LEAVE;
6029 }
6030
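/**
 * ibmvfc_dereg_sub_crqs - Deregister all sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/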
6031 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
6032 struct ibmvfc_channels *channels)
6033 {
6034 int i;
6035
6036 ENTER;
6037 if (!vhost->mq_enabled || !channels->scrqs)
6038 return;
6039
6040 for (i = 0; i < channels->max_queues; i++)
6041 ibmvfc_deregister_channel(vhost, channels, i);
6042
6043 LEAVE;
6044 }
6045
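/**
 * ibmvfc_alloc_channels - Allocate queue memory for sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * On failure, any queues already allocated are freed.
 *
 * Returns:
 * 0 on success / non-zero on failure
 **/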
6046 static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
6047 struct ibmvfc_channels *channels)
6048 {
6049 struct ibmvfc_queue *scrq;
6050 int i, j;
6051 int rc = 0;
6052
6053 channels->scrqs = kcalloc(channels->max_queues,
6054 sizeof(*channels->scrqs),
6055 GFP_KERNEL);
6056 if (!channels->scrqs)
6057 return -ENOMEM;
6058
6059 for (i = 0; i < channels->max_queues; i++) {
6060 scrq = &channels->scrqs[i];
6061 rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
6062 if (rc) {
6063 for (j = i; j > 0; j--) {
6064 scrq = &channels->scrqs[j - 1];
6065 ibmvfc_free_queue(vhost, scrq);
6066 }
6067 kfree(channels->scrqs);
6068 channels->scrqs = NULL;
6069 channels->active_queues = 0;
6070 return rc;
6071 }
6072 }
6073
6074 return rc;
6075 }
6076
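/**
 * ibmvfc_init_sub_crqs - Allocate and register SCSI sub-CRQs
 * @vhost: ibmvfc host struct
 *
 * On allocation failure, multiqueue support is disabled for this host.
 **/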
6077 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
6078 {
6079 ENTER;
6080 if (!vhost->mq_enabled)
6081 return;
6082
6083 if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
6084 vhost->do_enquiry = 0;
6085 vhost->mq_enabled = 0;
6086 return;
6087 }
6088
6089 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
6090
6091 LEAVE;
6092 }
6093
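/**
 * ibmvfc_release_channels - Free queue memory for sub-CRQ channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/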
6094 static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
6095 struct ibmvfc_channels *channels)
6096 {
6097 struct ibmvfc_queue *scrq;
6098 int i;
6099
6100 if (channels->scrqs) {
6101 for (i = 0; i < channels->max_queues; i++) {
6102 scrq = &channels->scrqs[i];
6103 ibmvfc_free_queue(vhost, scrq);
6104 }
6105
6106 kfree(channels->scrqs);
6107 channels->scrqs = NULL;
6108 channels->active_queues = 0;
6109 }
6110 }
6111
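/**
 * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQs
 * @vhost: ibmvfc host struct
 *
 **/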
6112 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
6113 {
6114 ENTER;
6115 if (!vhost->scsi_scrqs.scrqs)
6116 return;
6117
6118 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
6119
6120 ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
6121 LEAVE;
6122 }
6123
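/**
 * ibmvfc_free_disc_buf - Free the Discover Targets buffer
 * @dev: device struct
 * @channels: ibmvfc channels struct
 *
 **/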
6124 static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
6125 {
6126 dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
6127 channels->disc_buf_dma);
6128 }
6129
6130 /**
6131 * ibmvfc_free_mem - Free memory for vhost
6132 * @vhost: ibmvfc host struct
6133 *
6134 * Return value:
6135 * none
6136 **/
6137 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
6138 {
6139 struct ibmvfc_queue *async_q = &vhost->async_crq;
6140
6141 ENTER;
6142 mempool_destroy(vhost->tgt_pool);
6143 kfree(vhost->trace);
6144 ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
6145 dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
6146 vhost->login_buf, vhost->login_buf_dma);
6147 dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
6148 vhost->channel_setup_buf, vhost->channel_setup_dma);
6149 dma_pool_destroy(vhost->sg_pool);
6150 ibmvfc_free_queue(vhost, async_q);
6151 LEAVE;
6152 }
6153
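/**
 * ibmvfc_alloc_disc_buf - Allocate the Discover Targets buffer
 * @dev: device struct
 * @channels: ibmvfc channels struct
 *
 * Allocates a DMA-coherent buffer sized for max_targets entries.
 *
 * Returns:
 * 0 on success / -ENOMEM on failure
 **/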
6154 static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
6155 {
6156 channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
6157 channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
6158 &channels->disc_buf_dma, GFP_KERNEL);
6159
6160 if (!channels->disc_buf) {
6161 dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
6162 (channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
6163 return -ENOMEM;
6164 }
6165
6166 return 0;
6167 }
6168
6169 /**
6170 * ibmvfc_alloc_mem - Allocate memory for vhost
6171 * @vhost: ibmvfc host struct
6172 *
6173 * Return value:
6174 * 0 on success / non-zero on failure
6175 **/
6176 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
6177 {
6178 struct ibmvfc_queue *async_q = &vhost->async_crq;
6179 struct device *dev = vhost->dev;
6180
6181 ENTER;
6182 if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
6183 dev_err(dev, "Couldn't allocate/map async queue.\n");
6184 goto nomem;
6185 }
6186
6187 vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
6188 SG_ALL * sizeof(struct srp_direct_buf),
6189 sizeof(struct srp_direct_buf), 0);
6190
6191 if (!vhost->sg_pool) {
6192 dev_err(dev, "Failed to allocate sg pool\n");
6193 goto unmap_async_crq;
6194 }
6195
6196 vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
6197 &vhost->login_buf_dma, GFP_KERNEL);
6198
6199 if (!vhost->login_buf) {
6200 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
6201 goto free_sg_pool;
6202 }
6203
6204 if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
6205 goto free_login_buffer;
6206
6207 vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
6208 sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
6209 atomic_set(&vhost->trace_index, -1);
6210
6211 if (!vhost->trace)
6212 goto free_scsi_disc_buffer;
6213
6214 vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
6215 sizeof(struct ibmvfc_target));
6216
6217 if (!vhost->tgt_pool) {
6218 dev_err(dev, "Couldn't allocate target memory pool\n");
6219 goto free_trace;
6220 }
6221
6222 vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
6223 &vhost->channel_setup_dma,
6224 GFP_KERNEL);
6225
6226 if (!vhost->channel_setup_buf) {
6227 dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
6228 goto free_tgt_pool;
6229 }
6230
6231 LEAVE;
6232 return 0;
6233
6234 free_tgt_pool:
6235 mempool_destroy(vhost->tgt_pool);
6236 free_trace:
6237 kfree(vhost->trace);
6238 free_scsi_disc_buffer:
6239 ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
6240 free_login_buffer:
6241 dma_free_coherent(dev, sizeof(*vhost->login_buf),
6242 vhost->login_buf, vhost->login_buf_dma);
6243 free_sg_pool:
6244 dma_pool_destroy(vhost->sg_pool);
6245 unmap_async_crq:
6246 ibmvfc_free_queue(vhost, async_q);
6247 nomem:
6248 LEAVE;
6249 return -ENOMEM;
6250 }
6251
6252 /**
6253 * ibmvfc_rport_add_thread - Worker thread for rport adds
6254 * @work: work struct
6255 *
6256 **/
6257 static void ibmvfc_rport_add_thread(struct work_struct *work)
6258 {
6259 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
6260 rport_add_work_q);
6261 struct ibmvfc_target *tgt;
6262 struct fc_rport *rport;
6263 unsigned long flags;
6264 int did_work;
6265
6266 ENTER;
6267 spin_lock_irqsave(vhost->host->host_lock, flags);
6268 do {
6269 did_work = 0;
6270 if (vhost->state != IBMVFC_ACTIVE)
6271 break;
6272
6273 list_for_each_entry(tgt, &vhost->targets, queue) {
6274 if (tgt->add_rport) {
6275 did_work = 1;
6276 tgt->add_rport = 0;
6277 kref_get(&tgt->kref);
6278 rport = tgt->rport;
6279 if (!rport) {
6280 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6281 ibmvfc_tgt_add_rport(tgt);
6282 } else if (get_device(&rport->dev)) {
6283 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6284 tgt_dbg(tgt, "Setting rport roles\n");
6285 fc_remote_port_rolechg(rport, tgt->ids.roles);
6286 put_device(&rport->dev);
6287 } else {
6288 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6289 }
6290
6291 kref_put(&tgt->kref, ibmvfc_release_tgt);
6292 spin_lock_irqsave(vhost->host->host_lock, flags);
6293 break;
6294 }
6295 }
6296 } while (did_work);
6297
6298 if (vhost->state == IBMVFC_ACTIVE)
6299 vhost->scan_complete = 1;
6300 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6301 LEAVE;
6302 }
6303
6304 /**
6305 * ibmvfc_probe - Adapter hot plug add entry point
6306 * @vdev: vio device struct
6307 * @id: vio device id struct
6308 *
6309 * Return value:
6310 * 0 on success / non-zero on failure
6311 **/
6312 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6313 {
6314 struct ibmvfc_host *vhost;
6315 struct Scsi_Host *shost;
6316 struct device *dev = &vdev->dev;
6317 int rc = -ENOMEM;
6318 unsigned int online_cpus = num_online_cpus();
6319 unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
6320
6321 ENTER;
6322 shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6323 if (!shost) {
6324 dev_err(dev, "Couldn't allocate host data\n");
6325 goto out;
6326 }
6327
6328 shost->transportt = ibmvfc_transport_template;
6329 shost->can_queue = scsi_qdepth;
6330 shost->max_lun = max_lun;
6331 shost->max_id = max_targets;
6332 shost->max_sectors = IBMVFC_MAX_SECTORS;
6333 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6334 shost->unique_id = shost->host_no;
6335 shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6336
6337 vhost = shost_priv(shost);
6338 INIT_LIST_HEAD(&vhost->targets);
6339 INIT_LIST_HEAD(&vhost->purge);
6340 sprintf(vhost->name, IBMVFC_NAME);
6341 vhost->host = shost;
6342 vhost->dev = dev;
6343 vhost->partition_number = -1;
6344 vhost->log_level = log_level;
6345 vhost->task_set = 1;
6346
6347 vhost->mq_enabled = mq_enabled;
6348 vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
6349 vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
6350 vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
6351 vhost->using_channels = 0;
6352 vhost->do_enquiry = 1;
6353 vhost->scan_timeout = 0;
6354
6355 strcpy(vhost->partition_name, "UNKNOWN");
6356 init_waitqueue_head(&vhost->work_wait_q);
6357 init_waitqueue_head(&vhost->init_wait_q);
6358 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6359 mutex_init(&vhost->passthru_mutex);
6360
6361 if ((rc = ibmvfc_alloc_mem(vhost)))
6362 goto free_scsi_host;
6363
6364 vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6365 shost->host_no);
6366
6367 if (IS_ERR(vhost->work_thread)) {
6368 dev_err(dev, "Couldn't create kernel thread: %ld\n",
6369 PTR_ERR(vhost->work_thread));
6370 rc = PTR_ERR(vhost->work_thread);
6371 goto free_host_mem;
6372 }
6373
6374 if ((rc = ibmvfc_init_crq(vhost))) {
6375 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6376 goto kill_kthread;
6377 }
6378
6379 if ((rc = scsi_add_host(shost, dev)))
6380 goto release_crq;
6381
6382 fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6383
6384 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6385 &ibmvfc_trace_attr))) {
6386 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6387 goto remove_shost;
6388 }
6389
6390 ibmvfc_init_sub_crqs(vhost);
6391
6392 dev_set_drvdata(dev, vhost);
6393 spin_lock(&ibmvfc_driver_lock);
6394 list_add_tail(&vhost->queue, &ibmvfc_head);
6395 spin_unlock(&ibmvfc_driver_lock);
6396
6397 ibmvfc_send_crq_init(vhost);
6398 scsi_scan_host(shost);
6399 return 0;
6400
6401 remove_shost:
6402 scsi_remove_host(shost);
6403 release_crq:
6404 ibmvfc_release_crq_queue(vhost);
6405 kill_kthread:
6406 kthread_stop(vhost->work_thread);
6407 free_host_mem:
6408 ibmvfc_free_mem(vhost);
6409 free_scsi_host:
6410 scsi_host_put(shost);
6411 out:
6412 LEAVE;
6413 return rc;
6414 }
6415
6416 /**
6417 * ibmvfc_remove - Adapter hot plug remove entry point
6418 * @vdev: vio device struct
6419 *
6420 * Return value:
6421 * 0
6422 **/
6423 static void ibmvfc_remove(struct vio_dev *vdev)
6424 {
6425 struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6426 LIST_HEAD(purge);
6427 unsigned long flags;
6428
6429 ENTER;
6430 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6431
6432 spin_lock_irqsave(vhost->host->host_lock, flags);
6433 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6434 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6435
6436 ibmvfc_wait_while_resetting(vhost);
6437 kthread_stop(vhost->work_thread);
6438 fc_remove_host(vhost->host);
6439 scsi_remove_host(vhost->host);
6440
6441 spin_lock_irqsave(vhost->host->host_lock, flags);
6442 ibmvfc_purge_requests(vhost, DID_ERROR);
6443 list_splice_init(&vhost->purge, &purge);
6444 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6445 ibmvfc_complete_purge(&purge);
6446 ibmvfc_release_sub_crqs(vhost);
6447 ibmvfc_release_crq_queue(vhost);
6448
6449 ibmvfc_free_mem(vhost);
6450 spin_lock(&ibmvfc_driver_lock);
6451 list_del(&vhost->queue);
6452 spin_unlock(&ibmvfc_driver_lock);
6453 scsi_host_put(vhost->host);
6454 LEAVE;
6455 }
6456
6457 /**
6458 * ibmvfc_resume - Resume from suspend
6459 * @dev: device struct
6460 *
6461 * We may have lost an interrupt across suspend/resume, so kick the
6462 * interrupt handler
6463 *
6464 */
6465 static int ibmvfc_resume(struct device *dev)
6466 {
6467 unsigned long flags;
6468 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6469 struct vio_dev *vdev = to_vio_dev(dev);
6470
6471 spin_lock_irqsave(vhost->host->host_lock, flags);
6472 vio_disable_interrupts(vdev);
6473 tasklet_schedule(&vhost->tasklet);
6474 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6475 return 0;
6476 }
6477
6478 /**
6479 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6480 * @vdev: vio device struct
6481 *
6482 * Return value:
6483 * Number of bytes the driver will need to DMA map at the same time in
6484 * order to perform well.
6485 */
6486 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6487 {
6488 unsigned long pool_dma;
6489
6490 pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
6491 return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6492 }
6493
6494 static const struct vio_device_id ibmvfc_device_table[] = {
6495 {"fcp", "IBM,vfc-client"},
6496 { "", "" }
6497 };
6498 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6499
6500 static const struct dev_pm_ops ibmvfc_pm_ops = {
6501 .resume = ibmvfc_resume
6502 };
6503
6504 static struct vio_driver ibmvfc_driver = {
6505 .id_table = ibmvfc_device_table,
6506 .probe = ibmvfc_probe,
6507 .remove = ibmvfc_remove,
6508 .get_desired_dma = ibmvfc_get_desired_dma,
6509 .name = IBMVFC_NAME,
6510 .pm = &ibmvfc_pm_ops,
6511 };
6512
6513 static struct fc_function_template ibmvfc_transport_functions = {
6514 .show_host_fabric_name = 1,
6515 .show_host_node_name = 1,
6516 .show_host_port_name = 1,
6517 .show_host_supported_classes = 1,
6518 .show_host_port_type = 1,
6519 .show_host_port_id = 1,
6520 .show_host_maxframe_size = 1,
6521
6522 .get_host_port_state = ibmvfc_get_host_port_state,
6523 .show_host_port_state = 1,
6524
6525 .get_host_speed = ibmvfc_get_host_speed,
6526 .show_host_speed = 1,
6527
6528 .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6529 .terminate_rport_io = ibmvfc_terminate_rport_io,
6530
6531 .show_rport_maxframe_size = 1,
6532 .show_rport_supported_classes = 1,
6533
6534 .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6535 .show_rport_dev_loss_tmo = 1,
6536
6537 .get_starget_node_name = ibmvfc_get_starget_node_name,
6538 .show_starget_node_name = 1,
6539
6540 .get_starget_port_name = ibmvfc_get_starget_port_name,
6541 .show_starget_port_name = 1,
6542
6543 .get_starget_port_id = ibmvfc_get_starget_port_id,
6544 .show_starget_port_id = 1,
6545
6546 .max_bsg_segments = 1,
6547 .bsg_request = ibmvfc_bsg_request,
6548 .bsg_timeout = ibmvfc_bsg_timeout,
6549 };
6550
6551 /**
6552 * ibmvfc_module_init - Initialize the ibmvfc module
6553 *
6554 * Return value:
6555 * 0 on success / other on failure
6556 **/
6557 static int __init ibmvfc_module_init(void)
6558 {
6559 int rc;
6560
6561 if (!firmware_has_feature(FW_FEATURE_VIO))
6562 return -ENODEV;
6563
6564 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6565 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6566
6567 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6568 if (!ibmvfc_transport_template)
6569 return -ENOMEM;
6570
6571 rc = vio_register_driver(&ibmvfc_driver);
6572 if (rc)
6573 fc_release_transport(ibmvfc_transport_template);
6574 return rc;
6575 }
6576
6577 /**
6578 * ibmvfc_module_exit - Teardown the ibmvfc module
6579 *
6580 * Return value:
6581 * nothing
6582 **/
6583 static void __exit ibmvfc_module_exit(void)
6584 {
6585 vio_unregister_driver(&ibmvfc_driver);
6586 fc_release_transport(ibmvfc_transport_template);
6587 }
6588
6589 module_init(ibmvfc_module_init);
6590 module_exit(ibmvfc_module_exit);
6591