// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static u16 max_sectors = IBMVFC_MAX_SECTORS;
static u16 scsi_qdepth = IBMVFC_SCSI_QDEPTH;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_sectors, max_sectors, ushort, S_IRUGO);
MODULE_PARM_DESC(max_sectors, "Maximum sectors for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_SECTORS) "]");
module_param_named(scsi_qdepth, scsi_qdepth, ushort, S_IRUGO);
MODULE_PARM_DESC(scsi_qdepth, "Maximum scsi command depth per adapter queue. "
		 "[Default=" __stringify(IBMVFC_SCSI_QDEPTH) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

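/*
 * Maps the status / error pair returned in an fcp response to the SCSI
 * result to report, whether the command may be retried, whether the
 * failure should be logged, and a human readable description.
 */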
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};

static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);
static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *, struct ibmvfc_channels *);

static const char *unknown_error = "unknown error";

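/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address: vio device unit address
 * @ioba: I/O bus address of the queue page
 * @length: length of the queue in bytes
 * @cookie: returned cookie identifying the sub-CRQ
 * @irq: returned irq number assigned to the sub-CRQ
 *
 * Return value:
 *	rc from the H_REG_SUB_CRQ hcall (0 on success)
 **/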
static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
			  unsigned long length, unsigned long *cookie,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
	*cookie = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

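/**
 * ibmvfc_check_caps - Check if a capability was reported in the NPIV login response
 * @vhost: ibmvfc host struct
 * @cap_flags: capability flags to check for
 *
 * Return value:
 *	1 if the capability is present / 0 if not
 **/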
static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);

	return (host_caps & cap_flags) ? 1 : 0;
}

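/**
 * ibmvfc_get_fcp_iu - Get the FCP command IU within a vfc command
 * @vhost: ibmvfc host struct
 * @vfc_cmd: ibmvfc command struct
 *
 * Returns the v2 IU layout if the host reported the IBMVFC_HANDLE_VF_WWPN
 * capability, the v1 layout otherwise.
 **/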
static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
						   struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.iu;
	else
		return &vfc_cmd->v1.iu;
}

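/**
 * ibmvfc_get_fcp_rsp - Get the FCP response within a vfc command
 * @vhost: ibmvfc host struct
 * @vfc_cmd: ibmvfc command struct
 *
 * Returns the v2 response layout if the host reported the
 * IBMVFC_HANDLE_VF_WWPN capability, the v1 layout otherwise.
 **/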
static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
						 struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.rsp;
	else
		return &vfc_cmd->v1.rsp;
}

#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt: ibmvfc event struct
 *
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt: ibmvfc event struct
 *
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;
	}
}

#else
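/* Stub the trace helpers out when tracing is not configured */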
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status: status / error class
 * @error: error
 *
 * Return value:
 *	index into cmd_status / -EINVAL on failure
 **/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
		if ((cmd_status[i].status & status) == cmd_status[i].status &&
		    cmd_status[i].error == error)
			return i;

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status: status / error class
 * @error: error
 *
 * Return value:
 *	error description string
 **/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);
	if (rc >= 0)
		return cmd_status[rc].name;
	return unknown_error;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost: ibmvfc host struct
 * @vfc_cmd: ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status: status / error class
 * @error: error
 *
 * Return value:
 *	1 if error should be retried / 0 if it should not
 **/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);

	if (rc >= 0)
		return cmd_status[rc].retry;
	return 1;
}

static const char *unknown_fc_explain = "unknown fc explain";

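/* FC Explain codes for ELS (link service) responses */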
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

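/* FC Explain codes for name server (generic service) responses */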
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status: FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_ls_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
		if (ls_explain[i].fc_explain == status)
			return ls_explain[i].name;

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status: FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_gs_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
		if (gs_explain[i].fc_explain == status)
			return gs_explain[i].name;

	return unknown_fc_explain;
}

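/* Maps an ibmvfc_fc_type error classification to a description */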
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status: FC Type error status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_fc_type(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
		if (fc_type[i].fc_type == status)
			return fc_type[i].name;

	return unknown_fc_type;
}

/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt: ibmvfc target struct
 * @action: action to perform
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				 enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost: ibmvfc host struct
 * @state: state to set host to
 *
 * Returns:
 *	0 if state changed / non-zero if not changed
 **/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				 enum ibmvfc_host_state state)
{
	int rc = 0;

	switch (vhost->state) {
	case IBMVFC_HOST_OFFLINE:
		rc = -EINVAL;
		break;
	default:
		vhost->state = state;
		break;
	}

	return rc;
}

/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost: ibmvfc host struct
 * @action: action to perform
 *
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt: ibmvfc target struct
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
		tgt->init_retries = 0;
	}
	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost: ibmvfc host struct
 * @state: ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (vhost->client_migrated)
				tgt->need_login = 1;
			else
				ibmvfc_del_tgt(tgt);
		}

		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost: ibmvfc host struct
 * @word1: the first 64 bits of the data
 * @word2: the second 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

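/**
 * ibmvfc_send_sub_crq - Send a sub-CRQ message
 * @vhost: ibmvfc host struct
 * @cookie: cookie of the sub-CRQ to send on
 * @word1: the first 64 bits of the data
 * @word2: the second 64 bits of the data
 * @word3: the third 64 bits of the data
 * @word4: the fourth 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/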
static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost: ibmvfc host that owns the event pool
 * @queue: ibmvfc queue struct
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	if (!queue->total_depth)
		return 0;

	pool->size = queue->total_depth;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	queue->evt_free = queue->evt_depth;
	queue->reserved_free = queue->reserved_depth;
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		/*
		 * evt->active states
		 *	1 = in flight
		 *	0 = being completed
		 *	-1 = free/freed
		 */
		atomic_set(&evt->active, -1);
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost: ibmvfc host that owns the event pool
 * @queue: ibmvfc queue struct
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
 * ibmvfc_free_queue - Deallocate queue
 * @vhost: ibmvfc host struct
 * @queue: ibmvfc queue struct
 *
 * Unmaps dma and deallocates page for messages
 **/
static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue)
{
	struct device *dev = vhost->dev;

	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs.handle);
	queue->msgs.handle = NULL;

	ibmvfc_free_event_pool(vhost, queue);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost: ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	ibmvfc_free_queue(vhost, crq);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);

	return rc;
}

/**
 * ibmvfc_valid_event - Determines if event is valid.
 * @pool: event_pool that contains the event
 * @evt: ibmvfc event to be checked for validity
 *
 * Return value:
 *	1 if event is valid / 0 if event is not valid
 **/
static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
			      struct ibmvfc_event *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * ibmvfc_free_event - Free the specified event
 * @evt: ibmvfc_event to be freed
 *
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
	unsigned long flags;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	BUG_ON(atomic_inc_return(&evt->free) != 1);
	BUG_ON(atomic_dec_and_test(&evt->active));

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->free);
	if (evt->reserved) {
		evt->reserved = 0;
		evt->queue->reserved_free++;
	} else {
		evt->queue->evt_free++;
	}
	if (evt->eh_comp)
		complete(evt->eh_comp);
	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}

/**
 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
 * @evt: ibmvfc event struct
 *
 * This function does not set up any error status; that must be done
 * before this function gets called.
 **/
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
	struct scsi_cmnd *cmnd = evt->cmnd;

	if (cmnd) {
		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_complete_purge - Complete failed command list
 * @purge_list: list head of failed commands
 *
 * This function runs completions on commands to fail as a result of a
 * host reset or platform migration.
 **/
static void ibmvfc_complete_purge(struct list_head *purge_list)
{
	struct ibmvfc_event *evt, *pos;

	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}

/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt: ibmvfc event struct
 * @error_code: error code to fail request with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	/*
	 * Anything we are failing should still be active. Otherwise, it
	 * implies we already got a response for the command and are doing
	 * something bad like double completing it.
	 */
	BUG_ON(!atomic_dec_and_test(&evt->active));
	if (evt->cmnd) {
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	del_timer(&evt->timer);
}

/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost: ibmvfc host struct
 * @error_code: error code to fail requests with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
	struct ibmvfc_event *evt, *pos;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int hwqs = 0;
	int i;

	if (vhost->using_channels)
		hwqs = vhost->scsi_scrqs.active_queues;

	ibmvfc_dbg(vhost, "Purging all requests\n");
	spin_lock_irqsave(&vhost->crq.l_lock, flags);
	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
		ibmvfc_fail_request(evt, error_code);
	list_splice_init(&vhost->crq.sent, &vhost->purge);
	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

	for (i = 0; i < hwqs; i++) {
		spin_lock_irqsave(queues[i].q_lock, flags);
		spin_lock(&queues[i].l_lock);
		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
			ibmvfc_fail_request(evt, error_code);
		list_splice_init(&queues[i].sent, &vhost->purge);
		spin_unlock(&queues[i].l_lock);
		spin_unlock_irqrestore(queues[i].q_lock, flags);
	}
}

/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost: struct ibmvfc host to reset
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}

/**
 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
 * @vhost: struct ibmvfc host to reset
 **/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
		vhost->job_step = ibmvfc_npiv_logout;
		wake_up(&vhost->work_wait_q);
	} else
		ibmvfc_hard_reset_host(vhost);
}

/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost: ibmvfc host struct
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost: ibmvfc host struct
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}

/**
 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
 * @starget: scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue)
		if (tgt->target_id == starget->id) {
			kref_get(&tgt->kref);
			return tgt;
		}
	return NULL;
}

/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget: scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}

/**
 * ibmvfc_get_host_speed - Get host port speed
 * @shost: scsi host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
		case 1:
			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
			break;
		case 2:
			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
			break;
		case 4:
			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
			break;
		case 8:
			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
			break;
		case 10:
			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
			break;
		case 16:
			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
			break;
		default:
			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
			break;
		}
	} else
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost: scsi host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	switch (vhost->state) {
	case IBMVFC_INITIALIZING:
	case IBMVFC_ACTIVE:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case IBMVFC_LINK_DOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case IBMVFC_HALTED:
		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
		break;
	case IBMVFC_NO_CRQ:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	default:
		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
 * @rport: rport struct
 * @timeout: timeout value
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref: kref struct
 *
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}

/**
 * ibmvfc_get_starget_node_name - Get SCSI target's node name
 * @starget: scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_name - Get SCSI target's port name
 * @starget: scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
 * @starget: scsi target struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
	long timeout = wait_event_timeout(vhost->init_wait_q,
					  ((vhost->state == IBMVFC_ACTIVE ||
					    vhost->state == IBMVFC_HOST_OFFLINE ||
					    vhost->state == IBMVFC_LINK_DEAD) &&
					   vhost->action == IBMVFC_HOST_ACTION_NONE),
					  (init_timeout * HZ));

	return timeout ? 0 : -EIO;
}

/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost: scsi host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}

/**
 * ibmvfc_gather_partition_info - Gather info about the LPAR
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
	struct device_node *rootdn;
	const char *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return;

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strscpy(vhost->partition_name, name, sizeof(vhost->partition_name));
	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		vhost->partition_number = *num;
	of_node_put(rootdn);
}

/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost: ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;
	u16 max_cmds;

	max_cmds = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
	if (mq_enabled)
		max_cmds += (scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ) *
			vhost->scsi_scrqs.desired_queues;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	login_info->max_dma_len = cpu_to_be64(max_sectors << 9);
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	login_info->max_cmds = cpu_to_be32(max_cmds);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);

	if (vhost->mq_enabled || vhost->using_channels)
		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);

	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(async_crq->size *
					    sizeof(*async_crq->msgs.async));
	strscpy(login_info->partition_name, vhost->partition_name,
		sizeof(login_info->partition_name));

	strscpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), sizeof(login_info->device_name));

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strscpy(login_info->drc_name, location, sizeof(login_info->drc_name));
}

/**
 * __ibmvfc_get_event - Gets the next free event in pool
 * @queue: ibmvfc queue struct
 * @reserved: event is for a reserved management command
 *
 * Returns a free event from the pool.
 **/
static struct ibmvfc_event *__ibmvfc_get_event(struct ibmvfc_queue *queue, int reserved)
{
	struct ibmvfc_event *evt = NULL;
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	if (reserved && queue->reserved_free) {
		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
		evt->reserved = 1;
		queue->reserved_free--;
	} else if (queue->evt_free) {
		evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
		queue->evt_free--;
	} else {
		goto out;
	}

	atomic_set(&evt->free, 0);
	list_del(&evt->queue_list);
out:
	spin_unlock_irqrestore(&queue->l_lock, flags);
	return evt;
}

#define ibmvfc_get_event(queue) __ibmvfc_get_event(queue, 0)
#define ibmvfc_get_reserved_event(queue) __ibmvfc_get_event(queue, 1)

/**
 * ibmvfc_locked_done - Calls evt completion with host_lock held
 * @evt: ibmvfc evt to complete
 *
 * All non-scsi command completion callbacks have the expectation that the
 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
 * MAD evt with the host_lock.
 **/
static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
	evt->_done(evt);
	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}

/**
 * ibmvfc_init_event - Initialize fields in an event struct that are always
 *	required.
 * @evt: The event
 * @done: Routine to call when the event is responded to
 * @format: SRP or MAD format
 **/
static void ibmvfc_init_event(struct ibmvfc_event *evt,
			      void (*done) (struct ibmvfc_event *), u8 format)
{
	evt->cmnd = NULL;
	evt->sync_iu = NULL;
	evt->eh_comp = NULL;
	evt->crq.format = format;
	if (format == IBMVFC_CMD_FORMAT)
		evt->done = done;
	else {
		evt->_done = done;
		evt->done = ibmvfc_locked_done;
	}
	evt->hwq = 0;
}

/**
 * ibmvfc_map_sg_list - Initialize scatterlist
 * @scmd: scsi command struct
 * @nseg: number of scatterlist segments
 * @md: memory descriptor list to initialize
 **/
static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
			       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;

	scsi_for_each_sg(scmd, sg, nseg, i) {
		md[i].va = cpu_to_be64(sg_dma_address(sg));
		md[i].len = cpu_to_be32(sg_dma_len(sg));
		md[i].key = 0;
	}
}

/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd: struct scsi_cmnd with the scatterlist
 * @evt: ibmvfc event struct
 * @vfc_cmd: vfc_cmd that contains the memory descriptor
 * @dev: device for which to map dma memory
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{
	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
		return 0;
1651 } else if (unlikely(sg_mapped < 0)) {
1652 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1653 scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1654 return sg_mapped;
1655 }
1656
1657 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1658 vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1659 iu->add_cdb_len |= IBMVFC_WRDATA;
1660 } else {
1661 vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1662 iu->add_cdb_len |= IBMVFC_RDDATA;
1663 }
1664
1665 if (sg_mapped == 1) {
1666 ibmvfc_map_sg_list(scmd, sg_mapped, data);
1667 return 0;
1668 }
1669
1670 vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1671
1672 if (!evt->ext_list) {
1673 evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1674 &evt->ext_list_token);
1675
1676 if (!evt->ext_list) {
1677 scsi_dma_unmap(scmd);
1678 if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1679 scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1680 return -ENOMEM;
1681 }
1682 }
1683
1684 ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1685
1686 data->va = cpu_to_be64(evt->ext_list_token);
1687 data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1688 data->key = 0;
1689 return 0;
1690 }
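
/*
 * Illustrative summary (not extra driver logic): ibmvfc_map_sg_data()
 * resolves to one of three descriptor layouts depending on how many DMA
 * segments scsi_dma_map() returns:
 *
 *	0 segments  -> IBMVFC_NO_MEM_DESC, no data descriptor at all
 *	1 segment   -> the single srp_direct_buf embedded in the vfc_cmd
 *	>1 segments -> IBMVFC_SCATTERLIST, with an external descriptor list
 *		       from vhost->sg_pool referenced by the embedded buf
 */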

/**
 * ibmvfc_timeout - Internal command timeout handler
 * @t: struct ibmvfc_event that timed out
 *
 * Called when an internally generated command times out
 **/
static void ibmvfc_timeout(struct timer_list *t)
{
	struct ibmvfc_event *evt = from_timer(evt, t, timer);
	struct ibmvfc_host *vhost = evt->vhost;
	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
	ibmvfc_reset_host(vhost);
}

/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt: event to be sent
 * @vhost: ibmvfc host struct
 * @timeout: timeout in seconds - 0 means do not time command
 *
 * Returns:
 *	0 on success (or after completing the event with an error) /
 *	SCSI_MLQUEUE_HOST_BUSY if the CRQ was closed and the command
 *	should be retried
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	unsigned long flags;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->sent);
	atomic_set(&evt->active, 1);

	mb();

	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
		rc = ibmvfc_send_sub_crq(vhost,
					 evt->queue->vios_cookie,
					 be64_to_cpu(crq_as_u64[0]),
					 be64_to_cpu(crq_as_u64[1]),
					 0, 0);
	else
		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
				     be64_to_cpu(crq_as_u64[1]));

	if (rc) {
		atomic_set(&evt->active, 0);
		list_del(&evt->queue_list);
		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
		del_timer(&evt->timer);

		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
		 * Firmware will send a CRQ with a transport event (0xFF) to
		 * tell this client what has happened to the transport. This
		 * will be handled in ibmvfc_handle_crq()
		 */
		if (rc == H_CLOSED) {
			if (printk_ratelimit())
				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
			if (evt->cmnd)
				scsi_dma_unmap(evt->cmnd);
			ibmvfc_free_event(evt);
			return SCSI_MLQUEUE_HOST_BUSY;
		}

		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
		if (evt->cmnd) {
			evt->cmnd->result = DID_ERROR << 16;
			evt->done = ibmvfc_scsi_eh_done;
		} else
			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);

		evt->done(evt);
	} else {
		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
		ibmvfc_trc_start(evt);
	}

	return 0;
}
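
/*
 * Illustrative sketch (mirrors ibmvfc_bsg_timeout() below): a caller that
 * wants the driver to time the command passes a timeout in seconds, which
 * arms evt->timer with ibmvfc_timeout() as the expiry handler:
 *
 *	rc = ibmvfc_send_event(evt, vhost, default_timeout);
 *
 * Passing 0 (as ibmvfc_queuecommand() does) leaves the command untimed.
 */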

/**
 * ibmvfc_log_error - Log an error for the failed command if appropriate
 * @evt: ibmvfc event to log
 *
 **/
static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	const char *err = unknown_error;
	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	int logerr = 0;
	int rsp_code = 0;

	if (index >= 0) {
		logerr = cmd_status[index].log;
		err = cmd_status[index].name;
	}

	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
		return;

	if (rsp->flags & FCP_RSP_LEN_VALID)
		rsp_code = rsp->data.info.rsp_code;

	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}

/**
 * ibmvfc_relogin - Log back into the specified device
 * @sdev: scsi device struct
 *
 **/
static void ibmvfc_relogin(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (rport == tgt->rport) {
			ibmvfc_del_tgt(tgt);
			break;
		}
	}

	ibmvfc_reinit_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_scsi_done - Handle responses from commands
 * @evt: ibmvfc event to be handled
 *
 * Used as a callback when sending scsi cmds.
 **/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
	struct scsi_cmnd *cmnd = evt->cmnd;
	u32 rsp_len = 0;
	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);

	if (cmnd) {
		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
		else if (rsp->flags & FCP_RESID_UNDER)
			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
		else
			scsi_set_resid(cmnd, 0);

		if (vfc_cmd->status) {
			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);

			if (rsp->flags & FCP_RSP_LEN_VALID)
				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
				ibmvfc_relogin(cmnd->device);

			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
				cmnd->result = (DID_ERROR << 16);

			ibmvfc_log_error(evt);
		}

		if (!cmnd->result &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
			cmnd->result = (DID_ERROR << 16);

		scsi_dma_unmap(cmnd);
		scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_host_chkready - Check if the host can accept commands
 * @vhost: struct ibmvfc host
 *
 * Returns:
 *	0 if the host can accept commands / a SCSI result code if not
 **/
static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
{
	int result = 0;

	switch (vhost->state) {
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		result = DID_NO_CONNECT << 16;
		break;
	case IBMVFC_NO_CRQ:
	case IBMVFC_INITIALIZING:
	case IBMVFC_HALTED:
	case IBMVFC_LINK_DOWN:
		result = DID_REQUEUE << 16;
		break;
	case IBMVFC_ACTIVE:
		result = 0;
		break;
	}

	return result;
}
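
/*
 * Illustrative sketch (taken from the submission paths below): callers
 * treat a non-zero return as a ready-made SCSI result, e.g.:
 *
 *	if (unlikely((rc = ibmvfc_host_chkready(vhost)))) {
 *		cmnd->result = rc;
 *		scsi_done(cmnd);
 *		return 0;
 *	}
 */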

static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	size_t offset;

	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
	} else
		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &iu->lun);

	return vfc_cmd;
}

/**
 * ibmvfc_queuecommand - The queuecommand function of the scsi template
 * @shost: scsi host struct
 * @cmnd: struct scsi_cmnd to be executed
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	struct ibmvfc_cmd *vfc_cmd;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_event *evt;
	u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
	u16 scsi_channel;
	int rc;

	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		cmnd->result = rc;
		scsi_done(cmnd);
		return 0;
	}

	cmnd->result = (DID_OK << 16);
	if (vhost->using_channels) {
		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
		if (!evt)
			return SCSI_MLQUEUE_HOST_BUSY;

		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
	} else {
		evt = ibmvfc_get_event(&vhost->crq);
		if (!evt)
			return SCSI_MLQUEUE_HOST_BUSY;
	}

	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
	evt->cmnd = cmnd;

	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);

	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

	if (cmnd->flags & SCMD_TAGGED) {
		vfc_cmd->task_tag = cpu_to_be64(scsi_cmd_to_rq(cmnd)->tag);
		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
	}

	vfc_cmd->correlation = cpu_to_be64((u64)evt);

	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
		return ibmvfc_send_event(evt, vhost, 0);

	ibmvfc_free_event(evt);
	if (rc == -ENOMEM)
		return SCSI_MLQUEUE_HOST_BUSY;

	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
		scmd_printk(KERN_ERR, cmnd,
			    "Failed to map DMA buffer for command. rc=%d\n", rc);

	cmnd->result = DID_ERROR << 16;
	scsi_done(cmnd);
	return 0;
}

/**
 * ibmvfc_sync_completion - Signal that a synchronous command has completed
 * @evt: ibmvfc event struct
 *
 **/
static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
{
	/* copy the response back */
	if (evt->sync_iu)
		*evt->sync_iu = *evt->xfer_iu;

	complete(&evt->comp);
}
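
/*
 * Illustrative sketch (the pattern used by ibmvfc_bsg_plogi() and the
 * reset/abort paths below): synchronous callers point evt->sync_iu at a
 * stack response buffer, send, then sleep on evt->comp:
 *
 *	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 *	evt->sync_iu = &rsp_iu;
 *	init_completion(&evt->comp);
 *	rc = ibmvfc_send_event(evt, vhost, default_timeout);
 *	...
 *	wait_for_completion(&evt->comp);
 */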

/**
 * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
 * @evt: struct ibmvfc_event
 *
 **/
static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;

	ibmvfc_free_event(evt);
	vhost->aborting_passthru = 0;
	dev_info(vhost->dev, "Passthru command cancelled\n");
}

/**
 * ibmvfc_bsg_timeout - Handle a BSG timeout
 * @job: struct bsg_job that timed out
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_timeout(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	unsigned long port_id = (unsigned long)job->dd_data;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
		__ibmvfc_reset_host(vhost);
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	vhost->aborting_passthru = 1;
	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return -ENOMEM;
	}

	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	tmf->common.version = cpu_to_be32(1);
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(port_id);
	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc != 0) {
		vhost->aborting_passthru = 0;
		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
		rc = -EIO;
	} else
		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
			 port_id);

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	LEAVE;
	return rc;
}

/**
 * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
 * @vhost: struct ibmvfc_host to send command
 * @port_id: port ID to send command
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_target *tgt;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags;
	int rc = 0, issue_login = 1;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == port_id) {
			issue_login = 0;
			break;
		}
	}

	if (!issue_login)
		goto unlock_out;
	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
		goto unlock_out;

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		rc = -ENOMEM;
		goto unlock_out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	plogi->common.version = cpu_to_be32(1);
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(port_id);
	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc)
		return -EIO;

	wait_for_completion(&evt->comp);

	if (rsp_iu.plogi.common.status)
		rc = -EIO;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
	return rc;
}

/**
 * ibmvfc_bsg_request - Handle a BSG request
 * @job: struct bsg_job to be executed
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_bsg_request(struct bsg_job *job)
{
	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_event *evt;
	union ibmvfc_iu rsp_iu;
	unsigned long flags, port_id = -1;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	unsigned int code = bsg_request->msgcode;
	int rc = 0, req_seg, rsp_seg, issue_login = 0;
	u32 fc_flags, rsp_len;

	ENTER;
	bsg_reply->reply_payload_rcv_len = 0;
	if (rport)
		port_id = rport->port_id;

	switch (code) {
	case FC_BSG_HST_ELS_NOLOGIN:
		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
			bsg_request->rqst_data.h_els.port_id[2];
		fallthrough;
	case FC_BSG_RPT_ELS:
		fc_flags = IBMVFC_FC_ELS;
		break;
	case FC_BSG_HST_CT:
		issue_login = 1;
		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
			bsg_request->rqst_data.h_ct.port_id[2];
		fallthrough;
	case FC_BSG_RPT_CT:
		fc_flags = IBMVFC_FC_CT_IU;
		break;
	default:
		return -ENOTSUPP;
	}

	if (port_id == -1)
		return -EINVAL;
	if (!mutex_trylock(&vhost->passthru_mutex))
		return -EBUSY;

	job->dd_data = (void *)port_id;
	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (!req_seg) {
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (!rsp_seg) {
		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		mutex_unlock(&vhost->passthru_mutex);
		return -ENOMEM;
	}

	if (req_seg > 1 || rsp_seg > 1) {
		rc = -EINVAL;
		goto out;
	}

	if (issue_login)
		rc = ibmvfc_bsg_plogi(vhost, port_id);

	spin_lock_irqsave(vhost->host->host_lock, flags);

	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		goto out;
	}

	evt = ibmvfc_get_reserved_event(&vhost->crq);
	if (!evt) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		rc = -ENOMEM;
		goto out;
	}
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
	mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));

	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
				       offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));

	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
	mad->iu.flags = cpu_to_be32(fc_flags);
	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);

	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
	mad->iu.scsi_id = cpu_to_be64(port_id);
	mad->iu.tag = cpu_to_be64((u64)evt);
	rsp_len = be32_to_cpu(mad->iu.rsp.len);

	evt->sync_iu = &rsp_iu;
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, 0);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rc) {
		rc = -EIO;
		goto out;
	}

	wait_for_completion(&evt->comp);

	if (rsp_iu.passthru.common.status)
		rc = -EIO;
	else
		bsg_reply->reply_payload_rcv_len = rsp_len;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	bsg_reply->result = rc;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	rc = 0;
out:
	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	mutex_unlock(&vhost->passthru_mutex);
	LEAVE;
	return rc;
}

/**
 * ibmvfc_reset_device - Reset the device with the specified reset type
 * @sdev: scsi device to reset
 * @type: reset type
 * @desc: reset type description for log messages
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt = NULL;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rsp_rc = -EBUSY;
	unsigned long flags;
	int rsp_code = 0;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		if (vhost->using_channels)
			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
		else
			evt = ibmvfc_get_event(&vhost->crq);

		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}

		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = type;
		evt->sync_iu = &rsp_iu;

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
			    desc, rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
	wait_for_completion(&evt->comp);

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}

/**
 * ibmvfc_match_rport - Match function for specified remote port
 * @evt: ibmvfc event struct
 * @rport: device to match
 *
 * Returns:
 *	1 if event matches rport / 0 if event does not match rport
 **/
static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
	struct fc_rport *cmd_rport;

	if (evt->cmnd) {
		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
		if (cmd_rport == rport)
			return 1;
	}
	return 0;
}

/**
 * ibmvfc_match_target - Match function for specified target
 * @evt: ibmvfc event struct
 * @device: device to match (starget)
 *
 * Returns:
 *	1 if event matches starget / 0 if event does not match starget
 **/
static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
{
	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
		return 1;
	return 0;
}

/**
 * ibmvfc_match_lun - Match function for specified LUN
 * @evt: ibmvfc event struct
 * @device: device to match (sdev)
 *
 * Returns:
 *	1 if event matches sdev / 0 if event does not match sdev
 **/
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
{
	if (evt->cmnd && evt->cmnd->device == device)
		return 1;
	return 0;
}

/**
 * ibmvfc_event_is_free - Check if event is free or not
 * @evt: ibmvfc event struct
 *
 * Returns:
 *	true if the event is on its queue's free list / false otherwise
 **/
static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
{
	struct ibmvfc_event *loop_evt;

	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
		if (loop_evt == evt)
			return true;

	return false;
}

/**
 * ibmvfc_wait_for_ops - Wait for ops to complete
 * @vhost: ibmvfc host struct
 * @device: device to match (starget or sdev)
 * @match: match function
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
			       int (*match) (struct ibmvfc_event *, void *))
{
	struct ibmvfc_event *evt;
	DECLARE_COMPLETION_ONSTACK(comp);
	int wait, i, q_index, q_size;
	unsigned long flags;
	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
	struct ibmvfc_queue *queues;

	ENTER;
	if (vhost->mq_enabled && vhost->using_channels) {
		queues = vhost->scsi_scrqs.scrqs;
		q_size = vhost->scsi_scrqs.active_queues;
	} else {
		queues = &vhost->crq;
		q_size = 1;
	}

	do {
		wait = 0;
		spin_lock_irqsave(vhost->host->host_lock, flags);
		for (q_index = 0; q_index < q_size; q_index++) {
			spin_lock(&queues[q_index].l_lock);
			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
				evt = &queues[q_index].evt_pool.events[i];
				if (!ibmvfc_event_is_free(evt)) {
					if (match(evt, device)) {
						evt->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock(&queues[q_index].l_lock);
		}
		spin_unlock_irqrestore(vhost->host->host_lock, flags);

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;
				spin_lock_irqsave(vhost->host->host_lock, flags);
				for (q_index = 0; q_index < q_size; q_index++) {
					spin_lock(&queues[q_index].l_lock);
					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
						evt = &queues[q_index].evt_pool.events[i];
						if (!ibmvfc_event_is_free(evt)) {
							if (match(evt, device)) {
								evt->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock(&queues[q_index].l_lock);
				}
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				if (wait)
					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
				LEAVE;
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	LEAVE;
	return SUCCESS;
}
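
/*
 * Illustrative sketch (the EH handlers below all follow this shape):
 * cancel at the VIOS first, then wait for the matching events to drain:
 *
 *	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
 *	if (!cancel_rc)
 *		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 */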

static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
					    struct scsi_device *sdev,
					    int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;

	evt = ibmvfc_get_reserved_event(queue);
	if (!evt)
		return NULL;
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(rport->port_name);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &tmf->lun);
	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
	if (vhost->state == IBMVFC_ACTIVE)
		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
	else
		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);

	init_completion(&evt->comp);

	return evt;
}
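
/*
 * Illustrative sketch (taken from ibmvfc_cancel_all_mq() below): a cancel
 * TMF is built per queue and its response lands in that queue's cancel_rsp:
 *
 *	evt = ibmvfc_init_tmf(&queues[i], sdev, type);
 *	evt->sync_iu = &queues[i].cancel_rsp;
 *	ibmvfc_send_event(evt, vhost, default_timeout);
 */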

static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt, *temp;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int num_hwq, i;
	int fail = 0;
	LIST_HEAD(cancelq);
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	num_hwq = vhost->scsi_scrqs.active_queues;
	for (i = 0; i < num_hwq; i++) {
		spin_lock(queues[i].q_lock);
		spin_lock(&queues[i].l_lock);
		found_evt = NULL;
		list_for_each_entry(evt, &queues[i].sent, queue_list) {
			if (evt->cmnd && evt->cmnd->device == sdev) {
				found_evt = evt;
				break;
			}
		}
		spin_unlock(&queues[i].l_lock);

		if (found_evt && vhost->logged_in) {
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			if (!evt) {
				spin_unlock(queues[i].q_lock);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				return -ENOMEM;
			}
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (list_empty(&cancelq)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
				/* Host adapter most likely going through reset;
				 * return success so the caller will wait for the
				 * command being cancelled to get returned.
				 */
				break;
			default:
				fail = 1;
				break;
			}
		}
	}

	if (fail)
		return -EIO;

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	LEAVE;
	return 0;
}

static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp;
	int rsp_rc = -EBUSY;
	unsigned long flags;
	u16 status;

	ENTER;
	found_evt = NULL;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(&vhost->crq.l_lock);
	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}
	spin_unlock(&vhost->crq.l_lock);

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->logged_in) {
		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
		evt->sync_iu = &rsp;
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
		/* If failure is received, the host adapter is most likely going
		 * through reset; return success so the caller will wait for the
		 * command being cancelled to get returned.
		 */
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	wait_for_completion(&evt->comp);
	status = be16_to_cpu(rsp.mad_common.status);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (status != IBMVFC_MAD_SUCCESS) {
		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
		switch (status) {
		case IBMVFC_MAD_DRIVER_FAILED:
		case IBMVFC_MAD_CRQ_ERROR:
			/* Host adapter most likely going through reset;
			 * return success so the caller will wait for the
			 * command being cancelled to get returned.
			 */
			return 0;
		default:
			return -EIO;
		}
	}

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	return 0;
}

/**
 * ibmvfc_cancel_all - Cancel all outstanding commands to the device
 * @sdev: scsi device to cancel commands
 * @type: type of error recovery being performed
 *
 * This sends a cancel to the VIOS for the specified device. This does
 * NOT send any abort to the actual device. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);

	if (vhost->mq_enabled && vhost->using_channels)
		return ibmvfc_cancel_all_mq(sdev, type);
	else
		return ibmvfc_cancel_all_sq(sdev, type);
}

/**
 * ibmvfc_match_key - Match function for specified cancel key
 * @evt: ibmvfc event struct
 * @key: cancel key to match
 *
 * Returns:
 *	1 if event matches key / 0 if event does not match key
 **/
static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
	unsigned long cancel_key = (unsigned long)key;

	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
		return 1;
	return 0;
}

/**
 * ibmvfc_match_evt - Match function for specified event
 * @evt: ibmvfc event struct
 * @match: event to match
 *
 * Returns:
 *	1 if event matches / 0 if event does not match
 **/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
	if (evt == match)
		return 1;
	return 0;
}

/**
 * ibmvfc_abort_task_set - Abort outstanding commands to the device
 * @sdev: scsi device to abort commands
 *
 * This sends an Abort Task Set to the VIOS for the specified device. This does
 * NOT send any cancel to the VIOS. That must be done separately.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_cmd *tmf;
	struct ibmvfc_event *evt, *found_evt;
	union ibmvfc_iu rsp_iu;
	struct ibmvfc_fcp_cmd_iu *iu;
	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
	int rc, rsp_rc = -EBUSY;
	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
	int rsp_code = 0;

	found_evt = NULL;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(&vhost->crq.l_lock);
	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
		if (evt->cmnd && evt->cmnd->device == sdev) {
			found_evt = evt;
			break;
		}
	}
	spin_unlock(&vhost->crq.l_lock);

	if (!found_evt) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return 0;
	}

	if (vhost->state == IBMVFC_ACTIVE) {
		evt = ibmvfc_get_event(&vhost->crq);
		if (!evt) {
			spin_unlock_irqrestore(vhost->host->host_lock, flags);
			return -ENOMEM;
		}
		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
		iu = ibmvfc_get_fcp_iu(vhost, tmf);

		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
			tmf->target_wwpn = cpu_to_be64(rport->port_name);
		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
		evt->sync_iu = &rsp_iu;

		tmf->correlation = cpu_to_be64((u64)evt);

		init_completion(&evt->comp);
		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
	}

	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (rsp_rc != 0) {
		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
		return -EIO;
	}

	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
	timeout = wait_for_completion_timeout(&evt->comp, timeout);

	if (!timeout) {
		rc = ibmvfc_cancel_all(sdev, 0);
		if (!rc) {
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
			if (rc == SUCCESS)
				rc = 0;
		}

		if (rc) {
			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
			ibmvfc_reset_host(vhost);
			rsp_rc = -EIO;
			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);

			if (rc == SUCCESS)
				rsp_rc = 0;

			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
			if (rc != SUCCESS) {
				spin_lock_irqsave(vhost->host->host_lock, flags);
				ibmvfc_hard_reset_host(vhost);
				spin_unlock_irqrestore(vhost->host->host_lock, flags);
				rsp_rc = 0;
			}

			goto out;
		}
	}

	if (rsp_iu.cmd.status)
		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);

	if (rsp_code) {
		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
			rsp_code = fc_rsp->data.info.rsp_code;

		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
			    fc_rsp->scsi_status);
		rsp_rc = -EIO;
	} else
		sdev_printk(KERN_INFO, sdev, "Abort successful\n");

out:
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_free_event(evt);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return rsp_rc;
}

/**
 * ibmvfc_eh_abort_handler - Abort a command
 * @cmd: scsi command to abort
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	int cancel_rc, block_rc;
	int rc = FAILED;

	ENTER;
	block_rc = fc_block_scsi_eh(cmd);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
		ibmvfc_abort_task_set(sdev);
	} else
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);

	if (!cancel_rc)
		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_eh_device_reset_handler - Reset a single LUN
 * @cmd: scsi command struct
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	int cancel_rc, block_rc, reset_rc = 0;
	int rc = FAILED;

	ENTER;
	block_rc = fc_block_scsi_eh(cmd);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
	} else
		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);

	if (!cancel_rc && !reset_rc)
		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
 * @sdev: scsi device struct
 * @data: return code
 *
 **/
static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
{
	unsigned long *rc = data;
	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}

/**
 * ibmvfc_eh_target_reset_handler - Reset the target
 * @cmd: scsi command struct
 *
 * Returns:
 *	SUCCESS / FAST_IO_FAIL / FAILED
 **/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
	struct scsi_target *starget = scsi_target(cmd->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	int block_rc;
	int reset_rc = 0;
	int rc = FAILED;
	unsigned long cancel_rc = 0;
	bool tgt_reset = false;

	ENTER;
	block_rc = fc_block_rport(rport);
	ibmvfc_wait_while_resetting(vhost);
	if (block_rc != FAST_IO_FAIL) {
		struct scsi_device *sdev;

		shost_for_each_device(sdev, shost) {
			if ((sdev->channel != starget->channel) ||
			    (sdev->id != starget->id))
				continue;

			cancel_rc |= ibmvfc_cancel_all(sdev,
						       IBMVFC_TMF_TGT_RESET);
			if (!tgt_reset) {
				reset_rc = ibmvfc_reset_device(sdev,
					IBMVFC_TARGET_RESET, "target");
				tgt_reset = true;
			}
		}
	} else
		starget_for_each_device(starget, &cancel_rc,
					ibmvfc_dev_cancel_all_noreset);

	if (!cancel_rc && !reset_rc)
		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);

	if (block_rc == FAST_IO_FAIL && rc != FAILED)
		rc = FAST_IO_FAIL;

	LEAVE;
	return rc;
}

/**
 * ibmvfc_eh_host_reset_handler - Reset the connection to the server
 * @cmd: struct scsi_cmnd having problems
 *
 **/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int rc;
	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);

	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
	rc = ibmvfc_issue_fc_host_lip(vhost->host);

	return rc ? FAILED : SUCCESS;
}

/**
 * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
 * @rport: rport struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct fc_rport *dev_rport;
	struct scsi_device *sdev;
	struct ibmvfc_target *tgt;
	unsigned long rc, flags;
	unsigned int found;

	ENTER;
	shost_for_each_device(sdev, shost) {
		dev_rport = starget_to_rport(scsi_target(sdev));
		if (dev_rport != rport)
			continue;
		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
	}

	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);

	if (rc == FAILED)
		ibmvfc_issue_fc_host_lip(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	found = 0;
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == rport->port_id) {
			found++;
			break;
		}
	}

	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
		/*
		 * If we get here, that means we previously attempted to send
		 * an implicit logout to the target but it failed, most likely
		 * due to I/O being pending, so we need to send it again
		 */
		ibmvfc_del_tgt(tgt);
		ibmvfc_reinit_host(vhost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
	LEAVE;
}

static const struct ibmvfc_async_desc ae_desc[] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};

static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};

/**
 * ibmvfc_get_ae_desc - Get text description for async event
 * @ae: async event
 *
 **/
static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
		if (ae_desc[i].ae == ae)
			return &ae_desc[i];

	return &unknown_ae;
}
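
/*
 * Illustrative sketch (see ibmvfc_handle_async() below): the returned
 * descriptor supplies both the event name and the verbosity at which it
 * is logged:
 *
 *	const struct ibmvfc_async_desc *desc =
 *		ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
 *	ibmvfc_log(vhost, desc->log_level, "%s event received. ...\n",
 *		   desc->desc, ...);
 */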
3160
3161 static const struct {
3162 enum ibmvfc_ae_link_state state;
3163 const char *desc;
3164 } link_desc [] = {
3165 { IBMVFC_AE_LS_LINK_UP, " link up" },
3166 { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
3167 { IBMVFC_AE_LS_LINK_DOWN, " link down" },
3168 { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
3169 };
3170
3171 /**
3172 * ibmvfc_get_link_state - Get text description for link state
3173 * @state: link state
3174 *
3175 **/
ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)3176 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3177 {
3178 int i;
3179
3180 for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3181 if (link_desc[i].state == state)
3182 return link_desc[i].desc;
3183
3184 return "";
3185 }
3186
3187 /**
3188 * ibmvfc_handle_async - Handle an async event from the adapter
3189 * @crq: crq to process
3190 * @vhost: ibmvfc host struct
3191 *
3192 **/
ibmvfc_handle_async(struct ibmvfc_async_crq * crq,struct ibmvfc_host * vhost)3193 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3194 struct ibmvfc_host *vhost)
3195 {
3196 const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3197 struct ibmvfc_target *tgt;
3198
3199 ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3200 " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3201 be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3202 ibmvfc_get_link_state(crq->link_state));
3203
3204 switch (be64_to_cpu(crq->event)) {
3205 case IBMVFC_AE_RESUME:
3206 switch (crq->link_state) {
3207 case IBMVFC_AE_LS_LINK_DOWN:
3208 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3209 break;
3210 case IBMVFC_AE_LS_LINK_DEAD:
3211 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3212 break;
3213 case IBMVFC_AE_LS_LINK_UP:
3214 case IBMVFC_AE_LS_LINK_BOUNCED:
3215 default:
3216 vhost->events_to_log |= IBMVFC_AE_LINKUP;
3217 vhost->delay_init = 1;
3218 __ibmvfc_reset_host(vhost);
3219 break;
3220 }
3221
3222 break;
3223 case IBMVFC_AE_LINK_UP:
3224 vhost->events_to_log |= IBMVFC_AE_LINKUP;
3225 vhost->delay_init = 1;
3226 __ibmvfc_reset_host(vhost);
3227 break;
3228 case IBMVFC_AE_SCN_FABRIC:
3229 case IBMVFC_AE_SCN_DOMAIN:
3230 vhost->events_to_log |= IBMVFC_AE_RSCN;
3231 if (vhost->state < IBMVFC_HALTED) {
3232 vhost->delay_init = 1;
3233 __ibmvfc_reset_host(vhost);
3234 }
3235 break;
3236 case IBMVFC_AE_SCN_NPORT:
3237 case IBMVFC_AE_SCN_GROUP:
3238 vhost->events_to_log |= IBMVFC_AE_RSCN;
3239 ibmvfc_reinit_host(vhost);
3240 break;
3241 case IBMVFC_AE_ELS_LOGO:
3242 case IBMVFC_AE_ELS_PRLO:
3243 case IBMVFC_AE_ELS_PLOGI:
3244 list_for_each_entry(tgt, &vhost->targets, queue) {
3245 if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3246 break;
3247 if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3248 continue;
3249 if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3250 continue;
3251 if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3252 continue;
3253 if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3254 tgt->logo_rcvd = 1;
3255 if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3256 ibmvfc_del_tgt(tgt);
3257 ibmvfc_reinit_host(vhost);
3258 }
3259 }
3260 break;
3261 case IBMVFC_AE_LINK_DOWN:
3262 case IBMVFC_AE_ADAPTER_FAILED:
3263 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3264 break;
3265 case IBMVFC_AE_LINK_DEAD:
3266 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3267 break;
3268 case IBMVFC_AE_HALT:
3269 ibmvfc_link_down(vhost, IBMVFC_HALTED);
3270 break;
3271 default:
3272 dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3273 break;
3274 }
3275 }
3276
3277 /**
3278 * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3279 * @crq: Command/Response queue
3280 * @vhost: ibmvfc host struct
3281 * @evt_doneq: Event done queue
3282 *
3283 **/
ibmvfc_handle_crq(struct ibmvfc_crq * crq,struct ibmvfc_host * vhost,struct list_head * evt_doneq)3284 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3285 struct list_head *evt_doneq)
3286 {
3287 long rc;
3288 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3289
3290 switch (crq->valid) {
3291 case IBMVFC_CRQ_INIT_RSP:
3292 switch (crq->format) {
3293 case IBMVFC_CRQ_INIT:
3294 dev_info(vhost->dev, "Partner initialized\n");
3295 /* Send back a response */
3296 rc = ibmvfc_send_crq_init_complete(vhost);
3297 if (rc == 0)
3298 ibmvfc_init_host(vhost);
3299 else
3300 dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3301 break;
3302 case IBMVFC_CRQ_INIT_COMPLETE:
3303 dev_info(vhost->dev, "Partner initialization complete\n");
3304 ibmvfc_init_host(vhost);
3305 break;
3306 default:
3307 dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3308 }
3309 return;
3310 case IBMVFC_CRQ_XPORT_EVENT:
3311 vhost->state = IBMVFC_NO_CRQ;
3312 vhost->logged_in = 0;
3313 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3314 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3315 /* We need to re-setup the interpartition connection */
3316 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3317 vhost->client_migrated = 1;
3318
3319 scsi_block_requests(vhost->host);
3320 ibmvfc_purge_requests(vhost, DID_REQUEUE);
3321 ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN);
3322 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3323 wake_up(&vhost->work_wait_q);
3324 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3325 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3326 ibmvfc_purge_requests(vhost, DID_ERROR);
3327 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3328 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3329 } else {
3330 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3331 }
3332 return;
3333 case IBMVFC_CRQ_CMD_RSP:
3334 break;
3335 default:
3336 dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3337 return;
3338 }
3339
3340 if (crq->format == IBMVFC_ASYNC_EVENT)
3341 return;
3342
3343 /* The only kind of payload CRQs we should get are responses to
3344 * things we send. Make sure this response is to something we
3345 * actually sent
3346 */
3347 if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3348 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3349 crq->ioba);
3350 return;
3351 }
3352
3353 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3354 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3355 crq->ioba);
3356 return;
3357 }
3358
3359 spin_lock(&evt->queue->l_lock);
3360 list_move_tail(&evt->queue_list, evt_doneq);
3361 spin_unlock(&evt->queue->l_lock);
3362 }
3363
3364 /**
3365 * ibmvfc_scan_finished - Check if the device scan is done.
3366 * @shost: scsi host struct
3367 * @time: current elapsed time
3368 *
3369 * Returns:
3370 * 0 if scan is not done / 1 if scan is done
3371 **/
ibmvfc_scan_finished(struct Scsi_Host * shost,unsigned long time)3372 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3373 {
3374 unsigned long flags;
3375 struct ibmvfc_host *vhost = shost_priv(shost);
3376 int done = 0;
3377
3378 spin_lock_irqsave(shost->host_lock, flags);
3379 if (!vhost->scan_timeout)
3380 done = 1;
3381 else if (time >= (vhost->scan_timeout * HZ)) {
3382 dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3383 "continuing initialization\n", vhost->scan_timeout);
3384 done = 1;
3385 }
3386
3387 if (vhost->scan_complete) {
3388 vhost->scan_timeout = init_timeout;
3389 done = 1;
3390 }
3391 spin_unlock_irqrestore(shost->host_lock, flags);
3392 return done;
3393 }
3394
3395 /**
3396 * ibmvfc_sdev_init - Setup the device's task set value
3397 * @sdev: struct scsi_device device to configure
3398 *
3399 * Set the device's task set value so that error handling works as
3400 * expected.
3401 *
3402 * Returns:
3403 * 0 on success / -ENXIO if device does not exist
3404 **/
ibmvfc_sdev_init(struct scsi_device * sdev)3405 static int ibmvfc_sdev_init(struct scsi_device *sdev)
3406 {
3407 struct Scsi_Host *shost = sdev->host;
3408 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3409 struct ibmvfc_host *vhost = shost_priv(shost);
3410 unsigned long flags = 0;
3411
3412 if (!rport || fc_remote_port_chkready(rport))
3413 return -ENXIO;
3414
3415 spin_lock_irqsave(shost->host_lock, flags);
3416 sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3417 spin_unlock_irqrestore(shost->host_lock, flags);
3418 return 0;
3419 }
3420
3421 /**
3422 * ibmvfc_target_alloc - Setup the target's task set value
3423 * @starget: struct scsi_target
3424 *
3425 * Set the target's task set value so that error handling works as
3426 * expected.
3427 *
3428 * Returns:
3429 * 0 on success / -ENXIO if device does not exist
3430 **/
3431 static int ibmvfc_target_alloc(struct scsi_target *starget)
3432 {
3433 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3434 struct ibmvfc_host *vhost = shost_priv(shost);
3435 unsigned long flags = 0;
3436
3437 spin_lock_irqsave(shost->host_lock, flags);
3438 starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3439 spin_unlock_irqrestore(shost->host_lock, flags);
3440 return 0;
3441 }
3442
3443 /**
3444 * ibmvfc_sdev_configure - Configure the device
3445 * @sdev: struct scsi_device device to configure
3446 * @lim: Request queue limits
3447 *
3448 * Enable allow_restart for a device if it is a disk. Adjust the
3449 * queue_depth here also.
3450 *
3451 * Returns:
3452 * 0
3453 **/
3454 static int ibmvfc_sdev_configure(struct scsi_device *sdev,
3455 struct queue_limits *lim)
3456 {
3457 struct Scsi_Host *shost = sdev->host;
3458 unsigned long flags = 0;
3459
3460 spin_lock_irqsave(shost->host_lock, flags);
3461 if (sdev->type == TYPE_DISK) {
3462 sdev->allow_restart = 1;
3463 blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3464 }
3465 spin_unlock_irqrestore(shost->host_lock, flags);
3466 return 0;
3467 }
3468
3469 /**
3470 * ibmvfc_change_queue_depth - Change the device's queue depth
3471 * @sdev: scsi device struct
3472 * @qdepth: depth to set
3473 *
3474 * Return value:
3475 * actual depth set
3476 **/
3477 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3478 {
3479 if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3480 qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3481
3482 return scsi_change_queue_depth(sdev, qdepth);
3483 }
3484
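/**
 * ibmvfc_show_host_partition_name - Show the partition name from NPIV login
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/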
3485 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3486 struct device_attribute *attr, char *buf)
3487 {
3488 struct Scsi_Host *shost = class_to_shost(dev);
3489 struct ibmvfc_host *vhost = shost_priv(shost);
3490
3491 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.partition_name);
3492 }
3493
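/**
 * ibmvfc_show_host_device_name - Show the device name from NPIV login
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/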
3494 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3495 struct device_attribute *attr, char *buf)
3496 {
3497 struct Scsi_Host *shost = class_to_shost(dev);
3498 struct ibmvfc_host *vhost = shost_priv(shost);
3499
3500 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.device_name);
3501 }
3502
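/**
 * ibmvfc_show_host_loc_code - Show the port location code from NPIV login
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/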
3503 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3504 struct device_attribute *attr, char *buf)
3505 {
3506 struct Scsi_Host *shost = class_to_shost(dev);
3507 struct ibmvfc_host *vhost = shost_priv(shost);
3508
3509 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.port_loc_code);
3510 }
3511
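/**
 * ibmvfc_show_host_drc_name - Show the DRC name from NPIV login
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/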
3512 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3513 struct device_attribute *attr, char *buf)
3514 {
3515 struct Scsi_Host *shost = class_to_shost(dev);
3516 struct ibmvfc_host *vhost = shost_priv(shost);
3517
3518 return sysfs_emit(buf, "%s\n", vhost->login_buf->resp.drc_name);
3519 }
3520
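/**
 * ibmvfc_show_host_npiv_version - Show the NPIV version from the login response
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/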
3521 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3522 struct device_attribute *attr, char *buf)
3523 {
3524 struct Scsi_Host *shost = class_to_shost(dev);
3525 struct ibmvfc_host *vhost = shost_priv(shost);
3526 return sysfs_emit(buf, "%d\n",
3527 be32_to_cpu(vhost->login_buf->resp.version));
3528 }
3529
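/**
 * ibmvfc_show_host_capabilities - Show the adapter capabilities from NPIV login
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/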
3530 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3531 struct device_attribute *attr, char *buf)
3532 {
3533 struct Scsi_Host *shost = class_to_shost(dev);
3534 struct ibmvfc_host *vhost = shost_priv(shost);
3535 return sysfs_emit(buf, "%llx\n",
3536 be64_to_cpu(vhost->login_buf->resp.capabilities));
3537 }
3538
3539 /**
3540 * ibmvfc_show_log_level - Show the adapter's error logging level
3541 * @dev: class device struct
3542 * @attr: unused
3543 * @buf: buffer
3544 *
3545 * Return value:
3546 * number of bytes printed to buffer
3547 **/
3548 static ssize_t ibmvfc_show_log_level(struct device *dev,
3549 struct device_attribute *attr, char *buf)
3550 {
3551 struct Scsi_Host *shost = class_to_shost(dev);
3552 struct ibmvfc_host *vhost = shost_priv(shost);
3553 unsigned long flags = 0;
3554 int len;
3555
3556 spin_lock_irqsave(shost->host_lock, flags);
3557 len = sysfs_emit(buf, "%d\n", vhost->log_level);
3558 spin_unlock_irqrestore(shost->host_lock, flags);
3559 return len;
3560 }
3561
3562 /**
3563 * ibmvfc_store_log_level - Change the adapter's error logging level
3564 * @dev: class device struct
3565 * @attr: unused
3566 * @buf: buffer
3567 * @count: buffer size
3568 *
3569 * Return value:
3570 * number of bytes consumed from buffer
3571 **/
3572 static ssize_t ibmvfc_store_log_level(struct device *dev,
3573 struct device_attribute *attr,
3574 const char *buf, size_t count)
3575 {
3576 struct Scsi_Host *shost = class_to_shost(dev);
3577 struct ibmvfc_host *vhost = shost_priv(shost);
3578 unsigned long flags = 0;
3579
3580 spin_lock_irqsave(shost->host_lock, flags);
3581 vhost->log_level = simple_strtoul(buf, NULL, 10);
3582 spin_unlock_irqrestore(shost->host_lock, flags);
3583 return strlen(buf);
3584 }
3585
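/**
 * ibmvfc_show_scsi_channels - Show the desired number of SCSI channels
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 *
 * Return value:
 * number of bytes printed to buffer
 **/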
3586 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3587 struct device_attribute *attr, char *buf)
3588 {
3589 struct Scsi_Host *shost = class_to_shost(dev);
3590 struct ibmvfc_host *vhost = shost_priv(shost);
3591 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3592 unsigned long flags = 0;
3593 int len;
3594
3595 spin_lock_irqsave(shost->host_lock, flags);
3596 len = sysfs_emit(buf, "%d\n", scsi->desired_queues);
3597 spin_unlock_irqrestore(shost->host_lock, flags);
3598 return len;
3599 }
3600
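/**
 * ibmvfc_store_scsi_channels - Change the desired number of SCSI channels
 * @dev: class device struct
 * @attr: unused
 * @buf: buffer
 * @count: buffer size
 *
 * The request is capped at the number of hardware queues, and the host is
 * hard reset so the new channel count can be renegotiated with the VIOS.
 *
 * Return value:
 * number of bytes consumed from buffer
 **/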
3601 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3602 struct device_attribute *attr,
3603 const char *buf, size_t count)
3604 {
3605 struct Scsi_Host *shost = class_to_shost(dev);
3606 struct ibmvfc_host *vhost = shost_priv(shost);
3607 struct ibmvfc_channels *scsi = &vhost->scsi_scrqs;
3608 unsigned long flags = 0;
3609 unsigned int channels;
3610
3611 spin_lock_irqsave(shost->host_lock, flags);
3612 channels = simple_strtoul(buf, NULL, 10);
3613 scsi->desired_queues = min(channels, shost->nr_hw_queues);
3614 ibmvfc_hard_reset_host(vhost);
3615 spin_unlock_irqrestore(shost->host_lock, flags);
3616 return strlen(buf);
3617 }
3618
3619 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3620 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3621 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3622 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3623 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3624 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3625 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3626 ibmvfc_show_log_level, ibmvfc_store_log_level);
3627 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3628 ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
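
/*
 * The attributes above appear under /sys/class/scsi_host/hostN/ for each
 * ibmvfc host. For example, with a hypothetical host0:
 *
 *   cat /sys/class/scsi_host/host0/nr_scsi_channels
 *   echo 4 > /sys/class/scsi_host/host0/nr_scsi_channels
 *
 * Writing nr_scsi_channels hard resets the adapter so the new channel
 * count can be renegotiated with the VIOS.
 */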
3629
3630 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3631 /**
3632 * ibmvfc_read_trace - Dump the adapter trace
3633 * @filp: open sysfs file
3634 * @kobj: kobject struct
3635 * @bin_attr: bin_attribute struct
3636 * @buf: buffer
3637 * @off: offset
3638 * @count: buffer size
3639 *
3640 * Return value:
3641 * number of bytes printed to buffer
3642 **/
3643 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3644 const struct bin_attribute *bin_attr,
3645 char *buf, loff_t off, size_t count)
3646 {
3647 struct device *dev = kobj_to_dev(kobj);
3648 struct Scsi_Host *shost = class_to_shost(dev);
3649 struct ibmvfc_host *vhost = shost_priv(shost);
3650 unsigned long flags = 0;
3651 int size = IBMVFC_TRACE_SIZE;
3652 char *src = (char *)vhost->trace;
3653
3654 if (off > size)
3655 return 0;
3656 if (off + count > size) {
3657 size -= off;
3658 count = size;
3659 }
3660
3661 spin_lock_irqsave(shost->host_lock, flags);
3662 memcpy(buf, &src[off], count);
3663 spin_unlock_irqrestore(shost->host_lock, flags);
3664 return count;
3665 }
3666
3667 static const struct bin_attribute ibmvfc_trace_attr = {
3668 .attr = {
3669 .name = "trace",
3670 .mode = S_IRUGO,
3671 },
3672 .size = 0,
3673 .read_new = ibmvfc_read_trace,
3674 };
3675 #endif
3676
3677 static struct attribute *ibmvfc_host_attrs[] = {
3678 &dev_attr_partition_name.attr,
3679 &dev_attr_device_name.attr,
3680 &dev_attr_port_loc_code.attr,
3681 &dev_attr_drc_name.attr,
3682 &dev_attr_npiv_version.attr,
3683 &dev_attr_capabilities.attr,
3684 &dev_attr_log_level.attr,
3685 &dev_attr_nr_scsi_channels.attr,
3686 NULL
3687 };
3688
3689 ATTRIBUTE_GROUPS(ibmvfc_host);
3690
3691 static const struct scsi_host_template driver_template = {
3692 .module = THIS_MODULE,
3693 .name = "IBM POWER Virtual FC Adapter",
3694 .proc_name = IBMVFC_NAME,
3695 .queuecommand = ibmvfc_queuecommand,
3696 .eh_timed_out = fc_eh_timed_out,
3697 .eh_abort_handler = ibmvfc_eh_abort_handler,
3698 .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3699 .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3700 .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3701 .sdev_init = ibmvfc_sdev_init,
3702 .sdev_configure = ibmvfc_sdev_configure,
3703 .target_alloc = ibmvfc_target_alloc,
3704 .scan_finished = ibmvfc_scan_finished,
3705 .change_queue_depth = ibmvfc_change_queue_depth,
3706 .cmd_per_lun = 16,
3707 .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3708 .this_id = -1,
3709 .sg_tablesize = SG_ALL,
3710 .max_sectors = IBMVFC_MAX_SECTORS,
3711 .shost_groups = ibmvfc_host_groups,
3712 .track_queue_depth = 1,
3713 };
3714
3715 /**
3716 * ibmvfc_next_async_crq - Returns the next entry in async queue
3717 * @vhost: ibmvfc host struct
3718 *
3719 * Returns:
3720 * Pointer to next entry in queue / NULL if empty
3721 **/
3722 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3723 {
3724 struct ibmvfc_queue *async_crq = &vhost->async_crq;
3725 struct ibmvfc_async_crq *crq;
3726
3727 crq = &async_crq->msgs.async[async_crq->cur];
3728 if (crq->valid & 0x80) {
3729 if (++async_crq->cur == async_crq->size)
3730 async_crq->cur = 0;
3731 rmb();
3732 } else
3733 crq = NULL;
3734
3735 return crq;
3736 }
3737
3738 /**
3739 * ibmvfc_next_crq - Returns the next entry in message queue
3740 * @vhost: ibmvfc host struct
3741 *
3742 * Returns:
3743 * Pointer to next entry in queue / NULL if empty
3744 **/
3745 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3746 {
3747 struct ibmvfc_queue *queue = &vhost->crq;
3748 struct ibmvfc_crq *crq;
3749
3750 crq = &queue->msgs.crq[queue->cur];
3751 if (crq->valid & 0x80) {
3752 if (++queue->cur == queue->size)
3753 queue->cur = 0;
3754 rmb();
3755 } else
3756 crq = NULL;
3757
3758 return crq;
3759 }
3760
3761 /**
3762 * ibmvfc_interrupt - Interrupt handler
3763 * @irq: number of irq to handle, not used
3764 * @dev_instance: ibmvfc_host that received interrupt
3765 *
3766 * Returns:
3767 * IRQ_HANDLED
3768 **/
3769 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3770 {
3771 struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3772 unsigned long flags;
3773
3774 spin_lock_irqsave(vhost->host->host_lock, flags);
3775 vio_disable_interrupts(to_vio_dev(vhost->dev));
3776 tasklet_schedule(&vhost->tasklet);
3777 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3778 return IRQ_HANDLED;
3779 }
3780
3781 /**
3782 * ibmvfc_tasklet - Interrupt handler tasklet
3783 * @data: ibmvfc host struct
3784 *
3785 * Returns:
3786 * Nothing
3787 **/
3788 static void ibmvfc_tasklet(void *data)
3789 {
3790 struct ibmvfc_host *vhost = data;
3791 struct vio_dev *vdev = to_vio_dev(vhost->dev);
3792 struct ibmvfc_crq *crq;
3793 struct ibmvfc_async_crq *async;
3794 struct ibmvfc_event *evt, *temp;
3795 unsigned long flags;
3796 int done = 0;
3797 LIST_HEAD(evt_doneq);
3798
3799 spin_lock_irqsave(vhost->host->host_lock, flags);
3800 spin_lock(vhost->crq.q_lock);
3801 while (!done) {
3802 /* Pull all the valid messages off the async CRQ */
3803 while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3804 ibmvfc_handle_async(async, vhost);
3805 async->valid = 0;
3806 wmb();
3807 }
3808
3809 /* Pull all the valid messages off the CRQ */
3810 while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3811 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3812 crq->valid = 0;
3813 wmb();
3814 }
3815
3816 vio_enable_interrupts(vdev);
3817 if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3818 vio_disable_interrupts(vdev);
3819 ibmvfc_handle_async(async, vhost);
3820 async->valid = 0;
3821 wmb();
3822 } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3823 vio_disable_interrupts(vdev);
3824 ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3825 crq->valid = 0;
3826 wmb();
3827 } else
3828 done = 1;
3829 }
3830
3831 spin_unlock(vhost->crq.q_lock);
3832 spin_unlock_irqrestore(vhost->host->host_lock, flags);
3833
3834 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3835 del_timer(&evt->timer);
3836 list_del(&evt->queue_list);
3837 ibmvfc_trc_end(evt);
3838 evt->done(evt);
3839 }
3840 }
3841
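/**
 * ibmvfc_toggle_scrq_irq - Enable or disable a sub-CRQ interrupt source
 * @scrq: ibmvfc sub-CRQ queue struct
 * @enable: 1 to enable the interrupt / 0 to disable it
 *
 * Uses the H_VIOCTL hcall to toggle the VIO interrupt for the sub-CRQ.
 *
 * Returns:
 * hcall return code (0 on success)
 **/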
3842 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3843 {
3844 struct device *dev = scrq->vhost->dev;
3845 struct vio_dev *vdev = to_vio_dev(dev);
3846 unsigned long rc;
3847 int irq_action = H_ENABLE_VIO_INTERRUPT;
3848
3849 if (!enable)
3850 irq_action = H_DISABLE_VIO_INTERRUPT;
3851
3852 rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3853 scrq->hw_irq, 0, 0);
3854
3855 if (rc)
3856 dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3857 enable ? "enable" : "disable", scrq->hwq_id, rc);
3858
3859 return rc;
3860 }
3861
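/**
 * ibmvfc_handle_scrq - Handle a CRQ entry received on a sub-CRQ
 * @crq: ibmvfc_crq entry to handle
 * @vhost: ibmvfc host struct
 * @evt_doneq: list on which to collect completed events
 *
 * Validates that the entry is a response to an event we actually sent and
 * is not a duplicate, then moves the event to @evt_doneq so it can be
 * completed outside the queue lock. Transport events are ignored here;
 * they are handled on the main CRQ.
 **/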
3862 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3863 struct list_head *evt_doneq)
3864 {
3865 struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3866
3867 switch (crq->valid) {
3868 case IBMVFC_CRQ_CMD_RSP:
3869 break;
3870 case IBMVFC_CRQ_XPORT_EVENT:
3871 return;
3872 default:
3873 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3874 return;
3875 }
3876
3877 /* The only kind of payload CRQs we should get are responses to
3878 * things we send. Make sure this response is to something we
3879 * actually sent
3880 */
3881 if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3882 dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3883 crq->ioba);
3884 return;
3885 }
3886
3887 if (unlikely(atomic_dec_if_positive(&evt->active))) {
3888 dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3889 crq->ioba);
3890 return;
3891 }
3892
3893 spin_lock(&evt->queue->l_lock);
3894 list_move_tail(&evt->queue_list, evt_doneq);
3895 spin_unlock(&evt->queue->l_lock);
3896 }
3897
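/**
 * ibmvfc_next_scrq - Returns the next entry in the sub-CRQ
 * @scrq: ibmvfc sub-CRQ queue struct
 *
 * Returns:
 * Pointer to next entry in queue / NULL if empty
 **/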
3898 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3899 {
3900 struct ibmvfc_crq *crq;
3901
3902 crq = &scrq->msgs.scrq[scrq->cur].crq;
3903 if (crq->valid & 0x80) {
3904 if (++scrq->cur == scrq->size)
3905 scrq->cur = 0;
3906 rmb();
3907 } else
3908 crq = NULL;
3909
3910 return crq;
3911 }
3912
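/**
 * ibmvfc_drain_sub_crq - Process all pending entries on a sub-CRQ
 * @scrq: ibmvfc sub-CRQ queue struct
 *
 * Pulls all valid entries off the sub-CRQ, re-enables the interrupt, and
 * re-checks the queue to close the race with an entry arriving as the
 * interrupt is re-enabled. Completed events are finished outside the
 * queue lock.
 **/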
3913 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3914 {
3915 struct ibmvfc_crq *crq;
3916 struct ibmvfc_event *evt, *temp;
3917 unsigned long flags;
3918 int done = 0;
3919 LIST_HEAD(evt_doneq);
3920
3921 spin_lock_irqsave(scrq->q_lock, flags);
3922 while (!done) {
3923 while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3924 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3925 crq->valid = 0;
3926 wmb();
3927 }
3928
3929 ibmvfc_toggle_scrq_irq(scrq, 1);
3930 if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3931 ibmvfc_toggle_scrq_irq(scrq, 0);
3932 ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3933 crq->valid = 0;
3934 wmb();
3935 } else
3936 done = 1;
3937 }
3938 spin_unlock_irqrestore(scrq->q_lock, flags);
3939
3940 list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3941 del_timer(&evt->timer);
3942 list_del(&evt->queue_list);
3943 ibmvfc_trc_end(evt);
3944 evt->done(evt);
3945 }
3946 }
3947
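/**
 * ibmvfc_interrupt_mq - Interrupt handler for a sub-CRQ
 * @irq: number of irq to handle, not used
 * @scrq_instance: ibmvfc_queue that received interrupt
 *
 * Returns:
 * IRQ_HANDLED
 **/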
3948 static irqreturn_t ibmvfc_interrupt_mq(int irq, void *scrq_instance)
3949 {
3950 struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3951
3952 ibmvfc_toggle_scrq_irq(scrq, 0);
3953 ibmvfc_drain_sub_crq(scrq);
3954
3955 return IRQ_HANDLED;
3956 }
3957
3958 /**
3959 * ibmvfc_init_tgt - Set the next init job step for the target
3960 * @tgt: ibmvfc target struct
3961 * @job_step: job step to perform
3962 *
3963 **/
3964 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3965 void (*job_step) (struct ibmvfc_target *))
3966 {
3967 if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3968 tgt->job_step = job_step;
3969 wake_up(&tgt->vhost->work_wait_q);
3970 }
3971
3972 /**
3973 * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3974 * @tgt: ibmvfc target struct
3975 * @job_step: initialization job step
3976 *
3977 * Returns: 1 if step will be retried / 0 if not
3978 *
3979 **/
3980 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3981 void (*job_step) (struct ibmvfc_target *))
3982 {
3983 if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3984 ibmvfc_del_tgt(tgt);
3985 wake_up(&tgt->vhost->work_wait_q);
3986 return 0;
3987 } else
3988 ibmvfc_init_tgt(tgt, job_step);
3989 return 1;
3990 }
3991
3992 /* PRLI response code table {code, retry, logged_in}, defined in FC-LS */
3993 static const struct {
3994 int code;
3995 int retry;
3996 int logged_in;
3997 } prli_rsp [] = {
3998 { 0, 1, 0 },
3999 { 1, 0, 1 },
4000 { 2, 1, 0 },
4001 { 3, 1, 0 },
4002 { 4, 0, 0 },
4003 { 5, 0, 0 },
4004 { 6, 0, 1 },
4005 { 7, 0, 0 },
4006 { 8, 1, 0 },
4007 };
4008
4009 /**
4010 * ibmvfc_get_prli_rsp - Find PRLI response index
4011 * @flags: PRLI response flags
4012 *
4013 **/
4014 static int ibmvfc_get_prli_rsp(u16 flags)
4015 {
4016 int i;
4017 int code = (flags & 0x0f00) >> 8;
4018
4019 for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
4020 if (prli_rsp[i].code == code)
4021 return i;
4022
4023 return 0;
4024 }
4025
4026 /**
4027 * ibmvfc_tgt_prli_done - Completion handler for Process Login
4028 * @evt: ibmvfc event struct
4029 *
4030 **/
4031 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
4032 {
4033 struct ibmvfc_target *tgt = evt->tgt;
4034 struct ibmvfc_host *vhost = evt->vhost;
4035 struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
4036 struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
4037 u32 status = be16_to_cpu(rsp->common.status);
4038 int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
4039
4040 vhost->discovery_threads--;
4041 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4042 switch (status) {
4043 case IBMVFC_MAD_SUCCESS:
4044 tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
4045 parms->type, parms->flags, parms->service_parms);
4046
4047 if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
4048 index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
4049 if (prli_rsp[index].logged_in) {
4050 if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
4051 tgt->need_login = 0;
4052 tgt->ids.roles = 0;
4053 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
4054 tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
4055 if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
4056 tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
4057 tgt->add_rport = 1;
4058 } else
4059 ibmvfc_del_tgt(tgt);
4060 } else if (prli_rsp[index].retry)
4061 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4062 else
4063 ibmvfc_del_tgt(tgt);
4064 } else
4065 ibmvfc_del_tgt(tgt);
4066 break;
4067 case IBMVFC_MAD_DRIVER_FAILED:
4068 break;
4069 case IBMVFC_MAD_CRQ_ERROR:
4070 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4071 break;
4072 case IBMVFC_MAD_FAILED:
4073 default:
4074 if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
4075 be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
4076 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4077 else if (tgt->logo_rcvd)
4078 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4079 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4080 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
4081 else
4082 ibmvfc_del_tgt(tgt);
4083
4084 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
4085 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4086 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
4087 break;
4088 }
4089
4090 kref_put(&tgt->kref, ibmvfc_release_tgt);
4091 ibmvfc_free_event(evt);
4092 wake_up(&vhost->work_wait_q);
4093 }
4094
4095 /**
4096 * ibmvfc_tgt_send_prli - Send a process login
4097 * @tgt: ibmvfc target struct
4098 *
4099 **/
4100 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
4101 {
4102 struct ibmvfc_process_login *prli;
4103 struct ibmvfc_host *vhost = tgt->vhost;
4104 struct ibmvfc_event *evt;
4105
4106 if (vhost->discovery_threads >= disc_threads)
4107 return;
4108
4109 kref_get(&tgt->kref);
4110 evt = ibmvfc_get_reserved_event(&vhost->crq);
4111 if (!evt) {
4112 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4113 kref_put(&tgt->kref, ibmvfc_release_tgt);
4114 __ibmvfc_reset_host(vhost);
4115 return;
4116 }
4117 vhost->discovery_threads++;
4118 ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4119 evt->tgt = tgt;
4120 prli = &evt->iu.prli;
4121 memset(prli, 0, sizeof(*prli));
4122 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4123 prli->common.version = cpu_to_be32(2);
4124 prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4125 } else {
4126 prli->common.version = cpu_to_be32(1);
4127 }
4128 prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4129 prli->common.length = cpu_to_be16(sizeof(*prli));
4130 prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4131
4132 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4133 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4134 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4135 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4136
4137 if (cls3_error)
4138 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4139
4140 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4141 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4142 vhost->discovery_threads--;
4143 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4144 kref_put(&tgt->kref, ibmvfc_release_tgt);
4145 } else
4146 tgt_dbg(tgt, "Sent process login\n");
4147 }
4148
4149 /**
4150 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4151 * @evt: ibmvfc event struct
4152 *
4153 **/
4154 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4155 {
4156 struct ibmvfc_target *tgt = evt->tgt;
4157 struct ibmvfc_host *vhost = evt->vhost;
4158 struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4159 u32 status = be16_to_cpu(rsp->common.status);
4160 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4161
4162 vhost->discovery_threads--;
4163 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4164 switch (status) {
4165 case IBMVFC_MAD_SUCCESS:
4166 tgt_dbg(tgt, "Port Login succeeded\n");
4167 if (tgt->ids.port_name &&
4168 tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4169 vhost->reinit = 1;
4170 tgt_dbg(tgt, "Port re-init required\n");
4171 break;
4172 }
4173 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4174 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4175 tgt->ids.port_id = tgt->scsi_id;
4176 memcpy(&tgt->service_parms, &rsp->service_parms,
4177 sizeof(tgt->service_parms));
4178 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4179 sizeof(tgt->service_parms_change));
4180 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4181 break;
4182 case IBMVFC_MAD_DRIVER_FAILED:
4183 break;
4184 case IBMVFC_MAD_CRQ_ERROR:
4185 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4186 break;
4187 case IBMVFC_MAD_FAILED:
4188 default:
4189 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4190 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4191 else
4192 ibmvfc_del_tgt(tgt);
4193
4194 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4195 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4196 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4197 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4198 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4199 break;
4200 }
4201
4202 kref_put(&tgt->kref, ibmvfc_release_tgt);
4203 ibmvfc_free_event(evt);
4204 wake_up(&vhost->work_wait_q);
4205 }
4206
4207 /**
4208 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4209 * @tgt: ibmvfc target struct
4210 *
4211 **/
4212 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4213 {
4214 struct ibmvfc_port_login *plogi;
4215 struct ibmvfc_host *vhost = tgt->vhost;
4216 struct ibmvfc_event *evt;
4217
4218 if (vhost->discovery_threads >= disc_threads)
4219 return;
4220
4221 kref_get(&tgt->kref);
4222 tgt->logo_rcvd = 0;
4223 evt = ibmvfc_get_reserved_event(&vhost->crq);
4224 if (!evt) {
4225 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4226 kref_put(&tgt->kref, ibmvfc_release_tgt);
4227 __ibmvfc_reset_host(vhost);
4228 return;
4229 }
4230 vhost->discovery_threads++;
4231 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4232 ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4233 evt->tgt = tgt;
4234 plogi = &evt->iu.plogi;
4235 memset(plogi, 0, sizeof(*plogi));
4236 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4237 plogi->common.version = cpu_to_be32(2);
4238 plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4239 } else {
4240 plogi->common.version = cpu_to_be32(1);
4241 }
4242 plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4243 plogi->common.length = cpu_to_be16(sizeof(*plogi));
4244 plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4245
4246 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4247 vhost->discovery_threads--;
4248 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4249 kref_put(&tgt->kref, ibmvfc_release_tgt);
4250 } else
4251 tgt_dbg(tgt, "Sent port login\n");
4252 }
4253
4254 /**
4255 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4256 * @evt: ibmvfc event struct
4257 *
4258 **/
4259 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4260 {
4261 struct ibmvfc_target *tgt = evt->tgt;
4262 struct ibmvfc_host *vhost = evt->vhost;
4263 struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4264 u32 status = be16_to_cpu(rsp->common.status);
4265
4266 vhost->discovery_threads--;
4267 ibmvfc_free_event(evt);
4268 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4269
4270 switch (status) {
4271 case IBMVFC_MAD_SUCCESS:
4272 tgt_dbg(tgt, "Implicit Logout succeeded\n");
4273 break;
4274 case IBMVFC_MAD_DRIVER_FAILED:
4275 kref_put(&tgt->kref, ibmvfc_release_tgt);
4276 wake_up(&vhost->work_wait_q);
4277 return;
4278 case IBMVFC_MAD_FAILED:
4279 default:
4280 tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4281 break;
4282 }
4283
4284 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4285 kref_put(&tgt->kref, ibmvfc_release_tgt);
4286 wake_up(&vhost->work_wait_q);
4287 }
4288
4289 /**
4290 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4291 * @tgt: ibmvfc target struct
4292 * @done: Routine to call when the event is responded to
4293 *
4294 * Returns:
4295 * Allocated and initialized ibmvfc_event struct
4296 **/
4297 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4298 void (*done) (struct ibmvfc_event *))
4299 {
4300 struct ibmvfc_implicit_logout *mad;
4301 struct ibmvfc_host *vhost = tgt->vhost;
4302 struct ibmvfc_event *evt;
4303
4304 kref_get(&tgt->kref);
4305 evt = ibmvfc_get_reserved_event(&vhost->crq);
4306 if (!evt)
4307 return NULL;
4308 ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4309 evt->tgt = tgt;
4310 mad = &evt->iu.implicit_logout;
4311 memset(mad, 0, sizeof(*mad));
4312 mad->common.version = cpu_to_be32(1);
4313 mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4314 mad->common.length = cpu_to_be16(sizeof(*mad));
4315 mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4316 return evt;
4317 }
4318
4319 /**
4320 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4321 * @tgt: ibmvfc target struct
4322 *
4323 **/
4324 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4325 {
4326 struct ibmvfc_host *vhost = tgt->vhost;
4327 struct ibmvfc_event *evt;
4328
4329 if (vhost->discovery_threads >= disc_threads)
4330 return;
4331
4332 vhost->discovery_threads++;
4333 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4334 ibmvfc_tgt_implicit_logout_done);
4335 if (!evt) {
4336 vhost->discovery_threads--;
4337 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4338 kref_put(&tgt->kref, ibmvfc_release_tgt);
4339 __ibmvfc_reset_host(vhost);
4340 return;
4341 }
4342
4343 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4344 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4345 vhost->discovery_threads--;
4346 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4347 kref_put(&tgt->kref, ibmvfc_release_tgt);
4348 } else
4349 tgt_dbg(tgt, "Sent Implicit Logout\n");
4350 }
4351
4352 /**
4353 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4354 * @evt: ibmvfc event struct
4355 *
4356 **/
4357 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4358 {
4359 struct ibmvfc_target *tgt = evt->tgt;
4360 struct ibmvfc_host *vhost = evt->vhost;
4361 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4362 u32 status = be16_to_cpu(mad->common.status);
4363
4364 vhost->discovery_threads--;
4365 ibmvfc_free_event(evt);
4366
4367 /*
4368 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4369 * driver in which case we need to free up all the targets. If we are
4370 * not unloading, we will still go through a hard reset to get out of
4371 * offline state, so there is no need to track the old targets in that
4372 * case.
4373 */
4374 if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4375 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4376 else
4377 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4378
4379 tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4380 kref_put(&tgt->kref, ibmvfc_release_tgt);
4381 wake_up(&vhost->work_wait_q);
4382 }
4383
4384 /**
4385 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4386 * @tgt: ibmvfc target struct
4387 *
4388 **/
4389 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4390 {
4391 struct ibmvfc_host *vhost = tgt->vhost;
4392 struct ibmvfc_event *evt;
4393
4394 if (!vhost->logged_in) {
4395 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4396 return;
4397 }
4398
4399 if (vhost->discovery_threads >= disc_threads)
4400 return;
4401
4402 vhost->discovery_threads++;
4403 evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4404 ibmvfc_tgt_implicit_logout_and_del_done);
4405
4406 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4407 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4408 vhost->discovery_threads--;
4409 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4410 kref_put(&tgt->kref, ibmvfc_release_tgt);
4411 } else
4412 tgt_dbg(tgt, "Sent Implicit Logout\n");
4413 }
4414
4415 /**
4416 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4417 * @evt: ibmvfc event struct
4418 *
4419 **/
4420 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4421 {
4422 struct ibmvfc_target *tgt = evt->tgt;
4423 struct ibmvfc_host *vhost = evt->vhost;
4424 struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4425 u32 status = be16_to_cpu(rsp->common.status);
4426 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4427
4428 vhost->discovery_threads--;
4429 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4430 switch (status) {
4431 case IBMVFC_MAD_SUCCESS:
4432 tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
4433 tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4434 tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4435 tgt->scsi_id = tgt->new_scsi_id;
4436 tgt->ids.port_id = tgt->scsi_id;
4437 memcpy(&tgt->service_parms, &rsp->service_parms,
4438 sizeof(tgt->service_parms));
4439 memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4440 sizeof(tgt->service_parms_change));
4441 ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4442 break;
4443 case IBMVFC_MAD_DRIVER_FAILED:
4444 break;
4445 case IBMVFC_MAD_CRQ_ERROR:
4446 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4447 break;
4448 case IBMVFC_MAD_FAILED:
4449 default:
4450 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4451
4452 tgt_log(tgt, level,
4453 "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4454 tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4455 status);
4456 break;
4457 }
4458
4459 kref_put(&tgt->kref, ibmvfc_release_tgt);
4460 ibmvfc_free_event(evt);
4461 wake_up(&vhost->work_wait_q);
4462 }
4463
4465 /**
4466 * ibmvfc_tgt_move_login - Initiate a move login for specified target
4467 * @tgt: ibmvfc target struct
4468 *
4469 **/
4470 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4471 {
4472 struct ibmvfc_host *vhost = tgt->vhost;
4473 struct ibmvfc_move_login *move;
4474 struct ibmvfc_event *evt;
4475
4476 if (vhost->discovery_threads >= disc_threads)
4477 return;
4478
4479 kref_get(&tgt->kref);
4480 evt = ibmvfc_get_reserved_event(&vhost->crq);
4481 if (!evt) {
4482 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4483 kref_put(&tgt->kref, ibmvfc_release_tgt);
4484 __ibmvfc_reset_host(vhost);
4485 return;
4486 }
4487 vhost->discovery_threads++;
4488 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4489 ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4490 evt->tgt = tgt;
4491 move = &evt->iu.move_login;
4492 memset(move, 0, sizeof(*move));
4493 move->common.version = cpu_to_be32(1);
4494 move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4495 move->common.length = cpu_to_be16(sizeof(*move));
4496
4497 move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4498 move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
4499 move->wwpn = cpu_to_be64(tgt->wwpn);
4500 move->node_name = cpu_to_be64(tgt->ids.node_name);
4501
4502 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4503 vhost->discovery_threads--;
4504 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4505 kref_put(&tgt->kref, ibmvfc_release_tgt);
4506 } else
4507 tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
4508 }
4509
4510 /**
4511 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4512 * @mad: ibmvfc passthru mad struct
4513 * @tgt: ibmvfc target struct
4514 *
4515 * Returns:
4516 * 1 if PLOGI needed / 0 if PLOGI not needed
4517 **/
4518 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4519 struct ibmvfc_target *tgt)
4520 {
4521 if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4522 return 1;
4523 if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4524 return 1;
4525 if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4526 return 1;
4527 return 0;
4528 }
4529
4530 /**
4531 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4532 * @evt: ibmvfc event struct
4533 *
4534 **/
4535 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4536 {
4537 struct ibmvfc_target *tgt = evt->tgt;
4538 struct ibmvfc_host *vhost = evt->vhost;
4539 struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4540 u32 status = be16_to_cpu(mad->common.status);
4541 u8 fc_reason, fc_explain;
4542
4543 vhost->discovery_threads--;
4544 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4545 del_timer(&tgt->timer);
4546
4547 switch (status) {
4548 case IBMVFC_MAD_SUCCESS:
4549 tgt_dbg(tgt, "ADISC succeeded\n");
4550 if (ibmvfc_adisc_needs_plogi(mad, tgt))
4551 ibmvfc_del_tgt(tgt);
4552 break;
4553 case IBMVFC_MAD_DRIVER_FAILED:
4554 break;
4555 case IBMVFC_MAD_FAILED:
4556 default:
4557 ibmvfc_del_tgt(tgt);
4558 fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4559 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4560 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4561 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4562 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4563 ibmvfc_get_fc_type(fc_reason), fc_reason,
4564 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4565 break;
4566 }
4567
4568 kref_put(&tgt->kref, ibmvfc_release_tgt);
4569 ibmvfc_free_event(evt);
4570 wake_up(&vhost->work_wait_q);
4571 }
4572
4573 /**
4574 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4575 * @evt: ibmvfc event struct
4576 *
4577 **/
4578 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4579 {
4580 struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4581
4582 memset(mad, 0, sizeof(*mad));
4583 mad->common.version = cpu_to_be32(1);
4584 mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4585 mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4586 mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4587 offsetof(struct ibmvfc_passthru_mad, iu));
4588 mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4589 mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4590 mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4591 mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4592 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4593 offsetof(struct ibmvfc_passthru_fc_iu, payload));
4594 mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4595 mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4596 offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4597 offsetof(struct ibmvfc_passthru_fc_iu, response));
4598 mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4599 }
4600
4601 /**
4602 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4603 * @evt: ibmvfc event struct
4604 *
4605 * Just cleanup this event struct. Everything else is handled by
4606 * the ADISC completion handler. If the ADISC never actually comes
4607 * back, we still have the timer running on the ADISC event struct
4608 * which will fire and cause the CRQ to get reset.
4609 *
4610 **/
4611 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4612 {
4613 struct ibmvfc_host *vhost = evt->vhost;
4614 struct ibmvfc_target *tgt = evt->tgt;
4615
4616 tgt_dbg(tgt, "ADISC cancel complete\n");
4617 vhost->abort_threads--;
4618 ibmvfc_free_event(evt);
4619 kref_put(&tgt->kref, ibmvfc_release_tgt);
4620 wake_up(&vhost->work_wait_q);
4621 }
4622
4623 /**
4624 * ibmvfc_adisc_timeout - Handle an ADISC timeout
4625 * @t: timer list struct embedded in the ibmvfc target
4626 *
4627 * If an ADISC times out, send a cancel. If the cancel times
4628 * out, reset the CRQ. When the ADISC comes back as cancelled,
4629 * log back into the target.
4630 **/
4631 static void ibmvfc_adisc_timeout(struct timer_list *t)
4632 {
4633 struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4634 struct ibmvfc_host *vhost = tgt->vhost;
4635 struct ibmvfc_event *evt;
4636 struct ibmvfc_tmf *tmf;
4637 unsigned long flags;
4638 int rc;
4639
4640 tgt_dbg(tgt, "ADISC timeout\n");
4641 spin_lock_irqsave(vhost->host->host_lock, flags);
4642 if (vhost->abort_threads >= disc_threads ||
4643 tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4644 vhost->state != IBMVFC_INITIALIZING ||
4645 vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4646 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4647 return;
4648 }
4649
4650 vhost->abort_threads++;
4651 kref_get(&tgt->kref);
4652 evt = ibmvfc_get_reserved_event(&vhost->crq);
4653 if (!evt) {
4654 tgt_err(tgt, "Failed to get cancel event for ADISC.\n");
4655 vhost->abort_threads--;
4656 kref_put(&tgt->kref, ibmvfc_release_tgt);
4657 __ibmvfc_reset_host(vhost);
4658 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4659 return;
4660 }
4661 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4662
4663 evt->tgt = tgt;
4664 tmf = &evt->iu.tmf;
4665 memset(tmf, 0, sizeof(*tmf));
4666 if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4667 tmf->common.version = cpu_to_be32(2);
4668 tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4669 } else {
4670 tmf->common.version = cpu_to_be32(1);
4671 }
4672 tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4673 tmf->common.length = cpu_to_be16(sizeof(*tmf));
4674 tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4675 tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4676
4677 rc = ibmvfc_send_event(evt, vhost, default_timeout);
4678
4679 if (rc) {
4680 tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4681 vhost->abort_threads--;
4682 kref_put(&tgt->kref, ibmvfc_release_tgt);
4683 __ibmvfc_reset_host(vhost);
4684 } else
4685 tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4686 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4687 }
4688
4689 /**
4690 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4691 * @tgt: ibmvfc target struct
4692 *
4693 * When sending an ADISC we end up with two timers running. The
4694 * first timer is the timer in the ibmvfc target struct. If this
4695 * fires, we send a cancel to the target. The second timer is the
4696 * timer on the ibmvfc event for the ADISC, which is longer. If that
4697 * fires, it means the ADISC timed out and our attempt to cancel it
4698 * also failed, so we need to reset the CRQ.
4699 **/
4700 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4701 {
4702 struct ibmvfc_passthru_mad *mad;
4703 struct ibmvfc_host *vhost = tgt->vhost;
4704 struct ibmvfc_event *evt;
4705
4706 if (vhost->discovery_threads >= disc_threads)
4707 return;
4708
4709 kref_get(&tgt->kref);
4710 evt = ibmvfc_get_reserved_event(&vhost->crq);
4711 if (!evt) {
4712 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4713 kref_put(&tgt->kref, ibmvfc_release_tgt);
4714 __ibmvfc_reset_host(vhost);
4715 return;
4716 }
4717 vhost->discovery_threads++;
4718 ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4719 evt->tgt = tgt;
4720
4721 ibmvfc_init_passthru(evt);
4722 mad = &evt->iu.passthru;
4723 mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4724 mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4725 mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4726
4727 mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4728 memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4729 sizeof(vhost->login_buf->resp.port_name));
4730 memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4731 sizeof(vhost->login_buf->resp.node_name));
4732 mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4733
4734 if (timer_pending(&tgt->timer))
4735 mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4736 else {
4737 tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4738 add_timer(&tgt->timer);
4739 }
4740
4741 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4742 if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4743 vhost->discovery_threads--;
4744 del_timer(&tgt->timer);
4745 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4746 kref_put(&tgt->kref, ibmvfc_release_tgt);
4747 } else
4748 tgt_dbg(tgt, "Sent ADISC\n");
4749 }
4750
4751 /**
4752 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4753 * @evt: ibmvfc event struct
4754 *
4755 **/
4756 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4757 {
4758 struct ibmvfc_target *tgt = evt->tgt;
4759 struct ibmvfc_host *vhost = evt->vhost;
4760 struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4761 u32 status = be16_to_cpu(rsp->common.status);
4762 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4763
4764 vhost->discovery_threads--;
4765 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4766 switch (status) {
4767 case IBMVFC_MAD_SUCCESS:
4768 tgt_dbg(tgt, "Query Target succeeded\n");
4769 if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4770 ibmvfc_del_tgt(tgt);
4771 else
4772 ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4773 break;
4774 case IBMVFC_MAD_DRIVER_FAILED:
4775 break;
4776 case IBMVFC_MAD_CRQ_ERROR:
4777 ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4778 break;
4779 case IBMVFC_MAD_FAILED:
4780 default:
4781 if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4782 be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4783 be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4784 ibmvfc_del_tgt(tgt);
4785 else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4786 level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4787 else
4788 ibmvfc_del_tgt(tgt);
4789
4790 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4791 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4792 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4793 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4794 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4795 status);
4796 break;
4797 }
4798
4799 kref_put(&tgt->kref, ibmvfc_release_tgt);
4800 ibmvfc_free_event(evt);
4801 wake_up(&vhost->work_wait_q);
4802 }
4803
4804 /**
4805 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4806 * @tgt: ibmvfc target struct
4807 *
4808 **/
4809 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4810 {
4811 struct ibmvfc_query_tgt *query_tgt;
4812 struct ibmvfc_host *vhost = tgt->vhost;
4813 struct ibmvfc_event *evt;
4814
4815 if (vhost->discovery_threads >= disc_threads)
4816 return;
4817
4818 kref_get(&tgt->kref);
4819 evt = ibmvfc_get_reserved_event(&vhost->crq);
4820 if (!evt) {
4821 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4822 kref_put(&tgt->kref, ibmvfc_release_tgt);
4823 __ibmvfc_reset_host(vhost);
4824 return;
4825 }
4826 vhost->discovery_threads++;
4827 evt->tgt = tgt;
4828 ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4829 query_tgt = &evt->iu.query_tgt;
4830 memset(query_tgt, 0, sizeof(*query_tgt));
4831 query_tgt->common.version = cpu_to_be32(1);
4832 query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4833 query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4834 query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4835
4836 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4837 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4838 vhost->discovery_threads--;
4839 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4840 kref_put(&tgt->kref, ibmvfc_release_tgt);
4841 } else
4842 tgt_dbg(tgt, "Sent Query Target\n");
4843 }
4844
4845 /**
4846 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4847 * @vhost: ibmvfc host struct
4848 * @target: Holds the SCSI ID and WWPN of the target to allocate
4849 *
4850 * Returns:
4851 * 0 on success / other on failure
4852 **/
4853 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4854 struct ibmvfc_discover_targets_entry *target)
4855 {
4856 struct ibmvfc_target *stgt = NULL;
4857 struct ibmvfc_target *wtgt = NULL;
4858 struct ibmvfc_target *tgt;
4859 unsigned long flags;
4860 u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4861 u64 wwpn = be64_to_cpu(target->wwpn);
4862
4863 /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4864 spin_lock_irqsave(vhost->host->host_lock, flags);
4865 list_for_each_entry(tgt, &vhost->targets, queue) {
4866 if (tgt->wwpn == wwpn) {
4867 wtgt = tgt;
4868 break;
4869 }
4870 }
4871
4872 list_for_each_entry(tgt, &vhost->targets, queue) {
4873 if (tgt->scsi_id == scsi_id) {
4874 stgt = tgt;
4875 break;
4876 }
4877 }
4878
4879 if (wtgt && !stgt) {
4880 /*
4881 * A WWPN target has moved and we still are tracking the old
4882 * SCSI ID. The only way we should be able to get here is if
4883 * we attempted to send an implicit logout for the old SCSI ID
4884 * and it failed for some reason, such as there being I/O
4885 * pending to the target. In this case, we will have already
4886 * deleted the rport from the FC transport so we do a move
4887 * login, which works even with I/O pending, however, if
4888 * there is still I/O pending, it will stay outstanding, so
4889 * we only do this if fast fail is disabled for the rport,
4890 * otherwise we let terminate_rport_io clean up the port
4891 * before we login at the new location.
4892 */
4893 if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4894 if (wtgt->move_login) {
4895 /*
4896 * Do a move login here. The old target is no longer
4897 			 * known to the transport layer. We don't use the
4898 * normal ibmvfc_set_tgt_action to set this, as we
4899 * don't normally want to allow this state change.
4900 */
4901 wtgt->new_scsi_id = scsi_id;
4902 wtgt->action = IBMVFC_TGT_ACTION_INIT;
4903 wtgt->init_retries = 0;
4904 ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4905 }
4906 goto unlock_out;
4907 } else {
4908 tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4909 wtgt->action, wtgt->rport);
4910 }
4911 } else if (stgt) {
4912 if (tgt->need_login)
4913 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4914 goto unlock_out;
4915 }
4916 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4917
4918 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4919 memset(tgt, 0, sizeof(*tgt));
4920 tgt->scsi_id = scsi_id;
4921 tgt->wwpn = wwpn;
4922 tgt->vhost = vhost;
4923 tgt->need_login = 1;
4924 timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4925 kref_init(&tgt->kref);
4926 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4927 spin_lock_irqsave(vhost->host->host_lock, flags);
4928 tgt->cancel_key = vhost->task_set++;
4929 list_add_tail(&tgt->queue, &vhost->targets);
4930
4931 unlock_out:
4932 spin_unlock_irqrestore(vhost->host->host_lock, flags);
4933 return 0;
4934 }
4935
4936 /**
4937 * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4938 * @vhost: ibmvfc host struct
4939 *
4940 * Returns:
4941 * 0 on success / other on failure
4942 **/
4943 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4944 {
4945 int i, rc;
4946
4947 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4948 rc = ibmvfc_alloc_target(vhost, &vhost->scsi_scrqs.disc_buf[i]);
4949
4950 return rc;
4951 }
4952
4953 /**
4954 * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4955 * @evt: ibmvfc event struct
4956 *
4957 **/
4958 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4959 {
4960 struct ibmvfc_host *vhost = evt->vhost;
4961 struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4962 u32 mad_status = be16_to_cpu(rsp->common.status);
4963 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4964
4965 switch (mad_status) {
4966 case IBMVFC_MAD_SUCCESS:
4967 ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4968 vhost->num_targets = be32_to_cpu(rsp->num_written);
4969 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4970 break;
4971 case IBMVFC_MAD_FAILED:
4972 level += ibmvfc_retry_host_init(vhost);
4973 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4974 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4975 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4976 break;
4977 case IBMVFC_MAD_DRIVER_FAILED:
4978 break;
4979 default:
4980 dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4981 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4982 break;
4983 }
4984
4985 ibmvfc_free_event(evt);
4986 wake_up(&vhost->work_wait_q);
4987 }
4988
4989 /**
4990 * ibmvfc_discover_targets - Send Discover Targets MAD
4991 * @vhost: ibmvfc host struct
4992 *
4993 **/
4994 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4995 {
4996 struct ibmvfc_discover_targets *mad;
4997 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
4998 int level = IBMVFC_DEFAULT_LOG_LEVEL;
4999
5000 if (!evt) {
5001 ibmvfc_log(vhost, level, "Discover Targets failed: no available events\n");
5002 ibmvfc_hard_reset_host(vhost);
5003 return;
5004 }
5005
5006 ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
5007 mad = &evt->iu.discover_targets;
5008 memset(mad, 0, sizeof(*mad));
5009 mad->common.version = cpu_to_be32(1);
5010 mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
5011 mad->common.length = cpu_to_be16(sizeof(*mad));
5012 mad->bufflen = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
5013 mad->buffer.va = cpu_to_be64(vhost->scsi_scrqs.disc_buf_dma);
5014 mad->buffer.len = cpu_to_be32(vhost->scsi_scrqs.disc_buf_sz);
5015 mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
5016 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5017
5018 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5019 ibmvfc_dbg(vhost, "Sent discover targets\n");
5020 else
5021 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5022 }
5023
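/**
 * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
 * @evt: ibmvfc event struct
 *
 **/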
5024 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
5025 {
5026 struct ibmvfc_host *vhost = evt->vhost;
5027 struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
5028 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5029 u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
5030 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5031 int flags, active_queues, i;
5032
5033 ibmvfc_free_event(evt);
5034
5035 switch (mad_status) {
5036 case IBMVFC_MAD_SUCCESS:
5037 ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
5038 flags = be32_to_cpu(setup->flags);
5039 vhost->do_enquiry = 0;
5040 active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
5041 scrqs->active_queues = active_queues;
5042
5043 if (flags & IBMVFC_CHANNELS_CANCELED) {
5044 ibmvfc_dbg(vhost, "Channels Canceled\n");
5045 vhost->using_channels = 0;
5046 } else {
5047 if (active_queues)
5048 vhost->using_channels = 1;
5049 for (i = 0; i < active_queues; i++)
5050 scrqs->scrqs[i].vios_cookie =
5051 be64_to_cpu(setup->channel_handles[i]);
5052
5053 ibmvfc_dbg(vhost, "Using %u channels\n",
5054 vhost->scsi_scrqs.active_queues);
5055 }
5056 break;
5057 case IBMVFC_MAD_FAILED:
5058 level += ibmvfc_retry_host_init(vhost);
5059 ibmvfc_log(vhost, level, "Channel Setup failed\n");
5060 fallthrough;
5061 case IBMVFC_MAD_DRIVER_FAILED:
5062 return;
5063 default:
5064 dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
5065 mad_status);
5066 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5067 return;
5068 }
5069
5070 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5071 wake_up(&vhost->work_wait_q);
5072 }
5073
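/**
 * ibmvfc_channel_setup - Send Channel Setup MAD
 * @vhost: ibmvfc host struct
 *
 **/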
5074 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
5075 {
5076 struct ibmvfc_channel_setup_mad *mad;
5077 struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
5078 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5079 struct ibmvfc_channels *scrqs = &vhost->scsi_scrqs;
5080 unsigned int num_channels =
5081 min(scrqs->desired_queues, vhost->max_vios_scsi_channels);
5082 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5083 int i;
5084
5085 if (!evt) {
5086 ibmvfc_log(vhost, level, "Channel Setup failed: no available events\n");
5087 ibmvfc_hard_reset_host(vhost);
5088 return;
5089 }
5090
5091 memset(setup_buf, 0, sizeof(*setup_buf));
5092 if (num_channels == 0)
5093 setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
5094 else {
5095 setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
5096 for (i = 0; i < num_channels; i++)
5097 setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
5098 }
5099
5100 ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
5101 mad = &evt->iu.channel_setup;
5102 memset(mad, 0, sizeof(*mad));
5103 mad->common.version = cpu_to_be32(1);
5104 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
5105 mad->common.length = cpu_to_be16(sizeof(*mad));
5106 mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
5107 mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
5108
5109 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5110
5111 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5112 ibmvfc_dbg(vhost, "Sent channel setup\n");
5113 else
5114 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
5115 }
5116
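/**
 * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
 * @evt: ibmvfc event struct
 *
 **/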
5117 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
5118 {
5119 struct ibmvfc_host *vhost = evt->vhost;
5120 struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
5121 u32 mad_status = be16_to_cpu(rsp->common.status);
5122 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5123
5124 switch (mad_status) {
5125 case IBMVFC_MAD_SUCCESS:
5126 ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
5127 vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
5128 ibmvfc_free_event(evt);
5129 break;
5130 case IBMVFC_MAD_FAILED:
5131 level += ibmvfc_retry_host_init(vhost);
5132 ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
5133 fallthrough;
5134 case IBMVFC_MAD_DRIVER_FAILED:
5135 ibmvfc_free_event(evt);
5136 return;
5137 default:
5138 dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
5139 mad_status);
5140 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5141 ibmvfc_free_event(evt);
5142 return;
5143 }
5144
5145 ibmvfc_channel_setup(vhost);
5146 }
5147
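/**
 * ibmvfc_channel_enquiry - Send Channel Enquiry MAD
 * @vhost: ibmvfc host struct
 *
 **/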
5148 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
5149 {
5150 struct ibmvfc_channel_enquiry *mad;
5151 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5152 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5153
5154 if (!evt) {
5155 ibmvfc_log(vhost, level, "Channel Enquiry failed: no available events\n");
5156 ibmvfc_hard_reset_host(vhost);
5157 return;
5158 }
5159
5160 ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
5161 mad = &evt->iu.channel_enquiry;
5162 memset(mad, 0, sizeof(*mad));
5163 mad->common.version = cpu_to_be32(1);
5164 mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
5165 mad->common.length = cpu_to_be16(sizeof(*mad));
5166
5167 if (mig_channels_only)
5168 mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
5169 if (mig_no_less_channels)
5170 mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
5171
5172 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5173
5174 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5175 ibmvfc_dbg(vhost, "Send channel enquiry\n");
5176 else
5177 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5178 }
5179
5180 /**
5181 * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5182 * @evt: ibmvfc event struct
5183 *
5184 **/
5185 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5186 {
5187 struct ibmvfc_host *vhost = evt->vhost;
5188 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5189 struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5190 unsigned int npiv_max_sectors;
5191 int level = IBMVFC_DEFAULT_LOG_LEVEL;
5192
5193 switch (mad_status) {
5194 case IBMVFC_MAD_SUCCESS:
5195 ibmvfc_free_event(evt);
5196 break;
5197 case IBMVFC_MAD_FAILED:
5198 if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5199 level += ibmvfc_retry_host_init(vhost);
5200 else
5201 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5202 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5203 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5204 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5205 ibmvfc_free_event(evt);
5206 return;
5207 case IBMVFC_MAD_CRQ_ERROR:
5208 ibmvfc_retry_host_init(vhost);
5209 fallthrough;
5210 case IBMVFC_MAD_DRIVER_FAILED:
5211 ibmvfc_free_event(evt);
5212 return;
5213 default:
5214 dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5215 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5216 ibmvfc_free_event(evt);
5217 return;
5218 }
5219
5220 vhost->client_migrated = 0;
5221
5222 if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5223 dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5224 rsp->flags);
5225 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5226 wake_up(&vhost->work_wait_q);
5227 return;
5228 }
5229
5230 if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5231 dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5232 rsp->max_cmds);
5233 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5234 wake_up(&vhost->work_wait_q);
5235 return;
5236 }
5237
5238 vhost->logged_in = 1;
5239 npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), max_sectors);
5240 dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5241 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5242 rsp->drc_name, npiv_max_sectors);
5243
5244 fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5245 fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5246 fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5247 fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5248 fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5249 fc_host_supported_classes(vhost->host) = 0;
5250 if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5251 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5252 if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5253 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5254 if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5255 fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5256 fc_host_maxframe_size(vhost->host) =
5257 be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5258
5259 vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5260 vhost->host->max_sectors = npiv_max_sectors;
5261
5262 if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5263 ibmvfc_channel_enquiry(vhost);
5264 } else {
5265 vhost->do_enquiry = 0;
5266 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5267 wake_up(&vhost->work_wait_q);
5268 }
5269 }
5270
5271 /**
5272 * ibmvfc_npiv_login - Sends NPIV login
5273 * @vhost: ibmvfc host struct
5274 *
5275 **/
5276 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5277 {
5278 struct ibmvfc_npiv_login_mad *mad;
5279 struct ibmvfc_event *evt = ibmvfc_get_reserved_event(&vhost->crq);
5280
5281 if (!evt) {
5282 ibmvfc_dbg(vhost, "NPIV Login failed: no available events\n");
5283 ibmvfc_hard_reset_host(vhost);
5284 return;
5285 }
5286
5287 ibmvfc_gather_partition_info(vhost);
5288 ibmvfc_set_login_info(vhost);
5289 ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5290
5291 memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5292 mad = &evt->iu.npiv_login;
5293 memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5294 mad->common.version = cpu_to_be32(1);
5295 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5296 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5297 mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5298 mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5299
5300 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5301
5302 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5303 ibmvfc_dbg(vhost, "Sent NPIV login\n");
5304 else
5305 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5306 }
5307
5308 /**
5309 * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5310 * @evt: ibmvfc event struct
5311 *
5312 **/
5313 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5314 {
5315 struct ibmvfc_host *vhost = evt->vhost;
5316 u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5317
5318 ibmvfc_free_event(evt);
5319
5320 switch (mad_status) {
5321 case IBMVFC_MAD_SUCCESS:
5322 if (list_empty(&vhost->crq.sent) &&
5323 vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5324 ibmvfc_init_host(vhost);
5325 return;
5326 }
5327 break;
5328 case IBMVFC_MAD_FAILED:
5329 case IBMVFC_MAD_NOT_SUPPORTED:
5330 case IBMVFC_MAD_CRQ_ERROR:
5331 case IBMVFC_MAD_DRIVER_FAILED:
5332 default:
5333 ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5334 break;
5335 }
5336
5337 ibmvfc_hard_reset_host(vhost);
5338 }
5339
5340 /**
5341 * ibmvfc_npiv_logout - Issue an NPIV Logout
5342 * @vhost: ibmvfc host struct
5343 *
5344 **/
5345 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5346 {
5347 struct ibmvfc_npiv_logout_mad *mad;
5348 struct ibmvfc_event *evt;
5349
5350 evt = ibmvfc_get_reserved_event(&vhost->crq);
5351 if (!evt) {
5352 ibmvfc_dbg(vhost, "NPIV Logout failed: no available events\n");
5353 ibmvfc_hard_reset_host(vhost);
5354 return;
5355 }
5356
5357 ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5358
5359 mad = &evt->iu.npiv_logout;
5360 memset(mad, 0, sizeof(*mad));
5361 mad->common.version = cpu_to_be32(1);
5362 mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5363 mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5364
5365 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5366
5367 if (!ibmvfc_send_event(evt, vhost, default_timeout))
5368 ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5369 else
5370 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5371 }
5372
5373 /**
5374 * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5375 * @vhost: ibmvfc host struct
5376 *
5377 * Returns:
5378 * 1 if work to do / 0 if not
5379 **/
5380 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5381 {
5382 struct ibmvfc_target *tgt;
5383
5384 list_for_each_entry(tgt, &vhost->targets, queue) {
5385 if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5386 tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5387 return 1;
5388 }
5389
5390 return 0;
5391 }
5392
5393 /**
5394 * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5395 * @vhost: ibmvfc host struct
5396 *
5397 * Returns:
5398 * 1 if work to do / 0 if not
5399 **/
5400 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5401 {
5402 struct ibmvfc_target *tgt;
5403
5404 list_for_each_entry(tgt, &vhost->targets, queue) {
5405 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5406 tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5407 return 1;
5408 }
5409 return 0;
5410 }
5411
5412 /**
5413 * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5414 * @vhost: ibmvfc host struct
5415 *
5416 * Returns:
5417 * 1 if work to do / 0 if not
5418 **/
5419 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5420 {
5421 struct ibmvfc_target *tgt;
5422
5423 if (kthread_should_stop())
5424 return 1;
5425 switch (vhost->action) {
5426 case IBMVFC_HOST_ACTION_NONE:
5427 case IBMVFC_HOST_ACTION_INIT_WAIT:
5428 case IBMVFC_HOST_ACTION_LOGO_WAIT:
5429 return 0;
5430 case IBMVFC_HOST_ACTION_TGT_INIT:
5431 case IBMVFC_HOST_ACTION_QUERY_TGTS:
5432 if (vhost->discovery_threads == disc_threads)
5433 return 0;
5434 list_for_each_entry(tgt, &vhost->targets, queue)
5435 if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5436 return 1;
5437 list_for_each_entry(tgt, &vhost->targets, queue)
5438 if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5439 return 0;
5440 return 1;
5441 case IBMVFC_HOST_ACTION_TGT_DEL:
5442 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5443 if (vhost->discovery_threads == disc_threads)
5444 return 0;
5445 list_for_each_entry(tgt, &vhost->targets, queue)
5446 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5447 return 1;
5448 list_for_each_entry(tgt, &vhost->targets, queue)
5449 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5450 return 0;
5451 return 1;
5452 case IBMVFC_HOST_ACTION_LOGO:
5453 case IBMVFC_HOST_ACTION_INIT:
5454 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5455 case IBMVFC_HOST_ACTION_QUERY:
5456 case IBMVFC_HOST_ACTION_RESET:
5457 case IBMVFC_HOST_ACTION_REENABLE:
5458 default:
5459 break;
5460 }
5461
5462 return 1;
5463 }
5464
5465 /**
5466 * ibmvfc_work_to_do - Is there task level work to do?
5467 * @vhost: ibmvfc host struct
5468 *
5469 * Returns:
5470 * 1 if work to do / 0 if not
5471 **/
5472 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5473 {
5474 unsigned long flags;
5475 int rc;
5476
5477 spin_lock_irqsave(vhost->host->host_lock, flags);
5478 rc = __ibmvfc_work_to_do(vhost);
5479 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5480 return rc;
5481 }
5482
5483 /**
5484 * ibmvfc_log_ae - Log async events if necessary
5485 * @vhost: ibmvfc host struct
5486 * @events: events to log
5487 *
5488 **/
5489 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5490 {
5491 if (events & IBMVFC_AE_RSCN)
5492 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5493 if ((events & IBMVFC_AE_LINKDOWN) &&
5494 vhost->state >= IBMVFC_HALTED)
5495 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5496 if ((events & IBMVFC_AE_LINKUP) &&
5497 vhost->state == IBMVFC_INITIALIZING)
5498 fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5499 }
5500
5501 /**
5502 * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5503 * @tgt: ibmvfc target struct
5504 *
5505 **/
5506 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5507 {
5508 struct ibmvfc_host *vhost = tgt->vhost;
5509 struct fc_rport *rport;
5510 unsigned long flags;
5511
5512 tgt_dbg(tgt, "Adding rport\n");
5513 rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5514 spin_lock_irqsave(vhost->host->host_lock, flags);
5515
5516 if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5517 tgt_dbg(tgt, "Deleting rport\n");
5518 list_del(&tgt->queue);
5519 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5520 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5521 fc_remote_port_delete(rport);
5522 del_timer_sync(&tgt->timer);
5523 kref_put(&tgt->kref, ibmvfc_release_tgt);
5524 return;
5525 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5526 tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5527 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5528 tgt->rport = NULL;
5529 tgt->init_retries = 0;
5530 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5531 fc_remote_port_delete(rport);
5532 return;
5533 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5534 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5535 return;
5536 }
5537
5538 if (rport) {
5539 tgt_dbg(tgt, "rport add succeeded\n");
5540 tgt->rport = rport;
5541 rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5542 rport->supported_classes = 0;
5543 tgt->target_id = rport->scsi_target_id;
5544 if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5545 rport->supported_classes |= FC_COS_CLASS1;
5546 if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5547 rport->supported_classes |= FC_COS_CLASS2;
5548 if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5549 rport->supported_classes |= FC_COS_CLASS3;
5550 } else
5551 tgt_dbg(tgt, "rport add failed\n");
5552 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5553 }
5554
5555 /**
5556 * ibmvfc_do_work - Do task level work
5557 * @vhost: ibmvfc host struct
5558 *
5559 **/
5560 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5561 {
5562 struct ibmvfc_target *tgt;
5563 unsigned long flags;
5564 struct fc_rport *rport;
5565 LIST_HEAD(purge);
5566 int rc;
5567
5568 ibmvfc_log_ae(vhost, vhost->events_to_log);
5569 spin_lock_irqsave(vhost->host->host_lock, flags);
5570 vhost->events_to_log = 0;
5571 switch (vhost->action) {
5572 case IBMVFC_HOST_ACTION_NONE:
5573 case IBMVFC_HOST_ACTION_LOGO_WAIT:
5574 case IBMVFC_HOST_ACTION_INIT_WAIT:
5575 break;
5576 case IBMVFC_HOST_ACTION_RESET:
5577 list_splice_init(&vhost->purge, &purge);
5578 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5579 ibmvfc_complete_purge(&purge);
5580 rc = ibmvfc_reset_crq(vhost);
5581
5582 spin_lock_irqsave(vhost->host->host_lock, flags);
5583 if (!rc || rc == H_CLOSED)
5584 vio_enable_interrupts(to_vio_dev(vhost->dev));
5585 if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5586 /*
5587 * The only action we could have changed to would have
5588 * been reenable, in which case, we skip the rest of
5589 * this path and wait until we've done the re-enable
5590 * before sending the crq init.
5591 */
5592 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5593
5594 if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5595 (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5596 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5597 dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5598 }
5599 }
5600 break;
5601 case IBMVFC_HOST_ACTION_REENABLE:
5602 list_splice_init(&vhost->purge, &purge);
5603 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5604 ibmvfc_complete_purge(&purge);
5605 rc = ibmvfc_reenable_crq_queue(vhost);
5606
5607 spin_lock_irqsave(vhost->host->host_lock, flags);
5608 if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5609 /*
5610 * The only action we could have changed to would have
5611 * been reset, in which case, we skip the rest of this
5612 * path and wait until we've done the reset before
5613 * sending the crq init.
5614 */
5615 vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5616 if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5617 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5618 dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5619 }
5620 }
5621 break;
5622 case IBMVFC_HOST_ACTION_LOGO:
5623 vhost->job_step(vhost);
5624 break;
5625 case IBMVFC_HOST_ACTION_INIT:
5626 BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5627 if (vhost->delay_init) {
5628 vhost->delay_init = 0;
5629 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5630 ssleep(15);
5631 return;
5632 } else
5633 vhost->job_step(vhost);
5634 break;
5635 case IBMVFC_HOST_ACTION_QUERY:
5636 list_for_each_entry(tgt, &vhost->targets, queue)
5637 ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5638 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5639 break;
5640 case IBMVFC_HOST_ACTION_QUERY_TGTS:
5641 list_for_each_entry(tgt, &vhost->targets, queue) {
5642 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5643 tgt->job_step(tgt);
5644 break;
5645 }
5646 }
5647
5648 if (!ibmvfc_dev_init_to_do(vhost))
5649 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5650 break;
5651 case IBMVFC_HOST_ACTION_TGT_DEL:
5652 case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5653 list_for_each_entry(tgt, &vhost->targets, queue) {
5654 if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5655 tgt->job_step(tgt);
5656 break;
5657 }
5658 }
5659
5660 if (ibmvfc_dev_logo_to_do(vhost)) {
5661 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5662 return;
5663 }
5664
5665 list_for_each_entry(tgt, &vhost->targets, queue) {
5666 if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5667 tgt_dbg(tgt, "Deleting rport\n");
5668 rport = tgt->rport;
5669 tgt->rport = NULL;
5670 list_del(&tgt->queue);
5671 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5672 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5673 if (rport)
5674 fc_remote_port_delete(rport);
5675 del_timer_sync(&tgt->timer);
5676 kref_put(&tgt->kref, ibmvfc_release_tgt);
5677 return;
5678 } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5679 tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5680 rport = tgt->rport;
5681 tgt->rport = NULL;
5682 tgt->init_retries = 0;
5683 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5684
5685 /*
5686 * If fast fail is enabled, we wait for it to fire and then clean up
5687 * the old port, since we expect the fast fail timer to clean up the
5688 * outstanding I/O faster than waiting for normal command timeouts.
5689 * However, if fast fail is disabled, any I/O outstanding to the
5690 * rport LUNs will stay outstanding indefinitely, since the EH handlers
5691 * won't get invoked for I/Os timing out. If this is an NPIV failover
5692 * scenario, the better alternative is to use the move login.
5693 */
5694 if (rport && rport->fast_io_fail_tmo == -1)
5695 tgt->move_login = 1;
5696 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5697 if (rport)
5698 fc_remote_port_delete(rport);
5699 return;
5700 }
5701 }
5702
5703 if (vhost->state == IBMVFC_INITIALIZING) {
5704 if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5705 if (vhost->reinit) {
5706 vhost->reinit = 0;
5707 scsi_block_requests(vhost->host);
5708 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5709 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5710 } else {
5711 ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5712 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5713 wake_up(&vhost->init_wait_q);
5714 schedule_work(&vhost->rport_add_work_q);
5715 vhost->init_retries = 0;
5716 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5717 scsi_unblock_requests(vhost->host);
5718 }
5719
5720 return;
5721 } else {
5722 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5723 vhost->job_step = ibmvfc_discover_targets;
5724 }
5725 } else {
5726 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5727 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5728 scsi_unblock_requests(vhost->host);
5729 wake_up(&vhost->init_wait_q);
5730 return;
5731 }
5732 break;
5733 case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5734 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5735 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5736 ibmvfc_alloc_targets(vhost);
5737 spin_lock_irqsave(vhost->host->host_lock, flags);
5738 break;
5739 case IBMVFC_HOST_ACTION_TGT_INIT:
5740 list_for_each_entry(tgt, &vhost->targets, queue) {
5741 if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5742 tgt->job_step(tgt);
5743 break;
5744 }
5745 }
5746
5747 if (!ibmvfc_dev_init_to_do(vhost))
5748 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5749 break;
5750 default:
5751 break;
5752 }
5753
5754 spin_unlock_irqrestore(vhost->host->host_lock, flags);
5755 }
5756
5757 /**
5758 * ibmvfc_work - Do task level work
5759 * @data: ibmvfc host struct
5760 *
5761 * Returns:
5762 * zero
5763 **/
5764 static int ibmvfc_work(void *data)
5765 {
5766 struct ibmvfc_host *vhost = data;
5767 int rc;
5768
5769 set_user_nice(current, MIN_NICE);
5770
5771 while (1) {
5772 rc = wait_event_interruptible(vhost->work_wait_q,
5773 ibmvfc_work_to_do(vhost));
5774
5775 BUG_ON(rc);
5776
5777 if (kthread_should_stop())
5778 break;
5779
5780 ibmvfc_do_work(vhost);
5781 }
5782
5783 ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5784 return 0;
5785 }
5786
5787 /**
5788 * ibmvfc_alloc_queue - Allocate queue
5789 * @vhost: ibmvfc host struct
5790 * @queue: ibmvfc queue to allocate
5791 * @fmt: queue format to allocate
5792 *
5793 * Returns:
5794 * 0 on success / non-zero on failure
5795 **/
5796 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5797 struct ibmvfc_queue *queue,
5798 enum ibmvfc_msg_fmt fmt)
5799 {
5800 struct device *dev = vhost->dev;
5801 size_t fmt_size;
5802
5803 ENTER;
5804 spin_lock_init(&queue->_lock);
5805 queue->q_lock = &queue->_lock;
5806
5807 switch (fmt) {
5808 case IBMVFC_CRQ_FMT:
5809 fmt_size = sizeof(*queue->msgs.crq);
5810 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_REQ;
5811 queue->evt_depth = scsi_qdepth;
5812 queue->reserved_depth = IBMVFC_NUM_INTERNAL_REQ;
5813 break;
5814 case IBMVFC_ASYNC_FMT:
5815 fmt_size = sizeof(*queue->msgs.async);
5816 break;
5817 case IBMVFC_SUB_CRQ_FMT:
5818 fmt_size = sizeof(*queue->msgs.scrq);
5819 /* We need one extra event for Cancel Commands */
5820 queue->total_depth = scsi_qdepth + IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5821 queue->evt_depth = scsi_qdepth;
5822 queue->reserved_depth = IBMVFC_NUM_INTERNAL_SUBQ_REQ;
5823 break;
5824 default:
5825 dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5826 return -EINVAL;
5827 }
5828
5829 queue->fmt = fmt;
5830 if (ibmvfc_init_event_pool(vhost, queue)) {
5831 dev_err(dev, "Couldn't initialize event pool.\n");
5832 return -ENOMEM;
5833 }
5834
5835 queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5836 if (!queue->msgs.handle)
5837 return -ENOMEM;
5838
5839 queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5840 DMA_BIDIRECTIONAL);
5841
5842 if (dma_mapping_error(dev, queue->msg_token)) {
5843 free_page((unsigned long)queue->msgs.handle);
5844 queue->msgs.handle = NULL;
5845 return -ENOMEM;
5846 }
5847
5848 queue->cur = 0;
5849 queue->size = PAGE_SIZE / fmt_size;
5850
5851 queue->vhost = vhost;
5852 return 0;
5853 }
5854
5855 /**
5856 * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5857 * @vhost: ibmvfc host struct
5858 *
5859 * Allocates a page for messages, maps it for dma, and registers
5860 * the crq with the hypervisor.
5861 *
5862 * Return value:
5863 * zero on success / other on failure
5864 **/
5865 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5866 {
5867 int rc, retrc = -ENOMEM;
5868 struct device *dev = vhost->dev;
5869 struct vio_dev *vdev = to_vio_dev(dev);
5870 struct ibmvfc_queue *crq = &vhost->crq;
5871
5872 ENTER;
5873 if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5874 return -ENOMEM;
5875
5876 retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5877 crq->msg_token, PAGE_SIZE);
5878
5879 if (rc == H_RESOURCE)
5880 /* maybe kexecing and resource is busy. try a reset */
5881 retrc = rc = ibmvfc_reset_crq(vhost);
5882
5883 if (rc == H_CLOSED)
5884 dev_warn(dev, "Partner adapter not ready\n");
5885 else if (rc) {
5886 dev_warn(dev, "Error %d opening adapter\n", rc);
5887 goto reg_crq_failed;
5888 }
5889
5890 retrc = 0;
5891
5892 tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5893
5894 if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5895 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5896 goto req_irq_failed;
5897 }
5898
5899 if ((rc = vio_enable_interrupts(vdev))) {
5900 dev_err(dev, "Error %d enabling interrupts\n", rc);
5901 goto req_irq_failed;
5902 }
5903
5904 LEAVE;
5905 return retrc;
5906
5907 req_irq_failed:
5908 tasklet_kill(&vhost->tasklet);
5909 do {
5910 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5911 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5912 reg_crq_failed:
5913 ibmvfc_free_queue(vhost, crq);
5914 return retrc;
5915 }
5916
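/**
 * ibmvfc_register_channel - Register a sub-CRQ with the hypervisor
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the channel to register
 *
 * Registers the sub-CRQ page and requests its irq.
 *
 * Returns:
 * 0 on success / non-zero on failure
 **/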
5917 static int ibmvfc_register_channel(struct ibmvfc_host *vhost,
5918 struct ibmvfc_channels *channels,
5919 int index)
5920 {
5921 struct device *dev = vhost->dev;
5922 struct vio_dev *vdev = to_vio_dev(dev);
5923 struct ibmvfc_queue *scrq = &channels->scrqs[index];
5924 int rc = -ENOMEM;
5925
5926 ENTER;
5927
5928 rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5929 &scrq->cookie, &scrq->hw_irq);
5930
5931 /* H_CLOSED indicates successful register, but no CRQ partner */
5932 if (rc && rc != H_CLOSED) {
5933 dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5934 if (rc == H_PARAMETER)
5935 dev_warn_once(dev, "Firmware may not support MQ\n");
5936 goto reg_failed;
5937 }
5938
5939 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5940
5941 if (!scrq->irq) {
5942 rc = -EINVAL;
5943 dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5944 goto irq_failed;
5945 }
5946
5947 switch (channels->protocol) {
5948 case IBMVFC_PROTO_SCSI:
5949 snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5950 vdev->unit_address, index);
5951 scrq->handler = ibmvfc_interrupt_mq;
5952 break;
5953 case IBMVFC_PROTO_NVME:
5954 snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-nvmf%d",
5955 vdev->unit_address, index);
5956 scrq->handler = ibmvfc_interrupt_mq;
5957 break;
5958 default:
5959 dev_err(dev, "Unknown channel protocol (%d)\n",
5960 channels->protocol);
5961 goto irq_failed;
5962 }
5963
5964 rc = request_irq(scrq->irq, scrq->handler, 0, scrq->name, scrq);
5965
5966 if (rc) {
5967 dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5968 irq_dispose_mapping(scrq->irq);
5969 goto irq_failed;
5970 }
5971
5972 scrq->hwq_id = index;
5973
5974 LEAVE;
5975 return 0;
5976
5977 irq_failed:
5978 do {
5979 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5980 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5981 reg_failed:
5982 LEAVE;
5983 return rc;
5984 }
5985
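/**
 * ibmvfc_deregister_channel - Deregister a sub-CRQ with the hypervisor
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 * @index: index of the channel to deregister
 *
 * Releases the channel's irq, frees the sub-CRQ with the hypervisor,
 * and cleans out the queue.
 **/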
5986 static void ibmvfc_deregister_channel(struct ibmvfc_host *vhost,
5987 struct ibmvfc_channels *channels,
5988 int index)
5989 {
5990 struct device *dev = vhost->dev;
5991 struct vio_dev *vdev = to_vio_dev(dev);
5992 struct ibmvfc_queue *scrq = &channels->scrqs[index];
5993 long rc;
5994
5995 ENTER;
5996
5997 free_irq(scrq->irq, scrq);
5998 irq_dispose_mapping(scrq->irq);
5999 scrq->irq = 0;
6000
6001 do {
6002 rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
6003 scrq->cookie);
6004 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
6005
6006 if (rc)
6007 dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
6008
6009 /* Clean out the queue */
6010 memset(scrq->msgs.crq, 0, PAGE_SIZE);
6011 scrq->cur = 0;
6012
6013 LEAVE;
6014 }
6015
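/**
 * ibmvfc_reg_sub_crqs - Register all sub-CRQs for a set of channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * On failure, deregisters any channels already registered and disables
 * the channel enquiry.
 **/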
6016 static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost,
6017 struct ibmvfc_channels *channels)
6018 {
6019 int i, j;
6020
6021 ENTER;
6022 if (!vhost->mq_enabled || !channels->scrqs)
6023 return;
6024
6025 for (i = 0; i < channels->max_queues; i++) {
6026 if (ibmvfc_register_channel(vhost, channels, i)) {
6027 for (j = i; j > 0; j--)
6028 ibmvfc_deregister_channel(vhost, channels, j - 1);
6029 vhost->do_enquiry = 0;
6030 return;
6031 }
6032 }
6033
6034 LEAVE;
6035 }
6036
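/**
 * ibmvfc_dereg_sub_crqs - Deregister all sub-CRQs for a set of channels
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/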
6037 static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost,
6038 struct ibmvfc_channels *channels)
6039 {
6040 int i;
6041
6042 ENTER;
6043 if (!vhost->mq_enabled || !channels->scrqs)
6044 return;
6045
6046 for (i = 0; i < channels->max_queues; i++)
6047 ibmvfc_deregister_channel(vhost, channels, i);
6048
6049 LEAVE;
6050 }
6051
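/**
 * ibmvfc_alloc_channels - Allocate and initialize a set of channel queues
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 * Returns:
 * 0 on success / non-zero on failure
 **/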
6052 static int ibmvfc_alloc_channels(struct ibmvfc_host *vhost,
6053 struct ibmvfc_channels *channels)
6054 {
6055 struct ibmvfc_queue *scrq;
6056 int i, j;
6057 int rc = 0;
6058
6059 channels->scrqs = kcalloc(channels->max_queues,
6060 sizeof(*channels->scrqs),
6061 GFP_KERNEL);
6062 if (!channels->scrqs)
6063 return -ENOMEM;
6064
6065 for (i = 0; i < channels->max_queues; i++) {
6066 scrq = &channels->scrqs[i];
6067 rc = ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT);
6068 if (rc) {
6069 for (j = i; j > 0; j--) {
6070 scrq = &channels->scrqs[j - 1];
6071 ibmvfc_free_queue(vhost, scrq);
6072 }
6073 kfree(channels->scrqs);
6074 channels->scrqs = NULL;
6075 channels->active_queues = 0;
6076 return rc;
6077 }
6078 }
6079
6080 return rc;
6081 }
6082
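/**
 * ibmvfc_init_sub_crqs - Allocate and register the SCSI sub-CRQs
 * @vhost: ibmvfc host struct
 *
 * Disables multiqueue support and the channel enquiry if channel
 * allocation fails.
 **/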
6083 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
6084 {
6085 ENTER;
6086 if (!vhost->mq_enabled)
6087 return;
6088
6089 if (ibmvfc_alloc_channels(vhost, &vhost->scsi_scrqs)) {
6090 vhost->do_enquiry = 0;
6091 vhost->mq_enabled = 0;
6092 return;
6093 }
6094
6095 ibmvfc_reg_sub_crqs(vhost, &vhost->scsi_scrqs);
6096
6097 LEAVE;
6098 }
6099
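/**
 * ibmvfc_release_channels - Free a set of channel queues
 * @vhost: ibmvfc host struct
 * @channels: ibmvfc channels struct
 *
 **/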
6100 static void ibmvfc_release_channels(struct ibmvfc_host *vhost,
6101 struct ibmvfc_channels *channels)
6102 {
6103 struct ibmvfc_queue *scrq;
6104 int i;
6105
6106 if (channels->scrqs) {
6107 for (i = 0; i < channels->max_queues; i++) {
6108 scrq = &channels->scrqs[i];
6109 ibmvfc_free_queue(vhost, scrq);
6110 }
6111
6112 kfree(channels->scrqs);
6113 channels->scrqs = NULL;
6114 channels->active_queues = 0;
6115 }
6116 }
6117
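/**
 * ibmvfc_release_sub_crqs - Deregister and free the SCSI sub-CRQs
 * @vhost: ibmvfc host struct
 *
 **/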
6118 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
6119 {
6120 ENTER;
6121 if (!vhost->scsi_scrqs.scrqs)
6122 return;
6123
6124 ibmvfc_dereg_sub_crqs(vhost, &vhost->scsi_scrqs);
6125
6126 ibmvfc_release_channels(vhost, &vhost->scsi_scrqs);
6127 LEAVE;
6128 }
6129
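/**
 * ibmvfc_free_disc_buf - Free the Discover Targets buffer
 * @dev: generic device struct
 * @channels: ibmvfc channels struct
 *
 **/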
6130 static void ibmvfc_free_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
6131 {
6132 dma_free_coherent(dev, channels->disc_buf_sz, channels->disc_buf,
6133 channels->disc_buf_dma);
6134 }
6135
6136 /**
6137 * ibmvfc_free_mem - Free memory for vhost
6138 * @vhost: ibmvfc host struct
6139 *
6140 * Return value:
6141 * none
6142 **/
6143 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
6144 {
6145 struct ibmvfc_queue *async_q = &vhost->async_crq;
6146
6147 ENTER;
6148 mempool_destroy(vhost->tgt_pool);
6149 kfree(vhost->trace);
6150 ibmvfc_free_disc_buf(vhost->dev, &vhost->scsi_scrqs);
6151 dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
6152 vhost->login_buf, vhost->login_buf_dma);
6153 dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
6154 vhost->channel_setup_buf, vhost->channel_setup_dma);
6155 dma_pool_destroy(vhost->sg_pool);
6156 ibmvfc_free_queue(vhost, async_q);
6157 LEAVE;
6158 }
6159
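/**
 * ibmvfc_alloc_disc_buf - Allocate the Discover Targets buffer
 * @dev: generic device struct
 * @channels: ibmvfc channels struct
 *
 * Returns:
 * 0 on success / -ENOMEM on failure
 **/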
6160 static int ibmvfc_alloc_disc_buf(struct device *dev, struct ibmvfc_channels *channels)
6161 {
6162 channels->disc_buf_sz = sizeof(*channels->disc_buf) * max_targets;
6163 channels->disc_buf = dma_alloc_coherent(dev, channels->disc_buf_sz,
6164 &channels->disc_buf_dma, GFP_KERNEL);
6165
6166 if (!channels->disc_buf) {
6167 dev_err(dev, "Couldn't allocate %s Discover Targets buffer\n",
6168 (channels->protocol == IBMVFC_PROTO_SCSI) ? "SCSI" : "NVMe");
6169 return -ENOMEM;
6170 }
6171
6172 return 0;
6173 }
6174
6175 /**
6176 * ibmvfc_alloc_mem - Allocate memory for vhost
6177 * @vhost: ibmvfc host struct
6178 *
6179 * Return value:
6180 * 0 on success / non-zero on failure
6181 **/
6182 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
6183 {
6184 struct ibmvfc_queue *async_q = &vhost->async_crq;
6185 struct device *dev = vhost->dev;
6186
6187 ENTER;
6188 if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
6189 dev_err(dev, "Couldn't allocate/map async queue.\n");
6190 goto nomem;
6191 }
6192
6193 vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
6194 SG_ALL * sizeof(struct srp_direct_buf),
6195 sizeof(struct srp_direct_buf), 0);
6196
6197 if (!vhost->sg_pool) {
6198 dev_err(dev, "Failed to allocate sg pool\n");
6199 goto unmap_async_crq;
6200 }
6201
6202 vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
6203 &vhost->login_buf_dma, GFP_KERNEL);
6204
6205 if (!vhost->login_buf) {
6206 dev_err(dev, "Couldn't allocate NPIV login buffer\n");
6207 goto free_sg_pool;
6208 }
6209
6210 if (ibmvfc_alloc_disc_buf(dev, &vhost->scsi_scrqs))
6211 goto free_login_buffer;
6212
6213 vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
6214 sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
6215 atomic_set(&vhost->trace_index, -1);
6216
6217 if (!vhost->trace)
6218 goto free_scsi_disc_buffer;
6219
6220 vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
6221 sizeof(struct ibmvfc_target));
6222
6223 if (!vhost->tgt_pool) {
6224 dev_err(dev, "Couldn't allocate target memory pool\n");
6225 goto free_trace;
6226 }
6227
6228 vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
6229 &vhost->channel_setup_dma,
6230 GFP_KERNEL);
6231
6232 if (!vhost->channel_setup_buf) {
6233 dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
6234 goto free_tgt_pool;
6235 }
6236
6237 LEAVE;
6238 return 0;
6239
6240 free_tgt_pool:
6241 mempool_destroy(vhost->tgt_pool);
6242 free_trace:
6243 kfree(vhost->trace);
6244 free_scsi_disc_buffer:
6245 ibmvfc_free_disc_buf(dev, &vhost->scsi_scrqs);
6246 free_login_buffer:
6247 dma_free_coherent(dev, sizeof(*vhost->login_buf),
6248 vhost->login_buf, vhost->login_buf_dma);
6249 free_sg_pool:
6250 dma_pool_destroy(vhost->sg_pool);
6251 unmap_async_crq:
6252 ibmvfc_free_queue(vhost, async_q);
6253 nomem:
6254 LEAVE;
6255 return -ENOMEM;
6256 }
6257
6258 /**
6259 * ibmvfc_rport_add_thread - Worker thread for rport adds
6260 * @work: work struct
6261 *
6262 **/
6263 static void ibmvfc_rport_add_thread(struct work_struct *work)
6264 {
6265 struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
6266 rport_add_work_q);
6267 struct ibmvfc_target *tgt;
6268 struct fc_rport *rport;
6269 unsigned long flags;
6270 int did_work;
6271
6272 ENTER;
6273 spin_lock_irqsave(vhost->host->host_lock, flags);
6274 do {
6275 did_work = 0;
6276 if (vhost->state != IBMVFC_ACTIVE)
6277 break;
6278
6279 list_for_each_entry(tgt, &vhost->targets, queue) {
6280 if (tgt->add_rport) {
6281 did_work = 1;
6282 tgt->add_rport = 0;
6283 kref_get(&tgt->kref);
6284 rport = tgt->rport;
6285 if (!rport) {
6286 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6287 ibmvfc_tgt_add_rport(tgt);
6288 } else if (get_device(&rport->dev)) {
6289 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6290 tgt_dbg(tgt, "Setting rport roles\n");
6291 fc_remote_port_rolechg(rport, tgt->ids.roles);
6292 put_device(&rport->dev);
6293 } else {
6294 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6295 }
6296
6297 kref_put(&tgt->kref, ibmvfc_release_tgt);
6298 spin_lock_irqsave(vhost->host->host_lock, flags);
6299 break;
6300 }
6301 }
6302 } while (did_work);
6303
6304 if (vhost->state == IBMVFC_ACTIVE)
6305 vhost->scan_complete = 1;
6306 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6307 LEAVE;
6308 }
6309
6310 /**
6311 * ibmvfc_probe - Adapter hot plug add entry point
6312 * @vdev: vio device struct
6313 * @id: vio device id struct
6314 *
6315 * Return value:
6316 * 0 on success / non-zero on failure
6317 **/
6318 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6319 {
6320 struct ibmvfc_host *vhost;
6321 struct Scsi_Host *shost;
6322 struct device *dev = &vdev->dev;
6323 int rc = -ENOMEM;
6324 unsigned int online_cpus = num_online_cpus();
6325 unsigned int max_scsi_queues = min((unsigned int)IBMVFC_MAX_SCSI_QUEUES, online_cpus);
6326
6327 ENTER;
6328 shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6329 if (!shost) {
6330 dev_err(dev, "Couldn't allocate host data\n");
6331 goto out;
6332 }
6333
6334 shost->transportt = ibmvfc_transport_template;
6335 shost->can_queue = scsi_qdepth;
6336 shost->max_lun = max_lun;
6337 shost->max_id = max_targets;
6338 shost->max_sectors = max_sectors;
6339 shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6340 shost->unique_id = shost->host_no;
6341 shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6342
6343 vhost = shost_priv(shost);
6344 INIT_LIST_HEAD(&vhost->targets);
6345 INIT_LIST_HEAD(&vhost->purge);
6346 sprintf(vhost->name, IBMVFC_NAME);
6347 vhost->host = shost;
6348 vhost->dev = dev;
6349 vhost->partition_number = -1;
6350 vhost->log_level = log_level;
6351 vhost->task_set = 1;
6352
6353 vhost->mq_enabled = mq_enabled;
6354 vhost->scsi_scrqs.desired_queues = min(shost->nr_hw_queues, nr_scsi_channels);
6355 vhost->scsi_scrqs.max_queues = shost->nr_hw_queues;
6356 vhost->scsi_scrqs.protocol = IBMVFC_PROTO_SCSI;
6357 vhost->using_channels = 0;
6358 vhost->do_enquiry = 1;
6359 vhost->scan_timeout = 0;
6360
6361 strcpy(vhost->partition_name, "UNKNOWN");
6362 init_waitqueue_head(&vhost->work_wait_q);
6363 init_waitqueue_head(&vhost->init_wait_q);
6364 INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6365 mutex_init(&vhost->passthru_mutex);
6366
6367 if ((rc = ibmvfc_alloc_mem(vhost)))
6368 goto free_scsi_host;
6369
6370 vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6371 shost->host_no);
6372
6373 if (IS_ERR(vhost->work_thread)) {
6374 dev_err(dev, "Couldn't create kernel thread: %ld\n",
6375 PTR_ERR(vhost->work_thread));
6376 rc = PTR_ERR(vhost->work_thread);
6377 goto free_host_mem;
6378 }
6379
6380 if ((rc = ibmvfc_init_crq(vhost))) {
6381 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6382 goto kill_kthread;
6383 }
6384
6385 if ((rc = scsi_add_host(shost, dev)))
6386 goto release_crq;
6387
6388 fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6389
6390 if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6391 &ibmvfc_trace_attr))) {
6392 dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6393 goto remove_shost;
6394 }
6395
6396 ibmvfc_init_sub_crqs(vhost);
6397
6398 dev_set_drvdata(dev, vhost);
6399 spin_lock(&ibmvfc_driver_lock);
6400 list_add_tail(&vhost->queue, &ibmvfc_head);
6401 spin_unlock(&ibmvfc_driver_lock);
6402
6403 ibmvfc_send_crq_init(vhost);
6404 scsi_scan_host(shost);
6405 return 0;
6406
6407 remove_shost:
6408 scsi_remove_host(shost);
6409 release_crq:
6410 ibmvfc_release_crq_queue(vhost);
6411 kill_kthread:
6412 kthread_stop(vhost->work_thread);
6413 free_host_mem:
6414 ibmvfc_free_mem(vhost);
6415 free_scsi_host:
6416 scsi_host_put(shost);
6417 out:
6418 LEAVE;
6419 return rc;
6420 }
6421
6422 /**
6423 * ibmvfc_remove - Adapter hot plug remove entry point
6424 * @vdev: vio device struct
6425 *
6426 * Return value:
6427 * 0
6428 **/
6429 static void ibmvfc_remove(struct vio_dev *vdev)
6430 {
6431 struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6432 LIST_HEAD(purge);
6433 unsigned long flags;
6434
6435 ENTER;
6436 ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6437
6438 spin_lock_irqsave(vhost->host->host_lock, flags);
6439 ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6440 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6441
6442 ibmvfc_wait_while_resetting(vhost);
6443 kthread_stop(vhost->work_thread);
6444 fc_remove_host(vhost->host);
6445 scsi_remove_host(vhost->host);
6446
6447 spin_lock_irqsave(vhost->host->host_lock, flags);
6448 ibmvfc_purge_requests(vhost, DID_ERROR);
6449 list_splice_init(&vhost->purge, &purge);
6450 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6451 ibmvfc_complete_purge(&purge);
6452 ibmvfc_release_sub_crqs(vhost);
6453 ibmvfc_release_crq_queue(vhost);
6454
6455 ibmvfc_free_mem(vhost);
6456 spin_lock(&ibmvfc_driver_lock);
6457 list_del(&vhost->queue);
6458 spin_unlock(&ibmvfc_driver_lock);
6459 scsi_host_put(vhost->host);
6460 LEAVE;
6461 }
6462
6463 /**
6464 * ibmvfc_resume - Resume from suspend
6465 * @dev: device struct
6466 *
6467 * We may have lost an interrupt across suspend/resume, so kick the
6468 * interrupt handler
6469 *
6470 */
6471 static int ibmvfc_resume(struct device *dev)
6472 {
6473 unsigned long flags;
6474 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6475 struct vio_dev *vdev = to_vio_dev(dev);
6476
6477 spin_lock_irqsave(vhost->host->host_lock, flags);
6478 vio_disable_interrupts(vdev);
6479 tasklet_schedule(&vhost->tasklet);
6480 spin_unlock_irqrestore(vhost->host->host_lock, flags);
6481 return 0;
6482 }
6483
6484 /**
6485 * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6486 * @vdev: vio device struct
6487 *
6488 * Return value:
6489 * Number of bytes the driver will need to DMA map at the same time in
6490 * order to perform well.
6491 */
6492 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6493 {
6494 unsigned long pool_dma;
6495
6496 pool_dma = (IBMVFC_MAX_SCSI_QUEUES * scsi_qdepth) * sizeof(union ibmvfc_iu);
6497 return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6498 }
6499
6500 static const struct vio_device_id ibmvfc_device_table[] = {
6501 {"fcp", "IBM,vfc-client"},
6502 { "", "" }
6503 };
6504 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6505
6506 static const struct dev_pm_ops ibmvfc_pm_ops = {
6507 .resume = ibmvfc_resume
6508 };
6509
6510 static struct vio_driver ibmvfc_driver = {
6511 .id_table = ibmvfc_device_table,
6512 .probe = ibmvfc_probe,
6513 .remove = ibmvfc_remove,
6514 .get_desired_dma = ibmvfc_get_desired_dma,
6515 .name = IBMVFC_NAME,
6516 .pm = &ibmvfc_pm_ops,
6517 };
6518
6519 static struct fc_function_template ibmvfc_transport_functions = {
6520 .show_host_fabric_name = 1,
6521 .show_host_node_name = 1,
6522 .show_host_port_name = 1,
6523 .show_host_supported_classes = 1,
6524 .show_host_port_type = 1,
6525 .show_host_port_id = 1,
6526 .show_host_maxframe_size = 1,
6527
6528 .get_host_port_state = ibmvfc_get_host_port_state,
6529 .show_host_port_state = 1,
6530
6531 .get_host_speed = ibmvfc_get_host_speed,
6532 .show_host_speed = 1,
6533
6534 .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6535 .terminate_rport_io = ibmvfc_terminate_rport_io,
6536
6537 .show_rport_maxframe_size = 1,
6538 .show_rport_supported_classes = 1,
6539
6540 .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6541 .show_rport_dev_loss_tmo = 1,
6542
6543 .get_starget_node_name = ibmvfc_get_starget_node_name,
6544 .show_starget_node_name = 1,
6545
6546 .get_starget_port_name = ibmvfc_get_starget_port_name,
6547 .show_starget_port_name = 1,
6548
6549 .get_starget_port_id = ibmvfc_get_starget_port_id,
6550 .show_starget_port_id = 1,
6551
6552 .max_bsg_segments = 1,
6553 .bsg_request = ibmvfc_bsg_request,
6554 .bsg_timeout = ibmvfc_bsg_timeout,
6555 };
6556
6557 /**
6558 * ibmvfc_module_init - Initialize the ibmvfc module
6559 *
6560 * Return value:
6561 * 0 on success / other on failure
6562 **/
6563 static int __init ibmvfc_module_init(void)
6564 {
6565 int min_max_sectors = PAGE_SIZE >> 9;
6566 int rc;
6567
6568 if (!firmware_has_feature(FW_FEATURE_VIO))
6569 return -ENODEV;
6570
6571 printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6572 IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6573
6574 /*
6575 * Range check the max_sectors module parameter. The upper bound is
6576 * implicitly checked since the parameter is a ushort.
6577 */
6578 if (max_sectors < min_max_sectors) {
6579 printk(KERN_ERR IBMVFC_NAME ": max_sectors must be at least %d.\n",
6580 min_max_sectors);
6581 max_sectors = min_max_sectors;
6582 }
6583
6584 ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6585 if (!ibmvfc_transport_template)
6586 return -ENOMEM;
6587
6588 rc = vio_register_driver(&ibmvfc_driver);
6589 if (rc)
6590 fc_release_transport(ibmvfc_transport_template);
6591 return rc;
6592 }
6593
6594 /**
6595 * ibmvfc_module_exit - Teardown the ibmvfc module
6596 *
6597 * Return value:
6598 * nothing
6599 **/
6600 static void __exit ibmvfc_module_exit(void)
6601 {
6602 vio_unregister_driver(&ibmvfc_driver);
6603 fc_release_transport(ibmvfc_transport_template);
6604 }
6605
6606 module_init(ibmvfc_module_init);
6607 module_exit(ibmvfc_module_exit);
6608