xref: /linux/drivers/s390/scsi/zfcp_fsf.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2023
8  */
9 
10 #define pr_fmt(fmt) "zfcp: " fmt
11 
12 #include <linux/blktrace_api.h>
13 #include <linux/jiffies.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <scsi/fc/fc_els.h>
17 #include "zfcp_ext.h"
18 #include "zfcp_fc.h"
19 #include "zfcp_dbf.h"
20 #include "zfcp_qdio.h"
21 #include "zfcp_reqlist.h"
22 #include "zfcp_diag.h"
23 
24 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
25 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
26 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */
27 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
28 
29 struct kmem_cache *zfcp_fsf_qtcb_cache;
30 
31 static bool ber_stop = true;
32 module_param(ber_stop, bool, 0600);
33 MODULE_PARM_DESC(ber_stop,
34 		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of the threshold (default on)");
35 
36 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
37 {
38 	struct zfcp_fsf_req *fsf_req = timer_container_of(fsf_req, t, timer);
39 	struct zfcp_adapter *adapter = fsf_req->adapter;
40 
41 	zfcp_qdio_siosl(adapter);
42 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
43 				"fsrth_1");
44 }
45 
46 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
47 				 unsigned long timeout)
48 {
49 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
50 	fsf_req->timer.expires = jiffies + timeout;
51 	add_timer(&fsf_req->timer);
52 }
53 
54 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
55 {
56 	BUG_ON(!fsf_req->erp_action);
57 	fsf_req->timer.function = zfcp_erp_timeout_handler;
58 	fsf_req->timer.expires = jiffies + 30 * HZ;
59 	add_timer(&fsf_req->timer);
60 }
61 
62 /* association between FSF command and FSF QTCB type */
63 static u32 fsf_qtcb_type[] = {
64 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
65 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
66 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
67 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
68 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
69 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
70 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
71 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
72 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
73 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
74 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
75 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
76 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
77 };
78 
79 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
80 {
81 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
82 		"operational because of an unsupported FC class\n");
83 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
84 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
85 }
86 
87 /**
88  * zfcp_fsf_req_free - free memory used by fsf request
89  * @req: pointer to struct zfcp_fsf_req
90  */
91 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
92 {
93 	if (likely(req->pool)) {
94 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
95 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
96 		mempool_free(req, req->pool);
97 		return;
98 	}
99 
100 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
101 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
102 	kfree(req);
103 }
104 
105 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
106 {
107 	unsigned long flags;
108 	struct fsf_status_read_buffer *sr_buf = req->data;
109 	struct zfcp_adapter *adapter = req->adapter;
110 	struct zfcp_port *port;
111 	int d_id = ntoh24(sr_buf->d_id);
112 
113 	read_lock_irqsave(&adapter->port_list_lock, flags);
114 	list_for_each_entry(port, &adapter->port_list, list)
115 		if (port->d_id == d_id) {
116 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
117 			break;
118 		}
119 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
120 }
121 
122 void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
123 {
124 	struct Scsi_Host *shost = adapter->scsi_host;
125 
126 	adapter->hydra_version = 0;
127 	adapter->peer_wwpn = 0;
128 	adapter->peer_wwnn = 0;
129 	adapter->peer_d_id = 0;
130 
131 	/* if there is no shost yet, we have nothing to zero-out */
132 	if (shost == NULL)
133 		return;
134 
135 	fc_host_port_id(shost) = 0;
136 	fc_host_fabric_name(shost) = 0;
137 	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
138 	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
139 	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
140 	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
141 }
142 
143 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
144 					 struct fsf_link_down_info *link_down)
145 {
146 	struct zfcp_adapter *adapter = req->adapter;
147 
148 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
149 		return;
150 
151 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
152 
153 	zfcp_scsi_schedule_rports_block(adapter);
154 
155 	zfcp_fsf_fc_host_link_down(adapter);
156 
157 	if (!link_down)
158 		goto out;
159 
160 	switch (link_down->error_code) {
161 	case FSF_PSQ_LINK_NO_LIGHT:
162 		dev_warn(&req->adapter->ccw_device->dev,
163 			 "There is no light signal from the local "
164 			 "fibre channel cable\n");
165 		break;
166 	case FSF_PSQ_LINK_WRAP_PLUG:
167 		dev_warn(&req->adapter->ccw_device->dev,
168 			 "There is a wrap plug instead of a fibre "
169 			 "channel cable\n");
170 		break;
171 	case FSF_PSQ_LINK_NO_FCP:
172 		dev_warn(&req->adapter->ccw_device->dev,
173 			 "The adjacent fibre channel node does not "
174 			 "support FCP\n");
175 		break;
176 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
177 		dev_warn(&req->adapter->ccw_device->dev,
178 			 "The FCP device is suspended because of a "
179 			 "firmware update\n");
180 		break;
181 	case FSF_PSQ_LINK_INVALID_WWPN:
182 		dev_warn(&req->adapter->ccw_device->dev,
183 			 "The FCP device detected a WWPN that is "
184 			 "duplicate or not valid\n");
185 		break;
186 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
187 		dev_warn(&req->adapter->ccw_device->dev,
188 			 "The fibre channel fabric does not support NPIV\n");
189 		break;
190 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
191 		dev_warn(&req->adapter->ccw_device->dev,
192 			 "The FCP adapter cannot support more NPIV ports\n");
193 		break;
194 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
195 		dev_warn(&req->adapter->ccw_device->dev,
196 			 "The adjacent switch cannot support "
197 			 "more NPIV ports\n");
198 		break;
199 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
200 		dev_warn(&req->adapter->ccw_device->dev,
201 			 "The FCP adapter could not log in to the "
202 			 "fibre channel fabric\n");
203 		break;
204 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
205 		dev_warn(&req->adapter->ccw_device->dev,
206 			 "The WWPN assignment file on the FCP adapter "
207 			 "has been damaged\n");
208 		break;
209 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
210 		dev_warn(&req->adapter->ccw_device->dev,
211 			 "The mode table on the FCP adapter "
212 			 "has been damaged\n");
213 		break;
214 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
215 		dev_warn(&req->adapter->ccw_device->dev,
216 			 "All NPIV ports on the FCP adapter have "
217 			 "been assigned\n");
218 		break;
219 	default:
220 		dev_warn(&req->adapter->ccw_device->dev,
221 			 "The link between the FCP adapter and "
222 			 "the FC fabric is down\n");
223 	}
224 out:
225 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
226 }
227 
228 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
229 {
230 	struct fsf_status_read_buffer *sr_buf = req->data;
231 	struct fsf_link_down_info *ldi =
232 		(struct fsf_link_down_info *) &sr_buf->payload;
233 
234 	switch (sr_buf->status_subtype) {
235 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
236 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
237 		zfcp_fsf_link_down_info_eval(req, ldi);
238 		break;
239 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
240 		zfcp_fsf_link_down_info_eval(req, NULL);
241 	}
242 }
243 
244 static void
245 zfcp_fsf_status_read_version_change(struct zfcp_adapter *adapter,
246 				    struct fsf_status_read_buffer *sr_buf)
247 {
248 	if (sr_buf->status_subtype == FSF_STATUS_READ_SUB_LIC_CHANGE) {
249 		u32 version = sr_buf->payload.version_change.current_version;
250 
251 		WRITE_ONCE(adapter->fsf_lic_version, version);
252 		snprintf(fc_host_firmware_version(adapter->scsi_host),
253 			 FC_VERSION_STRING_SIZE, "%#08x", version);
254 	}
255 }
256 
257 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
258 {
259 	struct zfcp_adapter *adapter = req->adapter;
260 	struct fsf_status_read_buffer *sr_buf = req->data;
261 
262 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
263 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
264 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
265 		zfcp_fsf_req_free(req);
266 		return;
267 	}
268 
269 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
270 
271 	switch (sr_buf->status_type) {
272 	case FSF_STATUS_READ_PORT_CLOSED:
273 		zfcp_fsf_status_read_port_closed(req);
274 		break;
275 	case FSF_STATUS_READ_INCOMING_ELS:
276 		zfcp_fc_incoming_els(req);
277 		break;
278 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
279 		break;
280 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
281 		zfcp_dbf_hba_bit_err("fssrh_3", req);
282 		if (ber_stop) {
283 			dev_warn(&adapter->ccw_device->dev,
284 				 "All paths over this FCP device are disused because of excessive bit errors\n");
285 			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
286 		} else {
287 			dev_warn(&adapter->ccw_device->dev,
288 				 "The error threshold for checksum statistics has been exceeded\n");
289 		}
290 		break;
291 	case FSF_STATUS_READ_LINK_DOWN:
292 		zfcp_fsf_status_read_link_down(req);
293 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
294 		break;
295 	case FSF_STATUS_READ_LINK_UP:
296 		dev_info(&adapter->ccw_device->dev,
297 			 "The local link has been restored\n");
298 		/* All ports should be marked as ready to run again */
299 		zfcp_erp_set_adapter_status(adapter,
300 					    ZFCP_STATUS_COMMON_RUNNING);
301 		zfcp_erp_adapter_reopen(adapter,
302 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
303 					ZFCP_STATUS_COMMON_ERP_FAILED,
304 					"fssrh_2");
305 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
306 
307 		break;
308 	case FSF_STATUS_READ_NOTIFICATION_LOST:
309 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
310 			zfcp_fc_conditional_port_scan(adapter);
311 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_VERSION_CHANGE)
312 			queue_work(adapter->work_queue,
313 				   &adapter->version_change_lost_work);
314 		break;
315 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
316 		adapter->adapter_features = sr_buf->payload.word[0];
317 		break;
318 	case FSF_STATUS_READ_VERSION_CHANGE:
319 		zfcp_fsf_status_read_version_change(adapter, sr_buf);
320 		break;
321 	}
322 
323 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
324 	zfcp_fsf_req_free(req);
325 
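	/* the status read buffer consumed above has to be replenished:
	 * count it as missing and let the work scheduled on stat_work
	 * submit a fresh status read request */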
326 	atomic_inc(&adapter->stat_miss);
327 	queue_work(adapter->work_queue, &adapter->stat_work);
328 }
329 
330 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
331 {
332 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
333 	case FSF_SQ_FCP_RSP_AVAILABLE:
334 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
335 	case FSF_SQ_NO_RETRY_POSSIBLE:
336 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
337 		return;
338 	case FSF_SQ_COMMAND_ABORTED:
339 		break;
340 	case FSF_SQ_NO_RECOM:
341 		dev_err(&req->adapter->ccw_device->dev,
342 			"The FCP adapter reported a problem "
343 			"that cannot be recovered\n");
344 		zfcp_qdio_siosl(req->adapter);
345 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
346 		break;
347 	}
348 	/* all non-return statuses set FSFREQ_ERROR */
349 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
350 }
351 
352 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
353 {
354 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
355 		return;
356 
357 	switch (req->qtcb->header.fsf_status) {
358 	case FSF_UNKNOWN_COMMAND:
359 		dev_err(&req->adapter->ccw_device->dev,
360 			"The FCP adapter does not recognize the command 0x%x\n",
361 			req->qtcb->header.fsf_command);
362 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
363 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
364 		break;
365 	case FSF_ADAPTER_STATUS_AVAILABLE:
366 		zfcp_fsf_fsfstatus_qual_eval(req);
367 		break;
368 	}
369 }
370 
371 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
372 {
373 	struct zfcp_adapter *adapter = req->adapter;
374 	struct fsf_qtcb *qtcb = req->qtcb;
375 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
376 
377 	zfcp_dbf_hba_fsf_response(req);
378 
379 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
380 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
381 		return;
382 	}
383 
384 	switch (qtcb->prefix.prot_status) {
385 	case FSF_PROT_GOOD:
386 	case FSF_PROT_FSF_STATUS_PRESENTED:
387 		return;
388 	case FSF_PROT_QTCB_VERSION_ERROR:
389 		dev_err(&adapter->ccw_device->dev,
390 			"QTCB version 0x%x not supported by FCP adapter "
391 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
392 			psq->word[0], psq->word[1]);
393 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
394 		break;
395 	case FSF_PROT_ERROR_STATE:
396 	case FSF_PROT_SEQ_NUMB_ERROR:
397 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
398 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
399 		break;
400 	case FSF_PROT_UNSUPP_QTCB_TYPE:
401 		dev_err(&adapter->ccw_device->dev,
402 			"The QTCB type is not supported by the FCP adapter\n");
403 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
404 		break;
405 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
406 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
407 				&adapter->status);
408 		break;
409 	case FSF_PROT_DUPLICATE_REQUEST_ID:
410 		dev_err(&adapter->ccw_device->dev,
411 			"0x%Lx is an ambiguous request identifier\n",
412 			(unsigned long long)qtcb->bottom.support.req_handle);
413 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
414 		break;
415 	case FSF_PROT_LINK_DOWN:
416 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
417 		/* go through reopen to flush pending requests */
418 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
419 		break;
420 	case FSF_PROT_REEST_QUEUE:
421 		/* All ports should be marked as ready to run again */
422 		zfcp_erp_set_adapter_status(adapter,
423 					    ZFCP_STATUS_COMMON_RUNNING);
424 		zfcp_erp_adapter_reopen(adapter,
425 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
426 					ZFCP_STATUS_COMMON_ERP_FAILED,
427 					"fspse_8");
428 		break;
429 	default:
430 		dev_err(&adapter->ccw_device->dev,
431 			"0x%x is not a valid transfer protocol status\n",
432 			qtcb->prefix.prot_status);
433 		zfcp_qdio_siosl(adapter);
434 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
435 	}
436 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
437 }
438 
439 /**
440  * zfcp_fsf_req_complete - process completion of an FSF request
441  * @req: The FSF request that has been completed.
442  *
443  * When a request has been completed, either by the FCP adapter or
444  * because it has been dismissed due to a queue shutdown, this function
445  * is called to process the completion status and trigger further
446  * events related to the FSF request.
447  * Caller must ensure that the request has been removed from
448  * adapter->req_list, to protect against concurrent modification
449  * by zfcp_erp_strategy_check_fsfreq().
450  */
451 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
452 {
453 	struct zfcp_erp_action *erp_action;
454 
455 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
456 		zfcp_fsf_status_read_handler(req);
457 		return;
458 	}
459 
460 	timer_delete_sync(&req->timer);
461 	zfcp_fsf_protstatus_eval(req);
462 	zfcp_fsf_fsfstatus_eval(req);
463 	req->handler(req);
464 
465 	erp_action = req->erp_action;
466 	if (erp_action)
467 		zfcp_erp_notify(erp_action, 0);
468 
469 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
470 		zfcp_fsf_req_free(req);
471 	else
472 		complete(&req->completion);
473 }
474 
475 /**
476  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
477  * @adapter: pointer to struct zfcp_adapter
478  *
479  * Never ever call this without shutting down the adapter first.
480  * Otherwise the adapter would continue using and corrupting s390 storage.
481  * A BUG_ON() call is included to ensure this is done.
482  * ERP is supposed to be the only user of this function.
483  */
484 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
485 {
486 	struct zfcp_fsf_req *req, *tmp;
487 	LIST_HEAD(remove_queue);
488 
489 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
490 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
491 
492 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
493 		list_del(&req->list);
494 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
495 		zfcp_fsf_req_complete(req);
496 	}
497 }
498 
499 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
500 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
501 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
502 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
503 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
504 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
505 #define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
506 #define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
507 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
508 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
509 
510 u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
511 {
512 	u32 fdmi_speed = 0;
513 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
514 		fdmi_speed |= FC_PORTSPEED_1GBIT;
515 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
516 		fdmi_speed |= FC_PORTSPEED_2GBIT;
517 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
518 		fdmi_speed |= FC_PORTSPEED_4GBIT;
519 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
520 		fdmi_speed |= FC_PORTSPEED_10GBIT;
521 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
522 		fdmi_speed |= FC_PORTSPEED_8GBIT;
523 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
524 		fdmi_speed |= FC_PORTSPEED_16GBIT;
525 	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
526 		fdmi_speed |= FC_PORTSPEED_32GBIT;
527 	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
528 		fdmi_speed |= FC_PORTSPEED_64GBIT;
529 	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
530 		fdmi_speed |= FC_PORTSPEED_128GBIT;
531 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
532 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
533 	return fdmi_speed;
534 }
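
/*
 * Illustrative note (editorial addition, not from the original source):
 * zfcp_fsf_convert_portspeed() is a plain bit-for-bit translation.  An
 * adapter reporting ZFCP_FSF_PORTSPEED_8GBIT | ZFCP_FSF_PORTSPEED_16GBIT,
 * for example, is presented to the FC transport class as
 * FC_PORTSPEED_8GBIT | FC_PORTSPEED_16GBIT; FSF speed bits without a
 * mapping above are silently dropped.
 */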
535 
536 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
537 {
538 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
539 	struct zfcp_adapter *adapter = req->adapter;
540 	struct fc_els_flogi *plogi;
541 
542 	/* adjust pointers for missing command code */
543 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
544 					- sizeof(u32));
545 
546 	if (req->data)
547 		memcpy(req->data, bottom, sizeof(*bottom));
548 
549 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
550 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
551 					 (u16)FSF_STATUS_READS_RECOM);
552 
553 	/* no error return above here, otherwise must fix call chains */
554 	/* do not evaluate invalid fields */
555 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
556 		return 0;
557 
558 	adapter->hydra_version = bottom->adapter_type;
559 
560 	switch (bottom->fc_topology) {
561 	case FSF_TOPO_P2P:
562 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
563 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
564 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
565 		break;
566 	case FSF_TOPO_FABRIC:
567 		break;
568 	case FSF_TOPO_AL:
569 	default:
570 		dev_err(&adapter->ccw_device->dev,
571 			"Unknown or unsupported arbitrated loop "
572 			"fibre channel topology detected\n");
573 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
574 		return -EIO;
575 	}
576 
577 	return 0;
578 }
579 
580 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
581 {
582 	struct zfcp_adapter *adapter = req->adapter;
583 	struct zfcp_diag_header *const diag_hdr =
584 		&adapter->diagnostics->config_data.header;
585 	struct fsf_qtcb *qtcb = req->qtcb;
586 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
587 
588 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
589 		return;
590 
591 	adapter->fsf_lic_version = bottom->lic_version;
592 	adapter->adapter_features = bottom->adapter_features;
593 	adapter->connection_features = bottom->connection_features;
594 	adapter->peer_wwpn = 0;
595 	adapter->peer_wwnn = 0;
596 	adapter->peer_d_id = 0;
597 
598 	switch (qtcb->header.fsf_status) {
599 	case FSF_GOOD:
600 		/*
601 		 * usually we postpone the update until the cached data is too
602 		 * old, but since the data is available now, update it anyway
603 		 */
604 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
605 
606 		zfcp_scsi_shost_update_config_data(adapter, bottom, false);
607 		if (zfcp_fsf_exchange_config_evaluate(req))
608 			return;
609 
610 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
611 			dev_err(&adapter->ccw_device->dev,
612 				"FCP adapter maximum QTCB size (%d bytes) "
613 				"is too small\n",
614 				bottom->max_qtcb_size);
615 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
616 			return;
617 		}
618 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
619 				&adapter->status);
620 		break;
621 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
622 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
623 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
624 
625 		/* avoids adapter shutdown to be able to recognize
626 		 * events such as LINK UP */
627 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
628 				&adapter->status);
629 		zfcp_fsf_link_down_info_eval(req,
630 			&qtcb->header.fsf_status_qual.link_down_info);
631 
632 		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
633 		if (zfcp_fsf_exchange_config_evaluate(req))
634 			return;
635 		break;
636 	default:
637 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
638 		return;
639 	}
640 
641 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
642 		adapter->hardware_version = bottom->hardware_version;
643 
644 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
645 		dev_err(&adapter->ccw_device->dev,
646 			"The FCP adapter only supports newer "
647 			"control block versions\n");
648 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
649 		return;
650 	}
651 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
652 		dev_err(&adapter->ccw_device->dev,
653 			"The FCP adapter only supports older "
654 			"control block versions\n");
655 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
656 	}
657 }
658 
659 /*
660  * Mapping of FC Endpoint Security flag masks to mnemonics
661  *
662  * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
663  *       changes.
664  */
665 static const struct {
666 	u32	mask;
667 	char	*name;
668 } zfcp_fsf_fc_security_mnemonics[] = {
669 	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
670 	{ FSF_FC_SECURITY_ENC_FCSP2 |
671 	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
672 };
673 
674 /* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
675 #define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
676 
677 /**
678  * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
679  *                                   mnemonics and place in a buffer
680  * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
681  *               into
682  * @size       : the size of the buffer, including the trailing null space
683  * @fc_security: one or more FC Endpoint Security flags, or zero
684  * @fmt        : specifies whether a list or a single item is to be put into the
685  *               buffer
686  *
687  * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
688  * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
689  *
690  * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
691  * a comma followed by a space into the buffer. If one or more FC Endpoint
692  * Security flags cannot be translated into a mnemonic, as they are undefined
693  * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
694  * representation is placed into the buffer.
695  *
696  * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
697  * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
698  * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
699  * representation is placed into the buffer. If more than one FC Endpoint
700  * Security flag was specified, their value in hexadecimal representation is
701  * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
702  * can be used to define a buffer that is large enough to hold one mnemonic.
703  *
704  * Return: The number of characters written into buf not including the trailing
705  *         '\0'. If size is 0 the function returns 0.
706  */
707 ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
708 				      enum zfcp_fsf_print_fmt fmt)
709 {
710 	const char *prefix = "";
711 	ssize_t len = 0;
712 	int i;
713 
714 	if (fc_security == 0)
715 		return scnprintf(buf, size, "none");
716 	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
717 		return scnprintf(buf, size, "0x%08x", fc_security);
718 
719 	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
720 		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
721 			continue;
722 
723 		len += scnprintf(buf + len, size - len, "%s%s", prefix,
724 				 zfcp_fsf_fc_security_mnemonics[i].name);
725 		prefix = ", ";
726 		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
727 	}
728 
729 	if (fc_security != 0)
730 		len += scnprintf(buf + len, size - len, "%s0x%08x",
731 				 prefix, fc_security);
732 
733 	return len;
734 }
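
/*
 * Usage sketch (editorial addition, not from the original source): with
 * fmt == ZFCP_FSF_PRINT_FMT_LIST and
 * fc_security == (FSF_FC_SECURITY_AUTH | FSF_FC_SECURITY_ENC_FCSP2),
 * zfcp_fsf_scnprint_fc_security() writes "Authentication, Encryption" into
 * buf; with fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM and more than one flag set,
 * only the combined value in hexadecimal is written.
 */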
735 
736 static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
737 					     struct zfcp_fsf_req *req)
738 {
739 	if (adapter->fc_security_algorithms ==
740 	    adapter->fc_security_algorithms_old) {
741 		/* no change, no trace */
742 		return;
743 	}
744 
745 	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
746 			      adapter->fc_security_algorithms_old,
747 			      adapter->fc_security_algorithms);
748 
749 	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
750 }
751 
752 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
753 {
754 	struct zfcp_adapter *adapter = req->adapter;
755 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
756 
757 	if (req->data)
758 		memcpy(req->data, bottom, sizeof(*bottom));
759 
760 	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
761 		adapter->fc_security_algorithms =
762 			bottom->fc_security_algorithms;
763 	else
764 		adapter->fc_security_algorithms = 0;
765 	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
766 }
767 
768 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
769 {
770 	struct zfcp_diag_header *const diag_hdr =
771 		&req->adapter->diagnostics->port_data.header;
772 	struct fsf_qtcb *qtcb = req->qtcb;
773 	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
774 
775 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
776 		return;
777 
778 	switch (qtcb->header.fsf_status) {
779 	case FSF_GOOD:
780 		/*
781 		 * usually we postpone the update until the cached data is too
782 		 * old, but since the data is available now, update it anyway
783 		 */
784 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
785 
786 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
787 		zfcp_fsf_exchange_port_evaluate(req);
788 		break;
789 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
790 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
791 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
792 
793 		zfcp_fsf_link_down_info_eval(req,
794 			&qtcb->header.fsf_status_qual.link_down_info);
795 
796 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
797 		zfcp_fsf_exchange_port_evaluate(req);
798 		break;
799 	}
800 }
801 
802 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
803 {
804 	struct zfcp_fsf_req *req;
805 
806 	if (likely(pool))
807 		req = mempool_alloc(pool, GFP_ATOMIC);
808 	else
809 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
810 
811 	if (unlikely(!req))
812 		return NULL;
813 
814 	memset(req, 0, sizeof(*req));
815 	req->pool = pool;
816 	return req;
817 }
818 
819 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
820 {
821 	struct fsf_qtcb *qtcb;
822 
823 	if (likely(pool))
824 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
825 	else
826 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
827 
828 	if (unlikely(!qtcb))
829 		return NULL;
830 
831 	memset(qtcb, 0, sizeof(*qtcb));
832 	return qtcb;
833 }
834 
835 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
836 						u32 fsf_cmd, u8 sbtype,
837 						mempool_t *pool)
838 {
839 	struct zfcp_adapter *adapter = qdio->adapter;
840 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
841 
842 	if (unlikely(!req))
843 		return ERR_PTR(-ENOMEM);
844 
845 	if (adapter->req_no == 0)
846 		adapter->req_no++;
847 
848 	timer_setup(&req->timer, NULL, 0);
849 	init_completion(&req->completion);
850 
851 	req->adapter = adapter;
852 	req->req_id = adapter->req_no;
853 
854 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
855 		if (likely(pool))
856 			req->qtcb = zfcp_fsf_qtcb_alloc(
857 				adapter->pool.qtcb_pool);
858 		else
859 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
860 
861 		if (unlikely(!req->qtcb)) {
862 			zfcp_fsf_req_free(req);
863 			return ERR_PTR(-ENOMEM);
864 		}
865 
866 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
867 		req->qtcb->prefix.req_id = req->req_id;
868 		req->qtcb->prefix.ulp_info = 26;
869 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
870 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
871 		req->qtcb->header.req_handle = req->req_id;
872 		req->qtcb->header.fsf_command = fsf_cmd;
873 	}
874 
875 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
876 			   req->qtcb, sizeof(struct fsf_qtcb));
877 
878 	return req;
879 }
880 
881 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
882 {
883 	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
884 	struct zfcp_adapter *adapter = req->adapter;
885 	struct zfcp_qdio *qdio = adapter->qdio;
886 	u64 req_id = req->req_id;
887 
888 	zfcp_reqlist_add(adapter->req_list, req);
889 
890 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
891 	req->issued = get_tod_clock();
892 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
893 		timer_delete_sync(&req->timer);
894 
895 		/* lookup request again, list might have changed */
896 		if (zfcp_reqlist_find_rm(adapter->req_list, req_id) == NULL)
897 			zfcp_dbf_hba_fsf_reqid("fsrsrmf", 1, adapter, req_id);
898 
899 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
900 		return -EIO;
901 	}
902 
903 	/*
904 	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
905 	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
906 	 *
907 	 * The request might complete and be freed concurrently at any point
908 	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
909 	 * uncontrolled access after this might result in an use-after-free bug.
910 	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
911 	 * when it is completed via req->completion, is it safe to use req
912 	 * again.
913 	 */
914 
915 	/* Don't increase for unsolicited status */
916 	if (!is_srb)
917 		adapter->fsf_req_seq_no++;
918 	adapter->req_no++;
919 
920 	return 0;
921 }
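
/*
 * Editorial sketch (not from the original source) of the calling pattern
 * shared by the request constructors below, e.g. zfcp_fsf_status_read() or
 * zfcp_fsf_send_ct():
 *
 *	spin_lock_irq(&qdio->req_q_lock);
 *	if (zfcp_qdio_sbal_get(qdio))
 *		goto out;
 *	req = zfcp_fsf_req_create(qdio, fsf_cmd, sbtype, pool);
 *	(fill QTCB and SBALEs, set req->handler)
 *	zfcp_fsf_start_timer(req, timeout);
 *	retval = zfcp_fsf_req_send(req);
 *	(on success, req must not be touched again)
 *	spin_unlock_irq(&qdio->req_q_lock);
 *
 * req_q_lock serializes access to the outbound QDIO queue; once
 * zfcp_fsf_req_send() returns 0, the completion path may free the request
 * at any time.  Only requests without ZFCP_STATUS_FSFREQ_CLEANUP may be
 * touched again, and only after waiting on req->completion.
 */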
922 
923 /**
924  * zfcp_fsf_status_read - send status read request
925  * @qdio: pointer to struct zfcp_qdio
926  * Returns: 0 on success, ERROR otherwise
927  * Returns: 0 on success, error otherwise
928 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
929 {
930 	struct zfcp_adapter *adapter = qdio->adapter;
931 	struct zfcp_fsf_req *req;
932 	struct fsf_status_read_buffer *sr_buf;
933 	struct page *page;
934 	int retval = -EIO;
935 
936 	spin_lock_irq(&qdio->req_q_lock);
937 	if (zfcp_qdio_sbal_get(qdio))
938 		goto out;
939 
940 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
941 				  SBAL_SFLAGS0_TYPE_STATUS,
942 				  adapter->pool.status_read_req);
943 	if (IS_ERR(req)) {
944 		retval = PTR_ERR(req);
945 		goto out;
946 	}
947 
948 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
949 	if (!page) {
950 		retval = -ENOMEM;
951 		goto failed_buf;
952 	}
953 	sr_buf = page_address(page);
954 	memset(sr_buf, 0, sizeof(*sr_buf));
955 	req->data = sr_buf;
956 
957 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
958 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
959 
960 	retval = zfcp_fsf_req_send(req);
961 	if (retval)
962 		goto failed_req_send;
963 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
964 
965 	goto out;
966 
967 failed_req_send:
968 	req->data = NULL;
969 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
970 failed_buf:
971 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
972 	zfcp_fsf_req_free(req);
973 out:
974 	spin_unlock_irq(&qdio->req_q_lock);
975 	return retval;
976 }
977 
978 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
979 {
980 	struct scsi_device *sdev = req->data;
981 	struct zfcp_scsi_dev *zfcp_sdev;
982 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
983 
984 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
985 		return;
986 
987 	zfcp_sdev = sdev_to_zfcp(sdev);
988 
989 	switch (req->qtcb->header.fsf_status) {
990 	case FSF_PORT_HANDLE_NOT_VALID:
991 		if (fsq->word[0] == fsq->word[1]) {
992 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
993 						"fsafch1");
994 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
995 		}
996 		break;
997 	case FSF_LUN_HANDLE_NOT_VALID:
998 		if (fsq->word[0] == fsq->word[1]) {
999 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
1000 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1001 		}
1002 		break;
1003 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
1004 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
1005 		break;
1006 	case FSF_PORT_BOXED:
1007 		zfcp_erp_set_port_status(zfcp_sdev->port,
1008 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1009 		zfcp_erp_port_reopen(zfcp_sdev->port,
1010 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
1011 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1012 		break;
1013 	case FSF_LUN_BOXED:
1014 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1015 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
1016 				    "fsafch4");
1017 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1018 		break;
1019 	case FSF_ADAPTER_STATUS_AVAILABLE:
1020 		switch (fsq->word[0]) {
1021 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1022 			zfcp_fc_test_link(zfcp_sdev->port);
1023 			fallthrough;
1024 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1025 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1026 			break;
1027 		}
1028 		break;
1029 	case FSF_GOOD:
1030 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1031 		break;
1032 	}
1033 }
1034 
1035 /**
1036  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
1037  * @scmnd: The SCSI command to abort
1038  * Returns: pointer to struct zfcp_fsf_req
1039  */
1040 
1041 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
1042 {
1043 	struct zfcp_fsf_req *req = NULL;
1044 	struct scsi_device *sdev = scmnd->device;
1045 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1046 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
1047 	u64 old_req_id = (u64) scmnd->host_scribble;
1048 
1049 	spin_lock_irq(&qdio->req_q_lock);
1050 	if (zfcp_qdio_sbal_get(qdio))
1051 		goto out;
1052 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
1053 				  SBAL_SFLAGS0_TYPE_READ,
1054 				  qdio->adapter->pool.scsi_abort);
1055 	if (IS_ERR(req)) {
1056 		req = NULL;
1057 		goto out;
1058 	}
1059 
1060 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
1061 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
1062 		goto out_error_free;
1063 
1064 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1065 
1066 	req->data = sdev;
1067 	req->handler = zfcp_fsf_abort_fcp_command_handler;
1068 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1069 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
1070 	req->qtcb->bottom.support.req_handle = old_req_id;
1071 
1072 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
1073 	if (!zfcp_fsf_req_send(req)) {
1074 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
1075 		goto out;
1076 	}
1077 
1078 out_error_free:
1079 	zfcp_fsf_req_free(req);
1080 	req = NULL;
1081 out:
1082 	spin_unlock_irq(&qdio->req_q_lock);
1083 	return req;
1084 }
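
/*
 * Editorial note (not from the original source): the SCSI error-handling
 * path is expected to store the identifier of the request to abort in
 * scmnd->host_scribble before calling zfcp_fsf_abort_fcp_cmnd(), then wait
 * on the returned request's completion and inspect
 * ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED / ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED in
 * req->status before freeing it with zfcp_fsf_req_free().
 */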
1085 
1086 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1087 {
1088 	struct zfcp_adapter *adapter = req->adapter;
1089 	struct zfcp_fsf_ct_els *ct = req->data;
1090 	struct fsf_qtcb_header *header = &req->qtcb->header;
1091 
1092 	ct->status = -EINVAL;
1093 
1094 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1095 		goto skip_fsfstatus;
1096 
1097 	switch (header->fsf_status) {
1098 	case FSF_GOOD:
1099 		ct->status = 0;
1100 		zfcp_dbf_san_res("fsscth2", req);
1101 		break;
1102 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1103 		zfcp_fsf_class_not_supp(req);
1104 		break;
1105 	case FSF_ADAPTER_STATUS_AVAILABLE:
1106 		switch (header->fsf_status_qual.word[0]) {
1107 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1108 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1109 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1110 			break;
1111 		}
1112 		break;
1113 	case FSF_PORT_BOXED:
1114 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1115 		break;
1116 	case FSF_PORT_HANDLE_NOT_VALID:
1117 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
1118 		fallthrough;
1119 	case FSF_GENERIC_COMMAND_REJECTED:
1120 	case FSF_PAYLOAD_SIZE_MISMATCH:
1121 	case FSF_REQUEST_SIZE_TOO_LARGE:
1122 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1123 	case FSF_SBAL_MISMATCH:
1124 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1125 		break;
1126 	}
1127 
1128 skip_fsfstatus:
1129 	if (ct->handler)
1130 		ct->handler(ct->handler_data);
1131 }
1132 
1133 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
1134 					    struct zfcp_qdio_req *q_req,
1135 					    struct scatterlist *sg_req,
1136 					    struct scatterlist *sg_resp)
1137 {
1138 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
1139 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
1140 	zfcp_qdio_set_sbale_last(qdio, q_req);
1141 }
1142 
1143 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1144 				       struct scatterlist *sg_req,
1145 				       struct scatterlist *sg_resp)
1146 {
1147 	struct zfcp_adapter *adapter = req->adapter;
1148 	struct zfcp_qdio *qdio = adapter->qdio;
1149 	struct fsf_qtcb *qtcb = req->qtcb;
1150 	u32 feat = adapter->adapter_features;
1151 
1152 	if (zfcp_adapter_multi_buffer_active(adapter)) {
1153 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1154 			return -EIO;
1155 		qtcb->bottom.support.req_buf_length =
1156 			zfcp_qdio_real_bytes(sg_req);
1157 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1158 			return -EIO;
1159 		qtcb->bottom.support.resp_buf_length =
1160 			zfcp_qdio_real_bytes(sg_resp);
1161 
1162 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
1163 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1164 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
1165 		return 0;
1166 	}
1167 
1168 	/* use single, unchained SBAL if it can hold the request */
1169 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1170 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1171 						sg_req, sg_resp);
1172 		return 0;
1173 	}
1174 
1175 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1176 		return -EOPNOTSUPP;
1177 
1178 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1179 		return -EIO;
1180 
1181 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1182 
1183 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1184 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1185 
1186 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1187 		return -EIO;
1188 
1189 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1190 
1191 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1192 
1193 	return 0;
1194 }
1195 
1196 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1197 				 struct scatterlist *sg_req,
1198 				 struct scatterlist *sg_resp,
1199 				 unsigned int timeout)
1200 {
1201 	int ret;
1202 
1203 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1204 	if (ret)
1205 		return ret;
1206 
1207 	/* common settings for ct/gs and els requests */
1208 	if (timeout > 255)
1209 		timeout = 255; /* max value accepted by hardware */
1210 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1211 	req->qtcb->bottom.support.timeout = timeout;
1212 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1213 
1214 	return 0;
1215 }
1216 
1217 /**
1218  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1219  * @wka_port: pointer to zfcp WKA port to send CT/GS to
1220  * @ct: pointer to struct zfcp_fsf_ct_els with data for CT request
1221  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1222  * @timeout: timeout that hardware should use, and a later software timeout
1223  */
1224 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1225 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1226 		     unsigned int timeout)
1227 {
1228 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1229 	struct zfcp_fsf_req *req;
1230 	int ret = -EIO;
1231 
1232 	spin_lock_irq(&qdio->req_q_lock);
1233 	if (zfcp_qdio_sbal_get(qdio))
1234 		goto out;
1235 
1236 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1237 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1238 
1239 	if (IS_ERR(req)) {
1240 		ret = PTR_ERR(req);
1241 		goto out;
1242 	}
1243 
1244 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1245 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1246 	if (ret)
1247 		goto failed_send;
1248 
1249 	req->handler = zfcp_fsf_send_ct_handler;
1250 	req->qtcb->header.port_handle = wka_port->handle;
1251 	ct->d_id = wka_port->d_id;
1252 	req->data = ct;
1253 
1254 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1255 
1256 	ret = zfcp_fsf_req_send(req);
1257 	if (ret)
1258 		goto failed_send;
1259 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1260 
1261 	goto out;
1262 
1263 failed_send:
1264 	zfcp_fsf_req_free(req);
1265 out:
1266 	spin_unlock_irq(&qdio->req_q_lock);
1267 	return ret;
1268 }
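
/*
 * Editorial note (not from the original source): zfcp_fsf_send_ct() carries
 * Generic Service payloads such as name-server queries.  The caller fills
 * struct zfcp_fsf_ct_els with request/response scatterlists and a completion
 * handler, and the well-known-address port must already be open so that its
 * handle and d_id can be placed into the QTCB above.
 */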
1269 
1270 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1271 {
1272 	struct zfcp_fsf_ct_els *send_els = req->data;
1273 	struct fsf_qtcb_header *header = &req->qtcb->header;
1274 
1275 	send_els->status = -EINVAL;
1276 
1277 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1278 		goto skip_fsfstatus;
1279 
1280 	switch (header->fsf_status) {
1281 	case FSF_GOOD:
1282 		send_els->status = 0;
1283 		zfcp_dbf_san_res("fsselh1", req);
1284 		break;
1285 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1286 		zfcp_fsf_class_not_supp(req);
1287 		break;
1288 	case FSF_ADAPTER_STATUS_AVAILABLE:
1289 		switch (header->fsf_status_qual.word[0]) {
1290 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1291 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1292 		case FSF_SQ_RETRY_IF_POSSIBLE:
1293 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1294 			break;
1295 		}
1296 		break;
1297 	case FSF_ELS_COMMAND_REJECTED:
1298 	case FSF_PAYLOAD_SIZE_MISMATCH:
1299 	case FSF_REQUEST_SIZE_TOO_LARGE:
1300 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1301 		break;
1302 	case FSF_SBAL_MISMATCH:
1303 		/* should never occur, avoided in zfcp_fsf_send_els */
1304 		fallthrough;
1305 	default:
1306 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1307 		break;
1308 	}
1309 skip_fsfstatus:
1310 	if (send_els->handler)
1311 		send_els->handler(send_els->handler_data);
1312 }
1313 
1314 /**
1315  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1316  * @adapter: pointer to zfcp adapter
1317  * @d_id: N_Port_ID to send ELS to
1318  * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1319  * @timeout: timeout that hardware should use, and a later software timeout
1320  */
1321 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1322 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1323 {
1324 	struct zfcp_fsf_req *req;
1325 	struct zfcp_qdio *qdio = adapter->qdio;
1326 	int ret = -EIO;
1327 
1328 	spin_lock_irq(&qdio->req_q_lock);
1329 	if (zfcp_qdio_sbal_get(qdio))
1330 		goto out;
1331 
1332 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1333 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1334 
1335 	if (IS_ERR(req)) {
1336 		ret = PTR_ERR(req);
1337 		goto out;
1338 	}
1339 
1340 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1341 
1342 	if (!zfcp_adapter_multi_buffer_active(adapter))
1343 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1344 
1345 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1346 
1347 	if (ret)
1348 		goto failed_send;
1349 
1350 	hton24(req->qtcb->bottom.support.d_id, d_id);
1351 	req->handler = zfcp_fsf_send_els_handler;
1352 	els->d_id = d_id;
1353 	req->data = els;
1354 
1355 	zfcp_dbf_san_req("fssels1", req, d_id);
1356 
1357 	ret = zfcp_fsf_req_send(req);
1358 	if (ret)
1359 		goto failed_send;
1360 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1361 
1362 	goto out;
1363 
1364 failed_send:
1365 	zfcp_fsf_req_free(req);
1366 out:
1367 	spin_unlock_irq(&qdio->req_q_lock);
1368 	return ret;
1369 }
1370 
1371 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1372 {
1373 	struct zfcp_fsf_req *req;
1374 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1375 	int retval = -EIO;
1376 
1377 	spin_lock_irq(&qdio->req_q_lock);
1378 	if (zfcp_qdio_sbal_get(qdio))
1379 		goto out;
1380 
1381 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1382 				  SBAL_SFLAGS0_TYPE_READ,
1383 				  qdio->adapter->pool.erp_req);
1384 
1385 	if (IS_ERR(req)) {
1386 		retval = PTR_ERR(req);
1387 		goto out;
1388 	}
1389 
1390 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1391 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1392 
1393 	req->qtcb->bottom.config.feature_selection =
1394 			FSF_FEATURE_NOTIFICATION_LOST |
1395 			FSF_FEATURE_UPDATE_ALERT |
1396 			FSF_FEATURE_REQUEST_SFP_DATA |
1397 			FSF_FEATURE_FC_SECURITY;
1398 	req->erp_action = erp_action;
1399 	req->handler = zfcp_fsf_exchange_config_data_handler;
1400 	erp_action->fsf_req_id = req->req_id;
1401 
1402 	zfcp_fsf_start_erp_timer(req);
1403 	retval = zfcp_fsf_req_send(req);
1404 	if (retval) {
1405 		zfcp_fsf_req_free(req);
1406 		erp_action->fsf_req_id = 0;
1407 	}
1408 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1409 out:
1410 	spin_unlock_irq(&qdio->req_q_lock);
1411 	return retval;
1412 }
1413 
1414 
1415 /**
1416  * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
1417  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1418  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1419  *	  might be %NULL.
1420  *
1421  * Returns:
1422  * * 0		- Exchange Config Data was successful, @data is complete
1423  * * -EIO	- Exchange Config Data was not successful, @data is invalid
1424  * * -EAGAIN	- @data contains incomplete data
1425  * * -ENOMEM	- Some memory allocation failed along the way
1426  */
1427 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1428 				       struct fsf_qtcb_bottom_config *data)
1429 {
1430 	struct zfcp_fsf_req *req = NULL;
1431 	int retval = -EIO;
1432 
1433 	spin_lock_irq(&qdio->req_q_lock);
1434 	if (zfcp_qdio_sbal_get(qdio))
1435 		goto out_unlock;
1436 
1437 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1438 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1439 
1440 	if (IS_ERR(req)) {
1441 		retval = PTR_ERR(req);
1442 		goto out_unlock;
1443 	}
1444 
1445 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1446 	req->handler = zfcp_fsf_exchange_config_data_handler;
1447 
1448 	req->qtcb->bottom.config.feature_selection =
1449 			FSF_FEATURE_NOTIFICATION_LOST |
1450 			FSF_FEATURE_UPDATE_ALERT |
1451 			FSF_FEATURE_REQUEST_SFP_DATA |
1452 			FSF_FEATURE_FC_SECURITY;
1453 
1454 	if (data)
1455 		req->data = data;
1456 
1457 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1458 	retval = zfcp_fsf_req_send(req);
1459 	spin_unlock_irq(&qdio->req_q_lock);
1460 
1461 	if (!retval) {
1462 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1463 		wait_for_completion(&req->completion);
1464 
1465 		if (req->status &
1466 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1467 			retval = -EIO;
1468 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1469 			retval = -EAGAIN;
1470 	}
1471 
1472 	zfcp_fsf_req_free(req);
1473 	return retval;
1474 
1475 out_unlock:
1476 	spin_unlock_irq(&qdio->req_q_lock);
1477 	return retval;
1478 }
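
/*
 * Usage sketch (editorial addition; variable names are illustrative only):
 * a caller wanting a configuration snapshot outside of error recovery could
 * do roughly
 *
 *	struct fsf_qtcb_bottom_config *bottom;
 *	int ret;
 *
 *	bottom = kzalloc(sizeof(*bottom), GFP_KERNEL);
 *	if (!bottom)
 *		return -ENOMEM;
 *	ret = zfcp_fsf_exchange_config_data_sync(adapter->qdio, bottom);
 *	if (ret == -EAGAIN)
 *		(bottom holds incomplete data, e.g. while the link is down)
 *	kfree(bottom);
 *
 * and treat -EIO / -ENOMEM as described in the kernel-doc above.
 */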
1479 
1480 /**
1481  * zfcp_fsf_exchange_port_data - request information about local port
1482  * @erp_action: ERP action for the adapter for which port data is requested
1483  * Returns: 0 on success, error otherwise
1484  */
1485 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1486 {
1487 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1488 	struct zfcp_fsf_req *req;
1489 	int retval = -EIO;
1490 
1491 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1492 		return -EOPNOTSUPP;
1493 
1494 	spin_lock_irq(&qdio->req_q_lock);
1495 	if (zfcp_qdio_sbal_get(qdio))
1496 		goto out;
1497 
1498 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1499 				  SBAL_SFLAGS0_TYPE_READ,
1500 				  qdio->adapter->pool.erp_req);
1501 
1502 	if (IS_ERR(req)) {
1503 		retval = PTR_ERR(req);
1504 		goto out;
1505 	}
1506 
1507 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1508 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1509 
1510 	req->handler = zfcp_fsf_exchange_port_data_handler;
1511 	req->erp_action = erp_action;
1512 	erp_action->fsf_req_id = req->req_id;
1513 
1514 	zfcp_fsf_start_erp_timer(req);
1515 	retval = zfcp_fsf_req_send(req);
1516 	if (retval) {
1517 		zfcp_fsf_req_free(req);
1518 		erp_action->fsf_req_id = 0;
1519 	}
1520 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1521 out:
1522 	spin_unlock_irq(&qdio->req_q_lock);
1523 	return retval;
1524 }
1525 
1526 /**
1527  * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
1528  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1529  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1530  *	  might be %NULL.
1531  *
1532  * Returns:
1533  * * 0		- Exchange Port Data was successful, @data is complete
1534  * * -EIO	- Exchange Port Data was not successful, @data is invalid
1535  * * -EAGAIN	- @data contains incomplete data
1536  * * -ENOMEM	- Some memory allocation failed along the way
1537  * * -EOPNOTSUPP	- This operation is not supported
1538  */
1539 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1540 				     struct fsf_qtcb_bottom_port *data)
1541 {
1542 	struct zfcp_fsf_req *req = NULL;
1543 	int retval = -EIO;
1544 
1545 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1546 		return -EOPNOTSUPP;
1547 
1548 	spin_lock_irq(&qdio->req_q_lock);
1549 	if (zfcp_qdio_sbal_get(qdio))
1550 		goto out_unlock;
1551 
1552 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1553 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1554 
1555 	if (IS_ERR(req)) {
1556 		retval = PTR_ERR(req);
1557 		goto out_unlock;
1558 	}
1559 
1560 	if (data)
1561 		req->data = data;
1562 
1563 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1564 
1565 	req->handler = zfcp_fsf_exchange_port_data_handler;
1566 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1567 	retval = zfcp_fsf_req_send(req);
1568 	spin_unlock_irq(&qdio->req_q_lock);
1569 
1570 	if (!retval) {
1571 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1572 		wait_for_completion(&req->completion);
1573 
1574 		if (req->status &
1575 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1576 			retval = -EIO;
1577 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1578 			retval = -EAGAIN;
1579 	}
1580 
1581 	zfcp_fsf_req_free(req);
1582 	return retval;
1583 
1584 out_unlock:
1585 	spin_unlock_irq(&qdio->req_q_lock);
1586 	return retval;
1587 }
1588 
1589 static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
1590 					  struct zfcp_fsf_req *req)
1591 {
1592 	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1593 	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1594 
1595 	if (port->connection_info == port->connection_info_old) {
1596 		/* no change, no log nor trace */
1597 		return;
1598 	}
1599 
1600 	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
1601 			      port->connection_info_old,
1602 			      port->connection_info);
1603 
1604 	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
1605 				      port->connection_info_old,
1606 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1607 	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
1608 				      port->connection_info,
1609 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1610 
1611 	if (strncmp(mnemonic_old, mnemonic_new,
1612 		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
1613 		/* no change in string representation, no log */
1614 		goto out;
1615 	}
1616 
1617 	if (port->connection_info_old == 0) {
1618 		/* activation */
1619 		dev_info(&port->adapter->ccw_device->dev,
1620 			 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
1621 			 port->wwpn, mnemonic_new);
1622 	} else if (port->connection_info == 0) {
1623 		/* deactivation */
1624 		dev_warn(&port->adapter->ccw_device->dev,
1625 			 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
1626 			 port->wwpn, mnemonic_old);
1627 	} else {
1628 		/* change */
1629 		dev_warn(&port->adapter->ccw_device->dev,
1630 			 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
1631 			 port->wwpn, mnemonic_old, mnemonic_new);
1632 	}
1633 
1634 out:
1635 	port->connection_info_old = port->connection_info;
1636 }
1637 
1638 static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
1639 					u64 wwpn)
1640 {
1641 	switch (fsf_sqw0) {
1642 
1643 	/*
1644 	 * Open Port command error codes
1645 	 */
1646 
1647 	case FSF_SQ_SECURITY_REQUIRED:
1648 		dev_warn_ratelimited(dev,
1649 				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
1650 				     wwpn);
1651 		break;
1652 	case FSF_SQ_SECURITY_TIMEOUT:
1653 		dev_warn_ratelimited(dev,
1654 				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
1655 				     wwpn);
1656 		break;
1657 	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
1658 		dev_warn_ratelimited(dev,
1659 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
1660 				     wwpn);
1661 		break;
1662 	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
1663 		dev_warn_ratelimited(dev,
1664 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
1665 				     wwpn);
1666 		break;
1667 	case FSF_SQ_SECURITY_AUTH_FAILURE:
1668 		dev_warn_ratelimited(dev,
1669 				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
1670 				     wwpn);
1671 		break;
1672 
1673 	/*
1674 	 * Send FCP command error codes
1675 	 */
1676 
1677 	case FSF_SQ_SECURITY_ENC_FAILURE:
1678 		dev_warn_ratelimited(dev,
1679 				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
1680 				     wwpn);
1681 		break;
1682 
1683 	/*
1684 	 * Unknown error codes
1685 	 */
1686 
1687 	default:
1688 		dev_warn_ratelimited(dev,
1689 				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
1690 				     fsf_sqw0, wwpn);
1691 	}
1692 }
1693 
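/*
 * Completion handler for an "open port" request: on FSF_GOOD store the
 * port handle, record the FC Endpoint Security state reported by the
 * channel, mark the port open and evaluate the returned PLOGI payload;
 * on failure flag the request (and possibly the port) as failed. The
 * port reference taken in zfcp_fsf_open_port() is dropped in any case.
 */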
1694 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1695 {
1696 	struct zfcp_adapter *adapter = req->adapter;
1697 	struct zfcp_port *port = req->data;
1698 	struct fsf_qtcb_header *header = &req->qtcb->header;
1699 	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1700 	struct fc_els_flogi *plogi;
1701 
1702 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1703 		goto out;
1704 
1705 	switch (header->fsf_status) {
1706 	case FSF_PORT_ALREADY_OPEN:
1707 		break;
1708 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1709 		dev_warn(&adapter->ccw_device->dev,
1710 			 "Not enough FCP adapter resources to open "
1711 			 "remote port 0x%016Lx\n",
1712 			 (unsigned long long)port->wwpn);
1713 		zfcp_erp_set_port_status(port,
1714 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1715 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1716 		break;
1717 	case FSF_SECURITY_ERROR:
1718 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
1719 					    header->fsf_status_qual.word[0],
1720 					    port->wwpn);
1721 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1722 		break;
1723 	case FSF_ADAPTER_STATUS_AVAILABLE:
1724 		switch (header->fsf_status_qual.word[0]) {
1725 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1726 			/* no zfcp_fc_test_link() with failed open port */
1727 			fallthrough;
1728 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1729 		case FSF_SQ_NO_RETRY_POSSIBLE:
1730 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1731 			break;
1732 		}
1733 		break;
1734 	case FSF_GOOD:
1735 		port->handle = header->port_handle;
1736 		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
1737 			port->connection_info = bottom->connection_info;
1738 		else
1739 			port->connection_info = 0;
1740 		zfcp_fsf_log_port_fc_security(port, req);
1741 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1742 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1743 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1744 		                  &port->status);
1745 		/* check whether D_ID has changed during open */
1746 		/*
1747 		 * FIXME: This check is not airtight, as the FCP channel does
1748 		 * not monitor closures of target port connections caused on
1749 		 * the remote side. Thus, it might miss out on invalidating
1750 		 * locally cached WWPNs (and other N_Port parameters) of gone
1751 		 * target ports. So, our heroic attempt to make things safe
1752 		 * could be undermined by 'open port' response data tagged with
1753 		 * obsolete WWPNs. Another reason to monitor potential
1754 		 * connection closures ourselves at least (by interpreting
1755 		 * incoming ELSes and unsolicited status). It just crosses my
1756 		 * mind that one should be able to cross-check by means of
1757 		 * another GID_PN straight after a port has been opened.
1758 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1759 		 */
1760 		plogi = (struct fc_els_flogi *) bottom->els;
1761 		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
1762 			zfcp_fc_plogi_evaluate(port, plogi);
1763 		break;
1764 	case FSF_UNKNOWN_OP_SUBTYPE:
1765 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1766 		break;
1767 	}
1768 
1769 out:
1770 	put_device(&port->dev);
1771 }
1772 
1773 /**
1774  * zfcp_fsf_open_port - create and send open port request
1775  * @erp_action: pointer to struct zfcp_erp_action
1776  * Returns: 0 on success, error otherwise
1777  */
1778 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1779 {
1780 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1781 	struct zfcp_port *port = erp_action->port;
1782 	struct zfcp_fsf_req *req;
1783 	int retval = -EIO;
1784 
1785 	spin_lock_irq(&qdio->req_q_lock);
1786 	if (zfcp_qdio_sbal_get(qdio))
1787 		goto out;
1788 
1789 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1790 				  SBAL_SFLAGS0_TYPE_READ,
1791 				  qdio->adapter->pool.erp_req);
1792 
1793 	if (IS_ERR(req)) {
1794 		retval = PTR_ERR(req);
1795 		goto out;
1796 	}
1797 
1798 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1799 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1800 
1801 	req->handler = zfcp_fsf_open_port_handler;
1802 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1803 	req->data = port;
1804 	req->erp_action = erp_action;
1805 	erp_action->fsf_req_id = req->req_id;
1806 	get_device(&port->dev);
1807 
1808 	zfcp_fsf_start_erp_timer(req);
1809 	retval = zfcp_fsf_req_send(req);
1810 	if (retval) {
1811 		zfcp_fsf_req_free(req);
1812 		erp_action->fsf_req_id = 0;
1813 		put_device(&port->dev);
1814 	}
1815 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1816 out:
1817 	spin_unlock_irq(&qdio->req_q_lock);
1818 	return retval;
1819 }
1820 
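/*
 * Completion handler for a "close port" request: a stale port handle
 * triggers an adapter reopen, while FSF_GOOD clears the port's
 * ZFCP_STATUS_COMMON_OPEN status.
 */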
1821 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1822 {
1823 	struct zfcp_port *port = req->data;
1824 
1825 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1826 		return;
1827 
1828 	switch (req->qtcb->header.fsf_status) {
1829 	case FSF_PORT_HANDLE_NOT_VALID:
1830 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1831 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1832 		break;
1833 	case FSF_ADAPTER_STATUS_AVAILABLE:
1834 		break;
1835 	case FSF_GOOD:
1836 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1837 		break;
1838 	}
1839 }
1840 
1841 /**
1842  * zfcp_fsf_close_port - create and send close port request
1843  * @erp_action: pointer to struct zfcp_erp_action
1844  * Returns: 0 on success, error otherwise
1845  */
1846 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1847 {
1848 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1849 	struct zfcp_fsf_req *req;
1850 	int retval = -EIO;
1851 
1852 	spin_lock_irq(&qdio->req_q_lock);
1853 	if (zfcp_qdio_sbal_get(qdio))
1854 		goto out;
1855 
1856 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1857 				  SBAL_SFLAGS0_TYPE_READ,
1858 				  qdio->adapter->pool.erp_req);
1859 
1860 	if (IS_ERR(req)) {
1861 		retval = PTR_ERR(req);
1862 		goto out;
1863 	}
1864 
1865 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1866 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1867 
1868 	req->handler = zfcp_fsf_close_port_handler;
1869 	req->data = erp_action->port;
1870 	req->erp_action = erp_action;
1871 	req->qtcb->header.port_handle = erp_action->port->handle;
1872 	erp_action->fsf_req_id = req->req_id;
1873 
1874 	zfcp_fsf_start_erp_timer(req);
1875 	retval = zfcp_fsf_req_send(req);
1876 	if (retval) {
1877 		zfcp_fsf_req_free(req);
1878 		erp_action->fsf_req_id = 0;
1879 	}
1880 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1881 out:
1882 	spin_unlock_irq(&qdio->req_q_lock);
1883 	return retval;
1884 }
1885 
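/*
 * Completion handler for an "open WKA port" request: store the port
 * handle and mark the well-known-address port online on success,
 * otherwise mark it offline; in either case wake up waiters on
 * wka_port->opened.
 */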
1886 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1887 {
1888 	struct zfcp_fc_wka_port *wka_port = req->data;
1889 	struct fsf_qtcb_header *header = &req->qtcb->header;
1890 
1891 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1892 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1893 		goto out;
1894 	}
1895 
1896 	switch (header->fsf_status) {
1897 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1898 		dev_warn(&req->adapter->ccw_device->dev,
1899 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1900 		fallthrough;
1901 	case FSF_ADAPTER_STATUS_AVAILABLE:
1902 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1903 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1904 		break;
1905 	case FSF_GOOD:
1906 		wka_port->handle = header->port_handle;
1907 		fallthrough;
1908 	case FSF_PORT_ALREADY_OPEN:
1909 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1910 	}
1911 out:
1912 	wake_up(&wka_port->opened);
1913 }
1914 
1915 /**
1916  * zfcp_fsf_open_wka_port - create and send open wka-port request
1917  * @wka_port: pointer to struct zfcp_fc_wka_port
1918  * Returns: 0 on success, error otherwise
1919  */
1920 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1921 {
1922 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1923 	struct zfcp_fsf_req *req;
1924 	u64 req_id = 0;
1925 	int retval = -EIO;
1926 
1927 	spin_lock_irq(&qdio->req_q_lock);
1928 	if (zfcp_qdio_sbal_get(qdio))
1929 		goto out;
1930 
1931 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1932 				  SBAL_SFLAGS0_TYPE_READ,
1933 				  qdio->adapter->pool.erp_req);
1934 
1935 	if (IS_ERR(req)) {
1936 		retval = PTR_ERR(req);
1937 		goto out;
1938 	}
1939 
1940 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1941 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1942 
1943 	req->handler = zfcp_fsf_open_wka_port_handler;
1944 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1945 	req->data = wka_port;
1946 
1947 	req_id = req->req_id;
1948 
1949 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1950 	retval = zfcp_fsf_req_send(req);
1951 	if (retval)
1952 		zfcp_fsf_req_free(req);
1953 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1954 out:
1955 	spin_unlock_irq(&qdio->req_q_lock);
1956 	if (!retval)
1957 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1958 	return retval;
1959 }
1960 
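/*
 * Completion handler for a "close WKA port" request: a stale port handle
 * triggers an adapter reopen; the WKA port is marked offline in any case
 * and waiters on wka_port->closed are woken up.
 */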
1961 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1962 {
1963 	struct zfcp_fc_wka_port *wka_port = req->data;
1964 
1965 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1966 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1967 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1968 	}
1969 
1970 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1971 	wake_up(&wka_port->closed);
1972 }
1973 
1974 /**
1975  * zfcp_fsf_close_wka_port - create and send close wka port request
1976  * @wka_port: WKA port to close
1977  * Returns: 0 on success, error otherwise
1978  */
1979 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1980 {
1981 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1982 	struct zfcp_fsf_req *req;
1983 	u64 req_id = 0;
1984 	int retval = -EIO;
1985 
1986 	spin_lock_irq(&qdio->req_q_lock);
1987 	if (zfcp_qdio_sbal_get(qdio))
1988 		goto out;
1989 
1990 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1991 				  SBAL_SFLAGS0_TYPE_READ,
1992 				  qdio->adapter->pool.erp_req);
1993 
1994 	if (IS_ERR(req)) {
1995 		retval = PTR_ERR(req);
1996 		goto out;
1997 	}
1998 
1999 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2000 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2001 
2002 	req->handler = zfcp_fsf_close_wka_port_handler;
2003 	req->data = wka_port;
2004 	req->qtcb->header.port_handle = wka_port->handle;
2005 
2006 	req_id = req->req_id;
2007 
2008 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2009 	retval = zfcp_fsf_req_send(req);
2010 	if (retval)
2011 		zfcp_fsf_req_free(req);
2012 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2013 out:
2014 	spin_unlock_irq(&qdio->req_q_lock);
2015 	if (!retval)
2016 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
2017 	return retval;
2018 }
2019 
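/*
 * Completion handler for a "close physical port" request: on success the
 * physical-open bit of the port and the open bit of all LUNs attached to
 * it are cleared; a boxed port is additionally marked access-boxed and
 * scheduled for reopen through ERP.
 */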
2020 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
2021 {
2022 	struct zfcp_port *port = req->data;
2023 	struct fsf_qtcb_header *header = &req->qtcb->header;
2024 	struct scsi_device *sdev;
2025 
2026 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2027 		return;
2028 
2029 	switch (header->fsf_status) {
2030 	case FSF_PORT_HANDLE_NOT_VALID:
2031 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
2032 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2033 		break;
2034 	case FSF_PORT_BOXED:
2035 		/* can't use generic zfcp_erp_modify_port_status because
2036 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
2037 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2038 		shost_for_each_device(sdev, port->adapter->scsi_host)
2039 			if (sdev_to_zfcp(sdev)->port == port)
2040 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2041 						  &sdev_to_zfcp(sdev)->status);
2042 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2043 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
2044 				     "fscpph2");
2045 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2046 		break;
2047 	case FSF_ADAPTER_STATUS_AVAILABLE:
2048 		switch (header->fsf_status_qual.word[0]) {
2049 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2050 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2051 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2052 			break;
2053 		}
2054 		break;
2055 	case FSF_GOOD:
2056 		/* can't use generic zfcp_erp_modify_port_status because
2057 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
2058 		 */
2059 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2060 		shost_for_each_device(sdev, port->adapter->scsi_host)
2061 			if (sdev_to_zfcp(sdev)->port == port)
2062 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2063 						  &sdev_to_zfcp(sdev)->status);
2064 		break;
2065 	}
2066 }
2067 
2068 /**
2069  * zfcp_fsf_close_physical_port - close physical port
2070  * @erp_action: pointer to struct zfcp_erp_action
2071  * Returns: 0 on success
2072  */
2073 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2074 {
2075 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2076 	struct zfcp_fsf_req *req;
2077 	int retval = -EIO;
2078 
2079 	spin_lock_irq(&qdio->req_q_lock);
2080 	if (zfcp_qdio_sbal_get(qdio))
2081 		goto out;
2082 
2083 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
2084 				  SBAL_SFLAGS0_TYPE_READ,
2085 				  qdio->adapter->pool.erp_req);
2086 
2087 	if (IS_ERR(req)) {
2088 		retval = PTR_ERR(req);
2089 		goto out;
2090 	}
2091 
2092 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2093 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2094 
2095 	req->data = erp_action->port;
2096 	req->qtcb->header.port_handle = erp_action->port->handle;
2097 	req->erp_action = erp_action;
2098 	req->handler = zfcp_fsf_close_physical_port_handler;
2099 	erp_action->fsf_req_id = req->req_id;
2100 
2101 	zfcp_fsf_start_erp_timer(req);
2102 	retval = zfcp_fsf_req_send(req);
2103 	if (retval) {
2104 		zfcp_fsf_req_free(req);
2105 		erp_action->fsf_req_id = 0;
2106 	}
2107 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2108 out:
2109 	spin_unlock_irq(&qdio->req_q_lock);
2110 	return retval;
2111 }
2112 
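/*
 * Completion handler for an "open LUN" request: on FSF_GOOD store the LUN
 * handle and mark the LUN open; LUN sharing violations, exhausted LUN
 * handles and boxed ports are reported and translated into the
 * corresponding ERP status bits and request error flags.
 */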
2113 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
2114 {
2115 	struct zfcp_adapter *adapter = req->adapter;
2116 	struct scsi_device *sdev = req->data;
2117 	struct zfcp_scsi_dev *zfcp_sdev;
2118 	struct fsf_qtcb_header *header = &req->qtcb->header;
2119 	union fsf_status_qual *qual = &header->fsf_status_qual;
2120 
2121 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2122 		return;
2123 
2124 	zfcp_sdev = sdev_to_zfcp(sdev);
2125 
2126 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2127 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
2128 			  &zfcp_sdev->status);
2129 
2130 	switch (header->fsf_status) {
2131 
2132 	case FSF_PORT_HANDLE_NOT_VALID:
2133 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
2134 		fallthrough;
2135 	case FSF_LUN_ALREADY_OPEN:
2136 		break;
2137 	case FSF_PORT_BOXED:
2138 		zfcp_erp_set_port_status(zfcp_sdev->port,
2139 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2140 		zfcp_erp_port_reopen(zfcp_sdev->port,
2141 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
2142 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2143 		break;
2144 	case FSF_LUN_SHARING_VIOLATION:
2145 		if (qual->word[0])
2146 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
2147 				 "LUN 0x%016Lx on port 0x%016Lx is already in "
2148 				 "use by CSS%d, MIF Image ID %x\n",
2149 				 zfcp_scsi_dev_lun(sdev),
2150 				 (unsigned long long)zfcp_sdev->port->wwpn,
2151 				 qual->fsf_queue_designator.cssid,
2152 				 qual->fsf_queue_designator.hla);
2153 		zfcp_erp_set_lun_status(sdev,
2154 					ZFCP_STATUS_COMMON_ERP_FAILED |
2155 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
2156 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2157 		break;
2158 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
2159 		dev_warn(&adapter->ccw_device->dev,
2160 			 "No handle is available for LUN "
2161 			 "0x%016Lx on port 0x%016Lx\n",
2162 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2163 			 (unsigned long long)zfcp_sdev->port->wwpn);
2164 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
2165 		fallthrough;
2166 	case FSF_INVALID_COMMAND_OPTION:
2167 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2168 		break;
2169 	case FSF_ADAPTER_STATUS_AVAILABLE:
2170 		switch (header->fsf_status_qual.word[0]) {
2171 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2172 			zfcp_fc_test_link(zfcp_sdev->port);
2173 			fallthrough;
2174 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2175 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2176 			break;
2177 		}
2178 		break;
2179 
2180 	case FSF_GOOD:
2181 		zfcp_sdev->lun_handle = header->lun_handle;
2182 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2183 		break;
2184 	}
2185 }
2186 
2187 /**
2188  * zfcp_fsf_open_lun - open LUN
2189  * @erp_action: pointer to struct zfcp_erp_action
2190  * Returns: 0 on success, error otherwise
2191  */
2192 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
2193 {
2194 	struct zfcp_adapter *adapter = erp_action->adapter;
2195 	struct zfcp_qdio *qdio = adapter->qdio;
2196 	struct zfcp_fsf_req *req;
2197 	int retval = -EIO;
2198 
2199 	spin_lock_irq(&qdio->req_q_lock);
2200 	if (zfcp_qdio_sbal_get(qdio))
2201 		goto out;
2202 
2203 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
2204 				  SBAL_SFLAGS0_TYPE_READ,
2205 				  adapter->pool.erp_req);
2206 
2207 	if (IS_ERR(req)) {
2208 		retval = PTR_ERR(req);
2209 		goto out;
2210 	}
2211 
2212 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2213 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2214 
2215 	req->qtcb->header.port_handle = erp_action->port->handle;
2216 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
2217 	req->handler = zfcp_fsf_open_lun_handler;
2218 	req->data = erp_action->sdev;
2219 	req->erp_action = erp_action;
2220 	erp_action->fsf_req_id = req->req_id;
2221 
2222 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2223 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
2224 
2225 	zfcp_fsf_start_erp_timer(req);
2226 	retval = zfcp_fsf_req_send(req);
2227 	if (retval) {
2228 		zfcp_fsf_req_free(req);
2229 		erp_action->fsf_req_id = 0;
2230 	}
2231 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2232 out:
2233 	spin_unlock_irq(&qdio->req_q_lock);
2234 	return retval;
2235 }
2236 
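/*
 * Completion handler for a "close LUN" request: FSF_GOOD clears the LUN's
 * open status; stale port or LUN handles trigger an adapter or port
 * reopen, and a boxed port is marked accordingly and reopened.
 */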
2237 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
2238 {
2239 	struct scsi_device *sdev = req->data;
2240 	struct zfcp_scsi_dev *zfcp_sdev;
2241 
2242 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2243 		return;
2244 
2245 	zfcp_sdev = sdev_to_zfcp(sdev);
2246 
2247 	switch (req->qtcb->header.fsf_status) {
2248 	case FSF_PORT_HANDLE_NOT_VALID:
2249 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
2250 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2251 		break;
2252 	case FSF_LUN_HANDLE_NOT_VALID:
2253 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
2254 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2255 		break;
2256 	case FSF_PORT_BOXED:
2257 		zfcp_erp_set_port_status(zfcp_sdev->port,
2258 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2259 		zfcp_erp_port_reopen(zfcp_sdev->port,
2260 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
2261 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2262 		break;
2263 	case FSF_ADAPTER_STATUS_AVAILABLE:
2264 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
2265 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2266 			zfcp_fc_test_link(zfcp_sdev->port);
2267 			fallthrough;
2268 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2269 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2270 			break;
2271 		}
2272 		break;
2273 	case FSF_GOOD:
2274 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2275 		break;
2276 	}
2277 }
2278 
2279 /**
2280  * zfcp_fsf_close_lun - close LUN
2281  * @erp_action: pointer to erp_action triggering the "close LUN"
2282  * Returns: 0 on success, error otherwise
2283  */
2284 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
2285 {
2286 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2287 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
2288 	struct zfcp_fsf_req *req;
2289 	int retval = -EIO;
2290 
2291 	spin_lock_irq(&qdio->req_q_lock);
2292 	if (zfcp_qdio_sbal_get(qdio))
2293 		goto out;
2294 
2295 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2296 				  SBAL_SFLAGS0_TYPE_READ,
2297 				  qdio->adapter->pool.erp_req);
2298 
2299 	if (IS_ERR(req)) {
2300 		retval = PTR_ERR(req);
2301 		goto out;
2302 	}
2303 
2304 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2305 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2306 
2307 	req->qtcb->header.port_handle = erp_action->port->handle;
2308 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2309 	req->handler = zfcp_fsf_close_lun_handler;
2310 	req->data = erp_action->sdev;
2311 	req->erp_action = erp_action;
2312 	erp_action->fsf_req_id = req->req_id;
2313 
2314 	zfcp_fsf_start_erp_timer(req);
2315 	retval = zfcp_fsf_req_send(req);
2316 	if (retval) {
2317 		zfcp_fsf_req_free(req);
2318 		erp_action->fsf_req_id = 0;
2319 	}
2320 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2321 out:
2322 	spin_unlock_irq(&qdio->req_q_lock);
2323 	return retval;
2324 }
2325 
2326 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
2327 {
2328 	lat_rec->sum += lat;
2329 	lat_rec->min = min(lat_rec->min, lat);
2330 	lat_rec->max = max(lat_rec->max, lat);
2331 }
2332 
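/*
 * zfcp_fsf_req_trace - attach a zfcp_blk_drv_data record to the block
 * layer trace of the SCSI command and, if the adapter provides
 * measurement data, account the channel and fabric latencies in the
 * per-LUN statistics matching the data direction of the request.
 */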
2333 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2334 {
2335 	struct fsf_qual_latency_info *lat_in;
2336 	struct zfcp_latency_cont *lat = NULL;
2337 	struct zfcp_scsi_dev *zfcp_sdev;
2338 	struct zfcp_blk_drv_data blktrc;
2339 	int ticks = req->adapter->timer_ticks;
2340 
2341 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2342 
2343 	blktrc.flags = 0;
2344 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2345 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2346 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2347 	blktrc.inb_usage = 0;
2348 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2349 
2350 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2351 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2352 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2353 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2354 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2355 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2356 
2357 		switch (req->qtcb->bottom.io.data_direction) {
2358 		case FSF_DATADIR_DIF_READ_STRIP:
2359 		case FSF_DATADIR_DIF_READ_CONVERT:
2360 		case FSF_DATADIR_READ:
2361 			lat = &zfcp_sdev->latencies.read;
2362 			break;
2363 		case FSF_DATADIR_DIF_WRITE_INSERT:
2364 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2365 		case FSF_DATADIR_WRITE:
2366 			lat = &zfcp_sdev->latencies.write;
2367 			break;
2368 		case FSF_DATADIR_CMND:
2369 			lat = &zfcp_sdev->latencies.cmd;
2370 			break;
2371 		}
2372 
2373 		if (lat) {
2374 			spin_lock(&zfcp_sdev->latencies.lock);
2375 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2376 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2377 			lat->counter++;
2378 			spin_unlock(&zfcp_sdev->latencies.lock);
2379 		}
2380 	}
2381 
2382 	blk_add_driver_data(scsi_cmd_to_rq(scsi), &blktrc, sizeof(blktrc));
2383 }
2384 
2385 /**
2386  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2387  * @req: Pointer to FSF request.
2388  * @sdev: Pointer to SCSI device as request context.
2389  */
2390 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2391 					struct scsi_device *sdev)
2392 {
2393 	struct zfcp_scsi_dev *zfcp_sdev;
2394 	struct fsf_qtcb_header *header = &req->qtcb->header;
2395 
2396 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2397 		return;
2398 
2399 	zfcp_sdev = sdev_to_zfcp(sdev);
2400 
2401 	switch (header->fsf_status) {
2402 	case FSF_HANDLE_MISMATCH:
2403 	case FSF_PORT_HANDLE_NOT_VALID:
2404 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
2405 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2406 		break;
2407 	case FSF_FCPLUN_NOT_VALID:
2408 	case FSF_LUN_HANDLE_NOT_VALID:
2409 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2410 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2411 		break;
2412 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2413 		zfcp_fsf_class_not_supp(req);
2414 		break;
2415 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2416 		dev_err(&req->adapter->ccw_device->dev,
2417 			"Incorrect direction %d, LUN 0x%016Lx on port "
2418 			"0x%016Lx closed\n",
2419 			req->qtcb->bottom.io.data_direction,
2420 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2421 			(unsigned long long)zfcp_sdev->port->wwpn);
2422 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
2423 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2424 		break;
2425 	case FSF_CMND_LENGTH_NOT_VALID:
2426 		dev_err(&req->adapter->ccw_device->dev,
2427 			"Incorrect FCP_CMND length %d, FCP device closed\n",
2428 			req->qtcb->bottom.io.fcp_cmnd_length);
2429 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2430 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2431 		break;
2432 	case FSF_PORT_BOXED:
2433 		zfcp_erp_set_port_status(zfcp_sdev->port,
2434 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2435 		zfcp_erp_port_reopen(zfcp_sdev->port,
2436 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2437 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2438 		break;
2439 	case FSF_LUN_BOXED:
2440 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2441 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2442 				    "fssfch6");
2443 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2444 		break;
2445 	case FSF_ADAPTER_STATUS_AVAILABLE:
2446 		if (header->fsf_status_qual.word[0] ==
2447 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2448 			zfcp_fc_test_link(zfcp_sdev->port);
2449 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2450 		break;
2451 	case FSF_SECURITY_ERROR:
2452 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
2453 					    header->fsf_status_qual.word[0],
2454 					    zfcp_sdev->port->wwpn);
2455 		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
2456 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2457 		break;
2458 	}
2459 }
2460 
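/*
 * Completion handler for an FCP command: performed under the adapter's
 * abort_lock to serialize against abort processing, it maps FSF and DIF
 * protection errors to SCSI result or sense data, otherwise evaluates the
 * FCP response IU, records trace data and completes the command via
 * scsi_done().
 */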
2461 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2462 {
2463 	struct scsi_cmnd *scpnt;
2464 	struct fcp_resp_with_ext *fcp_rsp;
2465 	unsigned long flags;
2466 
2467 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2468 
2469 	scpnt = req->data;
2470 	if (unlikely(!scpnt)) {
2471 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2472 		return;
2473 	}
2474 
2475 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
2476 
2477 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2478 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2479 		goto skip_fsfstatus;
2480 	}
2481 
2482 	switch (req->qtcb->header.fsf_status) {
2483 	case FSF_INCONSISTENT_PROT_DATA:
2484 	case FSF_INVALID_PROT_PARM:
2485 		set_host_byte(scpnt, DID_ERROR);
2486 		goto skip_fsfstatus;
2487 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2488 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2489 		goto skip_fsfstatus;
2490 	case FSF_APP_TAG_CHECK_FAILURE:
2491 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2492 		goto skip_fsfstatus;
2493 	case FSF_REF_TAG_CHECK_FAILURE:
2494 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2495 		goto skip_fsfstatus;
2496 	}
2497 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2498 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2499 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2500 
2501 skip_fsfstatus:
2502 	zfcp_fsf_req_trace(req, scpnt);
2503 	zfcp_dbf_scsi_result(scpnt, req);
2504 
2505 	scpnt->host_scribble = NULL;
2506 	scsi_done(scpnt);
2507 	/*
2508 	 * We must hold this lock until scsi_done has been called.
2509 	 * Otherwise we may call scsi_done after an abort for this
2510 	 * command has already completed.
2511 	 * Note: scsi_done must not block!
2512 	 */
2513 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2514 }
2515 
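/*
 * zfcp_fsf_set_data_dir - translate the SCSI protection operation and DMA
 * data direction of a command into the FSF data direction code used in
 * the QTCB I/O bottom; returns -EINVAL for combinations the FCP channel
 * does not support (e.g. bidirectional transfers).
 */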
2516 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2517 {
2518 	switch (scsi_get_prot_op(scsi_cmnd)) {
2519 	case SCSI_PROT_NORMAL:
2520 		switch (scsi_cmnd->sc_data_direction) {
2521 		case DMA_NONE:
2522 			*data_dir = FSF_DATADIR_CMND;
2523 			break;
2524 		case DMA_FROM_DEVICE:
2525 			*data_dir = FSF_DATADIR_READ;
2526 			break;
2527 		case DMA_TO_DEVICE:
2528 			*data_dir = FSF_DATADIR_WRITE;
2529 			break;
2530 		case DMA_BIDIRECTIONAL:
2531 			return -EINVAL;
2532 		}
2533 		break;
2534 
2535 	case SCSI_PROT_READ_STRIP:
2536 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2537 		break;
2538 	case SCSI_PROT_WRITE_INSERT:
2539 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2540 		break;
2541 	case SCSI_PROT_READ_PASS:
2542 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2543 		break;
2544 	case SCSI_PROT_WRITE_PASS:
2545 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2546 		break;
2547 	default:
2548 		return -EINVAL;
2549 	}
2550 
2551 	return 0;
2552 }
2553 
2554 /**
2555  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2556  * @scsi_cmnd: scsi command to be sent
2557  */
2558 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2559 {
2560 	struct zfcp_fsf_req *req;
2561 	struct fcp_cmnd *fcp_cmnd;
2562 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2563 	int retval = -EIO;
2564 	struct scsi_device *sdev = scsi_cmnd->device;
2565 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2566 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2567 	struct zfcp_qdio *qdio = adapter->qdio;
2568 	struct fsf_qtcb_bottom_io *io;
2569 	unsigned long flags;
2570 
2571 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2572 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2573 		return -EBUSY;
2574 
2575 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2576 	if (atomic_read(&qdio->req_q_free) <= 0) {
2577 		atomic_inc(&qdio->req_q_full);
2578 		goto out;
2579 	}
2580 
2581 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2582 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2583 
2584 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2585 				  sbtype, adapter->pool.scsi_req);
2586 
2587 	if (IS_ERR(req)) {
2588 		retval = PTR_ERR(req);
2589 		goto out;
2590 	}
2591 
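	/*
	 * Stash the request ID in host_scribble so that later abort
	 * processing can look up the pending FSF request by its ID.
	 */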
2592 	BUILD_BUG_ON(sizeof(scsi_cmnd->host_scribble) < sizeof(req->req_id));
2593 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2594 
2595 	io = &req->qtcb->bottom.io;
2596 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2597 	req->data = scsi_cmnd;
2598 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2599 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2600 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2601 	io->service_class = FSF_CLASS_3;
2602 	io->fcp_cmnd_length = FCP_CMND_LEN;
2603 
2604 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2605 		io->data_block_length = scsi_prot_interval(scsi_cmnd);
2606 		io->ref_tag_value = scsi_prot_ref_tag(scsi_cmnd);
2607 	}
2608 
2609 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2610 		goto failed_scsi_cmnd;
2611 
2612 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2613 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2614 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2615 
2616 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2617 	    scsi_prot_sg_count(scsi_cmnd)) {
2618 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2619 				       scsi_prot_sg_count(scsi_cmnd));
2620 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2621 						 scsi_prot_sglist(scsi_cmnd));
2622 		if (retval)
2623 			goto failed_scsi_cmnd;
2624 		io->prot_data_length = zfcp_qdio_real_bytes(
2625 						scsi_prot_sglist(scsi_cmnd));
2626 	}
2627 
2628 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2629 					 scsi_sglist(scsi_cmnd));
2630 	if (unlikely(retval))
2631 		goto failed_scsi_cmnd;
2632 
2633 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2634 	if (zfcp_adapter_multi_buffer_active(adapter))
2635 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2636 
2637 	retval = zfcp_fsf_req_send(req);
2638 	if (unlikely(retval))
2639 		goto failed_scsi_cmnd;
2640 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2641 
2642 	goto out;
2643 
2644 failed_scsi_cmnd:
2645 	zfcp_fsf_req_free(req);
2646 	scsi_cmnd->host_scribble = NULL;
2647 out:
2648 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2649 	return retval;
2650 }
2651 
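/*
 * Completion handler for an FCP task management function: evaluate the
 * FCP_RSP info appended to the response and flag the request with
 * ZFCP_STATUS_FSFREQ_TMFUNCFAILED unless the TMF completed successfully.
 */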
2652 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2653 {
2654 	struct scsi_device *sdev = req->data;
2655 	struct fcp_resp_with_ext *fcp_rsp;
2656 	struct fcp_resp_rsp_info *rsp_info;
2657 
2658 	zfcp_fsf_fcp_handler_common(req, sdev);
2659 
2660 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2661 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2662 
2663 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2664 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2665 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2666 }
2667 
2668 /**
2669  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2670  * @sdev: Pointer to SCSI device to send the task management command to.
2671  * @tm_flags: Unsigned byte for task management flags.
2672  *
2673  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2674  */
2675 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2676 					    u8 tm_flags)
2677 {
2678 	struct zfcp_fsf_req *req = NULL;
2679 	struct fcp_cmnd *fcp_cmnd;
2680 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2681 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2682 
2683 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2684 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2685 		return NULL;
2686 
2687 	spin_lock_irq(&qdio->req_q_lock);
2688 	if (zfcp_qdio_sbal_get(qdio))
2689 		goto out;
2690 
2691 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2692 				  SBAL_SFLAGS0_TYPE_WRITE,
2693 				  qdio->adapter->pool.scsi_req);
2694 
2695 	if (IS_ERR(req)) {
2696 		req = NULL;
2697 		goto out;
2698 	}
2699 
2700 	req->data = sdev;
2701 
2702 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2703 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2704 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2705 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2706 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2707 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2708 
2709 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2710 
2711 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2712 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2713 
2714 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
2715 	if (!zfcp_fsf_req_send(req)) {
2716 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
2717 		goto out;
2718 	}
2719 
2720 	zfcp_fsf_req_free(req);
2721 	req = NULL;
2722 out:
2723 	spin_unlock_irq(&qdio->req_q_lock);
2724 	return req;
2725 }
2726 
2727 /**
2728  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2729  * @qdio: pointer to struct zfcp_qdio
2730  * @sbal_idx: response queue index of SBAL to be processed
2731  */
2732 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2733 {
2734 	struct zfcp_adapter *adapter = qdio->adapter;
2735 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2736 	struct qdio_buffer_element *sbale;
2737 	struct zfcp_fsf_req *fsf_req;
2738 	u64 req_id;
2739 	int idx;
2740 
2741 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2742 
2743 		sbale = &sbal->element[idx];
2744 		req_id = dma64_to_u64(sbale->addr);
2745 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2746 
2747 		if (!fsf_req) {
2748 			/*
2749 			 * An unknown request ID means we potentially have memory
2750 			 * corruption and must stop the machine immediately.
2751 			 */
2752 			zfcp_qdio_siosl(adapter);
2753 			panic("error: unknown req_id (%llx) on adapter %s.\n",
2754 			      req_id, dev_name(&adapter->ccw_device->dev));
2755 		}
2756 
2757 		zfcp_fsf_req_complete(fsf_req);
2758 
2759 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2760 			break;
2761 	}
2762 }
2763