// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Implementation of FSF commands.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
#include "zfcp_diag.h"

/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)

struct kmem_cache *zfcp_fsf_qtcb_cache;

static bool ber_stop = true;
module_param(ber_stop, bool, 0600);
MODULE_PARM_DESC(ber_stop,
		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");

static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
	struct zfcp_adapter *adapter = fsf_req->adapter;

	zfcp_qdio_siosl(adapter);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"fsrth_1");
}

static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
				 unsigned long timeout)
{
	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
	fsf_req->timer.expires = jiffies + timeout;
	add_timer(&fsf_req->timer);
}

static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
	BUG_ON(!fsf_req->erp_action);
	fsf_req->timer.function = zfcp_erp_timeout_handler;
	fsf_req->timer.expires = jiffies + 30 * HZ;
	add_timer(&fsf_req->timer);
}

/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};

static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
		"operational because of an unsupported FC class\n");
	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_free - free memory used by fsf request
 * @req: pointer to struct zfcp_fsf_req
 */
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
	if (likely(req->pool)) {
		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
		mempool_free(req, req->pool);
		return;
	}

	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
	kfree(req);
}

static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
	unsigned long flags;
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	int d_id = ntoh24(sr_buf->d_id);

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->d_id == d_id) {
			zfcp_erp_port_reopen(port, 0, "fssrpc1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost = adapter->scsi_host;

	fc_host_port_id(shost) = 0;
	fc_host_fabric_name(shost) = 0;
	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
	adapter->hydra_version = 0;
	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);

	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;
}
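
/**
 * zfcp_fsf_link_down_info_eval - evaluate FSF link-down information
 * @req: the FSF request that reported the link-down condition
 * @link_down: link-down payload with an error code, or NULL if none
 *
 * Marks the adapter link as unplugged (only once), schedules blocking of
 * the remote ports, clears the cached fc_host attributes, and warns about
 * the specific reason reported by the FCP channel, if one was provided.
 * Finally the adapter is flagged as ERP failed.
 */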
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	zfcp_scsi_schedule_rports_block(adapter);

	zfcp_fsf_fc_host_link_down(adapter);

	if (!link_down)
		goto out;

	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}

static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct fsf_link_down_info *ldi =
		(struct fsf_link_down_info *) &sr_buf->payload;

	switch (sr_buf->status_subtype) {
	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
	case FSF_STATUS_READ_SUB_FDISC_FAILED:
		zfcp_fsf_link_down_info_eval(req, ldi);
		break;
	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
		zfcp_fsf_link_down_info_eval(req, NULL);
	}
}

static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_uss("fssrh_4", req);

	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		zfcp_dbf_hba_bit_err("fssrh_3", req);
		if (ber_stop) {
			dev_warn(&adapter->ccw_device->dev,
				 "All paths over this FCP device are disused because of excessive bit errors\n");
			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
		} else {
			dev_warn(&adapter->ccw_device->dev,
				 "The error threshold for checksum statistics has been exceeded\n");
		}
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2");
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);

		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			zfcp_fc_conditional_port_scan(adapter);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	}

	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
	zfcp_fsf_req_free(req);

	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}
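
/*
 * zfcp_fsf_fsfstatus_qual_eval() inspects the first word of the FSF status
 * qualifier after the channel reported FSF_ADAPTER_STATUS_AVAILABLE.
 * Qualifiers that the caller can act on return without marking the request;
 * all remaining qualifiers end up with ZFCP_STATUS_FSFREQ_ERROR, and
 * FSF_SQ_NO_RECOM additionally shuts the adapter down.
 */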
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		return;
	case FSF_SQ_COMMAND_ABORTED:
		break;
	case FSF_SQ_NO_RECOM:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_qdio_siosl(req->adapter);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
		break;
	}
	/* all non-return statuses set FSFREQ_ERROR */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
{
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_UNKNOWN_COMMAND:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter does not recognize the command 0x%x\n",
			req->qtcb->header.fsf_command);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		zfcp_fsf_fsfstatus_qual_eval(req);
		break;
	}
}

static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			  &adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
		/* go through reopen to flush pending requests */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8");
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
	}
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	del_timer(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	if (req->erp_action)
		zfcp_erp_notify(req->erp_action, 0);

	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}

/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	LIST_HEAD(remove_queue);

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
	zfcp_reqlist_move(adapter->req_list, &remove_queue);

	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}

#define ZFCP_FSF_PORTSPEED_1GBIT	(1 << 0)
#define ZFCP_FSF_PORTSPEED_2GBIT	(1 << 1)
#define ZFCP_FSF_PORTSPEED_4GBIT	(1 << 2)
#define ZFCP_FSF_PORTSPEED_10GBIT	(1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT	(1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT	(1 << 5)
#define ZFCP_FSF_PORTSPEED_32GBIT	(1 << 6)
#define ZFCP_FSF_PORTSPEED_64GBIT	(1 << 7)
#define ZFCP_FSF_PORTSPEED_128GBIT	(1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)

static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
	u32 fdmi_speed = 0;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
		fdmi_speed |= FC_PORTSPEED_1GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
		fdmi_speed |= FC_PORTSPEED_2GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
		fdmi_speed |= FC_PORTSPEED_4GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
		fdmi_speed |= FC_PORTSPEED_10GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
		fdmi_speed |= FC_PORTSPEED_8GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
		fdmi_speed |= FC_PORTSPEED_16GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
		fdmi_speed |= FC_PORTSPEED_32GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
		fdmi_speed |= FC_PORTSPEED_64GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
		fdmi_speed |= FC_PORTSPEED_128GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
	return fdmi_speed;
}

static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
	struct zfcp_adapter *adapter = req->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	struct fc_els_flogi *nsp, *plogi;

	/* adjust pointers for missing command code */
	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
					- sizeof(u32));
	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
					- sizeof(u32));

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
		 "IBM");
	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
					 (u16)FSF_STATUS_READS_RECOM);

	zfcp_scsi_set_prot(adapter);

	/* no error return above here, otherwise must fix call chains */
	/* do not evaluate invalid fields */
	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
		return 0;

	fc_host_port_id(shost) = ntoh24(bottom->s_id);
	fc_host_speed(shost) =
		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);

	adapter->hydra_version = bottom->adapter_type;
	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x",
		 bottom->adapter_type);

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		fc_host_fabric_name(shost) = 0;
		break;
	case FSF_TOPO_FABRIC:
		fc_host_fabric_name(shost) = be64_to_cpu(plogi->fl_wwnn);
		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		else
			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		fc_host_fabric_name(shost) = 0;
		fallthrough;
	default:
		fc_host_fabric_name(shost) = 0;
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
		return -EIO;
	}

	return 0;
}
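
/*
 * zfcp_fsf_exchange_config_data_handler() caches the config-data QTCB
 * bottom in the adapter (LIC version, adapter and connection features),
 * refreshes the diagnostic buffer, and evaluates the data even if the
 * channel reports it as incomplete; in the incomplete case the link-down
 * information is evaluated instead of shutting the adapter down.
 */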
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_diag_header *const diag_hdr =
		&adapter->diagnostics->config_data.header;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE,
		 "0x%08x", bottom->lic_version);
	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		/*
		 * usually we wait with an update till the cache is too old,
		 * but because we have the data available, update it anyway
		 */
		zfcp_diag_update_xdata(diag_hdr, bottom, false);

		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
			return;
		}
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
			  &adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		zfcp_diag_update_xdata(diag_hdr, bottom, true);
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
			  &adapter->status);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
		adapter->hardware_version = bottom->hardware_version;
		snprintf(fc_host_hardware_version(shost),
			 FC_VERSION_STRING_SIZE,
			 "0x%08x", bottom->hardware_version);
		memcpy(fc_host_serial_number(shost), bottom->serial_number,
		       min(FC_SERIAL_NUMBER_SIZE, 17));
		EBCASC(fc_host_serial_number(shost),
		       min(FC_SERIAL_NUMBER_SIZE, 17));
	}

	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
	}
}

/*
 * Mapping of FC Endpoint Security flag masks to mnemonics
 *
 * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
 * changes.
 */
static const struct {
	u32 mask;
	char *name;
} zfcp_fsf_fc_security_mnemonics[] = {
	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
	{ FSF_FC_SECURITY_ENC_FCSP2 |
	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
};

/* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
#define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15

/**
 * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
 *                                   mnemonics and place in a buffer
 * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
 *               into
 * @size       : the size of the buffer, including the trailing null space
 * @fc_security: one or more FC Endpoint Security flags, or zero
 * @fmt        : specifies whether a list or a single item is to be put into the
 *               buffer
 *
 * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
 * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
 *
 * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
 * a comma followed by a space into the buffer. If one or more FC Endpoint
 * Security flags cannot be translated into a mnemonic, as they are undefined
 * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
 * representation is placed into the buffer.
 *
 * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
 * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
 * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
 * representation is placed into the buffer. If more than one FC Endpoint
 * Security flag was specified, their value in hexadecimal representation is
 * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
 * can be used to define a buffer that is large enough to hold one mnemonic.
 *
 * Return: The number of characters written into buf not including the trailing
 *         '\0'. If size is == 0 the function returns 0.
 */
ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
				      enum zfcp_fsf_print_fmt fmt)
{
	const char *prefix = "";
	ssize_t len = 0;
	int i;

	if (fc_security == 0)
		return scnprintf(buf, size, "none");
	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
		return scnprintf(buf, size, "0x%08x", fc_security);

	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
			continue;

		len += scnprintf(buf + len, size - len, "%s%s", prefix,
				 zfcp_fsf_fc_security_mnemonics[i].name);
		prefix = ", ";
		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
	}

	if (fc_security != 0)
		len += scnprintf(buf + len, size - len, "%s0x%08x",
				 prefix, fc_security);

	return len;
}

static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
					     struct zfcp_fsf_req *req)
{
	if (adapter->fc_security_algorithms ==
	    adapter->fc_security_algorithms_old) {
		/* no change, no trace */
		return;
	}

	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
			      adapter->fc_security_algorithms_old,
			      adapter->fc_security_algorithms);

	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
}

static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	fc_host_permanent_port_name(shost) = bottom->wwpn;
	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
	fc_host_supported_speeds(shost) =
		zfcp_fsf_convert_portspeed(bottom->supported_speed);
	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
	       FC_FC4_LIST_SIZE);
	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
	       FC_FC4_LIST_SIZE);
	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
		adapter->fc_security_algorithms =
			bottom->fc_security_algorithms;
	else
		adapter->fc_security_algorithms = 0;
	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
}

static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_diag_header *const diag_hdr =
		&req->adapter->diagnostics->port_data.header;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		/*
		 * usually we wait with an update till the cache is too old,
		 * but because we have the data available, update it anyway
		 */
		zfcp_diag_update_xdata(diag_hdr, bottom, false);

		zfcp_fsf_exchange_port_evaluate(req);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		zfcp_diag_update_xdata(diag_hdr, bottom, true);
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	}
}
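
/*
 * Allocation helpers for FSF requests and their QTCBs: callers on the
 * error-recovery and SCSI paths pass a mempool so allocations cannot fail
 * under memory pressure, while all other callers get plain GFP_ATOMIC
 * allocations from kmalloc() or the zfcp_fsf_qtcb_cache slab.
 */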
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
	struct zfcp_fsf_req *req;

	if (likely(pool))
		req = mempool_alloc(pool, GFP_ATOMIC);
	else
		req = kmalloc(sizeof(*req), GFP_ATOMIC);

	if (unlikely(!req))
		return NULL;

	memset(req, 0, sizeof(*req));
	req->pool = pool;
	return req;
}

static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
	struct fsf_qtcb *qtcb;

	if (likely(pool))
		qtcb = mempool_alloc(pool, GFP_ATOMIC);
	else
		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);

	if (unlikely(!qtcb))
		return NULL;

	memset(qtcb, 0, sizeof(*qtcb));
	return qtcb;
}

static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
						u32 fsf_cmd, u8 sbtype,
						mempool_t *pool)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);

	if (unlikely(!req))
		return ERR_PTR(-ENOMEM);

	if (adapter->req_no == 0)
		adapter->req_no++;

	INIT_LIST_HEAD(&req->list);
	timer_setup(&req->timer, NULL, 0);
	init_completion(&req->completion);

	req->adapter = adapter;
	req->req_id = adapter->req_no;

	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
		if (likely(pool))
			req->qtcb = zfcp_fsf_qtcb_alloc(
				adapter->pool.qtcb_pool);
		else
			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);

		if (unlikely(!req->qtcb)) {
			zfcp_fsf_req_free(req);
			return ERR_PTR(-ENOMEM);
		}

		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
		req->qtcb->prefix.req_id = req->req_id;
		req->qtcb->prefix.ulp_info = 26;
		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
		req->qtcb->header.req_handle = req->req_id;
		req->qtcb->header.fsf_command = fsf_cmd;
	}

	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
			   req->qtcb, sizeof(struct fsf_qtcb));

	return req;
}

static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	int req_id = req->req_id;

	zfcp_reqlist_add(adapter->req_list, req);

	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
	req->issued = get_tod_clock();
	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
		del_timer(&req->timer);
		/* lookup request again, list might have changed */
		zfcp_reqlist_find_rm(adapter->req_list, req_id);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
		return -EIO;
	}

	/*
	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
	 * ONLY TOUCH SYNC req AGAIN ON req->completion.
	 *
	 * The request might complete and be freed concurrently at any point
	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
	 * uncontrolled access after this might result in a use-after-free bug.
	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
	 * when it is completed via req->completion, is it safe to use req
	 * again.
	 */

	/* Don't increase for unsolicited status */
	if (!is_srb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}

/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct page *page;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  SBAL_SFLAGS0_TYPE_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
	if (!page) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;

	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_req_send:
	req->data = NULL;
	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
	zfcp_dbf_hba_fsf_uss("fssr__1", req);
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
						"fsafch1");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fsafch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (fsq->word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
		break;
	}
}

/**
 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
 * @scmnd: The SCSI command to abort
 * Returns: pointer to struct zfcp_fsf_req
 */
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
	struct zfcp_fsf_req *req = NULL;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = sdev;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}

static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_fsf_ct_els *ct = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	ct->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		ct->status = 0;
		zfcp_dbf_san_res("fsscth2", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_PORT_BOXED:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
		fallthrough;
	case FSF_GENERIC_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
	case FSF_SBAL_MISMATCH:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

skip_fsfstatus:
	if (ct->handler)
		ct->handler(ct->handler_data);
}

static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
					    struct zfcp_qdio_req *q_req,
					    struct scatterlist *sg_req,
					    struct scatterlist *sg_resp)
{
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
	zfcp_qdio_set_sbale_last(qdio, q_req);
}
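
/*
 * zfcp_fsf_setup_ct_els_sbals() maps the CT/ELS request and response
 * scatterlists onto SBALs in one of three ways: with the data-division
 * format if the adapter runs in multi-buffer mode, with a single unchained
 * SBAL if both buffers fit into one SBALE each, or with chained SBALs
 * provided FSF_FEATURE_ELS_CT_CHAINED_SBALS is available; otherwise
 * -EOPNOTSUPP is returned.
 */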
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
				       struct scatterlist *sg_req,
				       struct scatterlist *sg_resp)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb *qtcb = req->qtcb;
	u32 feat = adapter->adapter_features;

	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
		qtcb->bottom.support.req_buf_length =
			zfcp_qdio_real_bytes(sg_req);
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;
		qtcb->bottom.support.resp_buf_length =
			zfcp_qdio_real_bytes(sg_resp);

		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
		zfcp_qdio_set_scount(qdio, &req->qdio_req);
		return 0;
	}

	/* use single, unchained SBAL if it can hold the request */
	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
						sg_req, sg_resp);
		return 0;
	}

	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
		return -EOPNOTSUPP;

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
		return -EIO;

	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
		return -EIO;

	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return 0;
}

static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
				 struct scatterlist *sg_req,
				 struct scatterlist *sg_resp,
				 unsigned int timeout)
{
	int ret;

	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
	if (ret)
		return ret;

	/* common settings for ct/gs and els requests */
	if (timeout > 255)
		timeout = 255; /* max value accepted by hardware */
	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
	req->qtcb->bottom.support.timeout = timeout;
	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);

	return 0;
}

/**
 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @wka_port: pointer to zfcp WKA port to send CT/GS to
 * @ct: pointer to struct zfcp_send_ct with data for request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
		     unsigned int timeout)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	ct->d_id = wka_port->d_id;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fsf_ct_els *send_els = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		send_els->status = 0;
		zfcp_dbf_san_res("fsselh1", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occur, avoided in zfcp_fsf_send_els */
		fallthrough;
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}

/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to zfcp adapter
 * @d_id: N_Port_ID to send ELS to
 * @els: pointer to struct zfcp_send_els with data for the command
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = adapter->qdio;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;

	if (!zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

	if (ret)
		goto failed_send;

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
	els->d_id = d_id;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}
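
/**
 * zfcp_fsf_exchange_config_data - request information about FCP channel
 * @erp_action: ERP action for the adapter on whose behalf the data is requested
 * Returns: 0 on success, error otherwise
 */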
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->bottom.config.feature_selection =
		FSF_FEATURE_NOTIFICATION_LOST |
		FSF_FEATURE_UPDATE_ALERT |
		FSF_FEATURE_REQUEST_SFP_DATA |
		FSF_FEATURE_FC_SECURITY;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Config Data was successful, @data is complete
 * * -EIO	- Exchange Config Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	req->handler = zfcp_fsf_exchange_config_data_handler;

	req->qtcb->bottom.config.feature_selection =
		FSF_FEATURE_NOTIFICATION_LOST |
		FSF_FEATURE_UPDATE_ALERT |
		FSF_FEATURE_REQUEST_SFP_DATA |
		FSF_FEATURE_FC_SECURITY;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Port Data was successful, @data is complete
 * * -EIO	- Exchange Port Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 * * -EOPNOTSUPP	- This operation is not supported
 */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
					  struct zfcp_fsf_req *req)
{
	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];

	if (port->connection_info == port->connection_info_old) {
		/* no change, no log nor trace */
		return;
	}

	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
			      port->connection_info_old,
			      port->connection_info);

	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
				      port->connection_info_old,
				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
				      port->connection_info,
				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);

	if (strncmp(mnemonic_old, mnemonic_new,
		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
		/* no change in string representation, no log */
		goto out;
	}

	if (port->connection_info_old == 0) {
		/* activation */
		dev_info(&port->adapter->ccw_device->dev,
			 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
			 port->wwpn, mnemonic_new);
	} else if (port->connection_info == 0) {
		/* deactivation */
		dev_warn(&port->adapter->ccw_device->dev,
			 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
			 port->wwpn, mnemonic_old);
	} else {
		/* change */
		dev_warn(&port->adapter->ccw_device->dev,
			 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
			 port->wwpn, mnemonic_old, mnemonic_new);
	}

out:
	port->connection_info_old = port->connection_info;
}
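
/*
 * zfcp_fsf_log_security_error() translates the FSF status qualifier word 0
 * of an FSF_SECURITY_ERROR response into a rate-limited kernel message
 * naming the remote port WWPN; unknown codes are reported with their raw
 * hexadecimal value.
 */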
static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
					u64 wwpn)
{
	switch (fsf_sqw0) {

	/*
	 * Open Port command error codes
	 */

	case FSF_SQ_SECURITY_REQUIRED:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_TIMEOUT:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
				     wwpn);
		break;
	case FSF_SQ_SECURITY_AUTH_FAILURE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
				     wwpn);
		break;

	/*
	 * Send FCP command error codes
	 */

	case FSF_SQ_SECURITY_ENC_FAILURE:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
				     wwpn);
		break;

	/*
	 * Unknown error codes
	 */

	default:
		dev_warn_ratelimited(dev,
				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
				     fsf_sqw0, wwpn);
	}
}
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SECURITY_ERROR:
		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
					    header->fsf_status_qual.word[0],
					    port->wwpn);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* no zfcp_fc_test_link() with failed open port */
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
			port->connection_info = bottom->connection_info;
		else
			port->connection_info = 0;
		zfcp_fsf_log_port_fc_security(port, req);
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
			      &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, it might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourselves at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternatively, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fc_els_flogi *) bottom->els;
		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
			zfcp_fc_plogi_evaluate(port, plogi);
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->dev);
}

/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_port_handler;
	hton24(req->qtcb->bottom.support.d_id, port->d_id);
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;
	get_device(&port->dev);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
		put_device(&port->dev);
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
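
/**
 * zfcp_fsf_close_port_handler() - Evaluate a completed "close port" request.
 * @req: Pointer to the completed FSF request; req->data points to the port.
 *
 * On FSF_GOOD the common open status of the port is cleared; an invalid
 * port handle triggers an adapter reopen.
 */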
static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
		break;
	}
}

/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
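
/**
 * zfcp_fsf_open_wka_port_handler() - Evaluate a completed "open WKA port"
 *				      request.
 * @req: Pointer to the completed FSF request; req->data points to the
 *	 well-known-address (WKA) port.
 *
 * Stores the returned port handle and marks the WKA port online or offline
 * according to the FSF status, then wakes up the waiter on completion_wq.
 */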
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		fallthrough;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		wka_port->handle = header->port_handle;
		fallthrough;
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	}
out:
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_fc_wka_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_wka_port_handler;
	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
	req->data = wka_port;

	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
	return retval;
}
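
/**
 * zfcp_fsf_close_wka_port_handler() - Evaluate a completed "close WKA port"
 *				       request.
 * @req: Pointer to the completed FSF request; req->data points to the WKA port.
 *
 * Marks the WKA port offline and wakes up the waiter; an invalid port handle
 * additionally triggers an adapter reopen.
 */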
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	req->qtcb->header.port_handle = wka_port->handle;

	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
	return retval;
}
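
/**
 * zfcp_fsf_close_physical_port_handler() - Evaluate a completed "close
 *					    physical port" request.
 * @req: Pointer to the completed FSF request; req->data points to the port.
 *
 * On FSF_GOOD and FSF_PORT_BOXED the physical open status of the port and
 * the open status of all LUNs on that port are cleared.
 */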
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		break;
	}
}

/**
 * zfcp_fsf_close_physical_port - close physical port
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success
 */
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = erp_action->port;
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_close_physical_port_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
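
/**
 * zfcp_fsf_open_lun_handler() - Evaluate a completed "open LUN" request.
 * @req: Pointer to the completed FSF request; req->data points to the
 *	 SCSI device representing the LUN.
 *
 * On FSF_GOOD the returned LUN handle is stored and the LUN is marked open;
 * sharing violations and exhausted handles mark the LUN as ERP failed.
 */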
static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	union fsf_status_qual *qual = &header->fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
		      &zfcp_sdev->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		fallthrough;
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		if (qual->word[0])
			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
				 "LUN 0x%016Lx on port 0x%016Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 zfcp_scsi_dev_lun(sdev),
				 (unsigned long long)zfcp_sdev->port->wwpn,
				 qual->fsf_queue_designator.cssid,
				 qual->fsf_queue_designator.hla);
		zfcp_erp_set_lun_status(sdev,
					ZFCP_STATUS_COMMON_ERP_FAILED |
					ZFCP_STATUS_COMMON_ACCESS_DENIED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
			 (unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		fallthrough;
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
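
/**
 * zfcp_fsf_close_lun_handler() - Evaluate a completed "close LUN" request.
 * @req: Pointer to the completed FSF request; req->data points to the
 *	 SCSI device representing the LUN.
 *
 * On FSF_GOOD the common open status of the LUN is cleared; invalid handles
 * trigger an adapter or port reopen.
 */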
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			fallthrough;
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_close_lun - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->handler = zfcp_fsf_close_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
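
/**
 * zfcp_fsf_update_lat() - Fold one latency sample into a latency record,
 *			   keeping the running sum, minimum and maximum.
 * @lat_rec: Latency record to update.
 * @lat: Latency value reported by the channel for the completed request.
 */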
static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
	lat_rec->sum += lat;
	lat_rec->min = min(lat_rec->min, lat);
	lat_rec->max = max(lat_rec->max, lat);
}

static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct zfcp_latency_cont *lat = NULL;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = 0;
	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		zfcp_sdev = sdev_to_zfcp(scsi->device);
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}

/**
 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
 * @req: Pointer to FSF request.
 * @sdev: Pointer to SCSI device as request context.
 */
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
					struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect FCP_CMND length %d, FCP device closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SECURITY_ERROR:
		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
					    header->fsf_status_qual.word[0],
					    zfcp_sdev->port->wwpn);
		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}
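
/**
 * zfcp_fsf_fcp_cmnd_handler() - Complete an FCP command request for a SCSI
 *				 command and pass the result to the SCSI
 *				 midlayer via scsi_done().
 * @req: Pointer to the completed FSF request; req->data points to the
 *	 struct scsi_cmnd, or is NULL if the command was already aborted.
 *
 * The adapter's abort_lock is held across the completion so that scsi_done()
 * cannot race with the completion of an abort for the same command.
 */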
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req, scpnt->device);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}
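
/**
 * zfcp_fsf_set_data_dir() - Derive the FSF data direction from the SCSI
 *			     command's DMA direction and protection operation.
 * @scsi_cmnd: SCSI command to inspect.
 * @data_dir: Filled with the matching FSF_DATADIR_* value.
 *
 * Return: 0 on success, -EINVAL for bidirectional transfers or unsupported
 * protection operations.
 */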
static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_NONE:
			*data_dir = FSF_DATADIR_CMND;
			break;
		case DMA_FROM_DEVICE:
			*data_dir = FSF_DATADIR_READ;
			break;
		case DMA_TO_DEVICE:
			*data_dir = FSF_DATADIR_WRITE;
			break;
		case DMA_BIDIRECTIONAL:
			return -EINVAL;
		}
		break;

	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
 * @scsi_cmnd: scsi command to be sent
 */
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);

	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
	    scsi_prot_sg_count(scsi_cmnd)) {
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}
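
/**
 * zfcp_fsf_fcp_task_mgmt_handler() - Evaluate the FCP response of a completed
 *				      task management request.
 * @req: Pointer to the completed FSF request; req->data points to the
 *	 SCSI device the TMF was sent to.
 *
 * Sets ZFCP_STATUS_FSFREQ_TMFUNCFAILED unless the response code reports
 * FCP_TMF_CMPL and the request completed without error.
 */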
static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req, sdev);

	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}

/**
 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Unsigned byte for task management flags.
 *
 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
 */
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = sdev;

	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}

/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		req_id = sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * Unknown request means that we have potentially memory
			 * corruption and must stop the machine immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}