1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * zfcp device driver 4 * 5 * Implementation of FSF commands. 6 * 7 * Copyright IBM Corp. 2002, 2018 8 */ 9 10 #define KMSG_COMPONENT "zfcp" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/blktrace_api.h> 14 #include <linux/jiffies.h> 15 #include <linux/types.h> 16 #include <linux/slab.h> 17 #include <scsi/fc/fc_els.h> 18 #include "zfcp_ext.h" 19 #include "zfcp_fc.h" 20 #include "zfcp_dbf.h" 21 #include "zfcp_qdio.h" 22 #include "zfcp_reqlist.h" 23 #include "zfcp_diag.h" 24 25 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */ 26 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ) 27 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */ 28 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ) 29 30 struct kmem_cache *zfcp_fsf_qtcb_cache; 31 32 static bool ber_stop = true; 33 module_param(ber_stop, bool, 0600); 34 MODULE_PARM_DESC(ber_stop, 35 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)"); 36 37 static void zfcp_fsf_request_timeout_handler(struct timer_list *t) 38 { 39 struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer); 40 struct zfcp_adapter *adapter = fsf_req->adapter; 41 42 zfcp_qdio_siosl(adapter); 43 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, 44 "fsrth_1"); 45 } 46 47 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, 48 unsigned long timeout) 49 { 50 fsf_req->timer.function = zfcp_fsf_request_timeout_handler; 51 fsf_req->timer.expires = jiffies + timeout; 52 add_timer(&fsf_req->timer); 53 } 54 55 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req) 56 { 57 BUG_ON(!fsf_req->erp_action); 58 fsf_req->timer.function = zfcp_erp_timeout_handler; 59 fsf_req->timer.expires = jiffies + 30 * HZ; 60 add_timer(&fsf_req->timer); 61 } 62 63 /* association between FSF command and FSF QTCB type */ 64 static u32 fsf_qtcb_type[] = { 65 [FSF_QTCB_FCP_CMND] = 
FSF_IO_COMMAND, 66 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND, 67 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND, 68 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND, 69 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND, 70 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND, 71 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND, 72 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND, 73 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND, 74 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND, 75 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND, 76 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND, 77 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND 78 }; 79 80 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) 81 { 82 dev_err(&req->adapter->ccw_device->dev, "FCP device not " 83 "operational because of an unsupported FC class\n"); 84 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); 85 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 86 } 87 88 /** 89 * zfcp_fsf_req_free - free memory used by fsf request 90 * @req: pointer to struct zfcp_fsf_req 91 */ 92 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) 93 { 94 if (likely(req->pool)) { 95 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 96 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); 97 mempool_free(req, req->pool); 98 return; 99 } 100 101 if (likely(!zfcp_fsf_req_is_status_read_buffer(req))) 102 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); 103 kfree(req); 104 } 105 106 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) 107 { 108 unsigned long flags; 109 struct fsf_status_read_buffer *sr_buf = req->data; 110 struct zfcp_adapter *adapter = req->adapter; 111 struct zfcp_port *port; 112 int d_id = ntoh24(sr_buf->d_id); 113 114 read_lock_irqsave(&adapter->port_list_lock, flags); 115 list_for_each_entry(port, &adapter->port_list, list) 116 if (port->d_id == d_id) { 117 zfcp_erp_port_reopen(port, 0, "fssrpc1"); 118 break; 119 } 120 read_unlock_irqrestore(&adapter->port_list_lock, flags); 
121 } 122 123 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, 124 struct fsf_link_down_info *link_down) 125 { 126 struct zfcp_adapter *adapter = req->adapter; 127 128 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED) 129 return; 130 131 atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status); 132 133 zfcp_scsi_schedule_rports_block(adapter); 134 135 if (!link_down) 136 goto out; 137 138 switch (link_down->error_code) { 139 case FSF_PSQ_LINK_NO_LIGHT: 140 dev_warn(&req->adapter->ccw_device->dev, 141 "There is no light signal from the local " 142 "fibre channel cable\n"); 143 break; 144 case FSF_PSQ_LINK_WRAP_PLUG: 145 dev_warn(&req->adapter->ccw_device->dev, 146 "There is a wrap plug instead of a fibre " 147 "channel cable\n"); 148 break; 149 case FSF_PSQ_LINK_NO_FCP: 150 dev_warn(&req->adapter->ccw_device->dev, 151 "The adjacent fibre channel node does not " 152 "support FCP\n"); 153 break; 154 case FSF_PSQ_LINK_FIRMWARE_UPDATE: 155 dev_warn(&req->adapter->ccw_device->dev, 156 "The FCP device is suspended because of a " 157 "firmware update\n"); 158 break; 159 case FSF_PSQ_LINK_INVALID_WWPN: 160 dev_warn(&req->adapter->ccw_device->dev, 161 "The FCP device detected a WWPN that is " 162 "duplicate or not valid\n"); 163 break; 164 case FSF_PSQ_LINK_NO_NPIV_SUPPORT: 165 dev_warn(&req->adapter->ccw_device->dev, 166 "The fibre channel fabric does not support NPIV\n"); 167 break; 168 case FSF_PSQ_LINK_NO_FCP_RESOURCES: 169 dev_warn(&req->adapter->ccw_device->dev, 170 "The FCP adapter cannot support more NPIV ports\n"); 171 break; 172 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES: 173 dev_warn(&req->adapter->ccw_device->dev, 174 "The adjacent switch cannot support " 175 "more NPIV ports\n"); 176 break; 177 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE: 178 dev_warn(&req->adapter->ccw_device->dev, 179 "The FCP adapter could not log in to the " 180 "fibre channel fabric\n"); 181 break; 182 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED: 183 
dev_warn(&req->adapter->ccw_device->dev, 184 "The WWPN assignment file on the FCP adapter " 185 "has been damaged\n"); 186 break; 187 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED: 188 dev_warn(&req->adapter->ccw_device->dev, 189 "The mode table on the FCP adapter " 190 "has been damaged\n"); 191 break; 192 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT: 193 dev_warn(&req->adapter->ccw_device->dev, 194 "All NPIV ports on the FCP adapter have " 195 "been assigned\n"); 196 break; 197 default: 198 dev_warn(&req->adapter->ccw_device->dev, 199 "The link between the FCP adapter and " 200 "the FC fabric is down\n"); 201 } 202 out: 203 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); 204 } 205 206 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) 207 { 208 struct fsf_status_read_buffer *sr_buf = req->data; 209 struct fsf_link_down_info *ldi = 210 (struct fsf_link_down_info *) &sr_buf->payload; 211 212 switch (sr_buf->status_subtype) { 213 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK: 214 case FSF_STATUS_READ_SUB_FDISC_FAILED: 215 zfcp_fsf_link_down_info_eval(req, ldi); 216 break; 217 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE: 218 zfcp_fsf_link_down_info_eval(req, NULL); 219 } 220 } 221 222 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) 223 { 224 struct zfcp_adapter *adapter = req->adapter; 225 struct fsf_status_read_buffer *sr_buf = req->data; 226 227 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 228 zfcp_dbf_hba_fsf_uss("fssrh_1", req); 229 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 230 zfcp_fsf_req_free(req); 231 return; 232 } 233 234 zfcp_dbf_hba_fsf_uss("fssrh_4", req); 235 236 switch (sr_buf->status_type) { 237 case FSF_STATUS_READ_PORT_CLOSED: 238 zfcp_fsf_status_read_port_closed(req); 239 break; 240 case FSF_STATUS_READ_INCOMING_ELS: 241 zfcp_fc_incoming_els(req); 242 break; 243 case FSF_STATUS_READ_SENSE_DATA_AVAIL: 244 break; 245 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD: 246 zfcp_dbf_hba_bit_err("fssrh_3", 
req); 247 if (ber_stop) { 248 dev_warn(&adapter->ccw_device->dev, 249 "All paths over this FCP device are disused because of excessive bit errors\n"); 250 zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b"); 251 } else { 252 dev_warn(&adapter->ccw_device->dev, 253 "The error threshold for checksum statistics has been exceeded\n"); 254 } 255 break; 256 case FSF_STATUS_READ_LINK_DOWN: 257 zfcp_fsf_status_read_link_down(req); 258 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0); 259 break; 260 case FSF_STATUS_READ_LINK_UP: 261 dev_info(&adapter->ccw_device->dev, 262 "The local link has been restored\n"); 263 /* All ports should be marked as ready to run again */ 264 zfcp_erp_set_adapter_status(adapter, 265 ZFCP_STATUS_COMMON_RUNNING); 266 zfcp_erp_adapter_reopen(adapter, 267 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 268 ZFCP_STATUS_COMMON_ERP_FAILED, 269 "fssrh_2"); 270 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0); 271 272 break; 273 case FSF_STATUS_READ_NOTIFICATION_LOST: 274 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS) 275 zfcp_fc_conditional_port_scan(adapter); 276 break; 277 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT: 278 adapter->adapter_features = sr_buf->payload.word[0]; 279 break; 280 } 281 282 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 283 zfcp_fsf_req_free(req); 284 285 atomic_inc(&adapter->stat_miss); 286 queue_work(adapter->work_queue, &adapter->stat_work); 287 } 288 289 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) 290 { 291 switch (req->qtcb->header.fsf_status_qual.word[0]) { 292 case FSF_SQ_FCP_RSP_AVAILABLE: 293 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 294 case FSF_SQ_NO_RETRY_POSSIBLE: 295 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 296 return; 297 case FSF_SQ_COMMAND_ABORTED: 298 break; 299 case FSF_SQ_NO_RECOM: 300 dev_err(&req->adapter->ccw_device->dev, 301 "The FCP adapter reported a problem " 302 "that cannot be recovered\n"); 303 zfcp_qdio_siosl(req->adapter); 304 
zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); 305 break; 306 } 307 /* all non-return stats set FSFREQ_ERROR*/ 308 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 309 } 310 311 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) 312 { 313 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) 314 return; 315 316 switch (req->qtcb->header.fsf_status) { 317 case FSF_UNKNOWN_COMMAND: 318 dev_err(&req->adapter->ccw_device->dev, 319 "The FCP adapter does not recognize the command 0x%x\n", 320 req->qtcb->header.fsf_command); 321 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); 322 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 323 break; 324 case FSF_ADAPTER_STATUS_AVAILABLE: 325 zfcp_fsf_fsfstatus_qual_eval(req); 326 break; 327 } 328 } 329 330 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) 331 { 332 struct zfcp_adapter *adapter = req->adapter; 333 struct fsf_qtcb *qtcb = req->qtcb; 334 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual; 335 336 zfcp_dbf_hba_fsf_response(req); 337 338 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { 339 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 340 return; 341 } 342 343 switch (qtcb->prefix.prot_status) { 344 case FSF_PROT_GOOD: 345 case FSF_PROT_FSF_STATUS_PRESENTED: 346 return; 347 case FSF_PROT_QTCB_VERSION_ERROR: 348 dev_err(&adapter->ccw_device->dev, 349 "QTCB version 0x%x not supported by FCP adapter " 350 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION, 351 psq->word[0], psq->word[1]); 352 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1"); 353 break; 354 case FSF_PROT_ERROR_STATE: 355 case FSF_PROT_SEQ_NUMB_ERROR: 356 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2"); 357 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 358 break; 359 case FSF_PROT_UNSUPP_QTCB_TYPE: 360 dev_err(&adapter->ccw_device->dev, 361 "The QTCB type is not supported by the FCP adapter\n"); 362 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3"); 363 break; 364 case FSF_PROT_HOST_CONNECTION_INITIALIZING: 365 
atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, 366 &adapter->status); 367 break; 368 case FSF_PROT_DUPLICATE_REQUEST_ID: 369 dev_err(&adapter->ccw_device->dev, 370 "0x%Lx is an ambiguous request identifier\n", 371 (unsigned long long)qtcb->bottom.support.req_handle); 372 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4"); 373 break; 374 case FSF_PROT_LINK_DOWN: 375 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); 376 /* go through reopen to flush pending requests */ 377 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6"); 378 break; 379 case FSF_PROT_REEST_QUEUE: 380 /* All ports should be marked as ready to run again */ 381 zfcp_erp_set_adapter_status(adapter, 382 ZFCP_STATUS_COMMON_RUNNING); 383 zfcp_erp_adapter_reopen(adapter, 384 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 385 ZFCP_STATUS_COMMON_ERP_FAILED, 386 "fspse_8"); 387 break; 388 default: 389 dev_err(&adapter->ccw_device->dev, 390 "0x%x is not a valid transfer protocol status\n", 391 qtcb->prefix.prot_status); 392 zfcp_qdio_siosl(adapter); 393 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9"); 394 } 395 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 396 } 397 398 /** 399 * zfcp_fsf_req_complete - process completion of a FSF request 400 * @req: The FSF request that has been completed. 401 * 402 * When a request has been completed either from the FCP adapter, 403 * or it has been dismissed due to a queue shutdown, this function 404 * is called to process the completion status and trigger further 405 * events related to the FSF request. 
406 */ 407 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) 408 { 409 if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) { 410 zfcp_fsf_status_read_handler(req); 411 return; 412 } 413 414 del_timer(&req->timer); 415 zfcp_fsf_protstatus_eval(req); 416 zfcp_fsf_fsfstatus_eval(req); 417 req->handler(req); 418 419 if (req->erp_action) 420 zfcp_erp_notify(req->erp_action, 0); 421 422 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) 423 zfcp_fsf_req_free(req); 424 else 425 complete(&req->completion); 426 } 427 428 /** 429 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests 430 * @adapter: pointer to struct zfcp_adapter 431 * 432 * Never ever call this without shutting down the adapter first. 433 * Otherwise the adapter would continue using and corrupting s390 storage. 434 * Included BUG_ON() call to ensure this is done. 435 * ERP is supposed to be the only user of this function. 436 */ 437 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 438 { 439 struct zfcp_fsf_req *req, *tmp; 440 LIST_HEAD(remove_queue); 441 442 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP); 443 zfcp_reqlist_move(adapter->req_list, &remove_queue); 444 445 list_for_each_entry_safe(req, tmp, &remove_queue, list) { 446 list_del(&req->list); 447 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 448 zfcp_fsf_req_complete(req); 449 } 450 } 451 452 #define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0) 453 #define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1) 454 #define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2) 455 #define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3) 456 #define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4) 457 #define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5) 458 #define ZFCP_FSF_PORTSPEED_32GBIT (1 << 6) 459 #define ZFCP_FSF_PORTSPEED_64GBIT (1 << 7) 460 #define ZFCP_FSF_PORTSPEED_128GBIT (1 << 8) 461 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15) 462 463 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed) 464 { 465 u32 fdmi_speed = 0; 466 if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT) 467 fdmi_speed 
|= FC_PORTSPEED_1GBIT; 468 if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT) 469 fdmi_speed |= FC_PORTSPEED_2GBIT; 470 if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT) 471 fdmi_speed |= FC_PORTSPEED_4GBIT; 472 if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT) 473 fdmi_speed |= FC_PORTSPEED_10GBIT; 474 if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT) 475 fdmi_speed |= FC_PORTSPEED_8GBIT; 476 if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT) 477 fdmi_speed |= FC_PORTSPEED_16GBIT; 478 if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT) 479 fdmi_speed |= FC_PORTSPEED_32GBIT; 480 if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT) 481 fdmi_speed |= FC_PORTSPEED_64GBIT; 482 if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT) 483 fdmi_speed |= FC_PORTSPEED_128GBIT; 484 if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED) 485 fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED; 486 return fdmi_speed; 487 } 488 489 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) 490 { 491 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; 492 struct zfcp_adapter *adapter = req->adapter; 493 struct Scsi_Host *shost = adapter->scsi_host; 494 struct fc_els_flogi *nsp, *plogi; 495 496 /* adjust pointers for missing command code */ 497 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param 498 - sizeof(u32)); 499 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload 500 - sizeof(u32)); 501 502 if (req->data) 503 memcpy(req->data, bottom, sizeof(*bottom)); 504 505 fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn); 506 fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn); 507 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; 508 509 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; 510 adapter->stat_read_buf_num = max(bottom->status_read_buf_num, 511 (u16)FSF_STATUS_READS_RECOM); 512 513 if (fc_host_permanent_port_name(shost) == -1) 514 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 515 516 zfcp_scsi_set_prot(adapter); 517 518 /* no error return above here, 
otherwise must fix call chains */ 519 /* do not evaluate invalid fields */ 520 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) 521 return 0; 522 523 fc_host_port_id(shost) = ntoh24(bottom->s_id); 524 fc_host_speed(shost) = 525 zfcp_fsf_convert_portspeed(bottom->fc_link_speed); 526 527 adapter->hydra_version = bottom->adapter_type; 528 529 switch (bottom->fc_topology) { 530 case FSF_TOPO_P2P: 531 adapter->peer_d_id = ntoh24(bottom->peer_d_id); 532 adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn); 533 adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn); 534 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 535 break; 536 case FSF_TOPO_FABRIC: 537 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE) 538 fc_host_port_type(shost) = FC_PORTTYPE_NPIV; 539 else 540 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 541 break; 542 case FSF_TOPO_AL: 543 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 544 /* fall through */ 545 default: 546 dev_err(&adapter->ccw_device->dev, 547 "Unknown or unsupported arbitrated loop " 548 "fibre channel topology detected\n"); 549 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1"); 550 return -EIO; 551 } 552 553 return 0; 554 } 555 556 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) 557 { 558 struct zfcp_adapter *adapter = req->adapter; 559 struct fsf_qtcb *qtcb = req->qtcb; 560 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config; 561 struct Scsi_Host *shost = adapter->scsi_host; 562 563 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 564 return; 565 566 adapter->fsf_lic_version = bottom->lic_version; 567 adapter->adapter_features = bottom->adapter_features; 568 adapter->connection_features = bottom->connection_features; 569 adapter->peer_wwpn = 0; 570 adapter->peer_wwnn = 0; 571 adapter->peer_d_id = 0; 572 573 switch (qtcb->header.fsf_status) { 574 case FSF_GOOD: 575 if (zfcp_fsf_exchange_config_evaluate(req)) 576 return; 577 578 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) { 579 
dev_err(&adapter->ccw_device->dev, 580 "FCP adapter maximum QTCB size (%d bytes) " 581 "is too small\n", 582 bottom->max_qtcb_size); 583 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1"); 584 return; 585 } 586 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 587 &adapter->status); 588 break; 589 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 590 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 591 592 fc_host_node_name(shost) = 0; 593 fc_host_port_name(shost) = 0; 594 fc_host_port_id(shost) = 0; 595 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; 596 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; 597 adapter->hydra_version = 0; 598 599 /* avoids adapter shutdown to be able to recognize 600 * events such as LINK UP */ 601 atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK, 602 &adapter->status); 603 zfcp_fsf_link_down_info_eval(req, 604 &qtcb->header.fsf_status_qual.link_down_info); 605 if (zfcp_fsf_exchange_config_evaluate(req)) 606 return; 607 break; 608 default: 609 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); 610 return; 611 } 612 613 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) { 614 adapter->hardware_version = bottom->hardware_version; 615 memcpy(fc_host_serial_number(shost), bottom->serial_number, 616 min(FC_SERIAL_NUMBER_SIZE, 17)); 617 EBCASC(fc_host_serial_number(shost), 618 min(FC_SERIAL_NUMBER_SIZE, 17)); 619 } 620 621 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) { 622 dev_err(&adapter->ccw_device->dev, 623 "The FCP adapter only supports newer " 624 "control block versions\n"); 625 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4"); 626 return; 627 } 628 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) { 629 dev_err(&adapter->ccw_device->dev, 630 "The FCP adapter only supports older " 631 "control block versions\n"); 632 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5"); 633 } 634 } 635 636 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) 637 { 638 struct zfcp_adapter *adapter = req->adapter; 639 struct 
fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; 640 struct Scsi_Host *shost = adapter->scsi_host; 641 642 if (req->data) 643 memcpy(req->data, bottom, sizeof(*bottom)); 644 645 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { 646 fc_host_permanent_port_name(shost) = bottom->wwpn; 647 } else 648 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 649 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 650 fc_host_supported_speeds(shost) = 651 zfcp_fsf_convert_portspeed(bottom->supported_speed); 652 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types, 653 FC_FC4_LIST_SIZE); 654 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types, 655 FC_FC4_LIST_SIZE); 656 } 657 658 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) 659 { 660 struct zfcp_diag_header *const diag_hdr = 661 &req->adapter->diagnostics->port_data.header; 662 struct fsf_qtcb *qtcb = req->qtcb; 663 struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port; 664 665 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 666 return; 667 668 switch (qtcb->header.fsf_status) { 669 case FSF_GOOD: 670 /* 671 * usually we wait with an update till the cache is too old, 672 * but because we have the data available, update it anyway 673 */ 674 zfcp_diag_update_xdata(diag_hdr, bottom, false); 675 676 zfcp_fsf_exchange_port_evaluate(req); 677 break; 678 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE: 679 zfcp_diag_update_xdata(diag_hdr, bottom, true); 680 req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE; 681 682 zfcp_fsf_exchange_port_evaluate(req); 683 zfcp_fsf_link_down_info_eval(req, 684 &qtcb->header.fsf_status_qual.link_down_info); 685 break; 686 } 687 } 688 689 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool) 690 { 691 struct zfcp_fsf_req *req; 692 693 if (likely(pool)) 694 req = mempool_alloc(pool, GFP_ATOMIC); 695 else 696 req = kmalloc(sizeof(*req), GFP_ATOMIC); 697 698 if (unlikely(!req)) 699 return NULL; 700 701 memset(req, 0, 
sizeof(*req)); 702 req->pool = pool; 703 return req; 704 } 705 706 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool) 707 { 708 struct fsf_qtcb *qtcb; 709 710 if (likely(pool)) 711 qtcb = mempool_alloc(pool, GFP_ATOMIC); 712 else 713 qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); 714 715 if (unlikely(!qtcb)) 716 return NULL; 717 718 memset(qtcb, 0, sizeof(*qtcb)); 719 return qtcb; 720 } 721 722 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 723 u32 fsf_cmd, u8 sbtype, 724 mempool_t *pool) 725 { 726 struct zfcp_adapter *adapter = qdio->adapter; 727 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); 728 729 if (unlikely(!req)) 730 return ERR_PTR(-ENOMEM); 731 732 if (adapter->req_no == 0) 733 adapter->req_no++; 734 735 INIT_LIST_HEAD(&req->list); 736 timer_setup(&req->timer, NULL, 0); 737 init_completion(&req->completion); 738 739 req->adapter = adapter; 740 req->req_id = adapter->req_no; 741 742 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) { 743 if (likely(pool)) 744 req->qtcb = zfcp_fsf_qtcb_alloc( 745 adapter->pool.qtcb_pool); 746 else 747 req->qtcb = zfcp_fsf_qtcb_alloc(NULL); 748 749 if (unlikely(!req->qtcb)) { 750 zfcp_fsf_req_free(req); 751 return ERR_PTR(-ENOMEM); 752 } 753 754 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; 755 req->qtcb->prefix.req_id = req->req_id; 756 req->qtcb->prefix.ulp_info = 26; 757 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd]; 758 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; 759 req->qtcb->header.req_handle = req->req_id; 760 req->qtcb->header.fsf_command = fsf_cmd; 761 } 762 763 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, 764 req->qtcb, sizeof(struct fsf_qtcb)); 765 766 return req; 767 } 768 769 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) 770 { 771 const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); 772 struct zfcp_adapter *adapter = req->adapter; 773 struct zfcp_qdio *qdio = adapter->qdio; 774 int req_id 
= req->req_id; 775 776 zfcp_reqlist_add(adapter->req_list, req); 777 778 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); 779 req->issued = get_tod_clock(); 780 if (zfcp_qdio_send(qdio, &req->qdio_req)) { 781 del_timer(&req->timer); 782 /* lookup request again, list might have changed */ 783 zfcp_reqlist_find_rm(adapter->req_list, req_id); 784 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1"); 785 return -EIO; 786 } 787 788 /* 789 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT. 790 * ONLY TOUCH SYNC req AGAIN ON req->completion. 791 * 792 * The request might complete and be freed concurrently at any point 793 * now. This is not protected by the QDIO-lock (req_q_lock). So any 794 * uncontrolled access after this might result in an use-after-free bug. 795 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and 796 * when it is completed via req->completion, is it safe to use req 797 * again. 798 */ 799 800 /* Don't increase for unsolicited status */ 801 if (!is_srb) 802 adapter->fsf_req_seq_no++; 803 adapter->req_no++; 804 805 return 0; 806 } 807 808 /** 809 * zfcp_fsf_status_read - send status read request 810 * @qdio: pointer to struct zfcp_qdio 811 * Returns: 0 on success, ERROR otherwise 812 */ 813 int zfcp_fsf_status_read(struct zfcp_qdio *qdio) 814 { 815 struct zfcp_adapter *adapter = qdio->adapter; 816 struct zfcp_fsf_req *req; 817 struct fsf_status_read_buffer *sr_buf; 818 struct page *page; 819 int retval = -EIO; 820 821 spin_lock_irq(&qdio->req_q_lock); 822 if (zfcp_qdio_sbal_get(qdio)) 823 goto out; 824 825 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 826 SBAL_SFLAGS0_TYPE_STATUS, 827 adapter->pool.status_read_req); 828 if (IS_ERR(req)) { 829 retval = PTR_ERR(req); 830 goto out; 831 } 832 833 page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); 834 if (!page) { 835 retval = -ENOMEM; 836 goto failed_buf; 837 } 838 sr_buf = page_address(page); 839 memset(sr_buf, 0, sizeof(*sr_buf)); 840 req->data = sr_buf; 841 
842 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf)); 843 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 844 845 retval = zfcp_fsf_req_send(req); 846 if (retval) 847 goto failed_req_send; 848 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 849 850 goto out; 851 852 failed_req_send: 853 req->data = NULL; 854 mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); 855 failed_buf: 856 zfcp_dbf_hba_fsf_uss("fssr__1", req); 857 zfcp_fsf_req_free(req); 858 out: 859 spin_unlock_irq(&qdio->req_q_lock); 860 return retval; 861 } 862 863 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) 864 { 865 struct scsi_device *sdev = req->data; 866 struct zfcp_scsi_dev *zfcp_sdev; 867 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; 868 869 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 870 return; 871 872 zfcp_sdev = sdev_to_zfcp(sdev); 873 874 switch (req->qtcb->header.fsf_status) { 875 case FSF_PORT_HANDLE_NOT_VALID: 876 if (fsq->word[0] == fsq->word[1]) { 877 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, 878 "fsafch1"); 879 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 880 } 881 break; 882 case FSF_LUN_HANDLE_NOT_VALID: 883 if (fsq->word[0] == fsq->word[1]) { 884 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2"); 885 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 886 } 887 break; 888 case FSF_FCP_COMMAND_DOES_NOT_EXIST: 889 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; 890 break; 891 case FSF_PORT_BOXED: 892 zfcp_erp_set_port_status(zfcp_sdev->port, 893 ZFCP_STATUS_COMMON_ACCESS_BOXED); 894 zfcp_erp_port_reopen(zfcp_sdev->port, 895 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3"); 896 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 897 break; 898 case FSF_LUN_BOXED: 899 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED); 900 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED, 901 "fsafch4"); 902 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 903 break; 904 case FSF_ADAPTER_STATUS_AVAILABLE: 905 switch (fsq->word[0]) { 906 case 
FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 907 zfcp_fc_test_link(zfcp_sdev->port); 908 /* fall through */ 909 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 910 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 911 break; 912 } 913 break; 914 case FSF_GOOD: 915 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; 916 break; 917 } 918 } 919 920 /** 921 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command 922 * @scmnd: The SCSI command to abort 923 * Returns: pointer to struct zfcp_fsf_req 924 */ 925 926 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) 927 { 928 struct zfcp_fsf_req *req = NULL; 929 struct scsi_device *sdev = scmnd->device; 930 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 931 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 932 unsigned long old_req_id = (unsigned long) scmnd->host_scribble; 933 934 spin_lock_irq(&qdio->req_q_lock); 935 if (zfcp_qdio_sbal_get(qdio)) 936 goto out; 937 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 938 SBAL_SFLAGS0_TYPE_READ, 939 qdio->adapter->pool.scsi_abort); 940 if (IS_ERR(req)) { 941 req = NULL; 942 goto out; 943 } 944 945 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 946 ZFCP_STATUS_COMMON_UNBLOCKED))) 947 goto out_error_free; 948 949 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 950 951 req->data = sdev; 952 req->handler = zfcp_fsf_abort_fcp_command_handler; 953 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 954 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 955 req->qtcb->bottom.support.req_handle = (u64) old_req_id; 956 957 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 958 if (!zfcp_fsf_req_send(req)) { 959 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! 
*/ 960 goto out; 961 } 962 963 out_error_free: 964 zfcp_fsf_req_free(req); 965 req = NULL; 966 out: 967 spin_unlock_irq(&qdio->req_q_lock); 968 return req; 969 } 970 971 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) 972 { 973 struct zfcp_adapter *adapter = req->adapter; 974 struct zfcp_fsf_ct_els *ct = req->data; 975 struct fsf_qtcb_header *header = &req->qtcb->header; 976 977 ct->status = -EINVAL; 978 979 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 980 goto skip_fsfstatus; 981 982 switch (header->fsf_status) { 983 case FSF_GOOD: 984 ct->status = 0; 985 zfcp_dbf_san_res("fsscth2", req); 986 break; 987 case FSF_SERVICE_CLASS_NOT_SUPPORTED: 988 zfcp_fsf_class_not_supp(req); 989 break; 990 case FSF_ADAPTER_STATUS_AVAILABLE: 991 switch (header->fsf_status_qual.word[0]){ 992 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: 993 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: 994 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 995 break; 996 } 997 break; 998 case FSF_PORT_BOXED: 999 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1000 break; 1001 case FSF_PORT_HANDLE_NOT_VALID: 1002 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1"); 1003 /* fall through */ 1004 case FSF_GENERIC_COMMAND_REJECTED: 1005 case FSF_PAYLOAD_SIZE_MISMATCH: 1006 case FSF_REQUEST_SIZE_TOO_LARGE: 1007 case FSF_RESPONSE_SIZE_TOO_LARGE: 1008 case FSF_SBAL_MISMATCH: 1009 req->status |= ZFCP_STATUS_FSFREQ_ERROR; 1010 break; 1011 } 1012 1013 skip_fsfstatus: 1014 if (ct->handler) 1015 ct->handler(ct->handler_data); 1016 } 1017 1018 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio, 1019 struct zfcp_qdio_req *q_req, 1020 struct scatterlist *sg_req, 1021 struct scatterlist *sg_resp) 1022 { 1023 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length); 1024 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length); 1025 zfcp_qdio_set_sbale_last(qdio, q_req); 1026 } 1027 1028 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, 1029 struct scatterlist *sg_req, 1030 struct 
scatterlist *sg_resp)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb *qtcb = req->qtcb;
	u32 feat = adapter->adapter_features;

	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
		qtcb->bottom.support.req_buf_length =
			zfcp_qdio_real_bytes(sg_req);
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;
		qtcb->bottom.support.resp_buf_length =
			zfcp_qdio_real_bytes(sg_resp);

		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
		zfcp_qdio_set_scount(qdio, &req->qdio_req);
		return 0;
	}

	/* use single, unchained SBAL if it can hold the request */
	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
						sg_req, sg_resp);
		return 0;
	}

	/* chained SBALs are needed; only usable if the adapter supports it */
	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
		return -EOPNOTSUPP;

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
		return -EIO;

	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
		return -EIO;

	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return 0;
}

/* Common setup for CT/GS and ELS requests: map the buffers onto SBALs,
 * set service class and hardware timeout, and arm the software timer
 * slightly (10s) after the hardware timeout. */
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
				 struct scatterlist *sg_req,
				 struct scatterlist *sg_resp,
				 unsigned int timeout)
{
	int ret;

	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
	if (ret)
		return ret;

	/* common settings for ct/gs and els requests */
	if (timeout > 255)
		timeout = 255; /* max value accepted by hardware */
	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
	req->qtcb->bottom.support.timeout = timeout;
	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);

	return 0;
}

/**
 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @wka_port: pointer to zfcp WKA port to send CT/GS to
 * @ct: pointer to struct zfcp_send_ct with data for request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
		     unsigned int timeout)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	ct->d_id = wka_port->d_id;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT!
*/

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

/* Completion handler for ELS requests sent via zfcp_fsf_send_els();
 * maps the FSF status to send_els->status and invokes the caller's
 * handler, if set. */
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fsf_ct_els *send_els = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		send_els->status = 0;
		zfcp_dbf_san_res("fsselh1", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]){
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occur, avoided in zfcp_fsf_send_els */
		/* fall through */
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}

/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to zfcp adapter
 * @d_id: N_Port_ID to send ELS to
 * @els: pointer to struct zfcp_send_els with data for the command
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = adapter->qdio;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;

	/* without multi-buffer support, limit to 2 SBALs (avoids the
	 * FSF_SBAL_MISMATCH case in the handler) */
	if (!zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

	if (ret)
		goto failed_send;

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
	els->d_id = d_id;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

/**
 * zfcp_fsf_exchange_config_data - send exchange config data request during ERP
 * @erp_action: ERP action that owns this request
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}


/**
 * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Config Data was successful, @data is complete
 * * -EIO	- Exchange Config Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	req->handler = zfcp_fsf_exchange_config_data_handler;

	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion.
*/
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Port Data was successful, @data is complete
 * * -EIO	- Exchange Port Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 * * -EOPNOTSUPP	- This operation is not supported
 */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion.
		 */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/* Completion handler for open port requests: on FSF_GOOD it stores the
 * port handle, marks the port open, and evaluates the returned PLOGI
 * payload; drops the device reference taken in zfcp_fsf_open_port(). */
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* no zfcp_fc_test_link() with failed open port */
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
			      &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side.
Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
		if (req->qtcb->bottom.support.els1_length >=
		    FSF_PLOGI_MIN_LEN)
			zfcp_fc_plogi_evaluate(port, plogi);
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->dev);
}

/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_port_handler;
	hton24(req->qtcb->bottom.support.d_id, port->d_id);
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;
	/* reference for the handler; dropped again on send failure below */
	get_device(&port->dev);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
		put_device(&port->dev);
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/* Completion handler for close port requests: clears the port's OPEN
 * status on success, triggers adapter recovery on a stale handle. */
static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
		break;
	}
}

/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/* Completion handler for open WKA port requests: records the port handle
 * and ONLINE/OFFLINE state, then wakes waiters on completion_wq. */
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through */
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		wka_port->handle = header->port_handle;
		/* fall through */
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	}
out:
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_fc_wka_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

req->handler = zfcp_fsf_open_wka_port_handler;
	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
	req->data = wka_port;

	/* remember the ID for tracing; req must not be touched after send */
	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
	return retval;
}

/* Completion handler for close WKA port requests: marks the WKA port
 * OFFLINE in any case and wakes waiters on completion_wq. */
static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	req->qtcb->header.port_handle = wka_port->handle;

	/* remember the ID for tracing; req must not be touched after send */
	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
	return retval;
}

/* Completion handler for close physical port requests: clears the
 * physical-open state of the port and the OPEN state of all of its
 * LUNs; triggers recovery for stale handles and boxed ports. */
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		break;
	}
}

/**
 * zfcp_fsf_close_physical_port - close physical port
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success
 */
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = erp_action->port;
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_close_physical_port_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT!
*/
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/* Completion handler for open LUN requests: on FSF_GOOD it stores the
 * LUN handle and marks the LUN open; sharing violations and exhausted
 * handles mark the LUN as failed for ERP. */
static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	union fsf_status_qual *qual = &header->fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
		      &zfcp_sdev->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		/* fall through */
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		if (qual->word[0])
			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
				 "LUN 0x%016Lx on port 0x%016Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 zfcp_scsi_dev_lun(sdev),
				 (unsigned long long)zfcp_sdev->port->wwpn,
				 qual->fsf_queue_designator.cssid,
				 qual->fsf_queue_designator.hla);
		zfcp_erp_set_lun_status(sdev,
					ZFCP_STATUS_COMMON_ERP_FAILED |
					ZFCP_STATUS_COMMON_ACCESS_DENIED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
			 (unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		/* fall through */
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	/* without NPIV, ask the channel not to box the LUN on conflicts */
	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/* Completion handler for close LUN requests: clears the LUN's OPEN
 * status on success; stale handles or boxed ports trigger recovery. */
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_close_LUN - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req =
zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 2061 SBAL_SFLAGS0_TYPE_READ, 2062 qdio->adapter->pool.erp_req); 2063 2064 if (IS_ERR(req)) { 2065 retval = PTR_ERR(req); 2066 goto out; 2067 } 2068 2069 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2070 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2071 2072 req->qtcb->header.port_handle = erp_action->port->handle; 2073 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2074 req->handler = zfcp_fsf_close_lun_handler; 2075 req->data = erp_action->sdev; 2076 req->erp_action = erp_action; 2077 erp_action->fsf_req_id = req->req_id; 2078 2079 zfcp_fsf_start_erp_timer(req); 2080 retval = zfcp_fsf_req_send(req); 2081 if (retval) { 2082 zfcp_fsf_req_free(req); 2083 erp_action->fsf_req_id = 0; 2084 } 2085 /* NOTE: DO NOT TOUCH req PAST THIS POINT! */ 2086 out: 2087 spin_unlock_irq(&qdio->req_q_lock); 2088 return retval; 2089 } 2090 2091 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat) 2092 { 2093 lat_rec->sum += lat; 2094 lat_rec->min = min(lat_rec->min, lat); 2095 lat_rec->max = max(lat_rec->max, lat); 2096 } 2097 2098 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) 2099 { 2100 struct fsf_qual_latency_info *lat_in; 2101 struct zfcp_latency_cont *lat = NULL; 2102 struct zfcp_scsi_dev *zfcp_sdev; 2103 struct zfcp_blk_drv_data blktrc; 2104 int ticks = req->adapter->timer_ticks; 2105 2106 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info; 2107 2108 blktrc.flags = 0; 2109 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC; 2110 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) 2111 blktrc.flags |= ZFCP_BLK_REQ_ERROR; 2112 blktrc.inb_usage = 0; 2113 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; 2114 2115 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && 2116 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { 2117 zfcp_sdev = sdev_to_zfcp(scsi->device); 2118 blktrc.flags |= ZFCP_BLK_LAT_VALID; 2119 blktrc.channel_lat = lat_in->channel_lat * ticks; 2120 
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		/* pick the read/write/cmd accumulator by FSF data direction */
		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}

/**
 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
 * @req: Pointer to FSF request.
 * @sdev: Pointer to SCSI device as request context.
 *
 * Evaluates the FSF status shared by FCP command and task-management
 * responses and triggers the appropriate recovery: stale handles reopen
 * adapter or port, protocol violations shut the adapter down, boxed
 * port/LUN states are recorded and reopened.  Every non-good status also
 * sets ZFCP_STATUS_FSFREQ_ERROR on the request.
 */
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
					struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	/* already failed below FSF level; nothing to evaluate here */
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		/* adapter-level handle problem: full adapter recovery */
		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		/* LUN-level handle problem: recover the port */
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		/* we sent a bad data direction: fatal, shut the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		/* malformed FCP_CMND length: fatal, shut the adapter down */
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect FCP_CMND length %d, FCP device closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}

/*
 * Completion handler for an FCP command request: evaluate the FSF and
 * FCP response, fill in the SCSI midlayer result, and complete the
 * command via scsi_done.  Runs entirely under the adapter's abort_lock
 * to serialize against command abort (see comment at the end).
 */
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		/* command was already aborted; nothing left to complete */
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req, scpnt->device);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
2271 */ 2272 read_unlock_irqrestore(&req->adapter->abort_lock, flags); 2273 } 2274 2275 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir) 2276 { 2277 switch (scsi_get_prot_op(scsi_cmnd)) { 2278 case SCSI_PROT_NORMAL: 2279 switch (scsi_cmnd->sc_data_direction) { 2280 case DMA_NONE: 2281 *data_dir = FSF_DATADIR_CMND; 2282 break; 2283 case DMA_FROM_DEVICE: 2284 *data_dir = FSF_DATADIR_READ; 2285 break; 2286 case DMA_TO_DEVICE: 2287 *data_dir = FSF_DATADIR_WRITE; 2288 break; 2289 case DMA_BIDIRECTIONAL: 2290 return -EINVAL; 2291 } 2292 break; 2293 2294 case SCSI_PROT_READ_STRIP: 2295 *data_dir = FSF_DATADIR_DIF_READ_STRIP; 2296 break; 2297 case SCSI_PROT_WRITE_INSERT: 2298 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT; 2299 break; 2300 case SCSI_PROT_READ_PASS: 2301 *data_dir = FSF_DATADIR_DIF_READ_CONVERT; 2302 break; 2303 case SCSI_PROT_WRITE_PASS: 2304 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT; 2305 break; 2306 default: 2307 return -EINVAL; 2308 } 2309 2310 return 0; 2311 } 2312 2313 /** 2314 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command) 2315 * @scsi_cmnd: scsi command to be sent 2316 */ 2317 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) 2318 { 2319 struct zfcp_fsf_req *req; 2320 struct fcp_cmnd *fcp_cmnd; 2321 u8 sbtype = SBAL_SFLAGS0_TYPE_READ; 2322 int retval = -EIO; 2323 struct scsi_device *sdev = scsi_cmnd->device; 2324 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2325 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 2326 struct zfcp_qdio *qdio = adapter->qdio; 2327 struct fsf_qtcb_bottom_io *io; 2328 unsigned long flags; 2329 2330 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2331 ZFCP_STATUS_COMMON_UNBLOCKED))) 2332 return -EBUSY; 2333 2334 spin_lock_irqsave(&qdio->req_q_lock, flags); 2335 if (atomic_read(&qdio->req_q_free) <= 0) { 2336 atomic_inc(&qdio->req_q_full); 2337 goto out; 2338 } 2339 2340 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2341 sbtype = SBAL_SFLAGS0_TYPE_WRITE; 2342 
2343 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2344 sbtype, adapter->pool.scsi_req); 2345 2346 if (IS_ERR(req)) { 2347 retval = PTR_ERR(req); 2348 goto out; 2349 } 2350 2351 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; 2352 2353 io = &req->qtcb->bottom.io; 2354 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 2355 req->data = scsi_cmnd; 2356 req->handler = zfcp_fsf_fcp_cmnd_handler; 2357 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2358 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2359 io->service_class = FSF_CLASS_3; 2360 io->fcp_cmnd_length = FCP_CMND_LEN; 2361 2362 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) { 2363 io->data_block_length = scsi_cmnd->device->sector_size; 2364 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; 2365 } 2366 2367 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) 2368 goto failed_scsi_cmnd; 2369 2370 BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE); 2371 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2372 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); 2373 2374 if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) && 2375 scsi_prot_sg_count(scsi_cmnd)) { 2376 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 2377 scsi_prot_sg_count(scsi_cmnd)); 2378 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2379 scsi_prot_sglist(scsi_cmnd)); 2380 if (retval) 2381 goto failed_scsi_cmnd; 2382 io->prot_data_length = zfcp_qdio_real_bytes( 2383 scsi_prot_sglist(scsi_cmnd)); 2384 } 2385 2386 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2387 scsi_sglist(scsi_cmnd)); 2388 if (unlikely(retval)) 2389 goto failed_scsi_cmnd; 2390 2391 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 2392 if (zfcp_adapter_multi_buffer_active(adapter)) 2393 zfcp_qdio_set_scount(qdio, &req->qdio_req); 2394 2395 retval = zfcp_fsf_req_send(req); 2396 if (unlikely(retval)) 2397 goto failed_scsi_cmnd; 2398 /* NOTE: DO NOT TOUCH req PAST THIS POINT! 
*/ 2399 2400 goto out; 2401 2402 failed_scsi_cmnd: 2403 zfcp_fsf_req_free(req); 2404 scsi_cmnd->host_scribble = NULL; 2405 out: 2406 spin_unlock_irqrestore(&qdio->req_q_lock, flags); 2407 return retval; 2408 } 2409 2410 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) 2411 { 2412 struct scsi_device *sdev = req->data; 2413 struct fcp_resp_with_ext *fcp_rsp; 2414 struct fcp_resp_rsp_info *rsp_info; 2415 2416 zfcp_fsf_fcp_handler_common(req, sdev); 2417 2418 fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu; 2419 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1]; 2420 2421 if ((rsp_info->rsp_code != FCP_TMF_CMPL) || 2422 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) 2423 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; 2424 } 2425 2426 /** 2427 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF). 2428 * @sdev: Pointer to SCSI device to send the task management command to. 2429 * @tm_flags: Unsigned byte for task management flags. 2430 * 2431 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise. 
2432 */ 2433 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev, 2434 u8 tm_flags) 2435 { 2436 struct zfcp_fsf_req *req = NULL; 2437 struct fcp_cmnd *fcp_cmnd; 2438 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2439 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio; 2440 2441 if (unlikely(!(atomic_read(&zfcp_sdev->status) & 2442 ZFCP_STATUS_COMMON_UNBLOCKED))) 2443 return NULL; 2444 2445 spin_lock_irq(&qdio->req_q_lock); 2446 if (zfcp_qdio_sbal_get(qdio)) 2447 goto out; 2448 2449 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2450 SBAL_SFLAGS0_TYPE_WRITE, 2451 qdio->adapter->pool.scsi_req); 2452 2453 if (IS_ERR(req)) { 2454 req = NULL; 2455 goto out; 2456 } 2457 2458 req->data = sdev; 2459 2460 req->handler = zfcp_fsf_fcp_task_mgmt_handler; 2461 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; 2462 req->qtcb->header.port_handle = zfcp_sdev->port->handle; 2463 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; 2464 req->qtcb->bottom.io.service_class = FSF_CLASS_3; 2465 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; 2466 2467 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); 2468 2469 fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu; 2470 zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags); 2471 2472 zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT); 2473 if (!zfcp_fsf_req_send(req)) { 2474 /* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! 
*/ 2475 goto out; 2476 } 2477 2478 zfcp_fsf_req_free(req); 2479 req = NULL; 2480 out: 2481 spin_unlock_irq(&qdio->req_q_lock); 2482 return req; 2483 } 2484 2485 /** 2486 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO 2487 * @qdio: pointer to struct zfcp_qdio 2488 * @sbal_idx: response queue index of SBAL to be processed 2489 */ 2490 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx) 2491 { 2492 struct zfcp_adapter *adapter = qdio->adapter; 2493 struct qdio_buffer *sbal = qdio->res_q[sbal_idx]; 2494 struct qdio_buffer_element *sbale; 2495 struct zfcp_fsf_req *fsf_req; 2496 unsigned long req_id; 2497 int idx; 2498 2499 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) { 2500 2501 sbale = &sbal->element[idx]; 2502 req_id = (unsigned long) sbale->addr; 2503 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id); 2504 2505 if (!fsf_req) { 2506 /* 2507 * Unknown request means that we have potentially memory 2508 * corruption and must stop the machine immediately. 2509 */ 2510 zfcp_qdio_siosl(adapter); 2511 panic("error: unknown req_id (%lx) on adapter %s.\n", 2512 req_id, dev_name(&adapter->ccw_device->dev)); 2513 } 2514 2515 zfcp_fsf_req_complete(fsf_req); 2516 2517 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY)) 2518 break; 2519 } 2520 } 2521