// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Implementation of FSF commands.
 *
 * Copyright IBM Corp. 2002, 2018
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"

/* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
#define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
/* timeout for: exchange config/port data outside ERP, or open/close WKA port */
#define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)

struct kmem_cache *zfcp_fsf_qtcb_cache;

static bool ber_stop = true;
module_param(ber_stop, bool, 0600);
MODULE_PARM_DESC(ber_stop,
		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");

static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
{
	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
	struct zfcp_adapter *adapter = fsf_req->adapter;

	zfcp_qdio_siosl(adapter);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"fsrth_1");
}

static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
				 unsigned long timeout)
{
	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
	fsf_req->timer.expires = jiffies + timeout;
	add_timer(&fsf_req->timer);
}

static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
	BUG_ON(!fsf_req->erp_action);
	fsf_req->timer.function = zfcp_erp_timeout_handler;
	fsf_req->timer.expires = jiffies + 30 * HZ;
	add_timer(&fsf_req->timer);
}

/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};

static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
		"operational because of an unsupported FC class\n");
	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_free - free memory used by fsf request
 * @req: pointer to struct zfcp_fsf_req
 */
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
	if (likely(req->pool)) {
		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
		mempool_free(req, req->pool);
		return;
	}

	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
	kfree(req);
}

static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
	unsigned long flags;
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	int d_id = ntoh24(sr_buf->d_id);

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->d_id == d_id) {
			zfcp_erp_port_reopen(port, 0, "fssrpc1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	zfcp_scsi_schedule_rports_block(adapter);

	if (!link_down)
		goto out;

	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}

static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct fsf_link_down_info *ldi =
		(struct fsf_link_down_info *) &sr_buf->payload;

	switch (sr_buf->status_subtype) {
	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
	case FSF_STATUS_READ_SUB_FDISC_FAILED:
		zfcp_fsf_link_down_info_eval(req, ldi);
		break;
	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
		zfcp_fsf_link_down_info_eval(req, NULL);
	}
}

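/*
 * Handler for unsolicited status: dispatch one status read buffer by
 * status_type, then free the buffer and bump stat_miss/stat_work so that
 * the pool of pending status read requests gets replenished elsewhere in
 * the driver.
 */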
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_uss("fssrh_4", req);

	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		zfcp_dbf_hba_bit_err("fssrh_3", req);
		if (ber_stop) {
			dev_warn(&adapter->ccw_device->dev,
				 "All paths over this FCP device are disused because of excessive bit errors\n");
			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
		} else {
			dev_warn(&adapter->ccw_device->dev,
				 "The error threshold for checksum statistics has been exceeded\n");
		}
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2");
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);

		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			zfcp_fc_conditional_port_scan(adapter);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	}

	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
	zfcp_fsf_req_free(req);

	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}

static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		return;
	case FSF_SQ_COMMAND_ABORTED:
		break;
	case FSF_SQ_NO_RECOM:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_qdio_siosl(req->adapter);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
		break;
	}
	/* all non-return stats set FSFREQ_ERROR */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
{
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_UNKNOWN_COMMAND:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter does not recognize the command 0x%x\n",
			req->qtcb->header.fsf_command);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		zfcp_fsf_fsfstatus_qual_eval(req);
		break;
	}
}

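/*
 * Completed requests are evaluated in two stages (see
 * zfcp_fsf_req_complete()): first the protocol status from the QTCB prefix
 * below, then the FSF status from the QTCB header. Most protocol errors
 * trigger adapter recovery and flag the request as failed.
 */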
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
			  &adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
		/* go through reopen to flush pending requests */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8");
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
	}
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	del_timer(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	if (req->erp_action)
		zfcp_erp_notify(req->erp_action, 0);

	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}

/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	LIST_HEAD(remove_queue);

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
	zfcp_reqlist_move(adapter->req_list, &remove_queue);

	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}

#define ZFCP_FSF_PORTSPEED_1GBIT	(1 << 0)
#define ZFCP_FSF_PORTSPEED_2GBIT	(1 << 1)
#define ZFCP_FSF_PORTSPEED_4GBIT	(1 << 2)
#define ZFCP_FSF_PORTSPEED_10GBIT	(1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT	(1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT	(1 << 5)
#define ZFCP_FSF_PORTSPEED_32GBIT	(1 << 6)
#define ZFCP_FSF_PORTSPEED_64GBIT	(1 << 7)
#define ZFCP_FSF_PORTSPEED_128GBIT	(1 << 8)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)

static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
	u32 fdmi_speed = 0;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
		fdmi_speed |= FC_PORTSPEED_1GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
		fdmi_speed |= FC_PORTSPEED_2GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
		fdmi_speed |= FC_PORTSPEED_4GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
		fdmi_speed |= FC_PORTSPEED_10GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
		fdmi_speed |= FC_PORTSPEED_8GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
		fdmi_speed |= FC_PORTSPEED_16GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
		fdmi_speed |= FC_PORTSPEED_32GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
		fdmi_speed |= FC_PORTSPEED_64GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
		fdmi_speed |= FC_PORTSPEED_128GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
	return fdmi_speed;
}

static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
	struct zfcp_adapter *adapter = req->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	struct fc_els_flogi *nsp, *plogi;

	/* adjust pointers for missing command code */
	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
					- sizeof(u32));
	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
					- sizeof(u32));

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
					 (u16)FSF_STATUS_READS_RECOM);

	if (fc_host_permanent_port_name(shost) == -1)
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);

	zfcp_scsi_set_prot(adapter);

	/* no error return above here, otherwise must fix call chains */
	/* do not evaluate invalid fields */
	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
		return 0;

	fc_host_port_id(shost) = ntoh24(bottom->s_id);
	fc_host_speed(shost) =
		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);

	adapter->hydra_version = bottom->adapter_type;

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case FSF_TOPO_FABRIC:
		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		else
			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		/* fall through */
	default:
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
		return -EIO;
	}

	return 0;
}

static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
			return;
		}
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
			  &adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		fc_host_node_name(shost) = 0;
		fc_host_port_name(shost) = 0;
		fc_host_port_id(shost) = 0;
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
		adapter->hydra_version = 0;

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
			  &adapter->status);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
		adapter->hardware_version = bottom->hardware_version;
		memcpy(fc_host_serial_number(shost), bottom->serial_number,
		       min(FC_SERIAL_NUMBER_SIZE, 17));
		EBCASC(fc_host_serial_number(shost),
		       min(FC_SERIAL_NUMBER_SIZE, 17));
	}

	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
	}
}

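/*
 * Copy the exchange-port-data result into the fc_host transport attributes.
 * In NPIV mode the physical port WWPN reported by the channel becomes the
 * permanent port name; otherwise the (virtual) port name is reused.
 */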
static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
		fc_host_permanent_port_name(shost) = bottom->wwpn;
	} else
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
	fc_host_supported_speeds(shost) =
		zfcp_fsf_convert_portspeed(bottom->supported_speed);
	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
	       FC_FC4_LIST_SIZE);
	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
	       FC_FC4_LIST_SIZE);
}

static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb *qtcb = req->qtcb;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;

		zfcp_fsf_exchange_port_evaluate(req);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	}
}

static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
	struct zfcp_fsf_req *req;

	if (likely(pool))
		req = mempool_alloc(pool, GFP_ATOMIC);
	else
		req = kmalloc(sizeof(*req), GFP_ATOMIC);

	if (unlikely(!req))
		return NULL;

	memset(req, 0, sizeof(*req));
	req->pool = pool;
	return req;
}

static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
{
	struct fsf_qtcb *qtcb;

	if (likely(pool))
		qtcb = mempool_alloc(pool, GFP_ATOMIC);
	else
		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);

	if (unlikely(!qtcb))
		return NULL;

	memset(qtcb, 0, sizeof(*qtcb));
	return qtcb;
}

static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
						u32 fsf_cmd, u8 sbtype,
						mempool_t *pool)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);

	if (unlikely(!req))
		return ERR_PTR(-ENOMEM);

	if (adapter->req_no == 0)
		adapter->req_no++;

	INIT_LIST_HEAD(&req->list);
	timer_setup(&req->timer, NULL, 0);
	init_completion(&req->completion);

	req->adapter = adapter;
	req->req_id = adapter->req_no;

	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
		if (likely(pool))
			req->qtcb = zfcp_fsf_qtcb_alloc(
				adapter->pool.qtcb_pool);
		else
			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);

		if (unlikely(!req->qtcb)) {
			zfcp_fsf_req_free(req);
			return ERR_PTR(-ENOMEM);
		}

		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
		req->qtcb->prefix.req_id = req->req_id;
		req->qtcb->prefix.ulp_info = 26;
		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
		req->qtcb->header.req_handle = req->req_id;
		req->qtcb->header.fsf_command = fsf_cmd;
	}

	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
			   req->qtcb, sizeof(struct fsf_qtcb));

	return req;
}

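/*
 * Add the request to the adapter's request list (keyed by req_id) and hand
 * it to the QDIO request queue. Once zfcp_qdio_send() succeeds, the request
 * is owned by the channel and may complete, and be freed, at any time; see
 * the note further down.
 */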
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	int req_id = req->req_id;

	zfcp_reqlist_add(adapter->req_list, req);

	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
	req->issued = get_tod_clock();
	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
		del_timer(&req->timer);
		/* lookup request again, list might have changed */
		zfcp_reqlist_find_rm(adapter->req_list, req_id);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
		return -EIO;
	}

	/*
	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
	 *
	 * The request might complete and be freed concurrently at any point
	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
	 * uncontrolled access after this might result in an use-after-free bug.
	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
	 * when it is completed via req->completion, is it safe to use req
	 * again.
	 */

	/* Don't increase for unsolicited status */
	if (!is_srb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}

/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct page *page;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
				  SBAL_SFLAGS0_TYPE_STATUS,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
	if (!page) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;

	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_req_send:
	req->data = NULL;
	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
	zfcp_dbf_hba_fsf_uss("fssr__1", req);
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
						"fsafch1");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fsafch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (fsq->word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
		break;
	}
}

/**
 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
 * @scmnd: The SCSI command to abort
 * Returns: pointer to struct zfcp_fsf_req
 */
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
	struct zfcp_fsf_req *req = NULL;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = sdev;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}

static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_fsf_ct_els *ct = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	ct->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		ct->status = 0;
		zfcp_dbf_san_res("fsscth2", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_PORT_BOXED:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
		/* fall through */
	case FSF_GENERIC_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
	case FSF_SBAL_MISMATCH:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

skip_fsfstatus:
	if (ct->handler)
		ct->handler(ct->handler_data);
}

static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
					    struct zfcp_qdio_req *q_req,
					    struct scatterlist *sg_req,
					    struct scatterlist *sg_resp)
{
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
	zfcp_qdio_set_sbale_last(qdio, q_req);
}

static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
				       struct scatterlist *sg_req,
				       struct scatterlist *sg_resp)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb *qtcb = req->qtcb;
	u32 feat = adapter->adapter_features;

	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
		qtcb->bottom.support.req_buf_length =
			zfcp_qdio_real_bytes(sg_req);
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;
		qtcb->bottom.support.resp_buf_length =
			zfcp_qdio_real_bytes(sg_resp);

		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
		zfcp_qdio_set_scount(qdio, &req->qdio_req);
		return 0;
	}

	/* use single, unchained SBAL if it can hold the request */
	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
						sg_req, sg_resp);
		return 0;
	}

	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
		return -EOPNOTSUPP;

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
		return -EIO;

	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
		return -EIO;

	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return 0;
}

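/*
 * Common setup for CT/GS and ELS requests: map the request and response
 * scatterlists onto SBALs and program the timeouts. The FSF timeout field
 * only accepts values up to 255 seconds; the driver's own timer is armed
 * 10 seconds beyond that, presumably so the channel can report its own
 * timeout first.
 */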
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
				 struct scatterlist *sg_req,
				 struct scatterlist *sg_resp,
				 unsigned int timeout)
{
	int ret;

	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
	if (ret)
		return ret;

	/* common settings for ct/gs and els requests */
	if (timeout > 255)
		timeout = 255; /* max value accepted by hardware */
	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
	req->qtcb->bottom.support.timeout = timeout;
	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);

	return 0;
}

/**
 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @wka_port: pointer to zfcp WKA port to send CT/GS to
 * @ct: pointer to struct zfcp_send_ct with data for request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
		     unsigned int timeout)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	ct->d_id = wka_port->d_id;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fsf_ct_els *send_els = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		send_els->status = 0;
		zfcp_dbf_san_res("fsselh1", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occur, avoided in zfcp_fsf_send_els */
		/* fall through */
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}

/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to zfcp adapter
 * @d_id: N_Port_ID to send ELS to
 * @els: pointer to struct zfcp_send_els with data for the command
 * @timeout: timeout that hardware should use, and a later software timeout
 */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = adapter->qdio;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;

	if (!zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

	if (ret)
		goto failed_send;

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
	els->d_id = d_id;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->bottom.config.feature_selection =
		FSF_FEATURE_NOTIFICATION_LOST |
		FSF_FEATURE_UPDATE_ALERT;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Config Data was successful, @data is complete
 * * -EIO	- Exchange Config Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	req->handler = zfcp_fsf_exchange_config_data_handler;

	req->qtcb->bottom.config.feature_selection =
		FSF_FEATURE_NOTIFICATION_LOST |
		FSF_FEATURE_UPDATE_ALERT;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
 * @qdio: pointer to the QDIO-Queue to use for sending the command.
 * @data: pointer to the QTCB-Bottom for storing the result of the command,
 *	  might be %NULL.
 *
 * Returns:
 * * 0		- Exchange Port Data was successful, @data is complete
 * * -EIO	- Exchange Port Data was not successful, @data is invalid
 * * -EAGAIN	- @data contains incomplete data
 * * -ENOMEM	- Some memory allocation failed along the way
 * * -EOPNOTSUPP	- This operation is not supported
 */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval) {
		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
		wait_for_completion(&req->completion);

		if (req->status &
		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
			retval = -EIO;
		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
			retval = -EAGAIN;
	}

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* no zfcp_fc_test_link() with failed open port */
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN |
			  ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
			      &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
		if (req->qtcb->bottom.support.els1_length >=
		    FSF_PLOGI_MIN_LEN)
			zfcp_fc_plogi_evaluate(port, plogi);
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->dev);
}

/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_port_handler;
	hton24(req->qtcb->bottom.support.d_id, port->d_id);
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;
	get_device(&port->dev);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
		put_device(&port->dev);
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
		break;
	}
}

/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

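/*
 * The WKA (well-known address) port helpers below address fabric services
 * such as the directory/name server rather than regular target ports; they
 * track their state in struct zfcp_fc_wka_port and wake waiters on
 * completion_wq instead of going through ERP.
 */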
static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through */
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		wka_port->handle = header->port_handle;
		/* fall through */
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	}
out:
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_fc_wka_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_wka_port_handler;
	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
	req->data = wka_port;

	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
	return retval;
}

static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	unsigned long req_id = 0;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	req->qtcb->header.port_handle = wka_port->handle;

	req_id = req->req_id;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
	return retval;
}

static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
					      &sdev_to_zfcp(sdev)->status);
		break;
	}
}

/**
 * zfcp_fsf_close_physical_port - close physical port
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success
 */
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = erp_action->port;
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_close_physical_port_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
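
/*
 * Added note: the ERP-initiated requests in this file (close physical port,
 * open/close LUN and their port counterparts) all follow the same send
 * pattern: take req_q_lock, reserve an SBAL, allocate the request from the
 * erp_req mempool, link it to the erp_action via fsf_req_id, start the ERP
 * timer and send; if sending fails, the request is freed and fsf_req_id is
 * cleared again so the ERP thread does not wait for a completion that will
 * never arrive.
 */
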
static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	union fsf_status_qual *qual = &header->fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
		      ZFCP_STATUS_COMMON_ACCESS_BOXED,
		      &zfcp_sdev->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		/* fall through */
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		if (qual->word[0])
			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
				 "LUN 0x%016Lx on port 0x%016Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 zfcp_scsi_dev_lun(sdev),
				 (unsigned long long)zfcp_sdev->port->wwpn,
				 qual->fsf_queue_designator.cssid,
				 qual->fsf_queue_designator.hla);
		zfcp_erp_set_lun_status(sdev,
					ZFCP_STATUS_COMMON_ERP_FAILED |
					ZFCP_STATUS_COMMON_ACCESS_DENIED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
			 (unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		/* fall through */
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}
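
/*
 * Added note: on FSF_GOOD the handler above caches the firmware-assigned
 * lun_handle in the zfcp_scsi_dev and marks it ZFCP_STATUS_COMMON_OPEN;
 * zfcp_fsf_fcp_cmnd() and zfcp_fsf_fcp_task_mgmt() below copy exactly this
 * handle (together with the port handle) into the QTCB header of every
 * FCP request for that SCSI device.
 */
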
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_close_lun - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->handler = zfcp_fsf_close_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
{
	lat_rec->sum += lat;
	lat_rec->min = min(lat_rec->min, lat);
	lat_rec->max = max(lat_rec->max, lat);
}

static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct zfcp_latency_cont *lat = NULL;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = 0;
	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		zfcp_sdev = sdev_to_zfcp(scsi->device);
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}
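
/*
 * Added note: zfcp_fsf_update_lat() only accumulates sum/min/max per latency
 * record; the sample counter lives in the surrounding zfcp_latency_cont. A
 * consumer would therefore derive an average roughly like the hypothetical
 * helper below - shown only to illustrate the bookkeeping, not code taken
 * from this driver:
 *
 *	static u64 zfcp_channel_lat_avg(const struct zfcp_latency_cont *cont)
 *	{
 *		return cont->counter ? cont->channel.sum / cont->counter : 0;
 *	}
 */
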
/**
 * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
 * @req: Pointer to FSF request.
 * @sdev: Pointer to SCSI device as request context.
 */
static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
					struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect FCP_CMND length %d, FCP device closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}
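
/*
 * Added note: both completion handlers below run this common status
 * evaluation first - zfcp_fsf_fcp_cmnd_handler() for regular I/O and
 * zfcp_fsf_fcp_task_mgmt_handler() for task management - before doing
 * their command-specific processing.
 */
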
static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req, scpnt->device);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}

static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_NONE:
			*data_dir = FSF_DATADIR_CMND;
			break;
		case DMA_FROM_DEVICE:
			*data_dir = FSF_DATADIR_READ;
			break;
		case DMA_TO_DEVICE:
			*data_dir = FSF_DATADIR_WRITE;
			break;
		case DMA_BIDIRECTIONAL:
			return -EINVAL;
		}
		break;

	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
 * @scsi_cmnd: scsi command to be sent
 */
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);

	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
	    scsi_prot_sg_count(scsi_cmnd)) {
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;
	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}

static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req, sdev);

	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}

/**
 * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Unsigned byte for task management flags.
 *
 * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
 */
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = sdev;

	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);

	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req)) {
		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
		goto out;
	}

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}
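
/*
 * Illustrative sketch only (not taken from this file): a scsi_eh callback
 * is expected to use the TMF helper above roughly as follows, waiting for
 * the request to complete and checking the TMF result flag; the local
 * variable names are made up for the example.
 *
 *	struct zfcp_fsf_req *fsf_req;
 *	int ret = SUCCESS;
 *
 *	fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
 *	if (!fsf_req)
 *		return FAILED;
 *	wait_for_completion(&fsf_req->completion);
 *	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
 *		ret = FAILED;
 *	zfcp_fsf_req_free(fsf_req);
 *	return ret;
 */
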
/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		req_id = (unsigned long) sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * An unknown request means potential memory
			 * corruption, so we must stop the machine
			 * immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}
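
/*
 * Added note: zfcp_fsf_reqid_check() is the entry point from the QDIO
 * response queue handling in zfcp_qdio.c; it maps each returned SBALE back
 * to its zfcp_fsf_req via the adapter request list and completes it.
 */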