/*
 * zfcp device driver
 *
 * Implementation of FSF commands.
 *
 * Copyright IBM Corp. 2002, 2010
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/blktrace_api.h>
#include <linux/slab.h>
#include <scsi/fc/fc_els.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
#include "zfcp_dbf.h"
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"

struct kmem_cache *zfcp_fsf_qtcb_cache;

static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
	zfcp_qdio_siosl(adapter);
	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
				"fsrth_1");
}

static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
				 unsigned long timeout)
{
	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
	fsf_req->timer.data = (unsigned long) fsf_req->adapter;
	fsf_req->timer.expires = jiffies + timeout;
	add_timer(&fsf_req->timer);
}

static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
	BUG_ON(!fsf_req->erp_action);
	fsf_req->timer.function = zfcp_erp_timeout_handler;
	fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
	fsf_req->timer.expires = jiffies + 30 * HZ;
	add_timer(&fsf_req->timer);
}

/* association between FSF command and FSF QTCB type */
static u32 fsf_qtcb_type[] = {
	[FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
	[FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
	[FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
	[FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
};

static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
{
	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
		"operational because of an unsupported FC class\n");
	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_free - free memory used by fsf request
 * @req: pointer to struct zfcp_fsf_req
 */
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
{
	if (likely(req->pool)) {
		if (likely(req->qtcb))
			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
		mempool_free(req, req->pool);
		return;
	}

	if (likely(req->qtcb))
		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
	kfree(req);
}

static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
{
	unsigned long flags;
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;
	int d_id = ntoh24(sr_buf->d_id);

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->d_id == d_id) {
			zfcp_erp_port_reopen(port, 0, "fssrpc1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}

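/*
 * Evaluate a link-down condition reported by the FCP channel: block the
 * remote ports, warn about the (optional) reason code and mark the
 * adapter as failed for error recovery.
 */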
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
					 struct fsf_link_down_info *link_down)
{
	struct zfcp_adapter *adapter = req->adapter;

	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
		return;

	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);

	zfcp_scsi_schedule_rports_block(adapter);

	if (!link_down)
		goto out;

	switch (link_down->error_code) {
	case FSF_PSQ_LINK_NO_LIGHT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is no light signal from the local "
			 "fibre channel cable\n");
		break;
	case FSF_PSQ_LINK_WRAP_PLUG:
		dev_warn(&req->adapter->ccw_device->dev,
			 "There is a wrap plug instead of a fibre "
			 "channel cable\n");
		break;
	case FSF_PSQ_LINK_NO_FCP:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent fibre channel node does not "
			 "support FCP\n");
		break;
	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device is suspended because of a "
			 "firmware update\n");
		break;
	case FSF_PSQ_LINK_INVALID_WWPN:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP device detected a WWPN that is "
			 "duplicate or not valid\n");
		break;
	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The fibre channel fabric does not support NPIV\n");
		break;
	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter cannot support more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The adjacent switch cannot support "
			 "more NPIV ports\n");
		break;
	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The FCP adapter could not log in to the "
			 "fibre channel fabric\n");
		break;
	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The WWPN assignment file on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The mode table on the FCP adapter "
			 "has been damaged\n");
		break;
	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
		dev_warn(&req->adapter->ccw_device->dev,
			 "All NPIV ports on the FCP adapter have "
			 "been assigned\n");
		break;
	default:
		dev_warn(&req->adapter->ccw_device->dev,
			 "The link between the FCP adapter and "
			 "the FC fabric is down\n");
	}
out:
	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
}

static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sr_buf = req->data;
	struct fsf_link_down_info *ldi =
		(struct fsf_link_down_info *) &sr_buf->payload;

	switch (sr_buf->status_subtype) {
	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
		zfcp_fsf_link_down_info_eval(req, ldi);
		break;
	case FSF_STATUS_READ_SUB_FDISC_FAILED:
		zfcp_fsf_link_down_info_eval(req, ldi);
		break;
	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
		zfcp_fsf_link_down_info_eval(req, NULL);
	}
}

static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_status_read_buffer *sr_buf = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
		zfcp_fsf_req_free(req);
		return;
	}

	zfcp_dbf_hba_fsf_uss("fssrh_4", req);

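	/*
	 * Dispatch the unsolicited status notification by type; afterwards
	 * the status read buffer goes back to the mempool and stat_work is
	 * queued to account for the consumed status read request.
	 */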
	switch (sr_buf->status_type) {
	case FSF_STATUS_READ_PORT_CLOSED:
		zfcp_fsf_status_read_port_closed(req);
		break;
	case FSF_STATUS_READ_INCOMING_ELS:
		zfcp_fc_incoming_els(req);
		break;
	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
		break;
	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
		dev_warn(&adapter->ccw_device->dev,
			 "The error threshold for checksum statistics "
			 "has been exceeded\n");
		zfcp_dbf_hba_bit_err("fssrh_3", req);
		break;
	case FSF_STATUS_READ_LINK_DOWN:
		zfcp_fsf_status_read_link_down(req);
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
		break;
	case FSF_STATUS_READ_LINK_UP:
		dev_info(&adapter->ccw_device->dev,
			 "The local link has been restored\n");
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fssrh_2");
		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);

		break;
	case FSF_STATUS_READ_NOTIFICATION_LOST:
		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
			zfcp_fc_conditional_port_scan(adapter);
		break;
	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
		adapter->adapter_features = sr_buf->payload.word[0];
		break;
	}

	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
	zfcp_fsf_req_free(req);

	atomic_inc(&adapter->stat_miss);
	queue_work(adapter->work_queue, &adapter->stat_work);
}

static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
{
	switch (req->qtcb->header.fsf_status_qual.word[0]) {
	case FSF_SQ_FCP_RSP_AVAILABLE:
	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
	case FSF_SQ_NO_RETRY_POSSIBLE:
	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		return;
	case FSF_SQ_COMMAND_ABORTED:
		break;
	case FSF_SQ_NO_RECOM:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter reported a problem "
			"that cannot be recovered\n");
		zfcp_qdio_siosl(req->adapter);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
		break;
	}
	/* all non-return stats set FSFREQ_ERROR */
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
{
	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_UNKNOWN_COMMAND:
		dev_err(&req->adapter->ccw_device->dev,
			"The FCP adapter does not recognize the command 0x%x\n",
			req->qtcb->header.fsf_command);
		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		zfcp_fsf_fsfstatus_qual_eval(req);
		break;
	}
}

static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;

	zfcp_dbf_hba_fsf_response(req);

	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		return;
	}

	switch (qtcb->prefix.prot_status) {
	case FSF_PROT_GOOD:
	case FSF_PROT_FSF_STATUS_PRESENTED:
		return;
	case FSF_PROT_QTCB_VERSION_ERROR:
		dev_err(&adapter->ccw_device->dev,
			"QTCB version 0x%x not supported by FCP adapter "
			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
			psq->word[0], psq->word[1]);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
		break;
	case FSF_PROT_ERROR_STATE:
	case FSF_PROT_SEQ_NUMB_ERROR:
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PROT_UNSUPP_QTCB_TYPE:
		dev_err(&adapter->ccw_device->dev,
			"The QTCB type is not supported by the FCP adapter\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
		break;
	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
				&adapter->status);
		break;
	case FSF_PROT_DUPLICATE_REQUEST_ID:
		dev_err(&adapter->ccw_device->dev,
			"0x%Lx is an ambiguous request identifier\n",
			(unsigned long long)qtcb->bottom.support.req_handle);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
		break;
	case FSF_PROT_LINK_DOWN:
		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
		/* go through reopen to flush pending requests */
		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
		break;
	case FSF_PROT_REEST_QUEUE:
		/* All ports should be marked as ready to run again */
		zfcp_erp_set_adapter_status(adapter,
					    ZFCP_STATUS_COMMON_RUNNING);
		zfcp_erp_adapter_reopen(adapter,
					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
					ZFCP_STATUS_COMMON_ERP_FAILED,
					"fspse_8");
		break;
	default:
		dev_err(&adapter->ccw_device->dev,
			"0x%x is not a valid transfer protocol status\n",
			qtcb->prefix.prot_status);
		zfcp_qdio_siosl(adapter);
		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
	}
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
}

/**
 * zfcp_fsf_req_complete - process completion of a FSF request
 * @req: The FSF request that has been completed.
 *
 * When a request has been completed either from the FCP adapter,
 * or it has been dismissed due to a queue shutdown, this function
 * is called to process the completion status and trigger further
 * events related to the FSF request.
 */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
		zfcp_fsf_status_read_handler(req);
		return;
	}

	del_timer(&req->timer);
	zfcp_fsf_protstatus_eval(req);
	zfcp_fsf_fsfstatus_eval(req);
	req->handler(req);

	if (req->erp_action)
		zfcp_erp_notify(req->erp_action, 0);

	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
		zfcp_fsf_req_free(req);
	else
		complete(&req->completion);
}

/**
 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
 * @adapter: pointer to struct zfcp_adapter
 *
 * Never ever call this without shutting down the adapter first.
 * Otherwise the adapter would continue using and corrupting s390 storage.
 * Included BUG_ON() call to ensure this is done.
 * ERP is supposed to be the only user of this function.
 */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *req, *tmp;
	LIST_HEAD(remove_queue);

	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
	zfcp_reqlist_move(adapter->req_list, &remove_queue);

	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
		list_del(&req->list);
		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(req);
	}
}

#define ZFCP_FSF_PORTSPEED_1GBIT	(1 << 0)
#define ZFCP_FSF_PORTSPEED_2GBIT	(1 << 1)
#define ZFCP_FSF_PORTSPEED_4GBIT	(1 << 2)
#define ZFCP_FSF_PORTSPEED_10GBIT	(1 << 3)
#define ZFCP_FSF_PORTSPEED_8GBIT	(1 << 4)
#define ZFCP_FSF_PORTSPEED_16GBIT	(1 << 5)
#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)

static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
{
	u32 fdmi_speed = 0;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
		fdmi_speed |= FC_PORTSPEED_1GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
		fdmi_speed |= FC_PORTSPEED_2GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
		fdmi_speed |= FC_PORTSPEED_4GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
		fdmi_speed |= FC_PORTSPEED_10GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
		fdmi_speed |= FC_PORTSPEED_8GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
		fdmi_speed |= FC_PORTSPEED_16GBIT;
	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
	return fdmi_speed;
}

static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
	struct zfcp_adapter *adapter = req->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	struct fc_els_flogi *nsp, *plogi;

	/* adjust pointers for missing command code */
	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
					- sizeof(u32));
	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
					- sizeof(u32));

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	fc_host_port_name(shost) = nsp->fl_wwpn;
	fc_host_node_name(shost) = nsp->fl_wwnn;
	fc_host_port_id(shost) = ntoh24(bottom->s_id);
	fc_host_speed(shost) =
		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

	adapter->hydra_version = bottom->adapter_type;
	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
					 (u16)FSF_STATUS_READS_RECOM);

	if (fc_host_permanent_port_name(shost) == -1)
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);

	switch (bottom->fc_topology) {
	case FSF_TOPO_P2P:
		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
		adapter->peer_wwpn = plogi->fl_wwpn;
		adapter->peer_wwnn = plogi->fl_wwnn;
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case FSF_TOPO_FABRIC:
		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
		/* fall through */
	default:
		dev_err(&adapter->ccw_device->dev,
			"Unknown or unsupported arbitrated loop "
			"fibre channel topology detected\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
		return -EIO;
	}

	zfcp_scsi_set_prot(adapter);

	return 0;
}

static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb *qtcb = req->qtcb;
	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	adapter->fsf_lic_version = bottom->lic_version;
	adapter->adapter_features = bottom->adapter_features;
	adapter->connection_features = bottom->connection_features;
	adapter->peer_wwpn = 0;
	adapter->peer_wwnn = 0;
	adapter->peer_d_id = 0;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		if (zfcp_fsf_exchange_config_evaluate(req))
			return;

		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
			dev_err(&adapter->ccw_device->dev,
				"FCP adapter maximum QTCB size (%d bytes) "
				"is too small\n",
				bottom->max_qtcb_size);
			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
			return;
		}
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		fc_host_node_name(shost) = 0;
		fc_host_port_name(shost) = 0;
		fc_host_port_id(shost) = 0;
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
		adapter->hydra_version = 0;

		/* avoids adapter shutdown to be able to recognize
		 * events such as LINK UP */
		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
				&adapter->status);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	default:
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
		return;
	}

	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
		adapter->hardware_version = bottom->hardware_version;
		memcpy(fc_host_serial_number(shost), bottom->serial_number,
		       min(FC_SERIAL_NUMBER_SIZE, 17));
		EBCASC(fc_host_serial_number(shost),
		       min(FC_SERIAL_NUMBER_SIZE, 17));
	}

	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports newer "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
		return;
	}
	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
		dev_err(&adapter->ccw_device->dev,
			"The FCP adapter only supports older "
			"control block versions\n");
		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
	}
}

static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
	struct Scsi_Host *shost = adapter->scsi_host;

	if (req->data)
		memcpy(req->data, bottom, sizeof(*bottom));

	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
		fc_host_permanent_port_name(shost) = bottom->wwpn;
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
	} else
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
	fc_host_supported_speeds(shost) =
		zfcp_fsf_convert_portspeed(bottom->supported_speed);
	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
	       FC_FC4_LIST_SIZE);
	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
	       FC_FC4_LIST_SIZE);
}

static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
{
	struct fsf_qtcb *qtcb = req->qtcb;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (qtcb->header.fsf_status) {
	case FSF_GOOD:
		zfcp_fsf_exchange_port_evaluate(req);
		break;
	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
		zfcp_fsf_exchange_port_evaluate(req);
		zfcp_fsf_link_down_info_eval(req,
			&qtcb->header.fsf_status_qual.link_down_info);
		break;
	}
}

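/*
 * Allocation helpers for FSF requests and QTCBs: callers that pass a
 * mempool get preallocated elements as a fallback under memory pressure,
 * otherwise plain GFP_ATOMIC allocations are used.
 */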
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
{
	struct zfcp_fsf_req *req;

	if (likely(pool))
		req = mempool_alloc(pool, GFP_ATOMIC);
	else
		req = kmalloc(sizeof(*req), GFP_ATOMIC);

	if (unlikely(!req))
		return NULL;

	memset(req, 0, sizeof(*req));
	req->pool = pool;
	return req;
}

static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
{
	struct fsf_qtcb *qtcb;

	if (likely(pool))
		qtcb = mempool_alloc(pool, GFP_ATOMIC);
	else
		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);

	if (unlikely(!qtcb))
		return NULL;

	memset(qtcb, 0, sizeof(*qtcb));
	return qtcb;
}

static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
						u32 fsf_cmd, u8 sbtype,
						mempool_t *pool)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);

	if (unlikely(!req))
		return ERR_PTR(-ENOMEM);

	if (adapter->req_no == 0)
		adapter->req_no++;

	INIT_LIST_HEAD(&req->list);
	init_timer(&req->timer);
	init_completion(&req->completion);

	req->adapter = adapter;
	req->fsf_command = fsf_cmd;
	req->req_id = adapter->req_no;

	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
		if (likely(pool))
			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
		else
			req->qtcb = zfcp_qtcb_alloc(NULL);

		if (unlikely(!req->qtcb)) {
			zfcp_fsf_req_free(req);
			return ERR_PTR(-ENOMEM);
		}

		req->seq_no = adapter->fsf_req_seq_no;
		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
		req->qtcb->prefix.req_id = req->req_id;
		req->qtcb->prefix.ulp_info = 26;
		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
		req->qtcb->header.req_handle = req->req_id;
		req->qtcb->header.fsf_command = req->fsf_command;
	}

	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
			   req->qtcb, sizeof(struct fsf_qtcb));

	return req;
}

static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	int with_qtcb = (req->qtcb != NULL);
	int req_id = req->req_id;

	zfcp_reqlist_add(adapter->req_list, req);

	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
	req->issued = get_tod_clock();
	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
		del_timer(&req->timer);
		/* lookup request again, list might have changed */
		zfcp_reqlist_find_rm(adapter->req_list, req_id);
		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
		return -EIO;
	}

	/* Don't increase for unsolicited status */
	if (with_qtcb)
		adapter->fsf_req_seq_no++;
	adapter->req_no++;

	return 0;
}

/**
 * zfcp_fsf_status_read - send status read request
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, ERROR otherwise
 */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct zfcp_fsf_req *req;
	struct fsf_status_read_buffer *sr_buf;
	struct page *page;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
				  adapter->pool.status_read_req);
	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
	if (!page) {
		retval = -ENOMEM;
		goto failed_buf;
	}
	sr_buf = page_address(page);
	memset(sr_buf, 0, sizeof(*sr_buf));
	req->data = sr_buf;

	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (retval)
		goto failed_req_send;

	goto out;

failed_req_send:
	req->data = NULL;
	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
	zfcp_dbf_hba_fsf_uss("fssr__1", req);
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
						"fsafch1");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		if (fsq->word[0] == fsq->word[1]) {
			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		}
		break;
	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fsafch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (fsq->word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
		break;
	}
}

/**
 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
 * @scmnd: The SCSI command to abort
 * Returns: pointer to struct zfcp_fsf_req
 */
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
{
	struct zfcp_fsf_req *req = NULL;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;
	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.scsi_abort);
	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		goto out_error_free;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = sdev;
	req->handler = zfcp_fsf_abort_fcp_command_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.support.req_handle = (u64) old_req_id;

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

out_error_free:
	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}

static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_fsf_ct_els *ct = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	ct->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		zfcp_dbf_san_res("fsscth2", req);
		ct->status = 0;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_PORT_BOXED:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
		/* fall through */
	case FSF_GENERIC_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
	case FSF_SBAL_MISMATCH:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

skip_fsfstatus:
	if (ct->handler)
		ct->handler(ct->handler_data);
}

static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
					    struct zfcp_qdio_req *q_req,
					    struct scatterlist *sg_req,
					    struct scatterlist *sg_resp)
{
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
	zfcp_qdio_set_sbale_last(qdio, q_req);
}

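/*
 * Map the CT/ELS request and response scatterlists onto SBALs. Three
 * layouts are used: data division for multi-buffer adapters, a single
 * unchained SBAL when each buffer fits into one SBAL entry, or chained
 * SBALs, which require the FSF_FEATURE_ELS_CT_CHAINED_SBALS feature.
 */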
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
				       struct scatterlist *sg_req,
				       struct scatterlist *sg_resp)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb *qtcb = req->qtcb;
	u32 feat = adapter->adapter_features;

	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;

		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       zfcp_qdio_sbale_count(sg_req));
		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
		zfcp_qdio_set_scount(qdio, &req->qdio_req);
		return 0;
	}

	/* use single, unchained SBAL if it can hold the request */
	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
						sg_req, sg_resp);
		return 0;
	}

	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
		return -EOPNOTSUPP;

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
		return -EIO;

	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);

	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
		return -EIO;

	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	return 0;
}

static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
				 struct scatterlist *sg_req,
				 struct scatterlist *sg_resp,
				 unsigned int timeout)
{
	int ret;

	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
	if (ret)
		return ret;

	/* common settings for ct/gs and els requests */
	if (timeout > 255)
		timeout = 255; /* max value accepted by hardware */
	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
	req->qtcb->bottom.support.timeout = timeout;
	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);

	return 0;
}

/**
 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
 * @wka_port: pointer to the well-known-address port to send the request to
 * @ct: pointer to struct zfcp_fsf_ct_els with data for the request
 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 * @timeout: timeout in seconds handed to the FCP channel for this request
 */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
		     unsigned int timeout)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
	if (ret)
		goto failed_send;

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fsf_ct_els *send_els = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	send_els->status = -EINVAL;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto skip_fsfstatus;

	switch (header->fsf_status) {
	case FSF_GOOD:
		zfcp_dbf_san_res("fsselh1", req);
		send_els->status = 0;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_RETRY_IF_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_ELS_COMMAND_REJECTED:
	case FSF_PAYLOAD_SIZE_MISMATCH:
	case FSF_REQUEST_SIZE_TOO_LARGE:
	case FSF_RESPONSE_SIZE_TOO_LARGE:
		break;
	case FSF_SBAL_MISMATCH:
		/* should never occur, avoided in zfcp_fsf_send_els */
		/* fall through */
	default:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
skip_fsfstatus:
	if (send_els->handler)
		send_els->handler(send_els->handler_data);
}

/**
 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
 * @adapter: pointer to the adapter to send the ELS through
 * @d_id: N_Port_ID of the destination port
 * @els: pointer to struct zfcp_fsf_ct_els with data for the command
 * @timeout: timeout in seconds handed to the FCP channel for this request
 */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = adapter->qdio;
	int ret = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;

	if (!zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);

	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);

	if (ret)
		goto failed_send;

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);

	ret = zfcp_fsf_req_send(req);
	if (ret)
		goto failed_send;

	goto out;

failed_send:
	zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return ret;
}

int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_fsf_req *req;
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_exchange_config_data_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
				       struct fsf_qtcb_bottom_config *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
	req->handler = zfcp_fsf_exchange_config_data_handler;

	req->qtcb->bottom.config.feature_selection =
			FSF_FEATURE_NOTIFICATION_LOST |
			FSF_FEATURE_UPDATE_ALERT;

	if (data)
		req->data = data;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);
	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);
	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data - request information about local port
 * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

/**
 * zfcp_fsf_exchange_port_data_sync - request information about local port
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
				     struct fsf_qtcb_bottom_port *data)
{
	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
		return -EOPNOTSUPP;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out_unlock;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
				  SBAL_SFLAGS0_TYPE_READ, NULL);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out_unlock;
	}

	if (data)
		req->data = data;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_exchange_port_data_handler;
	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	spin_unlock_irq(&qdio->req_q_lock);

	if (!retval)
		wait_for_completion(&req->completion);

	zfcp_fsf_req_free(req);

	return retval;

out_unlock:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

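/*
 * Completion handler for "open port": on FSF_GOOD the port handle is
 * stored and the PLOGI payload returned by the channel is evaluated to
 * pick up the peer's service parameters.
 */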
static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct fc_els_flogi *plogi;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		goto out;

	switch (header->fsf_status) {
	case FSF_PORT_ALREADY_OPEN:
		break;
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Not enough FCP adapter resources to open "
			 "remote port 0x%016Lx\n",
			 (unsigned long long)port->wwpn);
		zfcp_erp_set_port_status(port,
					 ZFCP_STATUS_COMMON_ERP_FAILED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
		case FSF_SQ_NO_RETRY_POSSIBLE:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		port->handle = header->port_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
				  ZFCP_STATUS_COMMON_ACCESS_BOXED,
				  &port->status);
		/* check whether D_ID has changed during open */
		/*
		 * FIXME: This check is not airtight, as the FCP channel does
		 * not monitor closures of target port connections caused on
		 * the remote side. Thus, they might miss out on invalidating
		 * locally cached WWPNs (and other N_Port parameters) of gone
		 * target ports. So, our heroic attempt to make things safe
		 * could be undermined by 'open port' response data tagged with
		 * obsolete WWPNs. Another reason to monitor potential
		 * connection closures ourself at least (by interpreting
		 * incoming ELS' and unsolicited status). It just crosses my
		 * mind that one should be able to cross-check by means of
		 * another GID_PN straight after a port has been opened.
		 * Alternately, an ADISC/PDISC ELS should suffice, as well.
		 */
		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
		if (req->qtcb->bottom.support.els1_length >=
		    FSF_PLOGI_MIN_LEN)
			zfcp_fc_plogi_evaluate(port, plogi);
		break;
	case FSF_UNKNOWN_OP_SUBTYPE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}

out:
	put_device(&port->dev);
}

/**
 * zfcp_fsf_open_port - create and send open port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_port *port = erp_action->port;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_port_handler;
	hton24(req->qtcb->bottom.support.d_id, port->d_id);
	req->data = port;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;
	get_device(&port->dev);

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
		put_device(&port->dev);
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		break;
	case FSF_GOOD:
		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
		break;
	}
}

/**
 * zfcp_fsf_close_port - create and send close port request
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_port_handler;
	req->data = erp_action->port;
	req->erp_action = erp_action;
	req->qtcb->header.port_handle = erp_action->port->handle;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		goto out;
	}

	switch (header->fsf_status) {
	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
		dev_warn(&req->adapter->ccw_device->dev,
			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
		/* fall through */
	case FSF_ADAPTER_STATUS_AVAILABLE:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		break;
	case FSF_GOOD:
		wka_port->handle = header->port_handle;
		/* fall through */
	case FSF_PORT_ALREADY_OPEN:
		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
	}
out:
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_open_wka_port - create and send open wka-port request
 * @wka_port: pointer to struct zfcp_fc_wka_port
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_open_wka_port_handler;
	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
	req->data = wka_port;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_fc_wka_port *wka_port = req->data;

	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
	}

	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	wake_up(&wka_port->completion_wq);
}

/**
 * zfcp_fsf_close_wka_port - create and send close wka port request
 * @wka_port: WKA port to close
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->handler = zfcp_fsf_close_wka_port_handler;
	req->data = wka_port;
	req->qtcb->header.port_handle = wka_port->handle;

	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
	retval = zfcp_fsf_req_send(req);
	if (retval)
		zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_port *port = req->data;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	struct scsi_device *sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	switch (header->fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fscpph2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		/* can't use generic zfcp_erp_modify_port_status because
		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
		 */
		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
		shost_for_each_device(sdev, port->adapter->scsi_host)
			if (sdev_to_zfcp(sdev)->port == port)
				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
						  &sdev_to_zfcp(sdev)->status);
		break;
	}
}

/**
 * zfcp_fsf_close_physical_port - close physical port
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success
 */
int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->data = erp_action->port;
	req->qtcb->header.port_handle = erp_action->port->handle;
	req->erp_action = erp_action;
	req->handler = zfcp_fsf_close_physical_port_handler;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
	struct zfcp_adapter *adapter = req->adapter;
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;
	union fsf_status_qual *qual = &header->fsf_status_qual;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
			  &zfcp_sdev->status);

	switch (header->fsf_status) {

	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
		/* fall through */
	case FSF_LUN_ALREADY_OPEN:
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_SHARING_VIOLATION:
		if (qual->word[0])
			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
				 "LUN 0x%Lx on port 0x%Lx is already in "
				 "use by CSS%d, MIF Image ID %x\n",
				 zfcp_scsi_dev_lun(sdev),
				 (unsigned long long)zfcp_sdev->port->wwpn,
				 qual->fsf_queue_designator.cssid,
				 qual->fsf_queue_designator.hla);
		zfcp_erp_set_lun_status(sdev,
					ZFCP_STATUS_COMMON_ERP_FAILED |
					ZFCP_STATUS_COMMON_ACCESS_DENIED);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
		dev_warn(&adapter->ccw_device->dev,
			 "No handle is available for LUN "
			 "0x%016Lx on port 0x%016Lx\n",
			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
			 (unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
		/* fall through */
	case FSF_INVALID_COMMAND_OPTION:
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;

	case FSF_GOOD:
		zfcp_sdev->lun_handle = header->lun_handle;
		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_open_lun - open LUN
 * @erp_action: pointer to struct zfcp_erp_action
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
	req->handler = zfcp_fsf_open_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
	struct scsi_device *sdev = req->data;
	struct zfcp_scsi_dev *zfcp_sdev;

	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (req->qtcb->header.fsf_status) {
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (req->qtcb->header.fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
			zfcp_fc_test_link(zfcp_sdev->port);
			/* fall through */
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_GOOD:
		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
		break;
	}
}

/**
 * zfcp_fsf_close_lun - close LUN
 * @erp_action: pointer to erp_action triggering the "close LUN"
 * Returns: 0 on success, error otherwise
 */
int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
{
	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
	struct zfcp_fsf_req *req;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
				  SBAL_SFLAGS0_TYPE_READ,
				  qdio->adapter->pool.erp_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	req->qtcb->header.port_handle = erp_action->port->handle;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->handler = zfcp_fsf_close_lun_handler;
	req->data = erp_action->sdev;
	req->erp_action = erp_action;
	erp_action->fsf_req_id = req->req_id;

	zfcp_fsf_start_erp_timer(req);
	retval = zfcp_fsf_req_send(req);
	if (retval) {
		zfcp_fsf_req_free(req);
		erp_action->fsf_req_id = 0;
	}
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return retval;
}

static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
{
	lat_rec->sum += lat;
	lat_rec->min = min(lat_rec->min, lat);
	lat_rec->max = max(lat_rec->max, lat);
}

static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
	struct fsf_qual_latency_info *lat_in;
	struct latency_cont *lat = NULL;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct zfcp_blk_drv_data blktrc;
	int ticks = req->adapter->timer_ticks;

	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;

	blktrc.flags = 0;
	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
	blktrc.inb_usage = 0;
	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;

	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		zfcp_sdev = sdev_to_zfcp(scsi->device);
		blktrc.flags |= ZFCP_BLK_LAT_VALID;
		blktrc.channel_lat = lat_in->channel_lat * ticks;
		blktrc.fabric_lat = lat_in->fabric_lat * ticks;

		switch (req->qtcb->bottom.io.data_direction) {
		case FSF_DATADIR_DIF_READ_STRIP:
		case FSF_DATADIR_DIF_READ_CONVERT:
		case FSF_DATADIR_READ:
			lat = &zfcp_sdev->latencies.read;
			break;
		case FSF_DATADIR_DIF_WRITE_INSERT:
		case FSF_DATADIR_DIF_WRITE_CONVERT:
		case FSF_DATADIR_WRITE:
			lat = &zfcp_sdev->latencies.write;
			break;
		case FSF_DATADIR_CMND:
			lat = &zfcp_sdev->latencies.cmd;
			break;
		}

		if (lat) {
			spin_lock(&zfcp_sdev->latencies.lock);
			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
			lat->counter++;
			spin_unlock(&zfcp_sdev->latencies.lock);
		}
	}

	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
			    sizeof(blktrc));
}

static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scmnd = req->data;
	struct scsi_device *sdev = scmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev;
	struct fsf_qtcb_header *header = &req->qtcb->header;

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
		return;

	zfcp_sdev = sdev_to_zfcp(sdev);

	switch (header->fsf_status) {
	case FSF_HANDLE_MISMATCH:
	case FSF_PORT_HANDLE_NOT_VALID:
		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_FCPLUN_NOT_VALID:
	case FSF_LUN_HANDLE_NOT_VALID:
		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_DIRECTION_INDICATOR_NOT_VALID:
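		/*
		 * The channel rejected the data direction set by the driver;
		 * treat this as fatal and shut the adapter down via ERP.
		 */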
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect direction %d, LUN 0x%016Lx on port "
			"0x%016Lx closed\n",
			req->qtcb->bottom.io.data_direction,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
					  "fssfch3");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_CMND_LENGTH_NOT_VALID:
		dev_err(&req->adapter->ccw_device->dev,
			"Incorrect CDB length %d, LUN 0x%016Lx on "
			"port 0x%016Lx closed\n",
			req->qtcb->bottom.io.fcp_cmnd_length,
			(unsigned long long)zfcp_scsi_dev_lun(sdev),
			(unsigned long long)zfcp_sdev->port->wwpn);
		zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
					  "fssfch4");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_PORT_BOXED:
		zfcp_erp_set_port_status(zfcp_sdev->port,
					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_port_reopen(zfcp_sdev->port,
				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_LUN_BOXED:
		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
				    "fssfch6");
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		if (header->fsf_status_qual.word[0] ==
		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
			zfcp_fc_test_link(zfcp_sdev->port);
		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
	}
}

static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
{
	struct scsi_cmnd *scpnt;
	struct fcp_resp_with_ext *fcp_rsp;
	unsigned long flags;

	read_lock_irqsave(&req->adapter->abort_lock, flags);

	scpnt = req->data;
	if (unlikely(!scpnt)) {
		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
		return;
	}

	zfcp_fsf_fcp_handler_common(req);

	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
		goto skip_fsfstatus;
	}

	switch (req->qtcb->header.fsf_status) {
	case FSF_INCONSISTENT_PROT_DATA:
	case FSF_INVALID_PROT_PARM:
		set_host_byte(scpnt, DID_ERROR);
		goto skip_fsfstatus;
	case FSF_BLOCK_GUARD_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x1);
		goto skip_fsfstatus;
	case FSF_APP_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x2);
		goto skip_fsfstatus;
	case FSF_REF_TAG_CHECK_FAILURE:
		zfcp_scsi_dif_sense_error(scpnt, 0x3);
		goto skip_fsfstatus;
	}
	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);

skip_fsfstatus:
	zfcp_fsf_req_trace(req, scpnt);
	zfcp_dbf_scsi_result(scpnt, req);

	scpnt->host_scribble = NULL;
	(scpnt->scsi_done) (scpnt);
	/*
	 * We must hold this lock until scsi_done has been called.
	 * Otherwise we may call scsi_done after abort regarding this
	 * command has completed.
	 * Note: scsi_done must not block!
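	 * (The lock in question is the adapter's abort_lock, taken as a
	 * reader at the start of this handler.)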
	 */
	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
}

static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
{
	switch (scsi_get_prot_op(scsi_cmnd)) {
	case SCSI_PROT_NORMAL:
		switch (scsi_cmnd->sc_data_direction) {
		case DMA_NONE:
			*data_dir = FSF_DATADIR_CMND;
			break;
		case DMA_FROM_DEVICE:
			*data_dir = FSF_DATADIR_READ;
			break;
		case DMA_TO_DEVICE:
			*data_dir = FSF_DATADIR_WRITE;
			break;
		case DMA_BIDIRECTIONAL:
			return -EINVAL;
		}
		break;

	case SCSI_PROT_READ_STRIP:
		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
		break;
	case SCSI_PROT_READ_PASS:
		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
		break;
	case SCSI_PROT_WRITE_PASS:
		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
 * @scsi_cmnd: scsi command to be sent
 */
int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
{
	struct zfcp_fsf_req *req;
	struct fcp_cmnd *fcp_cmnd;
	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
	int retval = -EIO;
	struct scsi_device *sdev = scsi_cmnd->device;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_qdio *qdio = adapter->qdio;
	struct fsf_qtcb_bottom_io *io;
	unsigned long flags;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return -EBUSY;

	spin_lock_irqsave(&qdio->req_q_lock, flags);
	if (atomic_read(&qdio->req_q_free) <= 0) {
		atomic_inc(&qdio->req_q_full);
		goto out;
	}

	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
		sbtype = SBAL_SFLAGS0_TYPE_WRITE;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  sbtype, adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		retval = PTR_ERR(req);
		goto out;
	}

	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;

	io = &req->qtcb->bottom.io;
	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
	req->data = scsi_cmnd;
	req->handler = zfcp_fsf_fcp_cmnd_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	io->service_class = FSF_CLASS_3;
	io->fcp_cmnd_length = FCP_CMND_LEN;

	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
		io->data_block_length = scsi_cmnd->device->sector_size;
		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
	}

	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;

	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);

	if (scsi_prot_sg_count(scsi_cmnd)) {
		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
				       scsi_prot_sg_count(scsi_cmnd));
		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
						 scsi_prot_sglist(scsi_cmnd));
		if (retval)
			goto failed_scsi_cmnd;
		io->prot_data_length = zfcp_qdio_real_bytes(
						scsi_prot_sglist(scsi_cmnd));
	}

	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
					 scsi_sglist(scsi_cmnd));
	if (unlikely(retval))
		goto failed_scsi_cmnd;

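	/*
	 * All data (and, if present, protection data) is mapped; terminate
	 * the SBALE chain, set the SBAL count when multi-buffer mode is
	 * active, and send the request.
	 */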
	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
	if (zfcp_adapter_multi_buffer_active(adapter))
		zfcp_qdio_set_scount(qdio, &req->qdio_req);

	retval = zfcp_fsf_req_send(req);
	if (unlikely(retval))
		goto failed_scsi_cmnd;

	goto out;

failed_scsi_cmnd:
	zfcp_fsf_req_free(req);
	scsi_cmnd->host_scribble = NULL;
out:
	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
	return retval;
}

static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
{
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *rsp_info;

	zfcp_fsf_fcp_handler_common(req);

	fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];

	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
	    (req->status & ZFCP_STATUS_FSFREQ_ERROR))
		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
}

/**
 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
 * @scmnd: SCSI command to send the task management command for
 * @tm_flags: unsigned byte for task management flags
 * Returns: on success pointer to struct fsf_req, NULL otherwise
 */
struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
					    u8 tm_flags)
{
	struct zfcp_fsf_req *req = NULL;
	struct fcp_cmnd *fcp_cmnd;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;

	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
		       ZFCP_STATUS_COMMON_UNBLOCKED)))
		return NULL;

	spin_lock_irq(&qdio->req_q_lock);
	if (zfcp_qdio_sbal_get(qdio))
		goto out;

	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
				  SBAL_SFLAGS0_TYPE_WRITE,
				  qdio->adapter->pool.scsi_req);

	if (IS_ERR(req)) {
		req = NULL;
		goto out;
	}

	req->data = scmnd;
	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;

	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);

	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
	zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);

	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
	if (!zfcp_fsf_req_send(req))
		goto out;

	zfcp_fsf_req_free(req);
	req = NULL;
out:
	spin_unlock_irq(&qdio->req_q_lock);
	return req;
}

/**
 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @sbal_idx: response queue index of SBAL to be processed
 */
void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
	struct qdio_buffer_element *sbale;
	struct zfcp_fsf_req *fsf_req;
	unsigned long req_id;
	int idx;

	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {

		sbale = &sbal->element[idx];
		req_id = (unsigned long) sbale->addr;
		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);

		if (!fsf_req) {
			/*
			 * Unknown request means that we have potentially
			 * memory corruption and must stop the machine
			 * immediately.
			 */
			zfcp_qdio_siosl(adapter);
			panic("error: unknown req_id (%lx) on adapter %s.\n",
			      req_id, dev_name(&adapter->ccw_device->dev));
		}

		fsf_req->qdio_req.sbal_response = sbal_idx;
		zfcp_fsf_req_complete(fsf_req);

		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
			break;
	}
}

/**
 * zfcp_fsf_get_req - look up the request owning the first SBALE of an SBAL
 * @qdio: pointer to struct zfcp_qdio
 * @sbal: QDIO buffer whose first element carries the request ID
 *
 * Unlike zfcp_fsf_reqid_check(), this only looks up the request and does
 * not remove it from the adapter's request list.
 */
struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
				      struct qdio_buffer *sbal)
{
	struct qdio_buffer_element *sbale = &sbal->element[0];
	u64 req_id = (unsigned long) sbale->addr;

	return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
}