/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent an lport
 * from being reset just before we send a frame. In that scenario the lport's
 * FID would get set to zero and then we'd send a frame with an invalid SID.
 * We also need to ensure that states don't change unexpectedly while
 * processing another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy:
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either the lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not freed while
 * processing the callback, the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be freed while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking don't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine transitions between states (i.e. _enter_* functions),
 * always staying protected.
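 *
 * For example (illustrative only), an entry point into the lport block
 * typically wraps the state transition in the mutex:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);
 *	mutex_unlock(&lport->lp_mutex);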
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. _enter_* function).
 */

#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO	0x010101
#define FC_LOCAL_PTP_FID_HI	0x010102

#define DNS_DELAY	3	/* Discovery delay after RSCN (in seconds) */

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);

static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job:      The passthrough job
 * @lport:    The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg:       job->reply_payload.sg_list
 * @nents:    job->reply_payload.sg_cnt
 * @offset:   The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler
 * @lport: The local port the frame was received on
 * @fp:    The received frame
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
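 *
 *		 The rport callbacks are serialized through a single-threaded
 *		 workqueue (see the PORT LOCKING NOTES above), so the rport
 *		 cannot be freed while this handler is running.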
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else {
			FC_LPORT_DBG(lport, "Received a READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		lport->dns_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_state() - Return a string which represents the lport's state
 * @lport: The lport whose state is to be converted to a string
 */
static const char *fc_lport_state(struct fc_lport *lport)
{
	const char *cp;

	cp = fc_lport_state_names[lport->state];
	if (!cp)
		cp = "unknown";
	return cp;
}

/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport:       The lport to attach the ptp rport to
 * @remote_fid:  The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}

/**
 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
 * @shost: The SCSI host whose port state is to be determined
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	mutex_lock(&lport->lp_mutex);
	if (!lport->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lport->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);

/**
 * fc_get_host_speed() - Return the speed of the given Scsi_Host
 * @shost: The SCSI host whose port speed is to be determined
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);

/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct timespec v0, v1;
	unsigned int cpu;
	u64 fcp_in_bytes = 0;
	u64 fcp_out_bytes = 0;

	fcoe_stats = &lport->host_stats;
	memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));

	jiffies_to_timespec(jiffies, &v0);
	jiffies_to_timespec(lport->boot_time, &v1);
	fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);

	for_each_possible_cpu(cpu) {
		struct fcoe_dev_stats *stats;

		stats = per_cpu_ptr(lport->dev_stats, cpu);

		fcoe_stats->tx_frames += stats->TxFrames;
		fcoe_stats->tx_words += stats->TxWords;
		fcoe_stats->rx_frames += stats->RxFrames;
		fcoe_stats->rx_words += stats->RxWords;
		fcoe_stats->error_frames += stats->ErrorFrames;
		fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
		fcoe_stats->fcp_input_requests += stats->InputRequests;
		fcoe_stats->fcp_output_requests += stats->OutputRequests;
		fcoe_stats->fcp_control_requests += stats->ControlRequests;
		fcp_in_bytes += stats->InputBytes;
		fcp_out_bytes += stats->OutputBytes;
		fcoe_stats->link_failure_count += stats->LinkFailureCount;
	}
	fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
	fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
	fcoe_stats->lip_count = -1;
	fcoe_stats->nos_count = -1;
	fcoe_stats->loss_of_sync_count = -1;
	fcoe_stats->loss_of_signal_count = -1;
	fcoe_stats->prim_seq_protocol_err_count = -1;
	fcoe_stats->dumped_frames = -1;
	return fcoe_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);

/**
 * fc_lport_flogi_fill() - Fill in FLOGI command for request
 * @lport: The local port the FLOGI is for
 * @flogi: The FLOGI command
 * @op:    The opcode
 */
static void fc_lport_flogi_fill(struct fc_lport *lport,
				struct fc_els_flogi *flogi,
				unsigned int op)
{
	struct fc_els_csp *sp;
	struct fc_els_cssp *cp;

	memset(flogi, 0, sizeof(*flogi));
	flogi->fl_cmd = (u8) op;
	put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
	put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
	sp = &flogi->fl_csp;
	sp->sp_hi_ver = 0x20;
	sp->sp_lo_ver = 0x20;
	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
	sp->sp_bb_data = htons((u16) lport->mfs);
	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
	if (op != ELS_FLOGI) {
		sp->sp_features = htons(FC_SP_FT_CIRO);
		sp->sp_tot_seq = htons(255);	/* seq. we accept */
		sp->sp_rel_off = htons(0x1f);
		sp->sp_e_d_tov = htonl(lport->e_d_tov);

		cp->cp_rdfs = htons((u16) lport->mfs);
		cp->cp_con_seq = htons(255);
		cp->cp_open_seq = 1;
	}
}

/**
 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
 * @lport: The local port to add a new FC-4 type to
 * @type:  The new FC-4 type
 */
static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
{
	__be32 *mp;

	mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
}

/**
 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
 * @lport: Fibre Channel local port receiving the RLIR
 * @fp:    The RLIR request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
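 *
 * The RLIR payload is not examined; the request is simply accepted with
 * an LS_ACC and the frame is freed.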
 */
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @in_fp: ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @in_fp: The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: The local port receiving the LOGO
 * @fp:    The LOGO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
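 *
 * A LOGO from the fabric is acknowledged with an LS_ACC and the local
 * port is then reset.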
 */
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_lport_enter_reset(lport);
	fc_frame_free(fp);
}

/**
 * fc_fabric_login() - Start the lport state machine
 * @lport: The local port that should log into the fabric
 *
 * Locking Note: This function should not be called
 *		 with the lport lock held.
 */
int fc_fabric_login(struct fc_lport *lport)
{
	int rc = -1;

	mutex_lock(&lport->lp_mutex);
	if (lport->state == LPORT_ST_DISABLED ||
	    lport->state == LPORT_ST_LOGO) {
		fc_lport_state_enter(lport, LPORT_ST_RESET);
		fc_lport_enter_reset(lport);
		rc = 0;
	}
	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_fabric_login);

/**
 * __fc_linkup() - Handler for transport linkup events
 * @lport: The lport whose link is up
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkup(struct fc_lport *lport)
{
	if (!lport->link_up) {
		lport->link_up = 1;

		if (lport->state == LPORT_ST_RESET)
			fc_lport_enter_flogi(lport);
	}
}

/**
 * fc_linkup() - Handler for transport linkup events
 * @lport: The local port whose link is up
 */
void fc_linkup(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkup(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);

/**
 * __fc_linkdown() - Handler for transport linkdown events
 * @lport: The lport whose link is down
 *
 * Locking: must be called with the lp_mutex held
 */
void __fc_linkdown(struct fc_lport *lport)
{
	if (lport->link_up) {
		lport->link_up = 0;
		fc_lport_enter_reset(lport);
		lport->tt.fcp_cleanup(lport);
	}
}

/**
 * fc_linkdown() - Handler for transport linkdown events
 * @lport: The local port whose link is down
 */
void fc_linkdown(struct fc_lport *lport)
{
	printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
	       lport->host->host_no, lport->port_id);

	mutex_lock(&lport->lp_mutex);
	__fc_linkdown(lport);
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);

/**
 * fc_fabric_logoff() - Log out of the fabric
 * @lport: The local port to log off of the fabric
 *
 * Return value:
 *	0 for success, -1 for failure
 */
int fc_fabric_logoff(struct fc_lport *lport)
{
	lport->tt.disc_stop_final(lport);
	mutex_lock(&lport->lp_mutex);
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);
	mutex_unlock(&lport->lp_mutex);
	lport->tt.rport_flush_queue();
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_logo(lport);
	mutex_unlock(&lport->lp_mutex);
	cancel_delayed_work_sync(&lport->retry_work);
	return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);

/**
 * fc_lport_destroy() - Unregister a fc_lport
 * @lport: The local port to unregister
 *
 * Note:
 * exit routine for fc_lport instance
 * clean-up all the allocated memory
 * and free up other system resources.
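 *
 * After this call, frame transmission is disabled by pointing frame_send
 * at fc_frame_drop(), so any late frames are silently discarded.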
 *
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_fc4_del_lport(lport);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);

/**
 * fc_set_mfs() - Set the maximum frame size for a local port
 * @lport: The local port to set the MFS for
 * @mfs:   The new MFS
 */
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
	unsigned int old_mfs;
	int rc = -EINVAL;

	mutex_lock(&lport->lp_mutex);

	old_mfs = lport->mfs;

	if (mfs >= FC_MIN_MAX_FRAME) {
		mfs &= ~3;
		if (mfs > FC_MAX_FRAME)
			mfs = FC_MAX_FRAME;
		mfs -= sizeof(struct fc_frame_header);
		lport->mfs = mfs;
		rc = 0;
	}

	if (!rc && mfs < old_mfs)
		fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_set_mfs);

/**
 * fc_lport_disc_callback() - Callback for discovery events
 * @lport: The local port receiving the event
 * @event: The discovery event
 */
static void fc_lport_disc_callback(struct fc_lport *lport,
				   enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		WARN_ON(1);
		break;
	}
}

/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: The local port that is ready
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - Set the local port Port ID
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp:      The frame containing the incoming request, or NULL.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - Set the local port Port ID for point-to-multipoint
 * @lport:   The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
 * discovery to be skipped.
 */
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
	mutex_lock(&lport->lp_mutex);

	fc_lport_set_port_id(lport, port_id, NULL);

	switch (lport->state) {
	case LPORT_ST_RESET:
	case LPORT_ST_FLOGI:
		if (port_id)
			fc_lport_enter_ready(lport);
		break;
	default:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);

/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response. If this fails, the originator should
		 * repeat the sequence.
		 */
		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);
	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));
out:
	fc_frame_free(rx_fp);
}

/**
 * fc_lport_recv_els_req() - The generic lport ELS request handler
 * @lport: The local port that received the request
 * @fp:    The request frame
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it will grab the lock.
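 *
 * ELS requests that are not handled here (or by the discovery layer,
 * which takes RSCN) are passed to the rport layer via
 * lport->tt.rport_recv_req, which is the default handler.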
 */
static void fc_lport_recv_els_req(struct fc_lport *lport,
				  struct fc_frame *fp)
{
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here. These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else {
		/*
		 * Check opcode.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	}
	mutex_unlock(&lport->lp_mutex);
}

static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *spp_in,
			     struct fc_els_spp *spp_out)
{
	return FC_SPP_RESP_INVL;
}

struct fc4_prov fc_lport_els_prov = {
	.prli = fc_lport_els_prli,
	.recv = fc_lport_recv_els_req,
};

/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The lport that received the request
 * @fp:    The frame the request is in
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it may grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = fr_seq(fp);
	struct fc4_prov *prov;

	/*
	 * Use RCU read lock and module_lock to be sure module doesn't
	 * deregister and get unloaded while we're calling it.
	 * try_module_get() is inlined and accepts a NULL parameter.
	 * Only ELSes and FCP target ops should come through here.
	 * The locking is unfortunate, and a better scheme is being sought.
	 */

	rcu_read_lock();
	if (fh->fh_type >= FC_FC4_PROV_SIZE)
		goto drop;
	prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
	if (!prov || !try_module_get(prov->module))
		goto drop;
	rcu_read_unlock();
	prov->recv(lport, fp);
	module_put(prov->module);
	return;
drop:
	rcu_read_unlock();
	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
	fc_frame_free(fp);
	lport->tt.exch_done(sp);
}

/**
 * fc_lport_reset() - Reset a local port
 * @lport: The local port which should be reset
 *
 * Locking Note: This function should not be called with the
 *		 lport lock held.
 */
int fc_lport_reset(struct fc_lport *lport)
{
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;
}
EXPORT_SYMBOL(fc_lport_reset);

/**
 * fc_lport_reset_locked() - Reset the local port with the lport lock held
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
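 *
 * This logs off the dNS rport and any point-to-point rport, stops
 * discovery and resets the exchange manager for this lport.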
 */
static void fc_lport_reset_locked(struct fc_lport *lport)
{
	if (lport->dns_rdata)
		lport->tt.rport_logoff(lport->dns_rdata);

	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
		lport->ptp_rdata = NULL;
	}

	lport->tt.disc_stop(lport);

	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_host_fabric_name(lport->host) = 0;

	if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
		fc_lport_set_port_id(lport, 0, NULL);
}

/**
 * fc_lport_enter_reset() - Reset the local port
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_reset(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
		     fc_lport_state(lport));

	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
		return;

	if (lport->vport) {
		if (lport->link_up)
			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
		else
			fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
	}
	fc_lport_state_enter(lport, LPORT_ST_RESET);
	fc_host_post_event(lport->host, fc_get_event_number(),
			   FCH_EVT_LIPRESET, 0);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
	if (lport->link_up)
		fc_lport_enter_flogi(lport);
}

/**
 * fc_lport_enter_disabled() - Disable the local port
 * @lport: The local port to be disabled
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
	fc_vports_linkchange(lport);
	fc_lport_reset_locked(lport);
}

/**
 * fc_lport_error() - Handler for any errors
 * @lport: The local port that the error was on
 * @fp:    The error code encoded in a frame pointer
 *
 * If the error was caused by a resource allocation failure
 * then wait for half a second and retry, otherwise retry
 * after the e_d_tov time.
 */
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
	unsigned long delay = 0;

	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_lport_state(lport),
		     lport->retry_count);

	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return;

	/*
	 * Memory allocation failure, or the exchange timed out
	 * or we received LS_RJT.
	 * Retry after delay.
	 */
	if (lport->retry_count < lport->max_retry_count) {
		lport->retry_count++;
		if (!fp)
			delay = msecs_to_jiffies(500);
		else
			delay = msecs_to_jiffies(lport->e_d_tov);

		schedule_delayed_work(&lport->retry_work, delay);
	} else
		fc_lport_enter_reset(lport);
}

/**
 * fc_lport_ns_resp() - Handle response to a name server
 *			registration exchange
 * @sp:     current sequence in exchange
 * @fp:     response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
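 *
 * A response accepted by the name server advances the lport to the next
 * registration state (RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID)
 * and finally on to SCR; any other response is treated as an error.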
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
 * @sp:     current sequence in SCR exchange
 * @fp:     response frame
 * @lp_arg: Fibre Channel local port instance that sent the registration request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_SCR) {
		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_ready(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_enter_scr() - Send a SCR (State Change Register) request
 * @lport: The local port to register for state changes
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
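 *
 * The SCR is sent to the fabric controller (FC_FID_FCTRL); an accepted
 * response moves the lport to the READY state.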
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - Register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
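 *
 * The rport is created with the well-known directory server address
 * (FC_FID_DIR_SERV); once it reports READY, fc_lport_rport_callback()
 * starts the name server registrations.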
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp:     The sequence that the LOGO was on
 * @fp:     The LOGO frame
 * @lp_arg: The local port that sent the LOGO request
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Log out of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
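 *
 * The LOGO is sent to the fabric (FC_FID_FLOGI); fc_lport_logo_resp()
 * moves the lport to the DISABLED state once the LOGO is accepted.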
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:     The sequence that the FLOGI was on
 * @fp:     The FLOGI response frame
 * @lp_arg: The local port that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	did = fc_frame_did(fp);
	if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
	    fc_frame_payload_op(fp) != ELS_LS_ACC) {
		FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
		fc_lport_error(lport, fp);
		goto err;
	}

	flp = fc_frame_payload_get(fp, sizeof(*flp));
	if (!flp) {
		FC_LPORT_DBG(lport, "FLOGI bad response\n");
		fc_lport_error(lport, fp);
		goto err;
	}

	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < lport->mfs)
		lport->mfs = mfs;
	csp_flags = ntohs(flp->fl_csp.sp_features);
	r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
	e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
	if (csp_flags & FC_SP_FT_EDTR)
		e_d_tov /= 1000000;

	lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

	if ((csp_flags & FC_SP_FT_FPORT) == 0) {
		if (e_d_tov > lport->e_d_tov)
			lport->e_d_tov = e_d_tov;
		lport->r_a_tov = 2 * e_d_tov;
		fc_lport_set_port_id(lport, did, fp);
		printk(KERN_INFO "host%d: libfc: "
		       "Port (%6.6x) entered "
		       "point-to-point mode\n",
		       lport->host->host_no, did);
		fc_lport_ptp_setup(lport, fc_frame_sid(fp),
				   get_unaligned_be64(&flp->fl_wwpn),
				   get_unaligned_be64(&flp->fl_wwnn));
	} else {
		lport->e_d_tov = e_d_tov;
		lport->r_a_tov = r_a_tov;
		fc_host_fabric_name(lport->host) =
			get_unaligned_be64(&flp->fl_wwnn);
		fc_lport_set_port_id(lport, did, fp);
		fc_lport_enter_dns(lport);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);

/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_FLOGI);

	if (lport->point_to_multipoint) {
		if (lport->port_id)
			fc_lport_enter_ready(lport);
		return;
	}

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp)
		return fc_lport_error(lport, fp);

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
				  lport->vport ? ELS_FDISC : ELS_FLOGI,
				  fc_lport_flogi_resp, lport,
				  lport->vport ? 2 * lport->r_a_tov :
				  lport->e_d_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_config() - Configure a fc_lport
 * @lport: The local port to be configured
 */
int fc_lport_config(struct fc_lport *lport)
{
	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
	mutex_init(&lport->lp_mutex);

	fc_lport_state_enter(lport, LPORT_ST_DISABLED);

	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
	fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);

	return 0;
}
EXPORT_SYMBOL(fc_lport_config);

/**
 * fc_lport_init() - Initialize the lport layer for a local port
 * @lport: The local port to initialize the exchange layer for
 */
int fc_lport_init(struct fc_lport *lport)
{
	if (!lport->tt.lport_recv)
		lport->tt.lport_recv = fc_lport_recv_req;

	if (!lport->tt.lport_reset)
		lport->tt.lport_reset = fc_lport_reset;

	fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
	fc_host_node_name(lport->host) = lport->wwnn;
	fc_host_port_name(lport->host) = lport->wwpn;
	fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
	memset(fc_host_supported_fc4s(lport->host), 0,
	       sizeof(fc_host_supported_fc4s(lport->host)));
	fc_host_supported_fc4s(lport->host)[2] = 1;
	fc_host_supported_fc4s(lport->host)[7] = 1;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(lport->host), 0,
	       sizeof(fc_host_active_fc4s(lport->host)));
	fc_host_active_fc4s(lport->host)[2] = 1;
	fc_host_active_fc4s(lport->host)[7] = 1;
	fc_host_maxframe_size(lport->host) = lport->mfs;
	fc_host_supported_speeds(lport->host) = 0;
	if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
	fc_fc4_add_lport(lport);

	return 0;
}
EXPORT_SYMBOL(fc_lport_init);

/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:       The sequence for the FC Passthrough response
 * @fp:       The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_els_request() - Send ELS passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination port id
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
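 *
 * As with the ELS passthrough, the response is collected by
 * fc_lport_bsg_resp(); the expected response code for CT is FC_FS_ACC.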
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
			       struct fc_lport *lport, u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_ct_req *ct;
	size_t len;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	ct = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ct, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_CT;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = FC_FS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}

/**
 * fc_lport_bsg_request() - The common entry point for sending
 *			    FC Passthrough requests
 * @job: The BSG passthrough job
 */
int fc_lport_bsg_request(struct fc_bsg_job *job)
{
	struct request *rsp = job->req->next_rq;
	struct Scsi_Host *shost = job->shost;
	struct fc_lport *lport = shost_priv(shost);
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	int rc = -EINVAL;
	u32 did;

	job->reply->reply_payload_rcv_len = 0;
	if (rsp)
		rsp->resid_len = job->reply_payload.payload_len;

	mutex_lock(&lport->lp_mutex);

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_els_request(job, lport, rport->port_id,
					  rdata->e_d_tov);
		break;

	case FC_BSG_RPT_CT:
		rport = job->rport;
		if (!rport)
			break;

		rdata = rport->dd_data;
		rc = fc_lport_ct_request(job, lport, rport->port_id,
					 rdata->e_d_tov);
		break;

	case FC_BSG_HST_CT:
		did = ntoh24(job->request->rqst_data.h_ct.port_id);
		if (did == FC_FID_DIR_SERV)
			rdata = lport->dns_rdata;
		else
			rdata = lport->tt.rport_lookup(lport, did);

		if (!rdata)
			break;

		rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		did = ntoh24(job->request->rqst_data.h_els.port_id);
		rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
	return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);