/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent an lport
 * from being reset just before we send a frame. In that scenario the lport's
 * FID would get set to zero and then we'd send a frame with an invalid SID.
 * We also need to ensure that states don't change unexpectedly while
 * processing another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy:
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not freed while
 * processing the callback the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be freed while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking doesn't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine can transition between states (i.e. _enter_* functions)
 * while always staying protected.
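 *
 * As an illustration only (this describes the existing pattern and is not
 * itself a function in this file), an entry point into the lport block
 * roughly does the following; the _enter_* call may then move the state
 * machine through several states while the mutex stays held:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_flogi(lport);
 *	mutex_unlock(&lport->lp_mutex);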
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. _enter_* function).
 */

#include <linux/timer.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO	0x010101
#define FC_LOCAL_PTP_FID_HI	0x010102

#define DNS_DELAY	3	/* Discovery delay after RSCN (in seconds) */

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);

static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job:      The passthrough job
 * @lport:    The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg:	      job->reply_payload.sg_list
 * @nents:    job->reply_payload.sg_cnt
 * @offset:   The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler
 * @lport: The local port the frame was received on
 * @fp:	   The received frame
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
 */
static void fc_lport_rport_callback(struct fc_lport *lport,
				    struct fc_rport_priv *rdata,
				    enum fc_rport_event event)
{
	FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
		     rdata->ids.port_id);

	mutex_lock(&lport->lp_mutex);
	switch (event) {
	case RPORT_EV_READY:
		if (lport->state == LPORT_ST_DNS) {
			lport->dns_rdata = rdata;
			fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
		} else {
			FC_LPORT_DBG(lport, "Received a READY event "
				     "on port (%6.6x) for the directory "
				     "server, but the lport is not "
				     "in the DNS state, it's in the "
				     "%d state", rdata->ids.port_id,
				     lport->state);
			lport->tt.rport_logoff(rdata);
		}
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		lport->dns_rdata = NULL;
		break;
	case RPORT_EV_NONE:
		break;
	}
	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_state() - Return a string which represents the lport's state
 * @lport: The lport whose state is to be converted to a string
 */
static const char *fc_lport_state(struct fc_lport *lport)
{
	const char *cp;

	cp = fc_lport_state_names[lport->state];
	if (!cp)
		cp = "unknown";
	return cp;
}

/**
 * fc_lport_ptp_setup() - Create an rport for point-to-point mode
 * @lport:	 The lport to attach the ptp rport to
 * @remote_fid:	 The FID of the ptp rport
 * @remote_wwpn: The WWPN of the ptp rport
 * @remote_wwnn: The WWNN of the ptp rport
 */
static void fc_lport_ptp_setup(struct fc_lport *lport,
			       u32 remote_fid, u64 remote_wwpn,
			       u64 remote_wwnn)
{
	mutex_lock(&lport->disc.disc_mutex);
	if (lport->ptp_rdata) {
		lport->tt.rport_logoff(lport->ptp_rdata);
		kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy);
	}
	lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid);
	kref_get(&lport->ptp_rdata->kref);
	lport->ptp_rdata->ids.port_name = remote_wwpn;
	lport->ptp_rdata->ids.node_name = remote_wwnn;
	mutex_unlock(&lport->disc.disc_mutex);

	lport->tt.rport_login(lport->ptp_rdata);

	fc_lport_enter_ready(lport);
}

/**
 * fc_get_host_port_state() - Return the port state of the given Scsi_Host
 * @shost: The SCSI host whose port state is to be determined
 */
void fc_get_host_port_state(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	mutex_lock(&lport->lp_mutex);
	if (!lport->link_up)
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
	else
		switch (lport->state) {
		case LPORT_ST_READY:
			fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
			break;
		default:
			fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		}
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);

/**
 * fc_get_host_speed() - Return the speed of the given Scsi_Host
 * @shost: The SCSI host whose port speed is to be determined
 */
void fc_get_host_speed(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);

/**
 * fc_get_host_stats() - Return the Scsi_Host's statistics
 * @shost: The SCSI host whose statistics are to be returned
 */
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_host_statistics *fcoe_stats;
	struct fc_lport *lport = shost_priv(shost);
	struct
timespec v0, v1; 290 unsigned int cpu; 291 u64 fcp_in_bytes = 0; 292 u64 fcp_out_bytes = 0; 293 294 fcoe_stats = &lport->host_stats; 295 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); 296 297 jiffies_to_timespec(jiffies, &v0); 298 jiffies_to_timespec(lport->boot_time, &v1); 299 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); 300 301 for_each_possible_cpu(cpu) { 302 struct fcoe_dev_stats *stats; 303 304 stats = per_cpu_ptr(lport->dev_stats, cpu); 305 306 fcoe_stats->tx_frames += stats->TxFrames; 307 fcoe_stats->tx_words += stats->TxWords; 308 fcoe_stats->rx_frames += stats->RxFrames; 309 fcoe_stats->rx_words += stats->RxWords; 310 fcoe_stats->error_frames += stats->ErrorFrames; 311 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount; 312 fcoe_stats->fcp_input_requests += stats->InputRequests; 313 fcoe_stats->fcp_output_requests += stats->OutputRequests; 314 fcoe_stats->fcp_control_requests += stats->ControlRequests; 315 fcp_in_bytes += stats->InputBytes; 316 fcp_out_bytes += stats->OutputBytes; 317 fcoe_stats->link_failure_count += stats->LinkFailureCount; 318 } 319 fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); 320 fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); 321 fcoe_stats->lip_count = -1; 322 fcoe_stats->nos_count = -1; 323 fcoe_stats->loss_of_sync_count = -1; 324 fcoe_stats->loss_of_signal_count = -1; 325 fcoe_stats->prim_seq_protocol_err_count = -1; 326 fcoe_stats->dumped_frames = -1; 327 return fcoe_stats; 328 } 329 EXPORT_SYMBOL(fc_get_host_stats); 330 331 /** 332 * fc_lport_flogi_fill() - Fill in FLOGI command for request 333 * @lport: The local port the FLOGI is for 334 * @flogi: The FLOGI command 335 * @op: The opcode 336 */ 337 static void fc_lport_flogi_fill(struct fc_lport *lport, 338 struct fc_els_flogi *flogi, 339 unsigned int op) 340 { 341 struct fc_els_csp *sp; 342 struct fc_els_cssp *cp; 343 344 memset(flogi, 0, sizeof(*flogi)); 345 flogi->fl_cmd = (u8) op; 346 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); 347 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); 348 sp = &flogi->fl_csp; 349 sp->sp_hi_ver = 0x20; 350 sp->sp_lo_ver = 0x20; 351 sp->sp_bb_cred = htons(10); /* this gets set by gateway */ 352 sp->sp_bb_data = htons((u16) lport->mfs); 353 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ 354 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); 355 if (op != ELS_FLOGI) { 356 sp->sp_features = htons(FC_SP_FT_CIRO); 357 sp->sp_tot_seq = htons(255); /* seq. we accept */ 358 sp->sp_rel_off = htons(0x1f); 359 sp->sp_e_d_tov = htonl(lport->e_d_tov); 360 361 cp->cp_rdfs = htons((u16) lport->mfs); 362 cp->cp_con_seq = htons(255); 363 cp->cp_open_seq = 1; 364 } 365 } 366 367 /** 368 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port 369 * @lport: The local port to add a new FC-4 type to 370 * @type: The new FC-4 type 371 */ 372 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) 373 { 374 __be32 *mp; 375 376 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW]; 377 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW)); 378 } 379 380 /** 381 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 382 * @lport: Fibre Channel local port receiving the RLIR 383 * @fp: The RLIR request frame 384 * 385 * Locking Note: The lport lock is expected to be held before calling 386 * this function. 
 */
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
	FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
		     fc_lport_state(lport));

	lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
	fc_frame_free(fp);
}

/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @fp:	   ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @fp:	   The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}

/**
 * fc_lport_recv_logo_req() - Handle received fabric LOGO request
 * @lport: The local port receiving the LOGO
 * @fp:	   The LOGO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
499 */ 500 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) 501 { 502 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); 503 fc_lport_enter_reset(lport); 504 fc_frame_free(fp); 505 } 506 507 /** 508 * fc_fabric_login() - Start the lport state machine 509 * @lport: The local port that should log into the fabric 510 * 511 * Locking Note: This function should not be called 512 * with the lport lock held. 513 */ 514 int fc_fabric_login(struct fc_lport *lport) 515 { 516 int rc = -1; 517 518 mutex_lock(&lport->lp_mutex); 519 if (lport->state == LPORT_ST_DISABLED || 520 lport->state == LPORT_ST_LOGO) { 521 fc_lport_state_enter(lport, LPORT_ST_RESET); 522 fc_lport_enter_reset(lport); 523 rc = 0; 524 } 525 mutex_unlock(&lport->lp_mutex); 526 527 return rc; 528 } 529 EXPORT_SYMBOL(fc_fabric_login); 530 531 /** 532 * __fc_linkup() - Handler for transport linkup events 533 * @lport: The lport whose link is up 534 * 535 * Locking: must be called with the lp_mutex held 536 */ 537 void __fc_linkup(struct fc_lport *lport) 538 { 539 if (!lport->link_up) { 540 lport->link_up = 1; 541 542 if (lport->state == LPORT_ST_RESET) 543 fc_lport_enter_flogi(lport); 544 } 545 } 546 547 /** 548 * fc_linkup() - Handler for transport linkup events 549 * @lport: The local port whose link is up 550 */ 551 void fc_linkup(struct fc_lport *lport) 552 { 553 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n", 554 lport->host->host_no, lport->port_id); 555 556 mutex_lock(&lport->lp_mutex); 557 __fc_linkup(lport); 558 mutex_unlock(&lport->lp_mutex); 559 } 560 EXPORT_SYMBOL(fc_linkup); 561 562 /** 563 * __fc_linkdown() - Handler for transport linkdown events 564 * @lport: The lport whose link is down 565 * 566 * Locking: must be called with the lp_mutex held 567 */ 568 void __fc_linkdown(struct fc_lport *lport) 569 { 570 if (lport->link_up) { 571 lport->link_up = 0; 572 fc_lport_enter_reset(lport); 573 lport->tt.fcp_cleanup(lport); 574 } 575 } 576 577 /** 578 * fc_linkdown() - Handler for transport linkdown events 579 * @lport: The local port whose link is down 580 */ 581 void fc_linkdown(struct fc_lport *lport) 582 { 583 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n", 584 lport->host->host_no, lport->port_id); 585 586 mutex_lock(&lport->lp_mutex); 587 __fc_linkdown(lport); 588 mutex_unlock(&lport->lp_mutex); 589 } 590 EXPORT_SYMBOL(fc_linkdown); 591 592 /** 593 * fc_fabric_logoff() - Logout of the fabric 594 * @lport: The local port to logoff the fabric 595 * 596 * Return value: 597 * 0 for success, -1 for failure 598 */ 599 int fc_fabric_logoff(struct fc_lport *lport) 600 { 601 lport->tt.disc_stop_final(lport); 602 mutex_lock(&lport->lp_mutex); 603 if (lport->dns_rdata) 604 lport->tt.rport_logoff(lport->dns_rdata); 605 mutex_unlock(&lport->lp_mutex); 606 lport->tt.rport_flush_queue(); 607 mutex_lock(&lport->lp_mutex); 608 fc_lport_enter_logo(lport); 609 mutex_unlock(&lport->lp_mutex); 610 cancel_delayed_work_sync(&lport->retry_work); 611 return 0; 612 } 613 EXPORT_SYMBOL(fc_fabric_logoff); 614 615 /** 616 * fc_lport_destroy() - Unregister a fc_lport 617 * @lport: The local port to unregister 618 * 619 * Note: 620 * exit routine for fc_lport instance 621 * clean-up all the allocated memory 622 * and free up other system resources. 
 *
 */
int fc_lport_destroy(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	lport->state = LPORT_ST_DISABLED;
	lport->link_up = 0;
	lport->tt.frame_send = fc_frame_drop;
	mutex_unlock(&lport->lp_mutex);

	lport->tt.fcp_abort_io(lport);
	lport->tt.disc_stop_final(lport);
	lport->tt.exch_mgr_reset(lport, 0, 0);
	fc_fc4_del_lport(lport);
	return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);

/**
 * fc_set_mfs() - Set the maximum frame size for a local port
 * @lport: The local port to set the MFS for
 * @mfs:   The new MFS
 */
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
	unsigned int old_mfs;
	int rc = -EINVAL;

	mutex_lock(&lport->lp_mutex);

	old_mfs = lport->mfs;

	if (mfs >= FC_MIN_MAX_FRAME) {
		mfs &= ~3;
		if (mfs > FC_MAX_FRAME)
			mfs = FC_MAX_FRAME;
		mfs -= sizeof(struct fc_frame_header);
		lport->mfs = mfs;
		rc = 0;
	}

	if (!rc && mfs < old_mfs)
		fc_lport_enter_reset(lport);

	mutex_unlock(&lport->lp_mutex);

	return rc;
}
EXPORT_SYMBOL(fc_set_mfs);

/**
 * fc_lport_disc_callback() - Callback for discovery events
 * @lport: The local port receiving the event
 * @event: The discovery event
 */
void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
{
	switch (event) {
	case DISC_EV_SUCCESS:
		FC_LPORT_DBG(lport, "Discovery succeeded\n");
		break;
	case DISC_EV_FAILED:
		printk(KERN_ERR "host%d: libfc: "
		       "Discovery failed for port (%6.6x)\n",
		       lport->host->host_no, lport->port_id);
		mutex_lock(&lport->lp_mutex);
		fc_lport_enter_reset(lport);
		mutex_unlock(&lport->lp_mutex);
		break;
	case DISC_EV_NONE:
		WARN_ON(1);
		break;
	}
}

/**
 * fc_lport_enter_ready() - Enter the ready state and start discovery
 * @lport: The local port that is ready
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ready(struct fc_lport *lport)
{
	FC_LPORT_DBG(lport, "Entered READY from state %s\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_READY);
	if (lport->vport)
		fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
	fc_vports_linkchange(lport);

	if (!lport->ptp_rdata)
		lport->tt.disc_start(fc_lport_disc_callback, lport);
}

/**
 * fc_lport_set_port_id() - set the local port Port ID
 * @lport: The local port which will have its Port ID set.
 * @port_id: The new port ID.
 * @fp: The frame containing the incoming request, or NULL.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
				 struct fc_frame *fp)
{
	if (port_id)
		printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
		       lport->host->host_no, port_id);

	lport->port_id = port_id;

	/* Update the fc_host */
	fc_host_port_id(lport->host) = port_id;

	if (lport->tt.lport_set_port_id)
		lport->tt.lport_set_port_id(lport, port_id, fp);
}

/**
 * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
 * @lport: The local port which will have its Port ID set.
 * @port_id: The new port ID.
 *
 * Called by the lower-level driver when transport sets the local port_id.
750 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and 751 * discovery to be skipped. 752 */ 753 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id) 754 { 755 mutex_lock(&lport->lp_mutex); 756 757 fc_lport_set_port_id(lport, port_id, NULL); 758 759 switch (lport->state) { 760 case LPORT_ST_RESET: 761 case LPORT_ST_FLOGI: 762 if (port_id) 763 fc_lport_enter_ready(lport); 764 break; 765 default: 766 break; 767 } 768 mutex_unlock(&lport->lp_mutex); 769 } 770 EXPORT_SYMBOL(fc_lport_set_local_id); 771 772 /** 773 * fc_lport_recv_flogi_req() - Receive a FLOGI request 774 * @lport: The local port that received the request 775 * @rx_fp: The FLOGI frame 776 * 777 * A received FLOGI request indicates a point-to-point connection. 778 * Accept it with the common service parameters indicating our N port. 779 * Set up to do a PLOGI if we have the higher-number WWPN. 780 * 781 * Locking Note: The lport lock is expected to be held before calling 782 * this function. 783 */ 784 static void fc_lport_recv_flogi_req(struct fc_lport *lport, 785 struct fc_frame *rx_fp) 786 { 787 struct fc_frame *fp; 788 struct fc_frame_header *fh; 789 struct fc_els_flogi *flp; 790 struct fc_els_flogi *new_flp; 791 u64 remote_wwpn; 792 u32 remote_fid; 793 u32 local_fid; 794 795 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", 796 fc_lport_state(lport)); 797 798 remote_fid = fc_frame_sid(rx_fp); 799 flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); 800 if (!flp) 801 goto out; 802 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 803 if (remote_wwpn == lport->wwpn) { 804 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " 805 "with same WWPN %16.16llx\n", 806 lport->host->host_no, remote_wwpn); 807 goto out; 808 } 809 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn); 810 811 /* 812 * XXX what is the right thing to do for FIDs? 813 * The originator might expect our S_ID to be 0xfffffe. 814 * But if so, both of us could end up with the same FID. 815 */ 816 local_fid = FC_LOCAL_PTP_FID_LO; 817 if (remote_wwpn < lport->wwpn) { 818 local_fid = FC_LOCAL_PTP_FID_HI; 819 if (!remote_fid || remote_fid == local_fid) 820 remote_fid = FC_LOCAL_PTP_FID_LO; 821 } else if (!remote_fid) { 822 remote_fid = FC_LOCAL_PTP_FID_HI; 823 } 824 825 fc_lport_set_port_id(lport, local_fid, rx_fp); 826 827 fp = fc_frame_alloc(lport, sizeof(*flp)); 828 if (fp) { 829 new_flp = fc_frame_payload_get(fp, sizeof(*flp)); 830 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); 831 new_flp->fl_cmd = (u8) ELS_LS_ACC; 832 833 /* 834 * Send the response. If this fails, the originator should 835 * repeat the sequence. 836 */ 837 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); 838 fh = fc_frame_header_get(fp); 839 hton24(fh->fh_s_id, local_fid); 840 hton24(fh->fh_d_id, remote_fid); 841 lport->tt.frame_send(lport, fp); 842 843 } else { 844 fc_lport_error(lport, fp); 845 } 846 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, 847 get_unaligned_be64(&flp->fl_wwnn)); 848 out: 849 fc_frame_free(rx_fp); 850 } 851 852 /** 853 * fc_lport_recv_els_req() - The generic lport ELS request handler 854 * @lport: The local port that received the request 855 * @fp: The request frame 856 * 857 * This function will see if the lport handles the request or 858 * if an rport should handle the request. 859 * 860 * Locking Note: This function should not be called with the lport 861 * lock held because it will grab the lock. 
 */
static void fc_lport_recv_els_req(struct fc_lport *lport,
				  struct fc_frame *fp)
{
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here.  These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);
	else {
		/*
		 * Check opcode.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	}
	mutex_unlock(&lport->lp_mutex);
}

static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
			     const struct fc_els_spp *spp_in,
			     struct fc_els_spp *spp_out)
{
	return FC_SPP_RESP_INVL;
}

struct fc4_prov fc_lport_els_prov = {
	.prli = fc_lport_els_prli,
	.recv = fc_lport_recv_els_req,
};

/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The lport that received the request
 * @fp: The frame the request is in
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it may grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = fr_seq(fp);
	struct fc4_prov *prov;

	/*
	 * Use RCU read lock and module_lock to be sure module doesn't
	 * deregister and get unloaded while we're calling it.
	 * try_module_get() is inlined and accepts a NULL parameter.
	 * Only ELSes and FCP target ops should come through here.
	 * The locking is unfortunate, and a better scheme is being sought.
	 */

	rcu_read_lock();
	if (fh->fh_type >= FC_FC4_PROV_SIZE)
		goto drop;
	prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
	if (!prov || !try_module_get(prov->module))
		goto drop;
	rcu_read_unlock();
	prov->recv(lport, fp);
	module_put(prov->module);
	return;
drop:
	rcu_read_unlock();
	FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
	fc_frame_free(fp);
	lport->tt.exch_done(sp);
}

/**
 * fc_lport_reset() - Reset a local port
 * @lport: The local port which should be reset
 *
 * Locking Note: This function should not be called with the
 *		 lport lock held.
 */
int fc_lport_reset(struct fc_lport *lport)
{
	cancel_delayed_work_sync(&lport->retry_work);
	mutex_lock(&lport->lp_mutex);
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
	return 0;
}
EXPORT_SYMBOL(fc_lport_reset);

/**
 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
 * @lport: The local port to be reset
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
985 */ 986 static void fc_lport_reset_locked(struct fc_lport *lport) 987 { 988 if (lport->dns_rdata) 989 lport->tt.rport_logoff(lport->dns_rdata); 990 991 if (lport->ptp_rdata) { 992 lport->tt.rport_logoff(lport->ptp_rdata); 993 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); 994 lport->ptp_rdata = NULL; 995 } 996 997 lport->tt.disc_stop(lport); 998 999 lport->tt.exch_mgr_reset(lport, 0, 0); 1000 fc_host_fabric_name(lport->host) = 0; 1001 1002 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) 1003 fc_lport_set_port_id(lport, 0, NULL); 1004 } 1005 1006 /** 1007 * fc_lport_enter_reset() - Reset the local port 1008 * @lport: The local port to be reset 1009 * 1010 * Locking Note: The lport lock is expected to be held before calling 1011 * this routine. 1012 */ 1013 static void fc_lport_enter_reset(struct fc_lport *lport) 1014 { 1015 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 1016 fc_lport_state(lport)); 1017 1018 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) 1019 return; 1020 1021 if (lport->vport) { 1022 if (lport->link_up) 1023 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 1024 else 1025 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); 1026 } 1027 fc_lport_state_enter(lport, LPORT_ST_RESET); 1028 fc_host_post_event(lport->host, fc_get_event_number(), 1029 FCH_EVT_LIPRESET, 0); 1030 fc_vports_linkchange(lport); 1031 fc_lport_reset_locked(lport); 1032 if (lport->link_up) 1033 fc_lport_enter_flogi(lport); 1034 } 1035 1036 /** 1037 * fc_lport_enter_disabled() - Disable the local port 1038 * @lport: The local port to be reset 1039 * 1040 * Locking Note: The lport lock is expected to be held before calling 1041 * this routine. 1042 */ 1043 static void fc_lport_enter_disabled(struct fc_lport *lport) 1044 { 1045 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", 1046 fc_lport_state(lport)); 1047 1048 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1049 fc_vports_linkchange(lport); 1050 fc_lport_reset_locked(lport); 1051 } 1052 1053 /** 1054 * fc_lport_error() - Handler for any errors 1055 * @lport: The local port that the error was on 1056 * @fp: The error code encoded in a frame pointer 1057 * 1058 * If the error was caused by a resource allocation failure 1059 * then wait for half a second and retry, otherwise retry 1060 * after the e_d_tov time. 1061 */ 1062 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 1063 { 1064 unsigned long delay = 0; 1065 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", 1066 PTR_ERR(fp), fc_lport_state(lport), 1067 lport->retry_count); 1068 1069 if (PTR_ERR(fp) == -FC_EX_CLOSED) 1070 return; 1071 1072 /* 1073 * Memory allocation failure, or the exchange timed out 1074 * or we received LS_RJT. 1075 * Retry after delay 1076 */ 1077 if (lport->retry_count < lport->max_retry_count) { 1078 lport->retry_count++; 1079 if (!fp) 1080 delay = msecs_to_jiffies(500); 1081 else 1082 delay = msecs_to_jiffies(lport->e_d_tov); 1083 1084 schedule_delayed_work(&lport->retry_work, delay); 1085 } else 1086 fc_lport_enter_reset(lport); 1087 } 1088 1089 /** 1090 * fc_lport_ns_resp() - Handle response to a name server 1091 * registration exchange 1092 * @sp: current sequence in exchange 1093 * @fp: response frame 1094 * @lp_arg: Fibre Channel host port instance 1095 * 1096 * Locking Note: This function will be called without the lport lock 1097 * held, but it will lock, call an _enter_* function or fc_lport_error() 1098 * and then unlock the lport. 
1099 */ 1100 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, 1101 void *lp_arg) 1102 { 1103 struct fc_lport *lport = lp_arg; 1104 struct fc_frame_header *fh; 1105 struct fc_ct_hdr *ct; 1106 1107 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); 1108 1109 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1110 return; 1111 1112 mutex_lock(&lport->lp_mutex); 1113 1114 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { 1115 FC_LPORT_DBG(lport, "Received a name server response, " 1116 "but in state %s\n", fc_lport_state(lport)); 1117 if (IS_ERR(fp)) 1118 goto err; 1119 goto out; 1120 } 1121 1122 if (IS_ERR(fp)) { 1123 fc_lport_error(lport, fp); 1124 goto err; 1125 } 1126 1127 fh = fc_frame_header_get(fp); 1128 ct = fc_frame_payload_get(fp, sizeof(*ct)); 1129 1130 if (fh && ct && fh->fh_type == FC_TYPE_CT && 1131 ct->ct_fs_type == FC_FST_DIR && 1132 ct->ct_fs_subtype == FC_NS_SUBTYPE && 1133 ntohs(ct->ct_cmd) == FC_FS_ACC) 1134 switch (lport->state) { 1135 case LPORT_ST_RNN_ID: 1136 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); 1137 break; 1138 case LPORT_ST_RSNN_NN: 1139 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); 1140 break; 1141 case LPORT_ST_RSPN_ID: 1142 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); 1143 break; 1144 case LPORT_ST_RFT_ID: 1145 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); 1146 break; 1147 case LPORT_ST_RFF_ID: 1148 fc_lport_enter_scr(lport); 1149 break; 1150 default: 1151 /* should have already been caught by state checks */ 1152 break; 1153 } 1154 else 1155 fc_lport_error(lport, fp); 1156 out: 1157 fc_frame_free(fp); 1158 err: 1159 mutex_unlock(&lport->lp_mutex); 1160 } 1161 1162 /** 1163 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request 1164 * @sp: current sequence in SCR exchange 1165 * @fp: response frame 1166 * @lp_arg: Fibre Channel lport port instance that sent the registration request 1167 * 1168 * Locking Note: This function will be called without the lport lock 1169 * held, but it will lock, call an _enter_* function or fc_lport_error 1170 * and then unlock the lport. 1171 */ 1172 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, 1173 void *lp_arg) 1174 { 1175 struct fc_lport *lport = lp_arg; 1176 u8 op; 1177 1178 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp)); 1179 1180 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1181 return; 1182 1183 mutex_lock(&lport->lp_mutex); 1184 1185 if (lport->state != LPORT_ST_SCR) { 1186 FC_LPORT_DBG(lport, "Received a SCR response, but in state " 1187 "%s\n", fc_lport_state(lport)); 1188 if (IS_ERR(fp)) 1189 goto err; 1190 goto out; 1191 } 1192 1193 if (IS_ERR(fp)) { 1194 fc_lport_error(lport, fp); 1195 goto err; 1196 } 1197 1198 op = fc_frame_payload_op(fp); 1199 if (op == ELS_LS_ACC) 1200 fc_lport_enter_ready(lport); 1201 else 1202 fc_lport_error(lport, fp); 1203 1204 out: 1205 fc_frame_free(fp); 1206 err: 1207 mutex_unlock(&lport->lp_mutex); 1208 } 1209 1210 /** 1211 * fc_lport_enter_scr() - Send a SCR (State Change Register) request 1212 * @lport: The local port to register for state changes 1213 * 1214 * Locking Note: The lport lock is expected to be held before calling 1215 * this routine. 
 */
static void fc_lport_enter_scr(struct fc_lport *lport)
{
	struct fc_frame *fp;

	FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_SCR);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
				  fc_lport_scr_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_enter_ns() - register some object with the name server
 * @lport: Fibre Channel local port to register
 * @state: The name server registration state to enter
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
	struct fc_frame *fp;
	enum fc_ns_req cmd;
	int size = sizeof(struct fc_ct_hdr);
	size_t len;

	FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
		     fc_lport_state_names[state],
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, state);

	switch (state) {
	case LPORT_ST_RNN_ID:
		cmd = FC_NS_RNN_ID;
		size += sizeof(struct fc_ns_rn_id);
		break;
	case LPORT_ST_RSNN_NN:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSNN_NN;
		size += sizeof(struct fc_ns_rsnn) + len;
		break;
	case LPORT_ST_RSPN_ID:
		len = strnlen(fc_host_symbolic_name(lport->host), 255);
		/* if there is no symbolic name, skip to RFT_ID */
		if (!len)
			return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
		cmd = FC_NS_RSPN_ID;
		size += sizeof(struct fc_ns_rspn) + len;
		break;
	case LPORT_ST_RFT_ID:
		cmd = FC_NS_RFT_ID;
		size += sizeof(struct fc_ns_rft);
		break;
	case LPORT_ST_RFF_ID:
		cmd = FC_NS_RFF_ID;
		size += sizeof(struct fc_ns_rff_id);
		break;
	default:
		fc_lport_error(lport, NULL);
		return;
	}

	fp = fc_frame_alloc(lport, size);
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
				  fc_lport_ns_resp,
				  lport, 3 * lport->r_a_tov))
		fc_lport_error(lport, fp);
}

static struct fc_rport_operations fc_lport_rport_ops = {
	.event_callback = fc_lport_rport_callback,
};

/**
 * fc_lport_enter_dns() - Create a fc_rport for the name server
 * @lport: The local port requesting a remote port for the name server
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_dns(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_DNS);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
	mutex_unlock(&lport->disc.disc_mutex);
	if (!rdata)
		goto err;

	rdata->ops = &fc_lport_rport_ops;
	lport->tt.rport_login(rdata);
	return;

err:
	fc_lport_error(lport, NULL);
}

/**
 * fc_lport_timeout() - Handler for the retry_work timer
 * @work: The work struct of the local port
 */
static void fc_lport_timeout(struct work_struct *work)
{
	struct fc_lport *lport =
		container_of(work, struct fc_lport,
			     retry_work.work);

	mutex_lock(&lport->lp_mutex);

	switch (lport->state) {
	case LPORT_ST_DISABLED:
		WARN_ON(1);
		break;
	case LPORT_ST_READY:
		break;
	case LPORT_ST_RESET:
		break;
	case LPORT_ST_FLOGI:
		fc_lport_enter_flogi(lport);
		break;
	case LPORT_ST_DNS:
		fc_lport_enter_dns(lport);
		break;
	case LPORT_ST_RNN_ID:
	case LPORT_ST_RSNN_NN:
	case LPORT_ST_RSPN_ID:
	case LPORT_ST_RFT_ID:
	case LPORT_ST_RFF_ID:
		fc_lport_enter_ns(lport, lport->state);
		break;
	case LPORT_ST_SCR:
		fc_lport_enter_scr(lport);
		break;
	case LPORT_ST_LOGO:
		fc_lport_enter_logo(lport);
		break;
	}

	mutex_unlock(&lport->lp_mutex);
}

/**
 * fc_lport_logo_resp() - Handle response to LOGO request
 * @sp:	    The sequence that the LOGO was on
 * @fp:	    The LOGO frame
 * @lp_arg: The lport that received the LOGO response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	u8 op;

	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_LOGO) {
		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC)
		fc_lport_enter_disabled(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);

/**
 * fc_lport_enter_logo() - Logout of the fabric
 * @lport: The local port to be logged out
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static void fc_lport_enter_logo(struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_els_logo *logo;

	FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
		     fc_lport_state(lport));

	fc_lport_state_enter(lport, LPORT_ST_LOGO);
	fc_vports_linkchange(lport);

	fp = fc_frame_alloc(lport, sizeof(*logo));
	if (!fp) {
		fc_lport_error(lport, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
				  fc_lport_logo_resp, lport,
				  2 * lport->r_a_tov))
		fc_lport_error(lport, NULL);
}

/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:	    The sequence that the FLOGI was on
 * @fp:	    The FLOGI response frame
 * @lp_arg: The lport that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	did = fc_frame_did(fp);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6.6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
		fc_lport_error(lport, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);

/**
 * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
 * @lport: Fibre Channel local port to be logged in to the fabric
 *
 * Locking Note: The lport lock is expected to be held before calling
* this routine. 1560 */ 1561 void fc_lport_enter_flogi(struct fc_lport *lport) 1562 { 1563 struct fc_frame *fp; 1564 1565 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", 1566 fc_lport_state(lport)); 1567 1568 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1569 1570 if (lport->point_to_multipoint) { 1571 if (lport->port_id) 1572 fc_lport_enter_ready(lport); 1573 return; 1574 } 1575 1576 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 1577 if (!fp) 1578 return fc_lport_error(lport, fp); 1579 1580 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, 1581 lport->vport ? ELS_FDISC : ELS_FLOGI, 1582 fc_lport_flogi_resp, lport, 1583 lport->vport ? 2 * lport->r_a_tov : 1584 lport->e_d_tov)) 1585 fc_lport_error(lport, NULL); 1586 } 1587 1588 /** 1589 * fc_lport_config() - Configure a fc_lport 1590 * @lport: The local port to be configured 1591 */ 1592 int fc_lport_config(struct fc_lport *lport) 1593 { 1594 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); 1595 mutex_init(&lport->lp_mutex); 1596 1597 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1598 1599 fc_lport_add_fc4_type(lport, FC_TYPE_FCP); 1600 fc_lport_add_fc4_type(lport, FC_TYPE_CT); 1601 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); 1602 1603 return 0; 1604 } 1605 EXPORT_SYMBOL(fc_lport_config); 1606 1607 /** 1608 * fc_lport_init() - Initialize the lport layer for a local port 1609 * @lport: The local port to initialize the exchange layer for 1610 */ 1611 int fc_lport_init(struct fc_lport *lport) 1612 { 1613 if (!lport->tt.lport_recv) 1614 lport->tt.lport_recv = fc_lport_recv_req; 1615 1616 if (!lport->tt.lport_reset) 1617 lport->tt.lport_reset = fc_lport_reset; 1618 1619 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1620 fc_host_node_name(lport->host) = lport->wwnn; 1621 fc_host_port_name(lport->host) = lport->wwpn; 1622 fc_host_supported_classes(lport->host) = FC_COS_CLASS3; 1623 memset(fc_host_supported_fc4s(lport->host), 0, 1624 sizeof(fc_host_supported_fc4s(lport->host))); 1625 fc_host_supported_fc4s(lport->host)[2] = 1; 1626 fc_host_supported_fc4s(lport->host)[7] = 1; 1627 1628 /* This value is also unchanging */ 1629 memset(fc_host_active_fc4s(lport->host), 0, 1630 sizeof(fc_host_active_fc4s(lport->host))); 1631 fc_host_active_fc4s(lport->host)[2] = 1; 1632 fc_host_active_fc4s(lport->host)[7] = 1; 1633 fc_host_maxframe_size(lport->host) = lport->mfs; 1634 fc_host_supported_speeds(lport->host) = 0; 1635 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT) 1636 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; 1637 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) 1638 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; 1639 fc_fc4_add_lport(lport); 1640 1641 return 0; 1642 } 1643 EXPORT_SYMBOL(fc_lport_init); 1644 1645 /** 1646 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests 1647 * @sp: The sequence for the FC Passthrough response 1648 * @fp: The response frame 1649 * @info_arg: The BSG info that the response is for 1650 */ 1651 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, 1652 void *info_arg) 1653 { 1654 struct fc_bsg_info *info = info_arg; 1655 struct fc_bsg_job *job = info->job; 1656 struct fc_lport *lport = info->lport; 1657 struct fc_frame_header *fh; 1658 size_t len; 1659 void *buf; 1660 1661 if (IS_ERR(fp)) { 1662 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? 
1663 -ECONNABORTED : -ETIMEDOUT; 1664 job->reply_len = sizeof(uint32_t); 1665 job->state_flags |= FC_RQST_STATE_DONE; 1666 job->job_done(job); 1667 kfree(info); 1668 return; 1669 } 1670 1671 mutex_lock(&lport->lp_mutex); 1672 fh = fc_frame_header_get(fp); 1673 len = fr_len(fp) - sizeof(*fh); 1674 buf = fc_frame_payload_get(fp, 0); 1675 1676 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { 1677 /* Get the response code from the first frame payload */ 1678 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? 1679 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : 1680 (unsigned short)fc_frame_payload_op(fp); 1681 1682 /* Save the reply status of the job */ 1683 job->reply->reply_data.ctels_reply.status = 1684 (cmd == info->rsp_code) ? 1685 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; 1686 } 1687 1688 job->reply->reply_payload_rcv_len += 1689 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, 1690 &info->offset, KM_BIO_SRC_IRQ, NULL); 1691 1692 if (fr_eof(fp) == FC_EOF_T && 1693 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1694 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1695 if (job->reply->reply_payload_rcv_len > 1696 job->reply_payload.payload_len) 1697 job->reply->reply_payload_rcv_len = 1698 job->reply_payload.payload_len; 1699 job->reply->result = 0; 1700 job->state_flags |= FC_RQST_STATE_DONE; 1701 job->job_done(job); 1702 kfree(info); 1703 } 1704 fc_frame_free(fp); 1705 mutex_unlock(&lport->lp_mutex); 1706 } 1707 1708 /** 1709 * fc_lport_els_request() - Send ELS passthrough request 1710 * @job: The BSG Passthrough job 1711 * @lport: The local port sending the request 1712 * @did: The destination port id 1713 * 1714 * Locking Note: The lport lock is expected to be held before calling 1715 * this routine. 1716 */ 1717 static int fc_lport_els_request(struct fc_bsg_job *job, 1718 struct fc_lport *lport, 1719 u32 did, u32 tov) 1720 { 1721 struct fc_bsg_info *info; 1722 struct fc_frame *fp; 1723 struct fc_frame_header *fh; 1724 char *pp; 1725 int len; 1726 1727 fp = fc_frame_alloc(lport, job->request_payload.payload_len); 1728 if (!fp) 1729 return -ENOMEM; 1730 1731 len = job->request_payload.payload_len; 1732 pp = fc_frame_payload_get(fp, len); 1733 1734 sg_copy_to_buffer(job->request_payload.sg_list, 1735 job->request_payload.sg_cnt, 1736 pp, len); 1737 1738 fh = fc_frame_header_get(fp); 1739 fh->fh_r_ctl = FC_RCTL_ELS_REQ; 1740 hton24(fh->fh_d_id, did); 1741 hton24(fh->fh_s_id, lport->port_id); 1742 fh->fh_type = FC_TYPE_ELS; 1743 hton24(fh->fh_f_ctl, FC_FCTL_REQ); 1744 fh->fh_cs_ctl = 0; 1745 fh->fh_df_ctl = 0; 1746 fh->fh_parm_offset = 0; 1747 1748 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); 1749 if (!info) { 1750 fc_frame_free(fp); 1751 return -ENOMEM; 1752 } 1753 1754 info->job = job; 1755 info->lport = lport; 1756 info->rsp_code = ELS_LS_ACC; 1757 info->nents = job->reply_payload.sg_cnt; 1758 info->sg = job->reply_payload.sg_list; 1759 1760 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1761 NULL, info, tov)) { 1762 kfree(info); 1763 return -ECOMM; 1764 } 1765 return 0; 1766 } 1767 1768 /** 1769 * fc_lport_ct_request() - Send CT Passthrough request 1770 * @job: The BSG Passthrough job 1771 * @lport: The local port sending the request 1772 * @did: The destination FC-ID 1773 * @tov: The timeout period to wait for the response 1774 * 1775 * Locking Note: The lport lock is expected to be held before calling 1776 * this routine. 
1777 */ 1778 static int fc_lport_ct_request(struct fc_bsg_job *job, 1779 struct fc_lport *lport, u32 did, u32 tov) 1780 { 1781 struct fc_bsg_info *info; 1782 struct fc_frame *fp; 1783 struct fc_frame_header *fh; 1784 struct fc_ct_req *ct; 1785 size_t len; 1786 1787 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + 1788 job->request_payload.payload_len); 1789 if (!fp) 1790 return -ENOMEM; 1791 1792 len = job->request_payload.payload_len; 1793 ct = fc_frame_payload_get(fp, len); 1794 1795 sg_copy_to_buffer(job->request_payload.sg_list, 1796 job->request_payload.sg_cnt, 1797 ct, len); 1798 1799 fh = fc_frame_header_get(fp); 1800 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; 1801 hton24(fh->fh_d_id, did); 1802 hton24(fh->fh_s_id, lport->port_id); 1803 fh->fh_type = FC_TYPE_CT; 1804 hton24(fh->fh_f_ctl, FC_FCTL_REQ); 1805 fh->fh_cs_ctl = 0; 1806 fh->fh_df_ctl = 0; 1807 fh->fh_parm_offset = 0; 1808 1809 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); 1810 if (!info) { 1811 fc_frame_free(fp); 1812 return -ENOMEM; 1813 } 1814 1815 info->job = job; 1816 info->lport = lport; 1817 info->rsp_code = FC_FS_ACC; 1818 info->nents = job->reply_payload.sg_cnt; 1819 info->sg = job->reply_payload.sg_list; 1820 1821 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1822 NULL, info, tov)) { 1823 kfree(info); 1824 return -ECOMM; 1825 } 1826 return 0; 1827 } 1828 1829 /** 1830 * fc_lport_bsg_request() - The common entry point for sending 1831 * FC Passthrough requests 1832 * @job: The BSG passthrough job 1833 */ 1834 int fc_lport_bsg_request(struct fc_bsg_job *job) 1835 { 1836 struct request *rsp = job->req->next_rq; 1837 struct Scsi_Host *shost = job->shost; 1838 struct fc_lport *lport = shost_priv(shost); 1839 struct fc_rport *rport; 1840 struct fc_rport_priv *rdata; 1841 int rc = -EINVAL; 1842 u32 did; 1843 1844 job->reply->reply_payload_rcv_len = 0; 1845 if (rsp) 1846 rsp->resid_len = job->reply_payload.payload_len; 1847 1848 mutex_lock(&lport->lp_mutex); 1849 1850 switch (job->request->msgcode) { 1851 case FC_BSG_RPT_ELS: 1852 rport = job->rport; 1853 if (!rport) 1854 break; 1855 1856 rdata = rport->dd_data; 1857 rc = fc_lport_els_request(job, lport, rport->port_id, 1858 rdata->e_d_tov); 1859 break; 1860 1861 case FC_BSG_RPT_CT: 1862 rport = job->rport; 1863 if (!rport) 1864 break; 1865 1866 rdata = rport->dd_data; 1867 rc = fc_lport_ct_request(job, lport, rport->port_id, 1868 rdata->e_d_tov); 1869 break; 1870 1871 case FC_BSG_HST_CT: 1872 did = ntoh24(job->request->rqst_data.h_ct.port_id); 1873 if (did == FC_FID_DIR_SERV) 1874 rdata = lport->dns_rdata; 1875 else 1876 rdata = lport->tt.rport_lookup(lport, did); 1877 1878 if (!rdata) 1879 break; 1880 1881 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); 1882 break; 1883 1884 case FC_BSG_HST_ELS_NOLOGIN: 1885 did = ntoh24(job->request->rqst_data.h_els.port_id); 1886 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); 1887 break; 1888 } 1889 1890 mutex_unlock(&lport->lp_mutex); 1891 return rc; 1892 } 1893 EXPORT_SYMBOL(fc_lport_bsg_request); 1894
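
/*
 * Illustrative usage note (a minimal sketch, not code used by libfc itself):
 * a typical lower-level driver brings an lport online with roughly the
 * sequence below, assuming it has already allocated the Scsi_Host, filled
 * in lport->wwnn, lport->wwpn, lport->mfs and the libfc function template,
 * and omitting error handling:
 *
 *	struct fc_lport *lport = shost_priv(shost);
 *
 *	fc_lport_config(lport);		sets up lp_mutex, retry work, FC-4 types
 *	fc_lport_init(lport);		fills in fc_host attributes and handlers
 *	fc_fabric_login(lport);		moves the state machine to RESET
 *	fc_linkup(lport);		link is up: kicks off FLOGI
 */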