/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * PORT LOCKING NOTES
 *
 * These comments only apply to the 'port code' which consists of the lport,
 * disc and rport blocks.
 *
 * MOTIVATION
 *
 * The lport, disc and rport blocks all have mutexes that are used to protect
 * those objects. The main motivation for these locks is to prevent an lport
 * reset just before we send a frame. In that scenario the lport's FID would
 * get set to zero and then we'd send a frame with an invalid SID. We also
 * need to ensure that states don't change unexpectedly while processing
 * another state.
 *
 * HIERARCHY
 *
 * The following hierarchy defines the locking rules. A greater lock
 * may be held before acquiring a lesser lock, but a lesser lock should never
 * be held while attempting to acquire a greater lock. Here is the hierarchy:
 *
 * lport > disc, lport > rport, disc > rport
 *
 * CALLBACKS
 *
 * The callbacks cause complications with this scheme. There is a callback
 * from the rport (to either lport or disc) and a callback from disc
 * (to the lport).
 *
 * As rports exit the rport state machine a callback is made to the owner of
 * the rport to notify success or failure. Since the callback is likely to
 * cause the lport or disc to grab its lock we cannot hold the rport lock
 * while making the callback. To ensure that the rport is not freed while
 * processing the callback the rport callbacks are serialized through a
 * single-threaded workqueue. An rport would never be freed while in a
 * callback handler because no other rport work in this queue can be executed
 * at the same time.
 *
 * When discovery succeeds or fails a callback is made to the lport as
 * notification. Currently, successful discovery causes the lport to take no
 * action. A failure will cause the lport to reset. There is likely a circular
 * locking problem with this implementation.
 */

/*
 * LPORT LOCKING
 *
 * The critical sections protected by the lport's mutex are quite broad and
 * may be improved upon in the future. The lport code and its locking don't
 * influence the I/O path, so excessive locking doesn't penalize I/O
 * performance.
 *
 * The strategy is to lock whenever processing a request or response. Note
 * that every _enter_* function corresponds to a state change. They generally
 * change the lport's state and then send a request out on the wire. We lock
 * before calling any of these functions to protect that state change. This
 * means that the entry points into the lport block manage the locks while
 * the state machine transitions between states (i.e. _enter_* functions)
 * and always stays protected.
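 *
 * As an illustration (a minimal sketch mirroring what fc_lport_reset()
 * below does), an external entry point typically looks like this, with
 * the _enter_* routine assuming the mutex is already held:
 *
 *	mutex_lock(&lport->lp_mutex);
 *	fc_lport_enter_reset(lport);
 *	mutex_unlock(&lport->lp_mutex);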
 *
 * When handling responses we also hold the lport mutex broadly. When the
 * lport receives the response frame it locks the mutex and then calls the
 * appropriate handler for the particular response. Generally a response will
 * trigger a state change and so the lock must already be held.
 *
 * Retries also have to consider the locking. The retries occur from a work
 * context and the work function will lock the lport and then retry the state
 * (i.e. _enter_* function).
 */

#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/fc/fc_gs.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include <linux/scatterlist.h>

#include "fc_libfc.h"

/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO	0x010101
#define FC_LOCAL_PTP_FID_HI	0x010102

#define DNS_DELAY	3 /* Discovery delay after RSCN (in seconds) */

static void fc_lport_error(struct fc_lport *, struct fc_frame *);

static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);

static const char *fc_lport_state_names[] = {
	[LPORT_ST_DISABLED] = "disabled",
	[LPORT_ST_FLOGI] =    "FLOGI",
	[LPORT_ST_DNS] =      "dNS",
	[LPORT_ST_RNN_ID] =   "RNN_ID",
	[LPORT_ST_RSNN_NN] =  "RSNN_NN",
	[LPORT_ST_RSPN_ID] =  "RSPN_ID",
	[LPORT_ST_RFT_ID] =   "RFT_ID",
	[LPORT_ST_RFF_ID] =   "RFF_ID",
	[LPORT_ST_SCR] =      "SCR",
	[LPORT_ST_READY] =    "Ready",
	[LPORT_ST_LOGO] =     "LOGO",
	[LPORT_ST_RESET] =    "reset",
};

/**
 * struct fc_bsg_info - FC Passthrough management structure
 * @job:      The passthrough job
 * @lport:    The local port to pass through a command
 * @rsp_code: The expected response code
 * @sg:       job->reply_payload.sg_list
 * @nents:    job->reply_payload.sg_cnt
 * @offset:   The offset into the response data
 */
struct fc_bsg_info {
	struct fc_bsg_job *job;
	struct fc_lport *lport;
	u16 rsp_code;
	struct scatterlist *sg;
	u32 nents;
	size_t offset;
};

/**
 * fc_frame_drop() - Dummy frame handler
 * @lport: The local port the frame was received on
 * @fp:	   The received frame
 */
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);
	return 0;
}

/**
 * fc_lport_rport_callback() - Event handler for rport events
 * @lport: The lport which is receiving the event
 * @rdata: private remote port data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock should not be held when calling
 *		 this function.
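 *
 * This handler takes the lport mutex itself, so holding the rport mutex
 * here would invert the lport > rport lock ordering described in the
 * PORT LOCKING NOTES above.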
172 */ 173 static void fc_lport_rport_callback(struct fc_lport *lport, 174 struct fc_rport_priv *rdata, 175 enum fc_rport_event event) 176 { 177 FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event, 178 rdata->ids.port_id); 179 180 mutex_lock(&lport->lp_mutex); 181 switch (event) { 182 case RPORT_EV_READY: 183 if (lport->state == LPORT_ST_DNS) { 184 lport->dns_rdata = rdata; 185 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); 186 } else { 187 FC_LPORT_DBG(lport, "Received an READY event " 188 "on port (%6.6x) for the directory " 189 "server, but the lport is not " 190 "in the DNS state, it's in the " 191 "%d state", rdata->ids.port_id, 192 lport->state); 193 lport->tt.rport_logoff(rdata); 194 } 195 break; 196 case RPORT_EV_LOGO: 197 case RPORT_EV_FAILED: 198 case RPORT_EV_STOP: 199 lport->dns_rdata = NULL; 200 break; 201 case RPORT_EV_NONE: 202 break; 203 } 204 mutex_unlock(&lport->lp_mutex); 205 } 206 207 /** 208 * fc_lport_state() - Return a string which represents the lport's state 209 * @lport: The lport whose state is to converted to a string 210 */ 211 static const char *fc_lport_state(struct fc_lport *lport) 212 { 213 const char *cp; 214 215 cp = fc_lport_state_names[lport->state]; 216 if (!cp) 217 cp = "unknown"; 218 return cp; 219 } 220 221 /** 222 * fc_lport_ptp_setup() - Create an rport for point-to-point mode 223 * @lport: The lport to attach the ptp rport to 224 * @remote_fid: The FID of the ptp rport 225 * @remote_wwpn: The WWPN of the ptp rport 226 * @remote_wwnn: The WWNN of the ptp rport 227 */ 228 static void fc_lport_ptp_setup(struct fc_lport *lport, 229 u32 remote_fid, u64 remote_wwpn, 230 u64 remote_wwnn) 231 { 232 mutex_lock(&lport->disc.disc_mutex); 233 if (lport->ptp_rdata) { 234 lport->tt.rport_logoff(lport->ptp_rdata); 235 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); 236 } 237 lport->ptp_rdata = lport->tt.rport_create(lport, remote_fid); 238 kref_get(&lport->ptp_rdata->kref); 239 lport->ptp_rdata->ids.port_name = remote_wwpn; 240 lport->ptp_rdata->ids.node_name = remote_wwnn; 241 mutex_unlock(&lport->disc.disc_mutex); 242 243 lport->tt.rport_login(lport->ptp_rdata); 244 245 fc_lport_enter_ready(lport); 246 } 247 248 /** 249 * fc_get_host_port_state() - Return the port state of the given Scsi_Host 250 * @shost: The SCSI host whose port state is to be determined 251 */ 252 void fc_get_host_port_state(struct Scsi_Host *shost) 253 { 254 struct fc_lport *lport = shost_priv(shost); 255 256 mutex_lock(&lport->lp_mutex); 257 if (!lport->link_up) 258 fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; 259 else 260 switch (lport->state) { 261 case LPORT_ST_READY: 262 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; 263 break; 264 default: 265 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; 266 } 267 mutex_unlock(&lport->lp_mutex); 268 } 269 EXPORT_SYMBOL(fc_get_host_port_state); 270 271 /** 272 * fc_get_host_speed() - Return the speed of the given Scsi_Host 273 * @shost: The SCSI host whose port speed is to be determined 274 */ 275 void fc_get_host_speed(struct Scsi_Host *shost) 276 { 277 struct fc_lport *lport = shost_priv(shost); 278 279 fc_host_speed(shost) = lport->link_speed; 280 } 281 EXPORT_SYMBOL(fc_get_host_speed); 282 283 /** 284 * fc_get_host_stats() - Return the Scsi_Host's statistics 285 * @shost: The SCSI host whose statistics are to be returned 286 */ 287 struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) 288 { 289 struct fc_host_statistics *fcoe_stats; 290 struct fc_lport *lport = shost_priv(shost); 291 struct 
timespec v0, v1; 292 unsigned int cpu; 293 u64 fcp_in_bytes = 0; 294 u64 fcp_out_bytes = 0; 295 296 fcoe_stats = &lport->host_stats; 297 memset(fcoe_stats, 0, sizeof(struct fc_host_statistics)); 298 299 jiffies_to_timespec(jiffies, &v0); 300 jiffies_to_timespec(lport->boot_time, &v1); 301 fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec); 302 303 for_each_possible_cpu(cpu) { 304 struct fcoe_dev_stats *stats; 305 306 stats = per_cpu_ptr(lport->dev_stats, cpu); 307 308 fcoe_stats->tx_frames += stats->TxFrames; 309 fcoe_stats->tx_words += stats->TxWords; 310 fcoe_stats->rx_frames += stats->RxFrames; 311 fcoe_stats->rx_words += stats->RxWords; 312 fcoe_stats->error_frames += stats->ErrorFrames; 313 fcoe_stats->invalid_crc_count += stats->InvalidCRCCount; 314 fcoe_stats->fcp_input_requests += stats->InputRequests; 315 fcoe_stats->fcp_output_requests += stats->OutputRequests; 316 fcoe_stats->fcp_control_requests += stats->ControlRequests; 317 fcp_in_bytes += stats->InputBytes; 318 fcp_out_bytes += stats->OutputBytes; 319 fcoe_stats->link_failure_count += stats->LinkFailureCount; 320 } 321 fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); 322 fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); 323 fcoe_stats->lip_count = -1; 324 fcoe_stats->nos_count = -1; 325 fcoe_stats->loss_of_sync_count = -1; 326 fcoe_stats->loss_of_signal_count = -1; 327 fcoe_stats->prim_seq_protocol_err_count = -1; 328 fcoe_stats->dumped_frames = -1; 329 return fcoe_stats; 330 } 331 EXPORT_SYMBOL(fc_get_host_stats); 332 333 /** 334 * fc_lport_flogi_fill() - Fill in FLOGI command for request 335 * @lport: The local port the FLOGI is for 336 * @flogi: The FLOGI command 337 * @op: The opcode 338 */ 339 static void fc_lport_flogi_fill(struct fc_lport *lport, 340 struct fc_els_flogi *flogi, 341 unsigned int op) 342 { 343 struct fc_els_csp *sp; 344 struct fc_els_cssp *cp; 345 346 memset(flogi, 0, sizeof(*flogi)); 347 flogi->fl_cmd = (u8) op; 348 put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn); 349 put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn); 350 sp = &flogi->fl_csp; 351 sp->sp_hi_ver = 0x20; 352 sp->sp_lo_ver = 0x20; 353 sp->sp_bb_cred = htons(10); /* this gets set by gateway */ 354 sp->sp_bb_data = htons((u16) lport->mfs); 355 cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */ 356 cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ); 357 if (op != ELS_FLOGI) { 358 sp->sp_features = htons(FC_SP_FT_CIRO); 359 sp->sp_tot_seq = htons(255); /* seq. we accept */ 360 sp->sp_rel_off = htons(0x1f); 361 sp->sp_e_d_tov = htonl(lport->e_d_tov); 362 363 cp->cp_rdfs = htons((u16) lport->mfs); 364 cp->cp_con_seq = htons(255); 365 cp->cp_open_seq = 1; 366 } 367 } 368 369 /** 370 * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port 371 * @lport: The local port to add a new FC-4 type to 372 * @type: The new FC-4 type 373 */ 374 static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) 375 { 376 __be32 *mp; 377 378 mp = &lport->fcts.ff_type_map[type / FC_NS_BPW]; 379 *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW)); 380 } 381 382 /** 383 * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. 384 * @lport: Fibre Channel local port receiving the RLIR 385 * @fp: The RLIR request frame 386 * 387 * Locking Note: The lport lock is expected to be held before calling 388 * this function. 
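 *
 * The incident data in the request is not parsed; the RLIR is simply
 * accepted with an LS_ACC reply and the frame is freed.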
389 */ 390 static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) 391 { 392 FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", 393 fc_lport_state(lport)); 394 395 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); 396 fc_frame_free(fp); 397 } 398 399 /** 400 * fc_lport_recv_echo_req() - Handle received ECHO request 401 * @lport: The local port receiving the ECHO 402 * @fp: ECHO request frame 403 * 404 * Locking Note: The lport lock is expected to be held before calling 405 * this function. 406 */ 407 static void fc_lport_recv_echo_req(struct fc_lport *lport, 408 struct fc_frame *in_fp) 409 { 410 struct fc_frame *fp; 411 unsigned int len; 412 void *pp; 413 void *dp; 414 415 FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", 416 fc_lport_state(lport)); 417 418 len = fr_len(in_fp) - sizeof(struct fc_frame_header); 419 pp = fc_frame_payload_get(in_fp, len); 420 421 if (len < sizeof(__be32)) 422 len = sizeof(__be32); 423 424 fp = fc_frame_alloc(lport, len); 425 if (fp) { 426 dp = fc_frame_payload_get(fp, len); 427 memcpy(dp, pp, len); 428 *((__be32 *)dp) = htonl(ELS_LS_ACC << 24); 429 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); 430 lport->tt.frame_send(lport, fp); 431 } 432 fc_frame_free(in_fp); 433 } 434 435 /** 436 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request 437 * @lport: The local port receiving the RNID 438 * @fp: The RNID request frame 439 * 440 * Locking Note: The lport lock is expected to be held before calling 441 * this function. 442 */ 443 static void fc_lport_recv_rnid_req(struct fc_lport *lport, 444 struct fc_frame *in_fp) 445 { 446 struct fc_frame *fp; 447 struct fc_els_rnid *req; 448 struct { 449 struct fc_els_rnid_resp rnid; 450 struct fc_els_rnid_cid cid; 451 struct fc_els_rnid_gen gen; 452 } *rp; 453 struct fc_seq_els_data rjt_data; 454 u8 fmt; 455 size_t len; 456 457 FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", 458 fc_lport_state(lport)); 459 460 req = fc_frame_payload_get(in_fp, sizeof(*req)); 461 if (!req) { 462 rjt_data.reason = ELS_RJT_LOGIC; 463 rjt_data.explan = ELS_EXPL_NONE; 464 lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data); 465 } else { 466 fmt = req->rnid_fmt; 467 len = sizeof(*rp); 468 if (fmt != ELS_RNIDF_GEN || 469 ntohl(lport->rnid_gen.rnid_atype) == 0) { 470 fmt = ELS_RNIDF_NONE; /* nothing to provide */ 471 len -= sizeof(rp->gen); 472 } 473 fp = fc_frame_alloc(lport, len); 474 if (fp) { 475 rp = fc_frame_payload_get(fp, len); 476 memset(rp, 0, len); 477 rp->rnid.rnid_cmd = ELS_LS_ACC; 478 rp->rnid.rnid_fmt = fmt; 479 rp->rnid.rnid_cid_len = sizeof(rp->cid); 480 rp->cid.rnid_wwpn = htonll(lport->wwpn); 481 rp->cid.rnid_wwnn = htonll(lport->wwnn); 482 if (fmt == ELS_RNIDF_GEN) { 483 rp->rnid.rnid_sid_len = sizeof(rp->gen); 484 memcpy(&rp->gen, &lport->rnid_gen, 485 sizeof(rp->gen)); 486 } 487 fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0); 488 lport->tt.frame_send(lport, fp); 489 } 490 } 491 fc_frame_free(in_fp); 492 } 493 494 /** 495 * fc_lport_recv_logo_req() - Handle received fabric LOGO request 496 * @lport: The local port receiving the LOGO 497 * @fp: The LOGO request frame 498 * 499 * Locking Note: The lport lock is exected to be held before calling 500 * this function. 
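 *
 * The LOGO is accepted with an LS_ACC reply and the local port is then
 * reset via fc_lport_enter_reset().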
501 */ 502 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) 503 { 504 lport->tt.seq_els_rsp_send(fp, ELS_LS_ACC, NULL); 505 fc_lport_enter_reset(lport); 506 fc_frame_free(fp); 507 } 508 509 /** 510 * fc_fabric_login() - Start the lport state machine 511 * @lport: The local port that should log into the fabric 512 * 513 * Locking Note: This function should not be called 514 * with the lport lock held. 515 */ 516 int fc_fabric_login(struct fc_lport *lport) 517 { 518 int rc = -1; 519 520 mutex_lock(&lport->lp_mutex); 521 if (lport->state == LPORT_ST_DISABLED || 522 lport->state == LPORT_ST_LOGO) { 523 fc_lport_state_enter(lport, LPORT_ST_RESET); 524 fc_lport_enter_reset(lport); 525 rc = 0; 526 } 527 mutex_unlock(&lport->lp_mutex); 528 529 return rc; 530 } 531 EXPORT_SYMBOL(fc_fabric_login); 532 533 /** 534 * __fc_linkup() - Handler for transport linkup events 535 * @lport: The lport whose link is up 536 * 537 * Locking: must be called with the lp_mutex held 538 */ 539 void __fc_linkup(struct fc_lport *lport) 540 { 541 if (!lport->link_up) { 542 lport->link_up = 1; 543 544 if (lport->state == LPORT_ST_RESET) 545 fc_lport_enter_flogi(lport); 546 } 547 } 548 549 /** 550 * fc_linkup() - Handler for transport linkup events 551 * @lport: The local port whose link is up 552 */ 553 void fc_linkup(struct fc_lport *lport) 554 { 555 printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n", 556 lport->host->host_no, lport->port_id); 557 558 mutex_lock(&lport->lp_mutex); 559 __fc_linkup(lport); 560 mutex_unlock(&lport->lp_mutex); 561 } 562 EXPORT_SYMBOL(fc_linkup); 563 564 /** 565 * __fc_linkdown() - Handler for transport linkdown events 566 * @lport: The lport whose link is down 567 * 568 * Locking: must be called with the lp_mutex held 569 */ 570 void __fc_linkdown(struct fc_lport *lport) 571 { 572 if (lport->link_up) { 573 lport->link_up = 0; 574 fc_lport_enter_reset(lport); 575 lport->tt.fcp_cleanup(lport); 576 } 577 } 578 579 /** 580 * fc_linkdown() - Handler for transport linkdown events 581 * @lport: The local port whose link is down 582 */ 583 void fc_linkdown(struct fc_lport *lport) 584 { 585 printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n", 586 lport->host->host_no, lport->port_id); 587 588 mutex_lock(&lport->lp_mutex); 589 __fc_linkdown(lport); 590 mutex_unlock(&lport->lp_mutex); 591 } 592 EXPORT_SYMBOL(fc_linkdown); 593 594 /** 595 * fc_fabric_logoff() - Logout of the fabric 596 * @lport: The local port to logoff the fabric 597 * 598 * Return value: 599 * 0 for success, -1 for failure 600 */ 601 int fc_fabric_logoff(struct fc_lport *lport) 602 { 603 lport->tt.disc_stop_final(lport); 604 mutex_lock(&lport->lp_mutex); 605 if (lport->dns_rdata) 606 lport->tt.rport_logoff(lport->dns_rdata); 607 mutex_unlock(&lport->lp_mutex); 608 lport->tt.rport_flush_queue(); 609 mutex_lock(&lport->lp_mutex); 610 fc_lport_enter_logo(lport); 611 mutex_unlock(&lport->lp_mutex); 612 cancel_delayed_work_sync(&lport->retry_work); 613 return 0; 614 } 615 EXPORT_SYMBOL(fc_fabric_logoff); 616 617 /** 618 * fc_lport_destroy() - Unregister a fc_lport 619 * @lport: The local port to unregister 620 * 621 * Note: 622 * exit routine for fc_lport instance 623 * clean-up all the allocated memory 624 * and free up other system resources. 
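 * The frame_send handler is pointed at fc_frame_drop() so that frames
 * submitted after this point are silently discarded; outstanding I/O is
 * then aborted, discovery is stopped and the exchange managers are reset.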
625 * 626 */ 627 int fc_lport_destroy(struct fc_lport *lport) 628 { 629 mutex_lock(&lport->lp_mutex); 630 lport->state = LPORT_ST_DISABLED; 631 lport->link_up = 0; 632 lport->tt.frame_send = fc_frame_drop; 633 mutex_unlock(&lport->lp_mutex); 634 635 lport->tt.fcp_abort_io(lport); 636 lport->tt.disc_stop_final(lport); 637 lport->tt.exch_mgr_reset(lport, 0, 0); 638 fc_fc4_del_lport(lport); 639 return 0; 640 } 641 EXPORT_SYMBOL(fc_lport_destroy); 642 643 /** 644 * fc_set_mfs() - Set the maximum frame size for a local port 645 * @lport: The local port to set the MFS for 646 * @mfs: The new MFS 647 */ 648 int fc_set_mfs(struct fc_lport *lport, u32 mfs) 649 { 650 unsigned int old_mfs; 651 int rc = -EINVAL; 652 653 mutex_lock(&lport->lp_mutex); 654 655 old_mfs = lport->mfs; 656 657 if (mfs >= FC_MIN_MAX_FRAME) { 658 mfs &= ~3; 659 if (mfs > FC_MAX_FRAME) 660 mfs = FC_MAX_FRAME; 661 mfs -= sizeof(struct fc_frame_header); 662 lport->mfs = mfs; 663 rc = 0; 664 } 665 666 if (!rc && mfs < old_mfs) 667 fc_lport_enter_reset(lport); 668 669 mutex_unlock(&lport->lp_mutex); 670 671 return rc; 672 } 673 EXPORT_SYMBOL(fc_set_mfs); 674 675 /** 676 * fc_lport_disc_callback() - Callback for discovery events 677 * @lport: The local port receiving the event 678 * @event: The discovery event 679 */ 680 void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event) 681 { 682 switch (event) { 683 case DISC_EV_SUCCESS: 684 FC_LPORT_DBG(lport, "Discovery succeeded\n"); 685 break; 686 case DISC_EV_FAILED: 687 printk(KERN_ERR "host%d: libfc: " 688 "Discovery failed for port (%6.6x)\n", 689 lport->host->host_no, lport->port_id); 690 mutex_lock(&lport->lp_mutex); 691 fc_lport_enter_reset(lport); 692 mutex_unlock(&lport->lp_mutex); 693 break; 694 case DISC_EV_NONE: 695 WARN_ON(1); 696 break; 697 } 698 } 699 700 /** 701 * fc_rport_enter_ready() - Enter the ready state and start discovery 702 * @lport: The local port that is ready 703 * 704 * Locking Note: The lport lock is expected to be held before calling 705 * this routine. 706 */ 707 static void fc_lport_enter_ready(struct fc_lport *lport) 708 { 709 FC_LPORT_DBG(lport, "Entered READY from state %s\n", 710 fc_lport_state(lport)); 711 712 fc_lport_state_enter(lport, LPORT_ST_READY); 713 if (lport->vport) 714 fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE); 715 fc_vports_linkchange(lport); 716 717 if (!lport->ptp_rdata) 718 lport->tt.disc_start(fc_lport_disc_callback, lport); 719 } 720 721 /** 722 * fc_lport_set_port_id() - set the local port Port ID 723 * @lport: The local port which will have its Port ID set. 724 * @port_id: The new port ID. 725 * @fp: The frame containing the incoming request, or NULL. 726 * 727 * Locking Note: The lport lock is expected to be held before calling 728 * this function. 729 */ 730 static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, 731 struct fc_frame *fp) 732 { 733 if (port_id) 734 printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n", 735 lport->host->host_no, port_id); 736 737 lport->port_id = port_id; 738 739 /* Update the fc_host */ 740 fc_host_port_id(lport->host) = port_id; 741 742 if (lport->tt.lport_set_port_id) 743 lport->tt.lport_set_port_id(lport, port_id, fp); 744 } 745 746 /** 747 * fc_lport_set_port_id() - set the local port Port ID for point-to-multipoint 748 * @lport: The local port which will have its Port ID set. 749 * @port_id: The new port ID. 750 * 751 * Called by the lower-level driver when transport sets the local port_id. 
752 * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and 753 * discovery to be skipped. 754 */ 755 void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id) 756 { 757 mutex_lock(&lport->lp_mutex); 758 759 fc_lport_set_port_id(lport, port_id, NULL); 760 761 switch (lport->state) { 762 case LPORT_ST_RESET: 763 case LPORT_ST_FLOGI: 764 if (port_id) 765 fc_lport_enter_ready(lport); 766 break; 767 default: 768 break; 769 } 770 mutex_unlock(&lport->lp_mutex); 771 } 772 EXPORT_SYMBOL(fc_lport_set_local_id); 773 774 /** 775 * fc_lport_recv_flogi_req() - Receive a FLOGI request 776 * @lport: The local port that received the request 777 * @rx_fp: The FLOGI frame 778 * 779 * A received FLOGI request indicates a point-to-point connection. 780 * Accept it with the common service parameters indicating our N port. 781 * Set up to do a PLOGI if we have the higher-number WWPN. 782 * 783 * Locking Note: The lport lock is expected to be held before calling 784 * this function. 785 */ 786 static void fc_lport_recv_flogi_req(struct fc_lport *lport, 787 struct fc_frame *rx_fp) 788 { 789 struct fc_frame *fp; 790 struct fc_frame_header *fh; 791 struct fc_els_flogi *flp; 792 struct fc_els_flogi *new_flp; 793 u64 remote_wwpn; 794 u32 remote_fid; 795 u32 local_fid; 796 797 FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", 798 fc_lport_state(lport)); 799 800 remote_fid = fc_frame_sid(rx_fp); 801 flp = fc_frame_payload_get(rx_fp, sizeof(*flp)); 802 if (!flp) 803 goto out; 804 remote_wwpn = get_unaligned_be64(&flp->fl_wwpn); 805 if (remote_wwpn == lport->wwpn) { 806 printk(KERN_WARNING "host%d: libfc: Received FLOGI from port " 807 "with same WWPN %16.16llx\n", 808 lport->host->host_no, remote_wwpn); 809 goto out; 810 } 811 FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn); 812 813 /* 814 * XXX what is the right thing to do for FIDs? 815 * The originator might expect our S_ID to be 0xfffffe. 816 * But if so, both of us could end up with the same FID. 817 */ 818 local_fid = FC_LOCAL_PTP_FID_LO; 819 if (remote_wwpn < lport->wwpn) { 820 local_fid = FC_LOCAL_PTP_FID_HI; 821 if (!remote_fid || remote_fid == local_fid) 822 remote_fid = FC_LOCAL_PTP_FID_LO; 823 } else if (!remote_fid) { 824 remote_fid = FC_LOCAL_PTP_FID_HI; 825 } 826 827 fc_lport_set_port_id(lport, local_fid, rx_fp); 828 829 fp = fc_frame_alloc(lport, sizeof(*flp)); 830 if (fp) { 831 new_flp = fc_frame_payload_get(fp, sizeof(*flp)); 832 fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI); 833 new_flp->fl_cmd = (u8) ELS_LS_ACC; 834 835 /* 836 * Send the response. If this fails, the originator should 837 * repeat the sequence. 838 */ 839 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); 840 fh = fc_frame_header_get(fp); 841 hton24(fh->fh_s_id, local_fid); 842 hton24(fh->fh_d_id, remote_fid); 843 lport->tt.frame_send(lport, fp); 844 845 } else { 846 fc_lport_error(lport, fp); 847 } 848 fc_lport_ptp_setup(lport, remote_fid, remote_wwpn, 849 get_unaligned_be64(&flp->fl_wwnn)); 850 out: 851 fc_frame_free(rx_fp); 852 } 853 854 /** 855 * fc_lport_recv_els_req() - The generic lport ELS request handler 856 * @lport: The local port that received the request 857 * @fp: The request frame 858 * 859 * This function will see if the lport handles the request or 860 * if an rport should handle the request. 861 * 862 * Locking Note: This function should not be called with the lport 863 * lock held because it will grab the lock. 
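 *
 * FLOGI (unless in point-to-multipoint mode), LOGO from the fabric
 * (FC_FID_FLOGI), RSCN, ECHO, RLIR and RNID are handled by the lport
 * itself; any other ELS is passed to the rport code via rport_recv_req.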
864 */ 865 static void fc_lport_recv_els_req(struct fc_lport *lport, 866 struct fc_frame *fp) 867 { 868 void (*recv)(struct fc_lport *, struct fc_frame *); 869 870 mutex_lock(&lport->lp_mutex); 871 872 /* 873 * Handle special ELS cases like FLOGI, LOGO, and 874 * RSCN here. These don't require a session. 875 * Even if we had a session, it might not be ready. 876 */ 877 if (!lport->link_up) 878 fc_frame_free(fp); 879 else { 880 /* 881 * Check opcode. 882 */ 883 recv = lport->tt.rport_recv_req; 884 switch (fc_frame_payload_op(fp)) { 885 case ELS_FLOGI: 886 if (!lport->point_to_multipoint) 887 recv = fc_lport_recv_flogi_req; 888 break; 889 case ELS_LOGO: 890 if (fc_frame_sid(fp) == FC_FID_FLOGI) 891 recv = fc_lport_recv_logo_req; 892 break; 893 case ELS_RSCN: 894 recv = lport->tt.disc_recv_req; 895 break; 896 case ELS_ECHO: 897 recv = fc_lport_recv_echo_req; 898 break; 899 case ELS_RLIR: 900 recv = fc_lport_recv_rlir_req; 901 break; 902 case ELS_RNID: 903 recv = fc_lport_recv_rnid_req; 904 break; 905 } 906 907 recv(lport, fp); 908 } 909 mutex_unlock(&lport->lp_mutex); 910 } 911 912 static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, 913 const struct fc_els_spp *spp_in, 914 struct fc_els_spp *spp_out) 915 { 916 return FC_SPP_RESP_INVL; 917 } 918 919 struct fc4_prov fc_lport_els_prov = { 920 .prli = fc_lport_els_prli, 921 .recv = fc_lport_recv_els_req, 922 }; 923 924 /** 925 * fc_lport_recv_req() - The generic lport request handler 926 * @lport: The lport that received the request 927 * @fp: The frame the request is in 928 * 929 * Locking Note: This function should not be called with the lport 930 * lock held because it may grab the lock. 931 */ 932 static void fc_lport_recv_req(struct fc_lport *lport, 933 struct fc_frame *fp) 934 { 935 struct fc_frame_header *fh = fc_frame_header_get(fp); 936 struct fc_seq *sp = fr_seq(fp); 937 struct fc4_prov *prov; 938 939 /* 940 * Use RCU read lock and module_lock to be sure module doesn't 941 * deregister and get unloaded while we're calling it. 942 * try_module_get() is inlined and accepts a NULL parameter. 943 * Only ELSes and FCP target ops should come through here. 944 * The locking is unfortunate, and a better scheme is being sought. 945 */ 946 947 rcu_read_lock(); 948 if (fh->fh_type >= FC_FC4_PROV_SIZE) 949 goto drop; 950 prov = rcu_dereference(fc_passive_prov[fh->fh_type]); 951 if (!prov || !try_module_get(prov->module)) 952 goto drop; 953 rcu_read_unlock(); 954 prov->recv(lport, fp); 955 module_put(prov->module); 956 return; 957 drop: 958 rcu_read_unlock(); 959 FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); 960 fc_frame_free(fp); 961 lport->tt.exch_done(sp); 962 } 963 964 /** 965 * fc_lport_reset() - Reset a local port 966 * @lport: The local port which should be reset 967 * 968 * Locking Note: This functions should not be called with the 969 * lport lock held. 970 */ 971 int fc_lport_reset(struct fc_lport *lport) 972 { 973 cancel_delayed_work_sync(&lport->retry_work); 974 mutex_lock(&lport->lp_mutex); 975 fc_lport_enter_reset(lport); 976 mutex_unlock(&lport->lp_mutex); 977 return 0; 978 } 979 EXPORT_SYMBOL(fc_lport_reset); 980 981 /** 982 * fc_lport_reset_locked() - Reset the local port w/ the lport lock held 983 * @lport: The local port to be reset 984 * 985 * Locking Note: The lport lock is expected to be held before calling 986 * this routine. 
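 *
 * This logs off the dNS and point-to-point rports, stops discovery and
 * resets the exchange managers; the assigned port ID is cleared unless the
 * port is in point-to-multipoint mode with the link still up.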
987 */ 988 static void fc_lport_reset_locked(struct fc_lport *lport) 989 { 990 if (lport->dns_rdata) 991 lport->tt.rport_logoff(lport->dns_rdata); 992 993 if (lport->ptp_rdata) { 994 lport->tt.rport_logoff(lport->ptp_rdata); 995 kref_put(&lport->ptp_rdata->kref, lport->tt.rport_destroy); 996 lport->ptp_rdata = NULL; 997 } 998 999 lport->tt.disc_stop(lport); 1000 1001 lport->tt.exch_mgr_reset(lport, 0, 0); 1002 fc_host_fabric_name(lport->host) = 0; 1003 1004 if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up)) 1005 fc_lport_set_port_id(lport, 0, NULL); 1006 } 1007 1008 /** 1009 * fc_lport_enter_reset() - Reset the local port 1010 * @lport: The local port to be reset 1011 * 1012 * Locking Note: The lport lock is expected to be held before calling 1013 * this routine. 1014 */ 1015 static void fc_lport_enter_reset(struct fc_lport *lport) 1016 { 1017 FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", 1018 fc_lport_state(lport)); 1019 1020 if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO) 1021 return; 1022 1023 if (lport->vport) { 1024 if (lport->link_up) 1025 fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING); 1026 else 1027 fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN); 1028 } 1029 fc_lport_state_enter(lport, LPORT_ST_RESET); 1030 fc_host_post_event(lport->host, fc_get_event_number(), 1031 FCH_EVT_LIPRESET, 0); 1032 fc_vports_linkchange(lport); 1033 fc_lport_reset_locked(lport); 1034 if (lport->link_up) 1035 fc_lport_enter_flogi(lport); 1036 } 1037 1038 /** 1039 * fc_lport_enter_disabled() - Disable the local port 1040 * @lport: The local port to be reset 1041 * 1042 * Locking Note: The lport lock is expected to be held before calling 1043 * this routine. 1044 */ 1045 static void fc_lport_enter_disabled(struct fc_lport *lport) 1046 { 1047 FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", 1048 fc_lport_state(lport)); 1049 1050 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1051 fc_vports_linkchange(lport); 1052 fc_lport_reset_locked(lport); 1053 } 1054 1055 /** 1056 * fc_lport_error() - Handler for any errors 1057 * @lport: The local port that the error was on 1058 * @fp: The error code encoded in a frame pointer 1059 * 1060 * If the error was caused by a resource allocation failure 1061 * then wait for half a second and retry, otherwise retry 1062 * after the e_d_tov time. 1063 */ 1064 static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp) 1065 { 1066 unsigned long delay = 0; 1067 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", 1068 PTR_ERR(fp), fc_lport_state(lport), 1069 lport->retry_count); 1070 1071 if (PTR_ERR(fp) == -FC_EX_CLOSED) 1072 return; 1073 1074 /* 1075 * Memory allocation failure, or the exchange timed out 1076 * or we received LS_RJT. 1077 * Retry after delay 1078 */ 1079 if (lport->retry_count < lport->max_retry_count) { 1080 lport->retry_count++; 1081 if (!fp) 1082 delay = msecs_to_jiffies(500); 1083 else 1084 delay = msecs_to_jiffies(lport->e_d_tov); 1085 1086 schedule_delayed_work(&lport->retry_work, delay); 1087 } else 1088 fc_lport_enter_reset(lport); 1089 } 1090 1091 /** 1092 * fc_lport_ns_resp() - Handle response to a name server 1093 * registration exchange 1094 * @sp: current sequence in exchange 1095 * @fp: response frame 1096 * @lp_arg: Fibre Channel host port instance 1097 * 1098 * Locking Note: This function will be called without the lport lock 1099 * held, but it will lock, call an _enter_* function or fc_lport_error() 1100 * and then unlock the lport. 
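 *
 * Each accepted (FC_FS_ACC) reply advances the registration sequence
 * RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID; after RFF_ID an SCR
 * is sent.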
1101 */ 1102 static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp, 1103 void *lp_arg) 1104 { 1105 struct fc_lport *lport = lp_arg; 1106 struct fc_frame_header *fh; 1107 struct fc_ct_hdr *ct; 1108 1109 FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp)); 1110 1111 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1112 return; 1113 1114 mutex_lock(&lport->lp_mutex); 1115 1116 if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) { 1117 FC_LPORT_DBG(lport, "Received a name server response, " 1118 "but in state %s\n", fc_lport_state(lport)); 1119 if (IS_ERR(fp)) 1120 goto err; 1121 goto out; 1122 } 1123 1124 if (IS_ERR(fp)) { 1125 fc_lport_error(lport, fp); 1126 goto err; 1127 } 1128 1129 fh = fc_frame_header_get(fp); 1130 ct = fc_frame_payload_get(fp, sizeof(*ct)); 1131 1132 if (fh && ct && fh->fh_type == FC_TYPE_CT && 1133 ct->ct_fs_type == FC_FST_DIR && 1134 ct->ct_fs_subtype == FC_NS_SUBTYPE && 1135 ntohs(ct->ct_cmd) == FC_FS_ACC) 1136 switch (lport->state) { 1137 case LPORT_ST_RNN_ID: 1138 fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN); 1139 break; 1140 case LPORT_ST_RSNN_NN: 1141 fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID); 1142 break; 1143 case LPORT_ST_RSPN_ID: 1144 fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); 1145 break; 1146 case LPORT_ST_RFT_ID: 1147 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); 1148 break; 1149 case LPORT_ST_RFF_ID: 1150 fc_lport_enter_scr(lport); 1151 break; 1152 default: 1153 /* should have already been caught by state checks */ 1154 break; 1155 } 1156 else 1157 fc_lport_error(lport, fp); 1158 out: 1159 fc_frame_free(fp); 1160 err: 1161 mutex_unlock(&lport->lp_mutex); 1162 } 1163 1164 /** 1165 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request 1166 * @sp: current sequence in SCR exchange 1167 * @fp: response frame 1168 * @lp_arg: Fibre Channel lport port instance that sent the registration request 1169 * 1170 * Locking Note: This function will be called without the lport lock 1171 * held, but it will lock, call an _enter_* function or fc_lport_error 1172 * and then unlock the lport. 1173 */ 1174 static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp, 1175 void *lp_arg) 1176 { 1177 struct fc_lport *lport = lp_arg; 1178 u8 op; 1179 1180 FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp)); 1181 1182 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1183 return; 1184 1185 mutex_lock(&lport->lp_mutex); 1186 1187 if (lport->state != LPORT_ST_SCR) { 1188 FC_LPORT_DBG(lport, "Received a SCR response, but in state " 1189 "%s\n", fc_lport_state(lport)); 1190 if (IS_ERR(fp)) 1191 goto err; 1192 goto out; 1193 } 1194 1195 if (IS_ERR(fp)) { 1196 fc_lport_error(lport, fp); 1197 goto err; 1198 } 1199 1200 op = fc_frame_payload_op(fp); 1201 if (op == ELS_LS_ACC) 1202 fc_lport_enter_ready(lport); 1203 else 1204 fc_lport_error(lport, fp); 1205 1206 out: 1207 fc_frame_free(fp); 1208 err: 1209 mutex_unlock(&lport->lp_mutex); 1210 } 1211 1212 /** 1213 * fc_lport_enter_scr() - Send a SCR (State Change Register) request 1214 * @lport: The local port to register for state changes 1215 * 1216 * Locking Note: The lport lock is expected to be held before calling 1217 * this routine. 
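 *
 * The SCR is sent to the fabric controller (FC_FID_FCTRL) with a timeout
 * of twice R_A_TOV; an LS_ACC response moves the port to the READY state.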
1218 */ 1219 static void fc_lport_enter_scr(struct fc_lport *lport) 1220 { 1221 struct fc_frame *fp; 1222 1223 FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", 1224 fc_lport_state(lport)); 1225 1226 fc_lport_state_enter(lport, LPORT_ST_SCR); 1227 1228 fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr)); 1229 if (!fp) { 1230 fc_lport_error(lport, fp); 1231 return; 1232 } 1233 1234 if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR, 1235 fc_lport_scr_resp, lport, 1236 2 * lport->r_a_tov)) 1237 fc_lport_error(lport, NULL); 1238 } 1239 1240 /** 1241 * fc_lport_enter_ns() - register some object with the name server 1242 * @lport: Fibre Channel local port to register 1243 * 1244 * Locking Note: The lport lock is expected to be held before calling 1245 * this routine. 1246 */ 1247 static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) 1248 { 1249 struct fc_frame *fp; 1250 enum fc_ns_req cmd; 1251 int size = sizeof(struct fc_ct_hdr); 1252 size_t len; 1253 1254 FC_LPORT_DBG(lport, "Entered %s state from %s state\n", 1255 fc_lport_state_names[state], 1256 fc_lport_state(lport)); 1257 1258 fc_lport_state_enter(lport, state); 1259 1260 switch (state) { 1261 case LPORT_ST_RNN_ID: 1262 cmd = FC_NS_RNN_ID; 1263 size += sizeof(struct fc_ns_rn_id); 1264 break; 1265 case LPORT_ST_RSNN_NN: 1266 len = strnlen(fc_host_symbolic_name(lport->host), 255); 1267 /* if there is no symbolic name, skip to RFT_ID */ 1268 if (!len) 1269 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); 1270 cmd = FC_NS_RSNN_NN; 1271 size += sizeof(struct fc_ns_rsnn) + len; 1272 break; 1273 case LPORT_ST_RSPN_ID: 1274 len = strnlen(fc_host_symbolic_name(lport->host), 255); 1275 /* if there is no symbolic name, skip to RFT_ID */ 1276 if (!len) 1277 return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID); 1278 cmd = FC_NS_RSPN_ID; 1279 size += sizeof(struct fc_ns_rspn) + len; 1280 break; 1281 case LPORT_ST_RFT_ID: 1282 cmd = FC_NS_RFT_ID; 1283 size += sizeof(struct fc_ns_rft); 1284 break; 1285 case LPORT_ST_RFF_ID: 1286 cmd = FC_NS_RFF_ID; 1287 size += sizeof(struct fc_ns_rff_id); 1288 break; 1289 default: 1290 fc_lport_error(lport, NULL); 1291 return; 1292 } 1293 1294 fp = fc_frame_alloc(lport, size); 1295 if (!fp) { 1296 fc_lport_error(lport, fp); 1297 return; 1298 } 1299 1300 if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd, 1301 fc_lport_ns_resp, 1302 lport, 3 * lport->r_a_tov)) 1303 fc_lport_error(lport, fp); 1304 } 1305 1306 static struct fc_rport_operations fc_lport_rport_ops = { 1307 .event_callback = fc_lport_rport_callback, 1308 }; 1309 1310 /** 1311 * fc_rport_enter_dns() - Create a fc_rport for the name server 1312 * @lport: The local port requesting a remote port for the name server 1313 * 1314 * Locking Note: The lport lock is expected to be held before calling 1315 * this routine. 
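 *
 * An rport for the directory server (FC_FID_DIR_SERV) is created and
 * logged in; its READY event, delivered to fc_lport_rport_callback(),
 * then starts the name server registrations.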
1316 */ 1317 static void fc_lport_enter_dns(struct fc_lport *lport) 1318 { 1319 struct fc_rport_priv *rdata; 1320 1321 FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", 1322 fc_lport_state(lport)); 1323 1324 fc_lport_state_enter(lport, LPORT_ST_DNS); 1325 1326 mutex_lock(&lport->disc.disc_mutex); 1327 rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV); 1328 mutex_unlock(&lport->disc.disc_mutex); 1329 if (!rdata) 1330 goto err; 1331 1332 rdata->ops = &fc_lport_rport_ops; 1333 lport->tt.rport_login(rdata); 1334 return; 1335 1336 err: 1337 fc_lport_error(lport, NULL); 1338 } 1339 1340 /** 1341 * fc_lport_timeout() - Handler for the retry_work timer 1342 * @work: The work struct of the local port 1343 */ 1344 static void fc_lport_timeout(struct work_struct *work) 1345 { 1346 struct fc_lport *lport = 1347 container_of(work, struct fc_lport, 1348 retry_work.work); 1349 1350 mutex_lock(&lport->lp_mutex); 1351 1352 switch (lport->state) { 1353 case LPORT_ST_DISABLED: 1354 WARN_ON(1); 1355 break; 1356 case LPORT_ST_READY: 1357 break; 1358 case LPORT_ST_RESET: 1359 break; 1360 case LPORT_ST_FLOGI: 1361 fc_lport_enter_flogi(lport); 1362 break; 1363 case LPORT_ST_DNS: 1364 fc_lport_enter_dns(lport); 1365 break; 1366 case LPORT_ST_RNN_ID: 1367 case LPORT_ST_RSNN_NN: 1368 case LPORT_ST_RSPN_ID: 1369 case LPORT_ST_RFT_ID: 1370 case LPORT_ST_RFF_ID: 1371 fc_lport_enter_ns(lport, lport->state); 1372 break; 1373 case LPORT_ST_SCR: 1374 fc_lport_enter_scr(lport); 1375 break; 1376 case LPORT_ST_LOGO: 1377 fc_lport_enter_logo(lport); 1378 break; 1379 } 1380 1381 mutex_unlock(&lport->lp_mutex); 1382 } 1383 1384 /** 1385 * fc_lport_logo_resp() - Handle response to LOGO request 1386 * @sp: The sequence that the LOGO was on 1387 * @fp: The LOGO frame 1388 * @lp_arg: The lport port that received the LOGO request 1389 * 1390 * Locking Note: This function will be called without the lport lock 1391 * held, but it will lock, call an _enter_* function or fc_lport_error() 1392 * and then unlock the lport. 1393 */ 1394 void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, 1395 void *lp_arg) 1396 { 1397 struct fc_lport *lport = lp_arg; 1398 u8 op; 1399 1400 FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp)); 1401 1402 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1403 return; 1404 1405 mutex_lock(&lport->lp_mutex); 1406 1407 if (lport->state != LPORT_ST_LOGO) { 1408 FC_LPORT_DBG(lport, "Received a LOGO response, but in state " 1409 "%s\n", fc_lport_state(lport)); 1410 if (IS_ERR(fp)) 1411 goto err; 1412 goto out; 1413 } 1414 1415 if (IS_ERR(fp)) { 1416 fc_lport_error(lport, fp); 1417 goto err; 1418 } 1419 1420 op = fc_frame_payload_op(fp); 1421 if (op == ELS_LS_ACC) 1422 fc_lport_enter_disabled(lport); 1423 else 1424 fc_lport_error(lport, fp); 1425 1426 out: 1427 fc_frame_free(fp); 1428 err: 1429 mutex_unlock(&lport->lp_mutex); 1430 } 1431 EXPORT_SYMBOL(fc_lport_logo_resp); 1432 1433 /** 1434 * fc_rport_enter_logo() - Logout of the fabric 1435 * @lport: The local port to be logged out 1436 * 1437 * Locking Note: The lport lock is expected to be held before calling 1438 * this routine. 
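 *
 * The LOGO is sent to the fabric (FC_FID_FLOGI) with a timeout of twice
 * R_A_TOV; an LS_ACC response moves the port to the DISABLED state.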
1439 */ 1440 static void fc_lport_enter_logo(struct fc_lport *lport) 1441 { 1442 struct fc_frame *fp; 1443 struct fc_els_logo *logo; 1444 1445 FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", 1446 fc_lport_state(lport)); 1447 1448 fc_lport_state_enter(lport, LPORT_ST_LOGO); 1449 fc_vports_linkchange(lport); 1450 1451 fp = fc_frame_alloc(lport, sizeof(*logo)); 1452 if (!fp) { 1453 fc_lport_error(lport, fp); 1454 return; 1455 } 1456 1457 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO, 1458 fc_lport_logo_resp, lport, 1459 2 * lport->r_a_tov)) 1460 fc_lport_error(lport, NULL); 1461 } 1462 1463 /** 1464 * fc_lport_flogi_resp() - Handle response to FLOGI request 1465 * @sp: The sequence that the FLOGI was on 1466 * @fp: The FLOGI response frame 1467 * @lp_arg: The lport port that received the FLOGI response 1468 * 1469 * Locking Note: This function will be called without the lport lock 1470 * held, but it will lock, call an _enter_* function or fc_lport_error() 1471 * and then unlock the lport. 1472 */ 1473 void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, 1474 void *lp_arg) 1475 { 1476 struct fc_lport *lport = lp_arg; 1477 struct fc_frame_header *fh; 1478 struct fc_els_flogi *flp; 1479 u32 did; 1480 u16 csp_flags; 1481 unsigned int r_a_tov; 1482 unsigned int e_d_tov; 1483 u16 mfs; 1484 1485 FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp)); 1486 1487 if (fp == ERR_PTR(-FC_EX_CLOSED)) 1488 return; 1489 1490 mutex_lock(&lport->lp_mutex); 1491 1492 if (lport->state != LPORT_ST_FLOGI) { 1493 FC_LPORT_DBG(lport, "Received a FLOGI response, but in state " 1494 "%s\n", fc_lport_state(lport)); 1495 if (IS_ERR(fp)) 1496 goto err; 1497 goto out; 1498 } 1499 1500 if (IS_ERR(fp)) { 1501 fc_lport_error(lport, fp); 1502 goto err; 1503 } 1504 1505 fh = fc_frame_header_get(fp); 1506 did = fc_frame_did(fp); 1507 if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 || 1508 fc_frame_payload_op(fp) != ELS_LS_ACC) { 1509 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); 1510 fc_lport_error(lport, fp); 1511 goto err; 1512 } 1513 1514 flp = fc_frame_payload_get(fp, sizeof(*flp)); 1515 if (!flp) { 1516 FC_LPORT_DBG(lport, "FLOGI bad response\n"); 1517 fc_lport_error(lport, fp); 1518 goto err; 1519 } 1520 1521 mfs = ntohs(flp->fl_csp.sp_bb_data) & 1522 FC_SP_BB_DATA_MASK; 1523 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && 1524 mfs < lport->mfs) 1525 lport->mfs = mfs; 1526 csp_flags = ntohs(flp->fl_csp.sp_features); 1527 r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); 1528 e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); 1529 if (csp_flags & FC_SP_FT_EDTR) 1530 e_d_tov /= 1000000; 1531 1532 lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC); 1533 1534 if ((csp_flags & FC_SP_FT_FPORT) == 0) { 1535 if (e_d_tov > lport->e_d_tov) 1536 lport->e_d_tov = e_d_tov; 1537 lport->r_a_tov = 2 * e_d_tov; 1538 fc_lport_set_port_id(lport, did, fp); 1539 printk(KERN_INFO "host%d: libfc: " 1540 "Port (%6.6x) entered " 1541 "point-to-point mode\n", 1542 lport->host->host_no, did); 1543 fc_lport_ptp_setup(lport, fc_frame_sid(fp), 1544 get_unaligned_be64( 1545 &flp->fl_wwpn), 1546 get_unaligned_be64( 1547 &flp->fl_wwnn)); 1548 } else { 1549 lport->e_d_tov = e_d_tov; 1550 lport->r_a_tov = r_a_tov; 1551 fc_host_fabric_name(lport->host) = 1552 get_unaligned_be64(&flp->fl_wwnn); 1553 fc_lport_set_port_id(lport, did, fp); 1554 fc_lport_enter_dns(lport); 1555 } 1556 1557 out: 1558 fc_frame_free(fp); 1559 err: 1560 mutex_unlock(&lport->lp_mutex); 1561 } 1562 EXPORT_SYMBOL(fc_lport_flogi_resp); 1563 1564 /** 
1565 * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager 1566 * @lport: Fibre Channel local port to be logged in to the fabric 1567 * 1568 * Locking Note: The lport lock is expected to be held before calling 1569 * this routine. 1570 */ 1571 void fc_lport_enter_flogi(struct fc_lport *lport) 1572 { 1573 struct fc_frame *fp; 1574 1575 FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", 1576 fc_lport_state(lport)); 1577 1578 fc_lport_state_enter(lport, LPORT_ST_FLOGI); 1579 1580 if (lport->point_to_multipoint) { 1581 if (lport->port_id) 1582 fc_lport_enter_ready(lport); 1583 return; 1584 } 1585 1586 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 1587 if (!fp) 1588 return fc_lport_error(lport, fp); 1589 1590 if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, 1591 lport->vport ? ELS_FDISC : ELS_FLOGI, 1592 fc_lport_flogi_resp, lport, 1593 lport->vport ? 2 * lport->r_a_tov : 1594 lport->e_d_tov)) 1595 fc_lport_error(lport, NULL); 1596 } 1597 1598 /** 1599 * fc_lport_config() - Configure a fc_lport 1600 * @lport: The local port to be configured 1601 */ 1602 int fc_lport_config(struct fc_lport *lport) 1603 { 1604 INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); 1605 mutex_init(&lport->lp_mutex); 1606 1607 fc_lport_state_enter(lport, LPORT_ST_DISABLED); 1608 1609 fc_lport_add_fc4_type(lport, FC_TYPE_FCP); 1610 fc_lport_add_fc4_type(lport, FC_TYPE_CT); 1611 fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); 1612 1613 return 0; 1614 } 1615 EXPORT_SYMBOL(fc_lport_config); 1616 1617 /** 1618 * fc_lport_init() - Initialize the lport layer for a local port 1619 * @lport: The local port to initialize the exchange layer for 1620 */ 1621 int fc_lport_init(struct fc_lport *lport) 1622 { 1623 if (!lport->tt.lport_recv) 1624 lport->tt.lport_recv = fc_lport_recv_req; 1625 1626 if (!lport->tt.lport_reset) 1627 lport->tt.lport_reset = fc_lport_reset; 1628 1629 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1630 fc_host_node_name(lport->host) = lport->wwnn; 1631 fc_host_port_name(lport->host) = lport->wwpn; 1632 fc_host_supported_classes(lport->host) = FC_COS_CLASS3; 1633 memset(fc_host_supported_fc4s(lport->host), 0, 1634 sizeof(fc_host_supported_fc4s(lport->host))); 1635 fc_host_supported_fc4s(lport->host)[2] = 1; 1636 fc_host_supported_fc4s(lport->host)[7] = 1; 1637 1638 /* This value is also unchanging */ 1639 memset(fc_host_active_fc4s(lport->host), 0, 1640 sizeof(fc_host_active_fc4s(lport->host))); 1641 fc_host_active_fc4s(lport->host)[2] = 1; 1642 fc_host_active_fc4s(lport->host)[7] = 1; 1643 fc_host_maxframe_size(lport->host) = lport->mfs; 1644 fc_host_supported_speeds(lport->host) = 0; 1645 if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT) 1646 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; 1647 if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) 1648 fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; 1649 fc_fc4_add_lport(lport); 1650 1651 return 0; 1652 } 1653 EXPORT_SYMBOL(fc_lport_init); 1654 1655 /** 1656 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests 1657 * @sp: The sequence for the FC Passthrough response 1658 * @fp: The response frame 1659 * @info_arg: The BSG info that the response is for 1660 */ 1661 static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, 1662 void *info_arg) 1663 { 1664 struct fc_bsg_info *info = info_arg; 1665 struct fc_bsg_job *job = info->job; 1666 struct fc_lport *lport = info->lport; 1667 struct fc_frame_header *fh; 1668 size_t len; 1669 void *buf; 
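	/*
	 * On exchange errors (fp is an ERR_PTR) complete the job immediately
	 * with -ECONNABORTED or -ETIMEDOUT and free the passthrough info;
	 * the lport mutex is not taken on this path.
	 */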
1670 1671 if (IS_ERR(fp)) { 1672 job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ? 1673 -ECONNABORTED : -ETIMEDOUT; 1674 job->reply_len = sizeof(uint32_t); 1675 job->state_flags |= FC_RQST_STATE_DONE; 1676 job->job_done(job); 1677 kfree(info); 1678 return; 1679 } 1680 1681 mutex_lock(&lport->lp_mutex); 1682 fh = fc_frame_header_get(fp); 1683 len = fr_len(fp) - sizeof(*fh); 1684 buf = fc_frame_payload_get(fp, 0); 1685 1686 if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) { 1687 /* Get the response code from the first frame payload */ 1688 unsigned short cmd = (info->rsp_code == FC_FS_ACC) ? 1689 ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) : 1690 (unsigned short)fc_frame_payload_op(fp); 1691 1692 /* Save the reply status of the job */ 1693 job->reply->reply_data.ctels_reply.status = 1694 (cmd == info->rsp_code) ? 1695 FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT; 1696 } 1697 1698 job->reply->reply_payload_rcv_len += 1699 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, 1700 &info->offset, KM_BIO_SRC_IRQ, NULL); 1701 1702 if (fr_eof(fp) == FC_EOF_T && 1703 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1704 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) { 1705 if (job->reply->reply_payload_rcv_len > 1706 job->reply_payload.payload_len) 1707 job->reply->reply_payload_rcv_len = 1708 job->reply_payload.payload_len; 1709 job->reply->result = 0; 1710 job->state_flags |= FC_RQST_STATE_DONE; 1711 job->job_done(job); 1712 kfree(info); 1713 } 1714 fc_frame_free(fp); 1715 mutex_unlock(&lport->lp_mutex); 1716 } 1717 1718 /** 1719 * fc_lport_els_request() - Send ELS passthrough request 1720 * @job: The BSG Passthrough job 1721 * @lport: The local port sending the request 1722 * @did: The destination port id 1723 * 1724 * Locking Note: The lport lock is expected to be held before calling 1725 * this routine. 1726 */ 1727 static int fc_lport_els_request(struct fc_bsg_job *job, 1728 struct fc_lport *lport, 1729 u32 did, u32 tov) 1730 { 1731 struct fc_bsg_info *info; 1732 struct fc_frame *fp; 1733 struct fc_frame_header *fh; 1734 char *pp; 1735 int len; 1736 1737 fp = fc_frame_alloc(lport, job->request_payload.payload_len); 1738 if (!fp) 1739 return -ENOMEM; 1740 1741 len = job->request_payload.payload_len; 1742 pp = fc_frame_payload_get(fp, len); 1743 1744 sg_copy_to_buffer(job->request_payload.sg_list, 1745 job->request_payload.sg_cnt, 1746 pp, len); 1747 1748 fh = fc_frame_header_get(fp); 1749 fh->fh_r_ctl = FC_RCTL_ELS_REQ; 1750 hton24(fh->fh_d_id, did); 1751 hton24(fh->fh_s_id, lport->port_id); 1752 fh->fh_type = FC_TYPE_ELS; 1753 hton24(fh->fh_f_ctl, FC_FCTL_REQ); 1754 fh->fh_cs_ctl = 0; 1755 fh->fh_df_ctl = 0; 1756 fh->fh_parm_offset = 0; 1757 1758 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); 1759 if (!info) { 1760 fc_frame_free(fp); 1761 return -ENOMEM; 1762 } 1763 1764 info->job = job; 1765 info->lport = lport; 1766 info->rsp_code = ELS_LS_ACC; 1767 info->nents = job->reply_payload.sg_cnt; 1768 info->sg = job->reply_payload.sg_list; 1769 1770 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1771 NULL, info, tov)) { 1772 kfree(info); 1773 return -ECOMM; 1774 } 1775 return 0; 1776 } 1777 1778 /** 1779 * fc_lport_ct_request() - Send CT Passthrough request 1780 * @job: The BSG Passthrough job 1781 * @lport: The local port sending the request 1782 * @did: The destination FC-ID 1783 * @tov: The timeout period to wait for the response 1784 * 1785 * Locking Note: The lport lock is expected to be held before calling 1786 * this routine. 
1787 */ 1788 static int fc_lport_ct_request(struct fc_bsg_job *job, 1789 struct fc_lport *lport, u32 did, u32 tov) 1790 { 1791 struct fc_bsg_info *info; 1792 struct fc_frame *fp; 1793 struct fc_frame_header *fh; 1794 struct fc_ct_req *ct; 1795 size_t len; 1796 1797 fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + 1798 job->request_payload.payload_len); 1799 if (!fp) 1800 return -ENOMEM; 1801 1802 len = job->request_payload.payload_len; 1803 ct = fc_frame_payload_get(fp, len); 1804 1805 sg_copy_to_buffer(job->request_payload.sg_list, 1806 job->request_payload.sg_cnt, 1807 ct, len); 1808 1809 fh = fc_frame_header_get(fp); 1810 fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL; 1811 hton24(fh->fh_d_id, did); 1812 hton24(fh->fh_s_id, lport->port_id); 1813 fh->fh_type = FC_TYPE_CT; 1814 hton24(fh->fh_f_ctl, FC_FCTL_REQ); 1815 fh->fh_cs_ctl = 0; 1816 fh->fh_df_ctl = 0; 1817 fh->fh_parm_offset = 0; 1818 1819 info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL); 1820 if (!info) { 1821 fc_frame_free(fp); 1822 return -ENOMEM; 1823 } 1824 1825 info->job = job; 1826 info->lport = lport; 1827 info->rsp_code = FC_FS_ACC; 1828 info->nents = job->reply_payload.sg_cnt; 1829 info->sg = job->reply_payload.sg_list; 1830 1831 if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp, 1832 NULL, info, tov)) { 1833 kfree(info); 1834 return -ECOMM; 1835 } 1836 return 0; 1837 } 1838 1839 /** 1840 * fc_lport_bsg_request() - The common entry point for sending 1841 * FC Passthrough requests 1842 * @job: The BSG passthrough job 1843 */ 1844 int fc_lport_bsg_request(struct fc_bsg_job *job) 1845 { 1846 struct request *rsp = job->req->next_rq; 1847 struct Scsi_Host *shost = job->shost; 1848 struct fc_lport *lport = shost_priv(shost); 1849 struct fc_rport *rport; 1850 struct fc_rport_priv *rdata; 1851 int rc = -EINVAL; 1852 u32 did; 1853 1854 job->reply->reply_payload_rcv_len = 0; 1855 if (rsp) 1856 rsp->resid_len = job->reply_payload.payload_len; 1857 1858 mutex_lock(&lport->lp_mutex); 1859 1860 switch (job->request->msgcode) { 1861 case FC_BSG_RPT_ELS: 1862 rport = job->rport; 1863 if (!rport) 1864 break; 1865 1866 rdata = rport->dd_data; 1867 rc = fc_lport_els_request(job, lport, rport->port_id, 1868 rdata->e_d_tov); 1869 break; 1870 1871 case FC_BSG_RPT_CT: 1872 rport = job->rport; 1873 if (!rport) 1874 break; 1875 1876 rdata = rport->dd_data; 1877 rc = fc_lport_ct_request(job, lport, rport->port_id, 1878 rdata->e_d_tov); 1879 break; 1880 1881 case FC_BSG_HST_CT: 1882 did = ntoh24(job->request->rqst_data.h_ct.port_id); 1883 if (did == FC_FID_DIR_SERV) 1884 rdata = lport->dns_rdata; 1885 else 1886 rdata = lport->tt.rport_lookup(lport, did); 1887 1888 if (!rdata) 1889 break; 1890 1891 rc = fc_lport_ct_request(job, lport, did, rdata->e_d_tov); 1892 break; 1893 1894 case FC_BSG_HST_ELS_NOLOGIN: 1895 did = ntoh24(job->request->rqst_data.h_els.port_id); 1896 rc = fc_lport_els_request(job, lport, did, lport->e_d_tov); 1897 break; 1898 } 1899 1900 mutex_unlock(&lport->lp_mutex); 1901 return rc; 1902 } 1903 EXPORT_SYMBOL(fc_lport_bsg_request); 1904
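
/*
 * Illustrative bring-up sketch (not part of the driver API contract): a
 * lower-level driver that has allocated and partially filled in an fc_lport
 * typically wires up libfc and starts the fabric login roughly like this.
 * The exact set and order of init calls is driver-specific; see the fcoe or
 * fnic drivers for complete examples.
 *
 *	fc_lport_config(lport);		set up lp_mutex, retry_work, FC-4 types
 *	fc_exch_init(lport);		exchange manager template
 *	fc_elsct_init(lport);		ELS/CT send template
 *	fc_lport_init(lport);		lport_recv/lport_reset, fc_host attrs
 *	fc_rport_init(lport);		remote port template
 *	fc_disc_init(lport);		discovery template
 *
 *	fc_fabric_login(lport);		DISABLED/LOGO -> RESET, armed for link up
 *	fc_linkup(lport);		link up: RESET -> FLOGI -> ... -> READY
 */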