/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
static void lpfc_check_inactive_vmid(struct lpfc_hba *phba);
static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba);

static int
lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
{
	if (ndlp->nlp_fc4_type ||
	    ndlp->nlp_type & NLP_FABRIC)
		return 1;
	return 0;
}
/* The source of a terminate rport I/O is either a dev_loss_tmo
 * event or a call to fc_remove_host. While the rport should be
 * valid during these downcalls, the transport can call twice
 * in a single event.  This routine provides some protection
 * as the NDLP isn't really free, just released to the pool.
 */
static int
lpfc_rport_invalid(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;

	if (!rport) {
		pr_err("**** %s: NULL rport, exit.\n", __func__);
		return -EINVAL;
	}

	rdata = rport->dd_data;
	if (!rdata) {
		pr_err("**** %s: NULL dd_data on rport x%px SID x%x\n",
		       __func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	ndlp = rdata->pnode;
	if (!rdata->pnode) {
		pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
			__func__, rport, rport->scsi_target_id);
		return -EINVAL;
	}

	if (!ndlp->vport) {
		pr_err("**** %s: Null vport on ndlp x%px, DID x%x rport x%px "
		       "SID x%x\n", __func__, ndlp, ndlp->nlp_DID, rport,
		       rport->scsi_target_id);
		return -EINVAL;
	}
	return 0;
}

void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;

	if (lpfc_rport_invalid(rport))
		return;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	vport = ndlp->vport;
	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport terminate: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID)
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct lpfc_work_evt *evtp;
	unsigned long iflags;

	ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode;
	if (!ndlp)
		return;

	vport = ndlp->vport;
	phba = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosscb: sid:x%x did:x%x flg:x%x",
			      ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
			 "load_flag x%lx refcnt %u state %d xpt x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
			 vport->load_flag, kref_read(&ndlp->kref),
			 ndlp->nlp_state, ndlp->fc4_xpt_flags);

	/* Don't schedule a worker thread event if the vport is going down. */
	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->rport = NULL;

		/* The scsi_transport is done with the rport so lpfc cannot
		 * call to unregister. Remove the scsi transport reference
		 * and clean up the SCSI transport node details.
		 */
		if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
			ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;

			/* NVME transport-registered rports need the
			 * NLP_XPT_REGD flag to complete an unregister.
			 */
			if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
				ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			spin_lock_irqsave(&ndlp->lock, iflags);
		}

		/* Only 1 thread can drop the initial node reference.  If
		 * another thread has set NLP_DROPPED, this thread is done.
		 */
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD) &&
		    !(ndlp->nlp_flag & NLP_DROPPED)) {
			ndlp->nlp_flag |= NLP_DROPPED;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_nlp_put(ndlp);
			return;
		}

		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	/* check for recovered fabric node */
	if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE &&
	    ndlp->nlp_DID == Fabric_DID)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending\n",
				 rport->port_name);
		return;
	}

	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;

	/* If there is a PLOGI in progress, and we are in a
	 * NLP_NPR_2B_DISC state, don't turn off the flag.
	 */
	if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE)
		ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;

	/*
	 * The backend does not expect any more calls associated with this
	 * rport. Remove the association between rport and ndlp.
	 */
	ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
	((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
	ndlp->rport = NULL;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	if (phba->worker_thread) {
		/* We need to hold the node by incrementing the reference
		 * count until this queued work is done
		 */
		evtp->evt_arg1 = lpfc_nlp_get(ndlp);

		spin_lock_irqsave(&phba->hbalock, iflags);
		if (evtp->evt_arg1) {
			evtp->evt = LPFC_EVT_DEV_LOSS;
			list_add_tail(&evtp->evt_listp, &phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_worker_wake_up(phba);
			return;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	} else {
		lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
				 "3188 worker thread is stopped %s x%06x, "
				 " rport x%px flg x%x load_flag x%lx refcnt "
				 "%d\n", __func__, ndlp->nlp_DID,
				 ndlp->rport, ndlp->nlp_flag,
				 vport->load_flag, kref_read(&ndlp->kref));
		if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
			spin_lock_irqsave(&ndlp->lock, iflags);
			/* Node is in dev loss.  No further transaction. */
			ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			lpfc_disc_state_machine(vport, ndlp, NULL,
						NLP_EVT_DEVICE_RM);
		}
	}
}

/**
 * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport
 * @vport: Pointer to vport context object.
 *
 * This function checks for idle VMID entries related to a particular vport. If
 * found unused/idle, free them accordingly.
 **/
static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport)
{
	u16 keep;
	u32 difftime = 0, r, bucket;
	u64 *lta;
	int cpu;
	struct lpfc_vmid *vmp;

	write_lock(&vport->vmid_lock);

	if (!vport->cur_vmid_cnt)
		goto out;

	/* iterate through the table */
	hash_for_each(vport->hash_table, bucket, vmp, hnode) {
		keep = 0;
		if (vmp->flag & LPFC_VMID_REGISTERED) {
			/* check if the particular VMID is in use */
			/* for all available per cpu variable */
			for_each_possible_cpu(cpu) {
				/* if last access time is less than timeout */
				lta = per_cpu_ptr(vmp->last_io_time, cpu);
				if (!lta)
					continue;
				difftime = (jiffies) - (*lta);
				if ((vport->vmid_inactivity_timeout *
				     JIFFIES_PER_HR) > difftime) {
					keep = 1;
					break;
				}
			}

			/* if none of the cpus have been used by the vm, */
			/* remove the entry if already registered */
			if (!keep) {
				/* mark the entry for deregistration */
				vmp->flag = LPFC_VMID_DE_REGISTER;
				write_unlock(&vport->vmid_lock);
				if (vport->vmid_priority_tagging)
					r = lpfc_vmid_uvem(vport, vmp, false);
				else
					r = lpfc_vmid_cmd(vport,
							  SLI_CTAS_DAPP_IDENT,
							  vmp);

				/* decrement number of active vms and mark */
				/* entry in slot as free */
				write_lock(&vport->vmid_lock);
				if (!r) {
					struct lpfc_vmid *ht = vmp;

					vport->cur_vmid_cnt--;
					ht->flag = LPFC_VMID_SLOT_FREE;
					free_percpu(ht->last_io_time);
					ht->last_io_time = NULL;
					hash_del(&ht->hnode);
				}
			}
		}
	}
 out:
	write_unlock(&vport->vmid_lock);
}

/**
 * lpfc_check_inactive_vmid - VMID inactivity checker
 * @phba: Pointer to hba context object.
 *
 * This function is called from the worker thread to determine if an entry in
 * the VMID table can be released since there was no I/O activity seen from that
 * particular VM for the specified time. When this happens, the entry in the
 * table is released and also the resources on the switch cleared.
 **/

static void lpfc_check_inactive_vmid(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		lpfc_check_inactive_vmid_one(vport);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_check_nlp_post_devloss - Check to restore ndlp refcnt after devloss
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to remote node object.
 *
 * If NLP_IN_RECOV_POST_DEV_LOSS flag was set due to outstanding recovery of
 * node during dev_loss_tmo processing, then this function restores the nlp_put
 * kref decrement from lpfc_dev_loss_tmo_handler.
 **/
void
lpfc_check_nlp_post_devloss(struct lpfc_vport *vport,
			    struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	spin_lock_irqsave(&ndlp->lock, iflags);
	if (ndlp->save_flags & NLP_IN_RECOV_POST_DEV_LOSS) {
		ndlp->save_flags &= ~NLP_IN_RECOV_POST_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		lpfc_nlp_get(ndlp);
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE,
				 "8438 Devloss timeout reversed on DID x%x "
				 "refcnt %d ndlp %p flag x%x "
				 "port_state = x%x\n",
				 ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp,
				 ndlp->nlp_flag, vport->port_state);
		return;
	}
	spin_unlock_irqrestore(&ndlp->lock, iflags);
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine returns 1 when at least one
 * remote node, including this @ndlp, is still in use of the FCF; otherwise,
 * it returns 0 when no remote node is still in use of the FCF when the
 * devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	uint8_t *name;
	int warn_on = 0;
	int fcf_inuse = 0;
	bool recovering = false;
	struct fc_vport *fc_vport = NULL;
	unsigned long iflags;

	vport = ndlp->vport;
	name = (uint8_t *)&ndlp->nlp_portname;
	phba = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
			      "rport devlosstmo:did:x%x type:x%x id:x%x",
			      ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n",
			 __func__, ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->fc4_xpt_flags, kref_read(&ndlp->kref));

	/* If the driver is recovering the rport, ignore devloss. */
	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);

		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		return fcf_inuse;
	}

	/* Fabric nodes are done. */
	if (ndlp->nlp_type & NLP_FABRIC) {
		spin_lock_irqsave(&ndlp->lock, iflags);

		/* The driver has to account for a race between any fabric
		 * node that's in recovery when dev_loss_tmo expires. When this
		 * happens, the driver has to allow node recovery.
		 */
		switch (ndlp->nlp_DID) {
		case Fabric_DID:
			fc_vport = vport->fc_vport;
			if (fc_vport) {
				/* NPIV path. */
				if (fc_vport->vport_state ==
				    FC_VPORT_INITIALIZING)
					recovering = true;
			} else {
				/* Physical port path. */
				if (test_bit(HBA_FLOGI_OUTSTANDING,
					     &phba->hba_flag))
					recovering = true;
			}
			break;
		case Fabric_Cntl_DID:
			if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
				recovering = true;
			break;
		case FDMI_DID:
			fallthrough;
		case NameServer_DID:
			if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
			    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
				recovering = true;
			break;
		default:
			/* Ensure the nlp_DID at least has the correct prefix.
			 * The fabric domain controller's last three nibbles
			 * vary so we handle it in the default case.
			 */
			if (ndlp->nlp_DID & Fabric_DID_MASK) {
				if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
				    ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE)
					recovering = true;
			}
			break;
		}
		spin_unlock_irqrestore(&ndlp->lock, iflags);

		/* Mark an NLP_IN_RECOV_POST_DEV_LOSS flag to know if reversing
		 * the following lpfc_nlp_put is necessary after fabric node is
		 * recovered.
		 */
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
		if (recovering) {
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8436 Devloss timeout marked on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			spin_lock_irqsave(&ndlp->lock, iflags);
			ndlp->save_flags |= NLP_IN_RECOV_POST_DEV_LOSS;
			spin_unlock_irqrestore(&ndlp->lock, iflags);
			return fcf_inuse;
		} else if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
			/* Fabric node fully recovered before this dev_loss_tmo
			 * queue work is processed. Thus, ignore the
			 * dev_loss_tmo event.
			 */
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "8437 Devloss timeout ignored on "
					 "DID x%x refcnt %d ndlp %p "
					 "flag x%x port_state = x%x\n",
					 ndlp->nlp_DID, kref_read(&ndlp->kref),
					 ndlp, ndlp->nlp_flag,
					 vport->port_state);
			return fcf_inuse;
		}

		lpfc_nlp_put(ndlp);
		return fcf_inuse;
	}

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi,
				 kref_read(&ndlp->kref));
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}
	spin_lock_irqsave(&ndlp->lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(&ndlp->lock, iflags);

	/* If we are devloss, but we are in the process of rediscovering the
	 * ndlp, don't issue a NLP_EVT_DEVICE_RM event.
	 */
	if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
	    ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) {
		return fcf_inuse;
	}

	if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports)
		return;

	for (i = 0; i <= phba->max_vports; i++) {
		if ((!vports[i]) && (i == 0))
			vport = phba->pport;
		else
			vport = vports[i];
		if (!vport)
			break;

		if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) {
			if (!lpfc_issue_els_qfpa(vport))
				vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA;
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for the SLI4 host. For the devloss
 * timeout of the last remote node which had been in use of the FCF, when this
 * routine is invoked it is guaranteed that no remote node is still in use of
 * the FCF. When the devloss timeout happens to the last remote node using the
 * FCF, if the FIP engine is neither in the FCF table scan process nor the
 * roundrobin failover process, the in-use FCF is unregistered. If the FIP
 * engine is in the FCF discovery process, the devloss timeout state is set
 * for either the FCF table scan process or the roundrobin failover process
 * to unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If devloss timeout happened to a remote node when FCF had no
	 * longer been in-use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if (test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) &&
	    !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (test_and_set_bit(HBA_DEVLOSS_TMO,
					     &phba->hba_flag)) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (!test_bit(FCF_TS_INPROG, &phba->hba_flag) &&
		    !test_bit(FCF_RR_INPROG, &phba->hba_flag)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (test_bit(FCF_RR_INPROG, &phba->hba_flag))
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting event
 * @phba: Pointer to hba context object.
 *
 * This function is called from the functions which need to post
 * events from interrupt context. This function allocates the data
 * structure required for posting an event. It also keeps track of
 * the number of events pending and prevents an event storm when there
 * are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt: Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		   struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. This function posts the event
 * to the fc transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		       struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
				     work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt *evtp = NULL;
	struct lpfc_nodelist *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;
	bool hba_pci_err;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_els_retry_delay_handler(ndlp);
				free_evt = 0; /* evt is part of ndlp */
			}
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			if (!hba_pci_err) {
				lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
				free_evt = 0;
			}
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
					? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;
	bool hba_pci_err;

	hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);
	if (hba_pci_err)
		ha_copy = 0;

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Handle VMID Events */
	if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
		if (phba->pport->work_port_events &
		    WORKER_CHECK_VMID_ISSUE_QFPA) {
			lpfc_check_vmid_qfpa_issue(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_VMID_ISSUE_QFPA;
		}
		if (phba->pport->work_port_events &
		    WORKER_CHECK_INACTIVE_VMID) {
			lpfc_check_inactive_vmid(phba);
			phba->pport->work_port_events &=
				~WORKER_CHECK_INACTIVE_VMID;
		}
	}

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (test_bit(HBA_RRQ_ACTIVE, &phba->hba_flag))
			lpfc_handle_rrq_active(phba);
		if (test_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag))
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (test_bit(ASYNC_EVENT, &phba->hba_flag))
			lpfc_sli4_async_event_proc(phba);
		if (test_and_clear_bit(HBA_POST_RECEIVE_BUFFER,
				       &phba->hba_flag))
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (hba_pci_err)
				continue;
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!test_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down.  Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

int
lpfc_do_work(void *p)
{
	struct lpfc_hba *phba = p;
	int rc;

	set_user_nice(current, MIN_NICE);
	current->flags |= PF_NOFREEZE;
	phba->data_flags = 0;

	while (!kthread_should_stop()) {
		/* wait and check worker queue activities */
		rc = wait_event_interruptible(phba->work_waitq,
					      (test_and_clear_bit(LPFC_DATA_READY,
								  &phba->data_flags)
					       || kthread_should_stop()));
		/* Signal wakeup shall terminate the worker thread */
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0433 Wakeup on signal: rc=x%x\n", rc);
			break;
		}

		/* Attend pending lpfc data processing */
		lpfc_work_done(phba);
	}
	phba->worker_thread = NULL;
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0432 Worker thread stopped.\n");
	return 0;
}

/*
 * This is only called to handle FC worker events. Since this is a rare
 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
 * embedding it in the IOCB.
 */
int
lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
		      uint32_t evt)
{
	struct lpfc_work_evt *evtp;
	unsigned long flags;

	/*
	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
	 * be queued to worker thread for processing
	 */
	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
	if (!evtp)
		return 0;

	evtp->evt_arg1 = arg1;
	evtp->evt_arg2 = arg2;
	evtp->evt = evt;

	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&evtp->evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	lpfc_worker_wake_up(phba);

	return 1;
}

void
lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp, *next_ndlp;

	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
		    ((vport->port_type == LPFC_NPIV_PORT) &&
		     ((ndlp->nlp_DID == NameServer_DID) ||
		      (ndlp->nlp_DID == FDMI_DID) ||
		      (ndlp->nlp_DID == Fabric_Cntl_DID))))
			lpfc_unreg_rpi(vport, ndlp);

		/* Leave Fabric nodes alone on link down */
		if ((phba->sli_rev < LPFC_SLI_REV4) &&
		    (!remove && ndlp->nlp_type & NLP_FABRIC))
			continue;

		/* Notify transport of connectivity loss to trigger cleanup. */
		if (phba->nvmet_support &&
		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
			lpfc_nvmet_invalidate_host(phba, ndlp);

		lpfc_disc_state_machine(vport, ndlp, NULL,
					remove
					? NLP_EVT_DEVICE_RM
					: NLP_EVT_DEVICE_RECOVERY);
	}
	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_unreg_all_rpis(vport);
		lpfc_mbx_unreg_vpi(vport);
		set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
	}
}

void
lpfc_port_link_failure(struct lpfc_vport *vport)
{
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_event(shost, fc_get_event_number(),
				   FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Down: state:x%x rtry:x%x flg:x%x",
			      vport->port_state, vport->fc_ns_retry,
			      vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	clear_bit(FC_DISC_DELAYED, &vport->fc_flag);
	del_timer_sync(&vport->delayed_disc_tmo);

	if (phba->sli_rev == LPFC_SLI_REV4 &&
	    vport->port_type == LPFC_PHYSICAL_PORT &&
	    phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
		/* Assume success on link up */
		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
	}
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;
	int offline;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);
	offline = pci_channel_offline(phba->pcidev);

	/* Decrement the held ndlp if there is a deferred flogi acc */
	if (phba->defer_flogi_acc.flag) {
		if (phba->defer_flogi_acc.ndlp) {
			lpfc_nlp_put(phba->defer_flogi_acc.ndlp);
			phba->defer_flogi_acc.ndlp = NULL;
		}
	}
	phba->defer_flogi_acc.flag = false;

	/* Clear external loopback plug detected flag */
	phba->link_flag &= ~LS_EXTERNAL_LOOPBACK;

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		if (phba->sli4_hba.conf_trunk) {
			phba->trunk_link.link0.state = 0;
			phba->trunk_link.link1.state = 0;
			phba->trunk_link.link2.state = 0;
			phba->trunk_link.link3.state = 0;
			phba->trunk_link.phy_lnk_speed =
						LPFC_LINK_SPEED_UNKNOWN;
			phba->sli4_hba.link_state.logical_speed =
						LPFC_LINK_SPEED_UNKNOWN;
		}
		clear_bit(FC_LBIT, &phba->pport->fc_flag);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);

			vports[i]->fc_myDID = 0;

			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
				if (phba->nvmet_support)
					lpfc_nvmet_update_targetport(phba);
				else
					lpfc_nvme_update_localport(vports[i]);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clean up any SLI3 firmware default rpi's */
	if (phba->sli_rev > LPFC_SLI_REV3 || offline)
		goto skip_unreg_did;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

 skip_unreg_did:
	/* Setup myDID for link up if we are in pt2pt mode */
	if (test_bit(FC_PT2PT, &phba->pport->fc_flag)) {
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		clear_bit(FC_PT2PT, &phba->pport->fc_flag);
		clear_bit(FC_PT2PT_PLOGI, &phba->pport->fc_flag);
		spin_lock_irq(shost->host_lock);
		phba->pport->rcv_flogi_cnt = 0;
		spin_unlock_irq(shost->host_lock);
	}
	return 0;
}

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);

		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			      "Link Up: top:x%x speed:x%x flg:x%x",
			      phba->fc_topology, phba->fc_linkspeed,
			      phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	if (phba->defer_flogi_acc.flag) {
		clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
		clear_bit(FC_RSCN_MODE, &vport->fc_flag);
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
		clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
	} else {
		clear_bit(FC_PT2PT, &vport->fc_flag);
		clear_bit(FC_PT2PT_PLOGI, &vport->fc_flag);
		clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);
		clear_bit(FC_RSCN_MODE, &vport->fc_flag);
		clear_bit(FC_NLP_MORE, &vport->fc_flag);
		clear_bit(FC_RSCN_DISCOVERY, &vport->fc_flag);
	}
	set_bit(FC_NDISC_ACTIVE, &vport->fc_flag);

	spin_lock_irq(shost->host_lock);
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);
	lpfc_setup_fdmi_mask(vport);

	lpfc_linkup_cleanup_nodes(vport);
}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);

	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);

	/* Clear the pport flogi counter in case the link down was
	 * absorbed without an ACQE. No lock here - in worker thread
	 * and discovery is synchronized.
	 */
	spin_lock_irq(shost->host_lock);
	phba->pport->rcv_flogi_cnt = 0;
	spin_unlock_irq(shost->host_lock);

	/* reinitialize initial HBA flag */
	clear_bit(HBA_FLOGI_ISSUED, &phba->hba_flag);
	clear_bit(HBA_RHBA_CMPL, &phba->hba_flag);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer. SLI3 only.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	clear_bit(FC_ABORT_DISCOVERY, &vport->fc_flag);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}

void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	u16 status = pmb->u.mb.mbxStatus;
	int rc;

	mempool_free(pmb, phba->mbox_mem_pool);

	if (status)
		goto out;

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    test_bit(FC_PUBLIC_LOOP, &vport->fc_flag) &&
	    !test_bit(FC_LBIT, &vport->fc_flag)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout.  port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				lpfc_mbox_rsrc_cleanup(phba, sparam_mb,
						       MBOX_THD_UNLOCKED);
				goto sparam_out;
			}

			set_bit(HBA_DEFER_FLOGI, &phba->hba_flag);
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (test_bit(FC_PT2PT, &vport->fc_flag))
			lpfc_disc_start(vport);
	}
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
			 status, vport->port_state);

sparam_out:
	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
				 vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if (!test_bit(FCF_RR_INPROG, &phba->hba_flag) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	spin_unlock_irq(&phba->hbalock);
	clear_bit(FCF_TS_INPROG, &phba->hba_flag);
	if (vport->port_state != LPFC_FLOGI) {
		set_bit(FCF_RR_INPROG, &phba->hba_flag);
		lpfc_issue_init_vfi(vport);
	}
	goto out;

fail_out:
	clear_bit(FCF_RR_INPROG, &phba->hba_flag);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name match.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name match.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address match.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. The hbalock is asserted held in the code path calling this
 * routine.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to lpfc_hba data structure.
1781 **/ 1782 static void 1783 lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec, 1784 struct fcf_record *new_fcf_record) 1785 { 1786 /* Fabric name */ 1787 fcf_rec->fabric_name[0] = 1788 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); 1789 fcf_rec->fabric_name[1] = 1790 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); 1791 fcf_rec->fabric_name[2] = 1792 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); 1793 fcf_rec->fabric_name[3] = 1794 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); 1795 fcf_rec->fabric_name[4] = 1796 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); 1797 fcf_rec->fabric_name[5] = 1798 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); 1799 fcf_rec->fabric_name[6] = 1800 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); 1801 fcf_rec->fabric_name[7] = 1802 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); 1803 /* Mac address */ 1804 fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record); 1805 fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record); 1806 fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record); 1807 fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record); 1808 fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record); 1809 fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1810 /* FCF record index */ 1811 fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1812 /* FCF record priority */ 1813 fcf_rec->priority = new_fcf_record->fip_priority; 1814 /* Switch name */ 1815 fcf_rec->switch_name[0] = 1816 bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record); 1817 fcf_rec->switch_name[1] = 1818 bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record); 1819 fcf_rec->switch_name[2] = 1820 bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record); 1821 fcf_rec->switch_name[3] = 1822 bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record); 1823 fcf_rec->switch_name[4] = 1824 bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record); 1825 fcf_rec->switch_name[5] = 1826 bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record); 1827 fcf_rec->switch_name[6] = 1828 bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record); 1829 fcf_rec->switch_name[7] = 1830 bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record); 1831 } 1832 1833 /** 1834 * __lpfc_update_fcf_record - Update driver fcf record 1835 * @phba: pointer to lpfc hba data structure. 1836 * @fcf_rec: pointer to driver fcf record. 1837 * @new_fcf_record: pointer to hba fcf record. 1838 * @addr_mode: address mode to be set to the driver fcf record. 1839 * @vlan_id: vlan tag to be set to the driver fcf record. 1840 * @flag: flag bits to be set to the driver fcf record. 1841 * 1842 * This routine updates the driver FCF record from the new HBA FCF record 1843 * together with the address mode, vlan_id, and other informations. This 1844 * routine is called with the hbalock held. 
1845 **/ 1846 static void 1847 __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec, 1848 struct fcf_record *new_fcf_record, uint32_t addr_mode, 1849 uint16_t vlan_id, uint32_t flag) 1850 { 1851 lockdep_assert_held(&phba->hbalock); 1852 1853 /* Copy the fields from the HBA's FCF record */ 1854 lpfc_copy_fcf_record(fcf_rec, new_fcf_record); 1855 /* Update other fields of driver FCF record */ 1856 fcf_rec->addr_mode = addr_mode; 1857 fcf_rec->vlan_id = vlan_id; 1858 fcf_rec->flag |= (flag | RECORD_VALID); 1859 __lpfc_update_fcf_record_pri(phba, 1860 bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), 1861 new_fcf_record); 1862 } 1863 1864 /** 1865 * lpfc_register_fcf - Register the FCF with hba. 1866 * @phba: pointer to lpfc hba data structure. 1867 * 1868 * This routine issues a register fcfi mailbox command to register 1869 * the fcf with HBA. 1870 **/ 1871 static void 1872 lpfc_register_fcf(struct lpfc_hba *phba) 1873 { 1874 LPFC_MBOXQ_t *fcf_mbxq; 1875 int rc; 1876 1877 spin_lock_irq(&phba->hbalock); 1878 /* If the FCF is not available do nothing. */ 1879 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1880 spin_unlock_irq(&phba->hbalock); 1881 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1882 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1883 return; 1884 } 1885 1886 /* The FCF is already registered, start discovery */ 1887 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1888 phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); 1889 spin_unlock_irq(&phba->hbalock); 1890 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1891 if (phba->pport->port_state != LPFC_FLOGI && 1892 test_bit(FC_FABRIC, &phba->pport->fc_flag)) { 1893 set_bit(FCF_RR_INPROG, &phba->hba_flag); 1894 lpfc_initial_flogi(phba->pport); 1895 return; 1896 } 1897 return; 1898 } 1899 spin_unlock_irq(&phba->hbalock); 1900 1901 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1902 if (!fcf_mbxq) { 1903 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1904 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1905 return; 1906 } 1907 1908 lpfc_reg_fcfi(phba, fcf_mbxq); 1909 fcf_mbxq->vport = phba->pport; 1910 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1911 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1912 if (rc == MBX_NOT_FINISHED) { 1913 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 1914 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 1915 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1916 } 1917 1918 return; 1919 } 1920 1921 /** 1922 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. 1923 * @phba: pointer to lpfc hba data structure. 1924 * @new_fcf_record: pointer to fcf record. 1925 * @boot_flag: Indicates if this record used by boot bios. 1926 * @addr_mode: The address mode to be used by this FCF 1927 * @vlan_id: The vlan id to be used as vlan tagging by this FCF. 1928 * 1929 * This routine compare the fcf record with connect list obtained from the 1930 * config region to decide if this FCF can be used for SAN discovery. It returns 1931 * 1 if this record can be used for SAN discovery else return zero. If this FCF 1932 * record can be used for SAN discovery, the boot_flag will indicate if this FCF 1933 * is used by boot bios and addr_mode will indicate the addressing mode to be 1934 * used for this FCF when the function returns. 1935 * If the FCF record need to be used with a particular vlan id, the vlan is 1936 * set in the vlan_id on return of the function. 
If not VLAN tagging need to 1937 * be used with the FCF vlan_id will be set to LPFC_FCOE_NULL_VID; 1938 **/ 1939 static int 1940 lpfc_match_fcf_conn_list(struct lpfc_hba *phba, 1941 struct fcf_record *new_fcf_record, 1942 uint32_t *boot_flag, uint32_t *addr_mode, 1943 uint16_t *vlan_id) 1944 { 1945 struct lpfc_fcf_conn_entry *conn_entry; 1946 int i, j, fcf_vlan_id = 0; 1947 1948 /* Find the lowest VLAN id in the FCF record */ 1949 for (i = 0; i < 512; i++) { 1950 if (new_fcf_record->vlan_bitmap[i]) { 1951 fcf_vlan_id = i * 8; 1952 j = 0; 1953 while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) { 1954 j++; 1955 fcf_vlan_id++; 1956 } 1957 break; 1958 } 1959 } 1960 1961 /* FCF not valid/available or solicitation in progress */ 1962 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1963 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) || 1964 bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record)) 1965 return 0; 1966 1967 if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { 1968 *boot_flag = 0; 1969 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1970 new_fcf_record); 1971 if (phba->valid_vlan) 1972 *vlan_id = phba->vlan_id; 1973 else 1974 *vlan_id = LPFC_FCOE_NULL_VID; 1975 return 1; 1976 } 1977 1978 /* 1979 * If there are no FCF connection table entry, driver connect to all 1980 * FCFs. 1981 */ 1982 if (list_empty(&phba->fcf_conn_rec_list)) { 1983 *boot_flag = 0; 1984 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1985 new_fcf_record); 1986 1987 /* 1988 * When there are no FCF connect entries, use driver's default 1989 * addressing mode - FPMA. 1990 */ 1991 if (*addr_mode & LPFC_FCF_FPMA) 1992 *addr_mode = LPFC_FCF_FPMA; 1993 1994 /* If FCF record report a vlan id use that vlan id */ 1995 if (fcf_vlan_id) 1996 *vlan_id = fcf_vlan_id; 1997 else 1998 *vlan_id = LPFC_FCOE_NULL_VID; 1999 return 1; 2000 } 2001 2002 list_for_each_entry(conn_entry, 2003 &phba->fcf_conn_rec_list, list) { 2004 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) 2005 continue; 2006 2007 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && 2008 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, 2009 new_fcf_record)) 2010 continue; 2011 if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) && 2012 !lpfc_sw_name_match(conn_entry->conn_rec.switch_name, 2013 new_fcf_record)) 2014 continue; 2015 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { 2016 /* 2017 * If the vlan bit map does not have the bit set for the 2018 * vlan id to be used, then it is not a match. 2019 */ 2020 if (!(new_fcf_record->vlan_bitmap 2021 [conn_entry->conn_rec.vlan_tag / 8] & 2022 (1 << (conn_entry->conn_rec.vlan_tag % 8)))) 2023 continue; 2024 } 2025 2026 /* 2027 * If connection record does not support any addressing mode, 2028 * skip the FCF record. 2029 */ 2030 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) 2031 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) 2032 continue; 2033 2034 /* 2035 * Check if the connection record specifies a required 2036 * addressing mode. 2037 */ 2038 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 2039 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { 2040 2041 /* 2042 * If SPMA required but FCF not support this continue. 2043 */ 2044 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 2045 !(bf_get(lpfc_fcf_record_mac_addr_prov, 2046 new_fcf_record) & LPFC_FCF_SPMA)) 2047 continue; 2048 2049 /* 2050 * If FPMA required but FCF not support this continue. 
2051 */ 2052 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 2053 !(bf_get(lpfc_fcf_record_mac_addr_prov, 2054 new_fcf_record) & LPFC_FCF_FPMA)) 2055 continue; 2056 } 2057 2058 /* 2059 * This fcf record matches filtering criteria. 2060 */ 2061 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) 2062 *boot_flag = 1; 2063 else 2064 *boot_flag = 0; 2065 2066 /* 2067 * If the user did not specify any addressing mode, or if the 2068 * preferred addressing mode specified by the user is not supported 2069 * by the FCF, allow the fabric to pick the addressing mode. 2070 */ 2071 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 2072 new_fcf_record); 2073 /* 2074 * If the user specified a required address mode, assign that 2075 * address mode 2076 */ 2077 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 2078 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) 2079 *addr_mode = (conn_entry->conn_rec.flags & 2080 FCFCNCT_AM_SPMA) ? 2081 LPFC_FCF_SPMA : LPFC_FCF_FPMA; 2082 /* 2083 * If the user specified a preferred address mode, use the 2084 * addr mode only if the FCF supports the addr_mode. 2085 */ 2086 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 2087 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && 2088 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 2089 (*addr_mode & LPFC_FCF_SPMA)) 2090 *addr_mode = LPFC_FCF_SPMA; 2091 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 2092 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && 2093 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 2094 (*addr_mode & LPFC_FCF_FPMA)) 2095 *addr_mode = LPFC_FCF_FPMA; 2096 2097 /* If the matching connect list entry has a vlan id, use it */ 2098 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 2099 *vlan_id = conn_entry->conn_rec.vlan_tag; 2100 /* 2101 * If no vlan id is specified in the connect list, use the vlan id 2102 * in the FCF record 2103 */ 2104 else if (fcf_vlan_id) 2105 *vlan_id = fcf_vlan_id; 2106 else 2107 *vlan_id = LPFC_FCOE_NULL_VID; 2108 2109 return 1; 2110 } 2111 2112 return 0; 2113 } 2114 2115 /** 2116 * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event. 2117 * @phba: pointer to lpfc hba data structure. 2118 * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned. 2119 * 2120 * This function checks if there is any fcoe event pending while the driver 2121 * scans FCF entries. If there is any pending event, it will restart the 2122 * FCF scanning and return 1, otherwise return 0. 2123 */ 2124 int 2125 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 2126 { 2127 /* 2128 * If the link is up and no FCoE events occurred while in 2129 * FCF discovery, there is no need to restart FCF discovery.
2130 */ 2131 if ((phba->link_state >= LPFC_LINK_UP) && 2132 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 2133 return 0; 2134 2135 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2136 "2768 Pending link or FCF event during current " 2137 "handling of the previous event: link_state:x%x, " 2138 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", 2139 phba->link_state, phba->fcoe_eventtag_at_fcf_scan, 2140 phba->fcoe_eventtag); 2141 2142 spin_lock_irq(&phba->hbalock); 2143 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 2144 spin_unlock_irq(&phba->hbalock); 2145 2146 if (phba->link_state >= LPFC_LINK_UP) { 2147 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 2148 "2780 Restart FCF table scan due to " 2149 "pending FCF event:evt_tag_at_scan:x%x, " 2150 "evt_tag_current:x%x\n", 2151 phba->fcoe_eventtag_at_fcf_scan, 2152 phba->fcoe_eventtag); 2153 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 2154 } else { 2155 /* 2156 * Do not continue FCF discovery and clear FCF_TS_INPROG 2157 * flag 2158 */ 2159 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 2160 "2833 Stop FCF discovery process due to link " 2161 "state change (x%x)\n", phba->link_state); 2162 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2163 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 2164 spin_lock_irq(&phba->hbalock); 2165 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 2166 spin_unlock_irq(&phba->hbalock); 2167 } 2168 2169 /* Unregister the currently registered FCF if required */ 2170 if (unreg_fcf) { 2171 spin_lock_irq(&phba->hbalock); 2172 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 2173 spin_unlock_irq(&phba->hbalock); 2174 lpfc_sli4_unregister_fcf(phba); 2175 } 2176 return 1; 2177 } 2178 2179 /** 2180 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record 2181 * @phba: pointer to lpfc hba data structure. 2182 * @fcf_cnt: number of eligible fcf records seen so far. 2183 * 2184 * This function makes a running random selection decision on the FCF record to 2185 * use through a sequence of @fcf_cnt eligible FCF records with equal 2186 * probability. To perform integer manipulation of random numbers with 2187 * size uint32_t, a 16-bit random number returned from get_random_u16() is 2188 * taken as the random number generated. 2189 * 2190 * Returns true when the outcome is that the newly read FCF record should be 2191 * chosen; otherwise, returns false when the outcome is to keep the previously 2192 * chosen FCF record. 2193 **/ 2194 static bool 2195 lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) 2196 { 2197 uint32_t rand_num; 2198 2199 /* Get 16-bit uniform random number */ 2200 rand_num = get_random_u16(); 2201 2202 /* Decision with probability 1/fcf_cnt */ 2203 if ((fcf_cnt * rand_num) < 0xFFFF) 2204 return true; 2205 else 2206 return false; 2207 } 2208 2209 /** 2210 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command. 2211 * @phba: pointer to lpfc hba data structure. 2212 * @mboxq: pointer to mailbox object. 2213 * @next_fcf_index: pointer to holder of next fcf index. 2214 * 2215 * This routine parses the non-embedded fcf mailbox command by performing the 2216 * necessary error checking, non-embedded read FCF record mailbox command 2217 * SGE parsing, and endianness swapping. 2218 * 2219 * Returns the pointer to the new FCF record in the non-embedded mailbox 2220 * command DMA memory if successful, otherwise NULL.
2221 */ 2222 static struct fcf_record * 2223 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 2224 uint16_t *next_fcf_index) 2225 { 2226 void *virt_addr; 2227 struct lpfc_mbx_sge sge; 2228 struct lpfc_mbx_read_fcf_tbl *read_fcf; 2229 uint32_t shdr_status, shdr_add_status, if_type; 2230 union lpfc_sli4_cfg_shdr *shdr; 2231 struct fcf_record *new_fcf_record; 2232 2233 /* Get the first SGE entry from the non-embedded DMA memory. This 2234 * routine only uses a single SGE. 2235 */ 2236 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 2237 if (unlikely(!mboxq->sge_array)) { 2238 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2239 "2524 Failed to get the non-embedded SGE " 2240 "virtual address\n"); 2241 return NULL; 2242 } 2243 virt_addr = mboxq->sge_array->addr[0]; 2244 2245 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 2246 lpfc_sli_pcimem_bcopy(shdr, shdr, 2247 sizeof(union lpfc_sli4_cfg_shdr)); 2248 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 2249 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 2250 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 2251 if (shdr_status || shdr_add_status) { 2252 if (shdr_status == STATUS_FCF_TABLE_EMPTY || 2253 if_type == LPFC_SLI_INTF_IF_TYPE_2) 2254 lpfc_printf_log(phba, KERN_ERR, 2255 LOG_TRACE_EVENT, 2256 "2726 READ_FCF_RECORD Indicates empty " 2257 "FCF table.\n"); 2258 else 2259 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2260 "2521 READ_FCF_RECORD mailbox failed " 2261 "with status x%x add_status x%x, " 2262 "mbx\n", shdr_status, shdr_add_status); 2263 return NULL; 2264 } 2265 2266 /* Interpreting the returned information of the FCF record */ 2267 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 2268 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 2269 sizeof(struct lpfc_mbx_read_fcf_tbl)); 2270 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 2271 new_fcf_record = (struct fcf_record *)(virt_addr + 2272 sizeof(struct lpfc_mbx_read_fcf_tbl)); 2273 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 2274 offsetof(struct fcf_record, vlan_bitmap)); 2275 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); 2276 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); 2277 2278 return new_fcf_record; 2279 } 2280 2281 /** 2282 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record 2283 * @phba: pointer to lpfc hba data structure. 2284 * @fcf_record: pointer to the fcf record. 2285 * @vlan_id: the lowest vlan identifier associated to this fcf record. 2286 * @next_fcf_index: the index to the next fcf record in hba's fcf table. 2287 * 2288 * This routine logs the detailed FCF record if the LOG_FIP loggin is 2289 * enabled. 
2290 **/ 2291 static void 2292 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, 2293 struct fcf_record *fcf_record, 2294 uint16_t vlan_id, 2295 uint16_t next_fcf_index) 2296 { 2297 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2298 "2764 READ_FCF_RECORD:\n" 2299 "\tFCF_Index : x%x\n" 2300 "\tFCF_Avail : x%x\n" 2301 "\tFCF_Valid : x%x\n" 2302 "\tFCF_SOL : x%x\n" 2303 "\tFIP_Priority : x%x\n" 2304 "\tMAC_Provider : x%x\n" 2305 "\tLowest VLANID : x%x\n" 2306 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" 2307 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 2308 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 2309 "\tNext_FCF_Index: x%x\n", 2310 bf_get(lpfc_fcf_record_fcf_index, fcf_record), 2311 bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 2312 bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 2313 bf_get(lpfc_fcf_record_fcf_sol, fcf_record), 2314 fcf_record->fip_priority, 2315 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 2316 vlan_id, 2317 bf_get(lpfc_fcf_record_mac_0, fcf_record), 2318 bf_get(lpfc_fcf_record_mac_1, fcf_record), 2319 bf_get(lpfc_fcf_record_mac_2, fcf_record), 2320 bf_get(lpfc_fcf_record_mac_3, fcf_record), 2321 bf_get(lpfc_fcf_record_mac_4, fcf_record), 2322 bf_get(lpfc_fcf_record_mac_5, fcf_record), 2323 bf_get(lpfc_fcf_record_fab_name_0, fcf_record), 2324 bf_get(lpfc_fcf_record_fab_name_1, fcf_record), 2325 bf_get(lpfc_fcf_record_fab_name_2, fcf_record), 2326 bf_get(lpfc_fcf_record_fab_name_3, fcf_record), 2327 bf_get(lpfc_fcf_record_fab_name_4, fcf_record), 2328 bf_get(lpfc_fcf_record_fab_name_5, fcf_record), 2329 bf_get(lpfc_fcf_record_fab_name_6, fcf_record), 2330 bf_get(lpfc_fcf_record_fab_name_7, fcf_record), 2331 bf_get(lpfc_fcf_record_switch_name_0, fcf_record), 2332 bf_get(lpfc_fcf_record_switch_name_1, fcf_record), 2333 bf_get(lpfc_fcf_record_switch_name_2, fcf_record), 2334 bf_get(lpfc_fcf_record_switch_name_3, fcf_record), 2335 bf_get(lpfc_fcf_record_switch_name_4, fcf_record), 2336 bf_get(lpfc_fcf_record_switch_name_5, fcf_record), 2337 bf_get(lpfc_fcf_record_switch_name_6, fcf_record), 2338 bf_get(lpfc_fcf_record_switch_name_7, fcf_record), 2339 next_fcf_index); 2340 } 2341 2342 /** 2343 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF 2344 * @phba: pointer to lpfc hba data structure. 2345 * @fcf_rec: pointer to an existing FCF record. 2346 * @new_fcf_record: pointer to a new FCF record. 2347 * @new_vlan_id: vlan id from the new FCF record. 2348 * 2349 * This function performs matching test of a new FCF record against an existing 2350 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id 2351 * will not be used as part of the FCF record matching criteria. 2352 * 2353 * Returns true if all the fields matching, otherwise returns false. 
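* The fields compared are the fabric name, switch name, MAC address and FIP priority; the VLAN id is compared only when @new_vlan_id is not LPFC_FCOE_IGNORE_VID.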
2354 */ 2355 static bool 2356 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, 2357 struct lpfc_fcf_rec *fcf_rec, 2358 struct fcf_record *new_fcf_record, 2359 uint16_t new_vlan_id) 2360 { 2361 if (new_vlan_id != LPFC_FCOE_IGNORE_VID) 2362 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) 2363 return false; 2364 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) 2365 return false; 2366 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) 2367 return false; 2368 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 2369 return false; 2370 if (fcf_rec->priority != new_fcf_record->fip_priority) 2371 return false; 2372 return true; 2373 } 2374 2375 /** 2376 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 2377 * @vport: Pointer to vport object. 2378 * @fcf_index: index to next fcf. 2379 * 2380 * This function processing the roundrobin fcf failover to next fcf index. 2381 * When this function is invoked, there will be a current fcf registered 2382 * for flogi. 2383 * Return: 0 for continue retrying flogi on currently registered fcf; 2384 * 1 for stop flogi on currently registered fcf; 2385 */ 2386 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 2387 { 2388 struct lpfc_hba *phba = vport->phba; 2389 int rc; 2390 2391 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 2392 if (test_bit(HBA_DEVLOSS_TMO, &phba->hba_flag)) { 2393 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2394 "2872 Devloss tmo with no eligible " 2395 "FCF, unregister in-use FCF (x%x) " 2396 "and rescan FCF table\n", 2397 phba->fcf.current_rec.fcf_indx); 2398 lpfc_unregister_fcf_rescan(phba); 2399 goto stop_flogi_current_fcf; 2400 } 2401 /* Mark the end to FLOGI roundrobin failover */ 2402 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 2403 /* Allow action to new fcf asynchronous event */ 2404 spin_lock_irq(&phba->hbalock); 2405 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 2406 spin_unlock_irq(&phba->hbalock); 2407 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2408 "2865 No FCF available, stop roundrobin FCF " 2409 "failover and change port state:x%x/x%x\n", 2410 phba->pport->port_state, LPFC_VPORT_UNKNOWN); 2411 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2412 2413 if (!phba->fcf.fcf_redisc_attempted) { 2414 lpfc_unregister_fcf(phba); 2415 2416 rc = lpfc_sli4_redisc_fcf_table(phba); 2417 if (!rc) { 2418 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2419 "3195 Rediscover FCF table\n"); 2420 phba->fcf.fcf_redisc_attempted = 1; 2421 lpfc_sli4_clear_fcf_rr_bmask(phba); 2422 } else { 2423 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2424 "3196 Rediscover FCF table " 2425 "failed. Status:x%x\n", rc); 2426 } 2427 } else { 2428 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2429 "3197 Already rediscover FCF table " 2430 "attempted. No more retry\n"); 2431 } 2432 goto stop_flogi_current_fcf; 2433 } else { 2434 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 2435 "2794 Try FLOGI roundrobin FCF failover to " 2436 "(x%x)\n", fcf_index); 2437 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 2438 if (rc) 2439 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 2440 "2761 FLOGI roundrobin FCF failover " 2441 "failed (rc:x%x) to read FCF (x%x)\n", 2442 rc, phba->fcf.current_rec.fcf_indx); 2443 else 2444 goto stop_flogi_current_fcf; 2445 } 2446 return 0; 2447 2448 stop_flogi_current_fcf: 2449 lpfc_can_disctmo(vport); 2450 return 1; 2451 } 2452 2453 /** 2454 * lpfc_sli4_fcf_pri_list_del 2455 * @phba: pointer to lpfc hba data structure. 
2456 * @fcf_index: the index of the fcf record to delete 2457 * This routine checks the on list flag of the fcf_index to be deleted. 2458 * If it is on the list then it is removed from the list, and the flag 2459 * is cleared. This routine grabs the hbalock before removing the fcf 2460 * record from the list. 2461 **/ 2462 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, 2463 uint16_t fcf_index) 2464 { 2465 struct lpfc_fcf_pri *new_fcf_pri; 2466 2467 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2468 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2469 "3058 deleting idx x%x pri x%x flg x%x\n", 2470 fcf_index, new_fcf_pri->fcf_rec.priority, 2471 new_fcf_pri->fcf_rec.flag); 2472 spin_lock_irq(&phba->hbalock); 2473 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { 2474 if (phba->fcf.current_rec.priority == 2475 new_fcf_pri->fcf_rec.priority) 2476 phba->fcf.eligible_fcf_cnt--; 2477 list_del_init(&new_fcf_pri->list); 2478 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; 2479 } 2480 spin_unlock_irq(&phba->hbalock); 2481 } 2482 2483 /** 2484 * lpfc_sli4_set_fcf_flogi_fail 2485 * @phba: pointer to lpfc hba data structure. 2486 * @fcf_index: the index of the fcf record to update 2487 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED 2488 * flag so the round robin selection for the particular priority level 2489 * will try a different fcf record that does not have this bit set. 2490 * If the fcf record is re-read for any reason this flag is cleared before 2491 * adding it to the priority list. 2492 **/ 2493 void 2494 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) 2495 { 2496 struct lpfc_fcf_pri *new_fcf_pri; 2497 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2498 spin_lock_irq(&phba->hbalock); 2499 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; 2500 spin_unlock_irq(&phba->hbalock); 2501 } 2502 2503 /** 2504 * lpfc_sli4_fcf_pri_list_add 2505 * @phba: pointer to lpfc hba data structure. 2506 * @fcf_index: the index of the fcf record to add 2507 * @new_fcf_record: pointer to a new FCF record. 2508 * This routine checks the priority of the fcf_index to be added. 2509 * If it is a lower priority than the current head of the fcf_pri list 2510 * then it is added to the list in the right order. 2511 * If it is the same priority as the current head of the list then it 2512 * is added to the head of the list and its bit in the rr_bmask is set. 2513 * If the fcf_index to be added is of a higher priority than the current 2514 * head of the list then the rr_bmask is cleared, its bit is set in the 2515 * rr_bmask and it is added to the head of the list.
2516 * returns: 2517 * 0=success 1=failure 2518 **/ 2519 static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, 2520 uint16_t fcf_index, 2521 struct fcf_record *new_fcf_record) 2522 { 2523 uint16_t current_fcf_pri; 2524 uint16_t last_index; 2525 struct lpfc_fcf_pri *fcf_pri; 2526 struct lpfc_fcf_pri *next_fcf_pri; 2527 struct lpfc_fcf_pri *new_fcf_pri; 2528 int ret; 2529 2530 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2531 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2532 "3059 adding idx x%x pri x%x flg x%x\n", 2533 fcf_index, new_fcf_record->fip_priority, 2534 new_fcf_pri->fcf_rec.flag); 2535 spin_lock_irq(&phba->hbalock); 2536 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) 2537 list_del_init(&new_fcf_pri->list); 2538 new_fcf_pri->fcf_rec.fcf_index = fcf_index; 2539 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; 2540 if (list_empty(&phba->fcf.fcf_pri_list)) { 2541 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2542 ret = lpfc_sli4_fcf_rr_index_set(phba, 2543 new_fcf_pri->fcf_rec.fcf_index); 2544 goto out; 2545 } 2546 2547 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 2548 LPFC_SLI4_FCF_TBL_INDX_MAX); 2549 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 2550 ret = 0; /* Empty rr list */ 2551 goto out; 2552 } 2553 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; 2554 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { 2555 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2556 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { 2557 memset(phba->fcf.fcf_rr_bmask, 0, 2558 sizeof(*phba->fcf.fcf_rr_bmask)); 2559 /* fcfs_at_this_priority_level = 1; */ 2560 phba->fcf.eligible_fcf_cnt = 1; 2561 } else 2562 /* fcfs_at_this_priority_level++; */ 2563 phba->fcf.eligible_fcf_cnt++; 2564 ret = lpfc_sli4_fcf_rr_index_set(phba, 2565 new_fcf_pri->fcf_rec.fcf_index); 2566 goto out; 2567 } 2568 2569 list_for_each_entry_safe(fcf_pri, next_fcf_pri, 2570 &phba->fcf.fcf_pri_list, list) { 2571 if (new_fcf_pri->fcf_rec.priority <= 2572 fcf_pri->fcf_rec.priority) { 2573 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) 2574 list_add(&new_fcf_pri->list, 2575 &phba->fcf.fcf_pri_list); 2576 else 2577 list_add(&new_fcf_pri->list, 2578 &((struct lpfc_fcf_pri *) 2579 fcf_pri->list.prev)->list); 2580 ret = 0; 2581 goto out; 2582 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list 2583 || new_fcf_pri->fcf_rec.priority < 2584 next_fcf_pri->fcf_rec.priority) { 2585 list_add(&new_fcf_pri->list, &fcf_pri->list); 2586 ret = 0; 2587 goto out; 2588 } 2589 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) 2590 continue; 2591 2592 } 2593 ret = 1; 2594 out: 2595 /* we use = instead of |= to clear the FLOGI_FAILED flag. */ 2596 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; 2597 spin_unlock_irq(&phba->hbalock); 2598 return ret; 2599 } 2600 2601 /** 2602 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2603 * @phba: pointer to lpfc hba data structure. 2604 * @mboxq: pointer to mailbox object. 2605 * 2606 * This function iterates through all the fcf records available in 2607 * HBA and chooses the optimal FCF record for discovery. After finding 2608 * the FCF for discovery it registers the FCF record and kicks start 2609 * discovery. 2610 * If FCF_IN_USE flag is set in currently used FCF, the routine tries to 2611 * use an FCF record which matches fabric name and mac address of the 2612 * currently used FCF record. 2613 * If the driver supports only one FCF, it will try to use the FCF record 2614 * used by BOOT_BIOS. 
2615 */ 2616 void 2617 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2618 { 2619 struct fcf_record *new_fcf_record; 2620 uint32_t boot_flag, addr_mode; 2621 uint16_t fcf_index, next_fcf_index; 2622 struct lpfc_fcf_rec *fcf_rec = NULL; 2623 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 2624 bool select_new_fcf; 2625 int rc; 2626 2627 /* If there is pending FCoE event restart FCF table scan */ 2628 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 2629 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2630 return; 2631 } 2632 2633 /* Parse the FCF record from the non-embedded mailbox command */ 2634 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2635 &next_fcf_index); 2636 if (!new_fcf_record) { 2637 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2638 "2765 Mailbox command READ_FCF_RECORD " 2639 "failed to retrieve a FCF record.\n"); 2640 /* Let next new FCF event trigger fast failover */ 2641 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2642 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2643 return; 2644 } 2645 2646 /* Check the FCF record against the connection list */ 2647 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2648 &addr_mode, &vlan_id); 2649 2650 /* Log the FCF record information if turned on */ 2651 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2652 next_fcf_index); 2653 2654 /* 2655 * If the fcf record does not match with connect list entries 2656 * read the next entry; otherwise, this is an eligible FCF 2657 * record for roundrobin FCF failover. 2658 */ 2659 if (!rc) { 2660 lpfc_sli4_fcf_pri_list_del(phba, 2661 bf_get(lpfc_fcf_record_fcf_index, 2662 new_fcf_record)); 2663 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2664 "2781 FCF (x%x) failed connection " 2665 "list check: (x%x/x%x/%x)\n", 2666 bf_get(lpfc_fcf_record_fcf_index, 2667 new_fcf_record), 2668 bf_get(lpfc_fcf_record_fcf_avail, 2669 new_fcf_record), 2670 bf_get(lpfc_fcf_record_fcf_valid, 2671 new_fcf_record), 2672 bf_get(lpfc_fcf_record_fcf_sol, 2673 new_fcf_record)); 2674 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2675 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2676 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 2677 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 2678 phba->fcf.current_rec.fcf_indx) { 2679 lpfc_printf_log(phba, KERN_ERR, 2680 LOG_TRACE_EVENT, 2681 "2862 FCF (x%x) matches property " 2682 "of in-use FCF (x%x)\n", 2683 bf_get(lpfc_fcf_record_fcf_index, 2684 new_fcf_record), 2685 phba->fcf.current_rec.fcf_indx); 2686 goto read_next_fcf; 2687 } 2688 /* 2689 * In case the current in-use FCF record becomes 2690 * invalid/unavailable during FCF discovery that 2691 * was not triggered by fast FCF failover process, 2692 * treat it as fast FCF failover. 
2693 */ 2694 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && 2695 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2696 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2697 "2835 Invalid in-use FCF " 2698 "(x%x), enter FCF failover " 2699 "table scan.\n", 2700 phba->fcf.current_rec.fcf_indx); 2701 spin_lock_irq(&phba->hbalock); 2702 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2703 spin_unlock_irq(&phba->hbalock); 2704 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2705 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2706 LPFC_FCOE_FCF_GET_FIRST); 2707 return; 2708 } 2709 } 2710 goto read_next_fcf; 2711 } else { 2712 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2713 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, 2714 new_fcf_record); 2715 if (rc) 2716 goto read_next_fcf; 2717 } 2718 2719 /* 2720 * If this is not the first FCF discovery of the HBA, use last 2721 * FCF record for the discovery. The condition that a rescan 2722 * matches the in-use FCF record: fabric name, switch name, mac 2723 * address, and vlan_id. 2724 */ 2725 spin_lock_irq(&phba->hbalock); 2726 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2727 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2728 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2729 new_fcf_record, vlan_id)) { 2730 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2731 phba->fcf.current_rec.fcf_indx) { 2732 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2733 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2734 /* Stop FCF redisc wait timer */ 2735 __lpfc_sli4_stop_fcf_redisc_wait_timer( 2736 phba); 2737 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2738 /* Fast failover, mark completed */ 2739 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2740 spin_unlock_irq(&phba->hbalock); 2741 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2742 "2836 New FCF matches in-use " 2743 "FCF (x%x), port_state:x%x, " 2744 "fc_flag:x%lx\n", 2745 phba->fcf.current_rec.fcf_indx, 2746 phba->pport->port_state, 2747 phba->pport->fc_flag); 2748 goto out; 2749 } else 2750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 2751 "2863 New FCF (x%x) matches " 2752 "property of in-use FCF (x%x)\n", 2753 bf_get(lpfc_fcf_record_fcf_index, 2754 new_fcf_record), 2755 phba->fcf.current_rec.fcf_indx); 2756 } 2757 /* 2758 * Read next FCF record from HBA searching for the matching 2759 * with in-use record only if not during the fast failover 2760 * period. In case of fast failover period, it shall try to 2761 * determine whether the FCF record just read should be the 2762 * next candidate. 2763 */ 2764 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2765 spin_unlock_irq(&phba->hbalock); 2766 goto read_next_fcf; 2767 } 2768 } 2769 /* 2770 * Update on failover FCF record only if it's in FCF fast-failover 2771 * period; otherwise, update on current FCF record. 2772 */ 2773 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2774 fcf_rec = &phba->fcf.failover_rec; 2775 else 2776 fcf_rec = &phba->fcf.current_rec; 2777 2778 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 2779 /* 2780 * If the driver FCF record does not have boot flag 2781 * set and new hba fcf record has boot flag set, use 2782 * the new hba fcf record. 
2783 */ 2784 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 2785 /* Choose this FCF record */ 2786 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2787 "2837 Update current FCF record " 2788 "(x%x) with new FCF record (x%x)\n", 2789 fcf_rec->fcf_indx, 2790 bf_get(lpfc_fcf_record_fcf_index, 2791 new_fcf_record)); 2792 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2793 addr_mode, vlan_id, BOOT_ENABLE); 2794 spin_unlock_irq(&phba->hbalock); 2795 goto read_next_fcf; 2796 } 2797 /* 2798 * If the driver FCF record has boot flag set and the 2799 * new hba FCF record does not have boot flag, read 2800 * the next FCF record. 2801 */ 2802 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 2803 spin_unlock_irq(&phba->hbalock); 2804 goto read_next_fcf; 2805 } 2806 /* 2807 * If the new hba FCF record has lower priority value 2808 * than the driver FCF record, use the new record. 2809 */ 2810 if (new_fcf_record->fip_priority < fcf_rec->priority) { 2811 /* Choose the new FCF record with lower priority */ 2812 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2813 "2838 Update current FCF record " 2814 "(x%x) with new FCF record (x%x)\n", 2815 fcf_rec->fcf_indx, 2816 bf_get(lpfc_fcf_record_fcf_index, 2817 new_fcf_record)); 2818 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2819 addr_mode, vlan_id, 0); 2820 /* Reset running random FCF selection count */ 2821 phba->fcf.eligible_fcf_cnt = 1; 2822 } else if (new_fcf_record->fip_priority == fcf_rec->priority) { 2823 /* Update running random FCF selection count */ 2824 phba->fcf.eligible_fcf_cnt++; 2825 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 2826 phba->fcf.eligible_fcf_cnt); 2827 if (select_new_fcf) { 2828 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2829 "2839 Update current FCF record " 2830 "(x%x) with new FCF record (x%x)\n", 2831 fcf_rec->fcf_indx, 2832 bf_get(lpfc_fcf_record_fcf_index, 2833 new_fcf_record)); 2834 /* Choose the new FCF by random selection */ 2835 __lpfc_update_fcf_record(phba, fcf_rec, 2836 new_fcf_record, 2837 addr_mode, vlan_id, 0); 2838 } 2839 } 2840 spin_unlock_irq(&phba->hbalock); 2841 goto read_next_fcf; 2842 } 2843 /* 2844 * This is the first suitable FCF record, choose this record for 2845 * initial best-fit FCF. 2846 */ 2847 if (fcf_rec) { 2848 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2849 "2840 Update initial FCF candidate " 2850 "with FCF (x%x)\n", 2851 bf_get(lpfc_fcf_record_fcf_index, 2852 new_fcf_record)); 2853 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2854 addr_mode, vlan_id, (boot_flag ? 
2855 BOOT_ENABLE : 0)); 2856 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2857 /* Setup initial running random FCF selection count */ 2858 phba->fcf.eligible_fcf_cnt = 1; 2859 } 2860 spin_unlock_irq(&phba->hbalock); 2861 goto read_next_fcf; 2862 2863 read_next_fcf: 2864 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2865 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { 2866 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { 2867 /* 2868 * Case of FCF fast failover scan 2869 */ 2870 2871 /* 2872 * It has not found any suitable FCF record, cancel 2873 * FCF scan inprogress, and do nothing 2874 */ 2875 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2876 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2877 "2782 No suitable FCF found: " 2878 "(x%x/x%x)\n", 2879 phba->fcoe_eventtag_at_fcf_scan, 2880 bf_get(lpfc_fcf_record_fcf_index, 2881 new_fcf_record)); 2882 if (test_bit(HBA_DEVLOSS_TMO, 2883 &phba->hba_flag)) { 2884 clear_bit(FCF_TS_INPROG, 2885 &phba->hba_flag); 2886 /* Unregister in-use FCF and rescan */ 2887 lpfc_printf_log(phba, KERN_INFO, 2888 LOG_FIP, 2889 "2864 On devloss tmo " 2890 "unreg in-use FCF and " 2891 "rescan FCF table\n"); 2892 lpfc_unregister_fcf_rescan(phba); 2893 return; 2894 } 2895 /* 2896 * Let next new FCF event trigger fast failover 2897 */ 2898 clear_bit(FCF_TS_INPROG, &phba->hba_flag); 2899 return; 2900 } 2901 /* 2902 * It has found a suitable FCF record that is not 2903 * the same as in-use FCF record, unregister the 2904 * in-use FCF record, replace the in-use FCF record 2905 * with the new FCF record, mark FCF fast failover 2906 * completed, and then start register the new FCF 2907 * record. 2908 */ 2909 2910 /* Unregister the current in-use FCF record */ 2911 lpfc_unregister_fcf(phba); 2912 2913 /* Replace in-use record with the new record */ 2914 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2915 "2842 Replace in-use FCF (x%x) " 2916 "with failover FCF (x%x)\n", 2917 phba->fcf.current_rec.fcf_indx, 2918 phba->fcf.failover_rec.fcf_indx); 2919 memcpy(&phba->fcf.current_rec, 2920 &phba->fcf.failover_rec, 2921 sizeof(struct lpfc_fcf_rec)); 2922 /* 2923 * Mark the fast FCF failover rediscovery completed 2924 * and the start of the first round of the roundrobin 2925 * FCF failover. 2926 */ 2927 spin_lock_irq(&phba->hbalock); 2928 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2929 spin_unlock_irq(&phba->hbalock); 2930 /* Register to the new FCF record */ 2931 lpfc_register_fcf(phba); 2932 } else { 2933 /* 2934 * In case of transaction period to fast FCF failover, 2935 * do nothing when search to the end of the FCF table. 2936 */ 2937 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || 2938 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2939 return; 2940 2941 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2942 phba->fcf.fcf_flag & FCF_IN_USE) { 2943 /* 2944 * In case the current in-use FCF record no 2945 * longer existed during FCF discovery that 2946 * was not triggered by fast FCF failover 2947 * process, treat it as fast FCF failover. 
2948 */ 2949 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2950 "2841 In-use FCF record (x%x) " 2951 "not reported, entering fast " 2952 "FCF failover mode scanning.\n", 2953 phba->fcf.current_rec.fcf_indx); 2954 spin_lock_irq(&phba->hbalock); 2955 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2956 spin_unlock_irq(&phba->hbalock); 2957 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2958 LPFC_FCOE_FCF_GET_FIRST); 2959 return; 2960 } 2961 /* Register to the new FCF record */ 2962 lpfc_register_fcf(phba); 2963 } 2964 } else 2965 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); 2966 return; 2967 2968 out: 2969 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2970 lpfc_register_fcf(phba); 2971 2972 return; 2973 } 2974 2975 /** 2976 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2977 * @phba: pointer to lpfc hba data structure. 2978 * @mboxq: pointer to mailbox object. 2979 * 2980 * This is the callback function for FLOGI failure roundrobin FCF failover 2981 * read FCF record mailbox command from the eligible FCF record bmask for 2982 * performing the failover. If the FCF read back is not valid/available, it 2983 * fails through to retrying FLOGI to the currently registered FCF again. 2984 * Otherwise, if the FCF read back is valid and available, it will set the 2985 * newly read FCF record to the failover FCF record, unregister currently 2986 * registered FCF record, copy the failover FCF record to the current 2987 * FCF record, and then register the current FCF record before proceeding 2988 * to trying FLOGI on the new failover FCF. 2989 */ 2990 void 2991 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2992 { 2993 struct fcf_record *new_fcf_record; 2994 uint32_t boot_flag, addr_mode; 2995 uint16_t next_fcf_index, fcf_index; 2996 uint16_t current_fcf_index; 2997 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 2998 int rc; 2999 3000 /* If link state is not up, stop the roundrobin failover process */ 3001 if (phba->link_state < LPFC_LINK_UP) { 3002 spin_lock_irq(&phba->hbalock); 3003 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 3004 spin_unlock_irq(&phba->hbalock); 3005 clear_bit(FCF_RR_INPROG, &phba->hba_flag); 3006 goto out; 3007 } 3008 3009 /* Parse the FCF record from the non-embedded mailbox command */ 3010 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 3011 &next_fcf_index); 3012 if (!new_fcf_record) { 3013 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 3014 "2766 Mailbox command READ_FCF_RECORD " 3015 "failed to retrieve a FCF record. 
" 3016 "hba_flg x%lx fcf_flg x%x\n", phba->hba_flag, 3017 phba->fcf.fcf_flag); 3018 lpfc_unregister_fcf_rescan(phba); 3019 goto out; 3020 } 3021 3022 /* Get the needed parameters from FCF record */ 3023 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 3024 &addr_mode, &vlan_id); 3025 3026 /* Log the FCF record information if turned on */ 3027 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 3028 next_fcf_index); 3029 3030 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 3031 if (!rc) { 3032 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3033 "2848 Remove ineligible FCF (x%x) from " 3034 "from roundrobin bmask\n", fcf_index); 3035 /* Clear roundrobin bmask bit for ineligible FCF */ 3036 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 3037 /* Perform next round of roundrobin FCF failover */ 3038 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 3039 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 3040 if (rc) 3041 goto out; 3042 goto error_out; 3043 } 3044 3045 if (fcf_index == phba->fcf.current_rec.fcf_indx) { 3046 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3047 "2760 Perform FLOGI roundrobin FCF failover: " 3048 "FCF (x%x) back to FCF (x%x)\n", 3049 phba->fcf.current_rec.fcf_indx, fcf_index); 3050 /* Wait 500 ms before retrying FLOGI to current FCF */ 3051 msleep(500); 3052 lpfc_issue_init_vfi(phba->pport); 3053 goto out; 3054 } 3055 3056 /* Upload new FCF record to the failover FCF record */ 3057 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3058 "2834 Update current FCF (x%x) with new FCF (x%x)\n", 3059 phba->fcf.failover_rec.fcf_indx, fcf_index); 3060 spin_lock_irq(&phba->hbalock); 3061 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 3062 new_fcf_record, addr_mode, vlan_id, 3063 (boot_flag ? BOOT_ENABLE : 0)); 3064 spin_unlock_irq(&phba->hbalock); 3065 3066 current_fcf_index = phba->fcf.current_rec.fcf_indx; 3067 3068 /* Unregister the current in-use FCF record */ 3069 lpfc_unregister_fcf(phba); 3070 3071 /* Replace in-use record with the new record */ 3072 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, 3073 sizeof(struct lpfc_fcf_rec)); 3074 3075 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3076 "2783 Perform FLOGI roundrobin FCF failover: FCF " 3077 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 3078 3079 error_out: 3080 lpfc_register_fcf(phba); 3081 out: 3082 lpfc_sli4_mbox_cmd_free(phba, mboxq); 3083 } 3084 3085 /** 3086 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. 3087 * @phba: pointer to lpfc hba data structure. 3088 * @mboxq: pointer to mailbox object. 3089 * 3090 * This is the callback function of read FCF record mailbox command for 3091 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 3092 * failover when a new FCF event happened. If the FCF read back is 3093 * valid/available and it passes the connection list check, it updates 3094 * the bmask for the eligible FCF record for roundrobin failover. 
3095 */ 3096 void 3097 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3098 { 3099 struct fcf_record *new_fcf_record; 3100 uint32_t boot_flag, addr_mode; 3101 uint16_t fcf_index, next_fcf_index; 3102 uint16_t vlan_id = LPFC_FCOE_NULL_VID; 3103 int rc; 3104 3105 /* If link state is not up, no need to proceed */ 3106 if (phba->link_state < LPFC_LINK_UP) 3107 goto out; 3108 3109 /* If FCF discovery period is over, no need to proceed */ 3110 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) 3111 goto out; 3112 3113 /* Parse the FCF record from the non-embedded mailbox command */ 3114 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 3115 &next_fcf_index); 3116 if (!new_fcf_record) { 3117 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 3118 "2767 Mailbox command READ_FCF_RECORD " 3119 "failed to retrieve a FCF record.\n"); 3120 goto out; 3121 } 3122 3123 /* Check the connection list for eligibility */ 3124 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 3125 &addr_mode, &vlan_id); 3126 3127 /* Log the FCF record information if turned on */ 3128 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 3129 next_fcf_index); 3130 3131 if (!rc) 3132 goto out; 3133 3134 /* Update the eligible FCF record index bmask */ 3135 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 3136 3137 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); 3138 3139 out: 3140 lpfc_sli4_mbox_cmd_free(phba, mboxq); 3141 } 3142 3143 /** 3144 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. 3145 * @phba: pointer to lpfc hba data structure. 3146 * @mboxq: pointer to mailbox data structure. 3147 * 3148 * This function handles completion of init vfi mailbox command. 3149 */ 3150 static void 3151 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3152 { 3153 struct lpfc_vport *vport = mboxq->vport; 3154 3155 /* 3156 * VFI not supported on interface type 0, just do the flogi 3157 * Also continue if the VFI is in use - just use the same one. 3158 */ 3159 if (mboxq->u.mb.mbxStatus && 3160 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3161 LPFC_SLI_INTF_IF_TYPE_0) && 3162 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 3163 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3164 "2891 Init VFI mailbox failed 0x%x\n", 3165 mboxq->u.mb.mbxStatus); 3166 mempool_free(mboxq, phba->mbox_mem_pool); 3167 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3168 return; 3169 } 3170 3171 lpfc_initial_flogi(vport); 3172 mempool_free(mboxq, phba->mbox_mem_pool); 3173 return; 3174 } 3175 3176 /** 3177 * lpfc_issue_init_vfi - Issue init_vfi mailbox command. 3178 * @vport: pointer to lpfc_vport data structure. 3179 * 3180 * This function issue a init_vfi mailbox command to initialize the VFI and 3181 * VPI for the physical port. 
3182 */ 3183 void 3184 lpfc_issue_init_vfi(struct lpfc_vport *vport) 3185 { 3186 LPFC_MBOXQ_t *mboxq; 3187 int rc; 3188 struct lpfc_hba *phba = vport->phba; 3189 3190 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3191 if (!mboxq) { 3192 lpfc_printf_vlog(vport, KERN_ERR, 3193 LOG_TRACE_EVENT, "2892 Failed to allocate " 3194 "init_vfi mailbox\n"); 3195 return; 3196 } 3197 lpfc_init_vfi(mboxq, vport); 3198 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; 3199 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 3200 if (rc == MBX_NOT_FINISHED) { 3201 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3202 "2893 Failed to issue init_vfi mailbox\n"); 3203 mempool_free(mboxq, vport->phba->mbox_mem_pool); 3204 } 3205 } 3206 3207 /** 3208 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 3209 * @phba: pointer to lpfc hba data structure. 3210 * @mboxq: pointer to mailbox data structure. 3211 * 3212 * This function handles completion of init vpi mailbox command. 3213 */ 3214 void 3215 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3216 { 3217 struct lpfc_vport *vport = mboxq->vport; 3218 struct lpfc_nodelist *ndlp; 3219 3220 if (mboxq->u.mb.mbxStatus) { 3221 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3222 "2609 Init VPI mailbox failed 0x%x\n", 3223 mboxq->u.mb.mbxStatus); 3224 mempool_free(mboxq, phba->mbox_mem_pool); 3225 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3226 return; 3227 } 3228 clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 3229 3230 /* If this port is physical port or FDISC is done, do reg_vpi */ 3231 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { 3232 ndlp = lpfc_findnode_did(vport, Fabric_DID); 3233 if (!ndlp) 3234 lpfc_printf_vlog(vport, KERN_ERR, 3235 LOG_TRACE_EVENT, 3236 "2731 Cannot find fabric " 3237 "controller node\n"); 3238 else 3239 lpfc_register_new_vport(phba, vport, ndlp); 3240 mempool_free(mboxq, phba->mbox_mem_pool); 3241 return; 3242 } 3243 3244 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 3245 lpfc_initial_fdisc(vport); 3246 else { 3247 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 3248 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3249 "2606 No NPIV Fabric support\n"); 3250 } 3251 mempool_free(mboxq, phba->mbox_mem_pool); 3252 return; 3253 } 3254 3255 /** 3256 * lpfc_issue_init_vpi - Issue init_vpi mailbox command. 3257 * @vport: pointer to lpfc_vport data structure. 3258 * 3259 * This function issue a init_vpi mailbox command to initialize 3260 * VPI for the vport. 
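* For an NPIV vport that does not yet have a VPI assigned, a VPI is allocated here before the INIT_VPI mailbox is built and issued.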
3261 */ 3262 void 3263 lpfc_issue_init_vpi(struct lpfc_vport *vport) 3264 { 3265 LPFC_MBOXQ_t *mboxq; 3266 int rc, vpi; 3267 3268 if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { 3269 vpi = lpfc_alloc_vpi(vport->phba); 3270 if (!vpi) { 3271 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3272 "3303 Failed to obtain vport vpi\n"); 3273 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3274 return; 3275 } 3276 vport->vpi = vpi; 3277 } 3278 3279 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 3280 if (!mboxq) { 3281 lpfc_printf_vlog(vport, KERN_ERR, 3282 LOG_TRACE_EVENT, "2607 Failed to allocate " 3283 "init_vpi mailbox\n"); 3284 return; 3285 } 3286 lpfc_init_vpi(vport->phba, mboxq, vport->vpi); 3287 mboxq->vport = vport; 3288 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; 3289 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); 3290 if (rc == MBX_NOT_FINISHED) { 3291 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3292 "2608 Failed to issue init_vpi mailbox\n"); 3293 mempool_free(mboxq, vport->phba->mbox_mem_pool); 3294 } 3295 } 3296 3297 /** 3298 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 3299 * @phba: pointer to lpfc hba data structure. 3300 * 3301 * This function loops through the list of vports on the @phba and issues an 3302 * FDISC if possible. 3303 */ 3304 void 3305 lpfc_start_fdiscs(struct lpfc_hba *phba) 3306 { 3307 struct lpfc_vport **vports; 3308 int i; 3309 3310 vports = lpfc_create_vport_work_array(phba); 3311 if (vports != NULL) { 3312 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 3313 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 3314 continue; 3315 /* There are no vpi for this vport */ 3316 if (vports[i]->vpi > phba->max_vpi) { 3317 lpfc_vport_set_state(vports[i], 3318 FC_VPORT_FAILED); 3319 continue; 3320 } 3321 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3322 lpfc_vport_set_state(vports[i], 3323 FC_VPORT_LINKDOWN); 3324 continue; 3325 } 3326 if (test_bit(FC_VPORT_NEEDS_INIT_VPI, 3327 &vports[i]->fc_flag)) { 3328 lpfc_issue_init_vpi(vports[i]); 3329 continue; 3330 } 3331 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 3332 lpfc_initial_fdisc(vports[i]); 3333 else { 3334 lpfc_vport_set_state(vports[i], 3335 FC_VPORT_NO_FABRIC_SUPP); 3336 lpfc_printf_vlog(vports[i], KERN_ERR, 3337 LOG_TRACE_EVENT, 3338 "0259 No NPIV " 3339 "Fabric support\n"); 3340 } 3341 } 3342 } 3343 lpfc_destroy_vport_work_array(phba, vports); 3344 } 3345 3346 void 3347 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 3348 { 3349 struct lpfc_vport *vport = mboxq->vport; 3350 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3351 3352 /* 3353 * VFI not supported for interface type 0, so ignore any mailbox 3354 * error (except VFI in use) and continue with the discovery. 
3355 */ 3356 if (mboxq->u.mb.mbxStatus && 3357 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 3358 LPFC_SLI_INTF_IF_TYPE_0) && 3359 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 3360 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3361 "2018 REG_VFI mbxStatus error x%x " 3362 "HBA state x%x\n", 3363 mboxq->u.mb.mbxStatus, vport->port_state); 3364 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3365 /* FLOGI failed, use loop map to make discovery list */ 3366 lpfc_disc_list_loopmap(vport); 3367 /* Start discovery */ 3368 lpfc_disc_start(vport); 3369 goto out_free_mem; 3370 } 3371 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3372 goto out_free_mem; 3373 } 3374 3375 /* If the VFI is already registered, there is nothing else to do 3376 * Unless this was a VFI update and we are in PT2PT mode, then 3377 * we should drop through to set the port state to ready. 3378 */ 3379 if (test_bit(FC_VFI_REGISTERED, &vport->fc_flag)) 3380 if (!(phba->sli_rev == LPFC_SLI_REV4 && 3381 test_bit(FC_PT2PT, &vport->fc_flag))) 3382 goto out_free_mem; 3383 3384 /* The VPI is implicitly registered when the VFI is registered */ 3385 set_bit(FC_VFI_REGISTERED, &vport->fc_flag); 3386 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 3387 clear_bit(FC_VPORT_NEEDS_INIT_VPI, &vport->fc_flag); 3388 spin_lock_irq(shost->host_lock); 3389 vport->vpi_state |= LPFC_VPI_REGISTERED; 3390 spin_unlock_irq(shost->host_lock); 3391 3392 /* In case SLI4 FC loopback test, we are ready */ 3393 if ((phba->sli_rev == LPFC_SLI_REV4) && 3394 (phba->link_flag & LS_LOOPBACK_MODE)) { 3395 phba->link_state = LPFC_HBA_READY; 3396 goto out_free_mem; 3397 } 3398 3399 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3400 "3313 cmpl reg vfi port_state:%x fc_flag:%lx " 3401 "myDid:%x alpacnt:%d LinkState:%x topology:%x\n", 3402 vport->port_state, vport->fc_flag, vport->fc_myDID, 3403 vport->phba->alpa_map[0], 3404 phba->link_state, phba->fc_topology); 3405 3406 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 3407 /* 3408 * For private loop or for NPort pt2pt, 3409 * just start discovery and we are done. 
3410 */ 3411 if (test_bit(FC_PT2PT, &vport->fc_flag) || 3412 (phba->fc_topology == LPFC_TOPOLOGY_LOOP && 3413 !test_bit(FC_PUBLIC_LOOP, &vport->fc_flag))) { 3414 3415 /* Use loop map to make discovery list */ 3416 lpfc_disc_list_loopmap(vport); 3417 /* Start discovery */ 3418 if (test_bit(FC_PT2PT, &vport->fc_flag)) 3419 vport->port_state = LPFC_VPORT_READY; 3420 else 3421 lpfc_disc_start(vport); 3422 } else { 3423 lpfc_start_fdiscs(phba); 3424 lpfc_do_scr_ns_plogi(phba, vport); 3425 } 3426 } 3427 3428 out_free_mem: 3429 lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED); 3430 } 3431 3432 static void 3433 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3434 { 3435 MAILBOX_t *mb = &pmb->u.mb; 3436 struct lpfc_dmabuf *mp = pmb->ctx_buf; 3437 struct lpfc_vport *vport = pmb->vport; 3438 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3439 struct serv_parm *sp = &vport->fc_sparam; 3440 uint32_t ed_tov; 3441 3442 /* Check for error */ 3443 if (mb->mbxStatus) { 3444 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 3445 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3446 "0319 READ_SPARAM mbxStatus error x%x " 3447 "hba state x%x>\n", 3448 mb->mbxStatus, vport->port_state); 3449 lpfc_linkdown(phba); 3450 goto out; 3451 } 3452 3453 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 3454 sizeof (struct serv_parm)); 3455 3456 ed_tov = be32_to_cpu(sp->cmn.e_d_tov); 3457 if (sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ 3458 ed_tov = (ed_tov + 999999) / 1000000; 3459 3460 phba->fc_edtov = ed_tov; 3461 phba->fc_ratov = (2 * ed_tov) / 1000; 3462 if (phba->fc_ratov < FF_DEF_RATOV) { 3463 /* RA_TOV should be atleast 10sec for initial flogi */ 3464 phba->fc_ratov = FF_DEF_RATOV; 3465 } 3466 3467 lpfc_update_vport_wwn(vport); 3468 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 3469 if (vport->port_type == LPFC_PHYSICAL_PORT) { 3470 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 3471 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 3472 } 3473 3474 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3475 3476 /* Check if sending the FLOGI is being deferred to after we get 3477 * up to date CSPs from MBX_READ_SPARAM. 
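 *
 * When HBA_DEFER_FLOGI is set, the initial FLOGI is issued from this
 * completion handler and the flag is cleared afterwards.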
3478 */ 3479 if (test_bit(HBA_DEFER_FLOGI, &phba->hba_flag)) { 3480 lpfc_initial_flogi(vport); 3481 clear_bit(HBA_DEFER_FLOGI, &phba->hba_flag); 3482 } 3483 return; 3484 3485 out: 3486 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3487 lpfc_issue_clear_la(phba, vport); 3488 } 3489 3490 static void 3491 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) 3492 { 3493 struct lpfc_vport *vport = phba->pport; 3494 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 3495 int i; 3496 int rc; 3497 struct fcf_record *fcf_record; 3498 unsigned long iflags; 3499 3500 spin_lock_irqsave(&phba->hbalock, iflags); 3501 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 3502 3503 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 3504 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 3505 case LPFC_LINK_SPEED_1GHZ: 3506 case LPFC_LINK_SPEED_2GHZ: 3507 case LPFC_LINK_SPEED_4GHZ: 3508 case LPFC_LINK_SPEED_8GHZ: 3509 case LPFC_LINK_SPEED_10GHZ: 3510 case LPFC_LINK_SPEED_16GHZ: 3511 case LPFC_LINK_SPEED_32GHZ: 3512 case LPFC_LINK_SPEED_64GHZ: 3513 case LPFC_LINK_SPEED_128GHZ: 3514 case LPFC_LINK_SPEED_256GHZ: 3515 break; 3516 default: 3517 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; 3518 break; 3519 } 3520 } 3521 3522 if (phba->fc_topology && 3523 phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { 3524 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3525 "3314 Toplogy changed was 0x%x is 0x%x\n", 3526 phba->fc_topology, 3527 bf_get(lpfc_mbx_read_top_topology, la)); 3528 phba->fc_topology_changed = 1; 3529 } 3530 3531 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 3532 phba->link_flag &= ~(LS_NPIV_FAB_SUPPORTED | LS_CT_VEN_RPA); 3533 3534 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3535 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 3536 3537 /* if npiv is enabled and this adapter supports npiv log 3538 * a message that npiv is not supported in this topology 3539 */ 3540 if (phba->cfg_enable_npiv && phba->max_vpi) 3541 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3542 "1309 Link Up Event npiv not supported in loop " 3543 "topology\n"); 3544 /* Get Loop Map information */ 3545 if (bf_get(lpfc_mbx_read_top_il, la)) 3546 set_bit(FC_LBIT, &vport->fc_flag); 3547 3548 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3549 i = la->lilpBde64.tus.f.bdeSize; 3550 3551 if (i == 0) { 3552 phba->alpa_map[0] = 0; 3553 } else { 3554 if (vport->cfg_log_verbose & LOG_LINK_EVENT) { 3555 int numalpa, j, k; 3556 union { 3557 uint8_t pamap[16]; 3558 struct { 3559 uint32_t wd1; 3560 uint32_t wd2; 3561 uint32_t wd3; 3562 uint32_t wd4; 3563 } pa; 3564 } un; 3565 numalpa = phba->alpa_map[0]; 3566 j = 0; 3567 while (j < numalpa) { 3568 memset(un.pamap, 0, 16); 3569 for (k = 1; j < numalpa; k++) { 3570 un.pamap[k - 1] = 3571 phba->alpa_map[j + 1]; 3572 j++; 3573 if (k == 16) 3574 break; 3575 } 3576 /* Link Up Event ALPA map */ 3577 lpfc_printf_log(phba, 3578 KERN_WARNING, 3579 LOG_LINK_EVENT, 3580 "1304 Link Up Event " 3581 "ALPA map Data: x%x " 3582 "x%x x%x x%x\n", 3583 un.pa.wd1, un.pa.wd2, 3584 un.pa.wd3, un.pa.wd4); 3585 } 3586 } 3587 } 3588 } else { 3589 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 3590 if (phba->max_vpi && phba->cfg_enable_npiv && 3591 (phba->sli_rev >= LPFC_SLI_REV3)) 3592 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3593 } 3594 vport->fc_myDID = phba->fc_pref_DID; 3595 set_bit(FC_LBIT, &vport->fc_flag); 3596 } 3597 spin_unlock_irqrestore(&phba->hbalock, iflags); 3598 3599 lpfc_linkup(phba); 3600 sparam_mbox = NULL; 3601 
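	/* Two mailbox commands can follow from this point: READ_SPARAM is
	 * always issued to pull fresh service parameters, and on a native
	 * FC link a CONFIG_LINK is issued as well, while FCoE links go
	 * through FCF table discovery instead.
	 *
	 * The submission pattern is the same in both cases and is sketched
	 * here for reference only (error handling trimmed; every call shown
	 * is the same one used in the code below):
	 *
	 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	 *	lpfc_read_sparam(phba, mbox, 0);
	 *	mbox->vport = vport;
	 *	mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
	 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	 *	if (rc == MBX_NOT_FINISHED)
	 *		lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
	 */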
3602 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3603 if (!sparam_mbox) 3604 goto out; 3605 3606 rc = lpfc_read_sparam(phba, sparam_mbox, 0); 3607 if (rc) { 3608 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3609 goto out; 3610 } 3611 sparam_mbox->vport = vport; 3612 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 3613 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 3614 if (rc == MBX_NOT_FINISHED) { 3615 lpfc_mbox_rsrc_cleanup(phba, sparam_mbox, MBOX_THD_UNLOCKED); 3616 goto out; 3617 } 3618 3619 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { 3620 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3621 if (!cfglink_mbox) 3622 goto out; 3623 vport->port_state = LPFC_LOCAL_CFG_LINK; 3624 lpfc_config_link(phba, cfglink_mbox); 3625 cfglink_mbox->vport = vport; 3626 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 3627 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 3628 if (rc == MBX_NOT_FINISHED) { 3629 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 3630 goto out; 3631 } 3632 } else { 3633 vport->port_state = LPFC_VPORT_UNKNOWN; 3634 /* 3635 * Add the driver's default FCF record at FCF index 0 now. This 3636 * is phase 1 implementation that support FCF index 0 and driver 3637 * defaults. 3638 */ 3639 if (!test_bit(HBA_FIP_SUPPORT, &phba->hba_flag)) { 3640 fcf_record = kzalloc(sizeof(struct fcf_record), 3641 GFP_KERNEL); 3642 if (unlikely(!fcf_record)) { 3643 lpfc_printf_log(phba, KERN_ERR, 3644 LOG_TRACE_EVENT, 3645 "2554 Could not allocate memory for " 3646 "fcf record\n"); 3647 rc = -ENODEV; 3648 goto out; 3649 } 3650 3651 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 3652 LPFC_FCOE_FCF_DEF_INDEX); 3653 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 3654 if (unlikely(rc)) { 3655 lpfc_printf_log(phba, KERN_ERR, 3656 LOG_TRACE_EVENT, 3657 "2013 Could not manually add FCF " 3658 "record 0, status %d\n", rc); 3659 rc = -ENODEV; 3660 kfree(fcf_record); 3661 goto out; 3662 } 3663 kfree(fcf_record); 3664 } 3665 /* 3666 * The driver is expected to do FIP/FCF. Call the port 3667 * and get the FCF Table. 
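 * If a table scan is already in progress (FCF_TS_INPROG) nothing more
 * is done here. Otherwise FCF_INIT_DISC is flagged, a scan is started
 * from LPFC_FCOE_FCF_GET_FIRST, and the roundrobin bmask is reset for
 * the new discovery.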
3668 */ 3669 if (test_bit(FCF_TS_INPROG, &phba->hba_flag)) 3670 return; 3671 /* This is the initial FCF discovery scan */ 3672 spin_lock_irqsave(&phba->hbalock, iflags); 3673 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3674 spin_unlock_irqrestore(&phba->hbalock, iflags); 3675 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3676 "2778 Start FCF table scan at linkup\n"); 3677 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3678 LPFC_FCOE_FCF_GET_FIRST); 3679 if (rc) { 3680 spin_lock_irqsave(&phba->hbalock, iflags); 3681 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 3682 spin_unlock_irqrestore(&phba->hbalock, iflags); 3683 goto out; 3684 } 3685 /* Reset FCF roundrobin bmask for new discovery */ 3686 lpfc_sli4_clear_fcf_rr_bmask(phba); 3687 } 3688 3689 /* Prepare for LINK up registrations */ 3690 memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); 3691 scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", 3692 init_utsname()->nodename); 3693 return; 3694 out: 3695 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3696 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3697 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", 3698 vport->port_state, sparam_mbox, cfglink_mbox); 3699 lpfc_issue_clear_la(phba, vport); 3700 return; 3701 } 3702 3703 static void 3704 lpfc_enable_la(struct lpfc_hba *phba) 3705 { 3706 uint32_t control; 3707 struct lpfc_sli *psli = &phba->sli; 3708 spin_lock_irq(&phba->hbalock); 3709 psli->sli_flag |= LPFC_PROCESS_LA; 3710 if (phba->sli_rev <= LPFC_SLI_REV3) { 3711 control = readl(phba->HCregaddr); 3712 control |= HC_LAINT_ENA; 3713 writel(control, phba->HCregaddr); 3714 readl(phba->HCregaddr); /* flush */ 3715 } 3716 spin_unlock_irq(&phba->hbalock); 3717 } 3718 3719 static void 3720 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 3721 { 3722 lpfc_linkdown(phba); 3723 lpfc_enable_la(phba); 3724 lpfc_unregister_unused_fcf(phba); 3725 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 3726 } 3727 3728 3729 /* 3730 * This routine handles processing a READ_TOPOLOGY mailbox 3731 * command upon completion. It is setup in the LPFC_MBOXQ 3732 * as the completion routine when the command is 3733 * handed off to the SLI layer. SLI4 only. 
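 * The handler unblocks the ELS ring, copies the returned ALPA map and
 * then dispatches to the link-up or link-down path based on the
 * attention type reported by READ_TOPOLOGY.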
3734 */ 3735 void 3736 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3737 { 3738 struct lpfc_vport *vport = pmb->vport; 3739 struct lpfc_mbx_read_top *la; 3740 struct lpfc_sli_ring *pring; 3741 MAILBOX_t *mb = &pmb->u.mb; 3742 struct lpfc_dmabuf *mp = pmb->ctx_buf; 3743 uint8_t attn_type; 3744 3745 /* Unblock ELS traffic */ 3746 pring = lpfc_phba_elsring(phba); 3747 if (pring) 3748 pring->flag &= ~LPFC_STOP_IOCB_EVENT; 3749 3750 /* Check for error */ 3751 if (mb->mbxStatus) { 3752 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3753 "1307 READ_LA mbox error x%x state x%x\n", 3754 mb->mbxStatus, vport->port_state); 3755 lpfc_mbx_issue_link_down(phba); 3756 phba->link_state = LPFC_HBA_ERROR; 3757 goto lpfc_mbx_cmpl_read_topology_free_mbuf; 3758 } 3759 3760 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3761 attn_type = bf_get(lpfc_mbx_read_top_att_type, la); 3762 3763 memcpy(&phba->alpa_map[0], mp->virt, 128); 3764 3765 if (bf_get(lpfc_mbx_read_top_pb, la)) 3766 set_bit(FC_BYPASSED_MODE, &vport->fc_flag); 3767 else 3768 clear_bit(FC_BYPASSED_MODE, &vport->fc_flag); 3769 3770 if (phba->fc_eventTag <= la->eventTag) { 3771 phba->fc_stat.LinkMultiEvent++; 3772 if (attn_type == LPFC_ATT_LINK_UP) 3773 if (phba->fc_eventTag != 0) 3774 lpfc_linkdown(phba); 3775 } 3776 3777 phba->fc_eventTag = la->eventTag; 3778 phba->link_events++; 3779 if (attn_type == LPFC_ATT_LINK_UP) { 3780 phba->fc_stat.LinkUp++; 3781 if (phba->link_flag & LS_LOOPBACK_MODE) { 3782 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3783 "1306 Link Up Event in loop back mode " 3784 "x%x received Data: x%x x%x x%x x%x\n", 3785 la->eventTag, phba->fc_eventTag, 3786 bf_get(lpfc_mbx_read_top_alpa_granted, 3787 la), 3788 bf_get(lpfc_mbx_read_top_link_spd, la), 3789 phba->alpa_map[0]); 3790 } else { 3791 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3792 "1303 Link Up Event x%x received " 3793 "Data: x%x x%x x%x x%x x%x\n", 3794 la->eventTag, phba->fc_eventTag, 3795 bf_get(lpfc_mbx_read_top_alpa_granted, 3796 la), 3797 bf_get(lpfc_mbx_read_top_link_spd, la), 3798 phba->alpa_map[0], 3799 bf_get(lpfc_mbx_read_top_fa, la)); 3800 } 3801 lpfc_mbx_process_link_up(phba, la); 3802 3803 if (phba->cmf_active_mode != LPFC_CFG_OFF) 3804 lpfc_cmf_signal_init(phba); 3805 3806 if (phba->lmt & LMT_64Gb) 3807 lpfc_read_lds_params(phba); 3808 3809 } else if (attn_type == LPFC_ATT_LINK_DOWN || 3810 attn_type == LPFC_ATT_UNEXP_WWPN) { 3811 phba->fc_stat.LinkDown++; 3812 if (phba->link_flag & LS_LOOPBACK_MODE) 3813 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3814 "1308 Link Down Event in loop back mode " 3815 "x%x received " 3816 "Data: x%x x%x x%lx\n", 3817 la->eventTag, phba->fc_eventTag, 3818 phba->pport->port_state, vport->fc_flag); 3819 else if (attn_type == LPFC_ATT_UNEXP_WWPN) 3820 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3821 "1313 Link Down Unexpected FA WWPN Event x%x " 3822 "received Data: x%x x%x x%lx x%x\n", 3823 la->eventTag, phba->fc_eventTag, 3824 phba->pport->port_state, vport->fc_flag, 3825 bf_get(lpfc_mbx_read_top_fa, la)); 3826 else 3827 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3828 "1305 Link Down Event x%x received " 3829 "Data: x%x x%x x%lx x%x\n", 3830 la->eventTag, phba->fc_eventTag, 3831 phba->pport->port_state, vport->fc_flag, 3832 bf_get(lpfc_mbx_read_top_fa, la)); 3833 lpfc_mbx_issue_link_down(phba); 3834 } 3835 3836 if ((phba->sli_rev < LPFC_SLI_REV4) && 3837 bf_get(lpfc_mbx_read_top_fa, la)) 3838 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3839 "1311 fa %d\n", 3840 
bf_get(lpfc_mbx_read_top_fa, la)); 3841 3842 lpfc_mbx_cmpl_read_topology_free_mbuf: 3843 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3844 } 3845 3846 /* 3847 * This routine handles processing a REG_LOGIN mailbox 3848 * command upon completion. It is setup in the LPFC_MBOXQ 3849 * as the completion routine when the command is 3850 * handed off to the SLI layer. 3851 */ 3852 void 3853 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3854 { 3855 struct lpfc_vport *vport = pmb->vport; 3856 struct lpfc_dmabuf *mp = pmb->ctx_buf; 3857 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 3858 3859 /* The driver calls the state machine with the pmb pointer 3860 * but wants to make sure a stale ctx_buf isn't acted on. 3861 * The ctx_buf is restored later and cleaned up. 3862 */ 3863 pmb->ctx_buf = NULL; 3864 pmb->ctx_ndlp = NULL; 3865 3866 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, 3867 "0002 rpi:%x DID:%x flg:%x %d x%px\n", 3868 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 3869 kref_read(&ndlp->kref), 3870 ndlp); 3871 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 3872 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3873 3874 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || 3875 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 3876 /* We received an RSCN after issuing 3877 * this mbox reg login. We may have 3878 * cycled back through the state machine 3879 * and be back at the reg login state, 3880 * so this mbox needs to be ignored 3881 * because there is another reg login 3882 * in process. 3883 */ 3884 spin_lock_irq(&ndlp->lock); 3885 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 3886 spin_unlock_irq(&ndlp->lock); 3887 3888 /* 3889 * We cannot leave the RPI registered because 3890 * if we go through discovery again for this ndlp 3891 * a subsequent REG_RPI will fail. 3892 */ 3893 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3894 lpfc_unreg_rpi(vport, ndlp); 3895 } 3896 3897 /* Call state machine */ 3898 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 3899 pmb->ctx_buf = mp; 3900 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 3901 3902 /* decrement the node reference count held for this callback 3903 * function.
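 * This balances the reference taken on the ndlp when it was attached
 * to the mailbox as ctx_ndlp.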
3904 */ 3905 lpfc_nlp_put(ndlp); 3906 3907 return; 3908 } 3909 3910 static void 3911 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3912 { 3913 MAILBOX_t *mb = &pmb->u.mb; 3914 struct lpfc_vport *vport = pmb->vport; 3915 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3916 3917 switch (mb->mbxStatus) { 3918 case 0x0011: 3919 case 0x0020: 3920 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3921 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 3922 mb->mbxStatus); 3923 break; 3924 /* If VPI is busy, reset the HBA */ 3925 case 0x9700: 3926 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3927 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", 3928 vport->vpi, mb->mbxStatus); 3929 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag)) 3930 lpfc_workq_post_event(phba, NULL, NULL, 3931 LPFC_EVT_RESET_HBA); 3932 } 3933 3934 set_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 3935 spin_lock_irq(shost->host_lock); 3936 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 3937 spin_unlock_irq(shost->host_lock); 3938 mempool_free(pmb, phba->mbox_mem_pool); 3939 lpfc_cleanup_vports_rrqs(vport, NULL); 3940 /* 3941 * This shost reference might have been taken at the beginning of 3942 * lpfc_vport_delete() 3943 */ 3944 if (test_bit(FC_UNLOADING, &vport->load_flag) && vport != phba->pport) 3945 scsi_host_put(shost); 3946 } 3947 3948 int 3949 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 3950 { 3951 struct lpfc_hba *phba = vport->phba; 3952 LPFC_MBOXQ_t *mbox; 3953 int rc; 3954 3955 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3956 if (!mbox) 3957 return 1; 3958 3959 lpfc_unreg_vpi(phba, vport->vpi, mbox); 3960 mbox->vport = vport; 3961 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 3962 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3963 if (rc == MBX_NOT_FINISHED) { 3964 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 3965 "1800 Could not issue unreg_vpi\n"); 3966 mempool_free(mbox, phba->mbox_mem_pool); 3967 return rc; 3968 } 3969 return 0; 3970 } 3971 3972 static void 3973 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3974 { 3975 struct lpfc_vport *vport = pmb->vport; 3976 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3977 MAILBOX_t *mb = &pmb->u.mb; 3978 3979 switch (mb->mbxStatus) { 3980 case 0x0011: 3981 case 0x9601: 3982 case 0x9602: 3983 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3984 "0912 cmpl_reg_vpi, mb status = 0x%x\n", 3985 mb->mbxStatus); 3986 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3987 clear_bit(FC_FABRIC, &vport->fc_flag); 3988 clear_bit(FC_PUBLIC_LOOP, &vport->fc_flag); 3989 vport->fc_myDID = 0; 3990 3991 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 3992 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 3993 if (phba->nvmet_support) 3994 lpfc_nvmet_update_targetport(phba); 3995 else 3996 lpfc_nvme_update_localport(vport); 3997 } 3998 goto out; 3999 } 4000 4001 clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag); 4002 spin_lock_irq(shost->host_lock); 4003 vport->vpi_state |= LPFC_VPI_REGISTERED; 4004 spin_unlock_irq(shost->host_lock); 4005 vport->num_disc_nodes = 0; 4006 /* go thru NPR list and issue ELS PLOGIs */ 4007 if (atomic_read(&vport->fc_npr_cnt)) 4008 lpfc_els_disc_plogi(vport); 4009 4010 if (!vport->num_disc_nodes) { 4011 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 4012 lpfc_can_disctmo(vport); 4013 } 4014 vport->port_state = LPFC_VPORT_READY; 4015 4016 out: 4017 mempool_free(pmb, phba->mbox_mem_pool); 4018 return; 4019 } 4020 4021 /** 4022 * lpfc_create_static_vport - Read HBA config region to create static 
vports. 4023 * @phba: pointer to lpfc hba data structure. 4024 * 4025 * This routine issue a DUMP mailbox command for config region 22 to get 4026 * the list of static vports to be created. The function create vports 4027 * based on the information returned from the HBA. 4028 **/ 4029 void 4030 lpfc_create_static_vport(struct lpfc_hba *phba) 4031 { 4032 LPFC_MBOXQ_t *pmb = NULL; 4033 MAILBOX_t *mb; 4034 struct static_vport_info *vport_info; 4035 int mbx_wait_rc = 0, i; 4036 struct fc_vport_identifiers vport_id; 4037 struct fc_vport *new_fc_vport; 4038 struct Scsi_Host *shost; 4039 struct lpfc_vport *vport; 4040 uint16_t offset = 0; 4041 uint8_t *vport_buff; 4042 struct lpfc_dmabuf *mp; 4043 uint32_t byte_count = 0; 4044 4045 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4046 if (!pmb) { 4047 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4048 "0542 lpfc_create_static_vport failed to" 4049 " allocate mailbox memory\n"); 4050 return; 4051 } 4052 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 4053 mb = &pmb->u.mb; 4054 4055 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 4056 if (!vport_info) { 4057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4058 "0543 lpfc_create_static_vport failed to" 4059 " allocate vport_info\n"); 4060 mempool_free(pmb, phba->mbox_mem_pool); 4061 return; 4062 } 4063 4064 vport_buff = (uint8_t *) vport_info; 4065 do { 4066 /* While loop iteration forces a free dma buffer from 4067 * the previous loop because the mbox is reused and 4068 * the dump routine is a single-use construct. 4069 */ 4070 if (pmb->ctx_buf) { 4071 mp = pmb->ctx_buf; 4072 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4073 kfree(mp); 4074 pmb->ctx_buf = NULL; 4075 } 4076 if (lpfc_dump_static_vport(phba, pmb, offset)) 4077 goto out; 4078 4079 pmb->vport = phba->pport; 4080 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb, 4081 LPFC_MBOX_TMO); 4082 4083 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) { 4084 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4085 "0544 lpfc_create_static_vport failed to" 4086 " issue dump mailbox command ret 0x%x " 4087 "status 0x%x\n", 4088 mbx_wait_rc, mb->mbxStatus); 4089 goto out; 4090 } 4091 4092 if (phba->sli_rev == LPFC_SLI_REV4) { 4093 byte_count = pmb->u.mqe.un.mb_words[5]; 4094 mp = pmb->ctx_buf; 4095 if (byte_count > sizeof(struct static_vport_info) - 4096 offset) 4097 byte_count = sizeof(struct static_vport_info) 4098 - offset; 4099 memcpy(vport_buff + offset, mp->virt, byte_count); 4100 offset += byte_count; 4101 } else { 4102 if (mb->un.varDmp.word_cnt > 4103 sizeof(struct static_vport_info) - offset) 4104 mb->un.varDmp.word_cnt = 4105 sizeof(struct static_vport_info) 4106 - offset; 4107 byte_count = mb->un.varDmp.word_cnt; 4108 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 4109 vport_buff + offset, 4110 byte_count); 4111 4112 offset += byte_count; 4113 } 4114 4115 } while (byte_count && 4116 offset < sizeof(struct static_vport_info)); 4117 4118 4119 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 4120 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 4121 != VPORT_INFO_REV)) { 4122 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 4123 "0545 lpfc_create_static_vport bad" 4124 " information header 0x%x 0x%x\n", 4125 le32_to_cpu(vport_info->signature), 4126 le32_to_cpu(vport_info->rev) & 4127 VPORT_INFO_REV_MASK); 4128 4129 goto out; 4130 } 4131 4132 shost = lpfc_shost_from_vport(phba->pport); 4133 4134 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { 4135 memset(&vport_id, 0, sizeof(vport_id)); 4136 
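		/* Each non-empty entry read from config region 22 is
		 * converted to 64-bit WWPN/WWNN values and handed to
		 * fc_vport_create(); entries with a zero name are skipped.
		 */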
vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); 4137 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); 4138 if (!vport_id.port_name || !vport_id.node_name) 4139 continue; 4140 4141 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; 4142 vport_id.vport_type = FC_PORTTYPE_NPIV; 4143 vport_id.disable = false; 4144 new_fc_vport = fc_vport_create(shost, 0, &vport_id); 4145 4146 if (!new_fc_vport) { 4147 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4148 "0546 lpfc_create_static_vport failed to" 4149 " create vport\n"); 4150 continue; 4151 } 4152 4153 vport = *(struct lpfc_vport **)new_fc_vport->dd_data; 4154 vport->vport_flag |= STATIC_VPORT; 4155 } 4156 4157 out: 4158 kfree(vport_info); 4159 if (mbx_wait_rc != MBX_TIMEOUT) 4160 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4161 } 4162 4163 /* 4164 * This routine handles processing a Fabric REG_LOGIN mailbox 4165 * command upon completion. It is setup in the LPFC_MBOXQ 4166 * as the completion routine when the command is 4167 * handed off to the SLI layer. 4168 */ 4169 void 4170 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4171 { 4172 struct lpfc_vport *vport = pmb->vport; 4173 MAILBOX_t *mb = &pmb->u.mb; 4174 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 4175 4176 pmb->ctx_ndlp = NULL; 4177 4178 if (mb->mbxStatus) { 4179 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4180 "0258 Register Fabric login error: 0x%x\n", 4181 mb->mbxStatus); 4182 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4183 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4184 /* FLOGI failed, use loop map to make discovery list */ 4185 lpfc_disc_list_loopmap(vport); 4186 4187 /* Start discovery */ 4188 lpfc_disc_start(vport); 4189 /* Decrement the reference count on the ndlp after 4190 * all references to it are done. 4191 */ 4192 lpfc_nlp_put(ndlp); 4193 return; 4194 } 4195 4196 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4197 /* Decrement the reference count on the ndlp after all 4198 * references to it are done. 4199 */ 4200 lpfc_nlp_put(ndlp); 4201 return; 4202 } 4203 4204 if (phba->sli_rev < LPFC_SLI_REV4) 4205 ndlp->nlp_rpi = mb->un.varWords[0]; 4206 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4207 ndlp->nlp_type |= NLP_FABRIC; 4208 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4209 4210 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 4211 /* When the physical port receives a LOGO, do not 4212 * start vport discovery */ 4213 if (!test_and_clear_bit(FC_LOGO_RCVD_DID_CHNG, &vport->fc_flag)) 4214 lpfc_start_fdiscs(phba); 4215 lpfc_do_scr_ns_plogi(phba, vport); 4216 } 4217 4218 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4219 4220 /* Drop the reference count taken for the mbox at the end, 4221 * after all current references to the ndlp are done. 4222 */ 4223 lpfc_nlp_put(ndlp); 4224 return; 4225 } 4226 4227 /* 4228 * This routine will issue a GID_FT for each FC4 Type supported 4229 * by the driver. ALL GID_FTs must complete before discovery is started.
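 * The return value is the number of GID_FT requests left outstanding
 * (vport->gidft_inp); callers treat a return of 0 as "nothing issued"
 * and finish discovery, e.g.:
 *
 *	if (lpfc_issue_gidft(vport) == 0)
 *		goto out;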
4230 */ 4231 int 4232 lpfc_issue_gidft(struct lpfc_vport *vport) 4233 { 4234 /* Good status, issue CT Request to NameServer */ 4235 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4236 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { 4237 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { 4238 /* Cannot issue NameServer FCP Query, so finish up 4239 * discovery 4240 */ 4241 lpfc_printf_vlog(vport, KERN_ERR, 4242 LOG_TRACE_EVENT, 4243 "0604 %s FC TYPE %x %s\n", 4244 "Failed to issue GID_FT to ", 4245 FC_TYPE_FCP, 4246 "Finishing discovery."); 4247 return 0; 4248 } 4249 vport->gidft_inp++; 4250 } 4251 4252 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4253 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { 4254 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { 4255 /* Cannot issue NameServer NVME Query, so finish up 4256 * discovery 4257 */ 4258 lpfc_printf_vlog(vport, KERN_ERR, 4259 LOG_TRACE_EVENT, 4260 "0605 %s FC_TYPE %x %s %d\n", 4261 "Failed to issue GID_FT to ", 4262 FC_TYPE_NVME, 4263 "Finishing discovery: gidftinp ", 4264 vport->gidft_inp); 4265 if (vport->gidft_inp == 0) 4266 return 0; 4267 } else 4268 vport->gidft_inp++; 4269 } 4270 return vport->gidft_inp; 4271 } 4272 4273 /** 4274 * lpfc_issue_gidpt - issue a GID_PT for all N_Ports 4275 * @vport: The virtual port for which this call is being executed. 4276 * 4277 * This routine will issue a GID_PT to get a list of all N_Ports 4278 * 4279 * Return value : 4280 * 0 - Failure to issue a GID_PT 4281 * 1 - GID_PT issued 4282 **/ 4283 int 4284 lpfc_issue_gidpt(struct lpfc_vport *vport) 4285 { 4286 /* Good status, issue CT Request to NameServer */ 4287 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) { 4288 /* Cannot issue NameServer FCP Query, so finish up 4289 * discovery 4290 */ 4291 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4292 "0606 %s Port TYPE %x %s\n", 4293 "Failed to issue GID_PT to ", 4294 GID_PT_N_PORT, 4295 "Finishing discovery."); 4296 return 0; 4297 } 4298 vport->gidft_inp++; 4299 return 1; 4300 } 4301 4302 /* 4303 * This routine handles processing a NameServer REG_LOGIN mailbox 4304 * command upon completion. It is setup in the LPFC_MBOXQ 4305 * as the completion routine when the command is 4306 * handed off to the SLI layer. 4307 */ 4308 void 4309 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4310 { 4311 MAILBOX_t *mb = &pmb->u.mb; 4312 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 4313 struct lpfc_vport *vport = pmb->vport; 4314 int rc; 4315 4316 pmb->ctx_ndlp = NULL; 4317 vport->gidft_inp = 0; 4318 4319 if (mb->mbxStatus) { 4320 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4321 "0260 Register NameServer error: 0x%x\n", 4322 mb->mbxStatus); 4323 4324 out: 4325 /* decrement the node reference count held for this 4326 * callback function. 4327 */ 4328 lpfc_nlp_put(ndlp); 4329 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4330 4331 /* If the node is not registered with the scsi or nvme 4332 * transport, remove the fabric node. The failed reg_login 4333 * is terminal and forces the removal of the last node 4334 * reference. 
4335 */ 4336 if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { 4337 spin_lock_irq(&ndlp->lock); 4338 ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; 4339 spin_unlock_irq(&ndlp->lock); 4340 lpfc_nlp_put(ndlp); 4341 } 4342 4343 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 4344 /* 4345 * RegLogin failed, use loop map to make discovery 4346 * list 4347 */ 4348 lpfc_disc_list_loopmap(vport); 4349 4350 /* Start discovery */ 4351 lpfc_disc_start(vport); 4352 return; 4353 } 4354 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 4355 return; 4356 } 4357 4358 if (phba->sli_rev < LPFC_SLI_REV4) 4359 ndlp->nlp_rpi = mb->un.varWords[0]; 4360 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4361 ndlp->nlp_type |= NLP_FABRIC; 4362 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4363 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 4364 "0003 rpi:%x DID:%x flg:%x %d x%px\n", 4365 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 4366 kref_read(&ndlp->kref), 4367 ndlp); 4368 4369 if (vport->port_state < LPFC_VPORT_READY) { 4370 /* Link up discovery requires Fabric registration. */ 4371 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 4372 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 4373 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 4374 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 4375 4376 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4377 (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) 4378 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); 4379 4380 if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || 4381 (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) 4382 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 4383 FC_TYPE_NVME); 4384 4385 /* Issue SCR just before NameServer GID_FT Query */ 4386 lpfc_issue_els_scr(vport, 0); 4387 4388 /* Link was bounced or a Fabric LOGO occurred. Start EDC 4389 * with initial FW values provided the congestion mode is 4390 * not off. Note that signals may or may not be supported 4391 * by the adapter but FPIN is provided by default for 1 4392 * or both missing signals support. 4393 */ 4394 if (phba->cmf_active_mode != LPFC_CFG_OFF) { 4395 phba->cgn_reg_fpin = phba->cgn_init_reg_fpin; 4396 phba->cgn_reg_signal = phba->cgn_init_reg_signal; 4397 rc = lpfc_issue_els_edc(vport, 0); 4398 lpfc_printf_log(phba, KERN_INFO, 4399 LOG_INIT | LOG_ELS | LOG_DISCOVERY, 4400 "4220 Issue EDC status x%x Data x%x\n", 4401 rc, phba->cgn_init_reg_signal); 4402 } else if (phba->lmt & LMT_64Gb) { 4403 /* may send link fault capability descriptor */ 4404 lpfc_issue_els_edc(vport, 0); 4405 } else { 4406 lpfc_issue_els_rdf(vport, 0); 4407 } 4408 } 4409 4410 vport->fc_ns_retry = 0; 4411 if (lpfc_issue_gidft(vport) == 0) 4412 goto out; 4413 4414 /* 4415 * At this point in time we may need to wait for multiple 4416 * SLI_CTNS_GID_FT CT commands to complete before we start discovery. 4417 * 4418 * decrement the node reference count held for this 4419 * callback function. 4420 */ 4421 lpfc_nlp_put(ndlp); 4422 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4423 return; 4424 } 4425 4426 /* 4427 * This routine handles processing a Fabric Controller REG_LOGIN mailbox 4428 * command upon completion. It is setup in the LPFC_MBOXQ 4429 * as the completion routine when the command is handed off to the SLI layer. 
4430 */ 4431 void 4432 lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4433 { 4434 struct lpfc_vport *vport = pmb->vport; 4435 MAILBOX_t *mb = &pmb->u.mb; 4436 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 4437 4438 pmb->ctx_ndlp = NULL; 4439 if (mb->mbxStatus) { 4440 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 4441 "0933 %s: Register FC login error: 0x%x\n", 4442 __func__, mb->mbxStatus); 4443 goto out; 4444 } 4445 4446 lpfc_check_nlp_post_devloss(vport, ndlp); 4447 4448 if (phba->sli_rev < LPFC_SLI_REV4) 4449 ndlp->nlp_rpi = mb->un.varWords[0]; 4450 4451 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4452 "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n", 4453 __func__, ndlp->nlp_DID, ndlp->nlp_rpi, 4454 ndlp->nlp_state); 4455 4456 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 4457 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 4458 ndlp->nlp_type |= NLP_FABRIC; 4459 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4460 4461 out: 4462 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 4463 4464 /* Drop the reference count from the mbox at the end after 4465 * all the current reference to the ndlp have been done. 4466 */ 4467 lpfc_nlp_put(ndlp); 4468 } 4469 4470 static void 4471 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4472 { 4473 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4474 struct fc_rport *rport; 4475 struct lpfc_rport_data *rdata; 4476 struct fc_rport_identifiers rport_ids; 4477 struct lpfc_hba *phba = vport->phba; 4478 unsigned long flags; 4479 4480 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) 4481 return; 4482 4483 /* Remote port has reappeared. Re-register w/ FC transport */ 4484 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 4485 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 4486 rport_ids.port_id = ndlp->nlp_DID; 4487 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 4488 4489 4490 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 4491 "rport add: did:x%x flg:x%x type x%x", 4492 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 4493 4494 /* Don't add the remote port if unloading. */ 4495 if (test_bit(FC_UNLOADING, &vport->load_flag)) 4496 return; 4497 4498 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 4499 if (!rport) { 4500 dev_printk(KERN_WARNING, &phba->pcidev->dev, 4501 "Warning: fc_remote_port_add failed\n"); 4502 return; 4503 } 4504 4505 /* Successful port add. Complete initializing node data */ 4506 rport->maxframe_size = ndlp->nlp_maxframe; 4507 rport->supported_classes = ndlp->nlp_class_sup; 4508 rdata = rport->dd_data; 4509 rdata->pnode = lpfc_nlp_get(ndlp); 4510 if (!rdata->pnode) { 4511 dev_warn(&phba->pcidev->dev, 4512 "Warning - node ref failed. 
Unreg rport\n"); 4513 fc_remote_port_delete(rport); 4514 ndlp->rport = NULL; 4515 return; 4516 } 4517 4518 spin_lock_irqsave(&ndlp->lock, flags); 4519 ndlp->fc4_xpt_flags |= SCSI_XPT_REGD; 4520 spin_unlock_irqrestore(&ndlp->lock, flags); 4521 4522 if (ndlp->nlp_type & NLP_FCP_TARGET) 4523 rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; 4524 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 4525 rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; 4526 if (ndlp->nlp_type & NLP_NVME_INITIATOR) 4527 rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR; 4528 if (ndlp->nlp_type & NLP_NVME_TARGET) 4529 rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET; 4530 if (ndlp->nlp_type & NLP_NVME_DISCOVERY) 4531 rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; 4532 4533 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 4534 fc_remote_port_rolechg(rport, rport_ids.roles); 4535 4536 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 4537 "3183 %s rport x%px DID x%x, role x%x refcnt %d\n", 4538 __func__, rport, rport->port_id, rport->roles, 4539 kref_read(&ndlp->kref)); 4540 4541 if ((rport->scsi_target_id != -1) && 4542 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 4543 ndlp->nlp_sid = rport->scsi_target_id; 4544 } 4545 4546 return; 4547 } 4548 4549 static void 4550 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 4551 { 4552 struct fc_rport *rport = ndlp->rport; 4553 struct lpfc_vport *vport = ndlp->vport; 4554 4555 if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) 4556 return; 4557 4558 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 4559 "rport delete: did:x%x flg:x%x type x%x", 4560 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 4561 4562 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4563 "3184 rport unregister x%06x, rport x%px " 4564 "xptflg x%x refcnt %d\n", 4565 ndlp->nlp_DID, rport, ndlp->fc4_xpt_flags, 4566 kref_read(&ndlp->kref)); 4567 4568 fc_remote_port_delete(rport); 4569 lpfc_nlp_put(ndlp); 4570 } 4571 4572 static void 4573 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 4574 { 4575 switch (state) { 4576 case NLP_STE_UNUSED_NODE: 4577 atomic_add(count, &vport->fc_unused_cnt); 4578 break; 4579 case NLP_STE_PLOGI_ISSUE: 4580 atomic_add(count, &vport->fc_plogi_cnt); 4581 break; 4582 case NLP_STE_ADISC_ISSUE: 4583 atomic_add(count, &vport->fc_adisc_cnt); 4584 break; 4585 case NLP_STE_REG_LOGIN_ISSUE: 4586 atomic_add(count, &vport->fc_reglogin_cnt); 4587 break; 4588 case NLP_STE_PRLI_ISSUE: 4589 atomic_add(count, &vport->fc_prli_cnt); 4590 break; 4591 case NLP_STE_UNMAPPED_NODE: 4592 atomic_add(count, &vport->fc_unmap_cnt); 4593 break; 4594 case NLP_STE_MAPPED_NODE: 4595 atomic_add(count, &vport->fc_map_cnt); 4596 break; 4597 case NLP_STE_NPR_NODE: 4598 if (!atomic_read(&vport->fc_npr_cnt) && count == -1) 4599 atomic_set(&vport->fc_npr_cnt, 0); 4600 else 4601 atomic_add(count, &vport->fc_npr_cnt); 4602 break; 4603 } 4604 } 4605 4606 /* Register a node with backend if not already done */ 4607 void 4608 lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4609 { 4610 unsigned long iflags; 4611 4612 lpfc_check_nlp_post_devloss(vport, ndlp); 4613 4614 spin_lock_irqsave(&ndlp->lock, iflags); 4615 if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { 4616 /* Already registered with backend, trigger rescan */ 4617 spin_unlock_irqrestore(&ndlp->lock, iflags); 4618 4619 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && 4620 ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { 4621 lpfc_nvme_rescan_port(vport, ndlp); 4622 } 4623 return; 4624 } 4625 4626 ndlp->fc4_xpt_flags |= NLP_XPT_REGD; 4627 
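	/* The flag is set before the node lock is dropped, so a later call
	 * into this routine takes the already-registered path above and
	 * only triggers a transport rescan.
	 */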
spin_unlock_irqrestore(&ndlp->lock, iflags); 4628 4629 if (lpfc_valid_xpt_node(ndlp)) { 4630 vport->phba->nport_event_cnt++; 4631 /* 4632 * Tell the fc transport about the port, if we haven't 4633 * already. If we have, and it's a scsi entity, be sure 4634 * to unblock any attached scsi devices. */ 4635 lpfc_register_remote_port(vport, ndlp); 4636 } 4637 4638 /* We are done if we do not have any NVME remote node */ 4639 if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) 4640 return; 4641 4642 /* Notify the NVME transport of this new rport. */ 4643 if (vport->phba->sli_rev >= LPFC_SLI_REV4 && 4644 ndlp->nlp_fc4_type & NLP_FC4_NVME) { 4645 if (vport->phba->nvmet_support == 0) { 4646 /* Register this rport with the transport. 4647 * Only NVME Target Rports are registered with 4648 * the transport. 4649 */ 4650 if (ndlp->nlp_type & NLP_NVME_TARGET) { 4651 vport->phba->nport_event_cnt++; 4652 lpfc_nvme_register_port(vport, ndlp); 4653 } 4654 } else { 4655 /* Just take an NDLP ref count since the 4656 * target does not register rports. 4657 */ 4658 lpfc_nlp_get(ndlp); 4659 } 4660 } 4661 } 4662 4663 /* Unregister a node with backend if not already done */ 4664 void 4665 lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4666 { 4667 unsigned long iflags; 4668 4669 spin_lock_irqsave(&ndlp->lock, iflags); 4670 if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { 4671 spin_unlock_irqrestore(&ndlp->lock, iflags); 4672 lpfc_printf_vlog(vport, KERN_INFO, 4673 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 4674 "0999 %s Not regd: ndlp x%px rport x%px DID " 4675 "x%x FLG x%x XPT x%x\n", 4676 __func__, ndlp, ndlp->rport, ndlp->nlp_DID, 4677 ndlp->nlp_flag, ndlp->fc4_xpt_flags); 4678 return; 4679 } 4680 4681 ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; 4682 spin_unlock_irqrestore(&ndlp->lock, iflags); 4683 4684 if (ndlp->rport && 4685 ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { 4686 vport->phba->nport_event_cnt++; 4687 lpfc_unregister_remote_port(ndlp); 4688 } else if (!ndlp->rport) { 4689 lpfc_printf_vlog(vport, KERN_INFO, 4690 LOG_ELS | LOG_NODE | LOG_DISCOVERY, 4691 "1999 %s NDLP in devloss x%px DID x%x FLG x%x" 4692 " XPT x%x refcnt %u\n", 4693 __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, 4694 ndlp->fc4_xpt_flags, 4695 kref_read(&ndlp->kref)); 4696 } 4697 4698 if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { 4699 vport->phba->nport_event_cnt++; 4700 if (vport->phba->nvmet_support == 0) { 4701 /* Start devloss if target. */ 4702 if (ndlp->nlp_type & NLP_NVME_TARGET) 4703 lpfc_nvme_unregister_port(vport, ndlp); 4704 } else { 4705 /* NVMET has no upcall.
*/ 4706 lpfc_nlp_put(ndlp); 4707 } 4708 } 4709 4710 } 4711 4712 /* 4713 * Adisc state change handling 4714 */ 4715 static void 4716 lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4717 int new_state) 4718 { 4719 switch (new_state) { 4720 /* 4721 * Any state to ADISC_ISSUE 4722 * Do nothing, adisc cmpl handling will trigger state changes 4723 */ 4724 case NLP_STE_ADISC_ISSUE: 4725 break; 4726 4727 /* 4728 * ADISC_ISSUE to mapped states 4729 * Trigger a registration with backend, it will be nop if 4730 * already registered 4731 */ 4732 case NLP_STE_UNMAPPED_NODE: 4733 ndlp->nlp_type |= NLP_FC_NODE; 4734 fallthrough; 4735 case NLP_STE_MAPPED_NODE: 4736 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4737 lpfc_nlp_reg_node(vport, ndlp); 4738 break; 4739 4740 /* 4741 * ADISC_ISSUE to non-mapped states 4742 * We are moving from ADISC_ISSUE to a non-mapped state because 4743 * ADISC failed, we would have skipped unregistering with 4744 * backend, attempt it now 4745 */ 4746 case NLP_STE_NPR_NODE: 4747 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 4748 fallthrough; 4749 default: 4750 lpfc_nlp_unreg_node(vport, ndlp); 4751 break; 4752 } 4753 4754 } 4755 4756 static void 4757 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4758 int old_state, int new_state) 4759 { 4760 /* Trap ADISC changes here */ 4761 if (new_state == NLP_STE_ADISC_ISSUE || 4762 old_state == NLP_STE_ADISC_ISSUE) { 4763 lpfc_handle_adisc_state(vport, ndlp, new_state); 4764 return; 4765 } 4766 4767 if (new_state == NLP_STE_UNMAPPED_NODE) { 4768 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4769 ndlp->nlp_type |= NLP_FC_NODE; 4770 } 4771 if (new_state == NLP_STE_MAPPED_NODE) 4772 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 4773 if (new_state == NLP_STE_NPR_NODE) 4774 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 4775 4776 /* Reg/Unreg for FCP and NVME Transport interface */ 4777 if ((old_state == NLP_STE_MAPPED_NODE || 4778 old_state == NLP_STE_UNMAPPED_NODE)) { 4779 /* For nodes marked for ADISC, Handle unreg in ADISC cmpl 4780 * if linkup. In linkdown do unreg_node 4781 */ 4782 if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || 4783 !lpfc_is_link_up(vport->phba)) 4784 lpfc_nlp_unreg_node(vport, ndlp); 4785 } 4786 4787 if (new_state == NLP_STE_MAPPED_NODE || 4788 new_state == NLP_STE_UNMAPPED_NODE) 4789 lpfc_nlp_reg_node(vport, ndlp); 4790 4791 /* 4792 * If the node just added to Mapped list was an FCP target, 4793 * but the remote port registration failed or assigned a target 4794 * id outside the presentable range - move the node to the 4795 * Unmapped List. 
4796 */ 4797 if ((new_state == NLP_STE_MAPPED_NODE) && 4798 (ndlp->nlp_type & NLP_FCP_TARGET) && 4799 (!ndlp->rport || 4800 ndlp->rport->scsi_target_id == -1 || 4801 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 4802 spin_lock_irq(&ndlp->lock); 4803 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 4804 spin_unlock_irq(&ndlp->lock); 4805 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 4806 } 4807 } 4808 4809 static char * 4810 lpfc_nlp_state_name(char *buffer, size_t size, int state) 4811 { 4812 static char *states[] = { 4813 [NLP_STE_UNUSED_NODE] = "UNUSED", 4814 [NLP_STE_PLOGI_ISSUE] = "PLOGI", 4815 [NLP_STE_ADISC_ISSUE] = "ADISC", 4816 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", 4817 [NLP_STE_PRLI_ISSUE] = "PRLI", 4818 [NLP_STE_LOGO_ISSUE] = "LOGO", 4819 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", 4820 [NLP_STE_MAPPED_NODE] = "MAPPED", 4821 [NLP_STE_NPR_NODE] = "NPR", 4822 }; 4823 4824 if (state < NLP_STE_MAX_STATE && states[state]) 4825 strscpy(buffer, states[state], size); 4826 else 4827 snprintf(buffer, size, "unknown (%d)", state); 4828 return buffer; 4829 } 4830 4831 void 4832 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4833 int state) 4834 { 4835 int old_state = ndlp->nlp_state; 4836 int node_dropped = ndlp->nlp_flag & NLP_DROPPED; 4837 char name1[16], name2[16]; 4838 unsigned long iflags; 4839 4840 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4841 "0904 NPort state transition x%06x, %s -> %s\n", 4842 ndlp->nlp_DID, 4843 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 4844 lpfc_nlp_state_name(name2, sizeof(name2), state)); 4845 4846 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 4847 "node statechg did:x%x old:%d ste:%d", 4848 ndlp->nlp_DID, old_state, state); 4849 4850 if (node_dropped && old_state == NLP_STE_UNUSED_NODE && 4851 state != NLP_STE_UNUSED_NODE) { 4852 ndlp->nlp_flag &= ~NLP_DROPPED; 4853 lpfc_nlp_get(ndlp); 4854 } 4855 4856 if (old_state == NLP_STE_NPR_NODE && 4857 state != NLP_STE_NPR_NODE) 4858 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4859 if (old_state == NLP_STE_UNMAPPED_NODE) { 4860 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 4861 ndlp->nlp_type &= ~NLP_FC_NODE; 4862 } 4863 4864 if (list_empty(&ndlp->nlp_listp)) { 4865 spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); 4866 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4867 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags); 4868 } else if (old_state) 4869 lpfc_nlp_counters(vport, old_state, -1); 4870 4871 ndlp->nlp_state = state; 4872 lpfc_nlp_counters(vport, state, 1); 4873 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); 4874 } 4875 4876 void 4877 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4878 { 4879 unsigned long iflags; 4880 4881 if (list_empty(&ndlp->nlp_listp)) { 4882 spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); 4883 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4884 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags); 4885 } 4886 } 4887 4888 void 4889 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4890 { 4891 unsigned long iflags; 4892 4893 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4894 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 4895 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 4896 spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); 4897 list_del_init(&ndlp->nlp_listp); 4898 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags); 4899 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 4900 NLP_STE_UNUSED_NODE); 4901 } 4902 4903 /** 4904 * lpfc_initialize_node - 
Initialize all fields of node object 4905 * @vport: Pointer to Virtual Port object. 4906 * @ndlp: Pointer to FC node object. 4907 * @did: FC_ID of the node. 4908 * 4909 * This function is always called when a node object needs to be initialized. 4910 * It initializes all the fields of the node object. Although the reference 4911 * to phba from @ndlp can be obtained indirectly through its reference to 4912 * @vport, a direct reference to phba is taken here by @ndlp. This is 4913 * because the life-span of the @ndlp might go beyond the existence of @vport, 4914 * as the final release of the ndlp is determined by its reference count, and 4915 * operations on the @ndlp need the reference to phba. 4916 **/ 4917 static inline void 4918 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4919 uint32_t did) 4920 { 4921 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 4922 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 4923 timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); 4924 INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp); 4925 4926 ndlp->nlp_DID = did; 4927 ndlp->vport = vport; 4928 ndlp->phba = vport->phba; 4929 ndlp->nlp_sid = NLP_NO_SID; 4930 ndlp->nlp_fc4_type = NLP_FC4_NONE; 4931 kref_init(&ndlp->kref); 4932 atomic_set(&ndlp->cmd_pending, 0); 4933 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 4934 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 4935 } 4936 4937 void 4938 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4939 { 4940 /* 4941 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should 4942 * be used when lpfc wants to remove the "last" lpfc_nlp_put() to 4943 * release the ndlp from the vport when conditions are correct. 4944 */ 4945 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 4946 return; 4947 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 4948 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 4949 lpfc_cleanup_vports_rrqs(vport, ndlp); 4950 lpfc_unreg_rpi(vport, ndlp); 4951 } 4952 4953 /* NLP_DROPPED means another thread already removed the initial 4954 * reference from lpfc_nlp_init. If set, don't drop it again and 4955 * introduce an imbalance.
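 * Only the first caller to set NLP_DROPPED performs the final
 * lpfc_nlp_put(); a later caller sees the flag under the node lock
 * and returns without touching the reference count.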
4956 */ 4957 spin_lock_irq(&ndlp->lock); 4958 if (!(ndlp->nlp_flag & NLP_DROPPED)) { 4959 ndlp->nlp_flag |= NLP_DROPPED; 4960 spin_unlock_irq(&ndlp->lock); 4961 lpfc_nlp_put(ndlp); 4962 return; 4963 } 4964 spin_unlock_irq(&ndlp->lock); 4965 } 4966 4967 /* 4968 * Start / ReStart rescue timer for Discovery / RSCN handling 4969 */ 4970 void 4971 lpfc_set_disctmo(struct lpfc_vport *vport) 4972 { 4973 struct lpfc_hba *phba = vport->phba; 4974 uint32_t tmo; 4975 4976 if (vport->port_state == LPFC_LOCAL_CFG_LINK) { 4977 /* For FAN, timeout should be greater than edtov */ 4978 tmo = (((phba->fc_edtov + 999) / 1000) + 1); 4979 } else { 4980 /* Normal discovery timeout should be > than ELS/CT timeout 4981 * FC spec states we need 3 * ratov for CT requests 4982 */ 4983 tmo = ((phba->fc_ratov * 3) + 3); 4984 } 4985 4986 4987 if (!timer_pending(&vport->fc_disctmo)) { 4988 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4989 "set disc timer: tmo:x%x state:x%x flg:x%x", 4990 tmo, vport->port_state, vport->fc_flag); 4991 } 4992 4993 mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); 4994 set_bit(FC_DISC_TMO, &vport->fc_flag); 4995 4996 /* Start Discovery Timer state <hba_state> */ 4997 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4998 "0247 Start Discovery Timer state x%x " 4999 "Data: x%x x%lx x%x x%x\n", 5000 vport->port_state, tmo, 5001 (unsigned long)&vport->fc_disctmo, 5002 atomic_read(&vport->fc_plogi_cnt), 5003 atomic_read(&vport->fc_adisc_cnt)); 5004 5005 return; 5006 } 5007 5008 /* 5009 * Cancel rescue timer for Discovery / RSCN handling 5010 */ 5011 int 5012 lpfc_can_disctmo(struct lpfc_vport *vport) 5013 { 5014 unsigned long iflags; 5015 5016 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5017 "can disc timer: state:x%x rtry:x%x flg:x%x", 5018 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 5019 5020 /* Turn off discovery timer if its running */ 5021 if (test_bit(FC_DISC_TMO, &vport->fc_flag) || 5022 timer_pending(&vport->fc_disctmo)) { 5023 clear_bit(FC_DISC_TMO, &vport->fc_flag); 5024 del_timer_sync(&vport->fc_disctmo); 5025 spin_lock_irqsave(&vport->work_port_lock, iflags); 5026 vport->work_port_events &= ~WORKER_DISC_TMO; 5027 spin_unlock_irqrestore(&vport->work_port_lock, iflags); 5028 } 5029 5030 /* Cancel Discovery Timer state <hba_state> */ 5031 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5032 "0248 Cancel Discovery Timer state x%x " 5033 "Data: x%lx x%x x%x\n", 5034 vport->port_state, vport->fc_flag, 5035 atomic_read(&vport->fc_plogi_cnt), 5036 atomic_read(&vport->fc_adisc_cnt)); 5037 return 0; 5038 } 5039 5040 /* 5041 * Check specified ring for outstanding IOCB on the SLI queue 5042 * Return true if iocb matches the specified nport 5043 */ 5044 int 5045 lpfc_check_sli_ndlp(struct lpfc_hba *phba, 5046 struct lpfc_sli_ring *pring, 5047 struct lpfc_iocbq *iocb, 5048 struct lpfc_nodelist *ndlp) 5049 { 5050 struct lpfc_vport *vport = ndlp->vport; 5051 u8 ulp_command; 5052 u16 ulp_context; 5053 u32 remote_id; 5054 5055 if (iocb->vport != vport) 5056 return 0; 5057 5058 ulp_command = get_job_cmnd(phba, iocb); 5059 ulp_context = get_job_ulpcontext(phba, iocb); 5060 remote_id = get_job_els_rsp64_did(phba, iocb); 5061 5062 if (pring->ringno == LPFC_ELS_RING) { 5063 switch (ulp_command) { 5064 case CMD_GEN_REQUEST64_CR: 5065 if (iocb->ndlp == ndlp) 5066 return 1; 5067 fallthrough; 5068 case CMD_ELS_REQUEST64_CR: 5069 if (remote_id == ndlp->nlp_DID) 5070 return 1; 5071 fallthrough; 5072 case CMD_XMIT_ELS_RSP64_CX: 5073 if (iocb->ndlp == ndlp) 5074 
return 1; 5075 } 5076 } else if (pring->ringno == LPFC_FCP_RING) { 5077 /* Skip match check if waiting to relogin to FCP target */ 5078 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 5079 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 5080 return 0; 5081 } 5082 if (ulp_context == ndlp->nlp_rpi) 5083 return 1; 5084 } 5085 return 0; 5086 } 5087 5088 static void 5089 __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba, 5090 struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring, 5091 struct list_head *dequeue_list) 5092 { 5093 struct lpfc_iocbq *iocb, *next_iocb; 5094 5095 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 5096 /* Check to see if iocb matches the nport */ 5097 if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) 5098 /* match, dequeue */ 5099 list_move_tail(&iocb->list, dequeue_list); 5100 } 5101 } 5102 5103 static void 5104 lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba, 5105 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) 5106 { 5107 struct lpfc_sli *psli = &phba->sli; 5108 uint32_t i; 5109 5110 spin_lock_irq(&phba->hbalock); 5111 for (i = 0; i < psli->num_rings; i++) 5112 __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i], 5113 dequeue_list); 5114 spin_unlock_irq(&phba->hbalock); 5115 } 5116 5117 static void 5118 lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba, 5119 struct lpfc_nodelist *ndlp, struct list_head *dequeue_list) 5120 { 5121 struct lpfc_sli_ring *pring; 5122 struct lpfc_queue *qp = NULL; 5123 5124 spin_lock_irq(&phba->hbalock); 5125 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 5126 pring = qp->pring; 5127 if (!pring) 5128 continue; 5129 spin_lock(&pring->ring_lock); 5130 __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list); 5131 spin_unlock(&pring->ring_lock); 5132 } 5133 spin_unlock_irq(&phba->hbalock); 5134 } 5135 5136 /* 5137 * Free resources / clean up outstanding I/Os 5138 * associated with nlp_rpi in the LPFC_NODELIST entry. 5139 */ 5140 static int 5141 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 5142 { 5143 LIST_HEAD(completions); 5144 5145 lpfc_fabric_abort_nport(ndlp); 5146 5147 /* 5148 * Everything that matches on txcmplq will be returned 5149 * by firmware with a no rpi error. 5150 */ 5151 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 5152 if (phba->sli_rev != LPFC_SLI_REV4) 5153 lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); 5154 else 5155 lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions); 5156 } 5157 5158 /* Cancel all the IOCBs from the completions list */ 5159 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 5160 IOERR_SLI_ABORTED); 5161 5162 return 0; 5163 } 5164 5165 /** 5166 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO 5167 * @phba: Pointer to HBA context object. 5168 * @pmb: Pointer to mailbox object. 5169 * 5170 * This function will issue an ELS LOGO command after completing 5171 * the UNREG_RPI. 
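 *
 * If a deferred event is pending (NLP_UNREG_INP set with a saved
 * nlp_defer_did), the deferral is cleared and a PLOGI is issued to the
 * node; otherwise the unreg flags are cleared and, on SLI4 ports marked
 * NLP_RELEASE_RPI, the rpi is freed back to the pool.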
5172 **/ 5173 static void 5174 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5175 { 5176 struct lpfc_vport *vport = pmb->vport; 5177 struct lpfc_nodelist *ndlp; 5178 5179 ndlp = pmb->ctx_ndlp; 5180 if (!ndlp) 5181 return; 5182 lpfc_issue_els_logo(vport, ndlp, 0); 5183 5184 /* Check to see if there are any deferred events to process */ 5185 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 5186 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 5187 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5188 "1434 UNREG cmpl deferred logo x%x " 5189 "on NPort x%x Data: x%x x%px\n", 5190 ndlp->nlp_rpi, ndlp->nlp_DID, 5191 ndlp->nlp_defer_did, ndlp); 5192 5193 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5194 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 5195 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 5196 } else { 5197 /* NLP_RELEASE_RPI is only set for SLI4 ports. */ 5198 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 5199 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); 5200 spin_lock_irq(&ndlp->lock); 5201 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 5202 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 5203 spin_unlock_irq(&ndlp->lock); 5204 } 5205 spin_lock_irq(&ndlp->lock); 5206 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5207 spin_unlock_irq(&ndlp->lock); 5208 } 5209 5210 /* The node has an outstanding reference for the unreg. Now 5211 * that the LOGO action and cleanup are finished, release 5212 * resources. 5213 */ 5214 lpfc_nlp_put(ndlp); 5215 mempool_free(pmb, phba->mbox_mem_pool); 5216 } 5217 5218 /* 5219 * Sets the mailbox completion handler to be used for the 5220 * unreg_rpi command. The handler varies based on the state of 5221 * the port and what will be happening to the rpi next. 5222 */ 5223 static void 5224 lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, 5225 struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) 5226 { 5227 unsigned long iflags; 5228 5229 /* Driver always gets a reference on the mailbox job 5230 * in support of async jobs. 5231 */ 5232 mbox->ctx_ndlp = lpfc_nlp_get(ndlp); 5233 if (!mbox->ctx_ndlp) 5234 return; 5235 5236 if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { 5237 mbox->mbox_cmpl = lpfc_nlp_logo_unreg; 5238 5239 } else if (phba->sli_rev == LPFC_SLI_REV4 && 5240 !test_bit(FC_UNLOADING, &vport->load_flag) && 5241 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= 5242 LPFC_SLI_INTF_IF_TYPE_2) && 5243 (kref_read(&ndlp->kref) > 0)) { 5244 mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; 5245 } else { 5246 if (test_bit(FC_UNLOADING, &vport->load_flag)) { 5247 if (phba->sli_rev == LPFC_SLI_REV4) { 5248 spin_lock_irqsave(&ndlp->lock, iflags); 5249 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5250 spin_unlock_irqrestore(&ndlp->lock, iflags); 5251 } 5252 } 5253 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5254 } 5255 } 5256 5257 /* 5258 * Free rpi associated with LPFC_NODELIST entry. 5259 * This routine is called from lpfc_freenode(), when we are removing 5260 * a LPFC_NODELIST entry. It is also called if the driver initiates a 5261 * LOGO that completes successfully, and we are waiting to PLOGI back 5262 * to the remote NPort. In addition, it is called after we receive 5263 * and unsolicated ELS cmd, send back a rsp, the rsp completes and 5264 * we are waiting to PLOGI back to the remote NPort. 
5265 */ 5266 int 5267 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5268 { 5269 struct lpfc_hba *phba = vport->phba; 5270 LPFC_MBOXQ_t *mbox; 5271 int rc, acc_plogi = 1; 5272 uint16_t rpi; 5273 5274 if (ndlp->nlp_flag & NLP_RPI_REGISTERED || 5275 ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { 5276 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 5277 lpfc_printf_vlog(vport, KERN_INFO, 5278 LOG_NODE | LOG_DISCOVERY, 5279 "3366 RPI x%x needs to be " 5280 "unregistered nlp_flag x%x " 5281 "did x%x\n", 5282 ndlp->nlp_rpi, ndlp->nlp_flag, 5283 ndlp->nlp_DID); 5284 5285 /* If there is already an UNREG in progress for this ndlp, 5286 * no need to queue up another one. 5287 */ 5288 if (ndlp->nlp_flag & NLP_UNREG_INP) { 5289 lpfc_printf_vlog(vport, KERN_INFO, 5290 LOG_NODE | LOG_DISCOVERY, 5291 "1436 unreg_rpi SKIP UNREG x%x on " 5292 "NPort x%x deferred x%x flg x%x " 5293 "Data: x%px\n", 5294 ndlp->nlp_rpi, ndlp->nlp_DID, 5295 ndlp->nlp_defer_did, 5296 ndlp->nlp_flag, ndlp); 5297 goto out; 5298 } 5299 5300 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5301 if (mbox) { 5302 /* SLI4 ports require the physical rpi value. */ 5303 rpi = ndlp->nlp_rpi; 5304 if (phba->sli_rev == LPFC_SLI_REV4) 5305 rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 5306 5307 lpfc_unreg_login(phba, vport->vpi, rpi, mbox); 5308 mbox->vport = vport; 5309 lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); 5310 if (!mbox->ctx_ndlp) { 5311 mempool_free(mbox, phba->mbox_mem_pool); 5312 return 1; 5313 } 5314 5315 if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) 5316 /* 5317 * accept PLOGIs after unreg_rpi_cmpl 5318 */ 5319 acc_plogi = 0; 5320 if (((ndlp->nlp_DID & Fabric_DID_MASK) != 5321 Fabric_DID_MASK) && 5322 (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) 5323 ndlp->nlp_flag |= NLP_UNREG_INP; 5324 5325 lpfc_printf_vlog(vport, KERN_INFO, 5326 LOG_NODE | LOG_DISCOVERY, 5327 "1433 unreg_rpi UNREG x%x on " 5328 "NPort x%x deferred flg x%x " 5329 "Data:x%px\n", 5330 ndlp->nlp_rpi, ndlp->nlp_DID, 5331 ndlp->nlp_flag, ndlp); 5332 5333 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5334 if (rc == MBX_NOT_FINISHED) { 5335 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5336 mempool_free(mbox, phba->mbox_mem_pool); 5337 acc_plogi = 1; 5338 lpfc_nlp_put(ndlp); 5339 } 5340 } else { 5341 lpfc_printf_vlog(vport, KERN_INFO, 5342 LOG_NODE | LOG_DISCOVERY, 5343 "1444 Failed to allocate mempool " 5344 "unreg_rpi UNREG x%x, " 5345 "DID x%x, flag x%x, " 5346 "ndlp x%px\n", 5347 ndlp->nlp_rpi, ndlp->nlp_DID, 5348 ndlp->nlp_flag, ndlp); 5349 5350 /* Because mempool_alloc failed, we 5351 * will issue a LOGO here and keep the rpi alive if 5352 * not unloading. 5353 */ 5354 if (!test_bit(FC_UNLOADING, &vport->load_flag)) { 5355 ndlp->nlp_flag &= ~NLP_UNREG_INP; 5356 lpfc_issue_els_logo(vport, ndlp, 0); 5357 ndlp->nlp_prev_state = ndlp->nlp_state; 5358 lpfc_nlp_set_state(vport, ndlp, 5359 NLP_STE_NPR_NODE); 5360 } 5361 5362 return 1; 5363 } 5364 lpfc_no_rpi(phba, ndlp); 5365 out: 5366 if (phba->sli_rev != LPFC_SLI_REV4) 5367 ndlp->nlp_rpi = 0; 5368 ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; 5369 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 5370 if (acc_plogi) 5371 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5372 return 1; 5373 } 5374 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 5375 return 0; 5376 } 5377 5378 /** 5379 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba. 5380 * @phba: pointer to lpfc hba data structure. 5381 * 5382 * This routine is invoked to unregister all the currently registered RPIs 5383 * to the HBA. 
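 *
 * Locking note (summarizing the loop below): the per-vport
 * fc_nodes_list_lock is dropped around each lpfc_unreg_rpi() call because
 * the unreg path may allocate its mailbox with GFP_KERNEL and sleep; the
 * lock is re-taken before the node-list walk continues.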
5384 **/ 5385 void 5386 lpfc_unreg_hba_rpis(struct lpfc_hba *phba) 5387 { 5388 struct lpfc_vport **vports; 5389 struct lpfc_nodelist *ndlp; 5390 int i; 5391 unsigned long iflags; 5392 5393 vports = lpfc_create_vport_work_array(phba); 5394 if (!vports) { 5395 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 5396 "2884 Vport array allocation failed \n"); 5397 return; 5398 } 5399 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5400 spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); 5401 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 5402 if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 5403 /* The mempool_alloc might sleep */ 5404 spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, 5405 iflags); 5406 lpfc_unreg_rpi(vports[i], ndlp); 5407 spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, 5408 iflags); 5409 } 5410 } 5411 spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); 5412 } 5413 lpfc_destroy_vport_work_array(phba, vports); 5414 } 5415 5416 void 5417 lpfc_unreg_all_rpis(struct lpfc_vport *vport) 5418 { 5419 struct lpfc_hba *phba = vport->phba; 5420 LPFC_MBOXQ_t *mbox; 5421 int rc; 5422 5423 if (phba->sli_rev == LPFC_SLI_REV4) { 5424 lpfc_sli4_unreg_all_rpis(vport); 5425 return; 5426 } 5427 5428 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5429 if (mbox) { 5430 lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT, 5431 mbox); 5432 mbox->vport = vport; 5433 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5434 mbox->ctx_ndlp = NULL; 5435 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 5436 if (rc != MBX_TIMEOUT) 5437 mempool_free(mbox, phba->mbox_mem_pool); 5438 5439 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5440 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5441 "1836 Could not issue " 5442 "unreg_login(all_rpis) status %d\n", 5443 rc); 5444 } 5445 } 5446 5447 void 5448 lpfc_unreg_default_rpis(struct lpfc_vport *vport) 5449 { 5450 struct lpfc_hba *phba = vport->phba; 5451 LPFC_MBOXQ_t *mbox; 5452 int rc; 5453 5454 /* Unreg DID is an SLI3 operation. */ 5455 if (phba->sli_rev > LPFC_SLI_REV3) 5456 return; 5457 5458 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5459 if (mbox) { 5460 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS, 5461 mbox); 5462 mbox->vport = vport; 5463 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5464 mbox->ctx_ndlp = NULL; 5465 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 5466 if (rc != MBX_TIMEOUT) 5467 mempool_free(mbox, phba->mbox_mem_pool); 5468 5469 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 5470 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, 5471 "1815 Could not issue " 5472 "unreg_did (default rpis) status %d\n", 5473 rc); 5474 } 5475 } 5476 5477 /* 5478 * Free resources associated with LPFC_NODELIST entry 5479 * so it can be freed. 
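 *
 * In outline, the teardown below is:
 *	1. dequeue the node from the vport's node list
 *	2. detach the node from any REG_LOGIN64 mailbox that is active,
 *	   awaiting completion processing, or still queued
 *	3. abort outstanding ELS traffic and cancel the delayed-ELS timer
 *	4. unlink the retry/dev-loss/recovery event entries and clean RRQs
 *	5. on SLI4, flag the RPI for release (NLP_RELEASE_RPI)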
5480 */ 5481 static int 5482 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5483 { 5484 struct lpfc_hba *phba = vport->phba; 5485 LPFC_MBOXQ_t *mb, *nextmb; 5486 5487 /* Cleanup node for NPort <nlp_DID> */ 5488 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5489 "0900 Cleanup node for NPort x%x " 5490 "Data: x%x x%x x%x\n", 5491 ndlp->nlp_DID, ndlp->nlp_flag, 5492 ndlp->nlp_state, ndlp->nlp_rpi); 5493 lpfc_dequeue_node(vport, ndlp); 5494 5495 /* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */ 5496 5497 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 5498 if ((mb = phba->sli.mbox_active)) { 5499 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 5500 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 5501 (ndlp == mb->ctx_ndlp)) { 5502 mb->ctx_ndlp = NULL; 5503 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5504 } 5505 } 5506 5507 spin_lock_irq(&phba->hbalock); 5508 /* Cleanup REG_LOGIN completions which are not yet processed */ 5509 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 5510 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || 5511 (mb->mbox_flag & LPFC_MBX_IMED_UNREG) || 5512 (ndlp != mb->ctx_ndlp)) 5513 continue; 5514 5515 mb->ctx_ndlp = NULL; 5516 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5517 } 5518 5519 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 5520 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 5521 !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) && 5522 (ndlp == mb->ctx_ndlp)) { 5523 list_del(&mb->list); 5524 lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_LOCKED); 5525 5526 /* Don't invoke lpfc_nlp_put. The driver is in 5527 * lpfc_nlp_release context. 5528 */ 5529 } 5530 } 5531 spin_unlock_irq(&phba->hbalock); 5532 5533 lpfc_els_abort(phba, ndlp); 5534 5535 spin_lock_irq(&ndlp->lock); 5536 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 5537 spin_unlock_irq(&ndlp->lock); 5538 5539 ndlp->nlp_last_elscmd = 0; 5540 del_timer_sync(&ndlp->nlp_delayfunc); 5541 5542 list_del_init(&ndlp->els_retry_evt.evt_listp); 5543 list_del_init(&ndlp->dev_loss_evt.evt_listp); 5544 list_del_init(&ndlp->recovery_evt.evt_listp); 5545 lpfc_cleanup_vports_rrqs(vport, ndlp); 5546 5547 if (phba->sli_rev == LPFC_SLI_REV4) 5548 ndlp->nlp_flag |= NLP_RELEASE_RPI; 5549 5550 return 0; 5551 } 5552 5553 static int 5554 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5555 uint32_t did) 5556 { 5557 D_ID mydid, ndlpdid, matchdid; 5558 5559 if (did == Bcast_DID) 5560 return 0; 5561 5562 /* First check for Direct match */ 5563 if (ndlp->nlp_DID == did) 5564 return 1; 5565 5566 /* Next check for area/domain identically equals 0 match */ 5567 mydid.un.word = vport->fc_myDID; 5568 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 5569 return 0; 5570 } 5571 5572 matchdid.un.word = did; 5573 ndlpdid.un.word = ndlp->nlp_DID; 5574 if (matchdid.un.b.id == ndlpdid.un.b.id) { 5575 if ((mydid.un.b.domain == matchdid.un.b.domain) && 5576 (mydid.un.b.area == matchdid.un.b.area)) { 5577 /* This code is supposed to match the ID 5578 * for a private loop device that is 5579 * connect to fl_port. 
But we need to 5580 * check that the port did not just go 5581 * from pt2pt to fabric or we could end 5582 * up matching ndlp->nlp_DID 000001 to 5583 * fabric DID 0x20101 5584 */ 5585 if ((ndlpdid.un.b.domain == 0) && 5586 (ndlpdid.un.b.area == 0)) { 5587 if (ndlpdid.un.b.id && 5588 vport->phba->fc_topology == 5589 LPFC_TOPOLOGY_LOOP) 5590 return 1; 5591 } 5592 return 0; 5593 } 5594 5595 matchdid.un.word = ndlp->nlp_DID; 5596 if ((mydid.un.b.domain == ndlpdid.un.b.domain) && 5597 (mydid.un.b.area == ndlpdid.un.b.area)) { 5598 if ((matchdid.un.b.domain == 0) && 5599 (matchdid.un.b.area == 0)) { 5600 if (matchdid.un.b.id) 5601 return 1; 5602 } 5603 } 5604 } 5605 return 0; 5606 } 5607 5608 /* Search for a nodelist entry */ 5609 static struct lpfc_nodelist * 5610 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 5611 { 5612 struct lpfc_nodelist *ndlp; 5613 uint32_t data1; 5614 5615 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5616 if (lpfc_matchdid(vport, ndlp, did)) { 5617 data1 = (((uint32_t)ndlp->nlp_state << 24) | 5618 ((uint32_t)ndlp->nlp_xri << 16) | 5619 ((uint32_t)ndlp->nlp_type << 8) 5620 ); 5621 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, 5622 "0929 FIND node DID " 5623 "Data: x%px x%x x%x x%x x%x x%px\n", 5624 ndlp, ndlp->nlp_DID, 5625 ndlp->nlp_flag, data1, ndlp->nlp_rpi, 5626 ndlp->active_rrqs_xri_bitmap); 5627 return ndlp; 5628 } 5629 } 5630 5631 /* FIND node did <did> NOT FOUND */ 5632 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5633 "0932 FIND node did x%x NOT FOUND.\n", did); 5634 return NULL; 5635 } 5636 5637 struct lpfc_nodelist * 5638 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 5639 { 5640 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5641 struct lpfc_nodelist *ndlp; 5642 unsigned long iflags; 5643 5644 spin_lock_irqsave(shost->host_lock, iflags); 5645 ndlp = __lpfc_findnode_did(vport, did); 5646 spin_unlock_irqrestore(shost->host_lock, iflags); 5647 return ndlp; 5648 } 5649 5650 struct lpfc_nodelist * 5651 lpfc_findnode_mapped(struct lpfc_vport *vport) 5652 { 5653 struct lpfc_nodelist *ndlp; 5654 uint32_t data1; 5655 unsigned long iflags; 5656 5657 spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags); 5658 5659 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5660 if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE || 5661 ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 5662 data1 = (((uint32_t)ndlp->nlp_state << 24) | 5663 ((uint32_t)ndlp->nlp_xri << 16) | 5664 ((uint32_t)ndlp->nlp_type << 8) | 5665 ((uint32_t)ndlp->nlp_rpi & 0xff)); 5666 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, 5667 iflags); 5668 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, 5669 "2025 FIND node DID MAPPED " 5670 "Data: x%px x%x x%x x%x x%px\n", 5671 ndlp, ndlp->nlp_DID, 5672 ndlp->nlp_flag, data1, 5673 ndlp->active_rrqs_xri_bitmap); 5674 return ndlp; 5675 } 5676 } 5677 spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags); 5678 5679 /* FIND node did <did> NOT FOUND */ 5680 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5681 "2026 FIND mapped did NOT FOUND.\n"); 5682 return NULL; 5683 } 5684 5685 struct lpfc_nodelist * 5686 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) 5687 { 5688 struct lpfc_nodelist *ndlp; 5689 5690 ndlp = lpfc_findnode_did(vport, did); 5691 if (!ndlp) { 5692 if (vport->phba->nvmet_support) 5693 return NULL; 5694 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) && 5695 lpfc_rscn_payload_check(vport, did) == 0) 5696 return NULL; 5697 ndlp = lpfc_nlp_init(vport, did); 5698 if (!ndlp) 5699 return NULL; 5700 
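		/* Brand-new node: park it in NPR and flag it 2B_DISC below so
		 * the ADISC/PLOGI pass in lpfc_disc_start() will pick it up.
		 */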
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 5701 5702 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5703 "6453 Setup New Node 2B_DISC x%x " 5704 "Data:x%x x%x x%lx\n", 5705 ndlp->nlp_DID, ndlp->nlp_flag, 5706 ndlp->nlp_state, vport->fc_flag); 5707 5708 spin_lock_irq(&ndlp->lock); 5709 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5710 spin_unlock_irq(&ndlp->lock); 5711 return ndlp; 5712 } 5713 5714 /* The NVME Target does not want to actively manage an rport. 5715 * The goal is to allow the target to reset its state and clear 5716 * pending IO in preparation for the initiator to recover. 5717 */ 5718 if (test_bit(FC_RSCN_MODE, &vport->fc_flag) && 5719 !test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { 5720 if (lpfc_rscn_payload_check(vport, did)) { 5721 5722 /* Since this node is marked for discovery, 5723 * delay timeout is not needed. 5724 */ 5725 lpfc_cancel_retry_delay_tmo(vport, ndlp); 5726 5727 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5728 "6455 Setup RSCN Node 2B_DISC x%x " 5729 "Data:x%x x%x x%lx\n", 5730 ndlp->nlp_DID, ndlp->nlp_flag, 5731 ndlp->nlp_state, vport->fc_flag); 5732 5733 /* NVME Target mode waits until rport is known to be 5734 * impacted by the RSCN before it transitions. No 5735 * active management - just go to NPR provided the 5736 * node had a valid login. 5737 */ 5738 if (vport->phba->nvmet_support) 5739 return ndlp; 5740 5741 if (ndlp->nlp_state > NLP_STE_UNUSED_NODE && 5742 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE) { 5743 lpfc_disc_state_machine(vport, ndlp, NULL, 5744 NLP_EVT_DEVICE_RECOVERY); 5745 } 5746 5747 spin_lock_irq(&ndlp->lock); 5748 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5749 spin_unlock_irq(&ndlp->lock); 5750 } else { 5751 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5752 "6456 Skip Setup RSCN Node x%x " 5753 "Data:x%x x%x x%lx\n", 5754 ndlp->nlp_DID, ndlp->nlp_flag, 5755 ndlp->nlp_state, vport->fc_flag); 5756 ndlp = NULL; 5757 } 5758 } else { 5759 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5760 "6457 Setup Active Node 2B_DISC x%x " 5761 "Data:x%x x%x x%lx\n", 5762 ndlp->nlp_DID, ndlp->nlp_flag, 5763 ndlp->nlp_state, vport->fc_flag); 5764 5765 /* If the initiator received a PLOGI from this NPort or if the 5766 * initiator is already in the process of discovery on it, 5767 * there's no need to try to discover it again. 
5768 */ 5769 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || 5770 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 5771 (!vport->phba->nvmet_support && 5772 ndlp->nlp_flag & NLP_RCV_PLOGI)) 5773 return NULL; 5774 5775 if (vport->phba->nvmet_support) 5776 return ndlp; 5777 5778 /* Moving to NPR state clears unsolicited flags and 5779 * allows for rediscovery 5780 */ 5781 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 5782 5783 spin_lock_irq(&ndlp->lock); 5784 ndlp->nlp_flag |= NLP_NPR_2B_DISC; 5785 spin_unlock_irq(&ndlp->lock); 5786 } 5787 return ndlp; 5788 } 5789 5790 /* Build a list of nodes to discover based on the loopmap */ 5791 void 5792 lpfc_disc_list_loopmap(struct lpfc_vport *vport) 5793 { 5794 struct lpfc_hba *phba = vport->phba; 5795 int j; 5796 uint32_t alpa, index; 5797 5798 if (!lpfc_is_link_up(phba)) 5799 return; 5800 5801 if (phba->fc_topology != LPFC_TOPOLOGY_LOOP) 5802 return; 5803 5804 /* Check for loop map present or not */ 5805 if (phba->alpa_map[0]) { 5806 for (j = 1; j <= phba->alpa_map[0]; j++) { 5807 alpa = phba->alpa_map[j]; 5808 if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0)) 5809 continue; 5810 lpfc_setup_disc_node(vport, alpa); 5811 } 5812 } else { 5813 /* No alpamap, so try all alpa's */ 5814 for (j = 0; j < FC_MAXLOOP; j++) { 5815 /* If cfg_scan_down is set, start from highest 5816 * ALPA (0xef) to lowest (0x1). 5817 */ 5818 if (vport->cfg_scan_down) 5819 index = j; 5820 else 5821 index = FC_MAXLOOP - j - 1; 5822 alpa = lpfcAlpaArray[index]; 5823 if ((vport->fc_myDID & 0xff) == alpa) 5824 continue; 5825 lpfc_setup_disc_node(vport, alpa); 5826 } 5827 } 5828 return; 5829 } 5830 5831 /* SLI3 only */ 5832 void 5833 lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) 5834 { 5835 LPFC_MBOXQ_t *mbox; 5836 struct lpfc_sli *psli = &phba->sli; 5837 struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING]; 5838 struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING]; 5839 int rc; 5840 5841 /* 5842 * if it's not a physical port or if we already send 5843 * clear_la then don't send it. 
5844 */ 5845 if ((phba->link_state >= LPFC_CLEAR_LA) || 5846 (vport->port_type != LPFC_PHYSICAL_PORT) || 5847 (phba->sli_rev == LPFC_SLI_REV4)) 5848 return; 5849 5850 /* Link up discovery */ 5851 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 5852 phba->link_state = LPFC_CLEAR_LA; 5853 lpfc_clear_la(phba, mbox); 5854 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 5855 mbox->vport = vport; 5856 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5857 if (rc == MBX_NOT_FINISHED) { 5858 mempool_free(mbox, phba->mbox_mem_pool); 5859 lpfc_disc_flush_list(vport); 5860 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 5861 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 5862 phba->link_state = LPFC_HBA_ERROR; 5863 } 5864 } 5865 } 5866 5867 /* Reg_vpi to tell firmware to resume normal operations */ 5868 void 5869 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) 5870 { 5871 LPFC_MBOXQ_t *regvpimbox; 5872 5873 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5874 if (regvpimbox) { 5875 lpfc_reg_vpi(vport, regvpimbox); 5876 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 5877 regvpimbox->vport = vport; 5878 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 5879 == MBX_NOT_FINISHED) { 5880 mempool_free(regvpimbox, phba->mbox_mem_pool); 5881 } 5882 } 5883 } 5884 5885 /* Start Link up / RSCN discovery on NPR nodes */ 5886 void 5887 lpfc_disc_start(struct lpfc_vport *vport) 5888 { 5889 struct lpfc_hba *phba = vport->phba; 5890 uint32_t num_sent; 5891 uint32_t clear_la_pending; 5892 5893 if (!lpfc_is_link_up(phba)) { 5894 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5895 "3315 Link is not up %x\n", 5896 phba->link_state); 5897 return; 5898 } 5899 5900 if (phba->link_state == LPFC_CLEAR_LA) 5901 clear_la_pending = 1; 5902 else 5903 clear_la_pending = 0; 5904 5905 if (vport->port_state < LPFC_VPORT_READY) 5906 vport->port_state = LPFC_DISC_AUTH; 5907 5908 lpfc_set_disctmo(vport); 5909 5910 vport->fc_prevDID = vport->fc_myDID; 5911 vport->num_disc_nodes = 0; 5912 5913 /* Start Discovery state <hba_state> */ 5914 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 5915 "0202 Start Discovery port state x%x " 5916 "flg x%lx Data: x%x x%x x%x\n", 5917 vport->port_state, vport->fc_flag, 5918 atomic_read(&vport->fc_plogi_cnt), 5919 atomic_read(&vport->fc_adisc_cnt), 5920 atomic_read(&vport->fc_npr_cnt)); 5921 5922 /* First do ADISCs - if any */ 5923 num_sent = lpfc_els_disc_adisc(vport); 5924 5925 if (num_sent) 5926 return; 5927 5928 /* Register the VPI for SLI3, NPIV only. */ 5929 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 5930 !test_bit(FC_PT2PT, &vport->fc_flag) && 5931 !test_bit(FC_RSCN_MODE, &vport->fc_flag) && 5932 (phba->sli_rev < LPFC_SLI_REV4)) { 5933 lpfc_issue_clear_la(phba, vport); 5934 lpfc_issue_reg_vpi(phba, vport); 5935 return; 5936 } 5937 5938 /* 5939 * For SLI2, we need to set port_state to READY and continue 5940 * discovery. 
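 *
 * Overall ordering in this routine: ADISCs were attempted first (above);
 * on SLI3/NPIV fabric ports the CLEAR_LA/REG_VPI pair was issued instead,
 * and the code below falls through to PLOGIs for any remaining NPR nodes
 * and, finally, to follow-up RSCN handling.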
5941 */ 5942 if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) { 5943 /* If we get here, there is nothing to ADISC */ 5944 lpfc_issue_clear_la(phba, vport); 5945 5946 if (!test_bit(FC_ABORT_DISCOVERY, &vport->fc_flag)) { 5947 vport->num_disc_nodes = 0; 5948 /* go thru NPR nodes and issue ELS PLOGIs */ 5949 if (atomic_read(&vport->fc_npr_cnt)) 5950 lpfc_els_disc_plogi(vport); 5951 5952 if (!vport->num_disc_nodes) { 5953 clear_bit(FC_NDISC_ACTIVE, &vport->fc_flag); 5954 lpfc_can_disctmo(vport); 5955 } 5956 } 5957 vport->port_state = LPFC_VPORT_READY; 5958 } else { 5959 /* Next do PLOGIs - if any */ 5960 num_sent = lpfc_els_disc_plogi(vport); 5961 5962 if (num_sent) 5963 return; 5964 5965 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 5966 /* Check to see if more RSCNs came in while we 5967 * were processing this one. 5968 */ 5969 if (vport->fc_rscn_id_cnt == 0 && 5970 !test_bit(FC_RSCN_DISCOVERY, &vport->fc_flag)) { 5971 clear_bit(FC_RSCN_MODE, &vport->fc_flag); 5972 lpfc_can_disctmo(vport); 5973 } else { 5974 lpfc_els_handle_rscn(vport); 5975 } 5976 } 5977 } 5978 return; 5979 } 5980 5981 /* 5982 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS 5983 * ring the match the sppecified nodelist. 5984 */ 5985 static void 5986 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 5987 { 5988 LIST_HEAD(completions); 5989 struct lpfc_iocbq *iocb, *next_iocb; 5990 struct lpfc_sli_ring *pring; 5991 u32 ulp_command; 5992 5993 pring = lpfc_phba_elsring(phba); 5994 if (unlikely(!pring)) 5995 return; 5996 5997 /* Error matching iocb on txq or txcmplq 5998 * First check the txq. 5999 */ 6000 spin_lock_irq(&phba->hbalock); 6001 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 6002 if (iocb->ndlp != ndlp) 6003 continue; 6004 6005 ulp_command = get_job_cmnd(phba, iocb); 6006 6007 if (ulp_command == CMD_ELS_REQUEST64_CR || 6008 ulp_command == CMD_XMIT_ELS_RSP64_CX) { 6009 6010 list_move_tail(&iocb->list, &completions); 6011 } 6012 } 6013 6014 /* Next check the txcmplq */ 6015 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 6016 if (iocb->ndlp != ndlp) 6017 continue; 6018 6019 ulp_command = get_job_cmnd(phba, iocb); 6020 6021 if (ulp_command == CMD_ELS_REQUEST64_CR || 6022 ulp_command == CMD_XMIT_ELS_RSP64_CX) { 6023 lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL); 6024 } 6025 } 6026 spin_unlock_irq(&phba->hbalock); 6027 6028 /* Make sure HBA is alive */ 6029 lpfc_issue_hb_tmo(phba); 6030 6031 /* Cancel all the IOCBs from the completions list */ 6032 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 6033 IOERR_SLI_ABORTED); 6034 } 6035 6036 static void 6037 lpfc_disc_flush_list(struct lpfc_vport *vport) 6038 { 6039 struct lpfc_nodelist *ndlp, *next_ndlp; 6040 struct lpfc_hba *phba = vport->phba; 6041 6042 if (atomic_read(&vport->fc_plogi_cnt) || 6043 atomic_read(&vport->fc_adisc_cnt)) { 6044 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6045 nlp_listp) { 6046 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 6047 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 6048 lpfc_free_tx(phba, ndlp); 6049 } 6050 } 6051 } 6052 } 6053 6054 /* 6055 * lpfc_notify_xport_npr - notifies xport of node disappearance 6056 * @vport: Pointer to Virtual Port object. 6057 * 6058 * Transitions all ndlps to NPR state. When lpfc_nlp_set_state 6059 * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered 6060 * and transport notified that the node is gone. 
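 *
 * In this file it is only called from lpfc_cleanup_discovery_resources()
 * when the PCI channel is offline.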
6061 * Return Code: 6062 * none 6063 */ 6064 static void 6065 lpfc_notify_xport_npr(struct lpfc_vport *vport) 6066 { 6067 struct lpfc_nodelist *ndlp, *next_ndlp; 6068 6069 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6070 nlp_listp) { 6071 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 6072 } 6073 } 6074 void 6075 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) 6076 { 6077 lpfc_els_flush_rscn(vport); 6078 lpfc_els_flush_cmd(vport); 6079 lpfc_disc_flush_list(vport); 6080 if (pci_channel_offline(vport->phba->pcidev)) 6081 lpfc_notify_xport_npr(vport); 6082 } 6083 6084 /*****************************************************************************/ 6085 /* 6086 * NAME: lpfc_disc_timeout 6087 * 6088 * FUNCTION: Fibre Channel driver discovery timeout routine. 6089 * 6090 * EXECUTION ENVIRONMENT: interrupt only 6091 * 6092 * CALLED FROM: 6093 * Timer function 6094 * 6095 * RETURNS: 6096 * none 6097 */ 6098 /*****************************************************************************/ 6099 void 6100 lpfc_disc_timeout(struct timer_list *t) 6101 { 6102 struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); 6103 struct lpfc_hba *phba = vport->phba; 6104 uint32_t tmo_posted; 6105 unsigned long flags = 0; 6106 6107 if (unlikely(!phba)) 6108 return; 6109 6110 spin_lock_irqsave(&vport->work_port_lock, flags); 6111 tmo_posted = vport->work_port_events & WORKER_DISC_TMO; 6112 if (!tmo_posted) 6113 vport->work_port_events |= WORKER_DISC_TMO; 6114 spin_unlock_irqrestore(&vport->work_port_lock, flags); 6115 6116 if (!tmo_posted) 6117 lpfc_worker_wake_up(phba); 6118 return; 6119 } 6120 6121 static void 6122 lpfc_disc_timeout_handler(struct lpfc_vport *vport) 6123 { 6124 struct lpfc_hba *phba = vport->phba; 6125 struct lpfc_sli *psli = &phba->sli; 6126 struct lpfc_nodelist *ndlp, *next_ndlp; 6127 LPFC_MBOXQ_t *initlinkmbox; 6128 int rc, clrlaerr = 0; 6129 6130 if (!test_and_clear_bit(FC_DISC_TMO, &vport->fc_flag)) 6131 return; 6132 6133 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 6134 "disc timeout: state:x%x rtry:x%x flg:x%x", 6135 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 6136 6137 switch (vport->port_state) { 6138 6139 case LPFC_LOCAL_CFG_LINK: 6140 /* 6141 * port_state is identically LPFC_LOCAL_CFG_LINK while 6142 * waiting for FAN timeout 6143 */ 6144 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 6145 "0221 FAN timeout\n"); 6146 6147 /* Start discovery by sending FLOGI, clean up old rpis */ 6148 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 6149 nlp_listp) { 6150 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 6151 continue; 6152 if (ndlp->nlp_type & NLP_FABRIC) { 6153 /* Clean up the ndlp on Fabric connections */ 6154 lpfc_drop_node(vport, ndlp); 6155 6156 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 6157 /* Fail outstanding IO now since device 6158 * is marked for PLOGI. 6159 */ 6160 lpfc_unreg_rpi(vport, ndlp); 6161 } 6162 } 6163 if (vport->port_state != LPFC_FLOGI) { 6164 if (phba->sli_rev <= LPFC_SLI_REV3) 6165 lpfc_initial_flogi(vport); 6166 else 6167 lpfc_issue_init_vfi(vport); 6168 return; 6169 } 6170 break; 6171 6172 case LPFC_FDISC: 6173 case LPFC_FLOGI: 6174 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 6175 /* Initial FLOGI timeout */ 6176 lpfc_printf_vlog(vport, KERN_ERR, 6177 LOG_TRACE_EVENT, 6178 "0222 Initial %s timeout\n", 6179 vport->vpi ? "FDISC" : "FLOGI"); 6180 6181 /* Assume no Fabric and go on with discovery. 6182 * Check for outstanding ELS FLOGI to abort. 
6183 */ 6184 6185 /* FLOGI failed, so just use loop map to make discovery list */ 6186 lpfc_disc_list_loopmap(vport); 6187 6188 /* Start discovery */ 6189 lpfc_disc_start(vport); 6190 break; 6191 6192 case LPFC_FABRIC_CFG_LINK: 6193 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 6194 NameServer login */ 6195 lpfc_printf_vlog(vport, KERN_ERR, 6196 LOG_TRACE_EVENT, 6197 "0223 Timeout while waiting for " 6198 "NameServer login\n"); 6199 /* Next look for NameServer ndlp */ 6200 ndlp = lpfc_findnode_did(vport, NameServer_DID); 6201 if (ndlp) 6202 lpfc_els_abort(phba, ndlp); 6203 6204 /* ReStart discovery */ 6205 goto restart_disc; 6206 6207 case LPFC_NS_QRY: 6208 /* Check for wait for NameServer Rsp timeout */ 6209 lpfc_printf_vlog(vport, KERN_ERR, 6210 LOG_TRACE_EVENT, 6211 "0224 NameServer Query timeout " 6212 "Data: x%x x%x\n", 6213 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 6214 6215 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 6216 /* Try it one more time */ 6217 vport->fc_ns_retry++; 6218 vport->gidft_inp = 0; 6219 rc = lpfc_issue_gidft(vport); 6220 if (rc == 0) 6221 break; 6222 } 6223 vport->fc_ns_retry = 0; 6224 6225 restart_disc: 6226 /* 6227 * Discovery is over. 6228 * set port_state to PORT_READY if SLI2. 6229 * cmpl_reg_vpi will set port_state to READY for SLI3. 6230 */ 6231 if (phba->sli_rev < LPFC_SLI_REV4) { 6232 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 6233 lpfc_issue_reg_vpi(phba, vport); 6234 else { 6235 lpfc_issue_clear_la(phba, vport); 6236 vport->port_state = LPFC_VPORT_READY; 6237 } 6238 } 6239 6240 /* Setup and issue mailbox INITIALIZE LINK command */ 6241 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6242 if (!initlinkmbox) { 6243 lpfc_printf_vlog(vport, KERN_ERR, 6244 LOG_TRACE_EVENT, 6245 "0206 Device Discovery " 6246 "completion error\n"); 6247 phba->link_state = LPFC_HBA_ERROR; 6248 break; 6249 } 6250 6251 lpfc_linkdown(phba); 6252 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 6253 phba->cfg_link_speed); 6254 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 6255 initlinkmbox->vport = vport; 6256 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 6257 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 6258 lpfc_set_loopback_flag(phba); 6259 if (rc == MBX_NOT_FINISHED) 6260 mempool_free(initlinkmbox, phba->mbox_mem_pool); 6261 6262 break; 6263 6264 case LPFC_DISC_AUTH: 6265 /* Node Authentication timeout */ 6266 lpfc_printf_vlog(vport, KERN_ERR, 6267 LOG_TRACE_EVENT, 6268 "0227 Node Authentication timeout\n"); 6269 lpfc_disc_flush_list(vport); 6270 6271 /* 6272 * set port_state to PORT_READY if SLI2. 6273 * cmpl_reg_vpi will set port_state to READY for SLI3. 
6274 */ 6275 if (phba->sli_rev < LPFC_SLI_REV4) { 6276 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 6277 lpfc_issue_reg_vpi(phba, vport); 6278 else { /* NPIV Not enabled */ 6279 lpfc_issue_clear_la(phba, vport); 6280 vport->port_state = LPFC_VPORT_READY; 6281 } 6282 } 6283 break; 6284 6285 case LPFC_VPORT_READY: 6286 if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { 6287 lpfc_printf_vlog(vport, KERN_ERR, 6288 LOG_TRACE_EVENT, 6289 "0231 RSCN timeout Data: x%x " 6290 "x%x x%x x%x\n", 6291 vport->fc_ns_retry, LPFC_MAX_NS_RETRY, 6292 vport->port_state, vport->gidft_inp); 6293 6294 /* Cleanup any outstanding ELS commands */ 6295 lpfc_els_flush_cmd(vport); 6296 6297 lpfc_els_flush_rscn(vport); 6298 lpfc_disc_flush_list(vport); 6299 } 6300 break; 6301 6302 default: 6303 lpfc_printf_vlog(vport, KERN_ERR, 6304 LOG_TRACE_EVENT, 6305 "0273 Unexpected discovery timeout, " 6306 "vport State x%x\n", vport->port_state); 6307 break; 6308 } 6309 6310 switch (phba->link_state) { 6311 case LPFC_CLEAR_LA: 6312 /* CLEAR LA timeout */ 6313 lpfc_printf_vlog(vport, KERN_ERR, 6314 LOG_TRACE_EVENT, 6315 "0228 CLEAR LA timeout\n"); 6316 clrlaerr = 1; 6317 break; 6318 6319 case LPFC_LINK_UP: 6320 lpfc_issue_clear_la(phba, vport); 6321 fallthrough; 6322 case LPFC_LINK_UNKNOWN: 6323 case LPFC_WARM_START: 6324 case LPFC_INIT_START: 6325 case LPFC_INIT_MBX_CMDS: 6326 case LPFC_LINK_DOWN: 6327 case LPFC_HBA_ERROR: 6328 lpfc_printf_vlog(vport, KERN_ERR, 6329 LOG_TRACE_EVENT, 6330 "0230 Unexpected timeout, hba link " 6331 "state x%x\n", phba->link_state); 6332 clrlaerr = 1; 6333 break; 6334 6335 case LPFC_HBA_READY: 6336 break; 6337 } 6338 6339 if (clrlaerr) { 6340 lpfc_disc_flush_list(vport); 6341 if (phba->sli_rev != LPFC_SLI_REV4) { 6342 psli->sli3_ring[(LPFC_EXTRA_RING)].flag &= 6343 ~LPFC_STOP_IOCB_EVENT; 6344 psli->sli3_ring[LPFC_FCP_RING].flag &= 6345 ~LPFC_STOP_IOCB_EVENT; 6346 } 6347 vport->port_state = LPFC_VPORT_READY; 6348 } 6349 return; 6350 } 6351 6352 /* 6353 * This routine handles processing a NameServer REG_LOGIN mailbox 6354 * command upon completion. It is setup in the LPFC_MBOXQ 6355 * as the completion routine when the command is 6356 * handed off to the SLI layer. 6357 */ 6358 void 6359 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6360 { 6361 MAILBOX_t *mb = &pmb->u.mb; 6362 struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; 6363 struct lpfc_vport *vport = pmb->vport; 6364 6365 pmb->ctx_ndlp = NULL; 6366 6367 if (phba->sli_rev < LPFC_SLI_REV4) 6368 ndlp->nlp_rpi = mb->un.varWords[0]; 6369 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 6370 ndlp->nlp_type |= NLP_FABRIC; 6371 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 6372 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 6373 "0004 rpi:%x DID:%x flg:%x %d x%px\n", 6374 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, 6375 kref_read(&ndlp->kref), 6376 ndlp); 6377 /* 6378 * Start issuing Fabric-Device Management Interface (FDMI) command to 6379 * 0xfffffa (FDMI well known port). 6380 * DHBA -> DPRT -> RHBA -> RPA (physical port) 6381 * DPRT -> RPRT (vports) 6382 */ 6383 if (vport->port_type == LPFC_PHYSICAL_PORT) { 6384 phba->link_flag &= ~LS_CT_VEN_RPA; /* For extra Vendor RPA */ 6385 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); 6386 } else { 6387 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); 6388 } 6389 6390 6391 /* decrement the node reference count held for this callback 6392 * function. 
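	 * This put balances the lpfc_nlp_get() reference taken when the
	 * REG_LOGIN mailbox job was queued for this node.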
6393 */ 6394 lpfc_nlp_put(ndlp); 6395 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED); 6396 return; 6397 } 6398 6399 static int 6400 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) 6401 { 6402 uint16_t *rpi = param; 6403 6404 return ndlp->nlp_rpi == *rpi; 6405 } 6406 6407 static int 6408 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) 6409 { 6410 return memcmp(&ndlp->nlp_portname, param, 6411 sizeof(ndlp->nlp_portname)) == 0; 6412 } 6413 6414 static struct lpfc_nodelist * 6415 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 6416 { 6417 struct lpfc_nodelist *ndlp; 6418 6419 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 6420 if (filter(ndlp, param)) { 6421 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, 6422 "3185 FIND node filter %ps DID " 6423 "ndlp x%px did x%x flg x%x st x%x " 6424 "xri x%x type x%x rpi x%x\n", 6425 filter, ndlp, ndlp->nlp_DID, 6426 ndlp->nlp_flag, ndlp->nlp_state, 6427 ndlp->nlp_xri, ndlp->nlp_type, 6428 ndlp->nlp_rpi); 6429 return ndlp; 6430 } 6431 } 6432 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6433 "3186 FIND node filter %ps NOT FOUND.\n", filter); 6434 return NULL; 6435 } 6436 6437 /* 6438 * This routine looks up the ndlp lists for the given RPI. If rpi found it 6439 * returns the node list element pointer else return NULL. 6440 */ 6441 struct lpfc_nodelist * 6442 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 6443 { 6444 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); 6445 } 6446 6447 /* 6448 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 6449 * returns the node element list pointer else return NULL. 6450 */ 6451 struct lpfc_nodelist * 6452 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) 6453 { 6454 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6455 struct lpfc_nodelist *ndlp; 6456 6457 spin_lock_irq(shost->host_lock); 6458 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); 6459 spin_unlock_irq(shost->host_lock); 6460 return ndlp; 6461 } 6462 6463 /* 6464 * This routine looks up the ndlp lists for the given RPI. If the rpi 6465 * is found, the routine returns the node element list pointer else 6466 * return NULL. 6467 */ 6468 struct lpfc_nodelist * 6469 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 6470 { 6471 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 6472 struct lpfc_nodelist *ndlp; 6473 unsigned long flags; 6474 6475 spin_lock_irqsave(shost->host_lock, flags); 6476 ndlp = __lpfc_findnode_rpi(vport, rpi); 6477 spin_unlock_irqrestore(shost->host_lock, flags); 6478 return ndlp; 6479 } 6480 6481 /** 6482 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier 6483 * @phba: pointer to lpfc hba data structure. 6484 * @vpi: the physical host virtual N_Port identifier. 6485 * 6486 * This routine finds a vport on a HBA (referred by @phba) through a 6487 * @vpi. The function walks the HBA's vport list and returns the address 6488 * of the vport with the matching @vpi. 6489 * 6490 * Return code 6491 * NULL - No vport with the matching @vpi found 6492 * Otherwise - Address to the vport with the matching @vpi. 6493 **/ 6494 struct lpfc_vport * 6495 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 6496 { 6497 struct lpfc_vport *vport; 6498 unsigned long flags; 6499 int i = 0; 6500 6501 /* The physical ports are always vpi 0 - translate is unnecessary. */ 6502 if (vpi > 0) { 6503 /* 6504 * Translate the physical vpi to the logical vpi. 
The 6505 * vport stores the logical vpi. 6506 */ 6507 for (i = 0; i <= phba->max_vpi; i++) { 6508 if (vpi == phba->vpi_ids[i]) 6509 break; 6510 } 6511 6512 if (i > phba->max_vpi) { 6513 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6514 "2936 Could not find Vport mapped " 6515 "to vpi %d\n", vpi); 6516 return NULL; 6517 } 6518 } 6519 6520 spin_lock_irqsave(&phba->port_list_lock, flags); 6521 list_for_each_entry(vport, &phba->port_list, listentry) { 6522 if (vport->vpi == i) { 6523 spin_unlock_irqrestore(&phba->port_list_lock, flags); 6524 return vport; 6525 } 6526 } 6527 spin_unlock_irqrestore(&phba->port_list_lock, flags); 6528 return NULL; 6529 } 6530 6531 struct lpfc_nodelist * 6532 lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) 6533 { 6534 struct lpfc_nodelist *ndlp; 6535 int rpi = LPFC_RPI_ALLOC_ERROR; 6536 6537 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 6538 rpi = lpfc_sli4_alloc_rpi(vport->phba); 6539 if (rpi == LPFC_RPI_ALLOC_ERROR) 6540 return NULL; 6541 } 6542 6543 ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL); 6544 if (!ndlp) { 6545 if (vport->phba->sli_rev == LPFC_SLI_REV4) 6546 lpfc_sli4_free_rpi(vport->phba, rpi); 6547 return NULL; 6548 } 6549 6550 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 6551 6552 spin_lock_init(&ndlp->lock); 6553 6554 lpfc_initialize_node(vport, ndlp, did); 6555 INIT_LIST_HEAD(&ndlp->nlp_listp); 6556 if (vport->phba->sli_rev == LPFC_SLI_REV4) { 6557 ndlp->nlp_rpi = rpi; 6558 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, 6559 "0007 Init New ndlp x%px, rpi:x%x DID:%x " 6560 "flg:x%x refcnt:%d\n", 6561 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, 6562 ndlp->nlp_flag, kref_read(&ndlp->kref)); 6563 6564 ndlp->active_rrqs_xri_bitmap = 6565 mempool_alloc(vport->phba->active_rrq_pool, 6566 GFP_KERNEL); 6567 if (ndlp->active_rrqs_xri_bitmap) 6568 memset(ndlp->active_rrqs_xri_bitmap, 0, 6569 ndlp->phba->cfg_rrq_xri_bitmap_sz); 6570 } 6571 6572 6573 6574 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 6575 "node init: did:x%x", 6576 ndlp->nlp_DID, 0, 0); 6577 6578 return ndlp; 6579 } 6580 6581 /* This routine releases all resources associated with a specifc NPort's ndlp 6582 * and mempool_free's the nodelist. 6583 */ 6584 static void 6585 lpfc_nlp_release(struct kref *kref) 6586 { 6587 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 6588 kref); 6589 struct lpfc_vport *vport = ndlp->vport; 6590 6591 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6592 "node release: did:x%x flg:x%x type:x%x", 6593 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 6594 6595 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 6596 "0279 %s: ndlp: x%px did %x refcnt:%d rpi:%x\n", 6597 __func__, ndlp, ndlp->nlp_DID, 6598 kref_read(&ndlp->kref), ndlp->nlp_rpi); 6599 6600 /* remove ndlp from action. */ 6601 lpfc_cancel_retry_delay_tmo(vport, ndlp); 6602 lpfc_cleanup_node(vport, ndlp); 6603 6604 /* Not all ELS transactions have registered the RPI with the port. 6605 * In these cases the rpi usage is temporary and the node is 6606 * released when the WQE is completed. Catch this case to free the 6607 * RPI to the pool. Because this node is in the release path, a lock 6608 * is unnecessary. All references are gone and the node has been 6609 * dequeued. 
6610 */ 6611 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 6612 if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && 6613 !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { 6614 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); 6615 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 6616 } 6617 } 6618 6619 /* The node is not freed back to memory, it is released to a pool so 6620 * the node fields need to be cleaned up. 6621 */ 6622 ndlp->vport = NULL; 6623 ndlp->nlp_state = NLP_STE_FREED_NODE; 6624 ndlp->nlp_flag = 0; 6625 ndlp->fc4_xpt_flags = 0; 6626 6627 /* free ndlp memory for final ndlp release */ 6628 if (ndlp->phba->sli_rev == LPFC_SLI_REV4) 6629 mempool_free(ndlp->active_rrqs_xri_bitmap, 6630 ndlp->phba->active_rrq_pool); 6631 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); 6632 } 6633 6634 /* This routine bumps the reference count for a ndlp structure to ensure 6635 * that one discovery thread won't free a ndlp while another discovery thread 6636 * is using it. 6637 */ 6638 struct lpfc_nodelist * 6639 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 6640 { 6641 unsigned long flags; 6642 6643 if (ndlp) { 6644 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6645 "node get: did:x%x flg:x%x refcnt:x%x", 6646 ndlp->nlp_DID, ndlp->nlp_flag, 6647 kref_read(&ndlp->kref)); 6648 6649 /* The check of ndlp usage to prevent incrementing the 6650 * ndlp reference count that is in the process of being 6651 * released. 6652 */ 6653 spin_lock_irqsave(&ndlp->lock, flags); 6654 if (!kref_get_unless_zero(&ndlp->kref)) { 6655 spin_unlock_irqrestore(&ndlp->lock, flags); 6656 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 6657 "0276 %s: ndlp:x%px refcnt:%d\n", 6658 __func__, (void *)ndlp, kref_read(&ndlp->kref)); 6659 return NULL; 6660 } 6661 spin_unlock_irqrestore(&ndlp->lock, flags); 6662 } else { 6663 WARN_ONCE(!ndlp, "**** %s, get ref on NULL ndlp!", __func__); 6664 } 6665 6666 return ndlp; 6667 } 6668 6669 /* This routine decrements the reference count for a ndlp structure. If the 6670 * count goes to 0, this indicates the associated nodelist should be freed. 6671 */ 6672 int 6673 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 6674 { 6675 if (ndlp) { 6676 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 6677 "node put: did:x%x flg:x%x refcnt:x%x", 6678 ndlp->nlp_DID, ndlp->nlp_flag, 6679 kref_read(&ndlp->kref)); 6680 } else { 6681 WARN_ONCE(!ndlp, "**** %s, put ref on NULL ndlp!", __func__); 6682 } 6683 6684 return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; 6685 } 6686 6687 /** 6688 * lpfc_fcf_inuse - Check if FCF can be unregistered. 6689 * @phba: Pointer to hba context object. 6690 * 6691 * This function iterate through all FC nodes associated 6692 * will all vports to check if there is any node with 6693 * fc_rports associated with it. If there is an fc_rport 6694 * associated with the node, then the node is either in 6695 * discovered state or its devloss_timer is pending. 6696 */ 6697 static int 6698 lpfc_fcf_inuse(struct lpfc_hba *phba) 6699 { 6700 struct lpfc_vport **vports; 6701 int i, ret = 0; 6702 struct lpfc_nodelist *ndlp; 6703 unsigned long iflags; 6704 6705 vports = lpfc_create_vport_work_array(phba); 6706 6707 /* If driver cannot allocate memory, indicate fcf is in use */ 6708 if (!vports) 6709 return 1; 6710 6711 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 6712 /* 6713 * IF the CVL_RCVD bit is not set then we have sent the 6714 * flogi. 6715 * If dev_loss fires while we are waiting we do not want to 6716 * unreg the fcf. 
6717 */ 6718 if (!test_bit(FC_VPORT_CVL_RCVD, &vports[i]->fc_flag)) { 6719 ret = 1; 6720 goto out; 6721 } 6722 spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); 6723 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 6724 if (ndlp->rport && 6725 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 6726 ret = 1; 6727 spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, 6728 iflags); 6729 goto out; 6730 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 6731 ret = 1; 6732 lpfc_printf_log(phba, KERN_INFO, 6733 LOG_NODE | LOG_DISCOVERY, 6734 "2624 RPI %x DID %x flag %x " 6735 "still logged in\n", 6736 ndlp->nlp_rpi, ndlp->nlp_DID, 6737 ndlp->nlp_flag); 6738 } 6739 } 6740 spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); 6741 } 6742 out: 6743 lpfc_destroy_vport_work_array(phba, vports); 6744 return ret; 6745 } 6746 6747 /** 6748 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. 6749 * @phba: Pointer to hba context object. 6750 * @mboxq: Pointer to mailbox object. 6751 * 6752 * This function frees memory associated with the mailbox command. 6753 */ 6754 void 6755 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6756 { 6757 struct lpfc_vport *vport = mboxq->vport; 6758 6759 if (mboxq->u.mb.mbxStatus) { 6760 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6761 "2555 UNREG_VFI mbxStatus error x%x " 6762 "HBA state x%x\n", 6763 mboxq->u.mb.mbxStatus, vport->port_state); 6764 } 6765 clear_bit(FC_VFI_REGISTERED, &phba->pport->fc_flag); 6766 mempool_free(mboxq, phba->mbox_mem_pool); 6767 return; 6768 } 6769 6770 /** 6771 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. 6772 * @phba: Pointer to hba context object. 6773 * @mboxq: Pointer to mailbox object. 6774 * 6775 * This function frees memory associated with the mailbox command. 6776 */ 6777 static void 6778 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 6779 { 6780 struct lpfc_vport *vport = mboxq->vport; 6781 6782 if (mboxq->u.mb.mbxStatus) { 6783 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6784 "2550 UNREG_FCFI mbxStatus error x%x " 6785 "HBA state x%x\n", 6786 mboxq->u.mb.mbxStatus, vport->port_state); 6787 } 6788 mempool_free(mboxq, phba->mbox_mem_pool); 6789 return; 6790 } 6791 6792 /** 6793 * lpfc_unregister_fcf_prep - Unregister fcf record preparation 6794 * @phba: Pointer to hba context object. 6795 * 6796 * This function prepare the HBA for unregistering the currently registered 6797 * FCF from the HBA. It performs unregistering, in order, RPIs, VPIs, and 6798 * VFIs. 
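 *
 * In outline (matching the body below):
 *	1. unregister all RPIs if the FCF still has logins (lpfc_fcf_inuse())
 *	2. for each vport: stop FLOGI/FDISC retries, clean up pending
 *	   mailboxes, unregister the VPI and mark it FC_VPORT_NEEDS_INIT_VPI
 *	3. flush all outstanding ELS commands
 *	4. unregister the physical port's VFI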
6799 */ 6800 int 6801 lpfc_unregister_fcf_prep(struct lpfc_hba *phba) 6802 { 6803 struct lpfc_vport **vports; 6804 struct lpfc_nodelist *ndlp; 6805 struct Scsi_Host *shost; 6806 int i = 0, rc; 6807 6808 /* Unregister RPIs */ 6809 if (lpfc_fcf_inuse(phba)) 6810 lpfc_unreg_hba_rpis(phba); 6811 6812 /* At this point, all discovery is aborted */ 6813 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 6814 6815 /* Unregister VPIs */ 6816 vports = lpfc_create_vport_work_array(phba); 6817 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 6818 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 6819 /* Stop FLOGI/FDISC retries */ 6820 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 6821 if (ndlp) 6822 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 6823 lpfc_cleanup_pending_mbox(vports[i]); 6824 if (phba->sli_rev == LPFC_SLI_REV4) 6825 lpfc_sli4_unreg_all_rpis(vports[i]); 6826 lpfc_mbx_unreg_vpi(vports[i]); 6827 shost = lpfc_shost_from_vport(vports[i]); 6828 spin_lock_irq(shost->host_lock); 6829 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 6830 spin_unlock_irq(shost->host_lock); 6831 set_bit(FC_VPORT_NEEDS_INIT_VPI, &vports[i]->fc_flag); 6832 } 6833 lpfc_destroy_vport_work_array(phba, vports); 6834 if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { 6835 ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); 6836 if (ndlp) 6837 lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); 6838 lpfc_cleanup_pending_mbox(phba->pport); 6839 if (phba->sli_rev == LPFC_SLI_REV4) 6840 lpfc_sli4_unreg_all_rpis(phba->pport); 6841 lpfc_mbx_unreg_vpi(phba->pport); 6842 shost = lpfc_shost_from_vport(phba->pport); 6843 spin_lock_irq(shost->host_lock); 6844 phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; 6845 spin_unlock_irq(shost->host_lock); 6846 set_bit(FC_VPORT_NEEDS_INIT_VPI, &phba->pport->fc_flag); 6847 } 6848 6849 /* Cleanup any outstanding ELS commands */ 6850 lpfc_els_flush_all_cmd(phba); 6851 6852 /* Unregister the physical port VFI */ 6853 rc = lpfc_issue_unreg_vfi(phba->pport); 6854 return rc; 6855 } 6856 6857 /** 6858 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record 6859 * @phba: Pointer to hba context object. 6860 * 6861 * This function issues synchronous unregister FCF mailbox command to HBA to 6862 * unregister the currently registered FCF record. The driver does not reset 6863 * the driver FCF usage state flags. 6864 * 6865 * Return 0 if successfully issued, none-zero otherwise. 6866 */ 6867 int 6868 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) 6869 { 6870 LPFC_MBOXQ_t *mbox; 6871 int rc; 6872 6873 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6874 if (!mbox) { 6875 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6876 "2551 UNREG_FCFI mbox allocation failed" 6877 "HBA state x%x\n", phba->pport->port_state); 6878 return -ENOMEM; 6879 } 6880 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 6881 mbox->vport = phba->pport; 6882 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 6883 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6884 6885 if (rc == MBX_NOT_FINISHED) { 6886 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 6887 "2552 Unregister FCFI command failed rc x%x " 6888 "HBA state x%x\n", 6889 rc, phba->pport->port_state); 6890 return -EINVAL; 6891 } 6892 return 0; 6893 } 6894 6895 /** 6896 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan 6897 * @phba: Pointer to hba context object. 6898 * 6899 * This function unregisters the currently reigstered FCF. 
This function
6900 * also tries to find another FCF for discovery by rescanning the HBA FCF table.
6901 */
6902 void
6903 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
6904 {
6905 int rc;
6906
6907 /* Preparation for unregistering fcf */
6908 rc = lpfc_unregister_fcf_prep(phba);
6909 if (rc) {
6910 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6911 "2748 Failed to prepare for unregistering "
6912 "HBA's FCF record: rc=%d\n", rc);
6913 return;
6914 }
6915
6916 /* Now, unregister FCF record and reset HBA FCF state */
6917 rc = lpfc_sli4_unregister_fcf(phba);
6918 if (rc)
6919 return;
6920 /* Reset HBA FCF states after successful unregister FCF */
6921 spin_lock_irq(&phba->hbalock);
6922 phba->fcf.fcf_flag = 0;
6923 spin_unlock_irq(&phba->hbalock);
6924 phba->fcf.current_rec.flag = 0;
6925
6926 /*
6927 * If driver is not unloading, check if there is any other
6928 * FCF record that can be used for discovery.
6929 */
6930 if (test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
6931 phba->link_state < LPFC_LINK_UP)
6932 return;
6933
6934 /* This is considered as the initial FCF discovery scan */
6935 spin_lock_irq(&phba->hbalock);
6936 phba->fcf.fcf_flag |= FCF_INIT_DISC;
6937 spin_unlock_irq(&phba->hbalock);
6938
6939 /* Reset FCF roundrobin bmask for new discovery */
6940 lpfc_sli4_clear_fcf_rr_bmask(phba);
6941
6942 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
6943
6944 if (rc) {
6945 spin_lock_irq(&phba->hbalock);
6946 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
6947 spin_unlock_irq(&phba->hbalock);
6948 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6949 "2553 lpfc_unregister_unused_fcf failed "
6950 "to read FCF record HBA state x%x\n",
6951 phba->pport->port_state);
6952 }
6953 }
6954
6955 /**
6956 * lpfc_unregister_fcf - Unregister the currently registered fcf record
6957 * @phba: Pointer to hba context object.
6958 *
6959 * This function just unregisters the currently registered FCF. It does not
6960 * try to find another FCF for discovery.
6961 */
6962 void
6963 lpfc_unregister_fcf(struct lpfc_hba *phba)
6964 {
6965 int rc;
6966
6967 /* Preparation for unregistering fcf */
6968 rc = lpfc_unregister_fcf_prep(phba);
6969 if (rc) {
6970 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6971 "2749 Failed to prepare for unregistering "
6972 "HBA's FCF record: rc=%d\n", rc);
6973 return;
6974 }
6975
6976 /* Now, unregister FCF record and reset HBA FCF state */
6977 rc = lpfc_sli4_unregister_fcf(phba);
6978 if (rc)
6979 return;
6980 /* Set proper HBA FCF states after successful unregister FCF */
6981 spin_lock_irq(&phba->hbalock);
6982 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
6983 spin_unlock_irq(&phba->hbalock);
6984 }
6985
6986 /**
6987 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
6988 * @phba: Pointer to hba context object.
6989 *
6990 * This function checks if there are any connected remote ports for the FCF
6991 * and, if all the devices are disconnected, unregisters the FCFI.
6992 * This function also tries to use another FCF for discovery.
6993 */
6994 void
6995 lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6996 {
6997 /*
6998 * If HBA is not running in FIP mode, if HBA does not support
6999 * FCoE, if FCF discovery is ongoing, or if FCF has not been
7000 * registered, do nothing.
7001 */
7002 spin_lock_irq(&phba->hbalock);
7003 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) ||
7004 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
7005 !test_bit(HBA_FIP_SUPPORT, &phba->hba_flag) ||
7006 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
7007 phba->pport->port_state == LPFC_FLOGI) {
7008 spin_unlock_irq(&phba->hbalock);
7009 return;
7010 }
7011 spin_unlock_irq(&phba->hbalock);
7012
7013 if (lpfc_fcf_inuse(phba))
7014 return;
7015
7016 lpfc_unregister_fcf_rescan(phba);
7017 }
7018
7019 /**
7020 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
7021 * @phba: Pointer to hba context object.
7022 * @buff: Buffer containing the FCF connection table as in the config
7023 * region.
7024 * This function creates the driver data structure for the FCF connection
7025 * record table read from config region 23.
7026 */
7027 static void
7028 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
7029 uint8_t *buff)
7030 {
7031 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
7032 struct lpfc_fcf_conn_hdr *conn_hdr;
7033 struct lpfc_fcf_conn_rec *conn_rec;
7034 uint32_t record_count;
7035 int i;
7036
7037 /* Free the current connect table */
7038 list_for_each_entry_safe(conn_entry, next_conn_entry,
7039 &phba->fcf_conn_rec_list, list) {
7040 list_del_init(&conn_entry->list);
7041 kfree(conn_entry);
7042 }
7043
7044 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
7045 record_count = conn_hdr->length * sizeof(uint32_t)/
7046 sizeof(struct lpfc_fcf_conn_rec);
7047
7048 conn_rec = (struct lpfc_fcf_conn_rec *)
7049 (buff + sizeof(struct lpfc_fcf_conn_hdr));
7050
7051 for (i = 0; i < record_count; i++) {
7052 if (!(conn_rec[i].flags & FCFCNCT_VALID))
7053 continue;
7054 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
7055 GFP_KERNEL);
7056 if (!conn_entry) {
7057 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7058 "2566 Failed to allocate connection"
7059 " table entry\n");
7060 return;
7061 }
7062
7063 memcpy(&conn_entry->conn_rec, &conn_rec[i],
7064 sizeof(struct lpfc_fcf_conn_rec));
7065 list_add_tail(&conn_entry->list,
7066 &phba->fcf_conn_rec_list);
7067 }
7068
7069 if (!list_empty(&phba->fcf_conn_rec_list)) {
7070 i = 0;
7071 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
7072 list) {
7073 conn_rec = &conn_entry->conn_rec;
7074 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7075 "3345 FCF connection list rec[%02d]: "
7076 "flags:x%04x, vtag:x%04x, "
7077 "fabric_name:x%02x:%02x:%02x:%02x:"
7078 "%02x:%02x:%02x:%02x, "
7079 "switch_name:x%02x:%02x:%02x:%02x:"
7080 "%02x:%02x:%02x:%02x\n", i++,
7081 conn_rec->flags, conn_rec->vlan_tag,
7082 conn_rec->fabric_name[0],
7083 conn_rec->fabric_name[1],
7084 conn_rec->fabric_name[2],
7085 conn_rec->fabric_name[3],
7086 conn_rec->fabric_name[4],
7087 conn_rec->fabric_name[5],
7088 conn_rec->fabric_name[6],
7089 conn_rec->fabric_name[7],
7090 conn_rec->switch_name[0],
7091 conn_rec->switch_name[1],
7092 conn_rec->switch_name[2],
7093 conn_rec->switch_name[3],
7094 conn_rec->switch_name[4],
7095 conn_rec->switch_name[5],
7096 conn_rec->switch_name[6],
7097 conn_rec->switch_name[7]);
7098 }
7099 }
7100 }
7101
7102 /**
7103 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
7104 * @phba: Pointer to hba context object.
7105 * @buff: Buffer containing the FCoE parameter data structure.
7106 *
7107 * This function updates the driver data structure with config
7108 * parameters read from config region 23.
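 *
 * Expected buffer layout (as consumed by the parsing below):
 *	buff + 0:                                   struct lpfc_fip_param_hdr
 *	buff + sizeof(struct lpfc_fip_param_hdr):   struct lpfc_fcoe_params
 * The record is ignored unless the header reports FIPP_VERSION and
 * FCOE_PARAM_LENGTH; FIPP_VLAN_VALID gates whether the 12-bit VLAN id is
 * taken from the parameters.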
7109 */ 7110 static void 7111 lpfc_read_fcoe_param(struct lpfc_hba *phba, 7112 uint8_t *buff) 7113 { 7114 struct lpfc_fip_param_hdr *fcoe_param_hdr; 7115 struct lpfc_fcoe_params *fcoe_param; 7116 7117 fcoe_param_hdr = (struct lpfc_fip_param_hdr *) 7118 buff; 7119 fcoe_param = (struct lpfc_fcoe_params *) 7120 (buff + sizeof(struct lpfc_fip_param_hdr)); 7121 7122 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || 7123 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 7124 return; 7125 7126 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { 7127 phba->valid_vlan = 1; 7128 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 7129 0xFFF; 7130 } 7131 7132 phba->fc_map[0] = fcoe_param->fc_map[0]; 7133 phba->fc_map[1] = fcoe_param->fc_map[1]; 7134 phba->fc_map[2] = fcoe_param->fc_map[2]; 7135 return; 7136 } 7137 7138 /** 7139 * lpfc_get_rec_conf23 - Get a record type in config region data. 7140 * @buff: Buffer containing config region 23 data. 7141 * @size: Size of the data buffer. 7142 * @rec_type: Record type to be searched. 7143 * 7144 * This function searches config region data to find the beginning 7145 * of the record specified by record_type. If record found, this 7146 * function return pointer to the record else return NULL. 7147 */ 7148 static uint8_t * 7149 lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) 7150 { 7151 uint32_t offset = 0, rec_length; 7152 7153 if ((buff[0] == LPFC_REGION23_LAST_REC) || 7154 (size < sizeof(uint32_t))) 7155 return NULL; 7156 7157 rec_length = buff[offset + 1]; 7158 7159 /* 7160 * One TLV record has one word header and number of data words 7161 * specified in the rec_length field of the record header. 7162 */ 7163 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) 7164 <= size) { 7165 if (buff[offset] == rec_type) 7166 return &buff[offset]; 7167 7168 if (buff[offset] == LPFC_REGION23_LAST_REC) 7169 return NULL; 7170 7171 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); 7172 rec_length = buff[offset + 1]; 7173 } 7174 return NULL; 7175 } 7176 7177 /** 7178 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. 7179 * @phba: Pointer to lpfc_hba data structure. 7180 * @buff: Buffer containing config region 23 data. 7181 * @size: Size of the data buffer. 7182 * 7183 * This function parses the FCoE config parameters in config region 23 and 7184 * populate driver data structure with the parameters. 7185 */ 7186 void 7187 lpfc_parse_fcoe_conf(struct lpfc_hba *phba, 7188 uint8_t *buff, 7189 uint32_t size) 7190 { 7191 uint32_t offset = 0; 7192 uint8_t *rec_ptr; 7193 7194 /* 7195 * If data size is less than 2 words signature and version cannot be 7196 * verified. 
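	 *
	 * Region 23 layout assumed by this parser: a 4-byte signature,
	 * a 4-byte version word, then TLV records in which buff[offset] is
	 * the record type and buff[offset + 1] is the record length in
	 * 32-bit words (see lpfc_get_rec_conf23() above).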
7197 */ 7198 if (size < 2*sizeof(uint32_t)) 7199 return; 7200 7201 /* Check the region signature first */ 7202 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { 7203 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7204 "2567 Config region 23 has bad signature\n"); 7205 return; 7206 } 7207 7208 offset += 4; 7209 7210 /* Check the data structure version */ 7211 if (buff[offset] != LPFC_REGION23_VERSION) { 7212 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, 7213 "2568 Config region 23 has bad version\n"); 7214 return; 7215 } 7216 offset += 4; 7217 7218 /* Read FCoE param record */ 7219 rec_ptr = lpfc_get_rec_conf23(&buff[offset], 7220 size - offset, FCOE_PARAM_TYPE); 7221 if (rec_ptr) 7222 lpfc_read_fcoe_param(phba, rec_ptr); 7223 7224 /* Read FCF connection table */ 7225 rec_ptr = lpfc_get_rec_conf23(&buff[offset], 7226 size - offset, FCOE_CONN_TBL_TYPE); 7227 if (rec_ptr) 7228 lpfc_read_fcf_conn_tbl(phba, rec_ptr); 7229 7230 } 7231 7232 /* 7233 * lpfc_error_lost_link - IO failure from link event or FW reset check. 7234 * 7235 * @vport: Pointer to lpfc_vport data structure. 7236 * @ulp_status: IO completion status. 7237 * @ulp_word4: Reason code for the ulp_status. 7238 * 7239 * This function evaluates the ulp_status and ulp_word4 values 7240 * for specific error values that indicate an internal link fault 7241 * or fw reset event for the completing IO. Callers require this 7242 * common data to decide next steps on the IO. 7243 * 7244 * Return: 7245 * false - No link or reset error occurred. 7246 * true - A link or reset error occurred. 7247 */ 7248 bool 7249 lpfc_error_lost_link(struct lpfc_vport *vport, u32 ulp_status, u32 ulp_word4) 7250 { 7251 /* Mask off the extra port data to get just the reason code. */ 7252 u32 rsn_code = IOERR_PARAM_MASK & ulp_word4; 7253 7254 if (ulp_status == IOSTAT_LOCAL_REJECT && 7255 (rsn_code == IOERR_SLI_ABORTED || 7256 rsn_code == IOERR_LINK_DOWN || 7257 rsn_code == IOERR_SLI_DOWN)) { 7258 lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI | LOG_ELS, 7259 "0408 Report link error true: <x%x:x%x>\n", 7260 ulp_status, ulp_word4); 7261 return true; 7262 } 7263 7264 return false; 7265 } 7266
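/*
 * Usage sketch for lpfc_error_lost_link() (illustrative only; real callers
 * are the ELS and FCP/NVMe completion paths):
 *
 *	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
 *		the exchange died with the link or a firmware reset,
 *		so skip retries and let rediscovery recover the node
 *	} else {
 *		evaluate ulp_status / ulp_word4 for a normal retry decision
 *	}
 */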