1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/kthread.h> 25 #include <linux/interrupt.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_device.h> 29 #include <scsi/scsi_host.h> 30 #include <scsi/scsi_transport_fc.h> 31 32 #include "lpfc_hw4.h" 33 #include "lpfc_hw.h" 34 #include "lpfc_nl.h" 35 #include "lpfc_disc.h" 36 #include "lpfc_sli.h" 37 #include "lpfc_sli4.h" 38 #include "lpfc_scsi.h" 39 #include "lpfc.h" 40 #include "lpfc_logmsg.h" 41 #include "lpfc_crtn.h" 42 #include "lpfc_vport.h" 43 #include "lpfc_debugfs.h" 44 45 /* AlpaArray for assignment of scsid for scan-down and bind_method */ 46 static uint8_t lpfcAlpaArray[] = { 47 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 48 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 49 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 50 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 51 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 52 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 53 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 54 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 55 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 56 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 57 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 58 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 59 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 60 }; 61 62 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 63 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 64 65 void 66 lpfc_terminate_rport_io(struct fc_rport *rport) 67 { 68 struct lpfc_rport_data *rdata; 69 struct lpfc_nodelist * ndlp; 70 struct lpfc_hba *phba; 71 72 rdata = rport->dd_data; 73 ndlp = rdata->pnode; 74 75 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 76 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 77 printk(KERN_ERR "Cannot find remote node" 78 " to terminate I/O Data x%x\n", 79 rport->port_id); 80 return; 81 } 82 83 phba = ndlp->phba; 84 85 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 86 "rport terminate: sid:x%x did:x%x flg:x%x", 87 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 88 89 if (ndlp->nlp_sid != NLP_NO_SID) { 90 lpfc_sli_abort_iocb(ndlp->vport, 91 &phba->sli.ring[phba->sli.fcp_ring], 92 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 93 } 94 } 95 96 /* 97 * This function 
will be called when dev_loss_tmo fire. 98 */ 99 void 100 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) 101 { 102 struct lpfc_rport_data *rdata; 103 struct lpfc_nodelist * ndlp; 104 struct lpfc_vport *vport; 105 struct lpfc_hba *phba; 106 struct lpfc_work_evt *evtp; 107 int put_node; 108 int put_rport; 109 110 rdata = rport->dd_data; 111 ndlp = rdata->pnode; 112 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 113 return; 114 115 vport = ndlp->vport; 116 phba = vport->phba; 117 118 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 119 "rport devlosscb: sid:x%x did:x%x flg:x%x", 120 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 121 122 /* Don't defer this if we are in the process of deleting the vport 123 * or unloading the driver. The unload will cleanup the node 124 * appropriately we just need to cleanup the ndlp rport info here. 125 */ 126 if (vport->load_flag & FC_UNLOADING) { 127 put_node = rdata->pnode != NULL; 128 put_rport = ndlp->rport != NULL; 129 rdata->pnode = NULL; 130 ndlp->rport = NULL; 131 if (put_node) 132 lpfc_nlp_put(ndlp); 133 if (put_rport) 134 put_device(&rport->dev); 135 return; 136 } 137 138 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 139 return; 140 141 evtp = &ndlp->dev_loss_evt; 142 143 if (!list_empty(&evtp->evt_listp)) 144 return; 145 146 spin_lock_irq(&phba->hbalock); 147 /* We need to hold the node by incrementing the reference 148 * count until this queued work is done 149 */ 150 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 151 if (evtp->evt_arg1) { 152 evtp->evt = LPFC_EVT_DEV_LOSS; 153 list_add_tail(&evtp->evt_listp, &phba->work_list); 154 lpfc_worker_wake_up(phba); 155 } 156 spin_unlock_irq(&phba->hbalock); 157 158 return; 159 } 160 161 /* 162 * This function is called from the worker thread when dev_loss_tmo 163 * expire. 164 */ 165 static void 166 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 167 { 168 struct lpfc_rport_data *rdata; 169 struct fc_rport *rport; 170 struct lpfc_vport *vport; 171 struct lpfc_hba *phba; 172 uint8_t *name; 173 int put_node; 174 int put_rport; 175 int warn_on = 0; 176 177 rport = ndlp->rport; 178 179 if (!rport) 180 return; 181 182 rdata = rport->dd_data; 183 name = (uint8_t *) &ndlp->nlp_portname; 184 vport = ndlp->vport; 185 phba = vport->phba; 186 187 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 188 "rport devlosstmo:did:x%x type:x%x id:x%x", 189 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 190 191 /* Don't defer this if we are in the process of deleting the vport 192 * or unloading the driver. The unload will cleanup the node 193 * appropriately we just need to cleanup the ndlp rport info here. 
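 * The put_node and put_rport flags below record whether rdata->pnode and
 * ndlp->rport were still linked; the matching lpfc_nlp_put() and
 * put_device() calls then drop the node and rport references that were
 * held for that linkage.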
194 */ 195 if (vport->load_flag & FC_UNLOADING) { 196 if (ndlp->nlp_sid != NLP_NO_SID) { 197 /* flush the target */ 198 lpfc_sli_abort_iocb(vport, 199 &phba->sli.ring[phba->sli.fcp_ring], 200 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 201 } 202 put_node = rdata->pnode != NULL; 203 put_rport = ndlp->rport != NULL; 204 rdata->pnode = NULL; 205 ndlp->rport = NULL; 206 if (put_node) 207 lpfc_nlp_put(ndlp); 208 if (put_rport) 209 put_device(&rport->dev); 210 return; 211 } 212 213 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 214 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 215 "0284 Devloss timeout Ignored on " 216 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 217 "NPort x%x\n", 218 *name, *(name+1), *(name+2), *(name+3), 219 *(name+4), *(name+5), *(name+6), *(name+7), 220 ndlp->nlp_DID); 221 return; 222 } 223 224 if (ndlp->nlp_type & NLP_FABRIC) { 225 /* We will clean up these Nodes in linkup */ 226 put_node = rdata->pnode != NULL; 227 put_rport = ndlp->rport != NULL; 228 rdata->pnode = NULL; 229 ndlp->rport = NULL; 230 if (put_node) 231 lpfc_nlp_put(ndlp); 232 if (put_rport) 233 put_device(&rport->dev); 234 return; 235 } 236 237 if (ndlp->nlp_sid != NLP_NO_SID) { 238 warn_on = 1; 239 /* flush the target */ 240 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 241 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 242 } 243 244 if (warn_on) { 245 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 246 "0203 Devloss timeout on " 247 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 248 "NPort x%06x Data: x%x x%x x%x\n", 249 *name, *(name+1), *(name+2), *(name+3), 250 *(name+4), *(name+5), *(name+6), *(name+7), 251 ndlp->nlp_DID, ndlp->nlp_flag, 252 ndlp->nlp_state, ndlp->nlp_rpi); 253 } else { 254 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 255 "0204 Devloss timeout on " 256 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 257 "NPort x%06x Data: x%x x%x x%x\n", 258 *name, *(name+1), *(name+2), *(name+3), 259 *(name+4), *(name+5), *(name+6), *(name+7), 260 ndlp->nlp_DID, ndlp->nlp_flag, 261 ndlp->nlp_state, ndlp->nlp_rpi); 262 } 263 264 put_node = rdata->pnode != NULL; 265 put_rport = ndlp->rport != NULL; 266 rdata->pnode = NULL; 267 ndlp->rport = NULL; 268 if (put_node) 269 lpfc_nlp_put(ndlp); 270 if (put_rport) 271 put_device(&rport->dev); 272 273 if (!(vport->load_flag & FC_UNLOADING) && 274 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 275 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 276 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) 277 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 278 279 lpfc_unregister_unused_fcf(phba); 280 } 281 282 /** 283 * lpfc_alloc_fast_evt - Allocates data structure for posting event 284 * @phba: Pointer to hba context object. 285 * 286 * This function is called from the functions which need to post 287 * events from interrupt context. This function allocates data 288 * structure required for posting event. It also keeps track of 289 * number of events pending and prevent event storm when there are 290 * too many events. 
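 * Returns a pointer to the new event on success, or NULL if the allocation
 * fails or more than LPFC_MAX_EVT_COUNT events are already outstanding.
 * A minimal caller sketch (illustrative only; it mirrors how
 * lpfc_send_fastpath_evt() expects the event to be queued to the worker
 * thread):
 *
 *	evt = lpfc_alloc_fast_evt(phba);
 *	if (evt) {
 *		evt->vport = vport;
 *		... fill in the evt->un payload, event_type and subcategory ...
 *		spin_lock_irqsave(&phba->hbalock, flags);
 *		list_add_tail(&evt->work_evt.evt_listp, &phba->work_list);
 *		spin_unlock_irqrestore(&phba->hbalock, flags);
 *		lpfc_worker_wake_up(phba);
 *	}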
291 **/ 292 struct lpfc_fast_path_event * 293 lpfc_alloc_fast_evt(struct lpfc_hba *phba) { 294 struct lpfc_fast_path_event *ret; 295 296 /* If there are lot of fast event do not exhaust memory due to this */ 297 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) 298 return NULL; 299 300 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 301 GFP_ATOMIC); 302 if (ret) { 303 atomic_inc(&phba->fast_event_count); 304 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 305 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 306 } 307 return ret; 308 } 309 310 /** 311 * lpfc_free_fast_evt - Frees event data structure 312 * @phba: Pointer to hba context object. 313 * @evt: Event object which need to be freed. 314 * 315 * This function frees the data structure required for posting 316 * events. 317 **/ 318 void 319 lpfc_free_fast_evt(struct lpfc_hba *phba, 320 struct lpfc_fast_path_event *evt) { 321 322 atomic_dec(&phba->fast_event_count); 323 kfree(evt); 324 } 325 326 /** 327 * lpfc_send_fastpath_evt - Posts events generated from fast path 328 * @phba: Pointer to hba context object. 329 * @evtp: Event data structure. 330 * 331 * This function is called from worker thread, when the interrupt 332 * context need to post an event. This function posts the event 333 * to fc transport netlink interface. 334 **/ 335 static void 336 lpfc_send_fastpath_evt(struct lpfc_hba *phba, 337 struct lpfc_work_evt *evtp) 338 { 339 unsigned long evt_category, evt_sub_category; 340 struct lpfc_fast_path_event *fast_evt_data; 341 char *evt_data; 342 uint32_t evt_data_size; 343 struct Scsi_Host *shost; 344 345 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, 346 work_evt); 347 348 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; 349 evt_sub_category = (unsigned long) fast_evt_data->un. 350 fabric_evt.subcategory; 351 shost = lpfc_shost_from_vport(fast_evt_data->vport); 352 if (evt_category == FC_REG_FABRIC_EVENT) { 353 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { 354 evt_data = (char *) &fast_evt_data->un.read_check_error; 355 evt_data_size = sizeof(fast_evt_data->un. 356 read_check_error); 357 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 358 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { 359 evt_data = (char *) &fast_evt_data->un.fabric_evt; 360 evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 361 } else { 362 lpfc_free_fast_evt(phba, fast_evt_data); 363 return; 364 } 365 } else if (evt_category == FC_REG_SCSI_EVENT) { 366 switch (evt_sub_category) { 367 case LPFC_EVENT_QFULL: 368 case LPFC_EVENT_DEVBSY: 369 evt_data = (char *) &fast_evt_data->un.scsi_evt; 370 evt_data_size = sizeof(fast_evt_data->un.scsi_evt); 371 break; 372 case LPFC_EVENT_CHECK_COND: 373 evt_data = (char *) &fast_evt_data->un.check_cond_evt; 374 evt_data_size = sizeof(fast_evt_data->un. 375 check_cond_evt); 376 break; 377 case LPFC_EVENT_VARQUEDEPTH: 378 evt_data = (char *) &fast_evt_data->un.queue_depth_evt; 379 evt_data_size = sizeof(fast_evt_data->un. 
380 queue_depth_evt); 381 break; 382 default: 383 lpfc_free_fast_evt(phba, fast_evt_data); 384 return; 385 } 386 } else { 387 lpfc_free_fast_evt(phba, fast_evt_data); 388 return; 389 } 390 391 fc_host_post_vendor_event(shost, 392 fc_get_event_number(), 393 evt_data_size, 394 evt_data, 395 LPFC_NL_VENDOR_ID); 396 397 lpfc_free_fast_evt(phba, fast_evt_data); 398 return; 399 } 400 401 static void 402 lpfc_work_list_done(struct lpfc_hba *phba) 403 { 404 struct lpfc_work_evt *evtp = NULL; 405 struct lpfc_nodelist *ndlp; 406 int free_evt; 407 408 spin_lock_irq(&phba->hbalock); 409 while (!list_empty(&phba->work_list)) { 410 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 411 evt_listp); 412 spin_unlock_irq(&phba->hbalock); 413 free_evt = 1; 414 switch (evtp->evt) { 415 case LPFC_EVT_ELS_RETRY: 416 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 417 lpfc_els_retry_delay_handler(ndlp); 418 free_evt = 0; /* evt is part of ndlp */ 419 /* decrement the node reference count held 420 * for this queued work 421 */ 422 lpfc_nlp_put(ndlp); 423 break; 424 case LPFC_EVT_DEV_LOSS: 425 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 426 lpfc_dev_loss_tmo_handler(ndlp); 427 free_evt = 0; 428 /* decrement the node reference count held for 429 * this queued work 430 */ 431 lpfc_nlp_put(ndlp); 432 break; 433 case LPFC_EVT_ONLINE: 434 if (phba->link_state < LPFC_LINK_DOWN) 435 *(int *) (evtp->evt_arg1) = lpfc_online(phba); 436 else 437 *(int *) (evtp->evt_arg1) = 0; 438 complete((struct completion *)(evtp->evt_arg2)); 439 break; 440 case LPFC_EVT_OFFLINE_PREP: 441 if (phba->link_state >= LPFC_LINK_DOWN) 442 lpfc_offline_prep(phba); 443 *(int *)(evtp->evt_arg1) = 0; 444 complete((struct completion *)(evtp->evt_arg2)); 445 break; 446 case LPFC_EVT_OFFLINE: 447 lpfc_offline(phba); 448 lpfc_sli_brdrestart(phba); 449 *(int *)(evtp->evt_arg1) = 450 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); 451 lpfc_unblock_mgmt_io(phba); 452 complete((struct completion *)(evtp->evt_arg2)); 453 break; 454 case LPFC_EVT_WARM_START: 455 lpfc_offline(phba); 456 lpfc_reset_barrier(phba); 457 lpfc_sli_brdreset(phba); 458 lpfc_hba_down_post(phba); 459 *(int *)(evtp->evt_arg1) = 460 lpfc_sli_brdready(phba, HS_MBRDY); 461 lpfc_unblock_mgmt_io(phba); 462 complete((struct completion *)(evtp->evt_arg2)); 463 break; 464 case LPFC_EVT_KILL: 465 lpfc_offline(phba); 466 *(int *)(evtp->evt_arg1) 467 = (phba->pport->stopped) 468 ? 
0 : lpfc_sli_brdkill(phba); 469 lpfc_unblock_mgmt_io(phba); 470 complete((struct completion *)(evtp->evt_arg2)); 471 break; 472 case LPFC_EVT_FASTPATH_MGMT_EVT: 473 lpfc_send_fastpath_evt(phba, evtp); 474 free_evt = 0; 475 break; 476 } 477 if (free_evt) 478 kfree(evtp); 479 spin_lock_irq(&phba->hbalock); 480 } 481 spin_unlock_irq(&phba->hbalock); 482 483 } 484 485 static void 486 lpfc_work_done(struct lpfc_hba *phba) 487 { 488 struct lpfc_sli_ring *pring; 489 uint32_t ha_copy, status, control, work_port_events; 490 struct lpfc_vport **vports; 491 struct lpfc_vport *vport; 492 int i; 493 494 spin_lock_irq(&phba->hbalock); 495 ha_copy = phba->work_ha; 496 phba->work_ha = 0; 497 spin_unlock_irq(&phba->hbalock); 498 499 /* First, try to post the next mailbox command to SLI4 device */ 500 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) 501 lpfc_sli4_post_async_mbox(phba); 502 503 if (ha_copy & HA_ERATT) 504 /* Handle the error attention event */ 505 lpfc_handle_eratt(phba); 506 507 if (ha_copy & HA_MBATT) 508 lpfc_sli_handle_mb_event(phba); 509 510 if (ha_copy & HA_LATT) 511 lpfc_handle_latt(phba); 512 513 /* Process SLI4 events */ 514 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 515 if (phba->hba_flag & FCP_XRI_ABORT_EVENT) 516 lpfc_sli4_fcp_xri_abort_event_proc(phba); 517 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 518 lpfc_sli4_els_xri_abort_event_proc(phba); 519 if (phba->hba_flag & ASYNC_EVENT) 520 lpfc_sli4_async_event_proc(phba); 521 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { 522 spin_lock_irq(&phba->hbalock); 523 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; 524 spin_unlock_irq(&phba->hbalock); 525 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 526 } 527 if (phba->hba_flag & HBA_RECEIVE_BUFFER) 528 lpfc_sli4_handle_received_buffer(phba); 529 } 530 531 vports = lpfc_create_vport_work_array(phba); 532 if (vports != NULL) 533 for (i = 0; i <= phba->max_vports; i++) { 534 /* 535 * We could have no vports in array if unloading, so if 536 * this happens then just use the pport 537 */ 538 if (vports[i] == NULL && i == 0) 539 vport = phba->pport; 540 else 541 vport = vports[i]; 542 if (vport == NULL) 543 break; 544 spin_lock_irq(&vport->work_port_lock); 545 work_port_events = vport->work_port_events; 546 vport->work_port_events &= ~work_port_events; 547 spin_unlock_irq(&vport->work_port_lock); 548 if (work_port_events & WORKER_DISC_TMO) 549 lpfc_disc_timeout_handler(vport); 550 if (work_port_events & WORKER_ELS_TMO) 551 lpfc_els_timeout_handler(vport); 552 if (work_port_events & WORKER_HB_TMO) 553 lpfc_hb_timeout_handler(phba); 554 if (work_port_events & WORKER_MBOX_TMO) 555 lpfc_mbox_timeout_handler(phba); 556 if (work_port_events & WORKER_FABRIC_BLOCK_TMO) 557 lpfc_unblock_fabric_iocbs(phba); 558 if (work_port_events & WORKER_FDMI_TMO) 559 lpfc_fdmi_timeout_handler(vport); 560 if (work_port_events & WORKER_RAMP_DOWN_QUEUE) 561 lpfc_ramp_down_queue_handler(phba); 562 if (work_port_events & WORKER_RAMP_UP_QUEUE) 563 lpfc_ramp_up_queue_handler(phba); 564 } 565 lpfc_destroy_vport_work_array(phba, vports); 566 567 pring = &phba->sli.ring[LPFC_ELS_RING]; 568 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 569 status >>= (4*LPFC_ELS_RING); 570 if ((status & HA_RXMASK) 571 || (pring->flag & LPFC_DEFERRED_RING_EVENT)) { 572 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 573 pring->flag |= LPFC_DEFERRED_RING_EVENT; 574 /* Set the lpfc data pending flag */ 575 set_bit(LPFC_DATA_READY, &phba->data_flags); 576 } else { 577 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 578 lpfc_sli_handle_slow_ring_event(phba, 
pring, 579 (status & 580 HA_RXMASK)); 581 } 582 /* 583 * Turn on Ring interrupts 584 */ 585 if (phba->sli_rev <= LPFC_SLI_REV3) { 586 spin_lock_irq(&phba->hbalock); 587 control = readl(phba->HCregaddr); 588 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 589 lpfc_debugfs_slow_ring_trc(phba, 590 "WRK Enable ring: cntl:x%x hacopy:x%x", 591 control, ha_copy, 0); 592 593 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 594 writel(control, phba->HCregaddr); 595 readl(phba->HCregaddr); /* flush */ 596 } else { 597 lpfc_debugfs_slow_ring_trc(phba, 598 "WRK Ring ok: cntl:x%x hacopy:x%x", 599 control, ha_copy, 0); 600 } 601 spin_unlock_irq(&phba->hbalock); 602 } 603 } 604 lpfc_work_list_done(phba); 605 } 606 607 int 608 lpfc_do_work(void *p) 609 { 610 struct lpfc_hba *phba = p; 611 int rc; 612 613 set_user_nice(current, -20); 614 phba->data_flags = 0; 615 616 while (!kthread_should_stop()) { 617 /* wait and check worker queue activities */ 618 rc = wait_event_interruptible(phba->work_waitq, 619 (test_and_clear_bit(LPFC_DATA_READY, 620 &phba->data_flags) 621 || kthread_should_stop())); 622 /* Signal wakeup shall terminate the worker thread */ 623 if (rc) { 624 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 625 "0433 Wakeup on signal: rc=x%x\n", rc); 626 break; 627 } 628 629 /* Attend pending lpfc data processing */ 630 lpfc_work_done(phba); 631 } 632 phba->worker_thread = NULL; 633 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 634 "0432 Worker thread stopped.\n"); 635 return 0; 636 } 637 638 /* 639 * This is only called to handle FC worker events. Since this a rare 640 * occurance, we allocate a struct lpfc_work_evt structure here instead of 641 * embedding it in the IOCB. 642 */ 643 int 644 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, 645 uint32_t evt) 646 { 647 struct lpfc_work_evt *evtp; 648 unsigned long flags; 649 650 /* 651 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 652 * be queued to worker thread for processing 653 */ 654 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); 655 if (!evtp) 656 return 0; 657 658 evtp->evt_arg1 = arg1; 659 evtp->evt_arg2 = arg2; 660 evtp->evt = evt; 661 662 spin_lock_irqsave(&phba->hbalock, flags); 663 list_add_tail(&evtp->evt_listp, &phba->work_list); 664 spin_unlock_irqrestore(&phba->hbalock, flags); 665 666 lpfc_worker_wake_up(phba); 667 668 return 1; 669 } 670 671 void 672 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) 673 { 674 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 675 struct lpfc_hba *phba = vport->phba; 676 struct lpfc_nodelist *ndlp, *next_ndlp; 677 int rc; 678 679 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 680 if (!NLP_CHK_NODE_ACT(ndlp)) 681 continue; 682 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 683 continue; 684 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 685 ((vport->port_type == LPFC_NPIV_PORT) && 686 (ndlp->nlp_DID == NameServer_DID))) 687 lpfc_unreg_rpi(vport, ndlp); 688 689 /* Leave Fabric nodes alone on link down */ 690 if (!remove && ndlp->nlp_type & NLP_FABRIC) 691 continue; 692 rc = lpfc_disc_state_machine(vport, ndlp, NULL, 693 remove 694 ? 
NLP_EVT_DEVICE_RM 695 : NLP_EVT_DEVICE_RECOVERY); 696 } 697 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 698 lpfc_mbx_unreg_vpi(vport); 699 spin_lock_irq(shost->host_lock); 700 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 701 spin_unlock_irq(shost->host_lock); 702 } 703 } 704 705 void 706 lpfc_port_link_failure(struct lpfc_vport *vport) 707 { 708 /* Cleanup any outstanding RSCN activity */ 709 lpfc_els_flush_rscn(vport); 710 711 /* Cleanup any outstanding ELS commands */ 712 lpfc_els_flush_cmd(vport); 713 714 lpfc_cleanup_rpis(vport, 0); 715 716 /* Turn off discovery timer if its running */ 717 lpfc_can_disctmo(vport); 718 } 719 720 void 721 lpfc_linkdown_port(struct lpfc_vport *vport) 722 { 723 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 724 725 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); 726 727 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 728 "Link Down: state:x%x rtry:x%x flg:x%x", 729 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 730 731 lpfc_port_link_failure(vport); 732 733 } 734 735 int 736 lpfc_linkdown(struct lpfc_hba *phba) 737 { 738 struct lpfc_vport *vport = phba->pport; 739 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 740 struct lpfc_vport **vports; 741 LPFC_MBOXQ_t *mb; 742 int i; 743 744 if (phba->link_state == LPFC_LINK_DOWN) 745 return 0; 746 spin_lock_irq(&phba->hbalock); 747 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); 748 if (phba->link_state > LPFC_LINK_DOWN) { 749 phba->link_state = LPFC_LINK_DOWN; 750 phba->pport->fc_flag &= ~FC_LBIT; 751 } 752 spin_unlock_irq(&phba->hbalock); 753 vports = lpfc_create_vport_work_array(phba); 754 if (vports != NULL) 755 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 756 /* Issue a LINK DOWN event to all nodes */ 757 lpfc_linkdown_port(vports[i]); 758 } 759 lpfc_destroy_vport_work_array(phba, vports); 760 /* Clean up any firmware default rpi's */ 761 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 762 if (mb) { 763 lpfc_unreg_did(phba, 0xffff, 0xffffffff, mb); 764 mb->vport = vport; 765 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 766 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 767 == MBX_NOT_FINISHED) { 768 mempool_free(mb, phba->mbox_mem_pool); 769 } 770 } 771 772 /* Setup myDID for link up if we are in pt2pt mode */ 773 if (phba->pport->fc_flag & FC_PT2PT) { 774 phba->pport->fc_myDID = 0; 775 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 776 if (mb) { 777 lpfc_config_link(phba, mb); 778 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 779 mb->vport = vport; 780 if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) 781 == MBX_NOT_FINISHED) { 782 mempool_free(mb, phba->mbox_mem_pool); 783 } 784 } 785 spin_lock_irq(shost->host_lock); 786 phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI); 787 spin_unlock_irq(shost->host_lock); 788 } 789 790 return 0; 791 } 792 793 static void 794 lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) 795 { 796 struct lpfc_nodelist *ndlp; 797 798 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 799 if (!NLP_CHK_NODE_ACT(ndlp)) 800 continue; 801 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 802 continue; 803 if (ndlp->nlp_type & NLP_FABRIC) { 804 /* On Linkup its safe to clean up the ndlp 805 * from Fabric connections. 806 */ 807 if (ndlp->nlp_DID != Fabric_DID) 808 lpfc_unreg_rpi(vport, ndlp); 809 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); 810 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 811 /* Fail outstanding IO now since device is 812 * marked for PLOGI. 
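 * Unregistering the RPI below also causes any I/O still outstanding on
 * that RPI to be failed back, so nothing is left pending when the node
 * is PLOGIed again.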
813 */ 814 lpfc_unreg_rpi(vport, ndlp); 815 } 816 } 817 } 818 819 static void 820 lpfc_linkup_port(struct lpfc_vport *vport) 821 { 822 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 823 struct lpfc_hba *phba = vport->phba; 824 825 if ((vport->load_flag & FC_UNLOADING) != 0) 826 return; 827 828 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 829 "Link Up: top:x%x speed:x%x flg:x%x", 830 phba->fc_topology, phba->fc_linkspeed, phba->link_flag); 831 832 /* If NPIV is not enabled, only bring the physical port up */ 833 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 834 (vport != phba->pport)) 835 return; 836 837 fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); 838 839 spin_lock_irq(shost->host_lock); 840 vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY | 841 FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY); 842 vport->fc_flag |= FC_NDISC_ACTIVE; 843 vport->fc_ns_retry = 0; 844 spin_unlock_irq(shost->host_lock); 845 846 if (vport->fc_flag & FC_LBIT) 847 lpfc_linkup_cleanup_nodes(vport); 848 849 } 850 851 static int 852 lpfc_linkup(struct lpfc_hba *phba) 853 { 854 struct lpfc_vport **vports; 855 int i; 856 857 phba->link_state = LPFC_LINK_UP; 858 859 /* Unblock fabric iocbs if they are blocked */ 860 clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); 861 del_timer_sync(&phba->fabric_block_timer); 862 863 vports = lpfc_create_vport_work_array(phba); 864 if (vports != NULL) 865 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) 866 lpfc_linkup_port(vports[i]); 867 lpfc_destroy_vport_work_array(phba, vports); 868 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 869 (phba->sli_rev < LPFC_SLI_REV4)) 870 lpfc_issue_clear_la(phba, phba->pport); 871 872 return 0; 873 } 874 875 /* 876 * This routine handles processing a CLEAR_LA mailbox 877 * command upon completion. It is setup in the LPFC_MBOXQ 878 * as the completion routine when the command is 879 * handed off to the SLI layer. 
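 * On a clean completion (or the ignored 0x1601 status) it re-enables Link
 * Attention processing and, for the physical port, moves the link state to
 * LPFC_HBA_READY; any other mailbox error marks the HBA in error and ends
 * discovery.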
880 */ 881 static void 882 lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 883 { 884 struct lpfc_vport *vport = pmb->vport; 885 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 886 struct lpfc_sli *psli = &phba->sli; 887 MAILBOX_t *mb = &pmb->u.mb; 888 uint32_t control; 889 890 /* Since we don't do discovery right now, turn these off here */ 891 psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 892 psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 893 psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT; 894 895 /* Check for error */ 896 if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) { 897 /* CLEAR_LA mbox error <mbxStatus> state <hba_state> */ 898 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 899 "0320 CLEAR_LA mbxStatus error x%x hba " 900 "state x%x\n", 901 mb->mbxStatus, vport->port_state); 902 phba->link_state = LPFC_HBA_ERROR; 903 goto out; 904 } 905 906 if (vport->port_type == LPFC_PHYSICAL_PORT) 907 phba->link_state = LPFC_HBA_READY; 908 909 spin_lock_irq(&phba->hbalock); 910 psli->sli_flag |= LPFC_PROCESS_LA; 911 control = readl(phba->HCregaddr); 912 control |= HC_LAINT_ENA; 913 writel(control, phba->HCregaddr); 914 readl(phba->HCregaddr); /* flush */ 915 spin_unlock_irq(&phba->hbalock); 916 mempool_free(pmb, phba->mbox_mem_pool); 917 return; 918 919 out: 920 /* Device Discovery completes */ 921 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 922 "0225 Device Discovery completes\n"); 923 mempool_free(pmb, phba->mbox_mem_pool); 924 925 spin_lock_irq(shost->host_lock); 926 vport->fc_flag &= ~FC_ABORT_DISCOVERY; 927 spin_unlock_irq(shost->host_lock); 928 929 lpfc_can_disctmo(vport); 930 931 /* turn on Link Attention interrupts */ 932 933 spin_lock_irq(&phba->hbalock); 934 psli->sli_flag |= LPFC_PROCESS_LA; 935 control = readl(phba->HCregaddr); 936 control |= HC_LAINT_ENA; 937 writel(control, phba->HCregaddr); 938 readl(phba->HCregaddr); /* flush */ 939 spin_unlock_irq(&phba->hbalock); 940 941 return; 942 } 943 944 945 static void 946 lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 947 { 948 struct lpfc_vport *vport = pmb->vport; 949 950 if (pmb->u.mb.mbxStatus) 951 goto out; 952 953 mempool_free(pmb, phba->mbox_mem_pool); 954 955 if (phba->fc_topology == TOPOLOGY_LOOP && 956 vport->fc_flag & FC_PUBLIC_LOOP && 957 !(vport->fc_flag & FC_LBIT)) { 958 /* Need to wait for FAN - use discovery timer 959 * for timeout. port_state is identically 960 * LPFC_LOCAL_CFG_LINK while waiting for FAN 961 */ 962 lpfc_set_disctmo(vport); 963 return; 964 } 965 966 /* Start discovery by sending a FLOGI. 
port_state is identically 967 * LPFC_FLOGI while waiting for FLOGI cmpl 968 */ 969 if (vport->port_state != LPFC_FLOGI) { 970 lpfc_initial_flogi(vport); 971 } 972 return; 973 974 out: 975 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 976 "0306 CONFIG_LINK mbxStatus error x%x " 977 "HBA state x%x\n", 978 pmb->u.mb.mbxStatus, vport->port_state); 979 mempool_free(pmb, phba->mbox_mem_pool); 980 981 lpfc_linkdown(phba); 982 983 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 984 "0200 CONFIG_LINK bad hba state x%x\n", 985 vport->port_state); 986 987 lpfc_issue_clear_la(phba, vport); 988 return; 989 } 990 991 static void 992 lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 993 { 994 struct lpfc_vport *vport = mboxq->vport; 995 unsigned long flags; 996 997 if (mboxq->u.mb.mbxStatus) { 998 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 999 "2017 REG_FCFI mbxStatus error x%x " 1000 "HBA state x%x\n", 1001 mboxq->u.mb.mbxStatus, vport->port_state); 1002 mempool_free(mboxq, phba->mbox_mem_pool); 1003 return; 1004 } 1005 1006 /* Start FCoE discovery by sending a FLOGI. */ 1007 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); 1008 /* Set the FCFI registered flag */ 1009 spin_lock_irqsave(&phba->hbalock, flags); 1010 phba->fcf.fcf_flag |= FCF_REGISTERED; 1011 spin_unlock_irqrestore(&phba->hbalock, flags); 1012 if (vport->port_state != LPFC_FLOGI) { 1013 spin_lock_irqsave(&phba->hbalock, flags); 1014 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1015 spin_unlock_irqrestore(&phba->hbalock, flags); 1016 lpfc_initial_flogi(vport); 1017 } 1018 1019 mempool_free(mboxq, phba->mbox_mem_pool); 1020 return; 1021 } 1022 1023 /** 1024 * lpfc_fab_name_match - Check if the fcf fabric name match. 1025 * @fab_name: pointer to fabric name. 1026 * @new_fcf_record: pointer to fcf record. 1027 * 1028 * This routine compare the fcf record's fabric name with provided 1029 * fabric name. If the fabric name are identical this function 1030 * returns 1 else return 0. 1031 **/ 1032 static uint32_t 1033 lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) 1034 { 1035 if ((fab_name[0] == 1036 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && 1037 (fab_name[1] == 1038 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && 1039 (fab_name[2] == 1040 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && 1041 (fab_name[3] == 1042 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && 1043 (fab_name[4] == 1044 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && 1045 (fab_name[5] == 1046 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && 1047 (fab_name[6] == 1048 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && 1049 (fab_name[7] == 1050 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) 1051 return 1; 1052 else 1053 return 0; 1054 } 1055 1056 /** 1057 * lpfc_mac_addr_match - Check if the fcf mac address match. 1058 * @phba: pointer to lpfc hba data structure. 1059 * @new_fcf_record: pointer to fcf record. 1060 * 1061 * This routine compare the fcf record's mac address with HBA's 1062 * FCF mac address. If the mac addresses are identical this function 1063 * returns 1 else return 0. 
1064 **/ 1065 static uint32_t 1066 lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1067 { 1068 if ((phba->fcf.mac_addr[0] == 1069 bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && 1070 (phba->fcf.mac_addr[1] == 1071 bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && 1072 (phba->fcf.mac_addr[2] == 1073 bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && 1074 (phba->fcf.mac_addr[3] == 1075 bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && 1076 (phba->fcf.mac_addr[4] == 1077 bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && 1078 (phba->fcf.mac_addr[5] == 1079 bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) 1080 return 1; 1081 else 1082 return 0; 1083 } 1084 1085 /** 1086 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. 1087 * @phba: pointer to lpfc hba data structure. 1088 * @new_fcf_record: pointer to fcf record. 1089 * 1090 * This routine copies the FCF information from the FCF 1091 * record to lpfc_hba data structure. 1092 **/ 1093 static void 1094 lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) 1095 { 1096 phba->fcf.fabric_name[0] = 1097 bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); 1098 phba->fcf.fabric_name[1] = 1099 bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); 1100 phba->fcf.fabric_name[2] = 1101 bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); 1102 phba->fcf.fabric_name[3] = 1103 bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); 1104 phba->fcf.fabric_name[4] = 1105 bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); 1106 phba->fcf.fabric_name[5] = 1107 bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); 1108 phba->fcf.fabric_name[6] = 1109 bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); 1110 phba->fcf.fabric_name[7] = 1111 bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); 1112 phba->fcf.mac_addr[0] = 1113 bf_get(lpfc_fcf_record_mac_0, new_fcf_record); 1114 phba->fcf.mac_addr[1] = 1115 bf_get(lpfc_fcf_record_mac_1, new_fcf_record); 1116 phba->fcf.mac_addr[2] = 1117 bf_get(lpfc_fcf_record_mac_2, new_fcf_record); 1118 phba->fcf.mac_addr[3] = 1119 bf_get(lpfc_fcf_record_mac_3, new_fcf_record); 1120 phba->fcf.mac_addr[4] = 1121 bf_get(lpfc_fcf_record_mac_4, new_fcf_record); 1122 phba->fcf.mac_addr[5] = 1123 bf_get(lpfc_fcf_record_mac_5, new_fcf_record); 1124 phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 1125 phba->fcf.priority = new_fcf_record->fip_priority; 1126 } 1127 1128 /** 1129 * lpfc_register_fcf - Register the FCF with hba. 1130 * @phba: pointer to lpfc hba data structure. 1131 * 1132 * This routine issues a register fcfi mailbox command to register 1133 * the fcf with HBA. 1134 **/ 1135 static void 1136 lpfc_register_fcf(struct lpfc_hba *phba) 1137 { 1138 LPFC_MBOXQ_t *fcf_mbxq; 1139 int rc; 1140 unsigned long flags; 1141 1142 spin_lock_irqsave(&phba->hbalock, flags); 1143 1144 /* If the FCF is not availabe do nothing. 
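 * FCF_AVAILABLE is set by lpfc_mbx_cmpl_read_fcf_record() once a usable
 * entry from the FCF table has been selected for phba->fcf.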
*/ 1145 if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { 1146 spin_unlock_irqrestore(&phba->hbalock, flags); 1147 return; 1148 } 1149 1150 /* The FCF is already registered, start discovery */ 1151 if (phba->fcf.fcf_flag & FCF_REGISTERED) { 1152 phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); 1153 spin_unlock_irqrestore(&phba->hbalock, flags); 1154 if (phba->pport->port_state != LPFC_FLOGI) 1155 lpfc_initial_flogi(phba->pport); 1156 return; 1157 } 1158 spin_unlock_irqrestore(&phba->hbalock, flags); 1159 1160 fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, 1161 GFP_KERNEL); 1162 if (!fcf_mbxq) 1163 return; 1164 1165 lpfc_reg_fcfi(phba, fcf_mbxq); 1166 fcf_mbxq->vport = phba->pport; 1167 fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; 1168 rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); 1169 if (rc == MBX_NOT_FINISHED) 1170 mempool_free(fcf_mbxq, phba->mbox_mem_pool); 1171 1172 return; 1173 } 1174 1175 /** 1176 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. 1177 * @phba: pointer to lpfc hba data structure. 1178 * @new_fcf_record: pointer to fcf record. 1179 * @boot_flag: Indicates if this record used by boot bios. 1180 * @addr_mode: The address mode to be used by this FCF 1181 * 1182 * This routine compare the fcf record with connect list obtained from the 1183 * config region to decide if this FCF can be used for SAN discovery. It returns 1184 * 1 if this record can be used for SAN discovery else return zero. If this FCF 1185 * record can be used for SAN discovery, the boot_flag will indicate if this FCF 1186 * is used by boot bios and addr_mode will indicate the addressing mode to be 1187 * used for this FCF when the function returns. 1188 * If the FCF record need to be used with a particular vlan id, the vlan is 1189 * set in the vlan_id on return of the function. If not VLAN tagging need to 1190 * be used with the FCF vlan_id will be set to 0xFFFF; 1191 **/ 1192 static int 1193 lpfc_match_fcf_conn_list(struct lpfc_hba *phba, 1194 struct fcf_record *new_fcf_record, 1195 uint32_t *boot_flag, uint32_t *addr_mode, 1196 uint16_t *vlan_id) 1197 { 1198 struct lpfc_fcf_conn_entry *conn_entry; 1199 1200 /* If FCF not available return 0 */ 1201 if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) || 1202 !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record)) 1203 return 0; 1204 1205 if (!phba->cfg_enable_fip) { 1206 *boot_flag = 0; 1207 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1208 new_fcf_record); 1209 if (phba->valid_vlan) 1210 *vlan_id = phba->vlan_id; 1211 else 1212 *vlan_id = 0xFFFF; 1213 return 1; 1214 } 1215 1216 /* 1217 * If there are no FCF connection table entry, driver connect to all 1218 * FCFs. 1219 */ 1220 if (list_empty(&phba->fcf_conn_rec_list)) { 1221 *boot_flag = 0; 1222 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1223 new_fcf_record); 1224 1225 /* 1226 * When there are no FCF connect entries, use driver's default 1227 * addressing mode - FPMA. 
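 * (FPMA is the Fabric Provided MAC Address mode; SPMA, checked further
 * below, is the Server Provided MAC Address mode.)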
1228 */ 1229 if (*addr_mode & LPFC_FCF_FPMA) 1230 *addr_mode = LPFC_FCF_FPMA; 1231 1232 *vlan_id = 0xFFFF; 1233 return 1; 1234 } 1235 1236 list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { 1237 if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) 1238 continue; 1239 1240 if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && 1241 !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, 1242 new_fcf_record)) 1243 continue; 1244 1245 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { 1246 /* 1247 * If the vlan bit map does not have the bit set for the 1248 * vlan id to be used, then it is not a match. 1249 */ 1250 if (!(new_fcf_record->vlan_bitmap 1251 [conn_entry->conn_rec.vlan_tag / 8] & 1252 (1 << (conn_entry->conn_rec.vlan_tag % 8)))) 1253 continue; 1254 } 1255 1256 /* 1257 * If connection record does not support any addressing mode, 1258 * skip the FCF record. 1259 */ 1260 if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record) 1261 & (LPFC_FCF_FPMA | LPFC_FCF_SPMA))) 1262 continue; 1263 1264 /* 1265 * Check if the connection record specifies a required 1266 * addressing mode. 1267 */ 1268 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 1269 !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { 1270 1271 /* 1272 * If SPMA required but FCF not support this continue. 1273 */ 1274 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1275 !(bf_get(lpfc_fcf_record_mac_addr_prov, 1276 new_fcf_record) & LPFC_FCF_SPMA)) 1277 continue; 1278 1279 /* 1280 * If FPMA required but FCF not support this continue. 1281 */ 1282 if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1283 !(bf_get(lpfc_fcf_record_mac_addr_prov, 1284 new_fcf_record) & LPFC_FCF_FPMA)) 1285 continue; 1286 } 1287 1288 /* 1289 * This fcf record matches filtering criteria. 1290 */ 1291 if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) 1292 *boot_flag = 1; 1293 else 1294 *boot_flag = 0; 1295 1296 /* 1297 * If user did not specify any addressing mode, or if the 1298 * prefered addressing mode specified by user is not supported 1299 * by FCF, allow fabric to pick the addressing mode. 1300 */ 1301 *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, 1302 new_fcf_record); 1303 /* 1304 * If the user specified a required address mode, assign that 1305 * address mode 1306 */ 1307 if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 1308 (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) 1309 *addr_mode = (conn_entry->conn_rec.flags & 1310 FCFCNCT_AM_SPMA) ? 1311 LPFC_FCF_SPMA : LPFC_FCF_FPMA; 1312 /* 1313 * If the user specified a prefered address mode, use the 1314 * addr mode only if FCF support the addr_mode. 1315 */ 1316 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 1317 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && 1318 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1319 (*addr_mode & LPFC_FCF_SPMA)) 1320 *addr_mode = LPFC_FCF_SPMA; 1321 else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && 1322 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && 1323 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && 1324 (*addr_mode & LPFC_FCF_FPMA)) 1325 *addr_mode = LPFC_FCF_FPMA; 1326 1327 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1328 *vlan_id = conn_entry->conn_rec.vlan_tag; 1329 else 1330 *vlan_id = 0xFFFF; 1331 1332 return 1; 1333 } 1334 1335 return 0; 1336 } 1337 1338 /** 1339 * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 1340 * @phba: pointer to lpfc hba data structure. 1341 * @mboxq: pointer to mailbox object. 
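 * (The READ_FCF_RECORD response lives in a non-embedded SGE; each
 * completion carries one FCF table entry plus the index of the next
 * entry to read.)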
1342 * 1343 * This function iterates through all the fcf records available in 1344 * the HBA and chooses the optimal FCF record for discovery. After finding 1345 * the FCF for discovery it registers the FCF record and kicks off 1346 * discovery. 1347 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries to 1348 * use an FCF record which matches the fabric name and mac address of the 1349 * currently used FCF record. 1350 * If the driver supports only one FCF, it will try to use the FCF record 1351 * used by BOOT_BIOS. 1352 */ 1353 void 1354 lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1355 { 1356 void *virt_addr; 1357 dma_addr_t phys_addr; 1358 uint8_t *bytep; 1359 struct lpfc_mbx_sge sge; 1360 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1361 uint32_t shdr_status, shdr_add_status; 1362 union lpfc_sli4_cfg_shdr *shdr; 1363 struct fcf_record *new_fcf_record; 1364 int rc; 1365 uint32_t boot_flag, addr_mode; 1366 uint32_t next_fcf_index; 1367 unsigned long flags; 1368 uint16_t vlan_id; 1369 1370 /* Get the first SGE entry from the non-embedded DMA memory. This 1371 * routine only uses a single SGE. 1372 */ 1373 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 1374 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 1375 if (unlikely(!mboxq->sge_array)) { 1376 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1377 "2524 Failed to get the non-embedded SGE " 1378 "virtual address\n"); 1379 goto out; 1380 } 1381 virt_addr = mboxq->sge_array->addr[0]; 1382 1383 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1384 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1385 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 1386 &shdr->response); 1387 /* 1388 * The FCF Record was read and there is no reason for the driver 1389 * to maintain the FCF record data or memory. Instead, the driver just 1390 * needs to keep track of which FCFIs can be used. 1391 */ 1392 if (shdr_status || shdr_add_status) { 1393 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1394 "2521 READ_FCF_RECORD mailbox failed " 1395 "with status x%x add_status x%x, mbx\n", 1396 shdr_status, shdr_add_status); 1397 goto out; 1398 } 1399 /* Interpreting the returned information of FCF records */ 1400 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1401 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1402 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1403 next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1404 1405 new_fcf_record = (struct fcf_record *)(virt_addr + 1406 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1407 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1408 sizeof(struct fcf_record)); 1409 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 1410 1411 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, 1412 &boot_flag, &addr_mode, 1413 &vlan_id); 1414 /* 1415 * If the fcf record does not match the connect list entries, 1416 * read the next entry. 1417 */ 1418 if (!rc) 1419 goto read_next_fcf; 1420 /* 1421 * If this is not the first FCF discovery of the HBA, use the last 1422 * FCF record for the discovery. 
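 * The selection policy below: keep the in-use FCF only when the new
 * record matches its fabric name and MAC address; otherwise prefer a
 * record flagged for boot, then one with a lower (better) fip_priority
 * for the same fabric name, and finally just the first available record.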
1423 */ 1424 spin_lock_irqsave(&phba->hbalock, flags); 1425 if (phba->fcf.fcf_flag & FCF_IN_USE) { 1426 if (lpfc_fab_name_match(phba->fcf.fabric_name, 1427 new_fcf_record) && 1428 lpfc_mac_addr_match(phba, new_fcf_record)) { 1429 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1430 spin_unlock_irqrestore(&phba->hbalock, flags); 1431 goto out; 1432 } 1433 spin_unlock_irqrestore(&phba->hbalock, flags); 1434 goto read_next_fcf; 1435 } 1436 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 1437 /* 1438 * If the current FCF record does not have boot flag 1439 * set and new fcf record has boot flag set, use the 1440 * new fcf record. 1441 */ 1442 if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1443 /* Use this FCF record */ 1444 lpfc_copy_fcf_record(phba, new_fcf_record); 1445 phba->fcf.addr_mode = addr_mode; 1446 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1447 if (vlan_id != 0xFFFF) { 1448 phba->fcf.fcf_flag |= FCF_VALID_VLAN; 1449 phba->fcf.vlan_id = vlan_id; 1450 } 1451 spin_unlock_irqrestore(&phba->hbalock, flags); 1452 goto read_next_fcf; 1453 } 1454 /* 1455 * If the current FCF record has boot flag set and the 1456 * new FCF record does not have boot flag, read the next 1457 * FCF record. 1458 */ 1459 if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { 1460 spin_unlock_irqrestore(&phba->hbalock, flags); 1461 goto read_next_fcf; 1462 } 1463 /* 1464 * If there is a record with lower priority value for 1465 * the current FCF, use that record. 1466 */ 1467 if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) 1468 && (new_fcf_record->fip_priority < 1469 phba->fcf.priority)) { 1470 /* Use this FCF record */ 1471 lpfc_copy_fcf_record(phba, new_fcf_record); 1472 phba->fcf.addr_mode = addr_mode; 1473 if (vlan_id != 0xFFFF) { 1474 phba->fcf.fcf_flag |= FCF_VALID_VLAN; 1475 phba->fcf.vlan_id = vlan_id; 1476 } 1477 spin_unlock_irqrestore(&phba->hbalock, flags); 1478 goto read_next_fcf; 1479 } 1480 spin_unlock_irqrestore(&phba->hbalock, flags); 1481 goto read_next_fcf; 1482 } 1483 /* 1484 * This is the first available FCF record, use this 1485 * record. 1486 */ 1487 lpfc_copy_fcf_record(phba, new_fcf_record); 1488 phba->fcf.addr_mode = addr_mode; 1489 if (boot_flag) 1490 phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; 1491 phba->fcf.fcf_flag |= FCF_AVAILABLE; 1492 if (vlan_id != 0xFFFF) { 1493 phba->fcf.fcf_flag |= FCF_VALID_VLAN; 1494 phba->fcf.vlan_id = vlan_id; 1495 } 1496 spin_unlock_irqrestore(&phba->hbalock, flags); 1497 goto read_next_fcf; 1498 1499 read_next_fcf: 1500 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1501 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) 1502 lpfc_register_fcf(phba); 1503 else 1504 lpfc_sli4_read_fcf_record(phba, next_fcf_index); 1505 return; 1506 1507 out: 1508 lpfc_sli4_mbox_cmd_free(phba, mboxq); 1509 lpfc_register_fcf(phba); 1510 1511 return; 1512 } 1513 1514 /** 1515 * lpfc_start_fdiscs - send fdiscs for each vports on this port. 1516 * @phba: pointer to lpfc hba data structure. 1517 * 1518 * This function loops through the list of vports on the @phba and issues an 1519 * FDISC if possible. 
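 * Vports with no valid vpi are marked FC_VPORT_FAILED, every vport is set
 * to FC_VPORT_LINKDOWN on loop topologies, and FDISC is only issued when
 * the fabric reported NPIV support (LS_NPIV_FAB_SUPPORTED); otherwise the
 * vport is set to FC_VPORT_NO_FABRIC_SUPP.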
1520 */ 1521 void 1522 lpfc_start_fdiscs(struct lpfc_hba *phba) 1523 { 1524 struct lpfc_vport **vports; 1525 int i; 1526 1527 vports = lpfc_create_vport_work_array(phba); 1528 if (vports != NULL) { 1529 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 1530 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 1531 continue; 1532 /* There are no vpi for this vport */ 1533 if (vports[i]->vpi > phba->max_vpi) { 1534 lpfc_vport_set_state(vports[i], 1535 FC_VPORT_FAILED); 1536 continue; 1537 } 1538 if (phba->fc_topology == TOPOLOGY_LOOP) { 1539 lpfc_vport_set_state(vports[i], 1540 FC_VPORT_LINKDOWN); 1541 continue; 1542 } 1543 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 1544 lpfc_initial_fdisc(vports[i]); 1545 else { 1546 lpfc_vport_set_state(vports[i], 1547 FC_VPORT_NO_FABRIC_SUPP); 1548 lpfc_printf_vlog(vports[i], KERN_ERR, 1549 LOG_ELS, 1550 "0259 No NPIV " 1551 "Fabric support\n"); 1552 } 1553 } 1554 } 1555 lpfc_destroy_vport_work_array(phba, vports); 1556 } 1557 1558 void 1559 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 1560 { 1561 struct lpfc_dmabuf *dmabuf = mboxq->context1; 1562 struct lpfc_vport *vport = mboxq->vport; 1563 1564 if (mboxq->u.mb.mbxStatus) { 1565 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1566 "2018 REG_VFI mbxStatus error x%x " 1567 "HBA state x%x\n", 1568 mboxq->u.mb.mbxStatus, vport->port_state); 1569 if (phba->fc_topology == TOPOLOGY_LOOP) { 1570 /* FLOGI failed, use loop map to make discovery list */ 1571 lpfc_disc_list_loopmap(vport); 1572 /* Start discovery */ 1573 lpfc_disc_start(vport); 1574 goto fail_free_mem; 1575 } 1576 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1577 goto fail_free_mem; 1578 } 1579 /* Mark the vport has registered with its VFI */ 1580 vport->vfi_state |= LPFC_VFI_REGISTERED; 1581 1582 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 1583 lpfc_start_fdiscs(phba); 1584 lpfc_do_scr_ns_plogi(phba, vport); 1585 } 1586 1587 fail_free_mem: 1588 mempool_free(mboxq, phba->mbox_mem_pool); 1589 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 1590 kfree(dmabuf); 1591 return; 1592 } 1593 1594 static void 1595 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1596 { 1597 MAILBOX_t *mb = &pmb->u.mb; 1598 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 1599 struct lpfc_vport *vport = pmb->vport; 1600 1601 1602 /* Check for error */ 1603 if (mb->mbxStatus) { 1604 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 1605 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1606 "0319 READ_SPARAM mbxStatus error x%x " 1607 "hba state x%x>\n", 1608 mb->mbxStatus, vport->port_state); 1609 lpfc_linkdown(phba); 1610 goto out; 1611 } 1612 1613 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 1614 sizeof (struct serv_parm)); 1615 if (phba->cfg_soft_wwnn) 1616 u64_to_wwn(phba->cfg_soft_wwnn, 1617 vport->fc_sparam.nodeName.u.wwn); 1618 if (phba->cfg_soft_wwpn) 1619 u64_to_wwn(phba->cfg_soft_wwpn, 1620 vport->fc_sparam.portName.u.wwn); 1621 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 1622 sizeof(vport->fc_nodename)); 1623 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 1624 sizeof(vport->fc_portname)); 1625 if (vport->port_type == LPFC_PHYSICAL_PORT) { 1626 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 1627 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 1628 } 1629 1630 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1631 kfree(mp); 1632 mempool_free(pmb, phba->mbox_mem_pool); 1633 return; 1634 1635 out: 1636 pmb->context1 = NULL; 1637 
lpfc_mbuf_free(phba, mp->virt, mp->phys); 1638 kfree(mp); 1639 lpfc_issue_clear_la(phba, vport); 1640 mempool_free(pmb, phba->mbox_mem_pool); 1641 return; 1642 } 1643 1644 static void 1645 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) 1646 { 1647 struct lpfc_vport *vport = phba->pport; 1648 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 1649 int i; 1650 struct lpfc_dmabuf *mp; 1651 int rc; 1652 struct fcf_record *fcf_record; 1653 1654 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1655 1656 spin_lock_irq(&phba->hbalock); 1657 switch (la->UlnkSpeed) { 1658 case LA_1GHZ_LINK: 1659 phba->fc_linkspeed = LA_1GHZ_LINK; 1660 break; 1661 case LA_2GHZ_LINK: 1662 phba->fc_linkspeed = LA_2GHZ_LINK; 1663 break; 1664 case LA_4GHZ_LINK: 1665 phba->fc_linkspeed = LA_4GHZ_LINK; 1666 break; 1667 case LA_8GHZ_LINK: 1668 phba->fc_linkspeed = LA_8GHZ_LINK; 1669 break; 1670 case LA_10GHZ_LINK: 1671 phba->fc_linkspeed = LA_10GHZ_LINK; 1672 break; 1673 default: 1674 phba->fc_linkspeed = LA_UNKNW_LINK; 1675 break; 1676 } 1677 1678 phba->fc_topology = la->topology; 1679 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 1680 1681 if (phba->fc_topology == TOPOLOGY_LOOP) { 1682 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 1683 1684 if (phba->cfg_enable_npiv) 1685 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1686 "1309 Link Up Event npiv not supported in loop " 1687 "topology\n"); 1688 /* Get Loop Map information */ 1689 if (la->il) 1690 vport->fc_flag |= FC_LBIT; 1691 1692 vport->fc_myDID = la->granted_AL_PA; 1693 i = la->un.lilpBde64.tus.f.bdeSize; 1694 1695 if (i == 0) { 1696 phba->alpa_map[0] = 0; 1697 } else { 1698 if (vport->cfg_log_verbose & LOG_LINK_EVENT) { 1699 int numalpa, j, k; 1700 union { 1701 uint8_t pamap[16]; 1702 struct { 1703 uint32_t wd1; 1704 uint32_t wd2; 1705 uint32_t wd3; 1706 uint32_t wd4; 1707 } pa; 1708 } un; 1709 numalpa = phba->alpa_map[0]; 1710 j = 0; 1711 while (j < numalpa) { 1712 memset(un.pamap, 0, 16); 1713 for (k = 1; j < numalpa; k++) { 1714 un.pamap[k - 1] = 1715 phba->alpa_map[j + 1]; 1716 j++; 1717 if (k == 16) 1718 break; 1719 } 1720 /* Link Up Event ALPA map */ 1721 lpfc_printf_log(phba, 1722 KERN_WARNING, 1723 LOG_LINK_EVENT, 1724 "1304 Link Up Event " 1725 "ALPA map Data: x%x " 1726 "x%x x%x x%x\n", 1727 un.pa.wd1, un.pa.wd2, 1728 un.pa.wd3, un.pa.wd4); 1729 } 1730 } 1731 } 1732 } else { 1733 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 1734 if (phba->max_vpi && phba->cfg_enable_npiv && 1735 (phba->sli_rev == 3)) 1736 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 1737 } 1738 vport->fc_myDID = phba->fc_pref_DID; 1739 vport->fc_flag |= FC_LBIT; 1740 } 1741 spin_unlock_irq(&phba->hbalock); 1742 1743 lpfc_linkup(phba); 1744 if (sparam_mbox) { 1745 lpfc_read_sparam(phba, sparam_mbox, 0); 1746 sparam_mbox->vport = vport; 1747 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 1748 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 1749 if (rc == MBX_NOT_FINISHED) { 1750 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 1751 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1752 kfree(mp); 1753 mempool_free(sparam_mbox, phba->mbox_mem_pool); 1754 goto out; 1755 } 1756 } 1757 1758 if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { 1759 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1760 if (!cfglink_mbox) 1761 goto out; 1762 vport->port_state = LPFC_LOCAL_CFG_LINK; 1763 lpfc_config_link(phba, cfglink_mbox); 1764 cfglink_mbox->vport = vport; 1765 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 1766 rc = 
lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 1767 if (rc == MBX_NOT_FINISHED) { 1768 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 1769 goto out; 1770 } 1771 } else { 1772 /* 1773 * Add the driver's default FCF record at FCF index 0 now. This 1774 * is phase 1 implementation that support FCF index 0 and driver 1775 * defaults. 1776 */ 1777 if (phba->cfg_enable_fip == 0) { 1778 fcf_record = kzalloc(sizeof(struct fcf_record), 1779 GFP_KERNEL); 1780 if (unlikely(!fcf_record)) { 1781 lpfc_printf_log(phba, KERN_ERR, 1782 LOG_MBOX | LOG_SLI, 1783 "2554 Could not allocate memmory for " 1784 "fcf record\n"); 1785 rc = -ENODEV; 1786 goto out; 1787 } 1788 1789 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 1790 LPFC_FCOE_FCF_DEF_INDEX); 1791 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 1792 if (unlikely(rc)) { 1793 lpfc_printf_log(phba, KERN_ERR, 1794 LOG_MBOX | LOG_SLI, 1795 "2013 Could not manually add FCF " 1796 "record 0, status %d\n", rc); 1797 rc = -ENODEV; 1798 kfree(fcf_record); 1799 goto out; 1800 } 1801 kfree(fcf_record); 1802 } 1803 /* 1804 * The driver is expected to do FIP/FCF. Call the port 1805 * and get the FCF Table. 1806 */ 1807 rc = lpfc_sli4_read_fcf_record(phba, 1808 LPFC_FCOE_FCF_GET_FIRST); 1809 if (rc) 1810 goto out; 1811 } 1812 1813 return; 1814 out: 1815 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 1816 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 1817 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n", 1818 vport->port_state, sparam_mbox, cfglink_mbox); 1819 lpfc_issue_clear_la(phba, vport); 1820 return; 1821 } 1822 1823 static void 1824 lpfc_enable_la(struct lpfc_hba *phba) 1825 { 1826 uint32_t control; 1827 struct lpfc_sli *psli = &phba->sli; 1828 spin_lock_irq(&phba->hbalock); 1829 psli->sli_flag |= LPFC_PROCESS_LA; 1830 if (phba->sli_rev <= LPFC_SLI_REV3) { 1831 control = readl(phba->HCregaddr); 1832 control |= HC_LAINT_ENA; 1833 writel(control, phba->HCregaddr); 1834 readl(phba->HCregaddr); /* flush */ 1835 } 1836 spin_unlock_irq(&phba->hbalock); 1837 } 1838 1839 static void 1840 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 1841 { 1842 lpfc_linkdown(phba); 1843 lpfc_enable_la(phba); 1844 lpfc_unregister_unused_fcf(phba); 1845 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 1846 } 1847 1848 1849 /* 1850 * This routine handles processing a READ_LA mailbox 1851 * command upon completion. It is setup in the LPFC_MBOXQ 1852 * as the completion routine when the command is 1853 * handed off to the SLI layer. 
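 * Besides decoding the attention type, it copies the returned ALPA map,
 * records the link event tag and Menlo maintenance state, and then calls
 * lpfc_mbx_process_link_up() or lpfc_mbx_issue_link_down() as appropriate.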
1854 */ 1855 void 1856 lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1857 { 1858 struct lpfc_vport *vport = pmb->vport; 1859 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 1860 READ_LA_VAR *la; 1861 MAILBOX_t *mb = &pmb->u.mb; 1862 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1863 1864 /* Unblock ELS traffic */ 1865 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 1866 /* Check for error */ 1867 if (mb->mbxStatus) { 1868 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1869 "1307 READ_LA mbox error x%x state x%x\n", 1870 mb->mbxStatus, vport->port_state); 1871 lpfc_mbx_issue_link_down(phba); 1872 phba->link_state = LPFC_HBA_ERROR; 1873 goto lpfc_mbx_cmpl_read_la_free_mbuf; 1874 } 1875 1876 la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; 1877 1878 memcpy(&phba->alpa_map[0], mp->virt, 128); 1879 1880 spin_lock_irq(shost->host_lock); 1881 if (la->pb) 1882 vport->fc_flag |= FC_BYPASSED_MODE; 1883 else 1884 vport->fc_flag &= ~FC_BYPASSED_MODE; 1885 spin_unlock_irq(shost->host_lock); 1886 1887 if ((phba->fc_eventTag < la->eventTag) || 1888 (phba->fc_eventTag == la->eventTag)) { 1889 phba->fc_stat.LinkMultiEvent++; 1890 if (la->attType == AT_LINK_UP) 1891 if (phba->fc_eventTag != 0) 1892 lpfc_linkdown(phba); 1893 } 1894 1895 phba->fc_eventTag = la->eventTag; 1896 if (la->mm) 1897 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 1898 else 1899 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 1900 1901 if (la->attType == AT_LINK_UP && (!la->mm)) { 1902 phba->fc_stat.LinkUp++; 1903 if (phba->link_flag & LS_LOOPBACK_MODE) { 1904 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1905 "1306 Link Up Event in loop back mode " 1906 "x%x received Data: x%x x%x x%x x%x\n", 1907 la->eventTag, phba->fc_eventTag, 1908 la->granted_AL_PA, la->UlnkSpeed, 1909 phba->alpa_map[0]); 1910 } else { 1911 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1912 "1303 Link Up Event x%x received " 1913 "Data: x%x x%x x%x x%x x%x x%x %d\n", 1914 la->eventTag, phba->fc_eventTag, 1915 la->granted_AL_PA, la->UlnkSpeed, 1916 phba->alpa_map[0], 1917 la->mm, la->fa, 1918 phba->wait_4_mlo_maint_flg); 1919 } 1920 lpfc_mbx_process_link_up(phba, la); 1921 } else if (la->attType == AT_LINK_DOWN) { 1922 phba->fc_stat.LinkDown++; 1923 if (phba->link_flag & LS_LOOPBACK_MODE) { 1924 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1925 "1308 Link Down Event in loop back mode " 1926 "x%x received " 1927 "Data: x%x x%x x%x\n", 1928 la->eventTag, phba->fc_eventTag, 1929 phba->pport->port_state, vport->fc_flag); 1930 } 1931 else { 1932 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1933 "1305 Link Down Event x%x received " 1934 "Data: x%x x%x x%x x%x x%x\n", 1935 la->eventTag, phba->fc_eventTag, 1936 phba->pport->port_state, vport->fc_flag, 1937 la->mm, la->fa); 1938 } 1939 lpfc_mbx_issue_link_down(phba); 1940 } 1941 if (la->mm && la->attType == AT_LINK_UP) { 1942 if (phba->link_state != LPFC_LINK_DOWN) { 1943 phba->fc_stat.LinkDown++; 1944 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1945 "1312 Link Down Event x%x received " 1946 "Data: x%x x%x x%x\n", 1947 la->eventTag, phba->fc_eventTag, 1948 phba->pport->port_state, vport->fc_flag); 1949 lpfc_mbx_issue_link_down(phba); 1950 } else 1951 lpfc_enable_la(phba); 1952 1953 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 1954 "1310 Menlo Maint Mode Link up Event x%x rcvd " 1955 "Data: x%x x%x x%x\n", 1956 la->eventTag, phba->fc_eventTag, 1957 phba->pport->port_state, vport->fc_flag); 1958 /* 1959 * The cmnd that triggered this will be waiting for this 1960 * 
signal. 1961 */ 1962 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */ 1963 if (phba->wait_4_mlo_maint_flg) { 1964 phba->wait_4_mlo_maint_flg = 0; 1965 wake_up_interruptible(&phba->wait_4_mlo_m_q); 1966 } 1967 } 1968 1969 if (la->fa) { 1970 if (la->mm) 1971 lpfc_issue_clear_la(phba, vport); 1972 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 1973 "1311 fa %d\n", la->fa); 1974 } 1975 1976 lpfc_mbx_cmpl_read_la_free_mbuf: 1977 lpfc_mbuf_free(phba, mp->virt, mp->phys); 1978 kfree(mp); 1979 mempool_free(pmb, phba->mbox_mem_pool); 1980 return; 1981 } 1982 1983 /* 1984 * This routine handles processing a REG_LOGIN mailbox 1985 * command upon completion. It is setup in the LPFC_MBOXQ 1986 * as the completion routine when the command is 1987 * handed off to the SLI layer. 1988 */ 1989 void 1990 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1991 { 1992 struct lpfc_vport *vport = pmb->vport; 1993 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 1994 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 1995 1996 pmb->context1 = NULL; 1997 1998 /* Good status, call state machine */ 1999 lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN); 2000 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2001 kfree(mp); 2002 mempool_free(pmb, phba->mbox_mem_pool); 2003 /* decrement the node reference count held for this callback 2004 * function. 2005 */ 2006 lpfc_nlp_put(ndlp); 2007 2008 return; 2009 } 2010 2011 static void 2012 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2013 { 2014 MAILBOX_t *mb = &pmb->u.mb; 2015 struct lpfc_vport *vport = pmb->vport; 2016 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2017 2018 switch (mb->mbxStatus) { 2019 case 0x0011: 2020 case 0x0020: 2021 case 0x9700: 2022 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 2023 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 2024 mb->mbxStatus); 2025 break; 2026 } 2027 vport->unreg_vpi_cmpl = VPORT_OK; 2028 mempool_free(pmb, phba->mbox_mem_pool); 2029 /* 2030 * This shost reference might have been taken at the beginning of 2031 * lpfc_vport_delete() 2032 */ 2033 if (vport->load_flag & FC_UNLOADING) 2034 scsi_host_put(shost); 2035 } 2036 2037 int 2038 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 2039 { 2040 struct lpfc_hba *phba = vport->phba; 2041 LPFC_MBOXQ_t *mbox; 2042 int rc; 2043 2044 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2045 if (!mbox) 2046 return 1; 2047 2048 lpfc_unreg_vpi(phba, vport->vpi, mbox); 2049 mbox->vport = vport; 2050 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 2051 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 2052 if (rc == MBX_NOT_FINISHED) { 2053 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 2054 "1800 Could not issue unreg_vpi\n"); 2055 mempool_free(mbox, phba->mbox_mem_pool); 2056 vport->unreg_vpi_cmpl = VPORT_ERROR; 2057 return rc; 2058 } 2059 return 0; 2060 } 2061 2062 static void 2063 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2064 { 2065 struct lpfc_vport *vport = pmb->vport; 2066 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2067 MAILBOX_t *mb = &pmb->u.mb; 2068 2069 switch (mb->mbxStatus) { 2070 case 0x0011: 2071 case 0x9601: 2072 case 0x9602: 2073 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 2074 "0912 cmpl_reg_vpi, mb status = 0x%x\n", 2075 mb->mbxStatus); 2076 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2077 spin_lock_irq(shost->host_lock); 2078 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 2079 spin_unlock_irq(shost->host_lock); 2080 vport->fc_myDID = 0; 
2081 goto out; 2082 } 2083 2084 vport->num_disc_nodes = 0; 2085 /* go thru NPR list and issue ELS PLOGIs */ 2086 if (vport->fc_npr_cnt) 2087 lpfc_els_disc_plogi(vport); 2088 2089 if (!vport->num_disc_nodes) { 2090 spin_lock_irq(shost->host_lock); 2091 vport->fc_flag &= ~FC_NDISC_ACTIVE; 2092 spin_unlock_irq(shost->host_lock); 2093 lpfc_can_disctmo(vport); 2094 } 2095 vport->port_state = LPFC_VPORT_READY; 2096 2097 out: 2098 mempool_free(pmb, phba->mbox_mem_pool); 2099 return; 2100 } 2101 2102 /** 2103 * lpfc_create_static_vport - Read HBA config region to create static vports. 2104 * @phba: pointer to lpfc hba data structure. 2105 * 2106 * This routine issue a DUMP mailbox command for config region 22 to get 2107 * the list of static vports to be created. The function create vports 2108 * based on the information returned from the HBA. 2109 **/ 2110 void 2111 lpfc_create_static_vport(struct lpfc_hba *phba) 2112 { 2113 LPFC_MBOXQ_t *pmb = NULL; 2114 MAILBOX_t *mb; 2115 struct static_vport_info *vport_info; 2116 int rc, i; 2117 struct fc_vport_identifiers vport_id; 2118 struct fc_vport *new_fc_vport; 2119 struct Scsi_Host *shost; 2120 struct lpfc_vport *vport; 2121 uint16_t offset = 0; 2122 uint8_t *vport_buff; 2123 2124 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2125 if (!pmb) { 2126 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2127 "0542 lpfc_create_static_vport failed to" 2128 " allocate mailbox memory\n"); 2129 return; 2130 } 2131 2132 mb = &pmb->u.mb; 2133 2134 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 2135 if (!vport_info) { 2136 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2137 "0543 lpfc_create_static_vport failed to" 2138 " allocate vport_info\n"); 2139 mempool_free(pmb, phba->mbox_mem_pool); 2140 return; 2141 } 2142 2143 vport_buff = (uint8_t *) vport_info; 2144 do { 2145 lpfc_dump_static_vport(phba, pmb, offset); 2146 pmb->vport = phba->pport; 2147 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); 2148 2149 if ((rc != MBX_SUCCESS) || mb->mbxStatus) { 2150 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2151 "0544 lpfc_create_static_vport failed to" 2152 " issue dump mailbox command ret 0x%x " 2153 "status 0x%x\n", 2154 rc, mb->mbxStatus); 2155 goto out; 2156 } 2157 2158 if (mb->un.varDmp.word_cnt > 2159 sizeof(struct static_vport_info) - offset) 2160 mb->un.varDmp.word_cnt = 2161 sizeof(struct static_vport_info) - offset; 2162 2163 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 2164 vport_buff + offset, 2165 mb->un.varDmp.word_cnt); 2166 offset += mb->un.varDmp.word_cnt; 2167 2168 } while (mb->un.varDmp.word_cnt && 2169 offset < sizeof(struct static_vport_info)); 2170 2171 2172 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 2173 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 2174 != VPORT_INFO_REV)) { 2175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2176 "0545 lpfc_create_static_vport bad" 2177 " information header 0x%x 0x%x\n", 2178 le32_to_cpu(vport_info->signature), 2179 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); 2180 2181 goto out; 2182 } 2183 2184 shost = lpfc_shost_from_vport(phba->pport); 2185 2186 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { 2187 memset(&vport_id, 0, sizeof(vport_id)); 2188 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); 2189 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); 2190 if (!vport_id.port_name || !vport_id.node_name) 2191 continue; 2192 2193 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; 2194 vport_id.vport_type = FC_PORTTYPE_NPIV; 2195 
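		/* Each static vport is created as an enabled NPIV FCP
		 * initiator port.
		 */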
vport_id.disable = false; 2196 new_fc_vport = fc_vport_create(shost, 0, &vport_id); 2197 2198 if (!new_fc_vport) { 2199 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 2200 "0546 lpfc_create_static_vport failed to" 2201 " create vport \n"); 2202 continue; 2203 } 2204 2205 vport = *(struct lpfc_vport **)new_fc_vport->dd_data; 2206 vport->vport_flag |= STATIC_VPORT; 2207 } 2208 2209 out: 2210 /* 2211 * If this is timed out command, setting NULL to context2 tell SLI 2212 * layer not to use this buffer. 2213 */ 2214 spin_lock_irq(&phba->hbalock); 2215 pmb->context2 = NULL; 2216 spin_unlock_irq(&phba->hbalock); 2217 kfree(vport_info); 2218 if (rc != MBX_TIMEOUT) 2219 mempool_free(pmb, phba->mbox_mem_pool); 2220 2221 return; 2222 } 2223 2224 /* 2225 * This routine handles processing a Fabric REG_LOGIN mailbox 2226 * command upon completion. It is setup in the LPFC_MBOXQ 2227 * as the completion routine when the command is 2228 * handed off to the SLI layer. 2229 */ 2230 void 2231 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2232 { 2233 struct lpfc_vport *vport = pmb->vport; 2234 MAILBOX_t *mb = &pmb->u.mb; 2235 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2236 struct lpfc_nodelist *ndlp; 2237 2238 ndlp = (struct lpfc_nodelist *) pmb->context2; 2239 pmb->context1 = NULL; 2240 pmb->context2 = NULL; 2241 if (mb->mbxStatus) { 2242 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2243 "0258 Register Fabric login error: 0x%x\n", 2244 mb->mbxStatus); 2245 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2246 kfree(mp); 2247 mempool_free(pmb, phba->mbox_mem_pool); 2248 2249 if (phba->fc_topology == TOPOLOGY_LOOP) { 2250 /* FLOGI failed, use loop map to make discovery list */ 2251 lpfc_disc_list_loopmap(vport); 2252 2253 /* Start discovery */ 2254 lpfc_disc_start(vport); 2255 /* Decrement the reference count to ndlp after the 2256 * reference to the ndlp are done. 2257 */ 2258 lpfc_nlp_put(ndlp); 2259 return; 2260 } 2261 2262 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2263 /* Decrement the reference count to ndlp after the reference 2264 * to the ndlp are done. 2265 */ 2266 lpfc_nlp_put(ndlp); 2267 return; 2268 } 2269 2270 ndlp->nlp_rpi = mb->un.varWords[0]; 2271 ndlp->nlp_flag |= NLP_RPI_VALID; 2272 ndlp->nlp_type |= NLP_FABRIC; 2273 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2274 2275 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2276 lpfc_start_fdiscs(phba); 2277 lpfc_do_scr_ns_plogi(phba, vport); 2278 } 2279 2280 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2281 kfree(mp); 2282 mempool_free(pmb, phba->mbox_mem_pool); 2283 2284 /* Drop the reference count from the mbox at the end after 2285 * all the current reference to the ndlp have been done. 2286 */ 2287 lpfc_nlp_put(ndlp); 2288 return; 2289 } 2290 2291 /* 2292 * This routine handles processing a NameServer REG_LOGIN mailbox 2293 * command upon completion. It is setup in the LPFC_MBOXQ 2294 * as the completion routine when the command is 2295 * handed off to the SLI layer. 
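 * On success, and while the port is still coming ready, it issues the
 * NameServer registrations (RFF_ID, RNN_ID, RSNN_NN, RSPN_ID, RFT_ID),
 * sends an SCR, and then starts the GID_FT query; on a mailbox error it
 * falls back to loop-map discovery for loop topologies.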
2296 */ 2297 void 2298 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2299 { 2300 MAILBOX_t *mb = &pmb->u.mb; 2301 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 2302 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 2303 struct lpfc_vport *vport = pmb->vport; 2304 2305 if (mb->mbxStatus) { 2306 out: 2307 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2308 "0260 Register NameServer error: 0x%x\n", 2309 mb->mbxStatus); 2310 /* decrement the node reference count held for this 2311 * callback function. 2312 */ 2313 lpfc_nlp_put(ndlp); 2314 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2315 kfree(mp); 2316 mempool_free(pmb, phba->mbox_mem_pool); 2317 2318 /* If no other thread is using the ndlp, free it */ 2319 lpfc_nlp_not_used(ndlp); 2320 2321 if (phba->fc_topology == TOPOLOGY_LOOP) { 2322 /* 2323 * RegLogin failed, use loop map to make discovery 2324 * list 2325 */ 2326 lpfc_disc_list_loopmap(vport); 2327 2328 /* Start discovery */ 2329 lpfc_disc_start(vport); 2330 return; 2331 } 2332 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2333 return; 2334 } 2335 2336 pmb->context1 = NULL; 2337 2338 ndlp->nlp_rpi = mb->un.varWords[0]; 2339 ndlp->nlp_flag |= NLP_RPI_VALID; 2340 ndlp->nlp_type |= NLP_FABRIC; 2341 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2342 2343 if (vport->port_state < LPFC_VPORT_READY) { 2344 /* Link up discovery requires Fabric registration. */ 2345 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */ 2346 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 2347 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 2348 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 2349 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 2350 2351 /* Issue SCR just before NameServer GID_FT Query */ 2352 lpfc_issue_els_scr(vport, SCR_DID, 0); 2353 } 2354 2355 vport->fc_ns_retry = 0; 2356 /* Good status, issue CT Request to NameServer */ 2357 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { 2358 /* Cannot issue NameServer Query, so finish up discovery */ 2359 goto out; 2360 } 2361 2362 /* decrement the node reference count held for this 2363 * callback function. 2364 */ 2365 lpfc_nlp_put(ndlp); 2366 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2367 kfree(mp); 2368 mempool_free(pmb, phba->mbox_mem_pool); 2369 2370 return; 2371 } 2372 2373 static void 2374 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2375 { 2376 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2377 struct fc_rport *rport; 2378 struct lpfc_rport_data *rdata; 2379 struct fc_rport_identifiers rport_ids; 2380 struct lpfc_hba *phba = vport->phba; 2381 2382 /* Remote port has reappeared. Re-register w/ FC transport */ 2383 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2384 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2385 rport_ids.port_id = ndlp->nlp_DID; 2386 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 2387 2388 /* 2389 * We leave our node pointer in rport->dd_data when we unregister a 2390 * FCP target port. But fc_remote_port_add zeros the space to which 2391 * rport->dd_data points. So, if we're reusing a previously 2392 * registered port, drop the reference that we took the last time we 2393 * registered the port. 
2394 */ 2395 if (ndlp->rport && ndlp->rport->dd_data && 2396 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) 2397 lpfc_nlp_put(ndlp); 2398 2399 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 2400 "rport add: did:x%x flg:x%x type x%x", 2401 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 2402 2403 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 2404 if (!rport || !get_device(&rport->dev)) { 2405 dev_printk(KERN_WARNING, &phba->pcidev->dev, 2406 "Warning: fc_remote_port_add failed\n"); 2407 return; 2408 } 2409 2410 /* initialize static port data */ 2411 rport->maxframe_size = ndlp->nlp_maxframe; 2412 rport->supported_classes = ndlp->nlp_class_sup; 2413 rdata = rport->dd_data; 2414 rdata->pnode = lpfc_nlp_get(ndlp); 2415 2416 if (ndlp->nlp_type & NLP_FCP_TARGET) 2417 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 2418 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 2419 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 2420 2421 2422 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 2423 fc_remote_port_rolechg(rport, rport_ids.roles); 2424 2425 if ((rport->scsi_target_id != -1) && 2426 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 2427 ndlp->nlp_sid = rport->scsi_target_id; 2428 } 2429 return; 2430 } 2431 2432 static void 2433 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 2434 { 2435 struct fc_rport *rport = ndlp->rport; 2436 2437 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 2438 "rport delete: did:x%x flg:x%x type x%x", 2439 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 2440 2441 fc_remote_port_delete(rport); 2442 2443 return; 2444 } 2445 2446 static void 2447 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 2448 { 2449 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2450 2451 spin_lock_irq(shost->host_lock); 2452 switch (state) { 2453 case NLP_STE_UNUSED_NODE: 2454 vport->fc_unused_cnt += count; 2455 break; 2456 case NLP_STE_PLOGI_ISSUE: 2457 vport->fc_plogi_cnt += count; 2458 break; 2459 case NLP_STE_ADISC_ISSUE: 2460 vport->fc_adisc_cnt += count; 2461 break; 2462 case NLP_STE_REG_LOGIN_ISSUE: 2463 vport->fc_reglogin_cnt += count; 2464 break; 2465 case NLP_STE_PRLI_ISSUE: 2466 vport->fc_prli_cnt += count; 2467 break; 2468 case NLP_STE_UNMAPPED_NODE: 2469 vport->fc_unmap_cnt += count; 2470 break; 2471 case NLP_STE_MAPPED_NODE: 2472 vport->fc_map_cnt += count; 2473 break; 2474 case NLP_STE_NPR_NODE: 2475 vport->fc_npr_cnt += count; 2476 break; 2477 } 2478 spin_unlock_irq(shost->host_lock); 2479 } 2480 2481 static void 2482 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2483 int old_state, int new_state) 2484 { 2485 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2486 2487 if (new_state == NLP_STE_UNMAPPED_NODE) { 2488 ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); 2489 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 2490 ndlp->nlp_type |= NLP_FC_NODE; 2491 } 2492 if (new_state == NLP_STE_MAPPED_NODE) 2493 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 2494 if (new_state == NLP_STE_NPR_NODE) 2495 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 2496 2497 /* Transport interface */ 2498 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || 2499 old_state == NLP_STE_UNMAPPED_NODE)) { 2500 vport->phba->nport_event_cnt++; 2501 lpfc_unregister_remote_port(ndlp); 2502 } 2503 2504 if (new_state == NLP_STE_MAPPED_NODE || 2505 new_state == NLP_STE_UNMAPPED_NODE) { 2506 vport->phba->nport_event_cnt++; 2507 /* 2508 * Tell the fc transport about the port, if we haven't 2509 * already. 
If we have, and it's a scsi entity, be 2510 * sure to unblock any attached scsi devices 2511 */ 2512 lpfc_register_remote_port(vport, ndlp); 2513 } 2514 if ((new_state == NLP_STE_MAPPED_NODE) && 2515 (vport->stat_data_enabled)) { 2516 /* 2517 * A new target is discovered, if there is no buffer for 2518 * statistical data collection allocate buffer. 2519 */ 2520 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, 2521 sizeof(struct lpfc_scsicmd_bkt), 2522 GFP_KERNEL); 2523 2524 if (!ndlp->lat_data) 2525 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 2526 "0286 lpfc_nlp_state_cleanup failed to " 2527 "allocate statistical data buffer DID " 2528 "0x%x\n", ndlp->nlp_DID); 2529 } 2530 /* 2531 * if we added to Mapped list, but the remote port 2532 * registration failed or assigned a target id outside 2533 * our presentable range - move the node to the 2534 * Unmapped List 2535 */ 2536 if (new_state == NLP_STE_MAPPED_NODE && 2537 (!ndlp->rport || 2538 ndlp->rport->scsi_target_id == -1 || 2539 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 2540 spin_lock_irq(shost->host_lock); 2541 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 2542 spin_unlock_irq(shost->host_lock); 2543 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 2544 } 2545 } 2546 2547 static char * 2548 lpfc_nlp_state_name(char *buffer, size_t size, int state) 2549 { 2550 static char *states[] = { 2551 [NLP_STE_UNUSED_NODE] = "UNUSED", 2552 [NLP_STE_PLOGI_ISSUE] = "PLOGI", 2553 [NLP_STE_ADISC_ISSUE] = "ADISC", 2554 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", 2555 [NLP_STE_PRLI_ISSUE] = "PRLI", 2556 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", 2557 [NLP_STE_MAPPED_NODE] = "MAPPED", 2558 [NLP_STE_NPR_NODE] = "NPR", 2559 }; 2560 2561 if (state < NLP_STE_MAX_STATE && states[state]) 2562 strlcpy(buffer, states[state], size); 2563 else 2564 snprintf(buffer, size, "unknown (%d)", state); 2565 return buffer; 2566 } 2567 2568 void 2569 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2570 int state) 2571 { 2572 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2573 int old_state = ndlp->nlp_state; 2574 char name1[16], name2[16]; 2575 2576 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 2577 "0904 NPort state transition x%06x, %s -> %s\n", 2578 ndlp->nlp_DID, 2579 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 2580 lpfc_nlp_state_name(name2, sizeof(name2), state)); 2581 2582 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 2583 "node statechg did:x%x old:%d ste:%d", 2584 ndlp->nlp_DID, old_state, state); 2585 2586 if (old_state == NLP_STE_NPR_NODE && 2587 state != NLP_STE_NPR_NODE) 2588 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2589 if (old_state == NLP_STE_UNMAPPED_NODE) { 2590 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 2591 ndlp->nlp_type &= ~NLP_FC_NODE; 2592 } 2593 2594 if (list_empty(&ndlp->nlp_listp)) { 2595 spin_lock_irq(shost->host_lock); 2596 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 2597 spin_unlock_irq(shost->host_lock); 2598 } else if (old_state) 2599 lpfc_nlp_counters(vport, old_state, -1); 2600 2601 ndlp->nlp_state = state; 2602 lpfc_nlp_counters(vport, state, 1); 2603 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); 2604 } 2605 2606 void 2607 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2608 { 2609 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2610 2611 if (list_empty(&ndlp->nlp_listp)) { 2612 spin_lock_irq(shost->host_lock); 2613 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 2614 spin_unlock_irq(shost->host_lock); 2615 } 2616 } 2617 2618 void 2619 
lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2620 { 2621 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2622 2623 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2624 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 2625 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 2626 spin_lock_irq(shost->host_lock); 2627 list_del_init(&ndlp->nlp_listp); 2628 spin_unlock_irq(shost->host_lock); 2629 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 2630 NLP_STE_UNUSED_NODE); 2631 } 2632 2633 static void 2634 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2635 { 2636 lpfc_cancel_retry_delay_tmo(vport, ndlp); 2637 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 2638 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 2639 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 2640 NLP_STE_UNUSED_NODE); 2641 } 2642 /** 2643 * lpfc_initialize_node - Initialize all fields of node object 2644 * @vport: Pointer to Virtual Port object. 2645 * @ndlp: Pointer to FC node object. 2646 * @did: FC_ID of the node. 2647 * 2648 * This function is always called when node object need to be initialized. 2649 * It initializes all the fields of the node object. Although the reference 2650 * to phba from @ndlp can be obtained indirectly through it's reference to 2651 * @vport, a direct reference to phba is taken here by @ndlp. This is due 2652 * to the life-span of the @ndlp might go beyond the existence of @vport as 2653 * the final release of ndlp is determined by its reference count. And, the 2654 * operation on @ndlp needs the reference to phba. 2655 **/ 2656 static inline void 2657 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2658 uint32_t did) 2659 { 2660 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 2661 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 2662 init_timer(&ndlp->nlp_delayfunc); 2663 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 2664 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 2665 ndlp->nlp_DID = did; 2666 ndlp->vport = vport; 2667 ndlp->phba = vport->phba; 2668 ndlp->nlp_sid = NLP_NO_SID; 2669 kref_init(&ndlp->kref); 2670 NLP_INT_NODE_ACT(ndlp); 2671 atomic_set(&ndlp->cmd_pending, 0); 2672 ndlp->cmd_qdepth = LPFC_MAX_TGT_QDEPTH; 2673 } 2674 2675 struct lpfc_nodelist * 2676 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 2677 int state) 2678 { 2679 struct lpfc_hba *phba = vport->phba; 2680 uint32_t did; 2681 unsigned long flags; 2682 2683 if (!ndlp) 2684 return NULL; 2685 2686 spin_lock_irqsave(&phba->ndlp_lock, flags); 2687 /* The ndlp should not be in memory free mode */ 2688 if (NLP_CHK_FREE_REQ(ndlp)) { 2689 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2690 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2691 "0277 lpfc_enable_node: ndlp:x%p " 2692 "usgmap:x%x refcnt:%d\n", 2693 (void *)ndlp, ndlp->nlp_usg_map, 2694 atomic_read(&ndlp->kref.refcount)); 2695 return NULL; 2696 } 2697 /* The ndlp should not already be in active mode */ 2698 if (NLP_CHK_NODE_ACT(ndlp)) { 2699 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 2700 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 2701 "0278 lpfc_enable_node: ndlp:x%p " 2702 "usgmap:x%x refcnt:%d\n", 2703 (void *)ndlp, ndlp->nlp_usg_map, 2704 atomic_read(&ndlp->kref.refcount)); 2705 return NULL; 2706 } 2707 2708 /* Keep the original DID */ 2709 did = ndlp->nlp_DID; 2710 2711 /* re-initialize ndlp except of ndlp linked list pointer */ 2712 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 2713 sizeof (struct lpfc_nodelist) - 
sizeof (struct list_head));
	lpfc_initialize_node(vport, ndlp, did);

	spin_unlock_irqrestore(&phba->ndlp_lock, flags);

	if (state != NLP_STE_UNUSED_NODE)
		lpfc_nlp_set_state(vport, ndlp, state);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
		"node enable: did:x%x",
		ndlp->nlp_DID, 0, 0);
	return ndlp;
}

void
lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/*
	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
	 * the ndlp from the vport. The ndlp is marked UNUSED and stays on
	 * the list until ALL other outstanding threads have completed. We
	 * check that the ndlp is not already in the UNUSED state before we
	 * proceed.
	 */
	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
		return;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
	lpfc_nlp_put(ndlp);
	return;
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo;

	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
		/* For FAN, timeout should be greater than edtov */
		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
	} else {
		/* Normal discovery timeout should be greater than the ELS/CT
		 * timeout; the FC spec states we need 3 * ratov for CT
		 * requests.
		 */
		tmo = ((phba->fc_ratov * 3) + 3);
	}


	if (!timer_pending(&vport->fc_disctmo)) {
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
			"set disc timer: tmo:x%x state:x%x flg:x%x",
			tmo, vport->port_state, vport->fc_flag);
	}

	mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0247 Start Discovery Timer state x%x "
			 "Data: x%x x%lx x%x x%x\n",
			 vport->port_state, tmo,
			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long iflags;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"can disc timer: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	/* Turn off discovery timer if it's running */
	if (vport->fc_flag & FC_DISC_TMO) {
		spin_lock_irqsave(shost->host_lock, iflags);
		vport->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irqrestore(shost->host_lock, iflags);
		del_timer_sync(&vport->fc_disctmo);
		spin_lock_irqsave(&vport->work_port_lock, iflags);
		vport->work_port_events &= ~WORKER_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0248 Cancel Discovery Timer state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag,
			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
	return 0;
}

/*
 * Check specified ring for outstanding IOCB
on the SLI queue 2822 * Return true if iocb matches the specified nport 2823 */ 2824 int 2825 lpfc_check_sli_ndlp(struct lpfc_hba *phba, 2826 struct lpfc_sli_ring *pring, 2827 struct lpfc_iocbq *iocb, 2828 struct lpfc_nodelist *ndlp) 2829 { 2830 struct lpfc_sli *psli = &phba->sli; 2831 IOCB_t *icmd = &iocb->iocb; 2832 struct lpfc_vport *vport = ndlp->vport; 2833 2834 if (iocb->vport != vport) 2835 return 0; 2836 2837 if (pring->ringno == LPFC_ELS_RING) { 2838 switch (icmd->ulpCommand) { 2839 case CMD_GEN_REQUEST64_CR: 2840 if (iocb->context_un.ndlp == ndlp) 2841 return 1; 2842 case CMD_ELS_REQUEST64_CR: 2843 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) 2844 return 1; 2845 case CMD_XMIT_ELS_RSP64_CX: 2846 if (iocb->context1 == (uint8_t *) ndlp) 2847 return 1; 2848 } 2849 } else if (pring->ringno == psli->extra_ring) { 2850 2851 } else if (pring->ringno == psli->fcp_ring) { 2852 /* Skip match check if waiting to relogin to FCP target */ 2853 if ((ndlp->nlp_type & NLP_FCP_TARGET) && 2854 (ndlp->nlp_flag & NLP_DELAY_TMO)) { 2855 return 0; 2856 } 2857 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) { 2858 return 1; 2859 } 2860 } else if (pring->ringno == psli->next_ring) { 2861 2862 } 2863 return 0; 2864 } 2865 2866 /* 2867 * Free resources / clean up outstanding I/Os 2868 * associated with nlp_rpi in the LPFC_NODELIST entry. 2869 */ 2870 static int 2871 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) 2872 { 2873 LIST_HEAD(completions); 2874 struct lpfc_sli *psli; 2875 struct lpfc_sli_ring *pring; 2876 struct lpfc_iocbq *iocb, *next_iocb; 2877 uint32_t rpi, i; 2878 2879 lpfc_fabric_abort_nport(ndlp); 2880 2881 /* 2882 * Everything that matches on txcmplq will be returned 2883 * by firmware with a no rpi error. 2884 */ 2885 psli = &phba->sli; 2886 rpi = ndlp->nlp_rpi; 2887 if (ndlp->nlp_flag & NLP_RPI_VALID) { 2888 /* Now process each ring */ 2889 for (i = 0; i < psli->num_rings; i++) { 2890 pring = &psli->ring[i]; 2891 2892 spin_lock_irq(&phba->hbalock); 2893 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, 2894 list) { 2895 /* 2896 * Check to see if iocb matches the nport we are 2897 * looking for 2898 */ 2899 if ((lpfc_check_sli_ndlp(phba, pring, iocb, 2900 ndlp))) { 2901 /* It matches, so deque and call compl 2902 with an error */ 2903 list_move_tail(&iocb->list, 2904 &completions); 2905 pring->txq_cnt--; 2906 } 2907 } 2908 spin_unlock_irq(&phba->hbalock); 2909 } 2910 } 2911 2912 /* Cancel all the IOCBs from the completions list */ 2913 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 2914 IOERR_SLI_ABORTED); 2915 2916 return 0; 2917 } 2918 2919 /* 2920 * Free rpi associated with LPFC_NODELIST entry. 2921 * This routine is called from lpfc_freenode(), when we are removing 2922 * a LPFC_NODELIST entry. It is also called if the driver initiates a 2923 * LOGO that completes successfully, and we are waiting to PLOGI back 2924 * to the remote NPort. In addition, it is called after we receive 2925 * and unsolicated ELS cmd, send back a rsp, the rsp completes and 2926 * we are waiting to PLOGI back to the remote NPort. 
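 * Besides issuing the UNREG_LOGIN mailbox command, this routine flushes
 * any I/O still outstanding on the rpi via lpfc_no_rpi() and clears the
 * NLP_RPI_VALID and NLP_NPR_ADISC flags on the node.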
2927 */ 2928 int 2929 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2930 { 2931 struct lpfc_hba *phba = vport->phba; 2932 LPFC_MBOXQ_t *mbox; 2933 int rc; 2934 2935 if (ndlp->nlp_flag & NLP_RPI_VALID) { 2936 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2937 if (mbox) { 2938 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); 2939 mbox->vport = vport; 2940 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2941 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 2942 if (rc == MBX_NOT_FINISHED) 2943 mempool_free(mbox, phba->mbox_mem_pool); 2944 } 2945 lpfc_no_rpi(phba, ndlp); 2946 ndlp->nlp_rpi = 0; 2947 ndlp->nlp_flag &= ~NLP_RPI_VALID; 2948 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2949 return 1; 2950 } 2951 return 0; 2952 } 2953 2954 void 2955 lpfc_unreg_all_rpis(struct lpfc_vport *vport) 2956 { 2957 struct lpfc_hba *phba = vport->phba; 2958 LPFC_MBOXQ_t *mbox; 2959 int rc; 2960 2961 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2962 if (mbox) { 2963 lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); 2964 mbox->vport = vport; 2965 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2966 mbox->context1 = NULL; 2967 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2968 if (rc != MBX_TIMEOUT) 2969 mempool_free(mbox, phba->mbox_mem_pool); 2970 2971 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 2972 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 2973 "1836 Could not issue " 2974 "unreg_login(all_rpis) status %d\n", rc); 2975 } 2976 } 2977 2978 void 2979 lpfc_unreg_default_rpis(struct lpfc_vport *vport) 2980 { 2981 struct lpfc_hba *phba = vport->phba; 2982 LPFC_MBOXQ_t *mbox; 2983 int rc; 2984 2985 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2986 if (mbox) { 2987 lpfc_unreg_did(phba, vport->vpi, 0xffffffff, mbox); 2988 mbox->vport = vport; 2989 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2990 mbox->context1 = NULL; 2991 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 2992 if (rc != MBX_TIMEOUT) 2993 mempool_free(mbox, phba->mbox_mem_pool); 2994 2995 if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED)) 2996 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 2997 "1815 Could not issue " 2998 "unreg_did (default rpis) status %d\n", 2999 rc); 3000 } 3001 } 3002 3003 /* 3004 * Free resources associated with LPFC_NODELIST entry 3005 * so it can be freed. 
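 * This includes dequeuing (or disabling) the node, detaching it from any
 * pending REG_LOGIN mailbox commands, aborting outstanding ELS traffic,
 * cancelling the delayed-retry timer and unregistering the rpi.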
3006 */ 3007 static int 3008 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 3009 { 3010 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3011 struct lpfc_hba *phba = vport->phba; 3012 LPFC_MBOXQ_t *mb, *nextmb; 3013 struct lpfc_dmabuf *mp; 3014 3015 /* Cleanup node for NPort <nlp_DID> */ 3016 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3017 "0900 Cleanup node for NPort x%x " 3018 "Data: x%x x%x x%x\n", 3019 ndlp->nlp_DID, ndlp->nlp_flag, 3020 ndlp->nlp_state, ndlp->nlp_rpi); 3021 if (NLP_CHK_FREE_REQ(ndlp)) { 3022 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 3023 "0280 lpfc_cleanup_node: ndlp:x%p " 3024 "usgmap:x%x refcnt:%d\n", 3025 (void *)ndlp, ndlp->nlp_usg_map, 3026 atomic_read(&ndlp->kref.refcount)); 3027 lpfc_dequeue_node(vport, ndlp); 3028 } else { 3029 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 3030 "0281 lpfc_cleanup_node: ndlp:x%p " 3031 "usgmap:x%x refcnt:%d\n", 3032 (void *)ndlp, ndlp->nlp_usg_map, 3033 atomic_read(&ndlp->kref.refcount)); 3034 lpfc_disable_node(vport, ndlp); 3035 } 3036 3037 /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ 3038 if ((mb = phba->sli.mbox_active)) { 3039 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 3040 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3041 mb->context2 = NULL; 3042 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3043 } 3044 } 3045 3046 spin_lock_irq(&phba->hbalock); 3047 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 3048 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && 3049 (ndlp == (struct lpfc_nodelist *) mb->context2)) { 3050 mp = (struct lpfc_dmabuf *) (mb->context1); 3051 if (mp) { 3052 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 3053 kfree(mp); 3054 } 3055 list_del(&mb->list); 3056 mempool_free(mb, phba->mbox_mem_pool); 3057 /* We shall not invoke the lpfc_nlp_put to decrement 3058 * the ndlp reference count as we are in the process 3059 * of lpfc_nlp_release. 3060 */ 3061 } 3062 } 3063 spin_unlock_irq(&phba->hbalock); 3064 3065 lpfc_els_abort(phba, ndlp); 3066 3067 spin_lock_irq(shost->host_lock); 3068 ndlp->nlp_flag &= ~NLP_DELAY_TMO; 3069 spin_unlock_irq(shost->host_lock); 3070 3071 ndlp->nlp_last_elscmd = 0; 3072 del_timer_sync(&ndlp->nlp_delayfunc); 3073 3074 list_del_init(&ndlp->els_retry_evt.evt_listp); 3075 list_del_init(&ndlp->dev_loss_evt.evt_listp); 3076 3077 lpfc_unreg_rpi(vport, ndlp); 3078 3079 return 0; 3080 } 3081 3082 /* 3083 * Check to see if we can free the nlp back to the freelist. 3084 * If we are in the middle of using the nlp in the discovery state 3085 * machine, defer the free till we reach the end of the state machine. 3086 */ 3087 static void 3088 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 3089 { 3090 struct lpfc_hba *phba = vport->phba; 3091 struct lpfc_rport_data *rdata; 3092 LPFC_MBOXQ_t *mbox; 3093 int rc; 3094 3095 lpfc_cancel_retry_delay_tmo(vport, ndlp); 3096 if ((ndlp->nlp_flag & NLP_DEFER_RM) && 3097 !(ndlp->nlp_flag & NLP_RPI_VALID)) { 3098 /* For this case we need to cleanup the default rpi 3099 * allocated by the firmware. 
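		 * This is done by issuing a REG_LOGIN for the node with the
		 * LPFC_MBX_IMED_UNREG flag set and lpfc_mbx_cmpl_dflt_rpi as
		 * the completion handler.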
3100 */ 3101 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) 3102 != NULL) { 3103 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, 3104 (uint8_t *) &vport->fc_sparam, mbox, 0); 3105 if (rc) { 3106 mempool_free(mbox, phba->mbox_mem_pool); 3107 } 3108 else { 3109 mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; 3110 mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 3111 mbox->vport = vport; 3112 mbox->context2 = NULL; 3113 rc =lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3114 if (rc == MBX_NOT_FINISHED) { 3115 mempool_free(mbox, phba->mbox_mem_pool); 3116 } 3117 } 3118 } 3119 } 3120 lpfc_cleanup_node(vport, ndlp); 3121 3122 /* 3123 * We can get here with a non-NULL ndlp->rport because when we 3124 * unregister a rport we don't break the rport/node linkage. So if we 3125 * do, make sure we don't leaving any dangling pointers behind. 3126 */ 3127 if (ndlp->rport) { 3128 rdata = ndlp->rport->dd_data; 3129 rdata->pnode = NULL; 3130 ndlp->rport = NULL; 3131 } 3132 } 3133 3134 static int 3135 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3136 uint32_t did) 3137 { 3138 D_ID mydid, ndlpdid, matchdid; 3139 3140 if (did == Bcast_DID) 3141 return 0; 3142 3143 /* First check for Direct match */ 3144 if (ndlp->nlp_DID == did) 3145 return 1; 3146 3147 /* Next check for area/domain identically equals 0 match */ 3148 mydid.un.word = vport->fc_myDID; 3149 if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) { 3150 return 0; 3151 } 3152 3153 matchdid.un.word = did; 3154 ndlpdid.un.word = ndlp->nlp_DID; 3155 if (matchdid.un.b.id == ndlpdid.un.b.id) { 3156 if ((mydid.un.b.domain == matchdid.un.b.domain) && 3157 (mydid.un.b.area == matchdid.un.b.area)) { 3158 if ((ndlpdid.un.b.domain == 0) && 3159 (ndlpdid.un.b.area == 0)) { 3160 if (ndlpdid.un.b.id) 3161 return 1; 3162 } 3163 return 0; 3164 } 3165 3166 matchdid.un.word = ndlp->nlp_DID; 3167 if ((mydid.un.b.domain == ndlpdid.un.b.domain) && 3168 (mydid.un.b.area == ndlpdid.un.b.area)) { 3169 if ((matchdid.un.b.domain == 0) && 3170 (matchdid.un.b.area == 0)) { 3171 if (matchdid.un.b.id) 3172 return 1; 3173 } 3174 } 3175 } 3176 return 0; 3177 } 3178 3179 /* Search for a nodelist entry */ 3180 static struct lpfc_nodelist * 3181 __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 3182 { 3183 struct lpfc_nodelist *ndlp; 3184 uint32_t data1; 3185 3186 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3187 if (lpfc_matchdid(vport, ndlp, did)) { 3188 data1 = (((uint32_t) ndlp->nlp_state << 24) | 3189 ((uint32_t) ndlp->nlp_xri << 16) | 3190 ((uint32_t) ndlp->nlp_type << 8) | 3191 ((uint32_t) ndlp->nlp_rpi & 0xff)); 3192 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3193 "0929 FIND node DID " 3194 "Data: x%p x%x x%x x%x\n", 3195 ndlp, ndlp->nlp_DID, 3196 ndlp->nlp_flag, data1); 3197 return ndlp; 3198 } 3199 } 3200 3201 /* FIND node did <did> NOT FOUND */ 3202 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3203 "0932 FIND node did x%x NOT FOUND.\n", did); 3204 return NULL; 3205 } 3206 3207 struct lpfc_nodelist * 3208 lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) 3209 { 3210 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3211 struct lpfc_nodelist *ndlp; 3212 3213 spin_lock_irq(shost->host_lock); 3214 ndlp = __lpfc_findnode_did(vport, did); 3215 spin_unlock_irq(shost->host_lock); 3216 return ndlp; 3217 } 3218 3219 struct lpfc_nodelist * 3220 lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) 3221 { 3222 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3223 struct lpfc_nodelist *ndlp; 3224 3225 ndlp = 
lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}

void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
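	 * CLEAR_LA is also skipped on SLI-4 ports (sli_rev == LPFC_SLI_REV4).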
3339 */ 3340 if ((phba->link_state >= LPFC_CLEAR_LA) || 3341 (vport->port_type != LPFC_PHYSICAL_PORT) || 3342 (phba->sli_rev == LPFC_SLI_REV4)) 3343 return; 3344 3345 /* Link up discovery */ 3346 if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { 3347 phba->link_state = LPFC_CLEAR_LA; 3348 lpfc_clear_la(phba, mbox); 3349 mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la; 3350 mbox->vport = vport; 3351 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3352 if (rc == MBX_NOT_FINISHED) { 3353 mempool_free(mbox, phba->mbox_mem_pool); 3354 lpfc_disc_flush_list(vport); 3355 extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 3356 fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 3357 next_ring->flag &= ~LPFC_STOP_IOCB_EVENT; 3358 phba->link_state = LPFC_HBA_ERROR; 3359 } 3360 } 3361 } 3362 3363 /* Reg_vpi to tell firmware to resume normal operations */ 3364 void 3365 lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) 3366 { 3367 LPFC_MBOXQ_t *regvpimbox; 3368 3369 regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3370 if (regvpimbox) { 3371 lpfc_reg_vpi(vport, regvpimbox); 3372 regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; 3373 regvpimbox->vport = vport; 3374 if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) 3375 == MBX_NOT_FINISHED) { 3376 mempool_free(regvpimbox, phba->mbox_mem_pool); 3377 } 3378 } 3379 } 3380 3381 /* Start Link up / RSCN discovery on NPR nodes */ 3382 void 3383 lpfc_disc_start(struct lpfc_vport *vport) 3384 { 3385 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3386 struct lpfc_hba *phba = vport->phba; 3387 uint32_t num_sent; 3388 uint32_t clear_la_pending; 3389 int did_changed; 3390 3391 if (!lpfc_is_link_up(phba)) 3392 return; 3393 3394 if (phba->link_state == LPFC_CLEAR_LA) 3395 clear_la_pending = 1; 3396 else 3397 clear_la_pending = 0; 3398 3399 if (vport->port_state < LPFC_VPORT_READY) 3400 vport->port_state = LPFC_DISC_AUTH; 3401 3402 lpfc_set_disctmo(vport); 3403 3404 if (vport->fc_prevDID == vport->fc_myDID) 3405 did_changed = 0; 3406 else 3407 did_changed = 1; 3408 3409 vport->fc_prevDID = vport->fc_myDID; 3410 vport->num_disc_nodes = 0; 3411 3412 /* Start Discovery state <hba_state> */ 3413 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 3414 "0202 Start Discovery hba state x%x " 3415 "Data: x%x x%x x%x\n", 3416 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, 3417 vport->fc_adisc_cnt); 3418 3419 /* First do ADISCs - if any */ 3420 num_sent = lpfc_els_disc_adisc(vport); 3421 3422 if (num_sent) 3423 return; 3424 3425 /* 3426 * For SLI3, cmpl_reg_vpi will set port_state to READY, and 3427 * continue discovery. 3428 */ 3429 if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && 3430 !(vport->fc_flag & FC_PT2PT) && 3431 !(vport->fc_flag & FC_RSCN_MODE) && 3432 (phba->sli_rev < LPFC_SLI_REV4)) { 3433 lpfc_issue_reg_vpi(phba, vport); 3434 return; 3435 } 3436 3437 /* 3438 * For SLI2, we need to set port_state to READY and continue 3439 * discovery. 
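	 * On that path CLEAR_LA is issued for the physical port and, unless
	 * discovery is being aborted, PLOGIs go out to any remaining NPR
	 * nodes before the port is marked ready.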
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 * Ignore completion for all IOCBs on the tx and txcmpl queues of the ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
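	 * Matching txq entries are moved to a local completions list, while
	 * matching txcmplq entries must be aborted with
	 * lpfc_sli_issue_abort_iotag().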
3502 */ 3503 spin_lock_irq(&phba->hbalock); 3504 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 3505 if (iocb->context1 != ndlp) { 3506 continue; 3507 } 3508 icmd = &iocb->iocb; 3509 if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) || 3510 (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) { 3511 3512 list_move_tail(&iocb->list, &completions); 3513 pring->txq_cnt--; 3514 } 3515 } 3516 3517 /* Next check the txcmplq */ 3518 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { 3519 if (iocb->context1 != ndlp) { 3520 continue; 3521 } 3522 icmd = &iocb->iocb; 3523 if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR || 3524 icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) { 3525 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3526 } 3527 } 3528 spin_unlock_irq(&phba->hbalock); 3529 3530 /* Cancel all the IOCBs from the completions list */ 3531 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3532 IOERR_SLI_ABORTED); 3533 } 3534 3535 static void 3536 lpfc_disc_flush_list(struct lpfc_vport *vport) 3537 { 3538 struct lpfc_nodelist *ndlp, *next_ndlp; 3539 struct lpfc_hba *phba = vport->phba; 3540 3541 if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) { 3542 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3543 nlp_listp) { 3544 if (!NLP_CHK_NODE_ACT(ndlp)) 3545 continue; 3546 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || 3547 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) { 3548 lpfc_free_tx(phba, ndlp); 3549 } 3550 } 3551 } 3552 } 3553 3554 void 3555 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) 3556 { 3557 lpfc_els_flush_rscn(vport); 3558 lpfc_els_flush_cmd(vport); 3559 lpfc_disc_flush_list(vport); 3560 } 3561 3562 /*****************************************************************************/ 3563 /* 3564 * NAME: lpfc_disc_timeout 3565 * 3566 * FUNCTION: Fibre Channel driver discovery timeout routine. 
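 *           The timer callback itself only posts a WORKER_DISC_TMO event
 *           and wakes the worker thread; the actual timeout processing is
 *           done by lpfc_disc_timeout_handler().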
3567 * 3568 * EXECUTION ENVIRONMENT: interrupt only 3569 * 3570 * CALLED FROM: 3571 * Timer function 3572 * 3573 * RETURNS: 3574 * none 3575 */ 3576 /*****************************************************************************/ 3577 void 3578 lpfc_disc_timeout(unsigned long ptr) 3579 { 3580 struct lpfc_vport *vport = (struct lpfc_vport *) ptr; 3581 struct lpfc_hba *phba = vport->phba; 3582 uint32_t tmo_posted; 3583 unsigned long flags = 0; 3584 3585 if (unlikely(!phba)) 3586 return; 3587 3588 spin_lock_irqsave(&vport->work_port_lock, flags); 3589 tmo_posted = vport->work_port_events & WORKER_DISC_TMO; 3590 if (!tmo_posted) 3591 vport->work_port_events |= WORKER_DISC_TMO; 3592 spin_unlock_irqrestore(&vport->work_port_lock, flags); 3593 3594 if (!tmo_posted) 3595 lpfc_worker_wake_up(phba); 3596 return; 3597 } 3598 3599 static void 3600 lpfc_disc_timeout_handler(struct lpfc_vport *vport) 3601 { 3602 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3603 struct lpfc_hba *phba = vport->phba; 3604 struct lpfc_sli *psli = &phba->sli; 3605 struct lpfc_nodelist *ndlp, *next_ndlp; 3606 LPFC_MBOXQ_t *initlinkmbox; 3607 int rc, clrlaerr = 0; 3608 3609 if (!(vport->fc_flag & FC_DISC_TMO)) 3610 return; 3611 3612 spin_lock_irq(shost->host_lock); 3613 vport->fc_flag &= ~FC_DISC_TMO; 3614 spin_unlock_irq(shost->host_lock); 3615 3616 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 3617 "disc timeout: state:x%x rtry:x%x flg:x%x", 3618 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 3619 3620 switch (vport->port_state) { 3621 3622 case LPFC_LOCAL_CFG_LINK: 3623 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for 3624 * FAN 3625 */ 3626 /* FAN timeout */ 3627 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 3628 "0221 FAN timeout\n"); 3629 /* Start discovery by sending FLOGI, clean up old rpis */ 3630 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 3631 nlp_listp) { 3632 if (!NLP_CHK_NODE_ACT(ndlp)) 3633 continue; 3634 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 3635 continue; 3636 if (ndlp->nlp_type & NLP_FABRIC) { 3637 /* Clean up the ndlp on Fabric connections */ 3638 lpfc_drop_node(vport, ndlp); 3639 3640 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 3641 /* Fail outstanding IO now since device 3642 * is marked for PLOGI. 3643 */ 3644 lpfc_unreg_rpi(vport, ndlp); 3645 } 3646 } 3647 if (vport->port_state != LPFC_FLOGI) { 3648 lpfc_initial_flogi(vport); 3649 return; 3650 } 3651 break; 3652 3653 case LPFC_FDISC: 3654 case LPFC_FLOGI: 3655 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 3656 /* Initial FLOGI timeout */ 3657 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 3658 "0222 Initial %s timeout\n", 3659 vport->vpi ? "FDISC" : "FLOGI"); 3660 3661 /* Assume no Fabric and go on with discovery. 3662 * Check for outstanding ELS FLOGI to abort. 
3663 */ 3664 3665 /* FLOGI failed, so just use loop map to make discovery list */ 3666 lpfc_disc_list_loopmap(vport); 3667 3668 /* Start discovery */ 3669 lpfc_disc_start(vport); 3670 break; 3671 3672 case LPFC_FABRIC_CFG_LINK: 3673 /* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for 3674 NameServer login */ 3675 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 3676 "0223 Timeout while waiting for " 3677 "NameServer login\n"); 3678 /* Next look for NameServer ndlp */ 3679 ndlp = lpfc_findnode_did(vport, NameServer_DID); 3680 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 3681 lpfc_els_abort(phba, ndlp); 3682 3683 /* ReStart discovery */ 3684 goto restart_disc; 3685 3686 case LPFC_NS_QRY: 3687 /* Check for wait for NameServer Rsp timeout */ 3688 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 3689 "0224 NameServer Query timeout " 3690 "Data: x%x x%x\n", 3691 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 3692 3693 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 3694 /* Try it one more time */ 3695 vport->fc_ns_retry++; 3696 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 3697 vport->fc_ns_retry, 0); 3698 if (rc == 0) 3699 break; 3700 } 3701 vport->fc_ns_retry = 0; 3702 3703 restart_disc: 3704 /* 3705 * Discovery is over. 3706 * set port_state to PORT_READY if SLI2. 3707 * cmpl_reg_vpi will set port_state to READY for SLI3. 3708 */ 3709 if (phba->sli_rev < LPFC_SLI_REV4) { 3710 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 3711 lpfc_issue_reg_vpi(phba, vport); 3712 else { /* NPIV Not enabled */ 3713 lpfc_issue_clear_la(phba, vport); 3714 vport->port_state = LPFC_VPORT_READY; 3715 } 3716 } 3717 3718 /* Setup and issue mailbox INITIALIZE LINK command */ 3719 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3720 if (!initlinkmbox) { 3721 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 3722 "0206 Device Discovery " 3723 "completion error\n"); 3724 phba->link_state = LPFC_HBA_ERROR; 3725 break; 3726 } 3727 3728 lpfc_linkdown(phba); 3729 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 3730 phba->cfg_link_speed); 3731 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 3732 initlinkmbox->vport = vport; 3733 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3734 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 3735 lpfc_set_loopback_flag(phba); 3736 if (rc == MBX_NOT_FINISHED) 3737 mempool_free(initlinkmbox, phba->mbox_mem_pool); 3738 3739 break; 3740 3741 case LPFC_DISC_AUTH: 3742 /* Node Authentication timeout */ 3743 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 3744 "0227 Node Authentication timeout\n"); 3745 lpfc_disc_flush_list(vport); 3746 3747 /* 3748 * set port_state to PORT_READY if SLI2. 3749 * cmpl_reg_vpi will set port_state to READY for SLI3. 
		 */
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
				lpfc_issue_reg_vpi(phba, vport);
			else {	/* NPIV Not enabled */
				lpfc_issue_clear_la(phba, vport);
				vport->port_state = LPFC_VPORT_READY;
			}
		}
		break;

	case LPFC_VPORT_READY:
		if (vport->fc_flag & FC_RSCN_MODE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
					 "0231 RSCN timeout Data: x%x "
					 "x%x\n",
					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);

			/* Cleanup any outstanding ELS commands */
			lpfc_els_flush_cmd(vport);

			lpfc_els_flush_rscn(vport);
			lpfc_disc_flush_list(vport);
		}
		break;

	default:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0273 Unexpected discovery timeout, "
				 "vport State x%x\n", vport->port_state);
		break;
	}

	switch (phba->link_state) {
	case LPFC_CLEAR_LA:
		/* CLEAR LA timeout */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0228 CLEAR LA timeout\n");
		clrlaerr = 1;
		break;

	case LPFC_LINK_UP:
		lpfc_issue_clear_la(phba, vport);
		/* Drop thru */
	case LPFC_LINK_UNKNOWN:
	case LPFC_WARM_START:
	case LPFC_INIT_START:
	case LPFC_INIT_MBX_CMDS:
	case LPFC_LINK_DOWN:
	case LPFC_HBA_ERROR:
		lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
				 "0230 Unexpected timeout, hba link "
				 "state x%x\n", phba->link_state);
		clrlaerr = 1;
		break;

	case LPFC_HBA_READY:
		break;
	}

	if (clrlaerr) {
		lpfc_disc_flush_list(vport);
		psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
		vport->port_state = LPFC_VPORT_READY;
	}

	return;
}

/*
 * This routine handles processing a REG_LOGIN mailbox command for the
 * FDMI server upon completion. It is setup in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
void
lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb = &pmb->u.mb;
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
	struct lpfc_vport *vport = pmb->vport;

	pmb->context1 = NULL;

	ndlp->nlp_rpi = mb->un.varWords[0];
	ndlp->nlp_flag |= NLP_RPI_VALID;
	ndlp->nlp_type |= NLP_FABRIC;
	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);

	/*
	 * Start issuing the Fabric-Device Management Interface (FDMI) command
	 * to 0xfffffa (the FDMI well known port), or delay issuing the FDMI
	 * command if fdmi-on=2 (supporting RPA/hostname)
	 */

	if (vport->cfg_fdmi_on == 1)
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
	else
		mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);

	/* decrement the node reference count held for this callback
	 * function.
3855 */ 3856 lpfc_nlp_put(ndlp); 3857 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3858 kfree(mp); 3859 mempool_free(pmb, phba->mbox_mem_pool); 3860 3861 return; 3862 } 3863 3864 static int 3865 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) 3866 { 3867 uint16_t *rpi = param; 3868 3869 return ndlp->nlp_rpi == *rpi; 3870 } 3871 3872 static int 3873 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) 3874 { 3875 return memcmp(&ndlp->nlp_portname, param, 3876 sizeof(ndlp->nlp_portname)) == 0; 3877 } 3878 3879 static struct lpfc_nodelist * 3880 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 3881 { 3882 struct lpfc_nodelist *ndlp; 3883 3884 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 3885 if (filter(ndlp, param)) 3886 return ndlp; 3887 } 3888 return NULL; 3889 } 3890 3891 /* 3892 * This routine looks up the ndlp lists for the given RPI. If rpi found it 3893 * returns the node list element pointer else return NULL. 3894 */ 3895 struct lpfc_nodelist * 3896 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 3897 { 3898 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); 3899 } 3900 3901 /* 3902 * This routine looks up the ndlp lists for the given WWPN. If WWPN found it 3903 * returns the node element list pointer else return NULL. 3904 */ 3905 struct lpfc_nodelist * 3906 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) 3907 { 3908 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3909 struct lpfc_nodelist *ndlp; 3910 3911 spin_lock_irq(shost->host_lock); 3912 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); 3913 spin_unlock_irq(shost->host_lock); 3914 return ndlp; 3915 } 3916 3917 void 3918 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3919 uint32_t did) 3920 { 3921 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 3922 3923 lpfc_initialize_node(vport, ndlp, did); 3924 INIT_LIST_HEAD(&ndlp->nlp_listp); 3925 3926 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 3927 "node init: did:x%x", 3928 ndlp->nlp_DID, 0, 0); 3929 3930 return; 3931 } 3932 3933 /* This routine releases all resources associated with a specifc NPort's ndlp 3934 * and mempool_free's the nodelist. 3935 */ 3936 static void 3937 lpfc_nlp_release(struct kref *kref) 3938 { 3939 struct lpfc_hba *phba; 3940 unsigned long flags; 3941 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 3942 kref); 3943 3944 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3945 "node release: did:x%x flg:x%x type:x%x", 3946 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3947 3948 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3949 "0279 lpfc_nlp_release: ndlp:x%p " 3950 "usgmap:x%x refcnt:%d\n", 3951 (void *)ndlp, ndlp->nlp_usg_map, 3952 atomic_read(&ndlp->kref.refcount)); 3953 3954 /* remove ndlp from action. */ 3955 lpfc_nlp_remove(ndlp->vport, ndlp); 3956 3957 /* clear the ndlp active flag for all release cases */ 3958 phba = ndlp->phba; 3959 spin_lock_irqsave(&phba->ndlp_lock, flags); 3960 NLP_CLR_NODE_ACT(ndlp); 3961 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3962 3963 /* free ndlp memory for final ndlp release */ 3964 if (NLP_CHK_FREE_REQ(ndlp)) { 3965 kfree(ndlp->lat_data); 3966 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); 3967 } 3968 } 3969 3970 /* This routine bumps the reference count for a ndlp structure to ensure 3971 * that one discovery thread won't free a ndlp while another discovery thread 3972 * is using it. 
3973 */ 3974 struct lpfc_nodelist * 3975 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 3976 { 3977 struct lpfc_hba *phba; 3978 unsigned long flags; 3979 3980 if (ndlp) { 3981 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 3982 "node get: did:x%x flg:x%x refcnt:x%x", 3983 ndlp->nlp_DID, ndlp->nlp_flag, 3984 atomic_read(&ndlp->kref.refcount)); 3985 /* The check of ndlp usage to prevent incrementing the 3986 * ndlp reference count that is in the process of being 3987 * released. 3988 */ 3989 phba = ndlp->phba; 3990 spin_lock_irqsave(&phba->ndlp_lock, flags); 3991 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { 3992 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 3993 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 3994 "0276 lpfc_nlp_get: ndlp:x%p " 3995 "usgmap:x%x refcnt:%d\n", 3996 (void *)ndlp, ndlp->nlp_usg_map, 3997 atomic_read(&ndlp->kref.refcount)); 3998 return NULL; 3999 } else 4000 kref_get(&ndlp->kref); 4001 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4002 } 4003 return ndlp; 4004 } 4005 4006 /* This routine decrements the reference count for a ndlp structure. If the 4007 * count goes to 0, this indicates the the associated nodelist should be 4008 * freed. Returning 1 indicates the ndlp resource has been released; on the 4009 * other hand, returning 0 indicates the ndlp resource has not been released 4010 * yet. 4011 */ 4012 int 4013 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 4014 { 4015 struct lpfc_hba *phba; 4016 unsigned long flags; 4017 4018 if (!ndlp) 4019 return 1; 4020 4021 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 4022 "node put: did:x%x flg:x%x refcnt:x%x", 4023 ndlp->nlp_DID, ndlp->nlp_flag, 4024 atomic_read(&ndlp->kref.refcount)); 4025 phba = ndlp->phba; 4026 spin_lock_irqsave(&phba->ndlp_lock, flags); 4027 /* Check the ndlp memory free acknowledge flag to avoid the 4028 * possible race condition that kref_put got invoked again 4029 * after previous one has done ndlp memory free. 4030 */ 4031 if (NLP_CHK_FREE_ACK(ndlp)) { 4032 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4033 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 4034 "0274 lpfc_nlp_put: ndlp:x%p " 4035 "usgmap:x%x refcnt:%d\n", 4036 (void *)ndlp, ndlp->nlp_usg_map, 4037 atomic_read(&ndlp->kref.refcount)); 4038 return 1; 4039 } 4040 /* Check the ndlp inactivate log flag to avoid the possible 4041 * race condition that kref_put got invoked again after ndlp 4042 * is already in inactivating state. 4043 */ 4044 if (NLP_CHK_IACT_REQ(ndlp)) { 4045 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4046 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 4047 "0275 lpfc_nlp_put: ndlp:x%p " 4048 "usgmap:x%x refcnt:%d\n", 4049 (void *)ndlp, ndlp->nlp_usg_map, 4050 atomic_read(&ndlp->kref.refcount)); 4051 return 1; 4052 } 4053 /* For last put, mark the ndlp usage flags to make sure no 4054 * other kref_get and kref_put on the same ndlp shall get 4055 * in between the process when the final kref_put has been 4056 * invoked on this ndlp. 4057 */ 4058 if (atomic_read(&ndlp->kref.refcount) == 1) { 4059 /* Indicate ndlp is put to inactive state. */ 4060 NLP_SET_IACT_REQ(ndlp); 4061 /* Acknowledge ndlp memory free has been seen. 
		 */
		if (NLP_CHK_FREE_REQ(ndlp))
			NLP_SET_FREE_ACK(ndlp);
	}
	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
	/* Note: kref_put() returns 1 when it decrements a reference count
	 * that was 1; it invokes the release callback function, but it
	 * still leaves the reference count at 1 (it does not actually
	 * perform the final decrement). Otherwise, it actually decrements
	 * the reference count and returns 0.
	 */
	return kref_put(&ndlp->kref, lpfc_nlp_release);
}

/* This routine frees the specified nodelist if it is not in use
 * by any other discovery thread. It returns 1 if the ndlp has
 * been freed; a return value of 0 indicates the ndlp has not
 * been released yet.
 */
int
lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
{
	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
		"node not used: did:x%x flg:x%x refcnt:x%x",
		ndlp->nlp_DID, ndlp->nlp_flag,
		atomic_read(&ndlp->kref.refcount));
	if (atomic_read(&ndlp->kref.refcount) == 1)
		if (lpfc_nlp_put(ndlp))
			return 1;
	return 0;
}

/**
 * lpfc_fcf_inuse - Check if FCF can be unregistered.
 * @phba: Pointer to hba context object.
 *
 * This function iterates through all FC nodes associated
 * with all vports to check if there is any node with an
 * fc_rport associated with it. If there is an fc_rport
 * associated with the node, then the node is either in
 * discovered state or its devloss_timer is pending.
 */
static int
lpfc_fcf_inuse(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i, ret = 0;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;

	vports = lpfc_create_vport_work_array(phba);

	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
			    (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
				ret = 1;
				spin_unlock_irq(shost->host_lock);
				goto out;
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
out:
	lpfc_destroy_vport_work_array(phba, vports);
	return ret;
}

/**
 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2555 UNREG_VFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
 * @phba: Pointer to hba context object.
 * @mboxq: Pointer to mailbox object.
 *
 * This function frees memory associated with the mailbox command.
 */
static void
lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2550 UNREG_FCFI mbxStatus error x%x "
			"HBA state x%x\n",
			mboxq->u.mb.mbxStatus, vport->port_state);
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
 * @phba: Pointer to hba context object.
 *
 * This function checks whether any remote ports are still connected through
 * the FCF and, if all the devices are disconnected, unregisters the FCFI.
 * This function also tries to use another FCF for discovery.
 */
void
lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int rc;
	struct lpfc_vport **vports;
	int i;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Do nothing if the HBA is not running in FIP mode, does not
	 * support FCoE, or does not have an FCF registered.
	 */
	if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
		!(phba->fcf.fcf_flag & FCF_REGISTERED) ||
		(phba->cfg_enable_fip == 0)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	if (lpfc_fcf_inuse(phba))
		return;

	/* Unregister VPIs */
	vports = lpfc_create_vport_work_array(phba);
	if (vports &&
		(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_mbx_unreg_vpi(vports[i]);
			vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
			vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Unregister VFI */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2556 UNREG_VFI mbox allocation failed "
			"HBA state x%x\n",
			phba->pport->port_state);
		return;
	}

	lpfc_unreg_vfi(mbox, phba->pport->vfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2557 UNREG_VFI issue mbox failed rc x%x "
			"HBA state x%x\n",
			rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* Unregister FCF */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2551 UNREG_FCFI mbox allocation failed "
			"HBA state x%x\n",
			phba->pport->port_state);
		return;
	}

	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2552 UNREG_FCFI issue mbox failed rc x%x "
			"HBA state x%x\n",
			rc, phba->pport->port_state);
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
		FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
		FCF_VALID_VLAN);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * If the driver is not unloading, check if there is any other
	 * FCF record that can be used for discovery.
	 */
	if ((phba->pport->load_flag & FC_UNLOADING) ||
		(phba->link_state < LPFC_LINK_UP))
		return;

	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);

	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
			"2553 lpfc_unregister_unused_fcf failed to read FCF"
			" record HBA state x%x\n",
			phba->pport->port_state);
}

/**
 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCF connection table as in the config
 * region.
 *
 * This function creates the driver data structures for the FCF connection
 * record table read from config region 23.
 */
static void
lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
	uint8_t *buff)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
	struct lpfc_fcf_conn_hdr *conn_hdr;
	struct lpfc_fcf_conn_rec *conn_rec;
	uint32_t record_count;
	int i;

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
		&phba->fcf_conn_rec_list, list) {
		/* Unlink before freeing so the list head stays valid */
		list_del(&conn_entry->list);
		kfree(conn_entry);
	}

	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
	record_count = conn_hdr->length * sizeof(uint32_t)/
		sizeof(struct lpfc_fcf_conn_rec);

	conn_rec = (struct lpfc_fcf_conn_rec *)
		(buff + sizeof(struct lpfc_fcf_conn_hdr));

	for (i = 0; i < record_count; i++) {
		if (!(conn_rec[i].flags & FCFCNCT_VALID))
			continue;
		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
			GFP_KERNEL);
		if (!conn_entry) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2566 Failed to allocate connection"
				" table entry\n");
			return;
		}

		memcpy(&conn_entry->conn_rec, &conn_rec[i],
			sizeof(struct lpfc_fcf_conn_rec));
		conn_entry->conn_rec.vlan_tag =
			le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
		conn_entry->conn_rec.flags =
			le16_to_cpu(conn_entry->conn_rec.flags);
		list_add_tail(&conn_entry->list,
			&phba->fcf_conn_rec_list);
	}
}

/**
 * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
 * @phba: Pointer to hba context object.
 * @buff: Buffer containing the FCoE parameter data structure.
 *
 * This function updates the driver data structure with the config
 * parameters read from config region 23.
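 *
 * (Assumed layout of @buff, matching the parsing below: a struct
 * lpfc_fip_param_hdr immediately followed by a struct lpfc_fcoe_params
 * record carrying the FIP mode, an optional VLAN tag and the FC map bytes.)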
 */
static void
lpfc_read_fcoe_param(struct lpfc_hba *phba,
			uint8_t *buff)
{
	struct lpfc_fip_param_hdr *fcoe_param_hdr;
	struct lpfc_fcoe_params *fcoe_param;

	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
		buff;
	/* The parameter block immediately follows the FIP param header;
	 * the offset is in bytes, so advance buff before casting.
	 */
	fcoe_param = (struct lpfc_fcoe_params *)
		(buff + sizeof(struct lpfc_fip_param_hdr));

	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
		return;

	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
			FIPP_MODE_ON)
		phba->cfg_enable_fip = 1;

	if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
			FIPP_MODE_OFF)
		phba->cfg_enable_fip = 0;

	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
		phba->valid_vlan = 1;
		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
			0xFFF;
	}

	phba->fc_map[0] = fcoe_param->fc_map[0];
	phba->fc_map[1] = fcoe_param->fc_map[1];
	phba->fc_map[2] = fcoe_param->fc_map[2];
	return;
}

/**
 * lpfc_get_rec_conf23 - Get a record type in config region data.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 * @rec_type: Record type to be searched.
 *
 * This function searches the config region data to find the beginning
 * of the record specified by @rec_type. If the record is found, this
 * function returns a pointer to the record; otherwise it returns NULL.
 */
static uint8_t *
lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
{
	uint32_t offset = 0, rec_length;

	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
		(size < sizeof(uint32_t)))
		return NULL;

	rec_length = buff[offset + 1];

	/*
	 * One TLV record has a one word header and the number of data words
	 * specified in the rec_length field of the record header.
	 */
	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
		<= size) {
		if (buff[offset] == rec_type)
			return &buff[offset];

		if (buff[offset] == LPFC_REGION23_LAST_REC)
			return NULL;

		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
		rec_length = buff[offset + 1];
	}
	return NULL;
}

/**
 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
 * @phba: Pointer to lpfc_hba data structure.
 * @buff: Buffer containing config region 23 data.
 * @size: Size of the data buffer.
 *
 * This function parses the FCoE config parameters in config region 23 and
 * populates the driver data structures with the parameters.
 */
void
lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
			uint8_t *buff,
			uint32_t size)
{
	uint32_t offset = 0, rec_length;
	uint8_t *rec_ptr;

	/*
	 * If the data size is less than 2 words, the signature and version
	 * cannot be verified.
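	 *
	 * (Region 23 layout as assumed by the code below: a 4-byte signature,
	 * a 4-byte version word, then TLV records, each with a one-word
	 * header whose second byte gives the record length in words.)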
	 */
	if (size < 2*sizeof(uint32_t))
		return;

	/* Check the region signature first */
	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2567 Config region 23 has bad signature\n");
		return;
	}

	offset += 4;

	/* Check the data structure version */
	if (buff[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2568 Config region 23 has bad version\n");
		return;
	}
	offset += 4;

	rec_length = buff[offset + 1];

	/* Read FCoE param record */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_PARAM_TYPE);
	if (rec_ptr)
		lpfc_read_fcoe_param(phba, rec_ptr);

	/* Read FCF connection table */
	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
		size - offset, FCOE_CONN_TBL_TYPE);
	if (rec_ptr)
		lpfc_read_fcf_conn_tbl(phba, rec_ptr);

}