/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
 *
 */

#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/thread.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/promif.h>
#include <sys/nvpair.h>
#include <sys/byteorder.h>
#include <sys/scsi/scsi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/impl/fc_fcaif.h>
#include <sys/fibre-channel/impl/fctl_private.h>
#include <sys/fibre-channel/impl/fc_portif.h>
#include <sys/fibre-channel/impl/fp.h>

/* These are defined in fctl.c! */
extern int did_table_size;
extern int pwwn_table_size;

static struct cb_ops fp_cb_ops = {
	fp_open,			/* open */
	fp_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	fp_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* chpoll */
	ddi_prop_op,			/* cb_prop_op */
	0,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,				/* rev */
	nodev,				/* aread */
	nodev				/* awrite */
};

static struct dev_ops fp_ops = {
	DEVO_REV,			/* build revision */
	0,				/* reference count */
	fp_getinfo,			/* getinfo */
	nulldev,			/* identify - Obsoleted */
	nulldev,			/* probe */
	fp_attach,			/* attach */
	fp_detach,			/* detach */
	nodev,				/* reset */
	&fp_cb_ops,			/* cb_ops */
	NULL,				/* bus_ops */
	fp_power,			/* power */
	ddi_quiesce_not_needed		/* quiesce */
};

#define	FP_VERSION		"20091123-1.101"
#define	FP_NAME_VERSION		"SunFC Port v" FP_VERSION

char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
	&mod_driverops,			/* Type of Module */
	FP_NAME_VERSION,		/* Name/Version of fp */
	&fp_ops				/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,			/* Rev of the loadable modules system */
	&modldrv,			/* NULL terminated list of */
	NULL				/* Linkage structures */
};



static uint16_t ns_reg_cmds[] = {
	NS_RPN_ID,
	NS_RNN_ID,
	NS_RCS_ID,
	NS_RFT_ID,
	NS_RPT_ID,
	NS_RSPN_ID,
	NS_RSNN_NN
};

struct fp_xlat {
	uchar_t	xlat_state;
	int	xlat_rval;
} fp_xlat [] = {
	{ FC_PKT_SUCCESS, FC_SUCCESS },
	{ FC_PKT_REMOTE_STOP, FC_FAILURE },
	{ FC_PKT_LOCAL_RJT, FC_FAILURE },
	{ FC_PKT_NPORT_RJT, FC_ELS_PREJECT },
	{ FC_PKT_FABRIC_RJT, FC_ELS_FREJECT },
	{ FC_PKT_LOCAL_BSY, FC_TRAN_BUSY },
	{ FC_PKT_TRAN_BSY, FC_TRAN_BUSY },
	{ FC_PKT_NPORT_BSY, FC_PBUSY },
	{ FC_PKT_FABRIC_BSY, FC_FBUSY },
	{ FC_PKT_LS_RJT, FC_FAILURE },
	{ FC_PKT_BA_RJT, FC_FAILURE },
	{ FC_PKT_TIMEOUT, FC_FAILURE },
	{ FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR },
	{ FC_PKT_FAILURE, FC_FAILURE },
	{ FC_PKT_PORT_OFFLINE, FC_OFFLINE }
};
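/*
 * The fp_xlat table above maps FCA packet completion states to the FC
 * return codes handed back to callers.  A minimal lookup sketch
 * (illustrative only; the helper name fp_state_to_rval is hypothetical
 * in this note):
 *
 *	static int
 *	fp_state_to_rval(uchar_t state)
 *	{
 *		int i;
 *
 *		for (i = 0; i < sizeof (fp_xlat) / sizeof (fp_xlat[0]); i++) {
 *			if (fp_xlat[i].xlat_state == state) {
 *				return (fp_xlat[i].xlat_rval);
 *			}
 *		}
 *		return (FC_FAILURE);
 *	}
 */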
static uchar_t fp_valid_alpas[] = {
	0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
	0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
	0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
	0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
	0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
	0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
	0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
	0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
	0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
	0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
	0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
	0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
	0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
	0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};

static struct fp_perms {
	uint16_t fp_ioctl_cmd;
	uchar_t fp_open_flag;
} fp_perm_list [] = {
	{ FCIO_GET_NUM_DEVS, FP_OPEN },
	{ FCIO_GET_DEV_LIST, FP_OPEN },
	{ FCIO_GET_SYM_PNAME, FP_OPEN },
	{ FCIO_GET_SYM_NNAME, FP_OPEN },
	{ FCIO_SET_SYM_PNAME, FP_EXCL },
	{ FCIO_SET_SYM_NNAME, FP_EXCL },
	{ FCIO_GET_LOGI_PARAMS, FP_OPEN },
	{ FCIO_DEV_LOGIN, FP_EXCL },
	{ FCIO_DEV_LOGOUT, FP_EXCL },
	{ FCIO_GET_STATE, FP_OPEN },
	{ FCIO_DEV_REMOVE, FP_EXCL },
	{ FCIO_GET_FCODE_REV, FP_OPEN },
	{ FCIO_GET_FW_REV, FP_OPEN },
	{ FCIO_GET_DUMP_SIZE, FP_OPEN },
	{ FCIO_FORCE_DUMP, FP_EXCL },
	{ FCIO_GET_DUMP, FP_OPEN },
	{ FCIO_GET_TOPOLOGY, FP_OPEN },
	{ FCIO_RESET_LINK, FP_EXCL },
	{ FCIO_RESET_HARD, FP_EXCL },
	{ FCIO_RESET_HARD_CORE, FP_EXCL },
	{ FCIO_DIAG, FP_OPEN },
	{ FCIO_NS, FP_EXCL },
	{ FCIO_DOWNLOAD_FW, FP_EXCL },
	{ FCIO_DOWNLOAD_FCODE, FP_EXCL },
	{ FCIO_LINK_STATUS, FP_OPEN },
	{ FCIO_GET_HOST_PARAMS, FP_OPEN },
	{ FCIO_GET_NODE_ID, FP_OPEN },
	{ FCIO_SET_NODE_ID, FP_EXCL },
	{ FCIO_SEND_NODE_ID, FP_OPEN },
	{ FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_PORT_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN },
	{ FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_GET_NPIV_PORT_LIST, FP_OPEN },
	{ FCIO_DELETE_NPIV_PORT, FP_OPEN },
	{ FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN },
	{ FCIO_CREATE_NPIV_PORT, FP_OPEN },
	{ FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN }
};

static char *fp_pm_comps[] = {
	"NAME=FC Port",
	"0=Port Down",
	"1=Port Up"
};


#ifdef	_LITTLE_ENDIAN
#define	MAKE_BE_32(x)	{ \
	uint32_t *ptr1, i; \
	ptr1 = (uint32_t *)(x); \
	for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \
		*ptr1 = BE_32(*ptr1); \
		ptr1++; \
	} \
}
#else
#define	MAKE_BE_32(x)
#endif
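/*
 * MAKE_BE_32() treats its argument as an array of 32-bit words and converts
 * each word to big-endian in place; on big-endian hosts it compiles away.
 * A minimal sketch of the intended use (the payload struct below is
 * hypothetical, not a real service parameter layout):
 *
 *	struct hypothetical_payload {
 *		uint32_t field_a;
 *		uint32_t field_b;
 *	} pl;
 *
 *	pl.field_a = 0x12345678;
 *	pl.field_b = 0x9ABCDEF0;
 *	MAKE_BE_32(&pl);	-- both words are now in wire (big-endian) order
 */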
static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
static uint32_t fp_options = 0;

static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;	/* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;	/* number of retries */
unsigned int fp_offline_ticker;			/* seconds */
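/*
 * fp_offline_ticker is a driver-wide tunable that is read again in
 * fp_attach().  A sketch of how an administrator might override the
 * default, as the comment in fp_attach() suggests (the value 45 is an
 * example only and must stay within the 10..90 second range checked
 * there):
 *
 *	In /etc/system:		set fp:fp_offline_ticker = 45
 *	In fp.conf:		fp_offline_ticker=45;
 */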
/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances. Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

static clock_t fp_retry_ticks;
static clock_t fp_offline_ticks;

static int fp_retry_ticker;
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

static int fp_log_size = FP_LOG_SIZE;
static int fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t *fp_logq = NULL;

int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);


/*
 * Perform global initialization
 */
int
_init(void)
{
	int ret;

	if ((ret = ddi_soft_state_init(&fp_driver_softstate,
	    sizeof (struct fc_local_port), 8)) != 0) {
		return (ret);
	}

	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini(&fp_driver_softstate);
		return (ret);
	}

	fp_logq = fc_trace_alloc_logq(fp_log_size);

	if ((ret = mod_install(&modlinkage)) != 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Prepare for driver unload
 */
int
_fini(void)
{
	int ret;

	if ((ret = mod_remove(&modlinkage)) == 0) {
		fc_trace_free_logq(fp_logq);
		ddi_soft_state_fini(&fp_driver_softstate);
		scsi_hba_fini(&modlinkage);
	}

	return (ret);
}


/*
 * Request mod_info() to handle all cases
 */
int
_info(struct modinfo *modinfo)
{
	return (mod_info(&modlinkage, modinfo));
}


/*
 * fp_attach:
 *
 * The respective cmd handlers take care of performing
 * ULP related invocations
 */
static int
fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int rval;

	/*
	 * We check the value of fp_offline_ticker at this
	 * point. The variable is global for the driver and
	 * not specific to an instance.
	 *
	 * If there is no user-defined value found in /etc/system
	 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
	 * The minimum setting for this offline timeout according
	 * to the FC-FS2 standard (Fibre Channel Framing and
	 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
	 *
	 * We do not recommend setting the value to less than 10
	 * seconds (RA_TOV) or more than 90 seconds. If this
	 * variable is greater than 90 seconds then drivers above
	 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
	 */

	fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
	    FP_OFFLINE_TICKER);

	if ((fp_offline_ticker < 10) ||
	    (fp_offline_ticker > 90)) {
		cmn_err(CE_WARN, "Setting fp_offline_ticker to "
		    "%d second(s). The configured value is outside the "
		    "recommended range of 10..90 seconds",
		    FP_OFFLINE_TICKER);
		fp_offline_ticker = FP_OFFLINE_TICKER;
	}

	/*
	 * Tick every second when there are commands to retry.
	 * It should tick at the least granular value of pkt_timeout
	 * (which is one second)
	 */
	fp_retry_ticker = 1;

	fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
	fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);

	switch (cmd) {
	case DDI_ATTACH:
		rval = fp_attach_handler(dip);
		break;

	case DDI_RESUME:
		rval = fp_resume_handler(dip);
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}
	return (rval);
}


/*
 * fp_detach:
 *
 * If a ULP fails to handle the cmd request, the converse of
 * cmd is invoked for those ULPs that previously succeeded with
 * the cmd request.
 */
static int
fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int rval = DDI_FAILURE;
	fc_local_port_t *port;
	fc_attach_cmd_t converse;
	uint8_t cnt;

	if ((port = ddi_get_soft_state(fp_driver_softstate,
	    ddi_get_instance(dip))) == NULL) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	if (port->fp_ulp_attach) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	switch (cmd) {
	case DDI_DETACH:
		if (port->fp_task != FP_TASK_IDLE) {
			mutex_exit(&port->fp_mutex);
			return (DDI_FAILURE);
		}

		/* Let's attempt to quit the job handler gracefully */
		port->fp_soft_state |= FP_DETACH_INPROGRESS;

		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_ATTACH;
		if (fctl_detach_ulps(port, FC_CMD_DETACH,
		    &modlinkage) != FC_SUCCESS) {
			mutex_enter(&port->fp_mutex);
			port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}

		mutex_enter(&port->fp_mutex);
		for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
		    cnt++) {
			mutex_exit(&port->fp_mutex);
			delay(drv_usectohz(1000000));
			mutex_enter(&port->fp_mutex);
		}

		if (port->fp_job_head) {
			mutex_exit(&port->fp_mutex);
			rval = DDI_FAILURE;
			break;
		}
		mutex_exit(&port->fp_mutex);

		rval = fp_detach_handler(port);
		break;

	case DDI_SUSPEND:
		mutex_exit(&port->fp_mutex);
		converse = FC_CMD_RESUME;
		if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
		    &modlinkage) != FC_SUCCESS) {
			rval = DDI_FAILURE;
			break;
		}
		if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
			(void) callb_generic_cpr(&port->fp_cpr_info,
			    CB_CODE_CPR_RESUME);
		}
		break;

	default:
		mutex_exit(&port->fp_mutex);
		break;
	}

	/*
	 * Use softint to perform reattach. Mark fp_ulp_attach so we
	 * don't attempt to do this repeatedly on behalf of some persistent
	 * caller.
	 */
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_ulp_attach = 1;

		/*
		 * If the port is in the low power mode then there is
		 * a possibility that the FCA too could be in low power
		 * mode. Try to raise the power before calling attach ulps.
		 */

		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
			mutex_exit(&port->fp_mutex);
			(void) pm_raise_power(port->fp_port_dip,
			    FP_PM_COMPONENT, FP_PM_PORT_UP);
		} else {
			mutex_exit(&port->fp_mutex);
		}


		fp_attach_ulps(port, converse);

		mutex_enter(&port->fp_mutex);
		while (port->fp_ulp_attach) {
			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
		}

		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

		/*
		 * Mark state as detach failed so asynchronous ULP attach
		 * events (downstream, not the ones we're initiating with
		 * the call to fp_attach_ulps) are not honored. We're
		 * really still in pending detach.
		 */
		port->fp_soft_state |= FP_DETACH_FAILED;

		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}


/*
 * fp_getinfo:
 *   Given the device number, return either the
 *   dev_info_t pointer or the instance number.
 */

/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int rval;
	minor_t instance;
	fc_local_port_t *port;

	rval = DDI_SUCCESS;
	instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((port = ddi_get_soft_state(fp_driver_softstate,
		    instance)) == NULL) {
			rval = DDI_FAILURE;
			break;
		}
		*result = (void *)port->fp_port_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}


/*
 * Entry point for power up and power down request from kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int rval = DDI_FAILURE;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * PM framework goofed up. We don't have
			 * any PM components. Let's never go down.
			 */
			mutex_exit(&port->fp_mutex);
			break;

		}

		if (port->fp_ulp_attach) {
			/* We shouldn't let the power go down */
			mutex_exit(&port->fp_mutex);
			break;
		}

		/*
		 * Not a whole lot to do if we are detaching
		 */
		if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
			port->fp_pm_level = FP_PM_PORT_DOWN;
			mutex_exit(&port->fp_mutex);
			rval = DDI_SUCCESS;
			break;
		}

		if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
			port->fp_pm_level = FP_PM_PORT_DOWN;

			rval = fp_power_down(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_UP;
				ASSERT(!(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN));
			} else {
				ASSERT(port->fp_soft_state &
				    FP_SOFT_POWER_DOWN);
			}
		}
		mutex_exit(&port->fp_mutex);
		break;

	default:
		break;
	}

	return (rval);
}


/*
 * Open FC port devctl node
 */
static int
fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
	int instance;
	fc_local_port_t *port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/*
	 * This is not a toy to play with. Allow only powerful
	 * users (hopefully knowledgeable) to access the port
	 * (A hacker potentially could download a sick binary
	 * file into FCA)
	 */
	if (drv_priv(credp)) {
		return (EPERM);
	}

	instance = (int)getminor(*devp);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if (port->fp_flag & FP_EXCL) {
		/*
		 * It is already open for exclusive access.
		 * So shut the door on this caller.
		 */
		mutex_exit(&port->fp_mutex);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		if (port->fp_flag & FP_OPEN) {
			/*
			 * Exclusive operation not possible
			 * as it is already opened
			 */
			mutex_exit(&port->fp_mutex);
			return (EBUSY);
		}
		port->fp_flag |= FP_EXCL;
	}
	port->fp_flag |= FP_OPEN;
	mutex_exit(&port->fp_mutex);

	return (0);
}
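/*
 * Ioctls marked FP_EXCL in fp_perm_list require the caller to have opened
 * the node exclusively.  A hedged userland sketch (the device path shown
 * is illustrative; drv_priv(9F) additionally restricts access to
 * privileged callers):
 *
 *	int fd = open("/devices/...:devctl", O_RDONLY | O_EXCL);
 *	if (fd < 0) {
 *		-- port is already open, or the caller lacks privilege
 *	}
 */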
/*
 * The driver close entry point is called on the last close()
 * of a device. So it is perfectly alright to just clobber the
 * open flag and reset it to idle (instead of having to reset
 * each flag bit). For any confusion, check out close(9E).
 */

/* ARGSUSED */
static int
fp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
	int instance;
	fc_local_port_t *port;

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENODEV);
	}
	port->fp_flag = FP_IDLE;
	mutex_exit(&port->fp_mutex);

	return (0);
}

/*
 * Handle IOCTL requests
 */

/* ARGSUSED */
static int
fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
	int instance;
	int ret = 0;
	fcio_t fcio;
	fc_local_port_t *port;

	instance = (int)getminor(dev);

	port = ddi_get_soft_state(fp_driver_softstate, instance);
	if (port == NULL) {
		return (ENXIO);
	}

	mutex_enter(&port->fp_mutex);
	if ((port->fp_flag & FP_OPEN) == 0) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (ENXIO);
	}

	mutex_exit(&port->fp_mutex);

	/* this will raise power if necessary */
	ret = fctl_busy_port(port);
	if (ret != 0) {
		return (ret);
	}

	ASSERT(port->fp_pm_level == FP_PM_PORT_UP);


	switch (cmd) {
	case FCIO_CMD: {
#ifdef	_MULTI_DATAMODEL
		switch (ddi_model_convert_from(mode & FMODELS)) {
		case DDI_MODEL_ILP32: {
			struct fcio32 fcio32;

			if (ddi_copyin((void *)data, (void *)&fcio32,
			    sizeof (struct fcio32), mode)) {
				ret = EFAULT;
				break;
			}
			fcio.fcio_xfer = fcio32.fcio_xfer;
			fcio.fcio_cmd = fcio32.fcio_cmd;
			fcio.fcio_flags = fcio32.fcio_flags;
			fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
			fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
			fcio.fcio_ibuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
			fcio.fcio_olen = (size_t)fcio32.fcio_olen;
			fcio.fcio_obuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_obuf;
			fcio.fcio_alen = (size_t)fcio32.fcio_alen;
			fcio.fcio_abuf =
			    (caddr_t)(uintptr_t)fcio32.fcio_abuf;
			fcio.fcio_errno = fcio32.fcio_errno;
			break;
		}

		case DDI_MODEL_NONE:
			if (ddi_copyin((void *)data, (void *)&fcio,
			    sizeof (fcio_t), mode)) {
				ret = EFAULT;
			}
			break;
		}
#else	/* _MULTI_DATAMODEL */
		if (ddi_copyin((void *)data, (void *)&fcio,
		    sizeof (fcio_t), mode)) {
			ret = EFAULT;
			break;
		}
#endif	/* _MULTI_DATAMODEL */
		if (!ret) {
			ret = fp_fciocmd(port, data, mode, &fcio);
		}
		break;
	}

	default:
		ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
		    mode, credp, rval);
	}

	fctl_idle_port(port);

	return (ret);
}
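/*
 * FCIO_CMD carries an fcio_t that names the real sub-command and describes
 * its input/output buffers.  A hedged userland sketch for FCIO_GET_NUM_DEVS
 * (error handling omitted; assumes <sys/fibre-channel/fcio.h>):
 *
 *	fcio_t	fcio;
 *	int	num_devs = 0;
 *
 *	bzero(&fcio, sizeof (fcio));
 *	fcio.fcio_cmd = FCIO_GET_NUM_DEVS;
 *	fcio.fcio_xfer = FCIO_XFER_READ;
 *	fcio.fcio_obuf = (caddr_t)&num_devs;
 *	fcio.fcio_olen = sizeof (num_devs);
 *	if (ioctl(fd, FCIO_CMD, &fcio) == 0) {
 *		-- num_devs now holds the count reported by fp
 *	}
 */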
/*
 * Init Symbolic Port Name and Node Name
 * LV will try to get symbolic names from the FCA driver
 * and register these with the name server; if LV fails to
 * get them, LV will register its default symbolic names
 * with the name server.
 * The default symbolic node name format is :
 *	<hostname>:<hba driver name>(instance)
 * The default symbolic port name format is :
 *	<fp path name>
 */
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
	const char *vendorname = ddi_driver_name(port->fp_fca_dip);
	char *sym_name;
	char fcaname[50] = {0};
	int hostnlen, fcanlen;

	if (port->fp_sym_node_namelen == 0) {
		hostnlen = strlen(utsname.nodename);
		(void) snprintf(fcaname, sizeof (fcaname),
		    "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
		fcanlen = strlen(fcaname);

		sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
		(void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
		port->fp_sym_node_namelen = strlen(sym_name);
		if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_node_name, sym_name,
		    port->fp_sym_node_namelen);
		kmem_free(sym_name, hostnlen + fcanlen + 2);
	}

	if (port->fp_sym_port_namelen == 0) {
		char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) ddi_pathname(port->fp_port_dip, pathname);
		port->fp_sym_port_namelen = strlen(pathname);
		if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
			port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
		}
		(void) strncpy(port->fp_sym_port_name, pathname,
		    port->fp_sym_port_namelen);
		kmem_free(pathname, MAXPATHLEN);
	}
}


/*
 * Perform port attach
 */
static int
fp_attach_handler(dev_info_t *dip)
{
	int rval;
	int instance;
	int port_num;
	int port_len;
	char name[30];
	char i_pwwn[17];
	fp_cmd_t *pkt;
	uint32_t ub_count;
	fc_local_port_t *port;
	job_request_t *job;
	fc_local_port_t *phyport = NULL;
	int portpro1;
	char pwwn[17], nwwn[17];

	instance = ddi_get_instance(dip);
	port_len = sizeof (port_num);
	rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
	    (caddr_t)&port_num, &port_len);
	if (rval != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
	    DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
		    instance);
		return (DDI_FAILURE);
	}

	if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
	    DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
		    " point minor node", instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
	    != DDI_SUCCESS) {
		cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
		    instance);
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}
	port = ddi_get_soft_state(fp_driver_softstate, instance);

	(void) sprintf(port->fp_ibuf, "fp(%d)", instance);

	port->fp_instance = instance;
	port->fp_ulp_attach = 1;
	port->fp_port_num = port_num;
	port->fp_verbose = fp_verbosity;
	port->fp_options = fp_options;

	port->fp_fca_dip = ddi_get_parent(dip);
	port->fp_port_dip = dip;
	port->fp_fca_tran = (fc_fca_tran_t *)
	    ddi_get_driver_private(port->fp_fca_dip);

	port->fp_task = port->fp_last_task = FP_TASK_IDLE;

	/*
	 * Init the starting value of fp_rscn_count. Note that if
	 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
	 * actual # of RSCNs will be (fp_rscn_count - 1)
	 */
	port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;

	mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
	cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);

	(void) sprintf(name, "fp%d_cache", instance);

	if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
	    dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "phyport-instance", -1)) != -1) {
		phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
		fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
		fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
		port->fp_npiv_type = FC_NPIV_PORT;
	}

	/*
	 * Allocate the pool of fc_packet_t structs to be used with
	 * this fp instance.
	 */
	port->fp_pkt_cache = kmem_cache_create(name,
	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
	    NULL, 0);
	port->fp_out_fpcmds = 0;
	if (port->fp_pkt_cache == NULL) {
		goto cache_alloc_failed;
	}


	/*
	 * Allocate the d_id and pwwn hash tables for all remote ports
	 * connected to this local port.
	 */
	port->fp_did_table = kmem_zalloc(did_table_size *
	    sizeof (struct d_id_hash), KM_SLEEP);

	port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
	    sizeof (struct pwwn_hash), KM_SLEEP);

	port->fp_taskq = taskq_create("fp_ulp_callback", 1,
	    MINCLSYSPRI, 1, 16, 0);

	/* Indicate that we don't have the pm components yet */
	port->fp_soft_state |= FP_SOFT_NO_PMCOMP;

	/*
	 * Bind the callbacks with the FCA driver. This will open the gate
	 * for asynchronous callbacks, so after this call the fp_mutex
	 * must be held when updating the fc_local_port_t struct.
	 *
	 * This is done _before_ setting up the job thread so we can avoid
	 * cleaning up after the thread_create() in the error path. This
	 * also means fp will be operating with fp_els_resp_pkt set to NULL.
	 */
	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		goto bind_callbacks_failed;
	}

	if (phyport) {
		mutex_enter(&phyport->fp_mutex);
		if (phyport->fp_port_next) {
			phyport->fp_port_next->fp_port_prev = port;
			port->fp_port_next = phyport->fp_port_next;
			phyport->fp_port_next = port;
			port->fp_port_prev = phyport;
		} else {
			phyport->fp_port_next = port;
			phyport->fp_port_prev = port;
			port->fp_port_next = phyport;
			port->fp_port_prev = phyport;
		}
		mutex_exit(&phyport->fp_mutex);
	}

	/*
	 * Init Symbolic Names
	 */
	fp_init_symbolic_names(port);

	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
	    KM_SLEEP, NULL);

	if (pkt == NULL) {
		cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
		    instance);
		goto alloc_els_packet_failed;
	}

	(void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
	    v.v_maxsyspri - 2);

	fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-port' property"
		    " on fp dev_info node failed", instance);
	}

	fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
	    i_pwwn) != DDI_PROP_SUCCESS) {
		fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
		    "fp(%d): Updating 'initiator-node' property"
		    " on fp dev_info node failed", instance);
	}

	mutex_enter(&port->fp_mutex);
	port->fp_els_resp_pkt = pkt;
	mutex_exit(&port->fp_mutex);

	/*
	 * Determine the count of unsolicited buffers this FCA can support
	 */
	fp_retrieve_caps(port);

	/*
	 * Allocate unsolicited buffer tokens
	 */
	if (port->fp_ub_count) {
		ub_count = port->fp_ub_count;
		port->fp_ub_tokens = kmem_zalloc(ub_count *
		    sizeof (*port->fp_ub_tokens), KM_SLEEP);
		/*
		 * Do not fail the attach if unsolicited buffer allocation
		 * fails; Just try to get along with whatever the FCA can do.
		 */
		if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
		    FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
		    FC_SUCCESS || ub_count != port->fp_ub_count) {
			cmn_err(CE_WARN, "fp(%d): failed to allocate "
			    " Unsolicited buffers. proceeding with attach...",
			    instance);
			kmem_free(port->fp_ub_tokens,
			    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
			port->fp_ub_tokens = NULL;
		}
	}

	fp_load_ulp_modules(dip, port);

	/*
	 * Enable DDI_SUSPEND and DDI_RESUME for this instance.
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
	    "pm-hardware-state", "needs-suspend-resume",
	    strlen("needs-suspend-resume") + 1);

	/*
	 * fctl maintains a list of all port handles, so
	 * help fctl add this one to its list now.
	 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */

		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
	 */

alloc_els_packet_failed:

	if (port->fp_fca_handle != NULL) {
		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
		port->fp_fca_handle = NULL;
	}

	if (port->fp_ub_tokens != NULL) {
		(void) fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens);
		kmem_free(port->fp_ub_tokens,
		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_els_resp_pkt != NULL) {
		fp_free_pkt(port->fp_els_resp_pkt);
		port->fp_els_resp_pkt = NULL;
	}

bind_callbacks_failed:

	if (port->fp_taskq != NULL) {
		taskq_destroy(port->fp_taskq);
	}

	if (port->fp_pwwn_table != NULL) {
		kmem_free(port->fp_pwwn_table,
		    pwwn_table_size * sizeof (struct pwwn_hash));
		port->fp_pwwn_table = NULL;
	}

	if (port->fp_did_table != NULL) {
		kmem_free(port->fp_did_table,
		    did_table_size * sizeof (struct d_id_hash));
		port->fp_did_table = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
		port->fp_pkt_cache = NULL;
	}

cache_alloc_failed:

	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	mutex_destroy(&port->fp_mutex);
	ddi_remove_minor_node(port->fp_port_dip, NULL);
	ddi_soft_state_free(fp_driver_softstate, instance);
	ddi_prop_remove_all(dip);

	return (DDI_FAILURE);
}
/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
	int rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is handled in callback mode
	 * (not polled mode); In a specific case such as this, the discovery
	 * is required to happen in polled mode. The easiest way out is
	 * to bail out to the port thread and get started. Come back and fix
	 * this to do on-demand discovery initiated by ULPs. ULPs such as FCP
	 * will do on-demand discovery during pre-power-up busctl handling
	 * which will only be possible when SCSA provides a new HBA vector
	 * for sending down the PM busctl requests.
	 */
	(void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

	rval = fp_resume_all(port, FC_CMD_RESUME);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		(void) callb_generic_cpr(&port->fp_cpr_info,
		    CB_CODE_CPR_CHKPT);
	}

	return (rval);
}

/*
 * Perform FC Port power on initialization
 */
static int
fp_power_up(fc_local_port_t *port)
{
	int rval;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
	ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);

	port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;

	mutex_exit(&port->fp_mutex);

	rval = fp_resume_all(port, FC_CMD_POWER_UP);
	if (rval != DDI_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
	} else {
		mutex_enter(&port->fp_mutex);
	}

	return (rval);
}


/*
 * It is important to note that the power may possibly be removed between
 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
 * FC port hardware would have gone through an OFFLINE to ONLINE transition
 * (hardware state). In this case, the port driver may need to rediscover the
 * topology, perform LOGINs, register with the name server again and perform
 * any such port initialization procedures. To perform LOGINs, the driver could
 * use the port device handle to see if a LOGIN needs to be performed and use
 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
 * or removed) which will be reflected in the map the ULPs will see.
 */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	if (fp_bind_callbacks(port) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	mutex_enter(&port->fp_mutex);

	/*
	 * If there are commands queued for delayed retry, instead of
	 * working the hard way to figure out which ones are good for
	 * restart and which ones are not (ELSs are definitely not good,
	 * as the port will have to go through a new spin of rediscovery
	 * now), just flush them all out.
	 */
	if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
		fp_cmd_t *cmd;

		port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

		mutex_exit(&port->fp_mutex);
		while ((cmd = fp_deque_cmd(port)) != NULL) {
			cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
			fp_iodone(cmd);
		}
		mutex_enter(&port->fp_mutex);
	}

	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
		if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
		    port->fp_dev_count) {
			port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
			port->fp_offline_tid = timeout(fp_offline_timeout,
			    (caddr_t)port, fp_offline_ticks);
		}
		if (port->fp_job_head) {
			cv_signal(&port->fp_cv);
		}
		mutex_exit(&port->fp_mutex);
		fctl_attach_ulps(port, cmd, &modlinkage);
	} else {
		struct job_request *job;

		/*
		 * If an OFFLINE timer was running at the time of
		 * suspending, there is no need to restart it as
		 * the port is ONLINE now.
		 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+	Grab the port driver mutex, check if the unsolicited
 *		callback is currently under processing. If true, fail
 *		the DDI_DETACH request by printing a message; If false
 *		mark the DDI_DETACH as under progress, so that any
 *		further unsolicited callbacks get bounced.
 *	+	Perform PRLO/LOGO if necessary, cleanup all the data
 *		structures.
 *	+	Get the job_handler thread to gracefully exit.
 *	+	Unregister callbacks with the FCA port.
 *	+	Now that some peace is found, notify all the ULPs of
 *		DDI_DETACH request (using ulp_port_detach entry point)
 *	+	Free all mutexes, semaphores, conditional variables.
 *	+	Free the soft state, return success.
 *
 * Important considerations:
 *		Port driver de-registers state change and unsolicited
 *		callbacks before taking up the task of notifying ULPs
 *		and performing PRLO and LOGOs.
 *
 *		A port may go offline at the time PRLO/LOGO is being
 *		requested. It is expected of all FCA drivers to fail
 *		such requests either immediately with a FC_OFFLINE
 *		return code to fc_fca_transport() or return the packet
 *		asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t *job;
	uint32_t delay_count;
	fc_orphan_t *orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands, because
		 * sd/ssd/scsi_vhci should have finished all their commands,
		 * fcp/fcip/fcsm should have finished all their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us. In theory the fp driver could even be
	 * modunloaded while the job thread is still executing, and there
	 * is no easy way to close that race condition here.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}
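/*
 * The synchronous job pattern used above (and in fp_attach_handler() and
 * fp_resume_all()) is: allocate a job, queue it to the per-port job thread,
 * wait for it to complete, then free it.  A minimal sketch, assuming no
 * port mutex is held across the wait:
 *
 *	job_request_t *job;
 *
 *	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
 *	    (opaque_t)port, KM_SLEEP);
 *	fctl_enque_job(port, job);
 *	fctl_jobwait(job);		-- blocks until the job thread is done
 *	fctl_dealloc_job(job);
 */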
/*
 * Steps to perform DDI_SUSPEND operation on a FC port
 *
 *	- If already suspended return DDI_FAILURE
 *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	  in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	  taken care of by the CPR frame work)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that the Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 millisecond ?) to see
	 * if the callback processing completes may be another idea. Since
	 * most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * A power down request following a DDI_SUSPEND should
	 * never happen; if it does, return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * Check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	fp_suspend_all(port);

	return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int index;
	struct pwwn_hash *head;
	fc_remote_port_t *pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	if (port->fp_wait_tid != 0) {
		timeout_id_t tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	if (port->fp_offline_tid) {
		timeout_id_t tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to the cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
	int (*cb) (caddr_t);
	fc_packet_t *pkt;
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_local_port_t *port = (fc_local_port_t *)cdarg;

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd->cmd_next = NULL;
	cmd->cmd_flags = 0;
	cmd->cmd_dflags = 0;
	cmd->cmd_job = NULL;
	cmd->cmd_port = port;
	pkt = &cmd->cmd_pkt;

	if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
			return (-1);
		}

		if (ddi_dma_alloc_handle(port->fp_fca_dip,
		    port->fp_fca_tran->fca_dma_attr, cb, NULL,
		    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
			ddi_dma_free_handle(&pkt->pkt_cmd_dma);
			return (-1);
		}
	} else {
		pkt->pkt_cmd_dma = 0;
		pkt->pkt_resp_dma = 0;
	}

	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
	    pkt->pkt_data_cookie_cnt = 0;
	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
	    pkt->pkt_data_cookie = NULL;
	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

	return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_packet_t *pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}
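/*
 * The constructor/destructor pair above is wired into the per-port packet
 * cache created in fp_attach_handler().  A minimal sketch of that pairing,
 * mirroring the kmem_cache_create() call made during attach:
 *
 *	cache = kmem_cache_create(name,
 *	    port->fp_fca_tran->fca_pkt_size + sizeof (fp_cmd_t), 8,
 *	    fp_cache_constructor, fp_cache_destructor, NULL,
 *	    (void *)port, NULL, 0);
 *	cmd = kmem_cache_alloc(cache, KM_SLEEP);   -- DMA handles preallocated
 *	kmem_cache_free(cache, cmd);
 *	kmem_cache_destroy(cache);   -- destructor releases the DMA handles
 */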
1963 */ 1964 1965 static fp_cmd_t * 1966 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags, 1967 fc_remote_port_t *pd) 1968 { 1969 int rval; 1970 ulong_t real_len; 1971 fp_cmd_t *cmd; 1972 fc_packet_t *pkt; 1973 int (*cb) (caddr_t); 1974 ddi_dma_cookie_t pkt_cookie; 1975 ddi_dma_cookie_t *cp; 1976 uint32_t cnt; 1977 1978 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1979 1980 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1981 1982 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags); 1983 if (cmd == NULL) { 1984 return (cmd); 1985 } 1986 1987 cmd->cmd_ulp_pkt = NULL; 1988 cmd->cmd_flags = 0; 1989 pkt = &cmd->cmd_pkt; 1990 ASSERT(cmd->cmd_dflags == 0); 1991 1992 pkt->pkt_datalen = 0; 1993 pkt->pkt_data = NULL; 1994 pkt->pkt_state = 0; 1995 pkt->pkt_action = 0; 1996 pkt->pkt_reason = 0; 1997 pkt->pkt_expln = 0; 1998 pkt->pkt_cmd = NULL; 1999 pkt->pkt_resp = NULL; 2000 pkt->pkt_fctl_rsvd1 = NULL; 2001 pkt->pkt_fctl_rsvd2 = NULL; 2002 2003 /* 2004 * Init pkt_pd with the given pointer; this must be done _before_ 2005 * the call to fc_ulp_init_packet(). 2006 */ 2007 pkt->pkt_pd = pd; 2008 2009 /* Now call the FCA driver to init its private, per-packet fields */ 2010 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 2011 goto alloc_pkt_failed; 2012 } 2013 2014 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2015 ASSERT(pkt->pkt_cmd_dma != NULL); 2016 2017 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2018 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2019 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2020 &pkt->pkt_cmd_acc); 2021 2022 if (rval != DDI_SUCCESS) { 2023 goto alloc_pkt_failed; 2024 } 2025 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2026 2027 if (real_len < cmd_len) { 2028 goto alloc_pkt_failed; 2029 } 2030 2031 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2032 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2033 DDI_DMA_CONSISTENT, cb, NULL, 2034 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2035 2036 if (rval != DDI_DMA_MAPPED) { 2037 goto alloc_pkt_failed; 2038 } 2039 2040 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2041 2042 if (pkt->pkt_cmd_cookie_cnt > 2043 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2044 goto alloc_pkt_failed; 2045 } 2046 2047 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2048 2049 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2050 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2051 KM_NOSLEEP); 2052 2053 if (cp == NULL) { 2054 goto alloc_pkt_failed; 2055 } 2056 2057 *cp = pkt_cookie; 2058 cp++; 2059 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2060 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2061 *cp = pkt_cookie; 2062 } 2063 } else if (cmd_len != 0) { 2064 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP); 2065 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len; 2066 } 2067 2068 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2069 ASSERT(pkt->pkt_resp_dma != NULL); 2070 2071 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2072 port->fp_fca_tran->fca_acc_attr, 2073 DDI_DMA_CONSISTENT, cb, NULL, 2074 (caddr_t *)&pkt->pkt_resp, &real_len, 2075 &pkt->pkt_resp_acc); 2076 2077 if (rval != DDI_SUCCESS) { 2078 goto alloc_pkt_failed; 2079 } 2080 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2081 2082 if (real_len < resp_len) { 2083 goto alloc_pkt_failed; 2084 } 2085 2086 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2087 pkt->pkt_resp, real_len, DDI_DMA_READ | 2088 DDI_DMA_CONSISTENT, cb, NULL, 2089 &pkt_cookie, 
&pkt->pkt_resp_cookie_cnt); 2090 2091 if (rval != DDI_DMA_MAPPED) { 2092 goto alloc_pkt_failed; 2093 } 2094 2095 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2096 2097 if (pkt->pkt_resp_cookie_cnt > 2098 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2099 goto alloc_pkt_failed; 2100 } 2101 2102 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2103 2104 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2105 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2106 KM_NOSLEEP); 2107 2108 if (cp == NULL) { 2109 goto alloc_pkt_failed; 2110 } 2111 2112 *cp = pkt_cookie; 2113 cp++; 2114 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2115 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2116 *cp = pkt_cookie; 2117 } 2118 } else if (resp_len != 0) { 2119 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP); 2120 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len; 2121 } 2122 2123 pkt->pkt_cmdlen = cmd_len; 2124 pkt->pkt_rsplen = resp_len; 2125 pkt->pkt_ulp_private = cmd; 2126 2127 return (cmd); 2128 2129 alloc_pkt_failed: 2130 2131 fp_free_dma(cmd); 2132 2133 if (pkt->pkt_cmd_cookie != NULL) { 2134 kmem_free(pkt->pkt_cmd_cookie, 2135 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2136 pkt->pkt_cmd_cookie = NULL; 2137 } 2138 2139 if (pkt->pkt_resp_cookie != NULL) { 2140 kmem_free(pkt->pkt_resp_cookie, 2141 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2142 pkt->pkt_resp_cookie = NULL; 2143 } 2144 2145 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2146 if (pkt->pkt_cmd) { 2147 kmem_free(pkt->pkt_cmd, cmd_len); 2148 } 2149 2150 if (pkt->pkt_resp) { 2151 kmem_free(pkt->pkt_resp, resp_len); 2152 } 2153 } 2154 2155 kmem_cache_free(port->fp_pkt_cache, cmd); 2156 2157 return (NULL); 2158 } 2159 2160 2161 /* 2162 * Free FC packet 2163 */ 2164 static void 2165 fp_free_pkt(fp_cmd_t *cmd) 2166 { 2167 fc_local_port_t *port; 2168 fc_packet_t *pkt; 2169 2170 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2171 2172 cmd->cmd_next = NULL; 2173 cmd->cmd_job = NULL; 2174 pkt = &cmd->cmd_pkt; 2175 pkt->pkt_ulp_private = 0; 2176 pkt->pkt_tran_flags = 0; 2177 pkt->pkt_tran_type = 0; 2178 port = cmd->cmd_port; 2179 2180 if (pkt->pkt_cmd_cookie != NULL) { 2181 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2182 sizeof (ddi_dma_cookie_t)); 2183 pkt->pkt_cmd_cookie = NULL; 2184 } 2185 2186 if (pkt->pkt_resp_cookie != NULL) { 2187 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2188 sizeof (ddi_dma_cookie_t)); 2189 pkt->pkt_resp_cookie = NULL; 2190 } 2191 2192 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2193 if (pkt->pkt_cmd) { 2194 kmem_free(pkt->pkt_cmd, 2195 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1); 2196 } 2197 2198 if (pkt->pkt_resp) { 2199 kmem_free(pkt->pkt_resp, 2200 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2); 2201 } 2202 } 2203 2204 fp_free_dma(cmd); 2205 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2206 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2207 } 2208 2209 2210 /* 2211 * Release DVMA resources 2212 */ 2213 static void 2214 fp_free_dma(fp_cmd_t *cmd) 2215 { 2216 fc_packet_t *pkt = &cmd->cmd_pkt; 2217 2218 pkt->pkt_cmdlen = 0; 2219 pkt->pkt_rsplen = 0; 2220 pkt->pkt_tran_type = 0; 2221 pkt->pkt_tran_flags = 0; 2222 2223 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2224 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2225 } 2226 2227 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2228 if (pkt->pkt_cmd_acc) { 2229 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2230 } 2231 } 2232 2233 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2234 (void) 
ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2235 } 2236 2237 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2238 if (pkt->pkt_resp_acc) { 2239 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2240 } 2241 } 2242 cmd->cmd_dflags = 0; 2243 } 2244 2245 2246 /* 2247 * Dedicated thread to perform various activities. One thread for 2248 * each fc_local_port_t (driver soft state) instance. 2249 * Note, this effectively works out to one thread for each local 2250 * port, but there are also some Solaris taskq threads in use on a per-local 2251 * port basis; these also need to be taken into consideration. 2252 */ 2253 static void 2254 fp_job_handler(fc_local_port_t *port) 2255 { 2256 int rval; 2257 uint32_t *d_id; 2258 fc_remote_port_t *pd; 2259 job_request_t *job; 2260 2261 #ifndef __lock_lint 2262 /* 2263 * Solaris-internal stuff for proper operation of kernel threads 2264 * with Solaris CPR. 2265 */ 2266 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2267 callb_generic_cpr, "fp_job_handler"); 2268 #endif 2269 2270 2271 /* Loop forever waiting for work to do */ 2272 for (;;) { 2273 2274 mutex_enter(&port->fp_mutex); 2275 2276 /* 2277 * Sleep if no work to do right now, or if we want 2278 * to suspend or power-down. 2279 */ 2280 while (port->fp_job_head == NULL || 2281 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2282 FP_SOFT_SUSPEND))) { 2283 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2284 cv_wait(&port->fp_cv, &port->fp_mutex); 2285 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2286 } 2287 2288 /* 2289 * OK, we've just been woken up, so retrieve the next entry 2290 * from the head of the job queue for this local port. 2291 */ 2292 job = fctl_deque_job(port); 2293 2294 /* 2295 * Handle all the fp driver's supported job codes here 2296 * in this big honkin' switch. 2297 */ 2298 switch (job->job_code) { 2299 case JOB_PORT_SHUTDOWN: 2300 /* 2301 * fp_port_shutdown() is only called from here. This 2302 * will prepare the local port instance (softstate) 2303 * for detaching. This cancels timeout callbacks, 2304 * executes LOGOs with remote ports, cleans up tables, 2305 * and deallocates data structs. 2306 */ 2307 fp_port_shutdown(port, job); 2308 2309 /* 2310 * This will exit the job thread. 2311 */ 2312 #ifndef __lock_lint 2313 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2314 #else 2315 mutex_exit(&port->fp_mutex); 2316 #endif 2317 fctl_jobdone(job); 2318 thread_exit(); 2319 2320 /* NOTREACHED */ 2321 2322 case JOB_ATTACH_ULP: { 2323 /* 2324 * This job is spawned in response to a ULP calling 2325 * fc_ulp_add(). 2326 */ 2327 2328 boolean_t do_attach_ulps = B_TRUE; 2329 2330 /* 2331 * If fp is detaching, we don't want to call 2332 * fp_startup_done as this asynchronous 2333 * notification may interfere with the re-attach. 2334 */ 2335 2336 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2337 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2338 do_attach_ulps = B_FALSE; 2339 } else { 2340 /* 2341 * We are going to force the transport 2342 * to attach to the ULPs, so set 2343 * fp_ulp_attach. This will keep any 2344 * potential detach from occurring until 2345 * we are done. 2346 */ 2347 port->fp_ulp_attach = 1; 2348 } 2349 2350 mutex_exit(&port->fp_mutex); 2351 2352 /* 2353 * NOTE: Since we just dropped the mutex, there is now 2354 * a race window where the fp_soft_state check above 2355 * could change here. This race is covered because an 2356 * additional check was added in the functions hidden 2357 * under fp_startup_done(). 
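 *
 * As a hedged cross-reference only: the fp_ulp_attach flag set above
 * (under fp_mutex) is cleared again at the end of fp_ulp_port_attach()
 * further below, which also does a cv_signal(&port->fp_attach_cv);
 * presumably that pairing is what keeps the "potential detach"
 * mentioned above from proceeding while the taskq-driven ULP attach
 * work is still running.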
2358 */ 2359 if (do_attach_ulps == B_TRUE) { 2360 /* 2361 * This goes thru a bit of a convoluted call 2362 * chain before spawning off a DDI taskq 2363 * request to perform the actual attach 2364 * operations. Blocking can occur at a number 2365 * of points. 2366 */ 2367 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2368 } 2369 job->job_result = FC_SUCCESS; 2370 fctl_jobdone(job); 2371 break; 2372 } 2373 2374 case JOB_ULP_NOTIFY: { 2375 /* 2376 * Pass state change notifications up to any/all 2377 * registered ULPs. 2378 */ 2379 uint32_t statec; 2380 2381 statec = job->job_ulp_listlen; 2382 if (statec == FC_STATE_RESET_REQUESTED) { 2383 port->fp_last_task = port->fp_task; 2384 port->fp_task = FP_TASK_OFFLINE; 2385 fp_port_offline(port, 0); 2386 port->fp_task = port->fp_last_task; 2387 port->fp_last_task = FP_TASK_IDLE; 2388 } 2389 2390 if (--port->fp_statec_busy == 0) { 2391 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2392 } 2393 2394 mutex_exit(&port->fp_mutex); 2395 2396 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2397 fctl_jobdone(job); 2398 break; 2399 } 2400 2401 case JOB_PLOGI_ONE: 2402 /* 2403 * Issue a PLOGI to a single remote port. Multiple 2404 * PLOGIs to different remote ports may occur in 2405 * parallel. 2406 * This can create the fc_remote_port_t if it does not 2407 * already exist. 2408 */ 2409 2410 mutex_exit(&port->fp_mutex); 2411 d_id = (uint32_t *)job->job_private; 2412 pd = fctl_get_remote_port_by_did(port, *d_id); 2413 2414 if (pd) { 2415 mutex_enter(&pd->pd_mutex); 2416 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2417 pd->pd_login_count++; 2418 mutex_exit(&pd->pd_mutex); 2419 job->job_result = FC_SUCCESS; 2420 fctl_jobdone(job); 2421 break; 2422 } 2423 mutex_exit(&pd->pd_mutex); 2424 } else { 2425 mutex_enter(&port->fp_mutex); 2426 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2427 mutex_exit(&port->fp_mutex); 2428 pd = fp_create_remote_port_by_ns(port, 2429 *d_id, KM_SLEEP); 2430 if (pd == NULL) { 2431 job->job_result = FC_FAILURE; 2432 fctl_jobdone(job); 2433 break; 2434 } 2435 } else { 2436 mutex_exit(&port->fp_mutex); 2437 } 2438 } 2439 2440 job->job_flags |= JOB_TYPE_FP_ASYNC; 2441 job->job_counter = 1; 2442 2443 rval = fp_port_login(port, *d_id, job, 2444 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2445 2446 if (rval != FC_SUCCESS) { 2447 job->job_result = rval; 2448 fctl_jobdone(job); 2449 } 2450 break; 2451 2452 case JOB_LOGO_ONE: { 2453 /* 2454 * Issue a PLOGO to a single remote port. Multiple 2455 * PLOGOs to different remote ports may occur in 2456 * parallel. 2457 */ 2458 fc_remote_port_t *pd; 2459 2460 #ifndef __lock_lint 2461 ASSERT(job->job_counter > 0); 2462 #endif 2463 2464 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2465 2466 mutex_enter(&pd->pd_mutex); 2467 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2468 mutex_exit(&pd->pd_mutex); 2469 job->job_result = FC_LOGINREQ; 2470 mutex_exit(&port->fp_mutex); 2471 fctl_jobdone(job); 2472 break; 2473 } 2474 if (pd->pd_login_count > 1) { 2475 pd->pd_login_count--; 2476 mutex_exit(&pd->pd_mutex); 2477 job->job_result = FC_SUCCESS; 2478 mutex_exit(&port->fp_mutex); 2479 fctl_jobdone(job); 2480 break; 2481 } 2482 mutex_exit(&pd->pd_mutex); 2483 mutex_exit(&port->fp_mutex); 2484 job->job_flags |= JOB_TYPE_FP_ASYNC; 2485 (void) fp_logout(port, pd, job); 2486 break; 2487 } 2488 2489 case JOB_FCIO_LOGIN: 2490 /* 2491 * PLOGI initiated at ioctl request. 
2492 */ 2493 mutex_exit(&port->fp_mutex); 2494 job->job_result = 2495 fp_fcio_login(port, job->job_private, job); 2496 fctl_jobdone(job); 2497 break; 2498 2499 case JOB_FCIO_LOGOUT: 2500 /* 2501 * PLOGO initiated at ioctl request. 2502 */ 2503 mutex_exit(&port->fp_mutex); 2504 job->job_result = 2505 fp_fcio_logout(port, job->job_private, job); 2506 fctl_jobdone(job); 2507 break; 2508 2509 case JOB_PORT_GETMAP: 2510 case JOB_PORT_GETMAP_PLOGI_ALL: { 2511 port->fp_last_task = port->fp_task; 2512 port->fp_task = FP_TASK_GETMAP; 2513 2514 switch (port->fp_topology) { 2515 case FC_TOP_PRIVATE_LOOP: 2516 job->job_counter = 1; 2517 2518 fp_get_loopmap(port, job); 2519 mutex_exit(&port->fp_mutex); 2520 fp_jobwait(job); 2521 fctl_fillout_map(port, 2522 (fc_portmap_t **)job->job_private, 2523 (uint32_t *)job->job_arg, 1, 0, 0); 2524 fctl_jobdone(job); 2525 mutex_enter(&port->fp_mutex); 2526 break; 2527 2528 case FC_TOP_PUBLIC_LOOP: 2529 case FC_TOP_FABRIC: 2530 mutex_exit(&port->fp_mutex); 2531 job->job_counter = 1; 2532 2533 job->job_result = fp_ns_getmap(port, 2534 job, (fc_portmap_t **)job->job_private, 2535 (uint32_t *)job->job_arg, 2536 FCTL_GAN_START_ID); 2537 fctl_jobdone(job); 2538 mutex_enter(&port->fp_mutex); 2539 break; 2540 2541 case FC_TOP_PT_PT: 2542 mutex_exit(&port->fp_mutex); 2543 fctl_fillout_map(port, 2544 (fc_portmap_t **)job->job_private, 2545 (uint32_t *)job->job_arg, 1, 0, 0); 2546 fctl_jobdone(job); 2547 mutex_enter(&port->fp_mutex); 2548 break; 2549 2550 default: 2551 mutex_exit(&port->fp_mutex); 2552 fctl_jobdone(job); 2553 mutex_enter(&port->fp_mutex); 2554 break; 2555 } 2556 port->fp_task = port->fp_last_task; 2557 port->fp_last_task = FP_TASK_IDLE; 2558 mutex_exit(&port->fp_mutex); 2559 break; 2560 } 2561 2562 case JOB_PORT_OFFLINE: { 2563 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2564 2565 port->fp_last_task = port->fp_task; 2566 port->fp_task = FP_TASK_OFFLINE; 2567 2568 if (port->fp_statec_busy > 2) { 2569 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2570 fp_port_offline(port, 0); 2571 if (--port->fp_statec_busy == 0) { 2572 port->fp_soft_state &= 2573 ~FP_SOFT_IN_STATEC_CB; 2574 } 2575 } else { 2576 fp_port_offline(port, 1); 2577 } 2578 2579 port->fp_task = port->fp_last_task; 2580 port->fp_last_task = FP_TASK_IDLE; 2581 2582 mutex_exit(&port->fp_mutex); 2583 2584 fctl_jobdone(job); 2585 break; 2586 } 2587 2588 case JOB_PORT_STARTUP: { 2589 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2590 if (port->fp_statec_busy > 1) { 2591 mutex_exit(&port->fp_mutex); 2592 break; 2593 } 2594 mutex_exit(&port->fp_mutex); 2595 2596 FP_TRACE(FP_NHEAD2(9, rval), 2597 "Topology discovery failed"); 2598 break; 2599 } 2600 2601 /* 2602 * Attempt building device handles in case 2603 * of private Loop. 
2604 */ 2605 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2606 job->job_counter = 1; 2607 2608 fp_get_loopmap(port, job); 2609 mutex_exit(&port->fp_mutex); 2610 fp_jobwait(job); 2611 mutex_enter(&port->fp_mutex); 2612 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2613 ASSERT(port->fp_total_devices == 0); 2614 port->fp_total_devices = 2615 port->fp_dev_count; 2616 } 2617 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2618 /* 2619 * Hack to avoid state changes going up early 2620 */ 2621 port->fp_statec_busy++; 2622 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2623 2624 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2625 fp_fabric_online(port, job); 2626 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2627 } 2628 mutex_exit(&port->fp_mutex); 2629 fctl_jobdone(job); 2630 break; 2631 } 2632 2633 case JOB_PORT_ONLINE: { 2634 char *newtop; 2635 char *oldtop; 2636 uint32_t old_top; 2637 2638 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2639 2640 /* 2641 * Bail out early if there are a lot of 2642 * state changes in the pipeline 2643 */ 2644 if (port->fp_statec_busy > 1) { 2645 --port->fp_statec_busy; 2646 mutex_exit(&port->fp_mutex); 2647 fctl_jobdone(job); 2648 break; 2649 } 2650 2651 switch (old_top = port->fp_topology) { 2652 case FC_TOP_PRIVATE_LOOP: 2653 oldtop = "Private Loop"; 2654 break; 2655 2656 case FC_TOP_PUBLIC_LOOP: 2657 oldtop = "Public Loop"; 2658 break; 2659 2660 case FC_TOP_PT_PT: 2661 oldtop = "Point to Point"; 2662 break; 2663 2664 case FC_TOP_FABRIC: 2665 oldtop = "Fabric"; 2666 break; 2667 2668 default: 2669 oldtop = NULL; 2670 break; 2671 } 2672 2673 port->fp_last_task = port->fp_task; 2674 port->fp_task = FP_TASK_ONLINE; 2675 2676 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2677 2678 port->fp_task = port->fp_last_task; 2679 port->fp_last_task = FP_TASK_IDLE; 2680 2681 if (port->fp_statec_busy > 1) { 2682 --port->fp_statec_busy; 2683 mutex_exit(&port->fp_mutex); 2684 break; 2685 } 2686 2687 port->fp_state = FC_STATE_OFFLINE; 2688 2689 FP_TRACE(FP_NHEAD2(9, rval), 2690 "Topology discovery failed"); 2691 2692 if (--port->fp_statec_busy == 0) { 2693 port->fp_soft_state &= 2694 ~FP_SOFT_IN_STATEC_CB; 2695 } 2696 2697 if (port->fp_offline_tid == NULL) { 2698 port->fp_offline_tid = 2699 timeout(fp_offline_timeout, 2700 (caddr_t)port, fp_offline_ticks); 2701 } 2702 2703 mutex_exit(&port->fp_mutex); 2704 break; 2705 } 2706 2707 switch (port->fp_topology) { 2708 case FC_TOP_PRIVATE_LOOP: 2709 newtop = "Private Loop"; 2710 break; 2711 2712 case FC_TOP_PUBLIC_LOOP: 2713 newtop = "Public Loop"; 2714 break; 2715 2716 case FC_TOP_PT_PT: 2717 newtop = "Point to Point"; 2718 break; 2719 2720 case FC_TOP_FABRIC: 2721 newtop = "Fabric"; 2722 break; 2723 2724 default: 2725 newtop = NULL; 2726 break; 2727 } 2728 2729 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2730 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2731 "Change in FC Topology old = %s new = %s", 2732 oldtop, newtop); 2733 } 2734 2735 switch (port->fp_topology) { 2736 case FC_TOP_PRIVATE_LOOP: { 2737 int orphan = (old_top == FC_TOP_FABRIC || 2738 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2739 2740 mutex_exit(&port->fp_mutex); 2741 fp_loop_online(port, job, orphan); 2742 break; 2743 } 2744 2745 case FC_TOP_PUBLIC_LOOP: 2746 /* FALLTHROUGH */ 2747 case FC_TOP_FABRIC: 2748 fp_fabric_online(port, job); 2749 mutex_exit(&port->fp_mutex); 2750 break; 2751 2752 case FC_TOP_PT_PT: 2753 fp_p2p_online(port, job); 2754 mutex_exit(&port->fp_mutex); 2755 break; 2756 2757 default: 2758 if (--port->fp_statec_busy != 0) { 2759 /* 2760 * Watch curiously at what the next 2761 * state transition can do. 2762 */ 2763 mutex_exit(&port->fp_mutex); 2764 break; 2765 } 2766 2767 FP_TRACE(FP_NHEAD2(9, 0), 2768 "Topology Unknown, Offlining the port.."); 2769 2770 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2771 port->fp_state = FC_STATE_OFFLINE; 2772 2773 if (port->fp_offline_tid == NULL) { 2774 port->fp_offline_tid = 2775 timeout(fp_offline_timeout, 2776 (caddr_t)port, fp_offline_ticks); 2777 } 2778 mutex_exit(&port->fp_mutex); 2779 break; 2780 } 2781 2782 mutex_enter(&port->fp_mutex); 2783 2784 port->fp_task = port->fp_last_task; 2785 port->fp_last_task = FP_TASK_IDLE; 2786 2787 mutex_exit(&port->fp_mutex); 2788 2789 fctl_jobdone(job); 2790 break; 2791 } 2792 2793 case JOB_PLOGI_GROUP: { 2794 mutex_exit(&port->fp_mutex); 2795 fp_plogi_group(port, job); 2796 break; 2797 } 2798 2799 case JOB_UNSOL_REQUEST: { 2800 mutex_exit(&port->fp_mutex); 2801 fp_handle_unsol_buf(port, 2802 (fc_unsol_buf_t *)job->job_private, job); 2803 fctl_dealloc_job(job); 2804 break; 2805 } 2806 2807 case JOB_NS_CMD: { 2808 fctl_ns_req_t *ns_cmd; 2809 2810 mutex_exit(&port->fp_mutex); 2811 2812 job->job_flags |= JOB_TYPE_FP_ASYNC; 2813 ns_cmd = (fctl_ns_req_t *)job->job_private; 2814 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2815 ns_cmd->ns_cmd_code > NS_DA_ID) { 2816 job->job_result = FC_BADCMD; 2817 fctl_jobdone(job); 2818 break; 2819 } 2820 2821 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2822 if (ns_cmd->ns_pd != NULL) { 2823 job->job_result = FC_BADOBJECT; 2824 fctl_jobdone(job); 2825 break; 2826 } 2827 2828 job->job_counter = 1; 2829 2830 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2831 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2832 2833 if (rval != FC_SUCCESS) { 2834 job->job_result = rval; 2835 fctl_jobdone(job); 2836 } 2837 break; 2838 } 2839 job->job_result = FC_SUCCESS; 2840 job->job_counter = 1; 2841 2842 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2843 if (rval != FC_SUCCESS) { 2844 fctl_jobdone(job); 2845 } 2846 break; 2847 } 2848 2849 case JOB_LINK_RESET: { 2850 la_wwn_t *pwwn; 2851 uint32_t topology; 2852 2853 pwwn = (la_wwn_t *)job->job_private; 2854 ASSERT(pwwn != NULL); 2855 2856 topology = port->fp_topology; 2857 mutex_exit(&port->fp_mutex); 2858 2859 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2860 topology == FC_TOP_PRIVATE_LOOP) { 2861 job->job_flags |= JOB_TYPE_FP_ASYNC; 2862 rval = port->fp_fca_tran->fca_reset( 2863 port->fp_fca_handle, FC_FCA_LINK_RESET); 2864 job->job_result = rval; 2865 fp_jobdone(job); 2866 } else { 2867 ASSERT((job->job_flags & 2868 JOB_TYPE_FP_ASYNC) == 0); 2869 2870 if (FC_IS_TOP_SWITCH(topology)) { 2871 rval = fp_remote_lip(port, pwwn, 2872 KM_SLEEP, job); 2873 } else { 2874 rval = FC_FAILURE; 2875 } 2876 if (rval != FC_SUCCESS) { 2877 job->job_result = rval; 2878 } 2879 fctl_jobdone(job); 2880 } 2881 break; 2882 } 2883 2884 default: 2885 mutex_exit(&port->fp_mutex); 2886 job->job_result = FC_BADCMD; 2887 fctl_jobdone(job); 2888 break; 2889 } 2890 } 2891 /* NOTREACHED */ 2892 } 2893 2894 2895 /* 2896 * Perform FC port bring up initialization 2897 */ 2898 static int 2899 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2900 { 2901 int rval; 2902 uint32_t state; 2903 uint32_t src_id; 2904 fc_lilpmap_t *lilp_map; 2905 2906 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2907 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2908 2909 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2910 " port=%p, job=%p", port, job); 2911 2912 port->fp_topology = FC_TOP_UNKNOWN; 2913 port->fp_port_id.port_id = 0; 2914 state = FC_PORT_STATE_MASK(port->fp_state); 2915 2916 if (state == FC_STATE_OFFLINE) { 2917 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2918 job->job_result = FC_OFFLINE; 2919 mutex_exit(&port->fp_mutex); 2920 fctl_jobdone(job); 2921 mutex_enter(&port->fp_mutex); 2922 return (FC_OFFLINE); 2923 } 2924 2925 if (state == FC_STATE_LOOP) { 2926 port->fp_port_type.port_type = FC_NS_PORT_NL; 2927 mutex_exit(&port->fp_mutex); 2928 2929 lilp_map = &port->fp_lilp_map; 2930 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2931 job->job_result = FC_FAILURE; 2932 fctl_jobdone(job); 2933 2934 FP_TRACE(FP_NHEAD1(9, rval), 2935 "LILP map Invalid or not present"); 2936 mutex_enter(&port->fp_mutex); 2937 return (FC_FAILURE); 2938 } 2939 2940 if (lilp_map->lilp_length == 0) { 2941 job->job_result = FC_NO_MAP; 2942 fctl_jobdone(job); 2943 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2944 "LILP map length zero"); 2945 mutex_enter(&port->fp_mutex); 2946 return (FC_NO_MAP); 2947 } 2948 src_id = lilp_map->lilp_myalpa & 0xFF; 2949 } else { 2950 fc_remote_port_t *pd; 2951 fc_fca_pm_t pm; 2952 fc_fca_p2p_info_t p2p_info; 2953 int pd_recepient; 2954 2955 /* 2956 * Get P2P remote port info if possible 2957 */ 2958 bzero((caddr_t)&pm, sizeof (pm)); 2959 2960 pm.pm_cmd_flags = FC_FCA_PM_READ; 2961 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2962 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2963 pm.pm_data_buf = (caddr_t)&p2p_info; 2964 2965 rval = port->fp_fca_tran->fca_port_manage( 2966 port->fp_fca_handle, &pm); 2967 2968 if (rval == FC_SUCCESS) { 2969 port->fp_port_id.port_id = p2p_info.fca_d_id; 2970 port->fp_port_type.port_type = FC_NS_PORT_N; 2971 port->fp_topology = FC_TOP_PT_PT; 2972 port->fp_total_devices = 1; 2973 pd_recepient = fctl_wwn_cmp( 2974 &port->fp_service_params.nport_ww_name, 2975 &p2p_info.pwwn) < 0 ? 
2976 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2977 mutex_exit(&port->fp_mutex); 2978 pd = fctl_create_remote_port(port, 2979 &p2p_info.nwwn, 2980 &p2p_info.pwwn, 2981 p2p_info.d_id, 2982 pd_recepient, KM_NOSLEEP); 2983 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2984 " P2P port=%p pd=%p fp %x pd %x", port, pd, 2985 port->fp_port_id.port_id, p2p_info.d_id); 2986 mutex_enter(&port->fp_mutex); 2987 return (FC_SUCCESS); 2988 } 2989 port->fp_port_type.port_type = FC_NS_PORT_N; 2990 mutex_exit(&port->fp_mutex); 2991 src_id = 0; 2992 } 2993 2994 job->job_counter = 1; 2995 job->job_result = FC_SUCCESS; 2996 2997 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2998 KM_SLEEP)) != FC_SUCCESS) { 2999 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 3000 job->job_result = FC_FAILURE; 3001 fctl_jobdone(job); 3002 3003 mutex_enter(&port->fp_mutex); 3004 if (port->fp_statec_busy <= 1) { 3005 mutex_exit(&port->fp_mutex); 3006 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 3007 "Couldn't transport FLOGI"); 3008 mutex_enter(&port->fp_mutex); 3009 } 3010 return (FC_FAILURE); 3011 } 3012 3013 fp_jobwait(job); 3014 3015 mutex_enter(&port->fp_mutex); 3016 if (job->job_result == FC_SUCCESS) { 3017 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3018 mutex_exit(&port->fp_mutex); 3019 fp_ns_init(port, job, KM_SLEEP); 3020 mutex_enter(&port->fp_mutex); 3021 } 3022 } else { 3023 if (state == FC_STATE_LOOP) { 3024 port->fp_topology = FC_TOP_PRIVATE_LOOP; 3025 port->fp_port_id.port_id = 3026 port->fp_lilp_map.lilp_myalpa & 0xFF; 3027 } 3028 } 3029 3030 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 3031 port, job); 3032 3033 return (FC_SUCCESS); 3034 } 3035 3036 3037 /* 3038 * Perform ULP invocations following FC port startup 3039 */ 3040 /* ARGSUSED */ 3041 static void 3042 fp_startup_done(opaque_t arg, uchar_t result) 3043 { 3044 fc_local_port_t *port = arg; 3045 3046 fp_attach_ulps(port, FC_CMD_ATTACH); 3047 3048 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 3049 } 3050 3051 3052 /* 3053 * Perform ULP port attach 3054 */ 3055 static void 3056 fp_ulp_port_attach(void *arg) 3057 { 3058 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 3059 fc_local_port_t *port = att->att_port; 3060 3061 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3062 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3063 3064 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3065 3066 if (att->att_need_pm_idle == B_TRUE) { 3067 fctl_idle_port(port); 3068 } 3069 3070 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3071 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3072 3073 mutex_enter(&att->att_port->fp_mutex); 3074 att->att_port->fp_ulp_attach = 0; 3075 3076 port->fp_task = port->fp_last_task; 3077 port->fp_last_task = FP_TASK_IDLE; 3078 3079 cv_signal(&att->att_port->fp_attach_cv); 3080 3081 mutex_exit(&att->att_port->fp_mutex); 3082 3083 kmem_free(att, sizeof (fp_soft_attach_t)); 3084 } 3085 3086 /* 3087 * Entry point to funnel all requests down to FCAs 3088 */ 3089 static int 3090 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3091 { 3092 int rval; 3093 3094 mutex_enter(&port->fp_mutex); 3095 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3096 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3097 FC_STATE_OFFLINE))) { 3098 /* 3099 * This means there is more than one state change 3100 * at this point of time - Since they are processed 3101 * serially, any processing of the current one should 3102 * be 
failed, and processing should move on to the next 3103 */ 3104 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3105 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3106 if (cmd->cmd_job) { 3107 /* 3108 * A state change that is going to be invalidated 3109 * by another one already in the port driver's queue 3110 * need not go up to all ULPs. This will minimize 3111 * needless processing and ripples in ULP modules. 3112 */ 3113 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3114 } 3115 mutex_exit(&port->fp_mutex); 3116 return (FC_STATEC_BUSY); 3117 } 3118 3119 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3120 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3121 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3122 mutex_exit(&port->fp_mutex); 3123 3124 return (FC_OFFLINE); 3125 } 3126 mutex_exit(&port->fp_mutex); 3127 3128 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3129 if (rval != FC_SUCCESS) { 3130 if (rval == FC_TRAN_BUSY) { 3131 cmd->cmd_retry_interval = fp_retry_delay; 3132 rval = fp_retry_cmd(&cmd->cmd_pkt); 3133 if (rval == FC_FAILURE) { 3134 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3135 } 3136 } 3137 } else { 3138 mutex_enter(&port->fp_mutex); 3139 port->fp_out_fpcmds++; 3140 mutex_exit(&port->fp_mutex); 3141 } 3142 3143 return (rval); 3144 } 3145 3146 3147 /* 3148 * Each time the timeout kicks in, walk the wait queue and decrement the 3149 * retry_interval; when the retry_interval becomes less than 3150 * or equal to zero, re-transport the command. If the re-transport 3151 * fails with BUSY, enqueue the command in the wait queue again. 3152 * 3153 * In order to prevent looping forever because of commands enqueued 3154 * from within this function itself, save the current tail pointer 3155 * (in cur_tail) and exit the loop after serving this command.
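 *
 * As a rough worked example (purely illustrative; the actual values
 * are the fp_retry_ticker/fp_retry_delay tunables used elsewhere in
 * this file): a command whose cmd_retry_interval was set to
 * fp_retry_delay loses fp_retry_ticker on each pass through this
 * routine, so it is re-transported after roughly
 * fp_retry_delay / fp_retry_ticker firings of the timeout.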
3156 */ 3157 static void 3158 fp_resendcmd(void *port_handle) 3159 { 3160 int rval; 3161 fc_local_port_t *port; 3162 fp_cmd_t *cmd; 3163 fp_cmd_t *cur_tail; 3164 3165 port = port_handle; 3166 mutex_enter(&port->fp_mutex); 3167 cur_tail = port->fp_wait_tail; 3168 mutex_exit(&port->fp_mutex); 3169 3170 while ((cmd = fp_deque_cmd(port)) != NULL) { 3171 cmd->cmd_retry_interval -= fp_retry_ticker; 3172 /* Check if we are detaching */ 3173 if (port->fp_soft_state & 3174 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3175 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3176 cmd->cmd_pkt.pkt_reason = 0; 3177 fp_iodone(cmd); 3178 } else if (cmd->cmd_retry_interval <= 0) { 3179 rval = cmd->cmd_transport(port->fp_fca_handle, 3180 &cmd->cmd_pkt); 3181 3182 if (rval != FC_SUCCESS) { 3183 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3184 if (--cmd->cmd_retry_count) { 3185 fp_enque_cmd(port, cmd); 3186 if (cmd == cur_tail) { 3187 break; 3188 } 3189 continue; 3190 } 3191 cmd->cmd_pkt.pkt_state = 3192 FC_PKT_TRAN_BSY; 3193 } else { 3194 cmd->cmd_pkt.pkt_state = 3195 FC_PKT_TRAN_ERROR; 3196 } 3197 cmd->cmd_pkt.pkt_reason = 0; 3198 fp_iodone(cmd); 3199 } else { 3200 mutex_enter(&port->fp_mutex); 3201 port->fp_out_fpcmds++; 3202 mutex_exit(&port->fp_mutex); 3203 } 3204 } else { 3205 fp_enque_cmd(port, cmd); 3206 } 3207 3208 if (cmd == cur_tail) { 3209 break; 3210 } 3211 } 3212 3213 mutex_enter(&port->fp_mutex); 3214 if (port->fp_wait_head) { 3215 timeout_id_t tid; 3216 3217 mutex_exit(&port->fp_mutex); 3218 tid = timeout(fp_resendcmd, (caddr_t)port, 3219 fp_retry_ticks); 3220 mutex_enter(&port->fp_mutex); 3221 port->fp_wait_tid = tid; 3222 } else { 3223 port->fp_wait_tid = NULL; 3224 } 3225 mutex_exit(&port->fp_mutex); 3226 } 3227 3228 3229 /* 3230 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here. 3231 * 3232 * Yes, as you can see below, cmd_retry_count is used here too. That means 3233 * the retries for BUSY are less if there were transport failures (transport 3234 * failure means fca_transport failure). 
The goal is not to exceed overall 3235 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3236 * 3237 * Return Values: 3238 * FC_SUCCESS 3239 * FC_FAILURE 3240 */ 3241 static int 3242 fp_retry_cmd(fc_packet_t *pkt) 3243 { 3244 fp_cmd_t *cmd; 3245 3246 cmd = pkt->pkt_ulp_private; 3247 3248 if (--cmd->cmd_retry_count) { 3249 fp_enque_cmd(cmd->cmd_port, cmd); 3250 return (FC_SUCCESS); 3251 } else { 3252 return (FC_FAILURE); 3253 } 3254 } 3255 3256 3257 /* 3258 * Queue up FC packet for deferred retry 3259 */ 3260 static void 3261 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3262 { 3263 timeout_id_t tid; 3264 3265 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3266 3267 #ifdef DEBUG 3268 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3269 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3270 #endif 3271 3272 mutex_enter(&port->fp_mutex); 3273 if (port->fp_wait_tail) { 3274 port->fp_wait_tail->cmd_next = cmd; 3275 port->fp_wait_tail = cmd; 3276 } else { 3277 ASSERT(port->fp_wait_head == NULL); 3278 port->fp_wait_head = port->fp_wait_tail = cmd; 3279 if (port->fp_wait_tid == NULL) { 3280 mutex_exit(&port->fp_mutex); 3281 tid = timeout(fp_resendcmd, (caddr_t)port, 3282 fp_retry_ticks); 3283 mutex_enter(&port->fp_mutex); 3284 port->fp_wait_tid = tid; 3285 } 3286 } 3287 mutex_exit(&port->fp_mutex); 3288 } 3289 3290 3291 /* 3292 * Handle all RJT codes 3293 */ 3294 static int 3295 fp_handle_reject(fc_packet_t *pkt) 3296 { 3297 int rval = FC_FAILURE; 3298 uchar_t next_class; 3299 fp_cmd_t *cmd; 3300 fc_local_port_t *port; 3301 3302 cmd = pkt->pkt_ulp_private; 3303 port = cmd->cmd_port; 3304 3305 switch (pkt->pkt_state) { 3306 case FC_PKT_FABRIC_RJT: 3307 case FC_PKT_NPORT_RJT: 3308 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3309 next_class = fp_get_nextclass(cmd->cmd_port, 3310 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3311 3312 if (next_class == FC_TRAN_CLASS_INVALID) { 3313 return (rval); 3314 } 3315 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3316 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3317 3318 rval = fp_sendcmd(cmd->cmd_port, cmd, 3319 cmd->cmd_port->fp_fca_handle); 3320 3321 if (rval != FC_SUCCESS) { 3322 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3323 } 3324 } 3325 break; 3326 3327 case FC_PKT_LS_RJT: 3328 case FC_PKT_BA_RJT: 3329 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3330 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3331 cmd->cmd_retry_interval = fp_retry_delay; 3332 rval = fp_retry_cmd(pkt); 3333 } 3334 break; 3335 3336 case FC_PKT_FS_RJT: 3337 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) || 3338 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) && 3339 (pkt->pkt_expln == 0x00))) { 3340 cmd->cmd_retry_interval = fp_retry_delay; 3341 rval = fp_retry_cmd(pkt); 3342 } 3343 break; 3344 3345 case FC_PKT_LOCAL_RJT: 3346 if (pkt->pkt_reason == FC_REASON_QFULL) { 3347 cmd->cmd_retry_interval = fp_retry_delay; 3348 rval = fp_retry_cmd(pkt); 3349 } 3350 break; 3351 3352 default: 3353 FP_TRACE(FP_NHEAD1(1, 0), 3354 "fp_handle_reject(): Invalid pkt_state"); 3355 break; 3356 } 3357 3358 return (rval); 3359 } 3360 3361 3362 /* 3363 * Return the next class of service supported by the FCA 3364 */ 3365 static uchar_t 3366 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3367 { 3368 uchar_t next_class; 3369 3370 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3371 3372 switch (cur_class) { 3373 case FC_TRAN_CLASS_INVALID: 3374 if (port->fp_cos & FC_NS_CLASS1) { 3375 next_class = FC_TRAN_CLASS1; 3376 break; 3377 } 3378 /* FALLTHROUGH */ 3379 3380 case 
FC_TRAN_CLASS1: 3381 if (port->fp_cos & FC_NS_CLASS2) { 3382 next_class = FC_TRAN_CLASS2; 3383 break; 3384 } 3385 /* FALLTHROUGH */ 3386 3387 case FC_TRAN_CLASS2: 3388 if (port->fp_cos & FC_NS_CLASS3) { 3389 next_class = FC_TRAN_CLASS3; 3390 break; 3391 } 3392 /* FALLTHROUGH */ 3393 3394 case FC_TRAN_CLASS3: 3395 default: 3396 next_class = FC_TRAN_CLASS_INVALID; 3397 break; 3398 } 3399 3400 return (next_class); 3401 } 3402 3403 3404 /* 3405 * Determine if a class of service is supported by the FCA 3406 */ 3407 static int 3408 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3409 { 3410 int rval; 3411 3412 switch (tran_class) { 3413 case FC_TRAN_CLASS1: 3414 if (cos & FC_NS_CLASS1) { 3415 rval = FC_SUCCESS; 3416 } else { 3417 rval = FC_FAILURE; 3418 } 3419 break; 3420 3421 case FC_TRAN_CLASS2: 3422 if (cos & FC_NS_CLASS2) { 3423 rval = FC_SUCCESS; 3424 } else { 3425 rval = FC_FAILURE; 3426 } 3427 break; 3428 3429 case FC_TRAN_CLASS3: 3430 if (cos & FC_NS_CLASS3) { 3431 rval = FC_SUCCESS; 3432 } else { 3433 rval = FC_FAILURE; 3434 } 3435 break; 3436 3437 default: 3438 rval = FC_FAILURE; 3439 break; 3440 } 3441 3442 return (rval); 3443 } 3444 3445 3446 /* 3447 * Dequeue FC packet for retry 3448 */ 3449 static fp_cmd_t * 3450 fp_deque_cmd(fc_local_port_t *port) 3451 { 3452 fp_cmd_t *cmd; 3453 3454 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3455 3456 mutex_enter(&port->fp_mutex); 3457 3458 if (port->fp_wait_head == NULL) { 3459 /* 3460 * To avoid races, NULL the fp_wait_tid as 3461 * we are about to exit the timeout thread. 3462 */ 3463 port->fp_wait_tid = NULL; 3464 mutex_exit(&port->fp_mutex); 3465 return (NULL); 3466 } 3467 3468 cmd = port->fp_wait_head; 3469 port->fp_wait_head = cmd->cmd_next; 3470 cmd->cmd_next = NULL; 3471 3472 if (port->fp_wait_head == NULL) { 3473 port->fp_wait_tail = NULL; 3474 } 3475 mutex_exit(&port->fp_mutex); 3476 3477 return (cmd); 3478 } 3479 3480 3481 /* 3482 * Wait for job completion 3483 */ 3484 static void 3485 fp_jobwait(job_request_t *job) 3486 { 3487 sema_p(&job->job_port_sema); 3488 } 3489 3490 3491 /* 3492 * Convert FC packet state to FC errno 3493 */ 3494 int 3495 fp_state_to_rval(uchar_t state) 3496 { 3497 int count; 3498 3499 for (count = 0; count < sizeof (fp_xlat) / 3500 sizeof (fp_xlat[0]); count++) { 3501 if (fp_xlat[count].xlat_state == state) { 3502 return (fp_xlat[count].xlat_rval); 3503 } 3504 } 3505 3506 return (FC_FAILURE); 3507 } 3508 3509 3510 /* 3511 * For Synchronous I/O requests, the caller is 3512 * expected to do fctl_jobdone(if necessary) 3513 * 3514 * We want to preserve at least one failure in the 3515 * job_result if it happens. 
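 *
 * In other words (merely restating the check below): job_result is
 * only overwritten while it still reads FC_SUCCESS, so the first
 * failure translated by fp_state_to_rval() sticks for the remainder
 * of the job.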
3516 * 3517 */ 3518 static void 3519 fp_iodone(fp_cmd_t *cmd) 3520 { 3521 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3522 job_request_t *job = cmd->cmd_job; 3523 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3524 3525 ASSERT(job != NULL); 3526 ASSERT(cmd->cmd_port != NULL); 3527 ASSERT(&cmd->cmd_pkt != NULL); 3528 3529 mutex_enter(&job->job_mutex); 3530 if (job->job_result == FC_SUCCESS) { 3531 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3532 } 3533 mutex_exit(&job->job_mutex); 3534 3535 if (pd) { 3536 mutex_enter(&pd->pd_mutex); 3537 pd->pd_flags = PD_IDLE; 3538 mutex_exit(&pd->pd_mutex); 3539 } 3540 3541 if (ulp_pkt) { 3542 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3543 FP_IS_PKT_ERROR(ulp_pkt)) { 3544 fc_local_port_t *port; 3545 fc_remote_node_t *node; 3546 3547 port = cmd->cmd_port; 3548 3549 mutex_enter(&pd->pd_mutex); 3550 pd->pd_state = PORT_DEVICE_INVALID; 3551 pd->pd_ref_count--; 3552 node = pd->pd_remote_nodep; 3553 mutex_exit(&pd->pd_mutex); 3554 3555 ASSERT(node != NULL); 3556 ASSERT(port != NULL); 3557 3558 if (fctl_destroy_remote_port(port, pd) == 0) { 3559 fctl_destroy_remote_node(node); 3560 } 3561 3562 ulp_pkt->pkt_pd = NULL; 3563 } 3564 3565 ulp_pkt->pkt_comp(ulp_pkt); 3566 } 3567 3568 fp_free_pkt(cmd); 3569 fp_jobdone(job); 3570 } 3571 3572 3573 /* 3574 * Job completion handler 3575 */ 3576 static void 3577 fp_jobdone(job_request_t *job) 3578 { 3579 mutex_enter(&job->job_mutex); 3580 ASSERT(job->job_counter > 0); 3581 3582 if (--job->job_counter != 0) { 3583 mutex_exit(&job->job_mutex); 3584 return; 3585 } 3586 3587 if (job->job_ulp_pkts) { 3588 ASSERT(job->job_ulp_listlen > 0); 3589 kmem_free(job->job_ulp_pkts, 3590 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3591 } 3592 3593 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3594 mutex_exit(&job->job_mutex); 3595 fctl_jobdone(job); 3596 } else { 3597 mutex_exit(&job->job_mutex); 3598 sema_v(&job->job_port_sema); 3599 } 3600 } 3601 3602 3603 /* 3604 * Try to perform shutdown of a port during a detach. No return 3605 * value since the detach should not fail because the port shutdown 3606 * failed. 3607 */ 3608 static void 3609 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3610 { 3611 int index; 3612 int count; 3613 int flags; 3614 fp_cmd_t *cmd; 3615 struct pwwn_hash *head; 3616 fc_remote_port_t *pd; 3617 3618 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3619 3620 job->job_result = FC_SUCCESS; 3621 3622 if (port->fp_taskq) { 3623 /* 3624 * We must release the mutex here to ensure that other 3625 * potential jobs can complete their processing. Many 3626 * also need this mutex. 3627 */ 3628 mutex_exit(&port->fp_mutex); 3629 taskq_wait(port->fp_taskq); 3630 mutex_enter(&port->fp_mutex); 3631 } 3632 3633 if (port->fp_offline_tid) { 3634 timeout_id_t tid; 3635 3636 tid = port->fp_offline_tid; 3637 port->fp_offline_tid = NULL; 3638 mutex_exit(&port->fp_mutex); 3639 (void) untimeout(tid); 3640 mutex_enter(&port->fp_mutex); 3641 } 3642 3643 if (port->fp_wait_tid) { 3644 timeout_id_t tid; 3645 3646 tid = port->fp_wait_tid; 3647 port->fp_wait_tid = NULL; 3648 mutex_exit(&port->fp_mutex); 3649 (void) untimeout(tid); 3650 } else { 3651 mutex_exit(&port->fp_mutex); 3652 } 3653 3654 /* 3655 * While we cancel the timeout, let's also return the 3656 * the outstanding requests back to the callers. 
3657 */ 3658 while ((cmd = fp_deque_cmd(port)) != NULL) { 3659 ASSERT(cmd->cmd_job != NULL); 3660 cmd->cmd_job->job_result = FC_OFFLINE; 3661 fp_iodone(cmd); 3662 } 3663 3664 /* 3665 * Gracefully LOGO with all the devices logged in. 3666 */ 3667 mutex_enter(&port->fp_mutex); 3668 3669 for (count = index = 0; index < pwwn_table_size; index++) { 3670 head = &port->fp_pwwn_table[index]; 3671 pd = head->pwwn_head; 3672 while (pd != NULL) { 3673 mutex_enter(&pd->pd_mutex); 3674 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3675 count++; 3676 } 3677 mutex_exit(&pd->pd_mutex); 3678 pd = pd->pd_wwn_hnext; 3679 } 3680 } 3681 3682 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3683 flags = job->job_flags; 3684 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3685 } else { 3686 flags = 0; 3687 } 3688 if (count) { 3689 job->job_counter = count; 3690 3691 for (index = 0; index < pwwn_table_size; index++) { 3692 head = &port->fp_pwwn_table[index]; 3693 pd = head->pwwn_head; 3694 while (pd != NULL) { 3695 mutex_enter(&pd->pd_mutex); 3696 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3697 ASSERT(pd->pd_login_count > 0); 3698 /* 3699 * Force the counter to ONE in order 3700 * for us to really send LOGO els. 3701 */ 3702 pd->pd_login_count = 1; 3703 mutex_exit(&pd->pd_mutex); 3704 mutex_exit(&port->fp_mutex); 3705 (void) fp_logout(port, pd, job); 3706 mutex_enter(&port->fp_mutex); 3707 } else { 3708 mutex_exit(&pd->pd_mutex); 3709 } 3710 pd = pd->pd_wwn_hnext; 3711 } 3712 } 3713 mutex_exit(&port->fp_mutex); 3714 fp_jobwait(job); 3715 } else { 3716 mutex_exit(&port->fp_mutex); 3717 } 3718 3719 if (job->job_result != FC_SUCCESS) { 3720 FP_TRACE(FP_NHEAD1(9, 0), 3721 "Can't logout all devices. Proceeding with" 3722 " port shutdown"); 3723 job->job_result = FC_SUCCESS; 3724 } 3725 3726 fctl_destroy_all_remote_ports(port); 3727 3728 mutex_enter(&port->fp_mutex); 3729 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3730 mutex_exit(&port->fp_mutex); 3731 fp_ns_fini(port, job); 3732 } else { 3733 mutex_exit(&port->fp_mutex); 3734 } 3735 3736 if (flags) { 3737 job->job_flags = flags; 3738 } 3739 3740 mutex_enter(&port->fp_mutex); 3741 3742 } 3743 3744 3745 /* 3746 * Build the port driver's data structures based on the AL_PA list 3747 */ 3748 static void 3749 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3750 { 3751 int rval; 3752 int flag; 3753 int count; 3754 uint32_t d_id; 3755 fc_remote_port_t *pd; 3756 fc_lilpmap_t *lilp_map; 3757 3758 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3759 3760 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3761 job->job_result = FC_OFFLINE; 3762 mutex_exit(&port->fp_mutex); 3763 fp_jobdone(job); 3764 mutex_enter(&port->fp_mutex); 3765 return; 3766 } 3767 3768 if (port->fp_lilp_map.lilp_length == 0) { 3769 mutex_exit(&port->fp_mutex); 3770 job->job_result = FC_NO_MAP; 3771 fp_jobdone(job); 3772 mutex_enter(&port->fp_mutex); 3773 return; 3774 } 3775 mutex_exit(&port->fp_mutex); 3776 3777 lilp_map = &port->fp_lilp_map; 3778 job->job_counter = lilp_map->lilp_length; 3779 3780 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3781 flag = FP_CMD_PLOGI_RETAIN; 3782 } else { 3783 flag = FP_CMD_PLOGI_DONT_CARE; 3784 } 3785 3786 for (count = 0; count < lilp_map->lilp_length; count++) { 3787 d_id = lilp_map->lilp_alpalist[count]; 3788 3789 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3790 fp_jobdone(job); 3791 continue; 3792 } 3793 3794 pd = fctl_get_remote_port_by_did(port, d_id); 3795 if (pd) { 3796 mutex_enter(&pd->pd_mutex); 3797 if (flag == FP_CMD_PLOGI_DONT_CARE || 3798 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3799 mutex_exit(&pd->pd_mutex); 3800 fp_jobdone(job); 3801 continue; 3802 } 3803 mutex_exit(&pd->pd_mutex); 3804 } 3805 3806 rval = fp_port_login(port, d_id, job, flag, 3807 KM_SLEEP, pd, NULL); 3808 if (rval != FC_SUCCESS) { 3809 fp_jobdone(job); 3810 } 3811 } 3812 3813 mutex_enter(&port->fp_mutex); 3814 } 3815 3816 3817 /* 3818 * Perform loop ONLINE processing 3819 */ 3820 static void 3821 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3822 { 3823 int count; 3824 int rval; 3825 uint32_t d_id; 3826 uint32_t listlen; 3827 fc_lilpmap_t *lilp_map; 3828 fc_remote_port_t *pd; 3829 fc_portmap_t *changelist; 3830 3831 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3832 3833 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3834 port, job); 3835 3836 lilp_map = &port->fp_lilp_map; 3837 3838 if (lilp_map->lilp_length) { 3839 mutex_enter(&port->fp_mutex); 3840 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3841 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3842 mutex_exit(&port->fp_mutex); 3843 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3844 } else { 3845 mutex_exit(&port->fp_mutex); 3846 } 3847 3848 job->job_counter = lilp_map->lilp_length; 3849 3850 for (count = 0; count < lilp_map->lilp_length; count++) { 3851 d_id = lilp_map->lilp_alpalist[count]; 3852 3853 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3854 fp_jobdone(job); 3855 continue; 3856 } 3857 3858 pd = fctl_get_remote_port_by_did(port, d_id); 3859 if (pd != NULL) { 3860 #ifdef DEBUG 3861 mutex_enter(&pd->pd_mutex); 3862 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3863 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3864 } 3865 mutex_exit(&pd->pd_mutex); 3866 #endif 3867 fp_jobdone(job); 3868 continue; 3869 } 3870 3871 rval = fp_port_login(port, d_id, job, 3872 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3873 3874 if (rval != FC_SUCCESS) { 3875 fp_jobdone(job); 3876 } 3877 } 3878 fp_jobwait(job); 3879 } 3880 listlen = 0; 3881 changelist = NULL; 3882 3883 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3884 mutex_enter(&port->fp_mutex); 3885 ASSERT(port->fp_statec_busy > 0); 3886 if (port->fp_statec_busy == 1) { 3887 mutex_exit(&port->fp_mutex); 3888 fctl_fillout_map(port, &changelist, &listlen, 3889 1, 0, orphan); 3890 3891 mutex_enter(&port->fp_mutex); 3892 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3893 ASSERT(port->fp_total_devices == 0); 3894 port->fp_total_devices = port->fp_dev_count; 3895 } 3896 } else { 3897 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3898 } 3899 mutex_exit(&port->fp_mutex); 3900 } 3901 3902 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3903 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3904 listlen, listlen, KM_SLEEP); 3905 } else { 3906 mutex_enter(&port->fp_mutex); 3907 if (--port->fp_statec_busy == 0) { 3908 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3909 } 3910 ASSERT(changelist == NULL && listlen == 0); 3911 mutex_exit(&port->fp_mutex); 3912 } 3913 3914 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3915 port, job); 3916 } 3917 3918 3919 /* 3920 * Get an Arbitrated Loop map from the underlying FCA 3921 */ 3922 static int 3923 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3924 { 3925 int rval; 3926 3927 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3928 port, lilp_map); 3929 3930 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3931 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3932 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3933 3934 if (rval != FC_SUCCESS) { 3935 rval = FC_NO_MAP; 3936 } else if (lilp_map->lilp_length == 0 && 3937 (lilp_map->lilp_magic >= MAGIC_LISM && 3938 lilp_map->lilp_magic < MAGIC_LIRP)) { 3939 uchar_t lilp_length; 3940 3941 /* 3942 * Since the map length is zero, provide all 3943 * the valid AL_PAs for NL_ports discovery. 3944 */ 3945 lilp_length = sizeof (fp_valid_alpas) / 3946 sizeof (fp_valid_alpas[0]); 3947 lilp_map->lilp_length = lilp_length; 3948 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3949 lilp_length); 3950 } else { 3951 rval = fp_validate_lilp_map(lilp_map); 3952 3953 if (rval == FC_SUCCESS) { 3954 mutex_enter(&port->fp_mutex); 3955 port->fp_total_devices = lilp_map->lilp_length - 1; 3956 mutex_exit(&port->fp_mutex); 3957 } 3958 } 3959 3960 mutex_enter(&port->fp_mutex); 3961 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3962 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3963 mutex_exit(&port->fp_mutex); 3964 3965 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3966 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3967 FP_TRACE(FP_NHEAD1(9, 0), 3968 "FCA reset failed after LILP map was found" 3969 " to be invalid"); 3970 } 3971 } else if (rval == FC_SUCCESS) { 3972 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3973 mutex_exit(&port->fp_mutex); 3974 } else { 3975 mutex_exit(&port->fp_mutex); 3976 } 3977 3978 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3979 lilp_map); 3980 3981 return (rval); 3982 } 3983 3984 3985 /* 3986 * Perform Fabric Login: 3987 * 3988 * Return Values: 3989 * FC_SUCCESS 3990 * FC_FAILURE 3991 * FC_NOMEM 3992 * FC_TRANSPORT_ERROR 3993 * and a lot others defined in fc_error.h 3994 */ 3995 static int 3996 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3997 int flag, int sleep) 3998 { 3999 int rval; 4000 fp_cmd_t *cmd; 4001 uchar_t class; 4002 4003 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4004 4005 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 4006 port, job); 4007 4008 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4009 if (class == FC_TRAN_CLASS_INVALID) { 4010 return (FC_ELS_BAD); 4011 } 4012 4013 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4014 sizeof (la_els_logi_t), sleep, NULL); 4015 if (cmd == NULL) { 4016 return (FC_NOMEM); 4017 } 4018 4019 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4020 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4021 cmd->cmd_flags = flag; 4022 cmd->cmd_retry_count = fp_retry_count; 4023 cmd->cmd_ulp_pkt = NULL; 4024 4025 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 4026 job, LA_ELS_FLOGI); 4027 4028 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 4029 if (rval != FC_SUCCESS) { 4030 fp_free_pkt(cmd); 4031 } 4032 4033 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 4034 port, job); 4035 4036 return (rval); 4037 } 4038 4039 4040 /* 4041 * In some scenarios such as private loop device discovery period 4042 * the fc_remote_port_t data structure isn't allocated. The allocation 4043 * is done when the PLOGI is successful. 
In some other scenarios 4044 * such as Fabric topology, the fc_remote_port_t is already created 4045 * and initialized with appropriate values (as the NS provides 4046 * them) 4047 */ 4048 static int 4049 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4050 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4051 { 4052 uchar_t class; 4053 fp_cmd_t *cmd; 4054 uint32_t src_id; 4055 fc_remote_port_t *tmp_pd; 4056 int relogin; 4057 int found = 0; 4058 4059 #ifdef DEBUG 4060 if (pd == NULL) { 4061 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4062 } 4063 #endif 4064 ASSERT(job->job_counter > 0); 4065 4066 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4067 if (class == FC_TRAN_CLASS_INVALID) { 4068 return (FC_ELS_BAD); 4069 } 4070 4071 mutex_enter(&port->fp_mutex); 4072 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4073 mutex_exit(&port->fp_mutex); 4074 4075 relogin = 1; 4076 if (tmp_pd) { 4077 mutex_enter(&tmp_pd->pd_mutex); 4078 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4079 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4080 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4081 relogin = 0; 4082 } 4083 mutex_exit(&tmp_pd->pd_mutex); 4084 } 4085 4086 if (!relogin) { 4087 mutex_enter(&tmp_pd->pd_mutex); 4088 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4089 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4090 } 4091 mutex_exit(&tmp_pd->pd_mutex); 4092 4093 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4094 sizeof (la_els_adisc_t), sleep, tmp_pd); 4095 if (cmd == NULL) { 4096 return (FC_NOMEM); 4097 } 4098 4099 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4100 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4101 cmd->cmd_flags = cmd_flag; 4102 cmd->cmd_retry_count = fp_retry_count; 4103 cmd->cmd_ulp_pkt = ulp_pkt; 4104 4105 mutex_enter(&port->fp_mutex); 4106 mutex_enter(&tmp_pd->pd_mutex); 4107 fp_adisc_init(cmd, job); 4108 mutex_exit(&tmp_pd->pd_mutex); 4109 mutex_exit(&port->fp_mutex); 4110 4111 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4112 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4113 4114 } else { 4115 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4116 sizeof (la_els_logi_t), sleep, pd); 4117 if (cmd == NULL) { 4118 return (FC_NOMEM); 4119 } 4120 4121 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4122 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4123 cmd->cmd_flags = cmd_flag; 4124 cmd->cmd_retry_count = fp_retry_count; 4125 cmd->cmd_ulp_pkt = ulp_pkt; 4126 4127 mutex_enter(&port->fp_mutex); 4128 src_id = port->fp_port_id.port_id; 4129 mutex_exit(&port->fp_mutex); 4130 4131 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4132 job, LA_ELS_PLOGI); 4133 } 4134 4135 if (pd) { 4136 mutex_enter(&pd->pd_mutex); 4137 pd->pd_flags = PD_ELS_IN_PROGRESS; 4138 mutex_exit(&pd->pd_mutex); 4139 } 4140 4141 /* npiv check to make sure we don't log into ourself */ 4142 if (relogin && 4143 ((port->fp_npiv_type == FC_NPIV_PORT) || 4144 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4145 if ((d_id & 0xffff00) == 4146 (port->fp_port_id.port_id & 0xffff00)) { 4147 found = 1; 4148 } 4149 } 4150 4151 if (found || 4152 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4153 if (found) { 4154 fc_packet_t *pkt = &cmd->cmd_pkt; 4155 pkt->pkt_state = FC_PKT_NPORT_RJT; 4156 } 4157 if (pd) { 4158 mutex_enter(&pd->pd_mutex); 4159 pd->pd_flags = PD_IDLE; 4160 mutex_exit(&pd->pd_mutex); 4161 } 4162 4163 if (ulp_pkt) { 4164 fc_packet_t *pkt = &cmd->cmd_pkt; 4165 4166 ulp_pkt->pkt_state = pkt->pkt_state; 4167 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4168 ulp_pkt->pkt_action = pkt->pkt_action; 4169 ulp_pkt->pkt_expln = pkt->pkt_expln; 4170 } 4171 4172 fp_iodone(cmd); 4173 } 4174 4175 return (FC_SUCCESS); 4176 } 4177 4178 4179 /* 4180 * Register the LOGIN parameters with a port device 4181 */ 4182 static void 4183 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4184 la_els_logi_t *acc, uchar_t class) 4185 { 4186 fc_remote_node_t *node; 4187 4188 ASSERT(pd != NULL); 4189 4190 mutex_enter(&pd->pd_mutex); 4191 node = pd->pd_remote_nodep; 4192 if (pd->pd_login_count == 0) { 4193 pd->pd_login_count++; 4194 } 4195 4196 if (handle) { 4197 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp, 4198 (uint8_t *)&acc->common_service, 4199 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4200 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1, 4201 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4202 DDI_DEV_AUTOINCR); 4203 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2, 4204 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4205 DDI_DEV_AUTOINCR); 4206 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3, 4207 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4208 DDI_DEV_AUTOINCR); 4209 } else { 4210 pd->pd_csp = acc->common_service; 4211 pd->pd_clsp1 = acc->class_1; 4212 pd->pd_clsp2 = acc->class_2; 4213 pd->pd_clsp3 = acc->class_3; 4214 } 4215 4216 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4217 pd->pd_login_class = class; 4218 mutex_exit(&pd->pd_mutex); 4219 4220 #ifndef __lock_lint 4221 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4222 pd->pd_port_id.port_id) == pd); 4223 #endif 4224 4225 mutex_enter(&node->fd_mutex); 4226 if (handle) { 4227 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv, 4228 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4229 DDI_DEV_AUTOINCR); 4230 } else { 4231 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4232 } 4233 mutex_exit(&node->fd_mutex); 4234 } 4235 4236 4237 /* 4238 * Mark the remote port as OFFLINE 4239 */ 4240 static void 4241 fp_remote_port_offline(fc_remote_port_t *pd) 4242 { 4243 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4244 if (pd->pd_login_count && 4245 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4246 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4247 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4248 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4249 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4250 pd->pd_login_class = 0; 4251 } 4252 pd->pd_type = PORT_DEVICE_OLD; 4253 pd->pd_flags = PD_IDLE; 4254 fctl_tc_reset(&pd->pd_logo_tc); 4255 } 4256 4257 4258 /* 4259 * Deregistration of a port device 4260 */ 4261 static void 4262 fp_unregister_login(fc_remote_port_t *pd) 4263 { 4264 fc_remote_node_t *node; 4265 4266 ASSERT(pd != NULL); 4267 4268 mutex_enter(&pd->pd_mutex); 4269 pd->pd_login_count = 0; 4270 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4271 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4272 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4273 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4274 4275 pd->pd_state = PORT_DEVICE_VALID; 4276 pd->pd_login_class = 0; 4277 node = pd->pd_remote_nodep; 4278 mutex_exit(&pd->pd_mutex); 4279 4280 mutex_enter(&node->fd_mutex); 4281 bzero(node->fd_vv, sizeof (node->fd_vv)); 4282 mutex_exit(&node->fd_mutex); 4283 } 4284 4285 4286 /* 4287 * Handle OFFLINE state of an FCA port 4288 */ 4289 static void 4290 fp_port_offline(fc_local_port_t *port, int 
notify) 4291 { 4292 int index; 4293 int statec; 4294 timeout_id_t tid; 4295 struct pwwn_hash *head; 4296 fc_remote_port_t *pd; 4297 4298 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4299 4300 for (index = 0; index < pwwn_table_size; index++) { 4301 head = &port->fp_pwwn_table[index]; 4302 pd = head->pwwn_head; 4303 while (pd != NULL) { 4304 mutex_enter(&pd->pd_mutex); 4305 fp_remote_port_offline(pd); 4306 fctl_delist_did_table(port, pd); 4307 mutex_exit(&pd->pd_mutex); 4308 pd = pd->pd_wwn_hnext; 4309 } 4310 } 4311 port->fp_total_devices = 0; 4312 4313 statec = 0; 4314 if (notify) { 4315 /* 4316 * Decrement the statec busy counter as we 4317 * are almost done with handling the state 4318 * change 4319 */ 4320 ASSERT(port->fp_statec_busy > 0); 4321 if (--port->fp_statec_busy == 0) { 4322 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4323 } 4324 mutex_exit(&port->fp_mutex); 4325 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4326 0, 0, KM_SLEEP); 4327 mutex_enter(&port->fp_mutex); 4328 4329 if (port->fp_statec_busy) { 4330 statec++; 4331 } 4332 } else if (port->fp_statec_busy > 1) { 4333 statec++; 4334 } 4335 4336 if ((tid = port->fp_offline_tid) != NULL) { 4337 mutex_exit(&port->fp_mutex); 4338 (void) untimeout(tid); 4339 mutex_enter(&port->fp_mutex); 4340 } 4341 4342 if (!statec) { 4343 port->fp_offline_tid = timeout(fp_offline_timeout, 4344 (caddr_t)port, fp_offline_ticks); 4345 } 4346 } 4347 4348 4349 /* 4350 * Offline devices and send up a state change notification to ULPs 4351 */ 4352 static void 4353 fp_offline_timeout(void *port_handle) 4354 { 4355 int ret; 4356 fc_local_port_t *port = port_handle; 4357 uint32_t listlen = 0; 4358 fc_portmap_t *changelist = NULL; 4359 4360 mutex_enter(&port->fp_mutex); 4361 4362 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4363 (port->fp_soft_state & 4364 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4365 port->fp_dev_count == 0 || port->fp_statec_busy) { 4366 port->fp_offline_tid = NULL; 4367 mutex_exit(&port->fp_mutex); 4368 return; 4369 } 4370 4371 mutex_exit(&port->fp_mutex); 4372 4373 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4374 4375 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4376 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4377 FC_FCA_CORE)) != FC_SUCCESS) { 4378 FP_TRACE(FP_NHEAD1(9, ret), 4379 "Failed to force adapter dump"); 4380 } else { 4381 FP_TRACE(FP_NHEAD1(9, 0), 4382 "Forced adapter dump successfully"); 4383 } 4384 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4385 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4386 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4387 FP_TRACE(FP_NHEAD1(9, ret), 4388 "Failed to force adapter dump and reset"); 4389 } else { 4390 FP_TRACE(FP_NHEAD1(9, 0), 4391 "Forced adapter dump and reset successfully"); 4392 } 4393 } 4394 4395 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4396 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4397 listlen, listlen, KM_SLEEP); 4398 4399 mutex_enter(&port->fp_mutex); 4400 port->fp_offline_tid = NULL; 4401 mutex_exit(&port->fp_mutex); 4402 } 4403 4404 4405 /* 4406 * Perform general purpose ELS request initialization 4407 */ 4408 static void 4409 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4410 void (*comp) (), job_request_t *job) 4411 { 4412 fc_packet_t *pkt; 4413 4414 pkt = &cmd->cmd_pkt; 4415 cmd->cmd_job = job; 4416 4417 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4418 pkt->pkt_cmd_fhdr.d_id = d_id; 4419 pkt->pkt_cmd_fhdr.s_id = s_id; 4420 
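/*
 * Note (added for clarity, describing the assignments below): the rest of
 * the frame header describes a brand new exchange -- OX_ID/RX_ID are left
 * unassigned (0xffff), the sequence fields start at zero, and F_CTL marks
 * the first sequence with sequence initiative held locally.  The caller's
 * completion routine and the standard ELS timeout are hooked up last.
 */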
pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4421 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4422 pkt->pkt_cmd_fhdr.seq_id = 0; 4423 pkt->pkt_cmd_fhdr.df_ctl = 0; 4424 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4425 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4426 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4427 pkt->pkt_cmd_fhdr.ro = 0; 4428 pkt->pkt_cmd_fhdr.rsvd = 0; 4429 pkt->pkt_comp = comp; 4430 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4431 } 4432 4433 4434 /* 4435 * Initialize PLOGI/FLOGI ELS request 4436 */ 4437 static void 4438 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4439 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4440 { 4441 ls_code_t payload; 4442 4443 fp_els_init(cmd, s_id, d_id, intr, job); 4444 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4445 4446 payload.ls_code = ls_code; 4447 payload.mbz = 0; 4448 4449 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, 4450 (uint8_t *)&port->fp_service_params, 4451 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4452 DDI_DEV_AUTOINCR); 4453 4454 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4455 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4456 DDI_DEV_AUTOINCR); 4457 } 4458 4459 4460 /* 4461 * Initialize LOGO ELS request 4462 */ 4463 static void 4464 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4465 { 4466 fc_local_port_t *port; 4467 fc_packet_t *pkt; 4468 la_els_logo_t payload; 4469 4470 port = pd->pd_port; 4471 pkt = &cmd->cmd_pkt; 4472 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4473 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4474 4475 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4476 fp_logo_intr, job); 4477 4478 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4479 4480 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4481 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4482 4483 payload.ls_code.ls_code = LA_ELS_LOGO; 4484 payload.ls_code.mbz = 0; 4485 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4486 payload.nport_id = port->fp_port_id; 4487 4488 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4489 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4490 } 4491 4492 /* 4493 * Initialize RNID ELS request 4494 */ 4495 static void 4496 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4497 { 4498 fc_local_port_t *port; 4499 fc_packet_t *pkt; 4500 la_els_rnid_t payload; 4501 fc_remote_port_t *pd; 4502 4503 pkt = &cmd->cmd_pkt; 4504 pd = pkt->pkt_pd; 4505 port = pd->pd_port; 4506 4507 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4508 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4509 4510 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4511 fp_rnid_intr, job); 4512 4513 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4514 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4515 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4516 4517 payload.ls_code.ls_code = LA_ELS_RNID; 4518 payload.ls_code.mbz = 0; 4519 payload.data_format = flag; 4520 4521 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4522 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4523 } 4524 4525 /* 4526 * Initialize RLS ELS request 4527 */ 4528 static void 4529 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4530 { 4531 fc_local_port_t *port; 4532 fc_packet_t *pkt; 4533 la_els_rls_t payload; 4534 fc_remote_port_t *pd; 4535 4536 pkt = &cmd->cmd_pkt; 4537 pd = pkt->pkt_pd; 4538 port = pd->pd_port; 4539 4540 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4541 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4542 
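/*
 * Note (added for clarity, describing the code below): the RLS request is
 * built with the generic ELS initialization -- local port S_ID, remote
 * port D_ID, the FCA ELS transport -- and a payload carrying the
 * LA_ELS_RLS command code together with the local N_Port identifier.
 */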
4543 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4544 fp_rls_intr, job); 4545 4546 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4547 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4548 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4549 4550 payload.ls_code.ls_code = LA_ELS_RLS; 4551 payload.ls_code.mbz = 0; 4552 payload.rls_portid = port->fp_port_id; 4553 4554 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4555 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4556 } 4557 4558 4559 /* 4560 * Initialize an ADISC ELS request 4561 */ 4562 static void 4563 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4564 { 4565 fc_local_port_t *port; 4566 fc_packet_t *pkt; 4567 la_els_adisc_t payload; 4568 fc_remote_port_t *pd; 4569 4570 pkt = &cmd->cmd_pkt; 4571 pd = pkt->pkt_pd; 4572 port = pd->pd_port; 4573 4574 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4575 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4576 4577 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4578 fp_adisc_intr, job); 4579 4580 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4581 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4582 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4583 4584 payload.ls_code.ls_code = LA_ELS_ADISC; 4585 payload.ls_code.mbz = 0; 4586 payload.nport_id = port->fp_port_id; 4587 payload.port_wwn = port->fp_service_params.nport_ww_name; 4588 payload.node_wwn = port->fp_service_params.node_ww_name; 4589 payload.hard_addr = port->fp_hard_addr; 4590 4591 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4592 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4593 } 4594 4595 4596 /* 4597 * Send up a state change notification to ULPs. 4598 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4599 */ 4600 static int 4601 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4602 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4603 { 4604 fc_port_clist_t *clist; 4605 fc_remote_port_t *pd; 4606 int count; 4607 4608 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4609 4610 clist = kmem_zalloc(sizeof (*clist), sleep); 4611 if (clist == NULL) { 4612 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4613 return (FC_NOMEM); 4614 } 4615 4616 clist->clist_state = state; 4617 4618 mutex_enter(&port->fp_mutex); 4619 clist->clist_flags = port->fp_topology; 4620 mutex_exit(&port->fp_mutex); 4621 4622 clist->clist_port = (opaque_t)port; 4623 clist->clist_len = listlen; 4624 clist->clist_size = alloc_len; 4625 clist->clist_map = changelist; 4626 4627 /* 4628 * Bump the reference count of each fc_remote_port_t in this changelist. 4629 * This is necessary since these devices will be sitting in a taskq 4630 * and referenced later. When the state change notification is 4631 * complete, the reference counts will be decremented. 
4632 */ 4633 for (count = 0; count < clist->clist_len; count++) { 4634 pd = clist->clist_map[count].map_pd; 4635 4636 if (pd != NULL) { 4637 mutex_enter(&pd->pd_mutex); 4638 ASSERT((pd->pd_ref_count >= 0) || 4639 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4640 pd->pd_ref_count++; 4641 4642 if (clist->clist_map[count].map_state != 4643 PORT_DEVICE_INVALID) { 4644 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4645 } 4646 4647 mutex_exit(&pd->pd_mutex); 4648 } 4649 } 4650 4651 #ifdef DEBUG 4652 /* 4653 * Sanity check for presence of OLD devices in the hash lists 4654 */ 4655 if (clist->clist_size) { 4656 ASSERT(clist->clist_map != NULL); 4657 for (count = 0; count < clist->clist_len; count++) { 4658 if (clist->clist_map[count].map_state == 4659 PORT_DEVICE_INVALID) { 4660 la_wwn_t pwwn; 4661 fc_portid_t d_id; 4662 4663 pd = clist->clist_map[count].map_pd; 4664 ASSERT(pd != NULL); 4665 4666 mutex_enter(&pd->pd_mutex); 4667 pwwn = pd->pd_port_name; 4668 d_id = pd->pd_port_id; 4669 mutex_exit(&pd->pd_mutex); 4670 4671 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4672 ASSERT(pd != clist->clist_map[count].map_pd); 4673 4674 pd = fctl_get_remote_port_by_did(port, 4675 d_id.port_id); 4676 ASSERT(pd != clist->clist_map[count].map_pd); 4677 } 4678 } 4679 } 4680 #endif 4681 4682 mutex_enter(&port->fp_mutex); 4683 4684 if (state == FC_STATE_ONLINE) { 4685 if (--port->fp_statec_busy == 0) { 4686 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4687 } 4688 } 4689 mutex_exit(&port->fp_mutex); 4690 4691 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4692 clist, KM_SLEEP); 4693 4694 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4695 "state=%x, len=%d", port, state, listlen); 4696 4697 return (FC_SUCCESS); 4698 } 4699 4700 4701 /* 4702 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4703 */ 4704 static int 4705 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4706 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4707 { 4708 int ret; 4709 fc_port_clist_t *clist; 4710 4711 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4712 4713 clist = kmem_zalloc(sizeof (*clist), sleep); 4714 if (clist == NULL) { 4715 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4716 return (FC_NOMEM); 4717 } 4718 4719 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4720 4721 mutex_enter(&port->fp_mutex); 4722 clist->clist_flags = port->fp_topology; 4723 mutex_exit(&port->fp_mutex); 4724 4725 clist->clist_port = (opaque_t)port; 4726 clist->clist_len = listlen; 4727 clist->clist_size = alloc_len; 4728 clist->clist_map = changelist; 4729 4730 /* Send sysevents for target state changes */ 4731 4732 if (clist->clist_size) { 4733 int count; 4734 fc_remote_port_t *pd; 4735 4736 ASSERT(clist->clist_map != NULL); 4737 for (count = 0; count < clist->clist_len; count++) { 4738 pd = clist->clist_map[count].map_pd; 4739 4740 /* 4741 * Bump reference counts on all fc_remote_port_t 4742 * structs in this list. We don't know when the task 4743 * will fire, and we don't need these fc_remote_port_t 4744 * structs going away behind our back. 
4745 */ 4746 if (pd) { 4747 mutex_enter(&pd->pd_mutex); 4748 ASSERT((pd->pd_ref_count >= 0) || 4749 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4750 pd->pd_ref_count++; 4751 mutex_exit(&pd->pd_mutex); 4752 } 4753 4754 if (clist->clist_map[count].map_state == 4755 PORT_DEVICE_VALID) { 4756 if (clist->clist_map[count].map_type == 4757 PORT_DEVICE_NEW) { 4758 /* Update our state change counter */ 4759 mutex_enter(&port->fp_mutex); 4760 port->fp_last_change++; 4761 mutex_exit(&port->fp_mutex); 4762 4763 /* Additions */ 4764 fp_log_target_event(port, 4765 ESC_SUNFC_TARGET_ADD, 4766 clist->clist_map[count].map_pwwn, 4767 clist->clist_map[count].map_did. 4768 port_id); 4769 } 4770 4771 } else if ((clist->clist_map[count].map_type == 4772 PORT_DEVICE_OLD) && 4773 (clist->clist_map[count].map_state == 4774 PORT_DEVICE_INVALID)) { 4775 /* Update our state change counter */ 4776 mutex_enter(&port->fp_mutex); 4777 port->fp_last_change++; 4778 mutex_exit(&port->fp_mutex); 4779 4780 /* 4781 * For removals, we don't decrement 4782 * pd_ref_count until after the ULP's 4783 * state change callback function has 4784 * completed. 4785 */ 4786 4787 /* Removals */ 4788 fp_log_target_event(port, 4789 ESC_SUNFC_TARGET_REMOVE, 4790 clist->clist_map[count].map_pwwn, 4791 clist->clist_map[count].map_did.port_id); 4792 } 4793 4794 if (clist->clist_map[count].map_state != 4795 PORT_DEVICE_INVALID) { 4796 /* 4797 * Indicate that the ULPs are now aware of 4798 * this device. 4799 */ 4800 4801 mutex_enter(&pd->pd_mutex); 4802 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4803 mutex_exit(&pd->pd_mutex); 4804 } 4805 4806 #ifdef DEBUG 4807 /* 4808 * Sanity check for OLD devices in the hash lists 4809 */ 4810 if (pd && clist->clist_map[count].map_state == 4811 PORT_DEVICE_INVALID) { 4812 la_wwn_t pwwn; 4813 fc_portid_t d_id; 4814 4815 mutex_enter(&pd->pd_mutex); 4816 pwwn = pd->pd_port_name; 4817 d_id = pd->pd_port_id; 4818 mutex_exit(&pd->pd_mutex); 4819 4820 /* 4821 * This overwrites the 'pd' local variable. 4822 * Beware of this if 'pd' ever gets 4823 * referenced below this block. 
4824 */ 4825 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4826 ASSERT(pd != clist->clist_map[count].map_pd); 4827 4828 pd = fctl_get_remote_port_by_did(port, 4829 d_id.port_id); 4830 ASSERT(pd != clist->clist_map[count].map_pd); 4831 } 4832 #endif 4833 } 4834 } 4835 4836 if (sync) { 4837 clist->clist_wait = 1; 4838 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4839 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4840 } 4841 4842 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4843 if (sync && ret) { 4844 mutex_enter(&clist->clist_mutex); 4845 while (clist->clist_wait) { 4846 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4847 } 4848 mutex_exit(&clist->clist_mutex); 4849 4850 mutex_destroy(&clist->clist_mutex); 4851 cv_destroy(&clist->clist_cv); 4852 kmem_free(clist, sizeof (*clist)); 4853 } 4854 4855 if (!ret) { 4856 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4857 "port=%p", port); 4858 kmem_free(clist->clist_map, 4859 sizeof (*(clist->clist_map)) * clist->clist_size); 4860 kmem_free(clist, sizeof (*clist)); 4861 } else { 4862 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4863 port, listlen); 4864 } 4865 4866 return (FC_SUCCESS); 4867 } 4868 4869 4870 /* 4871 * Perform PLOGI to the group of devices for ULPs 4872 */ 4873 static void 4874 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4875 { 4876 int offline; 4877 int count; 4878 int rval; 4879 uint32_t listlen; 4880 uint32_t done; 4881 uint32_t d_id; 4882 fc_remote_node_t *node; 4883 fc_remote_port_t *pd; 4884 fc_remote_port_t *tmp_pd; 4885 fc_packet_t *ulp_pkt; 4886 la_els_logi_t *els_data; 4887 ls_code_t ls_code; 4888 4889 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4890 port, job); 4891 4892 done = 0; 4893 listlen = job->job_ulp_listlen; 4894 job->job_counter = job->job_ulp_listlen; 4895 4896 mutex_enter(&port->fp_mutex); 4897 offline = (port->fp_statec_busy || 4898 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4899 mutex_exit(&port->fp_mutex); 4900 4901 for (count = 0; count < listlen; count++) { 4902 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4903 sizeof (la_els_logi_t)); 4904 4905 ulp_pkt = job->job_ulp_pkts[count]; 4906 pd = ulp_pkt->pkt_pd; 4907 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4908 4909 if (offline) { 4910 done++; 4911 4912 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4913 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4914 ulp_pkt->pkt_pd = NULL; 4915 ulp_pkt->pkt_comp(ulp_pkt); 4916 4917 job->job_ulp_pkts[count] = NULL; 4918 4919 fp_jobdone(job); 4920 continue; 4921 } 4922 4923 if (pd == NULL) { 4924 pd = fctl_get_remote_port_by_did(port, d_id); 4925 if (pd == NULL) { 4926 /* reset later */ 4927 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4928 continue; 4929 } 4930 mutex_enter(&pd->pd_mutex); 4931 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4932 mutex_exit(&pd->pd_mutex); 4933 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4934 done++; 4935 ulp_pkt->pkt_comp(ulp_pkt); 4936 job->job_ulp_pkts[count] = NULL; 4937 fp_jobdone(job); 4938 } else { 4939 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4940 mutex_exit(&pd->pd_mutex); 4941 } 4942 continue; 4943 } 4944 4945 switch (ulp_pkt->pkt_state) { 4946 case FC_PKT_ELS_IN_PROGRESS: 4947 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4948 /* FALLTHRU */ 4949 case FC_PKT_LOCAL_RJT: 4950 done++; 4951 ulp_pkt->pkt_comp(ulp_pkt); 4952 job->job_ulp_pkts[count] = NULL; 4953 fp_jobdone(job); 4954 continue; 4955 default: 4956 break; 4957 } 4958 4959 /* 4960 * Validate the pd corresponding to the d_id passed 4961 * by the ULPs 4962 */ 4963 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4964 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4965 done++; 4966 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4967 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4968 ulp_pkt->pkt_pd = NULL; 4969 ulp_pkt->pkt_comp(ulp_pkt); 4970 job->job_ulp_pkts[count] = NULL; 4971 fp_jobdone(job); 4972 continue; 4973 } 4974 4975 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4976 "port=%p, pd=%p", port, pd); 4977 4978 mutex_enter(&pd->pd_mutex); 4979 4980 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4981 done++; 4982 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4983 4984 ls_code.ls_code = LA_ELS_ACC; 4985 ls_code.mbz = 0; 4986 4987 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4988 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4989 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4990 4991 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4992 (uint8_t *)&pd->pd_csp, 4993 (uint8_t *)&els_data->common_service, 4994 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4995 4996 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4997 (uint8_t *)&pd->pd_port_name, 4998 (uint8_t *)&els_data->nport_ww_name, 4999 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 5000 5001 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5002 (uint8_t *)&pd->pd_clsp1, 5003 (uint8_t *)&els_data->class_1, 5004 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 5005 5006 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5007 (uint8_t *)&pd->pd_clsp2, 5008 (uint8_t *)&els_data->class_2, 5009 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 5010 5011 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5012 (uint8_t *)&pd->pd_clsp3, 5013 (uint8_t *)&els_data->class_3, 5014 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 5015 5016 node = pd->pd_remote_nodep; 5017 pd->pd_login_count++; 5018 pd->pd_flags = PD_IDLE; 5019 ulp_pkt->pkt_pd = pd; 5020 mutex_exit(&pd->pd_mutex); 5021 5022 mutex_enter(&node->fd_mutex); 5023 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5024 (uint8_t *)&node->fd_node_name, 
5025 (uint8_t *)(&els_data->node_ww_name), 5026 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR); 5027 5028 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5029 (uint8_t *)&node->fd_vv, 5030 (uint8_t *)(&els_data->vendor_version), 5031 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 5032 5033 mutex_exit(&node->fd_mutex); 5034 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 5035 } else { 5036 5037 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 5038 mutex_exit(&pd->pd_mutex); 5039 } 5040 5041 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 5042 ulp_pkt->pkt_comp(ulp_pkt); 5043 job->job_ulp_pkts[count] = NULL; 5044 fp_jobdone(job); 5045 } 5046 } 5047 5048 if (done == listlen) { 5049 fp_jobwait(job); 5050 fctl_jobdone(job); 5051 return; 5052 } 5053 5054 job->job_counter = listlen - done; 5055 5056 for (count = 0; count < listlen; count++) { 5057 int cmd_flags; 5058 5059 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5060 continue; 5061 } 5062 5063 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5064 5065 cmd_flags = FP_CMD_PLOGI_RETAIN; 5066 5067 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5068 ASSERT(d_id != 0); 5069 5070 pd = fctl_get_remote_port_by_did(port, d_id); 5071 5072 /* 5073 * We need to properly adjust the port device 5074 * reference counter before we assign the pd 5075 * to the ULP packets port device pointer. 5076 */ 5077 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5078 mutex_enter(&pd->pd_mutex); 5079 pd->pd_ref_count++; 5080 mutex_exit(&pd->pd_mutex); 5081 FP_TRACE(FP_NHEAD1(3, 0), 5082 "fp_plogi_group: DID = 0x%x using new pd %p \ 5083 old pd NULL\n", d_id, pd); 5084 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5085 ulp_pkt->pkt_pd != pd) { 5086 mutex_enter(&pd->pd_mutex); 5087 pd->pd_ref_count++; 5088 mutex_exit(&pd->pd_mutex); 5089 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5090 ulp_pkt->pkt_pd->pd_ref_count--; 5091 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5092 FP_TRACE(FP_NHEAD1(3, 0), 5093 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5094 d_id, ulp_pkt->pkt_pd, pd); 5095 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5096 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5097 ulp_pkt->pkt_pd->pd_ref_count--; 5098 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5099 FP_TRACE(FP_NHEAD1(3, 0), 5100 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5101 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5102 } 5103 5104 ulp_pkt->pkt_pd = pd; 5105 5106 if (pd != NULL) { 5107 mutex_enter(&pd->pd_mutex); 5108 d_id = pd->pd_port_id.port_id; 5109 pd->pd_flags = PD_ELS_IN_PROGRESS; 5110 mutex_exit(&pd->pd_mutex); 5111 } else { 5112 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5113 #ifdef DEBUG 5114 pd = fctl_get_remote_port_by_did(port, d_id); 5115 ASSERT(pd == NULL); 5116 #endif 5117 /* 5118 * In the Fabric topology, use NS to create 5119 * port device, and if that fails still try 5120 * with PLOGI - which will make yet another 5121 * attempt to create after successful PLOGI 5122 */ 5123 mutex_enter(&port->fp_mutex); 5124 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5125 mutex_exit(&port->fp_mutex); 5126 pd = fp_create_remote_port_by_ns(port, 5127 d_id, KM_SLEEP); 5128 if (pd) { 5129 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5130 5131 mutex_enter(&pd->pd_mutex); 5132 pd->pd_flags = PD_ELS_IN_PROGRESS; 5133 mutex_exit(&pd->pd_mutex); 5134 5135 FP_TRACE(FP_NHEAD1(3, 0), 5136 "fp_plogi_group;" 5137 " NS created PD port=%p, job=%p," 5138 " pd=%p", port, job, pd); 5139 } 5140 } else { 5141 mutex_exit(&port->fp_mutex); 5142 } 5143 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5144 FP_TRACE(FP_NHEAD1(3, 0), 5145 "fp_plogi_group;" 5146 
"ulp_pkt's pd is NULL, get a pd %p", 5147 pd); 5148 mutex_enter(&pd->pd_mutex); 5149 pd->pd_ref_count++; 5150 mutex_exit(&pd->pd_mutex); 5151 } 5152 ulp_pkt->pkt_pd = pd; 5153 } 5154 5155 rval = fp_port_login(port, d_id, job, cmd_flags, 5156 KM_SLEEP, pd, ulp_pkt); 5157 5158 if (rval == FC_SUCCESS) { 5159 continue; 5160 } 5161 5162 if (rval == FC_STATEC_BUSY) { 5163 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5164 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5165 } else { 5166 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5167 } 5168 5169 if (pd) { 5170 mutex_enter(&pd->pd_mutex); 5171 pd->pd_flags = PD_IDLE; 5172 mutex_exit(&pd->pd_mutex); 5173 } 5174 5175 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5176 ASSERT(pd != NULL); 5177 5178 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5179 " PD removed; port=%p, job=%p", port, job); 5180 5181 mutex_enter(&pd->pd_mutex); 5182 pd->pd_ref_count--; 5183 node = pd->pd_remote_nodep; 5184 mutex_exit(&pd->pd_mutex); 5185 5186 ASSERT(node != NULL); 5187 5188 if (fctl_destroy_remote_port(port, pd) == 0) { 5189 fctl_destroy_remote_node(node); 5190 } 5191 ulp_pkt->pkt_pd = NULL; 5192 } 5193 ulp_pkt->pkt_comp(ulp_pkt); 5194 fp_jobdone(job); 5195 } 5196 5197 fp_jobwait(job); 5198 fctl_jobdone(job); 5199 5200 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5201 port, job); 5202 } 5203 5204 5205 /* 5206 * Name server request initialization 5207 */ 5208 static void 5209 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5210 { 5211 int rval; 5212 int count; 5213 int size; 5214 5215 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5216 5217 job->job_counter = 1; 5218 job->job_result = FC_SUCCESS; 5219 5220 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5221 KM_SLEEP, NULL, NULL); 5222 5223 if (rval != FC_SUCCESS) { 5224 mutex_enter(&port->fp_mutex); 5225 port->fp_topology = FC_TOP_NO_NS; 5226 mutex_exit(&port->fp_mutex); 5227 return; 5228 } 5229 5230 fp_jobwait(job); 5231 5232 if (job->job_result != FC_SUCCESS) { 5233 mutex_enter(&port->fp_mutex); 5234 port->fp_topology = FC_TOP_NO_NS; 5235 mutex_exit(&port->fp_mutex); 5236 return; 5237 } 5238 5239 /* 5240 * At this time, we'll do NS registration for objects in the 5241 * ns_reg_cmds (see top of this file) array. 5242 * 5243 * Each time a ULP module registers with the transport, the 5244 * appropriate fc4 bit is set fc4 types and registered with 5245 * the NS for this support. Also, ULPs and FC admin utilities 5246 * may do registration for objects like IP address, symbolic 5247 * port/node name, Initial process associator at run time. 
5248 */ 5249 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5250 job->job_counter = size; 5251 job->job_result = FC_SUCCESS; 5252 5253 for (count = 0; count < size; count++) { 5254 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5255 job, 0, sleep) != FC_SUCCESS) { 5256 fp_jobdone(job); 5257 } 5258 } 5259 if (size) { 5260 fp_jobwait(job); 5261 } 5262 5263 job->job_result = FC_SUCCESS; 5264 5265 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5266 5267 if (port->fp_dev_count < FP_MAX_DEVICES) { 5268 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5269 } 5270 5271 job->job_counter = 1; 5272 5273 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5274 sleep) == FC_SUCCESS) { 5275 fp_jobwait(job); 5276 } 5277 } 5278 5279 5280 /* 5281 * Name server finish: 5282 * Unregister for RSCNs 5283 * Unregister all the host port objects in the Name Server 5284 * Perform LOGO with the NS; 5285 */ 5286 static void 5287 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5288 { 5289 fp_cmd_t *cmd; 5290 uchar_t class; 5291 uint32_t s_id; 5292 fc_packet_t *pkt; 5293 la_els_logo_t payload; 5294 5295 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5296 5297 job->job_counter = 1; 5298 5299 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5300 FC_SUCCESS) { 5301 fp_jobdone(job); 5302 } 5303 fp_jobwait(job); 5304 5305 job->job_counter = 1; 5306 5307 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5308 fp_jobdone(job); 5309 } 5310 fp_jobwait(job); 5311 5312 job->job_counter = 1; 5313 5314 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5315 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5316 pkt = &cmd->cmd_pkt; 5317 5318 mutex_enter(&port->fp_mutex); 5319 class = port->fp_ns_login_class; 5320 s_id = port->fp_port_id.port_id; 5321 payload.nport_id = port->fp_port_id; 5322 mutex_exit(&port->fp_mutex); 5323 5324 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5325 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5326 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5327 cmd->cmd_retry_count = 1; 5328 cmd->cmd_ulp_pkt = NULL; 5329 5330 if (port->fp_npiv_type == FC_NPIV_PORT) { 5331 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5332 } else { 5333 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5334 } 5335 5336 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5337 5338 payload.ls_code.ls_code = LA_ELS_LOGO; 5339 payload.ls_code.mbz = 0; 5340 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5341 5342 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 5343 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5344 5345 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5346 fp_iodone(cmd); 5347 } 5348 fp_jobwait(job); 5349 } 5350 5351 5352 /* 5353 * NS Registration function. 5354 * 5355 * It should be seriously noted that FC-GS-2 currently doesn't support 5356 * an Object Registration by a D_ID other than the owner of the object. 5357 * What we are aiming at currently is to at least allow Symbolic Node/Port 5358 * Name registration for any N_Port Identifier by the host software. 5359 * 5360 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5361 * function treats the request as Host NS Object. 
5362 */ 5363 static int 5364 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5365 job_request_t *job, int polled, int sleep) 5366 { 5367 int rval; 5368 fc_portid_t s_id; 5369 fc_packet_t *pkt; 5370 fp_cmd_t *cmd; 5371 5372 if (pd == NULL) { 5373 mutex_enter(&port->fp_mutex); 5374 s_id = port->fp_port_id; 5375 mutex_exit(&port->fp_mutex); 5376 } else { 5377 mutex_enter(&pd->pd_mutex); 5378 s_id = pd->pd_port_id; 5379 mutex_exit(&pd->pd_mutex); 5380 } 5381 5382 if (polled) { 5383 job->job_counter = 1; 5384 } 5385 5386 switch (cmd_code) { 5387 case NS_RPN_ID: 5388 case NS_RNN_ID: { 5389 ns_rxn_req_t rxn; 5390 5391 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5392 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5393 if (cmd == NULL) { 5394 return (FC_NOMEM); 5395 } 5396 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5397 pkt = &cmd->cmd_pkt; 5398 5399 if (pd == NULL) { 5400 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5401 (port->fp_service_params.nport_ww_name) : 5402 (port->fp_service_params.node_ww_name)); 5403 } else { 5404 if (cmd_code == NS_RPN_ID) { 5405 mutex_enter(&pd->pd_mutex); 5406 rxn.rxn_xname = pd->pd_port_name; 5407 mutex_exit(&pd->pd_mutex); 5408 } else { 5409 fc_remote_node_t *node; 5410 5411 mutex_enter(&pd->pd_mutex); 5412 node = pd->pd_remote_nodep; 5413 mutex_exit(&pd->pd_mutex); 5414 5415 mutex_enter(&node->fd_mutex); 5416 rxn.rxn_xname = node->fd_node_name; 5417 mutex_exit(&node->fd_mutex); 5418 } 5419 } 5420 rxn.rxn_port_id = s_id; 5421 5422 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5423 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5424 sizeof (rxn), DDI_DEV_AUTOINCR); 5425 5426 break; 5427 } 5428 5429 case NS_RCS_ID: { 5430 ns_rcos_t rcos; 5431 5432 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5433 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5434 if (cmd == NULL) { 5435 return (FC_NOMEM); 5436 } 5437 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5438 pkt = &cmd->cmd_pkt; 5439 5440 if (pd == NULL) { 5441 rcos.rcos_cos = port->fp_cos; 5442 } else { 5443 mutex_enter(&pd->pd_mutex); 5444 rcos.rcos_cos = pd->pd_cos; 5445 mutex_exit(&pd->pd_mutex); 5446 } 5447 rcos.rcos_port_id = s_id; 5448 5449 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5450 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5451 sizeof (rcos), DDI_DEV_AUTOINCR); 5452 5453 break; 5454 } 5455 5456 case NS_RFT_ID: { 5457 ns_rfc_type_t rfc; 5458 5459 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5460 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5461 NULL); 5462 if (cmd == NULL) { 5463 return (FC_NOMEM); 5464 } 5465 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5466 pkt = &cmd->cmd_pkt; 5467 5468 if (pd == NULL) { 5469 mutex_enter(&port->fp_mutex); 5470 bcopy(port->fp_fc4_types, rfc.rfc_types, 5471 sizeof (port->fp_fc4_types)); 5472 mutex_exit(&port->fp_mutex); 5473 } else { 5474 mutex_enter(&pd->pd_mutex); 5475 bcopy(pd->pd_fc4types, rfc.rfc_types, 5476 sizeof (pd->pd_fc4types)); 5477 mutex_exit(&pd->pd_mutex); 5478 } 5479 rfc.rfc_port_id = s_id; 5480 5481 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5482 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5483 sizeof (rfc), DDI_DEV_AUTOINCR); 5484 5485 break; 5486 } 5487 5488 case NS_RSPN_ID: { 5489 uchar_t name_len; 5490 int pl_size; 5491 fc_portid_t spn; 5492 5493 if (pd == NULL) { 5494 mutex_enter(&port->fp_mutex); 5495 name_len = port->fp_sym_port_namelen; 5496 mutex_exit(&port->fp_mutex); 5497 } else { 5498 
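/*
 * Note (added for clarity): when registering on behalf of a remote port,
 * the symbolic port name length is taken from the pd rather than from the
 * local port's soft state.
 */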
mutex_enter(&pd->pd_mutex); 5499 name_len = pd->pd_spn_len; 5500 mutex_exit(&pd->pd_mutex); 5501 } 5502 5503 pl_size = sizeof (fc_portid_t) + name_len + 1; 5504 5505 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5506 sizeof (fc_reg_resp_t), sleep, NULL); 5507 if (cmd == NULL) { 5508 return (FC_NOMEM); 5509 } 5510 5511 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5512 5513 pkt = &cmd->cmd_pkt; 5514 5515 spn = s_id; 5516 5517 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5518 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5519 DDI_DEV_AUTOINCR); 5520 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5521 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5522 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5523 5524 if (pd == NULL) { 5525 mutex_enter(&port->fp_mutex); 5526 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5527 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5528 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5529 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5530 mutex_exit(&port->fp_mutex); 5531 } else { 5532 mutex_enter(&pd->pd_mutex); 5533 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5534 (uint8_t *)pd->pd_spn, 5535 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5536 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5537 mutex_exit(&pd->pd_mutex); 5538 } 5539 break; 5540 } 5541 5542 case NS_RPT_ID: { 5543 ns_rpt_t rpt; 5544 5545 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5546 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5547 if (cmd == NULL) { 5548 return (FC_NOMEM); 5549 } 5550 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5551 pkt = &cmd->cmd_pkt; 5552 5553 if (pd == NULL) { 5554 rpt.rpt_type = port->fp_port_type; 5555 } else { 5556 mutex_enter(&pd->pd_mutex); 5557 rpt.rpt_type = pd->pd_porttype; 5558 mutex_exit(&pd->pd_mutex); 5559 } 5560 rpt.rpt_port_id = s_id; 5561 5562 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5563 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5564 sizeof (rpt), DDI_DEV_AUTOINCR); 5565 5566 break; 5567 } 5568 5569 case NS_RIP_NN: { 5570 ns_rip_t rip; 5571 5572 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5573 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5574 if (cmd == NULL) { 5575 return (FC_NOMEM); 5576 } 5577 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5578 pkt = &cmd->cmd_pkt; 5579 5580 if (pd == NULL) { 5581 rip.rip_node_name = 5582 port->fp_service_params.node_ww_name; 5583 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5584 sizeof (port->fp_ip_addr)); 5585 } else { 5586 fc_remote_node_t *node; 5587 5588 /* 5589 * The most correct implementation should have the IP 5590 * address in the fc_remote_node_t structure; I believe 5591 * Node WWN and IP address should have one to one 5592 * correlation (but guess what this is changing in 5593 * FC-GS-2 latest draft) 5594 */ 5595 mutex_enter(&pd->pd_mutex); 5596 node = pd->pd_remote_nodep; 5597 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5598 sizeof (pd->pd_ip_addr)); 5599 mutex_exit(&pd->pd_mutex); 5600 5601 mutex_enter(&node->fd_mutex); 5602 rip.rip_node_name = node->fd_node_name; 5603 mutex_exit(&node->fd_mutex); 5604 } 5605 5606 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip, 5607 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5608 sizeof (rip), DDI_DEV_AUTOINCR); 5609 5610 break; 5611 } 5612 5613 case NS_RIPA_NN: { 5614 ns_ipa_t ipa; 5615 5616 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5617 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5618 if (cmd == NULL) { 5619 return 
(FC_NOMEM); 5620 } 5621 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5622 pkt = &cmd->cmd_pkt; 5623 5624 if (pd == NULL) { 5625 ipa.ipa_node_name = 5626 port->fp_service_params.node_ww_name; 5627 bcopy(port->fp_ipa, ipa.ipa_value, 5628 sizeof (port->fp_ipa)); 5629 } else { 5630 fc_remote_node_t *node; 5631 5632 mutex_enter(&pd->pd_mutex); 5633 node = pd->pd_remote_nodep; 5634 mutex_exit(&pd->pd_mutex); 5635 5636 mutex_enter(&node->fd_mutex); 5637 ipa.ipa_node_name = node->fd_node_name; 5638 bcopy(node->fd_ipa, ipa.ipa_value, 5639 sizeof (node->fd_ipa)); 5640 mutex_exit(&node->fd_mutex); 5641 } 5642 5643 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5644 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5645 sizeof (ipa), DDI_DEV_AUTOINCR); 5646 5647 break; 5648 } 5649 5650 case NS_RSNN_NN: { 5651 uchar_t name_len; 5652 int pl_size; 5653 la_wwn_t snn; 5654 fc_remote_node_t *node = NULL; 5655 5656 if (pd == NULL) { 5657 mutex_enter(&port->fp_mutex); 5658 name_len = port->fp_sym_node_namelen; 5659 mutex_exit(&port->fp_mutex); 5660 } else { 5661 mutex_enter(&pd->pd_mutex); 5662 node = pd->pd_remote_nodep; 5663 mutex_exit(&pd->pd_mutex); 5664 5665 mutex_enter(&node->fd_mutex); 5666 name_len = node->fd_snn_len; 5667 mutex_exit(&node->fd_mutex); 5668 } 5669 5670 pl_size = sizeof (la_wwn_t) + name_len + 1; 5671 5672 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5673 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5674 if (cmd == NULL) { 5675 return (FC_NOMEM); 5676 } 5677 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5678 5679 pkt = &cmd->cmd_pkt; 5680 5681 bcopy(&port->fp_service_params.node_ww_name, 5682 &snn, sizeof (la_wwn_t)); 5683 5684 if (pd == NULL) { 5685 mutex_enter(&port->fp_mutex); 5686 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5687 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5688 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5689 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5690 mutex_exit(&port->fp_mutex); 5691 } else { 5692 ASSERT(node != NULL); 5693 mutex_enter(&node->fd_mutex); 5694 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5695 (uint8_t *)node->fd_snn, 5696 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5697 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5698 mutex_exit(&node->fd_mutex); 5699 } 5700 5701 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn, 5702 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5703 sizeof (snn), DDI_DEV_AUTOINCR); 5704 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5705 (uint8_t *)(pkt->pkt_cmd 5706 + sizeof (fc_ct_header_t) + sizeof (snn)), 5707 1, DDI_DEV_AUTOINCR); 5708 5709 break; 5710 } 5711 5712 case NS_DA_ID: { 5713 ns_remall_t rall; 5714 char tmp[4] = {0}; 5715 char *ptr; 5716 5717 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5718 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5719 5720 if (cmd == NULL) { 5721 return (FC_NOMEM); 5722 } 5723 5724 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5725 pkt = &cmd->cmd_pkt; 5726 5727 ptr = (char *)(&s_id); 5728 tmp[3] = *ptr++; 5729 tmp[2] = *ptr++; 5730 tmp[1] = *ptr++; 5731 tmp[0] = *ptr; 5732 #if defined(_BIT_FIELDS_LTOH) 5733 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5734 #else 5735 rall.rem_port_id = s_id; 5736 #endif 5737 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall, 5738 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5739 sizeof (rall), DDI_DEV_AUTOINCR); 5740 5741 break; 5742 } 5743 5744 default: 5745 return (FC_FAILURE); 5746 } 5747 5748 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5749 
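/*
 * Note (added for clarity, describing the code below): a failed send is
 * reflected into the job result and the command is completed right away;
 * polled callers then wait in fp_jobwait() for the job to finish, while
 * non-polled callers report success once the command is in flight.
 */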
5750 if (rval != FC_SUCCESS) { 5751 job->job_result = rval; 5752 fp_iodone(cmd); 5753 } 5754 5755 if (polled) { 5756 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5757 fp_jobwait(job); 5758 } else { 5759 rval = FC_SUCCESS; 5760 } 5761 5762 return (rval); 5763 } 5764 5765 5766 /* 5767 * Common interrupt handler 5768 */ 5769 static int 5770 fp_common_intr(fc_packet_t *pkt, int iodone) 5771 { 5772 int rval = FC_FAILURE; 5773 fp_cmd_t *cmd; 5774 fc_local_port_t *port; 5775 5776 cmd = pkt->pkt_ulp_private; 5777 port = cmd->cmd_port; 5778 5779 /* 5780 * Fail fast the upper layer requests if 5781 * a state change has occurred amidst. 5782 */ 5783 mutex_enter(&port->fp_mutex); 5784 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5785 mutex_exit(&port->fp_mutex); 5786 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5787 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5788 } else if (!(port->fp_soft_state & 5789 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5790 mutex_exit(&port->fp_mutex); 5791 5792 switch (pkt->pkt_state) { 5793 case FC_PKT_LOCAL_BSY: 5794 case FC_PKT_FABRIC_BSY: 5795 case FC_PKT_NPORT_BSY: 5796 case FC_PKT_TIMEOUT: 5797 cmd->cmd_retry_interval = (pkt->pkt_state == 5798 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5799 rval = fp_retry_cmd(pkt); 5800 break; 5801 5802 case FC_PKT_FABRIC_RJT: 5803 case FC_PKT_NPORT_RJT: 5804 case FC_PKT_LOCAL_RJT: 5805 case FC_PKT_LS_RJT: 5806 case FC_PKT_FS_RJT: 5807 case FC_PKT_BA_RJT: 5808 rval = fp_handle_reject(pkt); 5809 break; 5810 5811 default: 5812 if (pkt->pkt_resp_resid) { 5813 cmd->cmd_retry_interval = 0; 5814 rval = fp_retry_cmd(pkt); 5815 } 5816 break; 5817 } 5818 } else { 5819 mutex_exit(&port->fp_mutex); 5820 } 5821 5822 if (rval != FC_SUCCESS && iodone) { 5823 fp_iodone(cmd); 5824 rval = FC_SUCCESS; 5825 } 5826 5827 return (rval); 5828 } 5829 5830 5831 /* 5832 * Some not so long winding theory on point to point topology: 5833 * 5834 * In the ACC payload, if the D_ID is ZERO and the common service 5835 * parameters indicate N_Port, then the topology is POINT TO POINT. 5836 * 5837 * In a point to point topology with an N_Port, during Fabric Login, 5838 * the destination N_Port will check with our WWN and decide if it 5839 * needs to issue PLOGI or not. That means, FLOGI could potentially 5840 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5841 * PLOGI creates the device handles. 5842 * 5843 * Assuming that the host port WWN is greater than the other N_Port 5844 * WWN, then we become the master (be aware that this isn't the word 5845 * used in the FC standards) and initiate the PLOGI. 5846 * 5847 */ 5848 static void 5849 fp_flogi_intr(fc_packet_t *pkt) 5850 { 5851 int state; 5852 int f_port; 5853 uint32_t s_id; 5854 uint32_t d_id; 5855 fp_cmd_t *cmd; 5856 fc_local_port_t *port; 5857 la_wwn_t *swwn; 5858 la_wwn_t dwwn; 5859 la_wwn_t nwwn; 5860 fc_remote_port_t *pd; 5861 la_els_logi_t *acc; 5862 com_svc_t csp; 5863 ls_code_t resp; 5864 5865 cmd = pkt->pkt_ulp_private; 5866 port = cmd->cmd_port; 5867 5868 mutex_enter(&port->fp_mutex); 5869 port->fp_out_fpcmds--; 5870 mutex_exit(&port->fp_mutex); 5871 5872 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5873 port, pkt, pkt->pkt_state); 5874 5875 if (FP_IS_PKT_ERROR(pkt)) { 5876 (void) fp_common_intr(pkt, 1); 5877 return; 5878 } 5879 5880 /* 5881 * Currently, we don't need to swap bytes here because qlc is faking the 5882 * response for us and so endianness is getting taken care of. 
But we 5883 * have to fix this and generalize this at some point 5884 */ 5885 acc = (la_els_logi_t *)pkt->pkt_resp; 5886 5887 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5888 sizeof (resp), DDI_DEV_AUTOINCR); 5889 5890 ASSERT(resp.ls_code == LA_ELS_ACC); 5891 if (resp.ls_code != LA_ELS_ACC) { 5892 (void) fp_common_intr(pkt, 1); 5893 return; 5894 } 5895 5896 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp, 5897 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5898 5899 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5900 5901 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5902 5903 mutex_enter(&port->fp_mutex); 5904 state = FC_PORT_STATE_MASK(port->fp_state); 5905 mutex_exit(&port->fp_mutex); 5906 5907 if (f_port == 0) { 5908 if (state != FC_STATE_LOOP) { 5909 swwn = &port->fp_service_params.nport_ww_name; 5910 5911 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5912 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5913 DDI_DEV_AUTOINCR); 5914 5915 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5916 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5917 DDI_DEV_AUTOINCR); 5918 5919 mutex_enter(&port->fp_mutex); 5920 5921 port->fp_topology = FC_TOP_PT_PT; 5922 port->fp_total_devices = 1; 5923 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5924 port->fp_ptpt_master = 1; 5925 /* 5926 * Let us choose 'X' as S_ID and 'Y' 5927 * as D_ID and that'll work; hopefully 5928 * If not, it will get changed. 5929 */ 5930 s_id = port->fp_instance + FP_DEFAULT_SID; 5931 d_id = port->fp_instance + FP_DEFAULT_DID; 5932 port->fp_port_id.port_id = s_id; 5933 mutex_exit(&port->fp_mutex); 5934 5935 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x" 5936 "pd %x", port->fp_port_id.port_id, d_id); 5937 pd = fctl_create_remote_port(port, 5938 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5939 KM_NOSLEEP); 5940 if (pd == NULL) { 5941 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5942 0, NULL, "couldn't create device" 5943 " d_id=%X", d_id); 5944 fp_iodone(cmd); 5945 return; 5946 } 5947 5948 cmd->cmd_pkt.pkt_tran_flags = 5949 pkt->pkt_tran_flags; 5950 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5951 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5952 cmd->cmd_retry_count = fp_retry_count; 5953 5954 fp_xlogi_init(port, cmd, s_id, d_id, 5955 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5956 5957 (&cmd->cmd_pkt)->pkt_pd = pd; 5958 5959 /* 5960 * We've just created this fc_remote_port_t, and 5961 * we're about to use it to send a PLOGI, so 5962 * bump the reference count right now. When 5963 * the packet is freed, the reference count will 5964 * be decremented. The ULP may also start using 5965 * it, so mark it as given away as well. 
5966 */ 5967 pd->pd_ref_count++; 5968 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5969 5970 if (fp_sendcmd(port, cmd, 5971 port->fp_fca_handle) == FC_SUCCESS) { 5972 return; 5973 } 5974 } else { 5975 /* 5976 * The device handles will be created when the 5977 * unsolicited PLOGI is completed successfully 5978 */ 5979 port->fp_ptpt_master = 0; 5980 mutex_exit(&port->fp_mutex); 5981 } 5982 } 5983 pkt->pkt_state = FC_PKT_FAILURE; 5984 } else { 5985 if (f_port) { 5986 mutex_enter(&port->fp_mutex); 5987 if (state == FC_STATE_LOOP) { 5988 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5989 } else { 5990 port->fp_topology = FC_TOP_FABRIC; 5991 5992 FC_GET_RSP(port, pkt->pkt_resp_acc, 5993 (uint8_t *)&port->fp_fabric_name, 5994 (uint8_t *)&acc->node_ww_name, 5995 sizeof (la_wwn_t), 5996 DDI_DEV_AUTOINCR); 5997 } 5998 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5999 mutex_exit(&port->fp_mutex); 6000 } else { 6001 pkt->pkt_state = FC_PKT_FAILURE; 6002 } 6003 } 6004 fp_iodone(cmd); 6005 } 6006 6007 6008 /* 6009 * Handle solicited PLOGI response 6010 */ 6011 static void 6012 fp_plogi_intr(fc_packet_t *pkt) 6013 { 6014 int nl_port; 6015 int bailout; 6016 uint32_t d_id; 6017 fp_cmd_t *cmd; 6018 la_els_logi_t *acc; 6019 fc_local_port_t *port; 6020 fc_remote_port_t *pd; 6021 la_wwn_t nwwn; 6022 la_wwn_t pwwn; 6023 ls_code_t resp; 6024 6025 nl_port = 0; 6026 cmd = pkt->pkt_ulp_private; 6027 port = cmd->cmd_port; 6028 d_id = pkt->pkt_cmd_fhdr.d_id; 6029 6030 #ifndef __lock_lint 6031 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6032 #endif 6033 6034 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 6035 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 6036 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 6037 6038 /* 6039 * Bail out early on ULP initiated requests if the 6040 * state change has occurred 6041 */ 6042 mutex_enter(&port->fp_mutex); 6043 port->fp_out_fpcmds--; 6044 bailout = ((port->fp_statec_busy || 6045 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6046 cmd->cmd_ulp_pkt) ? 1 : 0; 6047 mutex_exit(&port->fp_mutex); 6048 6049 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6050 int skip_msg = 0; 6051 int giveup = 0; 6052 6053 if (cmd->cmd_ulp_pkt) { 6054 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6055 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6056 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6057 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6058 } 6059 6060 /* 6061 * If an unsolicited cross login already created 6062 * a device speed up the discovery by not retrying 6063 * the command mindlessly. 6064 */ 6065 if (pkt->pkt_pd == NULL && 6066 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6067 fp_iodone(cmd); 6068 return; 6069 } 6070 6071 if (pkt->pkt_pd != NULL) { 6072 giveup = (pkt->pkt_pd->pd_recepient == 6073 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6074 if (giveup) { 6075 /* 6076 * This pd is marked as plogi 6077 * recipient, stop retrying 6078 */ 6079 FP_TRACE(FP_NHEAD1(3, 0), 6080 "fp_plogi_intr: stop retry as" 6081 " a cross login was accepted" 6082 " from d_id=%x, port=%p.", 6083 d_id, port); 6084 fp_iodone(cmd); 6085 return; 6086 } 6087 } 6088 6089 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6090 return; 6091 } 6092 6093 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6094 mutex_enter(&pd->pd_mutex); 6095 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6096 skip_msg++; 6097 } 6098 mutex_exit(&pd->pd_mutex); 6099 } 6100 6101 mutex_enter(&port->fp_mutex); 6102 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6103 port->fp_statec_busy <= 1 && 6104 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6105 mutex_exit(&port->fp_mutex); 6106 /* 6107 * In case of Login Collisions, JNI HBAs returns the 6108 * FC pkt back to the Initiator with the state set to 6109 * FC_PKT_LS_RJT and reason to FC_REASON_LOGICAL_ERROR. 6110 * QLC HBAs handles such cases in the FW and doesnot 6111 * return the LS_RJT with Logical error when 6112 * login collision happens. 6113 */ 6114 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6115 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6116 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6117 "PLOGI to %x failed", d_id); 6118 } 6119 FP_TRACE(FP_NHEAD2(9, 0), 6120 "PLOGI to %x failed. state=%x reason=%x.", 6121 d_id, pkt->pkt_state, pkt->pkt_reason); 6122 } else { 6123 mutex_exit(&port->fp_mutex); 6124 } 6125 6126 fp_iodone(cmd); 6127 return; 6128 } 6129 6130 acc = (la_els_logi_t *)pkt->pkt_resp; 6131 6132 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6133 sizeof (resp), DDI_DEV_AUTOINCR); 6134 6135 ASSERT(resp.ls_code == LA_ELS_ACC); 6136 if (resp.ls_code != LA_ELS_ACC) { 6137 (void) fp_common_intr(pkt, 1); 6138 return; 6139 } 6140 6141 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6142 mutex_enter(&port->fp_mutex); 6143 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6144 mutex_exit(&port->fp_mutex); 6145 fp_iodone(cmd); 6146 return; 6147 } 6148 6149 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6150 6151 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6152 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6153 DDI_DEV_AUTOINCR); 6154 6155 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6156 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6157 DDI_DEV_AUTOINCR); 6158 6159 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6160 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6161 6162 if ((pd = pkt->pkt_pd) == NULL) { 6163 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6164 if (pd == NULL) { 6165 FP_TRACE(FP_NHEAD2(9, 0), "fp_plogi_intr: fp %x pd %x", 6166 port->fp_port_id.port_id, d_id); 6167 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6168 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6169 if (pd == NULL) { 6170 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6171 "couldn't create port device handles" 6172 " d_id=%x", d_id); 6173 fp_iodone(cmd); 6174 return; 6175 } 6176 } else { 6177 fc_remote_port_t *tmp_pd; 6178 6179 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6180 if (tmp_pd != NULL) { 6181 fp_iodone(cmd); 6182 return; 6183 } 6184 6185 mutex_enter(&port->fp_mutex); 6186 mutex_enter(&pd->pd_mutex); 6187 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6188 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6189 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6190 } 6191 6192 if (pd->pd_type == PORT_DEVICE_OLD) { 6193 if (pd->pd_port_id.port_id != d_id) 
{ 6194 fctl_delist_did_table(port, pd); 6195 pd->pd_type = PORT_DEVICE_CHANGED; 6196 pd->pd_port_id.port_id = d_id; 6197 } else { 6198 pd->pd_type = PORT_DEVICE_NOCHANGE; 6199 } 6200 } 6201 6202 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6203 char ww_name[17]; 6204 6205 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6206 6207 mutex_exit(&pd->pd_mutex); 6208 mutex_exit(&port->fp_mutex); 6209 FP_TRACE(FP_NHEAD2(9, 0), 6210 "Possible Duplicate name or address" 6211 " identifiers in the PLOGI response" 6212 " D_ID=%x, PWWN=%s: Please check the" 6213 " configuration", d_id, ww_name); 6214 fp_iodone(cmd); 6215 return; 6216 } 6217 fctl_enlist_did_table(port, pd); 6218 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6219 mutex_exit(&pd->pd_mutex); 6220 mutex_exit(&port->fp_mutex); 6221 } 6222 } else { 6223 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6224 6225 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6226 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6227 6228 mutex_enter(&port->fp_mutex); 6229 mutex_enter(&pd->pd_mutex); 6230 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6231 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6232 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6233 pd->pd_type); 6234 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6235 pd->pd_type == PORT_DEVICE_OLD) || 6236 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6237 pd->pd_type = PORT_DEVICE_NOCHANGE; 6238 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6239 pd->pd_type = PORT_DEVICE_NEW; 6240 } 6241 } else { 6242 char old_name[17]; 6243 char new_name[17]; 6244 6245 fc_wwn_to_str(&pd->pd_port_name, old_name); 6246 fc_wwn_to_str(&pwwn, new_name); 6247 6248 FP_TRACE(FP_NHEAD1(9, 0), 6249 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6250 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6251 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6252 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6253 cmd->cmd_ulp_pkt, bailout); 6254 6255 FP_TRACE(FP_NHEAD2(9, 0), 6256 "PWWN of a device with D_ID=%x changed." 6257 " New PWWN = %s, OLD PWWN = %s", d_id, 6258 new_name, old_name); 6259 6260 if (cmd->cmd_ulp_pkt && !bailout) { 6261 fc_remote_node_t *rnodep; 6262 fc_portmap_t *changelist; 6263 fc_portmap_t *listptr; 6264 int len = 1; 6265 /* # entries in changelist */ 6266 6267 fctl_delist_pwwn_table(port, pd); 6268 6269 /* 6270 * Lets now check if there already is a pd with 6271 * this new WWN in the table. If so, we'll mark 6272 * it as invalid 6273 */ 6274 6275 if (new_wwn_pd) { 6276 /* 6277 * There is another pd with in the pwwn 6278 * table with the same WWN that we got 6279 * in the PLOGI payload. We have to get 6280 * it out of the pwwn table, update the 6281 * pd's state (fp_fillout_old_map does 6282 * this for us) and add it to the 6283 * changelist that goes up to ULPs. 6284 * 6285 * len is length of changelist and so 6286 * increment it. 6287 */ 6288 len++; 6289 6290 if (tmp_pd != pd) { 6291 /* 6292 * Odd case where pwwn and did 6293 * tables are out of sync but 6294 * we will handle that too. See 6295 * more comments below. 6296 * 6297 * One more device that ULPs 6298 * should know about and so len 6299 * gets incremented again. 6300 */ 6301 len++; 6302 } 6303 6304 listptr = changelist = kmem_zalloc(len * 6305 sizeof (*changelist), KM_SLEEP); 6306 6307 mutex_enter(&new_wwn_pd->pd_mutex); 6308 rnodep = new_wwn_pd->pd_remote_nodep; 6309 mutex_exit(&new_wwn_pd->pd_mutex); 6310 6311 /* 6312 * Hold the fd_mutex since 6313 * fctl_copy_portmap_held expects it. 
6314 * Preserve lock hierarchy by grabbing 6315 * fd_mutex before pd_mutex 6316 */ 6317 if (rnodep) { 6318 mutex_enter(&rnodep->fd_mutex); 6319 } 6320 mutex_enter(&new_wwn_pd->pd_mutex); 6321 fp_fillout_old_map_held(listptr++, 6322 new_wwn_pd, 0); 6323 mutex_exit(&new_wwn_pd->pd_mutex); 6324 if (rnodep) { 6325 mutex_exit(&rnodep->fd_mutex); 6326 } 6327 6328 /* 6329 * Safety check : 6330 * Lets ensure that the pwwn and did 6331 * tables are in sync. Ideally, we 6332 * should not find that these two pd's 6333 * are different. 6334 */ 6335 if (tmp_pd != pd) { 6336 mutex_enter(&tmp_pd->pd_mutex); 6337 rnodep = 6338 tmp_pd->pd_remote_nodep; 6339 mutex_exit(&tmp_pd->pd_mutex); 6340 6341 /* As above grab fd_mutex */ 6342 if (rnodep) { 6343 mutex_enter(&rnodep-> 6344 fd_mutex); 6345 } 6346 mutex_enter(&tmp_pd->pd_mutex); 6347 6348 fp_fillout_old_map_held( 6349 listptr++, tmp_pd, 0); 6350 6351 mutex_exit(&tmp_pd->pd_mutex); 6352 if (rnodep) { 6353 mutex_exit(&rnodep-> 6354 fd_mutex); 6355 } 6356 6357 /* 6358 * Now add "pd" (not tmp_pd) 6359 * to fp_did_table to sync it up 6360 * with fp_pwwn_table 6361 * 6362 * pd->pd_mutex is already held 6363 * at this point 6364 */ 6365 fctl_enlist_did_table(port, pd); 6366 } 6367 } else { 6368 listptr = changelist = kmem_zalloc( 6369 sizeof (*changelist), KM_SLEEP); 6370 } 6371 6372 ASSERT(changelist != NULL); 6373 6374 fp_fillout_changed_map(listptr, pd, &d_id, 6375 &pwwn); 6376 fctl_enlist_pwwn_table(port, pd); 6377 6378 mutex_exit(&pd->pd_mutex); 6379 mutex_exit(&port->fp_mutex); 6380 6381 fp_iodone(cmd); 6382 6383 (void) fp_ulp_devc_cb(port, changelist, len, 6384 len, KM_NOSLEEP, 0); 6385 6386 return; 6387 } 6388 } 6389 6390 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6391 nl_port = 1; 6392 } 6393 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6394 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6395 } 6396 6397 mutex_exit(&pd->pd_mutex); 6398 mutex_exit(&port->fp_mutex); 6399 6400 if (tmp_pd == NULL) { 6401 mutex_enter(&port->fp_mutex); 6402 mutex_enter(&pd->pd_mutex); 6403 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6404 char ww_name[17]; 6405 6406 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6407 mutex_exit(&pd->pd_mutex); 6408 mutex_exit(&port->fp_mutex); 6409 FP_TRACE(FP_NHEAD2(9, 0), 6410 "Possible Duplicate name or address" 6411 " identifiers in the PLOGI response" 6412 " D_ID=%x, PWWN=%s: Please check the" 6413 " configuration", d_id, ww_name); 6414 fp_iodone(cmd); 6415 return; 6416 } 6417 fctl_enlist_did_table(port, pd); 6418 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6419 mutex_exit(&pd->pd_mutex); 6420 mutex_exit(&port->fp_mutex); 6421 } 6422 } 6423 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6424 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6425 6426 if (cmd->cmd_ulp_pkt) { 6427 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6428 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6429 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6430 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6431 if (pd != NULL) { 6432 FP_TRACE(FP_NHEAD1(9, 0), 6433 "fp_plogi_intr;" 6434 "ulp_pkt's pd is NULL, get a pd %p", 6435 pd); 6436 mutex_enter(&pd->pd_mutex); 6437 pd->pd_ref_count++; 6438 mutex_exit(&pd->pd_mutex); 6439 } 6440 cmd->cmd_ulp_pkt->pkt_pd = pd; 6441 } 6442 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6443 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6444 sizeof (fc_frame_hdr_t)); 6445 bcopy((caddr_t)pkt->pkt_resp, 6446 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6447 sizeof (la_els_logi_t)); 6448 } 6449 6450 mutex_enter(&port->fp_mutex); 6451 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6452 
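/*
 * Note (added for clarity, describing the code below): on a private loop,
 * or when the responder is an NL_Port, the successful PLOGI is followed by
 * an ADISC; its completion routine uses the hard address in the response
 * to validate the device's address before discovery continues.
 */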
mutex_enter(&pd->pd_mutex); 6453 6454 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6455 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6456 cmd->cmd_retry_count = fp_retry_count; 6457 6458 /* 6459 * If the fc_remote_port_t pointer is not set in the given 6460 * fc_packet_t, then this fc_remote_port_t must have just 6461 * been created. Save the pointer and also increment the 6462 * fc_remote_port_t reference count. 6463 */ 6464 if (pkt->pkt_pd == NULL) { 6465 pkt->pkt_pd = pd; 6466 pd->pd_ref_count++; /* It's in use! */ 6467 } 6468 6469 fp_adisc_init(cmd, cmd->cmd_job); 6470 6471 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6472 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6473 6474 mutex_exit(&pd->pd_mutex); 6475 mutex_exit(&port->fp_mutex); 6476 6477 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6478 return; 6479 } 6480 } else { 6481 mutex_exit(&port->fp_mutex); 6482 } 6483 6484 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6485 mutex_enter(&port->fp_mutex); 6486 mutex_enter(&pd->pd_mutex); 6487 6488 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6489 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6490 cmd->cmd_retry_count = fp_retry_count; 6491 6492 fp_logo_init(pd, cmd, cmd->cmd_job); 6493 6494 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6495 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6496 6497 mutex_exit(&pd->pd_mutex); 6498 mutex_exit(&port->fp_mutex); 6499 6500 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6501 return; 6502 } 6503 6504 } 6505 fp_iodone(cmd); 6506 } 6507 6508 6509 /* 6510 * Handle solicited ADISC response 6511 */ 6512 static void 6513 fp_adisc_intr(fc_packet_t *pkt) 6514 { 6515 int rval; 6516 int bailout; 6517 fp_cmd_t *cmd, *logi_cmd; 6518 fc_local_port_t *port; 6519 fc_remote_port_t *pd; 6520 la_els_adisc_t *acc; 6521 ls_code_t resp; 6522 fc_hardaddr_t ha; 6523 fc_portmap_t *changelist; 6524 int initiator, adiscfail = 0; 6525 6526 pd = pkt->pkt_pd; 6527 cmd = pkt->pkt_ulp_private; 6528 port = cmd->cmd_port; 6529 6530 #ifndef __lock_lint 6531 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6532 #endif 6533 6534 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6535 6536 mutex_enter(&port->fp_mutex); 6537 port->fp_out_fpcmds--; 6538 bailout = ((port->fp_statec_busy || 6539 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6540 cmd->cmd_ulp_pkt) ? 1 : 0; 6541 mutex_exit(&port->fp_mutex); 6542 6543 if (bailout) { 6544 fp_iodone(cmd); 6545 return; 6546 } 6547 6548 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6549 acc = (la_els_adisc_t *)pkt->pkt_resp; 6550 6551 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6552 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6553 6554 if (resp.ls_code == LA_ELS_ACC) { 6555 int is_private; 6556 6557 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha, 6558 (uint8_t *)&acc->hard_addr, sizeof (ha), 6559 DDI_DEV_AUTOINCR); 6560 6561 mutex_enter(&port->fp_mutex); 6562 6563 is_private = 6564 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6565 6566 mutex_enter(&pd->pd_mutex); 6567 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6568 fctl_enlist_did_table(port, pd); 6569 } 6570 mutex_exit(&pd->pd_mutex); 6571 6572 mutex_exit(&port->fp_mutex); 6573 6574 mutex_enter(&pd->pd_mutex); 6575 if (pd->pd_type != PORT_DEVICE_NEW) { 6576 if (is_private && (pd->pd_hard_addr.hard_addr != 6577 ha.hard_addr)) { 6578 pd->pd_type = PORT_DEVICE_CHANGED; 6579 } else { 6580 pd->pd_type = PORT_DEVICE_NOCHANGE; 6581 } 6582 } 6583 6584 if (is_private && (ha.hard_addr && 6585 pd->pd_port_id.port_id != ha.hard_addr)) { 6586 char ww_name[17]; 6587 6588 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6589 6590 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6591 "NL_Port Identifier %x doesn't match" 6592 " with Hard Address %x, Will use Port" 6593 " WWN %s", pd->pd_port_id.port_id, 6594 ha.hard_addr, ww_name); 6595 6596 pd->pd_hard_addr.hard_addr = 0; 6597 } else { 6598 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6599 } 6600 mutex_exit(&pd->pd_mutex); 6601 } else { 6602 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6603 return; 6604 } 6605 } 6606 } else { 6607 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6608 return; 6609 } 6610 6611 mutex_enter(&port->fp_mutex); 6612 if (port->fp_statec_busy <= 1) { 6613 mutex_exit(&port->fp_mutex); 6614 if (pkt->pkt_state == FC_PKT_LS_RJT && 6615 pkt->pkt_reason == FC_REASON_CMD_UNABLE) { 6616 uchar_t class; 6617 int cmd_flag; 6618 uint32_t src_id; 6619 6620 class = fp_get_nextclass(port, 6621 FC_TRAN_CLASS_INVALID); 6622 if (class == FC_TRAN_CLASS_INVALID) { 6623 fp_iodone(cmd); 6624 return; 6625 } 6626 6627 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; " 6628 "fp_state=0x%x, pkt_state=0x%x, " 6629 "reason=0x%x, class=0x%x", 6630 port->fp_state, pkt->pkt_state, 6631 pkt->pkt_reason, class); 6632 cmd_flag = FP_CMD_PLOGI_RETAIN; 6633 6634 logi_cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 6635 sizeof (la_els_logi_t), KM_SLEEP, pd); 6636 if (logi_cmd == NULL) { 6637 fp_iodone(cmd); 6638 return; 6639 } 6640 6641 logi_cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6642 logi_cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6643 logi_cmd->cmd_flags = cmd_flag; 6644 logi_cmd->cmd_retry_count = fp_retry_count; 6645 logi_cmd->cmd_ulp_pkt = NULL; 6646 6647 mutex_enter(&port->fp_mutex); 6648 src_id = port->fp_port_id.port_id; 6649 mutex_exit(&port->fp_mutex); 6650 6651 fp_xlogi_init(port, logi_cmd, src_id, 6652 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr, 6653 cmd->cmd_job, LA_ELS_PLOGI); 6654 if (pd) { 6655 mutex_enter(&pd->pd_mutex); 6656 pd->pd_flags = PD_ELS_IN_PROGRESS; 6657 mutex_exit(&pd->pd_mutex); 6658 } 6659 6660 if (fp_sendcmd(port, logi_cmd, 6661 port->fp_fca_handle) == FC_SUCCESS) { 6662 fp_free_pkt(cmd); 6663 return; 6664 } else { 6665 fp_free_pkt(logi_cmd); 6666 } 6667 } else { 6668 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6669 "ADISC to %x failed, cmd_flags=%x", 6670 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6671 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6672 adiscfail = 1; 6673 } 6674 } else { 6675 mutex_exit(&port->fp_mutex); 6676 } 6677 } 6678 6679 if (cmd->cmd_ulp_pkt) { 6680 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6681 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6682 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6683 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6684 cmd->cmd_ulp_pkt->pkt_pd = pd; 6685 FP_TRACE(FP_NHEAD1(9, 0), 6686 "fp_adisc__intr;" 6687 "ulp_pkt's pd is NULL, get a pd %p", 6688 pd); 6689 6690 } 6691 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6692 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6693 
sizeof (fc_frame_hdr_t)); 6694 bcopy((caddr_t)pkt->pkt_resp, 6695 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6696 sizeof (la_els_adisc_t)); 6697 } 6698 6699 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6700 FP_TRACE(FP_NHEAD1(9, 0), 6701 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6702 "fp_retry_count=%x, ulp_pkt=%p", 6703 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6704 6705 mutex_enter(&port->fp_mutex); 6706 mutex_enter(&pd->pd_mutex); 6707 6708 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6709 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6710 cmd->cmd_retry_count = fp_retry_count; 6711 6712 fp_logo_init(pd, cmd, cmd->cmd_job); 6713 6714 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6715 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6716 6717 mutex_exit(&pd->pd_mutex); 6718 mutex_exit(&port->fp_mutex); 6719 6720 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6721 if (adiscfail) { 6722 mutex_enter(&pd->pd_mutex); 6723 initiator = 6724 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0); 6725 pd->pd_state = PORT_DEVICE_VALID; 6726 pd->pd_aux_flags |= PD_LOGGED_OUT; 6727 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6728 pd->pd_type = PORT_DEVICE_NEW; 6729 } else { 6730 pd->pd_type = PORT_DEVICE_NOCHANGE; 6731 } 6732 mutex_exit(&pd->pd_mutex); 6733 6734 changelist = 6735 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6736 6737 if (initiator) { 6738 fp_unregister_login(pd); 6739 fctl_copy_portmap(changelist, pd); 6740 } else { 6741 fp_fillout_old_map(changelist, pd, 0); 6742 } 6743 6744 FP_TRACE(FP_NHEAD1(9, 0), 6745 "fp_adisc_intr: Dev change notification " 6746 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6747 "map_flags=%x initiator=%d", port, pd, 6748 changelist->map_type, changelist->map_state, 6749 changelist->map_flags, initiator); 6750 6751 (void) fp_ulp_devc_cb(port, changelist, 6752 1, 1, KM_SLEEP, 0); 6753 } 6754 if (rval == FC_SUCCESS) { 6755 return; 6756 } 6757 } 6758 fp_iodone(cmd); 6759 } 6760 6761 6762 /* 6763 * Handle solicited LOGO response 6764 */ 6765 static void 6766 fp_logo_intr(fc_packet_t *pkt) 6767 { 6768 ls_code_t resp; 6769 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6770 6771 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6772 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6773 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6774 6775 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6776 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6777 6778 if (FP_IS_PKT_ERROR(pkt)) { 6779 (void) fp_common_intr(pkt, 1); 6780 return; 6781 } 6782 6783 ASSERT(resp.ls_code == LA_ELS_ACC); 6784 if (resp.ls_code != LA_ELS_ACC) { 6785 (void) fp_common_intr(pkt, 1); 6786 return; 6787 } 6788 6789 if (pkt->pkt_pd != NULL) { 6790 fp_unregister_login(pkt->pkt_pd); 6791 } 6792 6793 fp_iodone(pkt->pkt_ulp_private); 6794 } 6795 6796 6797 /* 6798 * Handle solicited RNID response 6799 */ 6800 static void 6801 fp_rnid_intr(fc_packet_t *pkt) 6802 { 6803 ls_code_t resp; 6804 job_request_t *job; 6805 fp_cmd_t *cmd; 6806 la_els_rnid_acc_t *acc; 6807 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6808 6809 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6810 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6811 cmd = pkt->pkt_ulp_private; 6812 6813 mutex_enter(&cmd->cmd_port->fp_mutex); 6814 cmd->cmd_port->fp_out_fpcmds--; 6815 mutex_exit(&cmd->cmd_port->fp_mutex); 6816 6817 job = cmd->cmd_job; 6818 ASSERT(job->job_private != NULL); 
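/*
 * job_private points to the RNID accept buffer set up by the
 * ioctl path; on success the ELS payload is copied into it below
 * and fp_iodone() wakes up the waiting thread.
 */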
6819 6820 /* If failure or LS_RJT then retry the packet, if needed */ 6821 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6822 (void) fp_common_intr(pkt, 1); 6823 return; 6824 } 6825 6826 /* Save node_id memory allocated in ioctl code */ 6827 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6828 6829 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6830 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6831 6832 /* wakeup the ioctl thread and free the pkt */ 6833 fp_iodone(cmd); 6834 } 6835 6836 6837 /* 6838 * Handle solicited RLS response 6839 */ 6840 static void 6841 fp_rls_intr(fc_packet_t *pkt) 6842 { 6843 ls_code_t resp; 6844 job_request_t *job; 6845 fp_cmd_t *cmd; 6846 la_els_rls_acc_t *acc; 6847 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6848 6849 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6850 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6851 cmd = pkt->pkt_ulp_private; 6852 6853 mutex_enter(&cmd->cmd_port->fp_mutex); 6854 cmd->cmd_port->fp_out_fpcmds--; 6855 mutex_exit(&cmd->cmd_port->fp_mutex); 6856 6857 job = cmd->cmd_job; 6858 ASSERT(job->job_private != NULL); 6859 6860 /* If failure or LS_RJT then retry the packet, if needed */ 6861 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6862 (void) fp_common_intr(pkt, 1); 6863 return; 6864 } 6865 6866 /* Save link error status block in memory allocated in ioctl code */ 6867 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6868 6869 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6870 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6871 DDI_DEV_AUTOINCR); 6872 6873 /* wakeup the ioctl thread and free the pkt */ 6874 fp_iodone(cmd); 6875 } 6876 6877 6878 /* 6879 * A solicited command completion interrupt (mostly for commands 6880 * that require almost no post processing such as SCR ELS) 6881 */ 6882 static void 6883 fp_intr(fc_packet_t *pkt) 6884 { 6885 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6886 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6887 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6888 6889 if (FP_IS_PKT_ERROR(pkt)) { 6890 (void) fp_common_intr(pkt, 1); 6891 return; 6892 } 6893 fp_iodone(pkt->pkt_ulp_private); 6894 } 6895 6896 6897 /* 6898 * Handle the underlying port's state change 6899 */ 6900 static void 6901 fp_statec_cb(opaque_t port_handle, uint32_t state) 6902 { 6903 fc_local_port_t *port = port_handle; 6904 job_request_t *job; 6905 6906 /* 6907 * If it is not possible to process the callbacks 6908 * just drop the callback on the floor; Don't bother 6909 * to do something that isn't safe at this time 6910 */ 6911 mutex_enter(&port->fp_mutex); 6912 if ((port->fp_soft_state & 6913 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6914 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6915 mutex_exit(&port->fp_mutex); 6916 return; 6917 } 6918 6919 if (port->fp_statec_busy == 0) { 6920 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6921 #ifdef DEBUG 6922 } else { 6923 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6924 #endif 6925 } 6926 6927 port->fp_statec_busy++; 6928 6929 /* 6930 * For now, force the trusted method of device authentication (by 6931 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
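 *
 * (On FC_STATE_LIP or FC_STATE_LIP_LBIT_SET the state is folded into
 * FC_STATE_LOOP below, preserving the current link speed bits, and
 * fp_port_offline() is called so that remote ports are re-validated
 * with an explicit PLOGI.)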
6932 */ 6933 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6934 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6935 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6936 fp_port_offline(port, 0); 6937 } 6938 mutex_exit(&port->fp_mutex); 6939 6940 switch (FC_PORT_STATE_MASK(state)) { 6941 case FC_STATE_OFFLINE: 6942 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6943 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6944 if (job == NULL) { 6945 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6946 " fp_statec_cb() couldn't submit a job " 6947 " to the thread: failing.."); 6948 mutex_enter(&port->fp_mutex); 6949 if (--port->fp_statec_busy == 0) { 6950 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6951 } 6952 mutex_exit(&port->fp_mutex); 6953 return; 6954 } 6955 mutex_enter(&port->fp_mutex); 6956 /* 6957 * Zero out this field so that we do not retain 6958 * the fabric name as its no longer valid 6959 */ 6960 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6961 port->fp_state = state; 6962 mutex_exit(&port->fp_mutex); 6963 6964 fctl_enque_job(port, job); 6965 break; 6966 6967 case FC_STATE_ONLINE: 6968 case FC_STATE_LOOP: 6969 mutex_enter(&port->fp_mutex); 6970 port->fp_state = state; 6971 6972 if (port->fp_offline_tid) { 6973 timeout_id_t tid; 6974 6975 tid = port->fp_offline_tid; 6976 port->fp_offline_tid = NULL; 6977 mutex_exit(&port->fp_mutex); 6978 (void) untimeout(tid); 6979 } else { 6980 mutex_exit(&port->fp_mutex); 6981 } 6982 6983 job = fctl_alloc_job(JOB_PORT_ONLINE, 6984 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6985 if (job == NULL) { 6986 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6987 "fp_statec_cb() couldn't submit a job " 6988 "to the thread: failing.."); 6989 6990 mutex_enter(&port->fp_mutex); 6991 if (--port->fp_statec_busy == 0) { 6992 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6993 } 6994 mutex_exit(&port->fp_mutex); 6995 return; 6996 } 6997 fctl_enque_job(port, job); 6998 break; 6999 7000 case FC_STATE_RESET_REQUESTED: 7001 mutex_enter(&port->fp_mutex); 7002 port->fp_state = FC_STATE_OFFLINE; 7003 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 7004 mutex_exit(&port->fp_mutex); 7005 /* FALLTHROUGH */ 7006 7007 case FC_STATE_RESET: 7008 job = fctl_alloc_job(JOB_ULP_NOTIFY, 7009 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 7010 if (job == NULL) { 7011 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 7012 "fp_statec_cb() couldn't submit a job" 7013 " to the thread: failing.."); 7014 7015 mutex_enter(&port->fp_mutex); 7016 if (--port->fp_statec_busy == 0) { 7017 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7018 } 7019 mutex_exit(&port->fp_mutex); 7020 return; 7021 } 7022 7023 /* squeeze into some field in the job structure */ 7024 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 7025 fctl_enque_job(port, job); 7026 break; 7027 7028 case FC_STATE_TARGET_PORT_RESET: 7029 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 7030 /* FALLTHROUGH */ 7031 7032 case FC_STATE_NAMESERVICE: 7033 /* FALLTHROUGH */ 7034 7035 default: 7036 mutex_enter(&port->fp_mutex); 7037 if (--port->fp_statec_busy == 0) { 7038 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7039 } 7040 mutex_exit(&port->fp_mutex); 7041 break; 7042 } 7043 } 7044 7045 7046 /* 7047 * Register with the Name Server for RSCNs 7048 */ 7049 static int 7050 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 7051 int sleep) 7052 { 7053 uint32_t s_id; 7054 uchar_t class; 7055 fc_scr_req_t payload; 7056 fp_cmd_t *cmd; 7057 fc_packet_t *pkt; 7058 7059 mutex_enter(&port->fp_mutex); 7060 s_id = 
port->fp_port_id.port_id; 7061 class = port->fp_ns_login_class; 7062 mutex_exit(&port->fp_mutex); 7063 7064 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 7065 sizeof (fc_scr_resp_t), sleep, NULL); 7066 if (cmd == NULL) { 7067 return (FC_NOMEM); 7068 } 7069 7070 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 7071 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 7072 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 7073 cmd->cmd_retry_count = fp_retry_count; 7074 cmd->cmd_ulp_pkt = NULL; 7075 7076 pkt = &cmd->cmd_pkt; 7077 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 7078 7079 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 7080 7081 payload.ls_code.ls_code = LA_ELS_SCR; 7082 payload.ls_code.mbz = 0; 7083 payload.scr_rsvd = 0; 7084 payload.scr_func = scr_func; 7085 7086 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 7087 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 7088 7089 job->job_counter = 1; 7090 7091 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 7092 fp_iodone(cmd); 7093 } 7094 7095 return (FC_SUCCESS); 7096 } 7097 7098 7099 /* 7100 * There are basically two methods to determine the total number of 7101 * devices out in the NS database; reading the details of the two 7102 * methods described below, it shouldn't be hard to identify which 7103 * of the two methods is better. 7104 * 7105 * Method 1. 7106 * Iteratively issue GANs until all port identifiers are walked 7107 * 7108 * Method 2. 7109 * Issue GID_PT (get port Identifiers) with Maximum residual 7110 * field in the request CT HEADER set to accommodate only the 7111 * CT HEADER in the response frame. And if FC-GS-2 has been 7112 * carefully read, the NS here has a chance to FS_ACC the 7113 * request and indicate the residual size in the FS_ACC.
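 *
 * For example, in the GID_PT path of fp_ns_get_devcount() below:
 * each port identifier occupies one word, so a residual of N
 * reported in the accepted response means N identifiers were not
 * transferred, and the device count is computed as 1 + N.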
7114 * 7115 * Method 2 is wonderful, although it's not mandatory for the NS 7116 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7117 * (note with particular care the use of the auxiliary verb 'may') 7118 * 7119 */ 7120 static int 7121 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7122 int sleep) 7123 { 7124 int flags; 7125 int rval; 7126 uint32_t src_id; 7127 fctl_ns_req_t *ns_cmd; 7128 7129 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7130 7131 mutex_enter(&port->fp_mutex); 7132 src_id = port->fp_port_id.port_id; 7133 mutex_exit(&port->fp_mutex); 7134 7135 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7136 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7137 sizeof (ns_resp_gid_pt_t), 0, 7138 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7139 7140 if (ns_cmd == NULL) { 7141 return (FC_NOMEM); 7142 } 7143 7144 ns_cmd->ns_cmd_code = NS_GID_PT; 7145 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7146 = FC_NS_PORT_NX; /* All port types */ 7147 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7148 7149 } else { 7150 uint32_t ns_flags; 7151 7152 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7153 if (create) { 7154 ns_flags |= FCTL_NS_CREATE_DEVICE; 7155 } 7156 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7157 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7158 7159 if (ns_cmd == NULL) { 7160 return (FC_NOMEM); 7161 } 7162 ns_cmd->ns_gan_index = 0; 7163 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7164 ns_cmd->ns_cmd_code = NS_GA_NXT; 7165 ns_cmd->ns_gan_max = 0xFFFF; 7166 7167 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7168 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7169 } 7170 7171 flags = job->job_flags; 7172 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7173 job->job_counter = 1; 7174 7175 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7176 job->job_flags = flags; 7177 7178 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7179 uint16_t max_resid; 7180 7181 /* 7182 * Revert to scanning the NS if NS_GID_PT isn't 7183 * helping us figure out total number of devices. 7184 */ 7185 if (job->job_result != FC_SUCCESS || 7186 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7187 mutex_enter(&port->fp_mutex); 7188 port->fp_options &= ~FP_NS_SMART_COUNT; 7189 mutex_exit(&port->fp_mutex); 7190 7191 fctl_free_ns_cmd(ns_cmd); 7192 return (fp_ns_get_devcount(port, job, create, sleep)); 7193 } 7194 7195 mutex_enter(&port->fp_mutex); 7196 port->fp_total_devices = 1; 7197 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7198 if (max_resid) { 7199 /* 7200 * Since port identifier is 4 bytes and max_resid 7201 * is also in WORDS, max_resid simply indicates 7202 * the total number of port identifiers not 7203 * transferred 7204 */ 7205 port->fp_total_devices += max_resid; 7206 } 7207 mutex_exit(&port->fp_mutex); 7208 } 7209 mutex_enter(&port->fp_mutex); 7210 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7211 mutex_exit(&port->fp_mutex); 7212 fctl_free_ns_cmd(ns_cmd); 7213 7214 return (rval); 7215 } 7216 7217 /* 7218 * One heck of a function to serve userland. 
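 *
 * fp_fciocmd() decodes a single FCIO_* ioctl request: it checks the
 * open-time permissions, serializes exclusive-access commands, then
 * dispatches on fcio_cmd and copies the results back to userland via
 * fp_copyout()/fp_fcio_copyout().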
7219 */ 7220 static int 7221 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7222 { 7223 int rval = 0; 7224 int jcode; 7225 uint32_t ret; 7226 uchar_t open_flag; 7227 fcio_t *kfcio; 7228 job_request_t *job; 7229 boolean_t use32 = B_FALSE; 7230 7231 #ifdef _MULTI_DATAMODEL 7232 switch (ddi_model_convert_from(mode & FMODELS)) { 7233 case DDI_MODEL_ILP32: 7234 use32 = B_TRUE; 7235 break; 7236 7237 case DDI_MODEL_NONE: 7238 default: 7239 break; 7240 } 7241 #endif 7242 7243 mutex_enter(&port->fp_mutex); 7244 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7245 FP_SOFT_IN_UNSOL_CB)) { 7246 fcio->fcio_errno = FC_STATEC_BUSY; 7247 mutex_exit(&port->fp_mutex); 7248 rval = EAGAIN; 7249 if (fp_fcio_copyout(fcio, data, mode)) { 7250 rval = EFAULT; 7251 } 7252 return (rval); 7253 } 7254 open_flag = port->fp_flag; 7255 mutex_exit(&port->fp_mutex); 7256 7257 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7258 fcio->fcio_errno = FC_FAILURE; 7259 rval = EACCES; 7260 if (fp_fcio_copyout(fcio, data, mode)) { 7261 rval = EFAULT; 7262 } 7263 return (rval); 7264 } 7265 7266 /* 7267 * If an exclusive open was demanded during open, don't let 7268 * either innocuous or devil threads to share the file 7269 * descriptor and fire down exclusive access commands 7270 */ 7271 mutex_enter(&port->fp_mutex); 7272 if (port->fp_flag & FP_EXCL) { 7273 if (port->fp_flag & FP_EXCL_BUSY) { 7274 mutex_exit(&port->fp_mutex); 7275 fcio->fcio_errno = FC_FAILURE; 7276 return (EBUSY); 7277 } 7278 port->fp_flag |= FP_EXCL_BUSY; 7279 } 7280 mutex_exit(&port->fp_mutex); 7281 7282 fcio->fcio_errno = FC_SUCCESS; 7283 7284 switch (fcio->fcio_cmd) { 7285 case FCIO_GET_HOST_PARAMS: { 7286 fc_port_dev_t *val; 7287 fc_port_dev32_t *val32; 7288 int index; 7289 int lilp_device_count; 7290 fc_lilpmap_t *lilp_map; 7291 uchar_t *alpa_list; 7292 7293 if (use32 == B_TRUE) { 7294 if (fcio->fcio_olen != sizeof (*val32) || 7295 fcio->fcio_xfer != FCIO_XFER_READ) { 7296 rval = EINVAL; 7297 break; 7298 } 7299 } else { 7300 if (fcio->fcio_olen != sizeof (*val) || 7301 fcio->fcio_xfer != FCIO_XFER_READ) { 7302 rval = EINVAL; 7303 break; 7304 } 7305 } 7306 7307 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7308 7309 mutex_enter(&port->fp_mutex); 7310 val->dev_did = port->fp_port_id; 7311 val->dev_hard_addr = port->fp_hard_addr; 7312 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7313 val->dev_nwwn = port->fp_service_params.node_ww_name; 7314 val->dev_state = port->fp_state; 7315 7316 lilp_map = &port->fp_lilp_map; 7317 alpa_list = &lilp_map->lilp_alpalist[0]; 7318 lilp_device_count = lilp_map->lilp_length; 7319 for (index = 0; index < lilp_device_count; index++) { 7320 uint32_t d_id; 7321 7322 d_id = alpa_list[index]; 7323 if (d_id == port->fp_port_id.port_id) { 7324 break; 7325 } 7326 } 7327 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7328 7329 bcopy(port->fp_fc4_types, val->dev_type, 7330 sizeof (port->fp_fc4_types)); 7331 mutex_exit(&port->fp_mutex); 7332 7333 if (use32 == B_TRUE) { 7334 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7335 7336 val32->dev_did = val->dev_did; 7337 val32->dev_hard_addr = val->dev_hard_addr; 7338 val32->dev_pwwn = val->dev_pwwn; 7339 val32->dev_nwwn = val->dev_nwwn; 7340 val32->dev_state = val->dev_state; 7341 val32->dev_did.priv_lilp_posit = 7342 val->dev_did.priv_lilp_posit; 7343 7344 bcopy(val->dev_type, val32->dev_type, 7345 sizeof (port->fp_fc4_types)); 7346 7347 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7348 fcio->fcio_olen, mode) == 0) { 7349 if 
(fp_fcio_copyout(fcio, data, mode)) { 7350 rval = EFAULT; 7351 } 7352 } else { 7353 rval = EFAULT; 7354 } 7355 7356 kmem_free(val32, sizeof (*val32)); 7357 } else { 7358 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7359 fcio->fcio_olen, mode) == 0) { 7360 if (fp_fcio_copyout(fcio, data, mode)) { 7361 rval = EFAULT; 7362 } 7363 } else { 7364 rval = EFAULT; 7365 } 7366 } 7367 7368 /* need to free "val" here */ 7369 kmem_free(val, sizeof (*val)); 7370 break; 7371 } 7372 7373 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7374 uint32_t index; 7375 char *tmpPath; 7376 fc_local_port_t *tmpPort; 7377 7378 if (fcio->fcio_olen < MAXPATHLEN || 7379 fcio->fcio_ilen != sizeof (uint32_t)) { 7380 rval = EINVAL; 7381 break; 7382 } 7383 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7384 rval = EFAULT; 7385 break; 7386 } 7387 7388 tmpPort = fctl_get_adapter_port_by_index(port, index); 7389 if (tmpPort == NULL) { 7390 FP_TRACE(FP_NHEAD1(9, 0), 7391 "User supplied index out of range"); 7392 fcio->fcio_errno = FC_BADPORT; 7393 rval = EFAULT; 7394 if (fp_fcio_copyout(fcio, data, mode)) { 7395 rval = EFAULT; 7396 } 7397 break; 7398 } 7399 7400 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7401 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7402 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7403 MAXPATHLEN, mode) == 0) { 7404 if (fp_fcio_copyout(fcio, data, mode)) { 7405 rval = EFAULT; 7406 } 7407 } else { 7408 rval = EFAULT; 7409 } 7410 kmem_free(tmpPath, MAXPATHLEN); 7411 break; 7412 } 7413 7414 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7415 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7416 fc_hba_adapter_attributes_t *val; 7417 fc_hba_adapter_attributes32_t *val32; 7418 7419 if (use32 == B_TRUE) { 7420 if (fcio->fcio_olen < sizeof (*val32) || 7421 fcio->fcio_xfer != FCIO_XFER_READ) { 7422 rval = EINVAL; 7423 break; 7424 } 7425 } else { 7426 if (fcio->fcio_olen < sizeof (*val) || 7427 fcio->fcio_xfer != FCIO_XFER_READ) { 7428 rval = EINVAL; 7429 break; 7430 } 7431 } 7432 7433 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7434 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7435 mutex_enter(&port->fp_mutex); 7436 bcopy(port->fp_hba_port_attrs.manufacturer, 7437 val->Manufacturer, 7438 sizeof (val->Manufacturer)); 7439 bcopy(port->fp_hba_port_attrs.serial_number, 7440 val->SerialNumber, 7441 sizeof (val->SerialNumber)); 7442 bcopy(port->fp_hba_port_attrs.model, 7443 val->Model, 7444 sizeof (val->Model)); 7445 bcopy(port->fp_hba_port_attrs.model_description, 7446 val->ModelDescription, 7447 sizeof (val->ModelDescription)); 7448 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7449 port->fp_sym_node_namelen); 7450 bcopy(port->fp_hba_port_attrs.hardware_version, 7451 val->HardwareVersion, 7452 sizeof (val->HardwareVersion)); 7453 bcopy(port->fp_hba_port_attrs.option_rom_version, 7454 val->OptionROMVersion, 7455 sizeof (val->OptionROMVersion)); 7456 bcopy(port->fp_hba_port_attrs.firmware_version, 7457 val->FirmwareVersion, 7458 sizeof (val->FirmwareVersion)); 7459 val->VendorSpecificID = 7460 port->fp_hba_port_attrs.vendor_specific_id; 7461 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7462 &val->NodeWWN.raw_wwn, 7463 sizeof (val->NodeWWN.raw_wwn)); 7464 7465 7466 bcopy(port->fp_hba_port_attrs.driver_name, 7467 val->DriverName, 7468 sizeof (val->DriverName)); 7469 bcopy(port->fp_hba_port_attrs.driver_version, 7470 val->DriverVersion, 7471 sizeof (val->DriverVersion)); 7472 mutex_exit(&port->fp_mutex); 7473 7474 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7475 
val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7476 } else { 7477 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7478 } 7479 7480 if (use32 == B_TRUE) { 7481 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7482 val32->version = val->version; 7483 bcopy(val->Manufacturer, val32->Manufacturer, 7484 sizeof (val->Manufacturer)); 7485 bcopy(val->SerialNumber, val32->SerialNumber, 7486 sizeof (val->SerialNumber)); 7487 bcopy(val->Model, val32->Model, 7488 sizeof (val->Model)); 7489 bcopy(val->ModelDescription, val32->ModelDescription, 7490 sizeof (val->ModelDescription)); 7491 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7492 sizeof (val->NodeSymbolicName)); 7493 bcopy(val->HardwareVersion, val32->HardwareVersion, 7494 sizeof (val->HardwareVersion)); 7495 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7496 sizeof (val->OptionROMVersion)); 7497 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7498 sizeof (val->FirmwareVersion)); 7499 val32->VendorSpecificID = val->VendorSpecificID; 7500 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7501 sizeof (val->NodeWWN.raw_wwn)); 7502 bcopy(val->DriverName, val32->DriverName, 7503 sizeof (val->DriverName)); 7504 bcopy(val->DriverVersion, val32->DriverVersion, 7505 sizeof (val->DriverVersion)); 7506 7507 val32->NumberOfPorts = val->NumberOfPorts; 7508 7509 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7510 fcio->fcio_olen, mode) == 0) { 7511 if (fp_fcio_copyout(fcio, data, mode)) { 7512 rval = EFAULT; 7513 } 7514 } else { 7515 rval = EFAULT; 7516 } 7517 7518 kmem_free(val32, sizeof (*val32)); 7519 } else { 7520 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7521 fcio->fcio_olen, mode) == 0) { 7522 if (fp_fcio_copyout(fcio, data, mode)) { 7523 rval = EFAULT; 7524 } 7525 } else { 7526 rval = EFAULT; 7527 } 7528 } 7529 7530 kmem_free(val, sizeof (*val)); 7531 break; 7532 } 7533 7534 case FCIO_GET_NPIV_ATTRIBUTES: { 7535 fc_hba_npiv_attributes_t *attrs; 7536 7537 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7538 mutex_enter(&port->fp_mutex); 7539 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7540 &attrs->NodeWWN.raw_wwn, 7541 sizeof (attrs->NodeWWN.raw_wwn)); 7542 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7543 &attrs->PortWWN.raw_wwn, 7544 sizeof (attrs->PortWWN.raw_wwn)); 7545 mutex_exit(&port->fp_mutex); 7546 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7547 fcio->fcio_olen, mode) == 0) { 7548 if (fp_fcio_copyout(fcio, data, mode)) { 7549 rval = EFAULT; 7550 } 7551 } else { 7552 rval = EFAULT; 7553 } 7554 kmem_free(attrs, sizeof (*attrs)); 7555 break; 7556 } 7557 7558 case FCIO_DELETE_NPIV_PORT: { 7559 fc_local_port_t *tmpport; 7560 char ww_pname[17]; 7561 la_wwn_t vwwn[1]; 7562 7563 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7564 if (ddi_copyin(fcio->fcio_ibuf, 7565 &vwwn, sizeof (la_wwn_t), mode)) { 7566 rval = EFAULT; 7567 break; 7568 } 7569 7570 fc_wwn_to_str(&vwwn[0], ww_pname); 7571 FP_TRACE(FP_NHEAD1(3, 0), 7572 "Delete NPIV Port %s", ww_pname); 7573 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7574 if (tmpport == NULL) { 7575 FP_TRACE(FP_NHEAD1(3, 0), 7576 "Delete NPIV Port : no found"); 7577 rval = EFAULT; 7578 } else { 7579 fc_local_port_t *nextport = tmpport->fp_port_next; 7580 fc_local_port_t *prevport = tmpport->fp_port_prev; 7581 int portlen, portindex, ret; 7582 7583 portlen = sizeof (portindex); 7584 ret = ddi_prop_op(DDI_DEV_T_ANY, 7585 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7586 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7587 (caddr_t)&portindex, 
&portlen); 7588 if (ret != DDI_SUCCESS) { 7589 rval = EFAULT; 7590 break; 7591 } 7592 if (ndi_devi_offline(tmpport->fp_port_dip, 7593 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7594 FP_TRACE(FP_NHEAD1(1, 0), 7595 "Delete NPIV Port failed"); 7596 mutex_enter(&port->fp_mutex); 7597 tmpport->fp_npiv_state = 0; 7598 mutex_exit(&port->fp_mutex); 7599 rval = EFAULT; 7600 } else { 7601 mutex_enter(&port->fp_mutex); 7602 nextport->fp_port_prev = prevport; 7603 prevport->fp_port_next = nextport; 7604 if (port == port->fp_port_next) { 7605 port->fp_port_next = 7606 port->fp_port_prev = NULL; 7607 } 7608 port->fp_npiv_portnum--; 7609 FP_TRACE(FP_NHEAD1(3, 0), 7610 "Delete NPIV Port %d", portindex); 7611 port->fp_npiv_portindex[portindex-1] = 0; 7612 mutex_exit(&port->fp_mutex); 7613 } 7614 } 7615 break; 7616 } 7617 7618 case FCIO_CREATE_NPIV_PORT: { 7619 char ww_nname[17], ww_pname[17]; 7620 la_npiv_create_entry_t entrybuf; 7621 uint32_t vportindex = 0; 7622 int npiv_ret = 0; 7623 char *portname, *fcaname; 7624 7625 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7626 (void) ddi_pathname(port->fp_port_dip, portname); 7627 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7628 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7629 FP_TRACE(FP_NHEAD1(1, 0), 7630 "Create NPIV port %s %s %s", portname, fcaname, 7631 ddi_driver_name(port->fp_fca_dip)); 7632 kmem_free(portname, MAXPATHLEN); 7633 kmem_free(fcaname, MAXPATHLEN); 7634 if (ddi_copyin(fcio->fcio_ibuf, 7635 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7636 rval = EFAULT; 7637 break; 7638 } 7639 7640 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7641 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7642 vportindex = entrybuf.vindex; 7643 FP_TRACE(FP_NHEAD1(3, 0), 7644 "Create NPIV Port %s %s %d", 7645 ww_nname, ww_pname, vportindex); 7646 7647 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7648 rval = EFAULT; 7649 break; 7650 } 7651 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7652 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7653 if (npiv_ret == NDI_SUCCESS) { 7654 mutex_enter(&port->fp_mutex); 7655 port->fp_npiv_portnum++; 7656 mutex_exit(&port->fp_mutex); 7657 if (fp_copyout((void *)&vportindex, 7658 (void *)fcio->fcio_obuf, 7659 fcio->fcio_olen, mode) == 0) { 7660 if (fp_fcio_copyout(fcio, data, mode)) { 7661 rval = EFAULT; 7662 } 7663 } else { 7664 rval = EFAULT; 7665 } 7666 } else { 7667 rval = EFAULT; 7668 } 7669 FP_TRACE(FP_NHEAD1(3, 0), 7670 "Create NPIV Port %d %d", npiv_ret, vportindex); 7671 break; 7672 } 7673 7674 case FCIO_GET_NPIV_PORT_LIST: { 7675 fc_hba_npiv_port_list_t *list; 7676 int count; 7677 7678 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7679 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7680 rval = EINVAL; 7681 break; 7682 } 7683 7684 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7685 list->version = FC_HBA_LIST_VERSION; 7686 /* build npiv port list */ 7687 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7688 if (count < 0) { 7689 rval = ENXIO; 7690 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7691 kmem_free(list, fcio->fcio_olen); 7692 break; 7693 } 7694 list->numAdapters = count; 7695 7696 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7697 fcio->fcio_olen, mode) == 0) { 7698 if (fp_fcio_copyout(fcio, data, mode)) { 7699 FP_TRACE(FP_NHEAD1(1, 0), 7700 "Copy NPIV Port data error"); 7701 rval = EFAULT; 7702 } 7703 } else { 7704 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7705 rval = EFAULT; 7706 } 7707 kmem_free(list, fcio->fcio_olen); 7708 break; 7709 } 7710 
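/*
 * Report this port's NPIV attributes (node/port WWNs, current and
 * maximum number of NPIV ports) back to the caller.
 */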
7711 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7712 fc_hba_port_npiv_attributes_t *val; 7713 7714 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7715 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7716 7717 mutex_enter(&port->fp_mutex); 7718 val->npivflag = port->fp_npiv_flag; 7719 val->lastChange = port->fp_last_change; 7720 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7721 &val->PortWWN.raw_wwn, 7722 sizeof (val->PortWWN.raw_wwn)); 7723 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7724 &val->NodeWWN.raw_wwn, 7725 sizeof (val->NodeWWN.raw_wwn)); 7726 mutex_exit(&port->fp_mutex); 7727 7728 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7729 if (port->fp_npiv_type != FC_NPIV_PORT) { 7730 val->MaxNumberOfNPIVPorts = 7731 port->fp_fca_tran->fca_num_npivports; 7732 } else { 7733 val->MaxNumberOfNPIVPorts = 0; 7734 } 7735 7736 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7737 fcio->fcio_olen, mode) == 0) { 7738 if (fp_fcio_copyout(fcio, data, mode)) { 7739 rval = EFAULT; 7740 } 7741 } else { 7742 rval = EFAULT; 7743 } 7744 kmem_free(val, sizeof (*val)); 7745 break; 7746 } 7747 7748 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7749 fc_hba_port_attributes_t *val; 7750 fc_hba_port_attributes32_t *val32; 7751 7752 if (use32 == B_TRUE) { 7753 if (fcio->fcio_olen < sizeof (*val32) || 7754 fcio->fcio_xfer != FCIO_XFER_READ) { 7755 rval = EINVAL; 7756 break; 7757 } 7758 } else { 7759 if (fcio->fcio_olen < sizeof (*val) || 7760 fcio->fcio_xfer != FCIO_XFER_READ) { 7761 rval = EINVAL; 7762 break; 7763 } 7764 } 7765 7766 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7767 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7768 mutex_enter(&port->fp_mutex); 7769 val->lastChange = port->fp_last_change; 7770 val->fp_minor = port->fp_instance; 7771 7772 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7773 &val->PortWWN.raw_wwn, 7774 sizeof (val->PortWWN.raw_wwn)); 7775 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7776 &val->NodeWWN.raw_wwn, 7777 sizeof (val->NodeWWN.raw_wwn)); 7778 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7779 sizeof (val->FabricName.raw_wwn)); 7780 7781 val->PortFcId = port->fp_port_id.port_id; 7782 7783 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7784 case FC_STATE_OFFLINE: 7785 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7786 break; 7787 case FC_STATE_ONLINE: 7788 case FC_STATE_LOOP: 7789 case FC_STATE_NAMESERVICE: 7790 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7791 break; 7792 default: 7793 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7794 break; 7795 } 7796 7797 /* Translate from LV to FC-HBA port type codes */ 7798 switch (port->fp_port_type.port_type) { 7799 case FC_NS_PORT_N: 7800 val->PortType = FC_HBA_PORTTYPE_NPORT; 7801 break; 7802 case FC_NS_PORT_NL: 7803 /* Actually means loop for us */ 7804 val->PortType = FC_HBA_PORTTYPE_LPORT; 7805 break; 7806 case FC_NS_PORT_F: 7807 val->PortType = FC_HBA_PORTTYPE_FPORT; 7808 break; 7809 case FC_NS_PORT_FL: 7810 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7811 break; 7812 case FC_NS_PORT_E: 7813 val->PortType = FC_HBA_PORTTYPE_EPORT; 7814 break; 7815 default: 7816 val->PortType = FC_HBA_PORTTYPE_OTHER; 7817 break; 7818 } 7819 7820 7821 /* 7822 * If fp has decided that the topology is public loop, 7823 * we will indicate that using the appropriate 7824 * FC HBA API constant. 
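 *
 * (The switch on fp_topology below takes precedence over the value
 * derived from fp_port_type above: FC_TOP_PUBLIC_LOOP is reported as
 * NLPORT and FC_TOP_PT_PT as PTP.)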
7825 */ 7826 switch (port->fp_topology) { 7827 case FC_TOP_PUBLIC_LOOP: 7828 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7829 break; 7830 7831 case FC_TOP_PT_PT: 7832 val->PortType = FC_HBA_PORTTYPE_PTP; 7833 break; 7834 7835 case FC_TOP_UNKNOWN: 7836 /* 7837 * This should cover the case where nothing is connected 7838 * to the port. Crystal+ is p'bly an exception here. 7839 * For Crystal+, port 0 will come up as private loop 7840 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7841 * nothing is connected to it. 7842 * Current plan is to let userland handle this. 7843 */ 7844 if (port->fp_bind_state == FC_STATE_OFFLINE) { 7845 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7846 } 7847 break; 7848 7849 default: 7850 /* 7851 * Do Nothing. 7852 * Unused: 7853 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7854 */ 7855 break; 7856 } 7857 7858 val->PortSupportedClassofService = 7859 port->fp_hba_port_attrs.supported_cos; 7860 val->PortSupportedFc4Types[0] = 0; 7861 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7862 sizeof (val->PortActiveFc4Types)); 7863 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7864 port->fp_sym_port_namelen); 7865 val->PortSupportedSpeed = 7866 port->fp_hba_port_attrs.supported_speed; 7867 7868 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7869 case FC_STATE_1GBIT_SPEED: 7870 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7871 break; 7872 case FC_STATE_2GBIT_SPEED: 7873 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7874 break; 7875 case FC_STATE_4GBIT_SPEED: 7876 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7877 break; 7878 case FC_STATE_8GBIT_SPEED: 7879 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7880 break; 7881 case FC_STATE_10GBIT_SPEED: 7882 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7883 break; 7884 case FC_STATE_16GBIT_SPEED: 7885 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7886 break; 7887 default: 7888 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7889 break; 7890 } 7891 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7892 val->NumberofDiscoveredPorts = port->fp_dev_count; 7893 mutex_exit(&port->fp_mutex); 7894 7895 if (use32 == B_TRUE) { 7896 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7897 val32->version = val->version; 7898 val32->lastChange = val->lastChange; 7899 val32->fp_minor = val->fp_minor; 7900 7901 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7902 sizeof (val->PortWWN.raw_wwn)); 7903 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7904 sizeof (val->NodeWWN.raw_wwn)); 7905 val32->PortFcId = val->PortFcId; 7906 val32->PortState = val->PortState; 7907 val32->PortType = val->PortType; 7908 7909 val32->PortSupportedClassofService = 7910 val->PortSupportedClassofService; 7911 bcopy(val->PortActiveFc4Types, 7912 val32->PortActiveFc4Types, 7913 sizeof (val->PortActiveFc4Types)); 7914 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7915 sizeof (val->PortSymbolicName)); 7916 bcopy(&val->FabricName, &val32->FabricName, 7917 sizeof (val->FabricName.raw_wwn)); 7918 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7919 val32->PortSpeed = val->PortSpeed; 7920 7921 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7922 val32->NumberofDiscoveredPorts = 7923 val->NumberofDiscoveredPorts; 7924 7925 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7926 fcio->fcio_olen, mode) == 0) { 7927 if (fp_fcio_copyout(fcio, data, mode)) { 7928 rval = EFAULT; 7929 } 7930 } else { 7931 rval = EFAULT; 7932 } 7933 7934 kmem_free(val32, sizeof (*val32)); 7935 } else { 7936 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7937 fcio->fcio_olen, mode) == 0) 
{ 7938 if (fp_fcio_copyout(fcio, data, mode)) { 7939 rval = EFAULT; 7940 } 7941 } else { 7942 rval = EFAULT; 7943 } 7944 } 7945 7946 kmem_free(val, sizeof (*val)); 7947 break; 7948 } 7949 7950 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7951 fc_hba_port_attributes_t *val; 7952 fc_hba_port_attributes32_t *val32; 7953 uint32_t index = 0; 7954 fc_remote_port_t *tmp_pd; 7955 7956 if (use32 == B_TRUE) { 7957 if (fcio->fcio_olen < sizeof (*val32) || 7958 fcio->fcio_xfer != FCIO_XFER_READ) { 7959 rval = EINVAL; 7960 break; 7961 } 7962 } else { 7963 if (fcio->fcio_olen < sizeof (*val) || 7964 fcio->fcio_xfer != FCIO_XFER_READ) { 7965 rval = EINVAL; 7966 break; 7967 } 7968 } 7969 7970 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7971 rval = EFAULT; 7972 break; 7973 } 7974 7975 if (index >= port->fp_dev_count) { 7976 FP_TRACE(FP_NHEAD1(9, 0), 7977 "User supplied index out of range"); 7978 fcio->fcio_errno = FC_OUTOFBOUNDS; 7979 rval = EINVAL; 7980 if (fp_fcio_copyout(fcio, data, mode)) { 7981 rval = EFAULT; 7982 } 7983 break; 7984 } 7985 7986 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7987 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7988 7989 mutex_enter(&port->fp_mutex); 7990 tmp_pd = fctl_lookup_pd_by_index(port, index); 7991 7992 if (tmp_pd == NULL) { 7993 fcio->fcio_errno = FC_BADPORT; 7994 rval = EINVAL; 7995 } else { 7996 val->lastChange = port->fp_last_change; 7997 val->fp_minor = port->fp_instance; 7998 7999 mutex_enter(&tmp_pd->pd_mutex); 8000 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8001 &val->PortWWN.raw_wwn, 8002 sizeof (val->PortWWN.raw_wwn)); 8003 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8004 &val->NodeWWN.raw_wwn, 8005 sizeof (val->NodeWWN.raw_wwn)); 8006 val->PortFcId = tmp_pd->pd_port_id.port_id; 8007 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8008 tmp_pd->pd_spn_len); 8009 val->PortSupportedClassofService = tmp_pd->pd_cos; 8010 /* 8011 * we will assume the sizeof these pd_fc4types and 8012 * portActiveFc4Types will remain the same. 
we could 8013 * add in a check for it, but we decided it was unneeded 8014 */ 8015 bcopy((caddr_t)tmp_pd->pd_fc4types, 8016 val->PortActiveFc4Types, 8017 sizeof (tmp_pd->pd_fc4types)); 8018 val->PortState = 8019 fp_map_remote_port_state(tmp_pd->pd_state); 8020 mutex_exit(&tmp_pd->pd_mutex); 8021 8022 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8023 val->PortSupportedFc4Types[0] = 0; 8024 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8025 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8026 val->PortMaxFrameSize = 0; 8027 val->NumberofDiscoveredPorts = 0; 8028 8029 if (use32 == B_TRUE) { 8030 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8031 val32->version = val->version; 8032 val32->lastChange = val->lastChange; 8033 val32->fp_minor = val->fp_minor; 8034 8035 bcopy(&val->PortWWN.raw_wwn, 8036 &val32->PortWWN.raw_wwn, 8037 sizeof (val->PortWWN.raw_wwn)); 8038 bcopy(&val->NodeWWN.raw_wwn, 8039 &val32->NodeWWN.raw_wwn, 8040 sizeof (val->NodeWWN.raw_wwn)); 8041 val32->PortFcId = val->PortFcId; 8042 bcopy(val->PortSymbolicName, 8043 val32->PortSymbolicName, 8044 sizeof (val->PortSymbolicName)); 8045 val32->PortSupportedClassofService = 8046 val->PortSupportedClassofService; 8047 bcopy(val->PortActiveFc4Types, 8048 val32->PortActiveFc4Types, 8049 sizeof (tmp_pd->pd_fc4types)); 8050 8051 val32->PortType = val->PortType; 8052 val32->PortState = val->PortState; 8053 val32->PortSupportedFc4Types[0] = 8054 val->PortSupportedFc4Types[0]; 8055 val32->PortSupportedSpeed = 8056 val->PortSupportedSpeed; 8057 val32->PortSpeed = val->PortSpeed; 8058 val32->PortMaxFrameSize = 8059 val->PortMaxFrameSize; 8060 val32->NumberofDiscoveredPorts = 8061 val->NumberofDiscoveredPorts; 8062 8063 if (fp_copyout((void *)val32, 8064 (void *)fcio->fcio_obuf, 8065 fcio->fcio_olen, mode) == 0) { 8066 if (fp_fcio_copyout(fcio, 8067 data, mode)) { 8068 rval = EFAULT; 8069 } 8070 } else { 8071 rval = EFAULT; 8072 } 8073 8074 kmem_free(val32, sizeof (*val32)); 8075 } else { 8076 if (fp_copyout((void *)val, 8077 (void *)fcio->fcio_obuf, 8078 fcio->fcio_olen, mode) == 0) { 8079 if (fp_fcio_copyout(fcio, data, mode)) { 8080 rval = EFAULT; 8081 } 8082 } else { 8083 rval = EFAULT; 8084 } 8085 } 8086 } 8087 8088 mutex_exit(&port->fp_mutex); 8089 kmem_free(val, sizeof (*val)); 8090 break; 8091 } 8092 8093 case FCIO_GET_PORT_ATTRIBUTES: { 8094 fc_hba_port_attributes_t *val; 8095 fc_hba_port_attributes32_t *val32; 8096 la_wwn_t wwn; 8097 fc_remote_port_t *tmp_pd; 8098 8099 if (use32 == B_TRUE) { 8100 if (fcio->fcio_olen < sizeof (*val32) || 8101 fcio->fcio_xfer != FCIO_XFER_READ) { 8102 rval = EINVAL; 8103 break; 8104 } 8105 } else { 8106 if (fcio->fcio_olen < sizeof (*val) || 8107 fcio->fcio_xfer != FCIO_XFER_READ) { 8108 rval = EINVAL; 8109 break; 8110 } 8111 } 8112 8113 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8114 rval = EFAULT; 8115 break; 8116 } 8117 8118 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8119 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8120 8121 mutex_enter(&port->fp_mutex); 8122 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8123 val->lastChange = port->fp_last_change; 8124 val->fp_minor = port->fp_instance; 8125 mutex_exit(&port->fp_mutex); 8126 8127 if (tmp_pd == NULL) { 8128 fcio->fcio_errno = FC_BADWWN; 8129 rval = EINVAL; 8130 } else { 8131 mutex_enter(&tmp_pd->pd_mutex); 8132 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8133 &val->PortWWN.raw_wwn, 8134 sizeof (val->PortWWN.raw_wwn)); 8135 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8136 &val->NodeWWN.raw_wwn, 8137 sizeof 
(val->NodeWWN.raw_wwn)); 8138 val->PortFcId = tmp_pd->pd_port_id.port_id; 8139 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8140 tmp_pd->pd_spn_len); 8141 val->PortSupportedClassofService = tmp_pd->pd_cos; 8142 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8143 val->PortState = 8144 fp_map_remote_port_state(tmp_pd->pd_state); 8145 val->PortSupportedFc4Types[0] = 0; 8146 /* 8147 * we will assume the sizeof these pd_fc4types and 8148 * portActiveFc4Types will remain the same. we could 8149 * add in a check for it, but we decided it was unneeded 8150 */ 8151 bcopy((caddr_t)tmp_pd->pd_fc4types, 8152 val->PortActiveFc4Types, 8153 sizeof (tmp_pd->pd_fc4types)); 8154 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8155 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8156 val->PortMaxFrameSize = 0; 8157 val->NumberofDiscoveredPorts = 0; 8158 mutex_exit(&tmp_pd->pd_mutex); 8159 8160 if (use32 == B_TRUE) { 8161 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8162 val32->version = val->version; 8163 val32->lastChange = val->lastChange; 8164 val32->fp_minor = val->fp_minor; 8165 bcopy(&val->PortWWN.raw_wwn, 8166 &val32->PortWWN.raw_wwn, 8167 sizeof (val->PortWWN.raw_wwn)); 8168 bcopy(&val->NodeWWN.raw_wwn, 8169 &val32->NodeWWN.raw_wwn, 8170 sizeof (val->NodeWWN.raw_wwn)); 8171 val32->PortFcId = val->PortFcId; 8172 bcopy(val->PortSymbolicName, 8173 val32->PortSymbolicName, 8174 sizeof (val->PortSymbolicName)); 8175 val32->PortSupportedClassofService = 8176 val->PortSupportedClassofService; 8177 val32->PortType = val->PortType; 8178 val32->PortState = val->PortState; 8179 val32->PortSupportedFc4Types[0] = 8180 val->PortSupportedFc4Types[0]; 8181 bcopy(val->PortActiveFc4Types, 8182 val32->PortActiveFc4Types, 8183 sizeof (tmp_pd->pd_fc4types)); 8184 val32->PortSupportedSpeed = 8185 val->PortSupportedSpeed; 8186 val32->PortSpeed = val->PortSpeed; 8187 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8188 val32->NumberofDiscoveredPorts = 8189 val->NumberofDiscoveredPorts; 8190 8191 if (fp_copyout((void *)val32, 8192 (void *)fcio->fcio_obuf, 8193 fcio->fcio_olen, mode) == 0) { 8194 if (fp_fcio_copyout(fcio, data, mode)) { 8195 rval = EFAULT; 8196 } 8197 } else { 8198 rval = EFAULT; 8199 } 8200 8201 kmem_free(val32, sizeof (*val32)); 8202 } else { 8203 if (fp_copyout((void *)val, 8204 (void *)fcio->fcio_obuf, 8205 fcio->fcio_olen, mode) == 0) { 8206 if (fp_fcio_copyout(fcio, data, mode)) { 8207 rval = EFAULT; 8208 } 8209 } else { 8210 rval = EFAULT; 8211 } 8212 } 8213 } 8214 kmem_free(val, sizeof (*val)); 8215 break; 8216 } 8217 8218 case FCIO_GET_NUM_DEVS: { 8219 int num_devices; 8220 8221 if (fcio->fcio_olen != sizeof (num_devices) || 8222 fcio->fcio_xfer != FCIO_XFER_READ) { 8223 rval = EINVAL; 8224 break; 8225 } 8226 8227 mutex_enter(&port->fp_mutex); 8228 switch (port->fp_topology) { 8229 case FC_TOP_PRIVATE_LOOP: 8230 case FC_TOP_PT_PT: 8231 num_devices = port->fp_total_devices; 8232 fcio->fcio_errno = FC_SUCCESS; 8233 break; 8234 8235 case FC_TOP_PUBLIC_LOOP: 8236 case FC_TOP_FABRIC: 8237 mutex_exit(&port->fp_mutex); 8238 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8239 NULL, KM_SLEEP); 8240 ASSERT(job != NULL); 8241 8242 /* 8243 * In FC-GS-2 the Name Server doesn't send out 8244 * RSCNs for any Name Server Database updates 8245 * When it is finally fixed there is no need 8246 * to probe as below and should be removed. 
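 *
 * (The probe below runs fp_ns_get_devcount() with a temporary job so
 * that fp_total_devices reflects the current Name Server contents
 * before the count is copied out.)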
8247 */ 8248 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8249 fctl_dealloc_job(job); 8250 8251 mutex_enter(&port->fp_mutex); 8252 num_devices = port->fp_total_devices; 8253 fcio->fcio_errno = FC_SUCCESS; 8254 break; 8255 8256 case FC_TOP_NO_NS: 8257 /* FALLTHROUGH */ 8258 case FC_TOP_UNKNOWN: 8259 /* FALLTHROUGH */ 8260 default: 8261 num_devices = 0; 8262 fcio->fcio_errno = FC_SUCCESS; 8263 break; 8264 } 8265 mutex_exit(&port->fp_mutex); 8266 8267 if (fp_copyout((void *)&num_devices, 8268 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8269 mode) == 0) { 8270 if (fp_fcio_copyout(fcio, data, mode)) { 8271 rval = EFAULT; 8272 } 8273 } else { 8274 rval = EFAULT; 8275 } 8276 break; 8277 } 8278 8279 case FCIO_GET_DEV_LIST: { 8280 int num_devices; 8281 int new_count; 8282 int map_size; 8283 8284 if (fcio->fcio_xfer != FCIO_XFER_READ || 8285 fcio->fcio_alen != sizeof (new_count)) { 8286 rval = EINVAL; 8287 break; 8288 } 8289 8290 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8291 8292 mutex_enter(&port->fp_mutex); 8293 if (num_devices < port->fp_total_devices) { 8294 fcio->fcio_errno = FC_TOOMANY; 8295 new_count = port->fp_total_devices; 8296 mutex_exit(&port->fp_mutex); 8297 8298 if (fp_copyout((void *)&new_count, 8299 (void *)fcio->fcio_abuf, 8300 sizeof (new_count), mode)) { 8301 rval = EFAULT; 8302 break; 8303 } 8304 8305 if (fp_fcio_copyout(fcio, data, mode)) { 8306 rval = EFAULT; 8307 break; 8308 } 8309 rval = EINVAL; 8310 break; 8311 } 8312 8313 if (port->fp_total_devices <= 0) { 8314 fcio->fcio_errno = FC_NO_MAP; 8315 new_count = port->fp_total_devices; 8316 mutex_exit(&port->fp_mutex); 8317 8318 if (fp_copyout((void *)&new_count, 8319 (void *)fcio->fcio_abuf, 8320 sizeof (new_count), mode)) { 8321 rval = EFAULT; 8322 break; 8323 } 8324 8325 if (fp_fcio_copyout(fcio, data, mode)) { 8326 rval = EFAULT; 8327 break; 8328 } 8329 rval = EINVAL; 8330 break; 8331 } 8332 8333 switch (port->fp_topology) { 8334 case FC_TOP_PRIVATE_LOOP: 8335 if (fp_fillout_loopmap(port, fcio, 8336 mode) != FC_SUCCESS) { 8337 rval = EFAULT; 8338 break; 8339 } 8340 if (fp_fcio_copyout(fcio, data, mode)) { 8341 rval = EFAULT; 8342 } 8343 break; 8344 8345 case FC_TOP_PT_PT: 8346 if (fp_fillout_p2pmap(port, fcio, 8347 mode) != FC_SUCCESS) { 8348 rval = EFAULT; 8349 break; 8350 } 8351 if (fp_fcio_copyout(fcio, data, mode)) { 8352 rval = EFAULT; 8353 } 8354 break; 8355 8356 case FC_TOP_PUBLIC_LOOP: 8357 case FC_TOP_FABRIC: { 8358 fctl_ns_req_t *ns_cmd; 8359 8360 map_size = 8361 sizeof (fc_port_dev_t) * port->fp_total_devices; 8362 8363 mutex_exit(&port->fp_mutex); 8364 8365 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8366 sizeof (ns_resp_gan_t), map_size, 8367 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8368 KM_SLEEP); 8369 ASSERT(ns_cmd != NULL); 8370 8371 ns_cmd->ns_gan_index = 0; 8372 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8373 ns_cmd->ns_cmd_code = NS_GA_NXT; 8374 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8375 8376 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8377 NULL, KM_SLEEP); 8378 ASSERT(job != NULL); 8379 8380 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8381 8382 if (ret != FC_SUCCESS || 8383 job->job_result != FC_SUCCESS) { 8384 fctl_free_ns_cmd(ns_cmd); 8385 8386 fcio->fcio_errno = job->job_result; 8387 new_count = 0; 8388 if (fp_copyout((void *)&new_count, 8389 (void *)fcio->fcio_abuf, 8390 sizeof (new_count), mode)) { 8391 fctl_dealloc_job(job); 8392 mutex_enter(&port->fp_mutex); 8393 rval = EFAULT; 8394 break; 8395 } 8396 8397 if (fp_fcio_copyout(fcio, data, 
mode)) { 8398 fctl_dealloc_job(job); 8399 mutex_enter(&port->fp_mutex); 8400 rval = EFAULT; 8401 break; 8402 } 8403 rval = EIO; 8404 mutex_enter(&port->fp_mutex); 8405 break; 8406 } 8407 fctl_dealloc_job(job); 8408 8409 new_count = ns_cmd->ns_gan_index; 8410 if (fp_copyout((void *)&new_count, 8411 (void *)fcio->fcio_abuf, sizeof (new_count), 8412 mode)) { 8413 rval = EFAULT; 8414 fctl_free_ns_cmd(ns_cmd); 8415 mutex_enter(&port->fp_mutex); 8416 break; 8417 } 8418 8419 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8420 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8421 ns_cmd->ns_gan_index, mode)) { 8422 rval = EFAULT; 8423 fctl_free_ns_cmd(ns_cmd); 8424 mutex_enter(&port->fp_mutex); 8425 break; 8426 } 8427 fctl_free_ns_cmd(ns_cmd); 8428 8429 if (fp_fcio_copyout(fcio, data, mode)) { 8430 rval = EFAULT; 8431 } 8432 mutex_enter(&port->fp_mutex); 8433 break; 8434 } 8435 8436 case FC_TOP_NO_NS: 8437 /* FALLTHROUGH */ 8438 case FC_TOP_UNKNOWN: 8439 /* FALLTHROUGH */ 8440 default: 8441 fcio->fcio_errno = FC_NO_MAP; 8442 num_devices = port->fp_total_devices; 8443 8444 if (fp_copyout((void *)&new_count, 8445 (void *)fcio->fcio_abuf, 8446 sizeof (new_count), mode)) { 8447 rval = EFAULT; 8448 break; 8449 } 8450 8451 if (fp_fcio_copyout(fcio, data, mode)) { 8452 rval = EFAULT; 8453 break; 8454 } 8455 rval = EINVAL; 8456 break; 8457 } 8458 mutex_exit(&port->fp_mutex); 8459 break; 8460 } 8461 8462 case FCIO_GET_SYM_PNAME: { 8463 rval = ENOTSUP; 8464 break; 8465 } 8466 8467 case FCIO_GET_SYM_NNAME: { 8468 rval = ENOTSUP; 8469 break; 8470 } 8471 8472 case FCIO_SET_SYM_PNAME: { 8473 rval = ENOTSUP; 8474 break; 8475 } 8476 8477 case FCIO_SET_SYM_NNAME: { 8478 rval = ENOTSUP; 8479 break; 8480 } 8481 8482 case FCIO_GET_LOGI_PARAMS: { 8483 la_wwn_t pwwn; 8484 la_wwn_t *my_pwwn; 8485 la_els_logi_t *params; 8486 la_els_logi32_t *params32; 8487 fc_remote_node_t *node; 8488 fc_remote_port_t *pd; 8489 8490 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8491 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8492 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8493 rval = EINVAL; 8494 break; 8495 } 8496 8497 if (use32 == B_TRUE) { 8498 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8499 rval = EINVAL; 8500 break; 8501 } 8502 } else { 8503 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8504 rval = EINVAL; 8505 break; 8506 } 8507 } 8508 8509 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8510 rval = EFAULT; 8511 break; 8512 } 8513 8514 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8515 if (pd == NULL) { 8516 mutex_enter(&port->fp_mutex); 8517 my_pwwn = &port->fp_service_params.nport_ww_name; 8518 mutex_exit(&port->fp_mutex); 8519 8520 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8521 rval = ENXIO; 8522 break; 8523 } 8524 8525 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8526 mutex_enter(&port->fp_mutex); 8527 *params = port->fp_service_params; 8528 mutex_exit(&port->fp_mutex); 8529 } else { 8530 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8531 8532 mutex_enter(&pd->pd_mutex); 8533 params->ls_code.mbz = params->ls_code.ls_code = 0; 8534 params->common_service = pd->pd_csp; 8535 params->nport_ww_name = pd->pd_port_name; 8536 params->class_1 = pd->pd_clsp1; 8537 params->class_2 = pd->pd_clsp2; 8538 params->class_3 = pd->pd_clsp3; 8539 node = pd->pd_remote_nodep; 8540 mutex_exit(&pd->pd_mutex); 8541 8542 bzero(params->reserved, sizeof (params->reserved)); 8543 8544 mutex_enter(&node->fd_mutex); 8545 bcopy(node->fd_vv, params->vendor_version, 8546 sizeof (node->fd_vv)); 8547 params->node_ww_name = 
node->fd_node_name; 8548 mutex_exit(&node->fd_mutex); 8549 8550 fctl_release_remote_port(pd); 8551 } 8552 8553 if (use32 == B_TRUE) { 8554 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8555 8556 params32->ls_code.mbz = params->ls_code.mbz; 8557 params32->common_service = params->common_service; 8558 params32->nport_ww_name = params->nport_ww_name; 8559 params32->class_1 = params->class_1; 8560 params32->class_2 = params->class_2; 8561 params32->class_3 = params->class_3; 8562 bzero(params32->reserved, sizeof (params32->reserved)); 8563 bcopy(params->vendor_version, params32->vendor_version, 8564 sizeof (node->fd_vv)); 8565 params32->node_ww_name = params->node_ww_name; 8566 8567 if (ddi_copyout((void *)params32, 8568 (void *)fcio->fcio_obuf, 8569 sizeof (*params32), mode)) { 8570 rval = EFAULT; 8571 } 8572 8573 kmem_free(params32, sizeof (*params32)); 8574 } else { 8575 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8576 sizeof (*params), mode)) { 8577 rval = EFAULT; 8578 } 8579 } 8580 8581 kmem_free(params, sizeof (*params)); 8582 if (fp_fcio_copyout(fcio, data, mode)) { 8583 rval = EFAULT; 8584 } 8585 break; 8586 } 8587 8588 case FCIO_DEV_LOGOUT: 8589 case FCIO_DEV_LOGIN: 8590 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8591 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8592 rval = EINVAL; 8593 8594 if (fp_fcio_copyout(fcio, data, mode)) { 8595 rval = EFAULT; 8596 } 8597 break; 8598 } 8599 8600 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8601 jcode = JOB_FCIO_LOGIN; 8602 } else { 8603 jcode = JOB_FCIO_LOGOUT; 8604 } 8605 8606 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8607 bcopy(fcio, kfcio, sizeof (*fcio)); 8608 8609 if (kfcio->fcio_ilen) { 8610 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8611 KM_SLEEP); 8612 8613 if (ddi_copyin((void *)fcio->fcio_ibuf, 8614 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8615 mode)) { 8616 rval = EFAULT; 8617 8618 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8619 kmem_free(kfcio, sizeof (*kfcio)); 8620 fcio->fcio_errno = job->job_result; 8621 if (fp_fcio_copyout(fcio, data, mode)) { 8622 rval = EFAULT; 8623 } 8624 break; 8625 } 8626 } 8627 8628 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8629 job->job_private = kfcio; 8630 8631 fctl_enque_job(port, job); 8632 fctl_jobwait(job); 8633 8634 rval = job->job_result; 8635 8636 fcio->fcio_errno = kfcio->fcio_errno; 8637 if (fp_fcio_copyout(fcio, data, mode)) { 8638 rval = EFAULT; 8639 } 8640 8641 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8642 kmem_free(kfcio, sizeof (*kfcio)); 8643 fctl_dealloc_job(job); 8644 break; 8645 8646 case FCIO_GET_STATE: { 8647 la_wwn_t pwwn; 8648 uint32_t state; 8649 fc_remote_port_t *pd; 8650 fctl_ns_req_t *ns_cmd; 8651 8652 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8653 fcio->fcio_olen != sizeof (state) || 8654 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8655 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8656 rval = EINVAL; 8657 break; 8658 } 8659 8660 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8661 rval = EFAULT; 8662 break; 8663 } 8664 fcio->fcio_errno = 0; 8665 8666 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8667 if (pd == NULL) { 8668 mutex_enter(&port->fp_mutex); 8669 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8670 mutex_exit(&port->fp_mutex); 8671 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8672 NULL, NULL, KM_SLEEP); 8673 8674 job->job_counter = 1; 8675 job->job_result = FC_SUCCESS; 8676 8677 ns_cmd = fctl_alloc_ns_cmd( 8678 sizeof (ns_req_gid_pn_t), 8679 sizeof (ns_resp_gid_pn_t), 8680 sizeof (ns_resp_gid_pn_t), 8681 
FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8682 ASSERT(ns_cmd != NULL); 8683 8684 ns_cmd->ns_cmd_code = NS_GID_PN; 8685 ((ns_req_gid_pn_t *) 8686 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8687 8688 ret = fp_ns_query(port, ns_cmd, job, 8689 1, KM_SLEEP); 8690 8691 if (ret != FC_SUCCESS || job->job_result != 8692 FC_SUCCESS) { 8693 if (ret != FC_SUCCESS) { 8694 fcio->fcio_errno = ret; 8695 } else { 8696 fcio->fcio_errno = 8697 job->job_result; 8698 } 8699 rval = EIO; 8700 } else { 8701 state = PORT_DEVICE_INVALID; 8702 } 8703 fctl_free_ns_cmd(ns_cmd); 8704 fctl_dealloc_job(job); 8705 } else { 8706 mutex_exit(&port->fp_mutex); 8707 fcio->fcio_errno = FC_BADWWN; 8708 rval = ENXIO; 8709 } 8710 } else { 8711 mutex_enter(&pd->pd_mutex); 8712 state = pd->pd_state; 8713 mutex_exit(&pd->pd_mutex); 8714 8715 fctl_release_remote_port(pd); 8716 } 8717 8718 if (!rval) { 8719 if (ddi_copyout((void *)&state, 8720 (void *)fcio->fcio_obuf, sizeof (state), 8721 mode)) { 8722 rval = EFAULT; 8723 } 8724 } 8725 if (fp_fcio_copyout(fcio, data, mode)) { 8726 rval = EFAULT; 8727 } 8728 break; 8729 } 8730 8731 case FCIO_DEV_REMOVE: { 8732 la_wwn_t pwwn; 8733 fc_portmap_t *changelist; 8734 fc_remote_port_t *pd; 8735 8736 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8737 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8738 rval = EINVAL; 8739 break; 8740 } 8741 8742 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8743 rval = EFAULT; 8744 break; 8745 } 8746 8747 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8748 if (pd == NULL) { 8749 rval = ENXIO; 8750 fcio->fcio_errno = FC_BADWWN; 8751 if (fp_fcio_copyout(fcio, data, mode)) { 8752 rval = EFAULT; 8753 } 8754 break; 8755 } 8756 8757 mutex_enter(&pd->pd_mutex); 8758 if (pd->pd_ref_count > 1) { 8759 mutex_exit(&pd->pd_mutex); 8760 8761 rval = EBUSY; 8762 fcio->fcio_errno = FC_FAILURE; 8763 fctl_release_remote_port(pd); 8764 8765 if (fp_fcio_copyout(fcio, data, mode)) { 8766 rval = EFAULT; 8767 } 8768 break; 8769 } 8770 mutex_exit(&pd->pd_mutex); 8771 8772 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8773 8774 fctl_copy_portmap(changelist, pd); 8775 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8776 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8777 8778 fctl_release_remote_port(pd); 8779 break; 8780 } 8781 8782 case FCIO_GET_FCODE_REV: { 8783 caddr_t fcode_rev; 8784 fc_fca_pm_t pm; 8785 8786 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8787 fcio->fcio_xfer != FCIO_XFER_READ) { 8788 rval = EINVAL; 8789 break; 8790 } 8791 bzero((caddr_t)&pm, sizeof (pm)); 8792 8793 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8794 8795 pm.pm_cmd_flags = FC_FCA_PM_READ; 8796 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8797 pm.pm_data_len = fcio->fcio_olen; 8798 pm.pm_data_buf = fcode_rev; 8799 8800 ret = port->fp_fca_tran->fca_port_manage( 8801 port->fp_fca_handle, &pm); 8802 8803 if (ret == FC_SUCCESS) { 8804 if (ddi_copyout((void *)fcode_rev, 8805 (void *)fcio->fcio_obuf, 8806 fcio->fcio_olen, mode) == 0) { 8807 if (fp_fcio_copyout(fcio, data, mode)) { 8808 rval = EFAULT; 8809 } 8810 } else { 8811 rval = EFAULT; 8812 } 8813 } else { 8814 /* 8815 * check if buffer was not large enough to obtain 8816 * FCODE version. 
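 * The pm_data_len handed to the FCA was fcio_olen, so a larger value
 * coming back presumably means the FCA wrote back the length it
 * actually needs; returning ENOMEM (rather than EIO) lets the caller
 * retry FCIO_GET_FCODE_REV with a bigger output buffer.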
8817 */ 8818 if (pm.pm_data_len > fcio->fcio_olen) { 8819 rval = ENOMEM; 8820 } else { 8821 rval = EIO; 8822 } 8823 fcio->fcio_errno = ret; 8824 if (fp_fcio_copyout(fcio, data, mode)) { 8825 rval = EFAULT; 8826 } 8827 } 8828 kmem_free(fcode_rev, fcio->fcio_olen); 8829 break; 8830 } 8831 8832 case FCIO_GET_FW_REV: { 8833 caddr_t fw_rev; 8834 fc_fca_pm_t pm; 8835 8836 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8837 fcio->fcio_xfer != FCIO_XFER_READ) { 8838 rval = EINVAL; 8839 break; 8840 } 8841 bzero((caddr_t)&pm, sizeof (pm)); 8842 8843 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8844 8845 pm.pm_cmd_flags = FC_FCA_PM_READ; 8846 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8847 pm.pm_data_len = fcio->fcio_olen; 8848 pm.pm_data_buf = fw_rev; 8849 8850 ret = port->fp_fca_tran->fca_port_manage( 8851 port->fp_fca_handle, &pm); 8852 8853 if (ret == FC_SUCCESS) { 8854 if (ddi_copyout((void *)fw_rev, 8855 (void *)fcio->fcio_obuf, 8856 fcio->fcio_olen, mode) == 0) { 8857 if (fp_fcio_copyout(fcio, data, mode)) { 8858 rval = EFAULT; 8859 } 8860 } else { 8861 rval = EFAULT; 8862 } 8863 } else { 8864 if (fp_fcio_copyout(fcio, data, mode)) { 8865 rval = EFAULT; 8866 } 8867 rval = EIO; 8868 } 8869 kmem_free(fw_rev, fcio->fcio_olen); 8870 break; 8871 } 8872 8873 case FCIO_GET_DUMP_SIZE: { 8874 uint32_t dump_size; 8875 fc_fca_pm_t pm; 8876 8877 if (fcio->fcio_olen != sizeof (dump_size) || 8878 fcio->fcio_xfer != FCIO_XFER_READ) { 8879 rval = EINVAL; 8880 break; 8881 } 8882 bzero((caddr_t)&pm, sizeof (pm)); 8883 pm.pm_cmd_flags = FC_FCA_PM_READ; 8884 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8885 pm.pm_data_len = sizeof (dump_size); 8886 pm.pm_data_buf = (caddr_t)&dump_size; 8887 8888 ret = port->fp_fca_tran->fca_port_manage( 8889 port->fp_fca_handle, &pm); 8890 8891 if (ret == FC_SUCCESS) { 8892 if (ddi_copyout((void *)&dump_size, 8893 (void *)fcio->fcio_obuf, sizeof (dump_size), 8894 mode) == 0) { 8895 if (fp_fcio_copyout(fcio, data, mode)) { 8896 rval = EFAULT; 8897 } 8898 } else { 8899 rval = EFAULT; 8900 } 8901 } else { 8902 fcio->fcio_errno = ret; 8903 rval = EIO; 8904 if (fp_fcio_copyout(fcio, data, mode)) { 8905 rval = EFAULT; 8906 } 8907 } 8908 break; 8909 } 8910 8911 case FCIO_DOWNLOAD_FW: { 8912 caddr_t firmware; 8913 fc_fca_pm_t pm; 8914 8915 if (fcio->fcio_ilen <= 0 || 8916 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8917 rval = EINVAL; 8918 break; 8919 } 8920 8921 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8922 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8923 fcio->fcio_ilen, mode)) { 8924 rval = EFAULT; 8925 kmem_free(firmware, fcio->fcio_ilen); 8926 break; 8927 } 8928 8929 bzero((caddr_t)&pm, sizeof (pm)); 8930 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8931 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8932 pm.pm_data_len = fcio->fcio_ilen; 8933 pm.pm_data_buf = firmware; 8934 8935 ret = port->fp_fca_tran->fca_port_manage( 8936 port->fp_fca_handle, &pm); 8937 8938 kmem_free(firmware, fcio->fcio_ilen); 8939 8940 if (ret != FC_SUCCESS) { 8941 fcio->fcio_errno = ret; 8942 rval = EIO; 8943 if (fp_fcio_copyout(fcio, data, mode)) { 8944 rval = EFAULT; 8945 } 8946 } 8947 break; 8948 } 8949 8950 case FCIO_DOWNLOAD_FCODE: { 8951 caddr_t fcode; 8952 fc_fca_pm_t pm; 8953 8954 if (fcio->fcio_ilen <= 0 || 8955 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8956 rval = EINVAL; 8957 break; 8958 } 8959 8960 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8961 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8962 fcio->fcio_ilen, mode)) { 8963 rval = EFAULT; 8964 kmem_free(fcode, fcio->fcio_ilen); 8965 break; 8966 } 8967 8968 bzero((caddr_t)&pm, 
sizeof (pm)); 8969 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8970 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8971 pm.pm_data_len = fcio->fcio_ilen; 8972 pm.pm_data_buf = fcode; 8973 8974 ret = port->fp_fca_tran->fca_port_manage( 8975 port->fp_fca_handle, &pm); 8976 8977 kmem_free(fcode, fcio->fcio_ilen); 8978 8979 if (ret != FC_SUCCESS) { 8980 fcio->fcio_errno = ret; 8981 rval = EIO; 8982 if (fp_fcio_copyout(fcio, data, mode)) { 8983 rval = EFAULT; 8984 } 8985 } 8986 break; 8987 } 8988 8989 case FCIO_FORCE_DUMP: 8990 ret = port->fp_fca_tran->fca_reset( 8991 port->fp_fca_handle, FC_FCA_CORE); 8992 8993 if (ret != FC_SUCCESS) { 8994 fcio->fcio_errno = ret; 8995 rval = EIO; 8996 if (fp_fcio_copyout(fcio, data, mode)) { 8997 rval = EFAULT; 8998 } 8999 } 9000 break; 9001 9002 case FCIO_GET_DUMP: { 9003 caddr_t dump; 9004 uint32_t dump_size; 9005 fc_fca_pm_t pm; 9006 9007 if (fcio->fcio_xfer != FCIO_XFER_READ) { 9008 rval = EINVAL; 9009 break; 9010 } 9011 bzero((caddr_t)&pm, sizeof (pm)); 9012 9013 pm.pm_cmd_flags = FC_FCA_PM_READ; 9014 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 9015 pm.pm_data_len = sizeof (dump_size); 9016 pm.pm_data_buf = (caddr_t)&dump_size; 9017 9018 ret = port->fp_fca_tran->fca_port_manage( 9019 port->fp_fca_handle, &pm); 9020 9021 if (ret != FC_SUCCESS) { 9022 fcio->fcio_errno = ret; 9023 rval = EIO; 9024 if (fp_fcio_copyout(fcio, data, mode)) { 9025 rval = EFAULT; 9026 } 9027 break; 9028 } 9029 if (fcio->fcio_olen != dump_size) { 9030 fcio->fcio_errno = FC_NOMEM; 9031 rval = EINVAL; 9032 if (fp_fcio_copyout(fcio, data, mode)) { 9033 rval = EFAULT; 9034 } 9035 break; 9036 } 9037 9038 dump = kmem_zalloc(dump_size, KM_SLEEP); 9039 9040 bzero((caddr_t)&pm, sizeof (pm)); 9041 pm.pm_cmd_flags = FC_FCA_PM_READ; 9042 pm.pm_cmd_code = FC_PORT_GET_DUMP; 9043 pm.pm_data_len = dump_size; 9044 pm.pm_data_buf = dump; 9045 9046 ret = port->fp_fca_tran->fca_port_manage( 9047 port->fp_fca_handle, &pm); 9048 9049 if (ret == FC_SUCCESS) { 9050 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 9051 dump_size, mode) == 0) { 9052 if (fp_fcio_copyout(fcio, data, mode)) { 9053 rval = EFAULT; 9054 } 9055 } else { 9056 rval = EFAULT; 9057 } 9058 } else { 9059 fcio->fcio_errno = ret; 9060 rval = EIO; 9061 if (fp_fcio_copyout(fcio, data, mode)) { 9062 rval = EFAULT; 9063 } 9064 } 9065 kmem_free(dump, dump_size); 9066 break; 9067 } 9068 9069 case FCIO_GET_TOPOLOGY: { 9070 uint32_t user_topology; 9071 9072 if (fcio->fcio_xfer != FCIO_XFER_READ || 9073 fcio->fcio_olen != sizeof (user_topology)) { 9074 rval = EINVAL; 9075 break; 9076 } 9077 9078 mutex_enter(&port->fp_mutex); 9079 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 9080 user_topology = FC_TOP_UNKNOWN; 9081 } else { 9082 user_topology = port->fp_topology; 9083 } 9084 mutex_exit(&port->fp_mutex); 9085 9086 if (ddi_copyout((void *)&user_topology, 9087 (void *)fcio->fcio_obuf, sizeof (user_topology), 9088 mode)) { 9089 rval = EFAULT; 9090 } 9091 break; 9092 } 9093 9094 case FCIO_RESET_LINK: { 9095 la_wwn_t pwwn; 9096 9097 /* 9098 * Look at the output buffer field; if this field has zero 9099 * bytes then attempt to reset the local link/loop. If the 9100 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 9101 * and if yes, determine the LFA and reset the remote LIP 9102 * by LINIT ELS. 
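 *
 * Purely illustrative userland sketch, not part of this driver: it
 * assumes the usual fcio-style entry point in which the fcio_t is
 * passed through the FCIO_CMD ioctl, and that "fd" is an already
 * opened fp port device node (both FCIO_CMD and fd are assumptions,
 * not shown in this file).  A zeroed WWN asks for a local link/loop
 * reset:
 *
 *	la_wwn_t wwn;
 *	fcio_t fcio;
 *
 *	bzero(&wwn, sizeof (wwn));
 *	bzero(&fcio, sizeof (fcio));
 *	fcio.fcio_cmd = FCIO_RESET_LINK;
 *	fcio.fcio_xfer = FCIO_XFER_WRITE;
 *	fcio.fcio_ilen = sizeof (wwn);
 *	fcio.fcio_ibuf = (caddr_t)&wwn;
 *	if (ioctl(fd, FCIO_CMD, &fcio) != 0)
 *		perror("FCIO_RESET_LINK");
 *
 * (Includes and full error handling are trimmed for brevity.)  Filling
 * "wwn" with a remote NL_Port's port WWN instead requests the
 * LINIT-based remote loop reset described above; on failure the driver
 * also passes a transport error code back in fcio_errno.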
9103 */ 9104 9105 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 9106 fcio->fcio_ilen != sizeof (pwwn)) { 9107 rval = EINVAL; 9108 break; 9109 } 9110 9111 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9112 sizeof (pwwn), mode)) { 9113 rval = EFAULT; 9114 break; 9115 } 9116 9117 mutex_enter(&port->fp_mutex); 9118 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9119 mutex_exit(&port->fp_mutex); 9120 break; 9121 } 9122 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9123 mutex_exit(&port->fp_mutex); 9124 9125 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9126 if (job == NULL) { 9127 rval = ENOMEM; 9128 break; 9129 } 9130 job->job_counter = 1; 9131 job->job_private = (void *)&pwwn; 9132 9133 fctl_enque_job(port, job); 9134 fctl_jobwait(job); 9135 9136 mutex_enter(&port->fp_mutex); 9137 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9138 mutex_exit(&port->fp_mutex); 9139 9140 if (job->job_result != FC_SUCCESS) { 9141 fcio->fcio_errno = job->job_result; 9142 rval = EIO; 9143 if (fp_fcio_copyout(fcio, data, mode)) { 9144 rval = EFAULT; 9145 } 9146 } 9147 fctl_dealloc_job(job); 9148 break; 9149 } 9150 9151 case FCIO_RESET_HARD: 9152 ret = port->fp_fca_tran->fca_reset( 9153 port->fp_fca_handle, FC_FCA_RESET); 9154 if (ret != FC_SUCCESS) { 9155 fcio->fcio_errno = ret; 9156 rval = EIO; 9157 if (fp_fcio_copyout(fcio, data, mode)) { 9158 rval = EFAULT; 9159 } 9160 } 9161 break; 9162 9163 case FCIO_RESET_HARD_CORE: 9164 ret = port->fp_fca_tran->fca_reset( 9165 port->fp_fca_handle, FC_FCA_RESET_CORE); 9166 if (ret != FC_SUCCESS) { 9167 rval = EIO; 9168 fcio->fcio_errno = ret; 9169 if (fp_fcio_copyout(fcio, data, mode)) { 9170 rval = EFAULT; 9171 } 9172 } 9173 break; 9174 9175 case FCIO_DIAG: { 9176 fc_fca_pm_t pm; 9177 9178 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9179 9180 /* Validate user buffer from ioctl call. */ 9181 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9182 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9183 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9184 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9185 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9186 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9187 rval = EFAULT; 9188 break; 9189 } 9190 9191 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9192 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9193 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9194 fcio->fcio_ilen, mode)) { 9195 rval = EFAULT; 9196 goto fp_fcio_diag_cleanup; 9197 } 9198 } 9199 9200 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9201 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9202 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9203 fcio->fcio_alen, mode)) { 9204 rval = EFAULT; 9205 goto fp_fcio_diag_cleanup; 9206 } 9207 } 9208 9209 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9210 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9211 } 9212 9213 pm.pm_cmd_code = FC_PORT_DIAG; 9214 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9215 9216 ret = port->fp_fca_tran->fca_port_manage( 9217 port->fp_fca_handle, &pm); 9218 9219 if (ret != FC_SUCCESS) { 9220 if (ret == FC_INVALID_REQUEST) { 9221 rval = ENOTTY; 9222 } else { 9223 rval = EIO; 9224 } 9225 9226 fcio->fcio_errno = ret; 9227 if (fp_fcio_copyout(fcio, data, mode)) { 9228 rval = EFAULT; 9229 } 9230 goto fp_fcio_diag_cleanup; 9231 } 9232 9233 /* 9234 * pm_stat_len will contain the number of status bytes 9235 * an FCA driver requires to return the complete status 9236 * of the requested diag operation. 
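 * (pm_stat_len is primed with fcio_olen before the fca_port_manage()
 * call, so a larger value coming back means the FCA wrote back the
 * size it actually needs.)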
If the user buffer 9237 * is not large enough to hold the entire status, We 9238 * copy only the portion of data the fits in the buffer and 9239 * return a ENOMEM to the user application. 9240 */ 9241 if (pm.pm_stat_len > fcio->fcio_olen) { 9242 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9243 "fp:FCIO_DIAG:status buffer too small\n"); 9244 9245 rval = ENOMEM; 9246 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9247 fcio->fcio_olen, mode)) { 9248 rval = EFAULT; 9249 goto fp_fcio_diag_cleanup; 9250 } 9251 } else { 9252 /* 9253 * Copy only data pm_stat_len bytes of data 9254 */ 9255 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9256 pm.pm_stat_len, mode)) { 9257 rval = EFAULT; 9258 goto fp_fcio_diag_cleanup; 9259 } 9260 } 9261 9262 if (fp_fcio_copyout(fcio, data, mode)) { 9263 rval = EFAULT; 9264 } 9265 9266 fp_fcio_diag_cleanup: 9267 if (pm.pm_cmd_buf != NULL) { 9268 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9269 } 9270 if (pm.pm_data_buf != NULL) { 9271 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9272 } 9273 if (pm.pm_stat_buf != NULL) { 9274 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9275 } 9276 9277 break; 9278 } 9279 9280 case FCIO_GET_NODE_ID: { 9281 /* validate parameters */ 9282 if (fcio->fcio_xfer != FCIO_XFER_READ || 9283 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9284 rval = EINVAL; 9285 break; 9286 } 9287 9288 rval = fp_get_rnid(port, data, mode, fcio); 9289 9290 /* ioctl handling is over */ 9291 break; 9292 } 9293 9294 case FCIO_SEND_NODE_ID: { 9295 la_wwn_t pwwn; 9296 9297 /* validate parameters */ 9298 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9299 fcio->fcio_xfer != FCIO_XFER_READ) { 9300 rval = EINVAL; 9301 break; 9302 } 9303 9304 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9305 sizeof (la_wwn_t), mode)) { 9306 rval = EFAULT; 9307 break; 9308 } 9309 9310 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9311 9312 /* ioctl handling is over */ 9313 break; 9314 } 9315 9316 case FCIO_SET_NODE_ID: { 9317 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9318 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9319 rval = EINVAL; 9320 break; 9321 } 9322 9323 rval = fp_set_rnid(port, data, mode, fcio); 9324 break; 9325 } 9326 9327 case FCIO_LINK_STATUS: { 9328 fc_portid_t rls_req; 9329 fc_rls_acc_t *rls_acc; 9330 fc_fca_pm_t pm; 9331 uint32_t dest, src_id; 9332 fp_cmd_t *cmd; 9333 fc_remote_port_t *pd; 9334 uchar_t pd_flags; 9335 9336 /* validate parameters */ 9337 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9338 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9339 fcio->fcio_xfer != FCIO_XFER_RW) { 9340 rval = EINVAL; 9341 break; 9342 } 9343 9344 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9345 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9346 rval = EINVAL; 9347 break; 9348 } 9349 9350 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9351 sizeof (fc_portid_t), mode)) { 9352 rval = EFAULT; 9353 break; 9354 } 9355 9356 9357 /* Determine the destination of the RLS frame */ 9358 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9359 dest = FS_FABRIC_F_PORT; 9360 } else { 9361 dest = rls_req.port_id; 9362 } 9363 9364 mutex_enter(&port->fp_mutex); 9365 src_id = port->fp_port_id.port_id; 9366 mutex_exit(&port->fp_mutex); 9367 9368 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9369 if (dest == 0 || dest == src_id) { 9370 9371 /* Allocate memory for link error status block */ 9372 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9373 ASSERT(rls_acc != NULL); 9374 9375 /* Prepare the port management structure */ 9376 bzero((caddr_t)&pm, 
sizeof (pm)); 9377 9378 pm.pm_cmd_flags = FC_FCA_PM_READ; 9379 pm.pm_cmd_code = FC_PORT_RLS; 9380 pm.pm_data_len = sizeof (*rls_acc); 9381 pm.pm_data_buf = (caddr_t)rls_acc; 9382 9383 /* Get the adapter's link error status block */ 9384 ret = port->fp_fca_tran->fca_port_manage( 9385 port->fp_fca_handle, &pm); 9386 9387 if (ret == FC_SUCCESS) { 9388 /* xfer link status block to userland */ 9389 if (ddi_copyout((void *)rls_acc, 9390 (void *)fcio->fcio_obuf, 9391 sizeof (*rls_acc), mode) == 0) { 9392 if (fp_fcio_copyout(fcio, data, 9393 mode)) { 9394 rval = EFAULT; 9395 } 9396 } else { 9397 rval = EFAULT; 9398 } 9399 } else { 9400 rval = EIO; 9401 fcio->fcio_errno = ret; 9402 if (fp_fcio_copyout(fcio, data, mode)) { 9403 rval = EFAULT; 9404 } 9405 } 9406 9407 kmem_free(rls_acc, sizeof (*rls_acc)); 9408 9409 /* ioctl handling is over */ 9410 break; 9411 } 9412 9413 /* 9414 * Send RLS to the destination port. 9415 * Having RLS frame destination is as FPORT is not yet 9416 * supported and will be implemented in future, if needed. 9417 * Following call to get "pd" will fail if dest is FPORT 9418 */ 9419 pd = fctl_hold_remote_port_by_did(port, dest); 9420 if (pd == NULL) { 9421 fcio->fcio_errno = FC_BADOBJECT; 9422 rval = ENXIO; 9423 if (fp_fcio_copyout(fcio, data, mode)) { 9424 rval = EFAULT; 9425 } 9426 break; 9427 } 9428 9429 mutex_enter(&pd->pd_mutex); 9430 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9431 mutex_exit(&pd->pd_mutex); 9432 fctl_release_remote_port(pd); 9433 9434 fcio->fcio_errno = FC_LOGINREQ; 9435 rval = EINVAL; 9436 if (fp_fcio_copyout(fcio, data, mode)) { 9437 rval = EFAULT; 9438 } 9439 break; 9440 } 9441 ASSERT(pd->pd_login_count >= 1); 9442 mutex_exit(&pd->pd_mutex); 9443 9444 /* 9445 * Allocate job structure and set job_code as DUMMY, 9446 * because we will not go through the job thread. 9447 * Instead fp_sendcmd() is called directly here. 9448 */ 9449 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9450 NULL, NULL, KM_SLEEP); 9451 ASSERT(job != NULL); 9452 9453 job->job_counter = 1; 9454 9455 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9456 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9457 if (cmd == NULL) { 9458 fcio->fcio_errno = FC_NOMEM; 9459 rval = ENOMEM; 9460 9461 fctl_release_remote_port(pd); 9462 9463 fctl_dealloc_job(job); 9464 if (fp_fcio_copyout(fcio, data, mode)) { 9465 rval = EFAULT; 9466 } 9467 break; 9468 } 9469 9470 /* Allocate memory for link error status block */ 9471 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9472 9473 mutex_enter(&port->fp_mutex); 9474 mutex_enter(&pd->pd_mutex); 9475 9476 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9477 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9478 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9479 cmd->cmd_retry_count = 1; 9480 cmd->cmd_ulp_pkt = NULL; 9481 9482 fp_rls_init(cmd, job); 9483 9484 job->job_private = (void *)rls_acc; 9485 9486 pd_flags = pd->pd_flags; 9487 pd->pd_flags = PD_ELS_IN_PROGRESS; 9488 9489 mutex_exit(&pd->pd_mutex); 9490 mutex_exit(&port->fp_mutex); 9491 9492 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9493 fctl_jobwait(job); 9494 9495 fcio->fcio_errno = job->job_result; 9496 if (job->job_result == FC_SUCCESS) { 9497 ASSERT(pd != NULL); 9498 /* 9499 * link error status block is now available. 
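 * It carries the standard RLS counters: link failures, loss of
 * synchronization, loss of signal, primitive sequence protocol
 * errors, invalid transmission words and invalid CRCs.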
9500 * Copy it to userland 9501 */ 9502 ASSERT(job->job_private == (void *)rls_acc); 9503 if (ddi_copyout((void *)rls_acc, 9504 (void *)fcio->fcio_obuf, 9505 sizeof (*rls_acc), mode) == 0) { 9506 if (fp_fcio_copyout(fcio, data, 9507 mode)) { 9508 rval = EFAULT; 9509 } 9510 } else { 9511 rval = EFAULT; 9512 } 9513 } else { 9514 rval = EIO; 9515 } 9516 } else { 9517 rval = EIO; 9518 fp_free_pkt(cmd); 9519 } 9520 9521 if (rval) { 9522 mutex_enter(&port->fp_mutex); 9523 mutex_enter(&pd->pd_mutex); 9524 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9525 pd->pd_flags = pd_flags; 9526 } 9527 mutex_exit(&pd->pd_mutex); 9528 mutex_exit(&port->fp_mutex); 9529 } 9530 9531 fctl_release_remote_port(pd); 9532 fctl_dealloc_job(job); 9533 kmem_free(rls_acc, sizeof (*rls_acc)); 9534 9535 if (fp_fcio_copyout(fcio, data, mode)) { 9536 rval = EFAULT; 9537 } 9538 break; 9539 } 9540 9541 case FCIO_NS: { 9542 fc_ns_cmd_t *ns_req; 9543 fc_ns_cmd32_t *ns_req32; 9544 fctl_ns_req_t *ns_cmd; 9545 9546 if (use32 == B_TRUE) { 9547 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9548 rval = EINVAL; 9549 break; 9550 } 9551 9552 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9553 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9554 9555 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9556 sizeof (*ns_req32), mode)) { 9557 rval = EFAULT; 9558 kmem_free(ns_req, sizeof (*ns_req)); 9559 kmem_free(ns_req32, sizeof (*ns_req32)); 9560 break; 9561 } 9562 9563 ns_req->ns_flags = ns_req32->ns_flags; 9564 ns_req->ns_cmd = ns_req32->ns_cmd; 9565 ns_req->ns_req_len = ns_req32->ns_req_len; 9566 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9567 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9568 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9569 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9570 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9571 9572 kmem_free(ns_req32, sizeof (*ns_req32)); 9573 } else { 9574 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9575 rval = EINVAL; 9576 break; 9577 } 9578 9579 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9580 9581 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9582 sizeof (fc_ns_cmd_t), mode)) { 9583 rval = EFAULT; 9584 kmem_free(ns_req, sizeof (*ns_req)); 9585 break; 9586 } 9587 } 9588 9589 if (ns_req->ns_req_len <= 0) { 9590 rval = EINVAL; 9591 kmem_free(ns_req, sizeof (*ns_req)); 9592 break; 9593 } 9594 9595 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9596 ASSERT(job != NULL); 9597 9598 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9599 ns_req->ns_resp_len, ns_req->ns_resp_len, 9600 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9601 ASSERT(ns_cmd != NULL); 9602 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9603 9604 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9605 ns_cmd->ns_gan_max = 1; 9606 ns_cmd->ns_gan_index = 0; 9607 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9608 } 9609 9610 if (ddi_copyin(ns_req->ns_req_payload, 9611 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9612 rval = EFAULT; 9613 fctl_free_ns_cmd(ns_cmd); 9614 fctl_dealloc_job(job); 9615 kmem_free(ns_req, sizeof (*ns_req)); 9616 break; 9617 } 9618 9619 job->job_private = (void *)ns_cmd; 9620 fctl_enque_job(port, job); 9621 fctl_jobwait(job); 9622 rval = job->job_result; 9623 9624 if (rval == FC_SUCCESS) { 9625 if (ns_req->ns_resp_len) { 9626 if (ddi_copyout(ns_cmd->ns_data_buf, 9627 ns_req->ns_resp_payload, 9628 ns_cmd->ns_data_len, mode)) { 9629 rval = EFAULT; 9630 fctl_free_ns_cmd(ns_cmd); 9631 fctl_dealloc_job(job); 9632 kmem_free(ns_req, sizeof (*ns_req)); 9633 break; 9634 } 9635 } 9636 } else { 9637 rval = EIO; 9638 } 9639 
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9640 fctl_free_ns_cmd(ns_cmd); 9641 fctl_dealloc_job(job); 9642 kmem_free(ns_req, sizeof (*ns_req)); 9643 9644 if (fp_fcio_copyout(fcio, data, mode)) { 9645 rval = EFAULT; 9646 } 9647 break; 9648 } 9649 9650 default: 9651 rval = ENOTTY; 9652 break; 9653 } 9654 9655 /* 9656 * If set, reset the EXCL busy bit to 9657 * receive other exclusive access commands 9658 */ 9659 mutex_enter(&port->fp_mutex); 9660 if (port->fp_flag & FP_EXCL_BUSY) { 9661 port->fp_flag &= ~FP_EXCL_BUSY; 9662 } 9663 mutex_exit(&port->fp_mutex); 9664 9665 return (rval); 9666 } 9667 9668 9669 /* 9670 * This function assumes that the response length 9671 * is same regardless of data model (LP32 or LP64) 9672 * which is true for all the ioctls currently 9673 * supported. 9674 */ 9675 static int 9676 fp_copyout(void *from, void *to, size_t len, int mode) 9677 { 9678 return (ddi_copyout(from, to, len, mode)); 9679 } 9680 9681 /* 9682 * This function does the set rnid 9683 */ 9684 static int 9685 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9686 { 9687 int rval = 0; 9688 fc_rnid_t *rnid; 9689 fc_fca_pm_t pm; 9690 9691 /* Allocate memory for node id block */ 9692 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9693 9694 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9695 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9696 kmem_free(rnid, sizeof (fc_rnid_t)); 9697 return (EFAULT); 9698 } 9699 9700 /* Prepare the port management structure */ 9701 bzero((caddr_t)&pm, sizeof (pm)); 9702 9703 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9704 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9705 pm.pm_data_len = sizeof (*rnid); 9706 pm.pm_data_buf = (caddr_t)rnid; 9707 9708 /* Get the adapter's node data */ 9709 rval = port->fp_fca_tran->fca_port_manage( 9710 port->fp_fca_handle, &pm); 9711 9712 if (rval != FC_SUCCESS) { 9713 fcio->fcio_errno = rval; 9714 rval = EIO; 9715 if (fp_fcio_copyout(fcio, data, mode)) { 9716 rval = EFAULT; 9717 } 9718 } else { 9719 mutex_enter(&port->fp_mutex); 9720 /* copy to the port structure */ 9721 bcopy(rnid, &port->fp_rnid_params, 9722 sizeof (port->fp_rnid_params)); 9723 mutex_exit(&port->fp_mutex); 9724 } 9725 9726 kmem_free(rnid, sizeof (fc_rnid_t)); 9727 9728 if (rval != FC_SUCCESS) { 9729 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9730 } 9731 9732 return (rval); 9733 } 9734 9735 /* 9736 * This function does the local pwwn get rnid 9737 */ 9738 static int 9739 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9740 { 9741 fc_rnid_t *rnid; 9742 fc_fca_pm_t pm; 9743 int rval = 0; 9744 uint32_t ret; 9745 9746 /* Allocate memory for rnid data block */ 9747 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9748 9749 mutex_enter(&port->fp_mutex); 9750 if (port->fp_rnid_init == 1) { 9751 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9752 mutex_exit(&port->fp_mutex); 9753 /* xfer node info to userland */ 9754 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9755 sizeof (*rnid), mode) == 0) { 9756 if (fp_fcio_copyout(fcio, data, mode)) { 9757 rval = EFAULT; 9758 } 9759 } else { 9760 rval = EFAULT; 9761 } 9762 9763 kmem_free(rnid, sizeof (fc_rnid_t)); 9764 9765 if (rval != FC_SUCCESS) { 9766 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9767 rval); 9768 } 9769 9770 return (rval); 9771 } 9772 mutex_exit(&port->fp_mutex); 9773 9774 /* Prepare the port management structure */ 9775 bzero((caddr_t)&pm, sizeof (pm)); 9776 9777 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9778 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9779 pm.pm_data_len = sizeof (fc_rnid_t); 9780 pm.pm_data_buf = (caddr_t)rnid; 9781 9782 /* Get the adapter's node data */ 9783 ret = port->fp_fca_tran->fca_port_manage( 9784 port->fp_fca_handle, 9785 &pm); 9786 9787 if (ret == FC_SUCCESS) { 9788 /* initialize in the port_info */ 9789 mutex_enter(&port->fp_mutex); 9790 port->fp_rnid_init = 1; 9791 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9792 mutex_exit(&port->fp_mutex); 9793 9794 /* xfer node info to userland */ 9795 if (ddi_copyout((void *)rnid, 9796 (void *)fcio->fcio_obuf, 9797 sizeof (*rnid), mode) == 0) { 9798 if (fp_fcio_copyout(fcio, data, 9799 mode)) { 9800 rval = EFAULT; 9801 } 9802 } else { 9803 rval = EFAULT; 9804 } 9805 } else { 9806 rval = EIO; 9807 fcio->fcio_errno = ret; 9808 if (fp_fcio_copyout(fcio, data, mode)) { 9809 rval = EFAULT; 9810 } 9811 } 9812 9813 kmem_free(rnid, sizeof (fc_rnid_t)); 9814 9815 if (rval != FC_SUCCESS) { 9816 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9817 } 9818 9819 return (rval); 9820 } 9821 9822 static int 9823 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9824 la_wwn_t *pwwn) 9825 { 9826 int rval = 0; 9827 fc_remote_port_t *pd; 9828 fp_cmd_t *cmd; 9829 job_request_t *job; 9830 la_els_rnid_acc_t *rnid_acc; 9831 9832 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9833 if (pd == NULL) { 9834 /* 9835 * We can safely assume that the destination port 9836 * is logged in. Either the user land will explicitly 9837 * login before issuing RNID ioctl or the device would 9838 * have been configured, meaning already logged in. 9839 */ 9840 9841 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9842 9843 return (ENXIO); 9844 } 9845 /* 9846 * Allocate job structure and set job_code as DUMMY, 9847 * because we will not go thorugh the job thread. 9848 * Instead fp_sendcmd() is called directly here. 9849 */ 9850 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9851 NULL, NULL, KM_SLEEP); 9852 9853 ASSERT(job != NULL); 9854 9855 job->job_counter = 1; 9856 9857 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9858 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9859 if (cmd == NULL) { 9860 fcio->fcio_errno = FC_NOMEM; 9861 rval = ENOMEM; 9862 9863 fctl_dealloc_job(job); 9864 if (fp_fcio_copyout(fcio, data, mode)) { 9865 rval = EFAULT; 9866 } 9867 9868 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9869 9870 return (rval); 9871 } 9872 9873 /* Allocate memory for node id accept block */ 9874 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9875 9876 mutex_enter(&port->fp_mutex); 9877 mutex_enter(&pd->pd_mutex); 9878 9879 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9880 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9881 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9882 cmd->cmd_retry_count = 1; 9883 cmd->cmd_ulp_pkt = NULL; 9884 9885 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9886 9887 job->job_private = (void *)rnid_acc; 9888 9889 pd->pd_flags = PD_ELS_IN_PROGRESS; 9890 9891 mutex_exit(&pd->pd_mutex); 9892 mutex_exit(&port->fp_mutex); 9893 9894 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9895 fctl_jobwait(job); 9896 fcio->fcio_errno = job->job_result; 9897 if (job->job_result == FC_SUCCESS) { 9898 int rnid_cnt; 9899 ASSERT(pd != NULL); 9900 /* 9901 * node id block is now available. 
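 * rnid_cnt below is the size of the RNID LS_ACC payload actually
 * returned: the ls_code field plus the common RNID header plus the
 * common- and specific-identification lengths that header reports.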
9902 * Copy it to userland 9903 */ 9904 ASSERT(job->job_private == (void *)rnid_acc); 9905 9906 /* get the response length */ 9907 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9908 rnid_acc->hdr.cmn_len + 9909 rnid_acc->hdr.specific_len; 9910 9911 if (fcio->fcio_olen < rnid_cnt) { 9912 rval = EINVAL; 9913 } else if (ddi_copyout((void *)rnid_acc, 9914 (void *)fcio->fcio_obuf, 9915 rnid_cnt, mode) == 0) { 9916 if (fp_fcio_copyout(fcio, data, 9917 mode)) { 9918 rval = EFAULT; 9919 } 9920 } else { 9921 rval = EFAULT; 9922 } 9923 } else { 9924 rval = EIO; 9925 } 9926 } else { 9927 rval = EIO; 9928 if (pd) { 9929 mutex_enter(&pd->pd_mutex); 9930 pd->pd_flags = PD_IDLE; 9931 mutex_exit(&pd->pd_mutex); 9932 } 9933 fp_free_pkt(cmd); 9934 } 9935 9936 fctl_dealloc_job(job); 9937 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9938 9939 if (fp_fcio_copyout(fcio, data, mode)) { 9940 rval = EFAULT; 9941 } 9942 9943 if (rval != FC_SUCCESS) { 9944 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9945 } 9946 9947 return (rval); 9948 } 9949 9950 /* 9951 * Copy out to userland 9952 */ 9953 static int 9954 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9955 { 9956 int rval; 9957 9958 #ifdef _MULTI_DATAMODEL 9959 switch (ddi_model_convert_from(mode & FMODELS)) { 9960 case DDI_MODEL_ILP32: { 9961 struct fcio32 fcio32; 9962 9963 fcio32.fcio_xfer = fcio->fcio_xfer; 9964 fcio32.fcio_cmd = fcio->fcio_cmd; 9965 fcio32.fcio_flags = fcio->fcio_flags; 9966 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9967 fcio32.fcio_ilen = fcio->fcio_ilen; 9968 fcio32.fcio_ibuf = 9969 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9970 fcio32.fcio_olen = fcio->fcio_olen; 9971 fcio32.fcio_obuf = 9972 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9973 fcio32.fcio_alen = fcio->fcio_alen; 9974 fcio32.fcio_abuf = 9975 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9976 fcio32.fcio_errno = fcio->fcio_errno; 9977 9978 rval = ddi_copyout((void *)&fcio32, (void *)data, 9979 sizeof (struct fcio32), mode); 9980 break; 9981 } 9982 case DDI_MODEL_NONE: 9983 rval = ddi_copyout((void *)fcio, (void *)data, 9984 sizeof (fcio_t), mode); 9985 break; 9986 } 9987 #else 9988 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9989 #endif 9990 9991 return (rval); 9992 } 9993 9994 9995 static void 9996 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9997 { 9998 uint32_t listlen; 9999 fc_portmap_t *changelist; 10000 10001 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10002 ASSERT(port->fp_topology == FC_TOP_PT_PT); 10003 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10004 10005 listlen = 0; 10006 changelist = NULL; 10007 10008 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10009 if (port->fp_statec_busy > 1) { 10010 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10011 } 10012 } 10013 mutex_exit(&port->fp_mutex); 10014 10015 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10016 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10017 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10018 listlen, listlen, KM_SLEEP); 10019 10020 mutex_enter(&port->fp_mutex); 10021 } else { 10022 ASSERT(changelist == NULL && listlen == 0); 10023 mutex_enter(&port->fp_mutex); 10024 if (--port->fp_statec_busy == 0) { 10025 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10026 } 10027 } 10028 } 10029 10030 static int 10031 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10032 { 10033 int rval; 10034 int count; 10035 int index; 10036 int num_devices; 10037 fc_remote_node_t *node; 10038 
fc_port_dev_t *devlist; 10039 struct pwwn_hash *head; 10040 fc_remote_port_t *pd; 10041 10042 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10043 10044 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10045 10046 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 10047 10048 for (count = index = 0; index < pwwn_table_size; index++) { 10049 head = &port->fp_pwwn_table[index]; 10050 pd = head->pwwn_head; 10051 while (pd != NULL) { 10052 mutex_enter(&pd->pd_mutex); 10053 if (pd->pd_state == PORT_DEVICE_INVALID) { 10054 mutex_exit(&pd->pd_mutex); 10055 pd = pd->pd_wwn_hnext; 10056 continue; 10057 } 10058 10059 devlist[count].dev_state = pd->pd_state; 10060 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10061 devlist[count].dev_did = pd->pd_port_id; 10062 devlist[count].dev_did.priv_lilp_posit = 10063 (uint8_t)(index & 0xff); 10064 bcopy((caddr_t)pd->pd_fc4types, 10065 (caddr_t)devlist[count].dev_type, 10066 sizeof (pd->pd_fc4types)); 10067 10068 bcopy((caddr_t)&pd->pd_port_name, 10069 (caddr_t)&devlist[count].dev_pwwn, 10070 sizeof (la_wwn_t)); 10071 10072 node = pd->pd_remote_nodep; 10073 mutex_exit(&pd->pd_mutex); 10074 10075 if (node) { 10076 mutex_enter(&node->fd_mutex); 10077 bcopy((caddr_t)&node->fd_node_name, 10078 (caddr_t)&devlist[count].dev_nwwn, 10079 sizeof (la_wwn_t)); 10080 mutex_exit(&node->fd_mutex); 10081 } 10082 count++; 10083 if (count >= num_devices) { 10084 goto found; 10085 } 10086 } 10087 } 10088 found: 10089 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10090 sizeof (count), mode)) { 10091 rval = FC_FAILURE; 10092 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10093 sizeof (fc_port_dev_t) * num_devices, mode)) { 10094 rval = FC_FAILURE; 10095 } else { 10096 rval = FC_SUCCESS; 10097 } 10098 10099 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 10100 10101 return (rval); 10102 } 10103 10104 10105 /* 10106 * Handle Fabric ONLINE 10107 */ 10108 static void 10109 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10110 { 10111 int index; 10112 int rval; 10113 int dbg_count; 10114 int count = 0; 10115 char ww_name[17]; 10116 uint32_t d_id; 10117 uint32_t listlen; 10118 fctl_ns_req_t *ns_cmd; 10119 struct pwwn_hash *head; 10120 fc_remote_port_t *pd; 10121 fc_remote_port_t *npd; 10122 fc_portmap_t *changelist; 10123 10124 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10125 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10126 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10127 10128 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10129 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10130 0, KM_SLEEP); 10131 10132 ASSERT(ns_cmd != NULL); 10133 10134 ns_cmd->ns_cmd_code = NS_GID_PN; 10135 10136 /* 10137 * Check if orphans are showing up now 10138 */ 10139 if (port->fp_orphan_count) { 10140 fc_orphan_t *orp; 10141 fc_orphan_t *norp = NULL; 10142 fc_orphan_t *prev = NULL; 10143 10144 for (orp = port->fp_orphan_list; orp; orp = norp) { 10145 norp = orp->orp_next; 10146 mutex_exit(&port->fp_mutex); 10147 orp->orp_nscan++; 10148 10149 job->job_counter = 1; 10150 job->job_result = FC_SUCCESS; 10151 10152 ((ns_req_gid_pn_t *) 10153 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10154 ((ns_resp_gid_pn_t *) 10155 ns_cmd->ns_data_buf)->pid.port_id = 0; 10156 ((ns_resp_gid_pn_t *) 10157 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10158 10159 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10160 if (rval == FC_SUCCESS) { 10161 d_id = 10162 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10163 pd = 
fp_create_remote_port_by_ns(port, 10164 d_id, KM_SLEEP); 10165 10166 if (pd != NULL) { 10167 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10168 10169 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10170 0, NULL, "N_x Port with D_ID=%x," 10171 " PWWN=%s reappeared in fabric", 10172 d_id, ww_name); 10173 10174 mutex_enter(&port->fp_mutex); 10175 if (prev) { 10176 prev->orp_next = orp->orp_next; 10177 } else { 10178 ASSERT(orp == 10179 port->fp_orphan_list); 10180 port->fp_orphan_list = 10181 orp->orp_next; 10182 } 10183 port->fp_orphan_count--; 10184 mutex_exit(&port->fp_mutex); 10185 kmem_free(orp, sizeof (*orp)); 10186 count++; 10187 10188 mutex_enter(&pd->pd_mutex); 10189 pd->pd_flags = PD_ELS_MARK; 10190 10191 mutex_exit(&pd->pd_mutex); 10192 } else { 10193 prev = orp; 10194 } 10195 } else { 10196 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10197 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10198 10199 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10200 NULL, 10201 " Port WWN %s removed from orphan" 10202 " list after %d scans", ww_name, 10203 orp->orp_nscan); 10204 10205 mutex_enter(&port->fp_mutex); 10206 if (prev) { 10207 prev->orp_next = orp->orp_next; 10208 } else { 10209 ASSERT(orp == 10210 port->fp_orphan_list); 10211 port->fp_orphan_list = 10212 orp->orp_next; 10213 } 10214 port->fp_orphan_count--; 10215 mutex_exit(&port->fp_mutex); 10216 10217 kmem_free(orp, sizeof (*orp)); 10218 } else { 10219 prev = orp; 10220 } 10221 } 10222 mutex_enter(&port->fp_mutex); 10223 } 10224 } 10225 10226 /* 10227 * Walk the Port WWN hash table, reestablish LOGIN 10228 * if a LOGIN is already performed on a particular 10229 * device; Any failure to LOGIN should mark the 10230 * port device OLD. 10231 */ 10232 for (index = 0; index < pwwn_table_size; index++) { 10233 head = &port->fp_pwwn_table[index]; 10234 npd = head->pwwn_head; 10235 10236 while ((pd = npd) != NULL) { 10237 la_wwn_t *pwwn; 10238 10239 npd = pd->pd_wwn_hnext; 10240 10241 /* 10242 * Don't count in the port devices that are new 10243 * unless the total number of devices visible 10244 * through this port is less than FP_MAX_DEVICES 10245 */ 10246 mutex_enter(&pd->pd_mutex); 10247 if (port->fp_dev_count >= FP_MAX_DEVICES || 10248 (port->fp_options & FP_TARGET_MODE)) { 10249 if (pd->pd_type == PORT_DEVICE_NEW || 10250 pd->pd_flags == PD_ELS_MARK || 10251 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10252 mutex_exit(&pd->pd_mutex); 10253 continue; 10254 } 10255 } else { 10256 if (pd->pd_flags == PD_ELS_MARK || 10257 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10258 mutex_exit(&pd->pd_mutex); 10259 continue; 10260 } 10261 pd->pd_type = PORT_DEVICE_OLD; 10262 } 10263 count++; 10264 10265 /* 10266 * Consult with the name server about D_ID changes 10267 */ 10268 job->job_counter = 1; 10269 job->job_result = FC_SUCCESS; 10270 10271 ((ns_req_gid_pn_t *) 10272 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10273 ((ns_resp_gid_pn_t *) 10274 ns_cmd->ns_data_buf)->pid.port_id = 0; 10275 10276 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10277 pid.priv_lilp_posit = 0; 10278 10279 pwwn = &pd->pd_port_name; 10280 pd->pd_flags = PD_ELS_MARK; 10281 10282 mutex_exit(&pd->pd_mutex); 10283 mutex_exit(&port->fp_mutex); 10284 10285 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10286 if (rval != FC_SUCCESS) { 10287 fc_wwn_to_str(pwwn, ww_name); 10288 10289 mutex_enter(&pd->pd_mutex); 10290 d_id = pd->pd_port_id.port_id; 10291 pd->pd_type = PORT_DEVICE_DELETE; 10292 mutex_exit(&pd->pd_mutex); 10293 10294 FP_TRACE(FP_NHEAD1(3, 0), 10295 "fp_fabric_online: PD " 10296 "disappeared; 
d_id=%x, PWWN=%s", 10297 d_id, ww_name); 10298 10299 FP_TRACE(FP_NHEAD2(9, 0), 10300 "N_x Port with D_ID=%x, PWWN=%s" 10301 " disappeared from fabric", d_id, 10302 ww_name); 10303 10304 mutex_enter(&port->fp_mutex); 10305 continue; 10306 } 10307 10308 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10309 10310 mutex_enter(&port->fp_mutex); 10311 mutex_enter(&pd->pd_mutex); 10312 if (d_id != pd->pd_port_id.port_id) { 10313 fctl_delist_did_table(port, pd); 10314 fc_wwn_to_str(pwwn, ww_name); 10315 10316 FP_TRACE(FP_NHEAD2(9, 0), 10317 "D_ID of a device with PWWN %s changed." 10318 " New D_ID = %x, OLD D_ID = %x", ww_name, 10319 d_id, pd->pd_port_id.port_id); 10320 10321 pd->pd_port_id.port_id = BE_32(d_id); 10322 pd->pd_type = PORT_DEVICE_CHANGED; 10323 fctl_enlist_did_table(port, pd); 10324 } 10325 mutex_exit(&pd->pd_mutex); 10326 10327 } 10328 } 10329 10330 if (ns_cmd) { 10331 fctl_free_ns_cmd(ns_cmd); 10332 } 10333 10334 listlen = 0; 10335 changelist = NULL; 10336 if (count) { 10337 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10338 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10339 mutex_exit(&port->fp_mutex); 10340 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10341 mutex_enter(&port->fp_mutex); 10342 } 10343 10344 dbg_count = 0; 10345 10346 job->job_counter = count; 10347 10348 for (index = 0; index < pwwn_table_size; index++) { 10349 head = &port->fp_pwwn_table[index]; 10350 npd = head->pwwn_head; 10351 10352 while ((pd = npd) != NULL) { 10353 npd = pd->pd_wwn_hnext; 10354 10355 mutex_enter(&pd->pd_mutex); 10356 if (pd->pd_flags != PD_ELS_MARK) { 10357 mutex_exit(&pd->pd_mutex); 10358 continue; 10359 } 10360 10361 dbg_count++; 10362 10363 /* 10364 * If it is already marked deletion, nothing 10365 * else to do. 10366 */ 10367 if (pd->pd_type == PORT_DEVICE_DELETE) { 10368 pd->pd_type = PORT_DEVICE_OLD; 10369 10370 mutex_exit(&pd->pd_mutex); 10371 mutex_exit(&port->fp_mutex); 10372 fp_jobdone(job); 10373 mutex_enter(&port->fp_mutex); 10374 10375 continue; 10376 } 10377 10378 /* 10379 * If it is freshly discovered out of 10380 * the orphan list, nothing else to do 10381 */ 10382 if (pd->pd_type == PORT_DEVICE_NEW) { 10383 pd->pd_flags = PD_IDLE; 10384 10385 mutex_exit(&pd->pd_mutex); 10386 mutex_exit(&port->fp_mutex); 10387 fp_jobdone(job); 10388 mutex_enter(&port->fp_mutex); 10389 10390 continue; 10391 } 10392 10393 pd->pd_flags = PD_IDLE; 10394 d_id = pd->pd_port_id.port_id; 10395 10396 /* 10397 * Explicitly mark all devices OLD; successful 10398 * PLOGI should reset this to either NO_CHANGE 10399 * or CHANGED. 
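 * The PLOGI below is sent with FP_CMD_PLOGI_RETAIN so that any
 * existing login state for the remote port is retained; when
 * fp_port_login() fails outright, fp_jobdone() is called here
 * directly so the job counter still drains.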
10400 */ 10401 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10402 pd->pd_type = PORT_DEVICE_OLD; 10403 } 10404 10405 mutex_exit(&pd->pd_mutex); 10406 mutex_exit(&port->fp_mutex); 10407 10408 rval = fp_port_login(port, d_id, job, 10409 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10410 10411 if (rval != FC_SUCCESS) { 10412 fp_jobdone(job); 10413 } 10414 mutex_enter(&port->fp_mutex); 10415 } 10416 } 10417 mutex_exit(&port->fp_mutex); 10418 10419 ASSERT(dbg_count == count); 10420 fp_jobwait(job); 10421 10422 mutex_enter(&port->fp_mutex); 10423 10424 ASSERT(port->fp_statec_busy > 0); 10425 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10426 if (port->fp_statec_busy > 1) { 10427 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10428 } 10429 } 10430 mutex_exit(&port->fp_mutex); 10431 } else { 10432 ASSERT(port->fp_statec_busy > 0); 10433 if (port->fp_statec_busy > 1) { 10434 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10435 } 10436 mutex_exit(&port->fp_mutex); 10437 } 10438 10439 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10440 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10441 10442 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10443 listlen, listlen, KM_SLEEP); 10444 10445 mutex_enter(&port->fp_mutex); 10446 } else { 10447 ASSERT(changelist == NULL && listlen == 0); 10448 mutex_enter(&port->fp_mutex); 10449 if (--port->fp_statec_busy == 0) { 10450 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10451 } 10452 } 10453 } 10454 10455 10456 /* 10457 * Fill out device list for userland ioctl in private loop 10458 */ 10459 static int 10460 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10461 { 10462 int rval; 10463 int count; 10464 int index; 10465 int num_devices; 10466 fc_remote_node_t *node; 10467 fc_port_dev_t *devlist; 10468 int lilp_device_count; 10469 fc_lilpmap_t *lilp_map; 10470 uchar_t *alpa_list; 10471 10472 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10473 10474 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10475 if (port->fp_total_devices > port->fp_dev_count && 10476 num_devices >= port->fp_total_devices) { 10477 job_request_t *job; 10478 10479 mutex_exit(&port->fp_mutex); 10480 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10481 job->job_counter = 1; 10482 10483 mutex_enter(&port->fp_mutex); 10484 fp_get_loopmap(port, job); 10485 mutex_exit(&port->fp_mutex); 10486 10487 fp_jobwait(job); 10488 fctl_dealloc_job(job); 10489 } else { 10490 mutex_exit(&port->fp_mutex); 10491 } 10492 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10493 10494 mutex_enter(&port->fp_mutex); 10495 10496 /* 10497 * Applications are accustomed to getting the device list in 10498 * LILP map order. The HBA firmware usually returns the device 10499 * map in the LILP map order and diagnostic applications would 10500 * prefer to receive in the device list in that order too 10501 */ 10502 lilp_map = &port->fp_lilp_map; 10503 alpa_list = &lilp_map->lilp_alpalist[0]; 10504 10505 /* 10506 * the length field corresponds to the offset in the LILP frame 10507 * which begins with 1. The thing to note here is that the 10508 * lilp_device_count is 1 more than fp->fp_total_devices since 10509 * the host adapter's alpa also shows up in the lilp map. We 10510 * don't however return details of the host adapter since 10511 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10512 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10513 * ioctl to obtain details about the host adapter port. 
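 *
 * For example, a private loop with three remote NL_Ports reports
 * fp_total_devices == 3 but lilp_length == 4, since the local port's
 * ALPA is also listed in lilp_alpalist[]; the loop below simply skips
 * that entry because fctl_get_remote_port_by_did() returns NULL for it.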
10514 */ 10515 lilp_device_count = lilp_map->lilp_length; 10516 10517 for (count = index = 0; index < lilp_device_count && 10518 count < num_devices; index++) { 10519 uint32_t d_id; 10520 fc_remote_port_t *pd; 10521 10522 d_id = alpa_list[index]; 10523 10524 mutex_exit(&port->fp_mutex); 10525 pd = fctl_get_remote_port_by_did(port, d_id); 10526 mutex_enter(&port->fp_mutex); 10527 10528 if (pd != NULL) { 10529 mutex_enter(&pd->pd_mutex); 10530 10531 if (pd->pd_state == PORT_DEVICE_INVALID) { 10532 mutex_exit(&pd->pd_mutex); 10533 continue; 10534 } 10535 10536 devlist[count].dev_state = pd->pd_state; 10537 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10538 devlist[count].dev_did = pd->pd_port_id; 10539 devlist[count].dev_did.priv_lilp_posit = 10540 (uint8_t)(index & 0xff); 10541 bcopy((caddr_t)pd->pd_fc4types, 10542 (caddr_t)devlist[count].dev_type, 10543 sizeof (pd->pd_fc4types)); 10544 10545 bcopy((caddr_t)&pd->pd_port_name, 10546 (caddr_t)&devlist[count].dev_pwwn, 10547 sizeof (la_wwn_t)); 10548 10549 node = pd->pd_remote_nodep; 10550 mutex_exit(&pd->pd_mutex); 10551 10552 if (node) { 10553 mutex_enter(&node->fd_mutex); 10554 bcopy((caddr_t)&node->fd_node_name, 10555 (caddr_t)&devlist[count].dev_nwwn, 10556 sizeof (la_wwn_t)); 10557 mutex_exit(&node->fd_mutex); 10558 } 10559 count++; 10560 } 10561 } 10562 10563 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10564 sizeof (count), mode)) { 10565 rval = FC_FAILURE; 10566 } 10567 10568 else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10569 sizeof (fc_port_dev_t) * num_devices, mode)) { 10570 rval = FC_FAILURE; 10571 } else { 10572 rval = FC_SUCCESS; 10573 } 10574 10575 kmem_free(devlist, sizeof (*devlist) * num_devices); 10576 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10577 10578 return (rval); 10579 } 10580 10581 10582 /* 10583 * Completion function for responses to unsolicited commands 10584 */ 10585 static void 10586 fp_unsol_intr(fc_packet_t *pkt) 10587 { 10588 fp_cmd_t *cmd; 10589 fc_local_port_t *port; 10590 10591 cmd = pkt->pkt_ulp_private; 10592 port = cmd->cmd_port; 10593 10594 mutex_enter(&port->fp_mutex); 10595 port->fp_out_fpcmds--; 10596 mutex_exit(&port->fp_mutex); 10597 10598 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10599 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10600 "couldn't post response to unsolicited request;" 10601 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10602 pkt->pkt_resp_fhdr.rx_id); 10603 } 10604 10605 if (cmd == port->fp_els_resp_pkt) { 10606 mutex_enter(&port->fp_mutex); 10607 port->fp_els_resp_pkt_busy = 0; 10608 mutex_exit(&port->fp_mutex); 10609 return; 10610 } 10611 10612 fp_free_pkt(cmd); 10613 } 10614 10615 10616 /* 10617 * Solicited LINIT ELS completion function 10618 */ 10619 static void 10620 fp_linit_intr(fc_packet_t *pkt) 10621 { 10622 fp_cmd_t *cmd; 10623 job_request_t *job; 10624 fc_linit_resp_t acc; 10625 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 10626 10627 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10628 10629 mutex_enter(&cmd->cmd_port->fp_mutex); 10630 cmd->cmd_port->fp_out_fpcmds--; 10631 mutex_exit(&cmd->cmd_port->fp_mutex); 10632 10633 if (FP_IS_PKT_ERROR(pkt)) { 10634 (void) fp_common_intr(pkt, 1); 10635 return; 10636 } 10637 10638 job = cmd->cmd_job; 10639 10640 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc, 10641 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10642 if (acc.status != FC_LINIT_SUCCESS) { 10643 job->job_result = FC_FAILURE; 10644 } else { 10645 job->job_result = FC_SUCCESS; 10646 } 10647 10648 fp_iodone(cmd);
10649 } 10650 10651 10652 /* 10653 * Decode the unsolicited request. For FC-4 Device and Link data frames, 10654 * notify the registered ULP of this FC-4 type right here. For unsolicited 10655 * ELS requests, submit a request to the job_handler thread to work on it. 10656 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10657 * and to save interrupt time by handing the processing of unsolicited ELS 10658 * requests off to the job_handler thread. 10659 */ 10660 static void 10661 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10662 { 10663 uchar_t r_ctl; 10664 uchar_t ls_code; 10665 uint32_t s_id; 10666 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10667 uint32_t cb_arg; 10668 fp_cmd_t *cmd; 10669 fc_local_port_t *port; 10670 job_request_t *job; 10671 fc_remote_port_t *pd; 10672 10673 port = port_handle; 10674 10675 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10676 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10677 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10678 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10679 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10680 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10681 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10682 buf->ub_buffer[0]); 10683 10684 if (type & 0x80000000) { 10685 /* 10686 * Huh? Nothing much can be done without 10687 * a valid buffer, so just exit. 10688 */ 10689 return; 10690 } 10691 /* 10692 * If unsolicited interrupts arrive while it isn't 10693 * safe to handle unsolicited callbacks, drop them; 10694 * yes, drop them on the floor. 10695 */ 10696 mutex_enter(&port->fp_mutex); 10697 port->fp_active_ubs++; 10698 if ((port->fp_soft_state & 10699 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10700 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10701 10702 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10703 "not ONLINE. 
s_id=%x, d_id=%x, type=%x, " 10704 "seq_id=%x, ox_id=%x, rx_id=%x" 10705 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10706 buf->ub_frame.type, buf->ub_frame.seq_id, 10707 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10708 10709 ASSERT(port->fp_active_ubs > 0); 10710 if (--(port->fp_active_ubs) == 0) { 10711 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10712 } 10713 10714 mutex_exit(&port->fp_mutex); 10715 10716 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10717 1, &buf->ub_token); 10718 10719 return; 10720 } 10721 10722 r_ctl = buf->ub_frame.r_ctl; 10723 s_id = buf->ub_frame.s_id; 10724 if (port->fp_active_ubs == 1) { 10725 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10726 } 10727 10728 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10729 port->fp_statec_busy) { 10730 mutex_exit(&port->fp_mutex); 10731 pd = fctl_get_remote_port_by_did(port, s_id); 10732 if (pd) { 10733 mutex_enter(&pd->pd_mutex); 10734 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10735 FP_TRACE(FP_NHEAD1(3, 0), 10736 "LOGO for LOGGED IN D_ID %x", 10737 buf->ub_frame.s_id); 10738 pd->pd_state = PORT_DEVICE_VALID; 10739 } 10740 mutex_exit(&pd->pd_mutex); 10741 } 10742 10743 mutex_enter(&port->fp_mutex); 10744 ASSERT(port->fp_active_ubs > 0); 10745 if (--(port->fp_active_ubs) == 0) { 10746 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10747 } 10748 mutex_exit(&port->fp_mutex); 10749 10750 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10751 1, &buf->ub_token); 10752 10753 FP_TRACE(FP_NHEAD1(3, 0), 10754 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10755 buf->ub_frame.s_id); 10756 return; 10757 } 10758 10759 if (port->fp_els_resp_pkt_busy == 0) { 10760 if (r_ctl == R_CTL_ELS_REQ) { 10761 ls_code = buf->ub_buffer[0]; 10762 10763 switch (ls_code) { 10764 case LA_ELS_PLOGI: 10765 case LA_ELS_FLOGI: 10766 port->fp_els_resp_pkt_busy = 1; 10767 mutex_exit(&port->fp_mutex); 10768 fp_i_handle_unsol_els(port, buf); 10769 10770 mutex_enter(&port->fp_mutex); 10771 ASSERT(port->fp_active_ubs > 0); 10772 if (--(port->fp_active_ubs) == 0) { 10773 port->fp_soft_state &= 10774 ~FP_SOFT_IN_UNSOL_CB; 10775 } 10776 mutex_exit(&port->fp_mutex); 10777 port->fp_fca_tran->fca_ub_release( 10778 port->fp_fca_handle, 1, &buf->ub_token); 10779 10780 return; 10781 case LA_ELS_RSCN: 10782 if (++(port)->fp_rscn_count == 10783 FC_INVALID_RSCN_COUNT) { 10784 ++(port)->fp_rscn_count; 10785 } 10786 rscn_count = port->fp_rscn_count; 10787 break; 10788 10789 default: 10790 break; 10791 } 10792 } 10793 } else if ((r_ctl == R_CTL_ELS_REQ) && 10794 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10795 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10796 ++port->fp_rscn_count; 10797 } 10798 rscn_count = port->fp_rscn_count; 10799 } 10800 10801 mutex_exit(&port->fp_mutex); 10802 10803 switch (r_ctl & R_CTL_ROUTING) { 10804 case R_CTL_DEVICE_DATA: 10805 /* 10806 * If the unsolicited buffer is a CT IU, 10807 * have the job_handler thread work on it. 10808 */ 10809 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10810 break; 10811 } 10812 /* FALLTHROUGH */ 10813 10814 case R_CTL_FC4_SVC: { 10815 int sendup = 0; 10816 10817 /* 10818 * If a LOGIN isn't performed before this request 10819 * shut the door on this port with a reply that a 10820 * LOGIN is required. We make an exception however 10821 * for IP broadcast packets and pass them through 10822 * to the IP ULP(s) to handle broadcast requests. 
10823 * This is not a problem for private loop devices 10824 * but for fabric topologies we don't log into the 10825 * remote ports during port initialization and 10826 * the ULPs need to log into requesting ports on 10827 * demand. 10828 */ 10829 pd = fctl_get_remote_port_by_did(port, s_id); 10830 if (pd) { 10831 mutex_enter(&pd->pd_mutex); 10832 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10833 sendup++; 10834 } 10835 mutex_exit(&pd->pd_mutex); 10836 } else if ((pd == NULL) && 10837 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10838 (buf->ub_frame.d_id == 0xffffff || 10839 buf->ub_frame.d_id == 0x00)) { 10840 /* brodacst IP frame - so sendup via job thread */ 10841 break; 10842 } 10843 10844 /* 10845 * Send all FC4 services via job thread too 10846 */ 10847 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10848 break; 10849 } 10850 10851 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10852 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10853 return; 10854 } 10855 10856 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10857 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10858 0, KM_NOSLEEP, pd); 10859 if (cmd != NULL) { 10860 fp_els_rjt_init(port, cmd, buf, 10861 FC_ACTION_NON_RETRYABLE, 10862 FC_REASON_LOGIN_REQUIRED, NULL); 10863 10864 if (fp_sendcmd(port, cmd, 10865 port->fp_fca_handle) != FC_SUCCESS) { 10866 fp_free_pkt(cmd); 10867 } 10868 } 10869 } 10870 10871 mutex_enter(&port->fp_mutex); 10872 ASSERT(port->fp_active_ubs > 0); 10873 if (--(port->fp_active_ubs) == 0) { 10874 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10875 } 10876 mutex_exit(&port->fp_mutex); 10877 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10878 1, &buf->ub_token); 10879 10880 return; 10881 } 10882 10883 default: 10884 break; 10885 } 10886 10887 /* 10888 * Submit a Request to the job_handler thread to work 10889 * on the unsolicited request. The potential side effect 10890 * of this is that the unsolicited buffer takes a little 10891 * longer to get released but we save interrupt time in 10892 * the bargain. 10893 */ 10894 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10895 10896 /* 10897 * One way that the rscn_count will get used is described below : 10898 * 10899 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10900 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10901 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10902 * by overloading the job_cb_arg to pass the rscn_count 10903 * 4. When one of the routines processing the RSCN picks it up (ex: 10904 * fp_validate_rscn_page()), it passes this count in the map 10905 * structure (as part of the map_rscn_info structure member) to the 10906 * ULPs. 10907 * 5. When ULPs make calls back to the transport (example interfaces for 10908 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10909 * can now pass back this count as part of the fc_packet's 10910 * pkt_ulp_rscn_count member. fcp does this currently. 10911 * 6. When transport gets a call to transport a command on the wire, it 10912 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10913 * fc_packet. If there is, it will match that info with the current 10914 * rscn_count on that instance of the port. If they don't match up 10915 * then there was a newer RSCN. The ULP gets back an error code which 10916 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN. 10917 * 7. At this point the ULP is free to make up its own mind as to how to 10918 * handle this. 
Currently, fcp will reset its retry counters and keep 10919 * retrying the operation it was doing in anticipation of getting a 10920 * new state change call back for the new RSCN. 10921 */ 10922 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10923 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10924 if (job == NULL) { 10925 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10926 "couldn't submit a job to the thread, failing.."); 10927 10928 mutex_enter(&port->fp_mutex); 10929 10930 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10931 --port->fp_rscn_count; 10932 } 10933 10934 ASSERT(port->fp_active_ubs > 0); 10935 if (--(port->fp_active_ubs) == 0) { 10936 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10937 } 10938 10939 mutex_exit(&port->fp_mutex); 10940 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10941 1, &buf->ub_token); 10942 10943 return; 10944 } 10945 job->job_private = (void *)buf; 10946 fctl_enque_job(port, job); 10947 } 10948 10949 10950 /* 10951 * Handle unsolicited requests 10952 */ 10953 static void 10954 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10955 job_request_t *job) 10956 { 10957 uchar_t r_ctl; 10958 uchar_t ls_code; 10959 uint32_t s_id; 10960 fp_cmd_t *cmd; 10961 fc_remote_port_t *pd; 10962 fp_unsol_spec_t *ub_spec; 10963 10964 r_ctl = buf->ub_frame.r_ctl; 10965 s_id = buf->ub_frame.s_id; 10966 10967 switch (r_ctl & R_CTL_ROUTING) { 10968 case R_CTL_EXTENDED_SVC: 10969 if (r_ctl != R_CTL_ELS_REQ) { 10970 break; 10971 } 10972 10973 ls_code = buf->ub_buffer[0]; 10974 switch (ls_code) { 10975 case LA_ELS_LOGO: 10976 case LA_ELS_ADISC: 10977 case LA_ELS_PRLO: 10978 pd = fctl_get_remote_port_by_did(port, s_id); 10979 if (pd == NULL) { 10980 if (!FC_IS_REAL_DEVICE(s_id)) { 10981 break; 10982 } 10983 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10984 break; 10985 } 10986 if ((cmd = fp_alloc_pkt(port, 10987 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10988 NULL)) == NULL) { 10989 /* 10990 * Can this actually fail when 10991 * given KM_SLEEP? (Could be used 10992 * this way in a number of places.) 10993 */ 10994 break; 10995 } 10996 10997 fp_els_rjt_init(port, cmd, buf, 10998 FC_ACTION_NON_RETRYABLE, 10999 FC_REASON_INVALID_LINK_CTRL, job); 11000 11001 if (fp_sendcmd(port, cmd, 11002 port->fp_fca_handle) != FC_SUCCESS) { 11003 fp_free_pkt(cmd); 11004 } 11005 11006 break; 11007 } 11008 if (ls_code == LA_ELS_LOGO) { 11009 fp_handle_unsol_logo(port, buf, pd, job); 11010 } else if (ls_code == LA_ELS_ADISC) { 11011 fp_handle_unsol_adisc(port, buf, pd, job); 11012 } else { 11013 fp_handle_unsol_prlo(port, buf, pd, job); 11014 } 11015 break; 11016 11017 case LA_ELS_PLOGI: 11018 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 11019 break; 11020 11021 case LA_ELS_FLOGI: 11022 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 11023 break; 11024 11025 case LA_ELS_RSCN: 11026 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 11027 break; 11028 11029 default: 11030 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11031 ub_spec->port = port; 11032 ub_spec->buf = buf; 11033 11034 (void) taskq_dispatch(port->fp_taskq, 11035 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11036 return; 11037 } 11038 break; 11039 11040 case R_CTL_BASIC_SVC: 11041 /* 11042 * The unsolicited basic link services could be ABTS 11043 * and RMC (Or even a NOP). Just BA_RJT them until 11044 * such time there arises a need to handle them more 11045 * carefully. 
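Note that only Class 1 and Class 2 frames get the BA_RJT below; unsolicited Class 3 basic link services are silently dropped.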
11046 */ 11047 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11048 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 11049 0, KM_SLEEP, NULL); 11050 if (cmd != NULL) { 11051 fp_ba_rjt_init(port, cmd, buf, job); 11052 if (fp_sendcmd(port, cmd, 11053 port->fp_fca_handle) != FC_SUCCESS) { 11054 fp_free_pkt(cmd); 11055 } 11056 } 11057 } 11058 break; 11059 11060 case R_CTL_DEVICE_DATA: 11061 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 11062 /* 11063 * Mostly this is of type FC_TYPE_FC_SERVICES. 11064 * As we don't like any Unsolicited FC services 11065 * requests, we would do well to RJT them as 11066 * well. 11067 */ 11068 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11069 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11070 0, KM_SLEEP, NULL); 11071 if (cmd != NULL) { 11072 fp_els_rjt_init(port, cmd, buf, 11073 FC_ACTION_NON_RETRYABLE, 11074 FC_REASON_INVALID_LINK_CTRL, job); 11075 11076 if (fp_sendcmd(port, cmd, 11077 port->fp_fca_handle) != 11078 FC_SUCCESS) { 11079 fp_free_pkt(cmd); 11080 } 11081 } 11082 } 11083 break; 11084 } 11085 /* FALLTHROUGH */ 11086 11087 case R_CTL_FC4_SVC: 11088 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11089 ub_spec->port = port; 11090 ub_spec->buf = buf; 11091 11092 (void) taskq_dispatch(port->fp_taskq, 11093 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11094 return; 11095 11096 case R_CTL_LINK_CTL: 11097 /* 11098 * Turn deaf ear on unsolicited link control frames. 11099 * Typical unsolicited link control Frame is an LCR 11100 * (to reset End to End credit to the default login 11101 * value and abort current sequences for all classes) 11102 * An intelligent microcode/firmware should handle 11103 * this transparently at its level and not pass all 11104 * the way up here. 11105 * 11106 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 11107 * or F_BSY. P_RJT is chosen to be the most appropriate 11108 * at this time. 11109 */ 11110 /* FALLTHROUGH */ 11111 11112 default: 11113 /* 11114 * Just reject everything else as an invalid request. 11115 */ 11116 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11117 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11118 0, KM_SLEEP, NULL); 11119 if (cmd != NULL) { 11120 fp_els_rjt_init(port, cmd, buf, 11121 FC_ACTION_NON_RETRYABLE, 11122 FC_REASON_INVALID_LINK_CTRL, job); 11123 11124 if (fp_sendcmd(port, cmd, 11125 port->fp_fca_handle) != FC_SUCCESS) { 11126 fp_free_pkt(cmd); 11127 } 11128 } 11129 } 11130 break; 11131 } 11132 11133 mutex_enter(&port->fp_mutex); 11134 ASSERT(port->fp_active_ubs > 0); 11135 if (--(port->fp_active_ubs) == 0) { 11136 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11137 } 11138 mutex_exit(&port->fp_mutex); 11139 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11140 1, &buf->ub_token); 11141 } 11142 11143 11144 /* 11145 * Prepare a BA_RJT and send it over. 
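The reject is built as an outbound basic link service response (R_CTL_LS_BA_RJT) carrying FC_REASON_CMD_UNSUPPORTED with no additional explanation.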
11146 */ 11147 static void 11148 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11149 job_request_t *job) 11150 { 11151 fc_packet_t *pkt; 11152 la_ba_rjt_t payload; 11153 11154 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11155 11156 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11157 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11158 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11159 cmd->cmd_retry_count = 1; 11160 cmd->cmd_ulp_pkt = NULL; 11161 11162 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11163 cmd->cmd_job = job; 11164 11165 pkt = &cmd->cmd_pkt; 11166 11167 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11168 11169 payload.reserved = 0; 11170 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11171 payload.explanation = FC_EXPLN_NONE; 11172 payload.vendor = 0; 11173 11174 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11175 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11176 } 11177 11178 11179 /* 11180 * Prepare an LS_RJT and send it over 11181 */ 11182 static void 11183 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11184 uchar_t action, uchar_t reason, job_request_t *job) 11185 { 11186 fc_packet_t *pkt; 11187 la_els_rjt_t payload; 11188 11189 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11190 11191 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11192 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11193 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11194 cmd->cmd_retry_count = 1; 11195 cmd->cmd_ulp_pkt = NULL; 11196 11197 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11198 cmd->cmd_job = job; 11199 11200 pkt = &cmd->cmd_pkt; 11201 11202 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11203 11204 payload.ls_code.ls_code = LA_ELS_RJT; 11205 payload.ls_code.mbz = 0; 11206 payload.action = action; 11207 payload.reason = reason; 11208 payload.reserved = 0; 11209 payload.vu = 0; 11210 11211 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11212 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11213 } 11214 11215 /* 11216 * Function: fp_prlo_acc_init 11217 * 11218 * Description: Initializes a Link Service Accept for a PRLO. 11219 * 11220 * Arguments: *port Local port through which the PRLO was 11221 * received. 11222 * *pd Remote port that sent the PRLO. 11223 * *buf Unsolicited buffer containing the PRLO 11224 * request. 11225 * job Job request. 11226 * sleep Allocation mode. 11227 * 11228 * Return Value: *cmd Command containing the response. 11229 * 11230 * Context: Depends on the parameter sleep. 11231 */ 11232 fp_cmd_t * 11233 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11234 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11235 { 11236 fp_cmd_t *cmd; 11237 fc_packet_t *pkt; 11238 la_els_prlo_t *req; 11239 size_t len; 11240 uint16_t flags; 11241 11242 req = (la_els_prlo_t *)buf->ub_buffer; 11243 len = (size_t)ntohs(req->payload_length); 11244 11245 /* 11246 * The payload of the accept to a PRLO has to be an exact match of 11247 * the payload of the request (with the exception of the LS code). 11248 */ 11249 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11250 11251 if (cmd) { 11252 /* 11253 * The fp command was successfully allocated. 
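Set it up as an outbound ELS response and echo the PRLO payload back with the LS code replaced by an ACC and the response code set to 'request executed'.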
11254 */ 11255 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11256 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11257 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11258 cmd->cmd_retry_count = 1; 11259 cmd->cmd_ulp_pkt = NULL; 11260 11261 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11262 cmd->cmd_job = job; 11263 11264 pkt = &cmd->cmd_pkt; 11265 11266 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11267 FC_TYPE_EXTENDED_LS); 11268 11269 /* The code is overwritten for the copy. */ 11270 req->ls_code = LA_ELS_ACC; 11271 /* Response code is set. */ 11272 flags = ntohs(req->flags); 11273 flags &= ~SP_RESP_CODE_MASK; 11274 flags |= SP_RESP_CODE_REQ_EXECUTED; 11275 req->flags = htons(flags); 11276 11277 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req, 11278 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11279 } 11280 return (cmd); 11281 } 11282 11283 /* 11284 * Prepare an ACC response to an ELS request 11285 */ 11286 static void 11287 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11288 job_request_t *job) 11289 { 11290 fc_packet_t *pkt; 11291 ls_code_t payload; 11292 11293 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11294 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11295 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11296 cmd->cmd_retry_count = 1; 11297 cmd->cmd_ulp_pkt = NULL; 11298 11299 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11300 cmd->cmd_job = job; 11301 11302 pkt = &cmd->cmd_pkt; 11303 11304 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11305 11306 payload.ls_code = LA_ELS_ACC; 11307 payload.mbz = 0; 11308 11309 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11310 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11311 } 11312 11313 /* 11314 * Unsolicited PRLO handler 11315 * 11316 * A Process Logout should be handled by the ULP that established it. However, 11317 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11318 * when a device implicitly logs out an initiator (for whatever reason) and 11319 * tries to get that initiator to re-establish the connection (PLOGI and PRLI). 11320 * The logical thing to do for the device would be to send a LOGO in response 11321 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11322 * a PRLO instead. 11323 * 11324 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11325 * think that the Port Login has been lost. If we follow the Fibre Channel 11326 * protocol to the letter, a PRLI should be sent after accepting the PRLO. If 11327 * the Port Login has also been lost, the remote port will reject the PRLI 11328 * indicating that we must PLOGI first. The initiator will then turn around and 11329 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11330 * is defined don't allow this scenario to be followed easily. If FCP were to 11331 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11332 * needed would be received by FCP. FCP would then have to tell the transport 11333 * (fp) to PLOGI. The problem is, the transport would still think the Port 11334 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11335 * if you think it's not necessary". To work around that difficulty, the PRLO 11336 * is treated by the transport as a LOGO. The downside is that a Port Login 11337 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11338 * has nothing to do with the PRLO) may be impacted. 
However, this is a 11339 * scenario very unlikely to happen. As of today the only ULP in Leadville 11340 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11341 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11342 * unlikely). 11343 */ 11344 static void 11345 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11346 fc_remote_port_t *pd, job_request_t *job) 11347 { 11348 int busy; 11349 int rval; 11350 int retain; 11351 fp_cmd_t *cmd; 11352 fc_portmap_t *listptr; 11353 boolean_t tolerance; 11354 la_els_prlo_t *req; 11355 11356 req = (la_els_prlo_t *)buf->ub_buffer; 11357 11358 if ((ntohs(req->payload_length) != 11359 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11360 (req->page_length != sizeof (service_parameter_page_t))) { 11361 /* 11362 * We are being very restrictive: only one page per 11363 * payload. If that is not the case we reject the ELS, although 11364 * ideally we should reply indicating that we handle only a 11365 * single page per PRLO. 11366 */ 11367 goto fp_reject_prlo; 11368 } 11369 11370 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11371 /* 11372 * This is in case the payload advertises a size bigger than 11373 * what it really is. 11374 */ 11375 goto fp_reject_prlo; 11376 } 11377 11378 mutex_enter(&port->fp_mutex); 11379 busy = port->fp_statec_busy; 11380 mutex_exit(&port->fp_mutex); 11381 11382 mutex_enter(&pd->pd_mutex); 11383 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11384 if (!busy) { 11385 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11386 pd->pd_state == PORT_DEVICE_INVALID || 11387 pd->pd_flags == PD_ELS_IN_PROGRESS || 11388 pd->pd_type == PORT_DEVICE_OLD) { 11389 busy++; 11390 } 11391 } 11392 11393 if (busy) { 11394 mutex_exit(&pd->pd_mutex); 11395 11396 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11397 "pd=%p - busy", 11398 pd->pd_port_id.port_id, pd); 11399 11400 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11401 goto fp_reject_prlo; 11402 } 11403 } else { 11404 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11405 11406 if (tolerance) { 11407 fctl_tc_reset(&pd->pd_logo_tc); 11408 retain = 0; 11409 pd->pd_state = PORT_DEVICE_INVALID; 11410 } 11411 11412 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11413 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11414 tolerance, retain); 11415 11416 pd->pd_aux_flags |= PD_LOGGED_OUT; 11417 mutex_exit(&pd->pd_mutex); 11418 11419 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11420 if (cmd == NULL) { 11421 return; 11422 } 11423 11424 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11425 if (rval != FC_SUCCESS) { 11426 fp_free_pkt(cmd); 11427 return; 11428 } 11429 11430 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11431 11432 if (retain) { 11433 fp_unregister_login(pd); 11434 fctl_copy_portmap(listptr, pd); 11435 } else { 11436 uint32_t d_id; 11437 char ww_name[17]; 11438 11439 mutex_enter(&pd->pd_mutex); 11440 d_id = pd->pd_port_id.port_id; 11441 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11442 mutex_exit(&pd->pd_mutex); 11443 11444 FP_TRACE(FP_NHEAD2(9, 0), 11445 "N_x Port with D_ID=%x, PWWN=%s logged out" 11446 " %d times in %d us; Giving up", d_id, ww_name, 11447 FC_LOGO_TOLERANCE_LIMIT, 11448 FC_LOGO_TOLERANCE_TIME_LIMIT); 11449 11450 fp_fillout_old_map(listptr, pd, 0); 11451 listptr->map_type = PORT_DEVICE_OLD; 11452 } 11453 11454 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11455 return; 11456 } 11457 11458 fp_reject_prlo: 11459 11460 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11461 if (cmd != NULL) { 11462 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11463 FC_REASON_INVALID_LINK_CTRL, job); 11464 11465 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11466 fp_free_pkt(cmd); 11467 } 11468 } 11469 } 11470 11471 /* 11472 * Unsolicited LOGO handler 11473 */ 11474 static void 11475 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11476 fc_remote_port_t *pd, job_request_t *job) 11477 { 11478 int busy; 11479 int rval; 11480 int retain; 11481 fp_cmd_t *cmd; 11482 fc_portmap_t *listptr; 11483 boolean_t tolerance; 11484 11485 mutex_enter(&port->fp_mutex); 11486 busy = port->fp_statec_busy; 11487 mutex_exit(&port->fp_mutex); 11488 11489 mutex_enter(&pd->pd_mutex); 11490 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11491 if (!busy) { 11492 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11493 pd->pd_state == PORT_DEVICE_INVALID || 11494 pd->pd_flags == PD_ELS_IN_PROGRESS || 11495 pd->pd_type == PORT_DEVICE_OLD) { 11496 busy++; 11497 } 11498 } 11499 11500 if (busy) { 11501 mutex_exit(&pd->pd_mutex); 11502 11503 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11504 "pd=%p - busy", 11505 pd->pd_port_id.port_id, pd); 11506 11507 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11508 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11509 0, KM_SLEEP, pd); 11510 if (cmd != NULL) { 11511 fp_els_rjt_init(port, cmd, buf, 11512 FC_ACTION_NON_RETRYABLE, 11513 FC_REASON_INVALID_LINK_CTRL, job); 11514 11515 if (fp_sendcmd(port, cmd, 11516 port->fp_fca_handle) != FC_SUCCESS) { 11517 fp_free_pkt(cmd); 11518 } 11519 } 11520 } 11521 } else { 11522 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11523 11524 if (tolerance) { 11525 fctl_tc_reset(&pd->pd_logo_tc); 11526 retain = 0; 11527 pd->pd_state = PORT_DEVICE_INVALID; 11528 } 11529 11530 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11531 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11532 tolerance, retain); 11533 11534 pd->pd_aux_flags |= PD_LOGGED_OUT; 11535 mutex_exit(&pd->pd_mutex); 11536 11537 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11538 KM_SLEEP, pd); 11539 if (cmd == NULL) { 11540 return; 11541 } 11542 11543 fp_els_acc_init(port, cmd, buf, job); 11544 11545 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11546 if (rval != FC_SUCCESS) { 11547 fp_free_pkt(cmd); 11548 return; 11549 } 11550 11551 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11552 11553 if (retain) { 11554 job_request_t *job; 11555 fctl_ns_req_t *ns_cmd; 11556 11557 /* 11558 * when get LOGO, first try to get PID from nameserver 11559 * if failed, then we do not need 11560 * send PLOGI to that remote port 11561 */ 11562 job = fctl_alloc_job( 11563 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11564 11565 if (job != NULL) { 11566 ns_cmd = fctl_alloc_ns_cmd( 11567 sizeof (ns_req_gid_pn_t), 11568 sizeof (ns_resp_gid_pn_t), 11569 sizeof (ns_resp_gid_pn_t), 11570 0, KM_SLEEP); 11571 if (ns_cmd != NULL) { 11572 int ret; 11573 job->job_result = FC_SUCCESS; 11574 ns_cmd->ns_cmd_code = NS_GID_PN; 11575 ((ns_req_gid_pn_t *) 11576 (ns_cmd->ns_cmd_buf))->pwwn = 11577 pd->pd_port_name; 11578 ret = fp_ns_query( 11579 port, ns_cmd, job, 1, KM_SLEEP); 11580 if ((ret != FC_SUCCESS) || 11581 (job->job_result != FC_SUCCESS)) { 11582 fctl_free_ns_cmd(ns_cmd); 11583 fctl_dealloc_job(job); 11584 FP_TRACE(FP_NHEAD2(9, 0), 11585 "NS query failed,", 11586 " delete pd"); 11587 goto delete_pd; 11588 } 11589 fctl_free_ns_cmd(ns_cmd); 11590 } 11591 fctl_dealloc_job(job); 11592 } 11593 fp_unregister_login(pd); 11594 fctl_copy_portmap(listptr, pd); 11595 } else { 11596 uint32_t d_id; 11597 char ww_name[17]; 11598 11599 delete_pd: 11600 mutex_enter(&pd->pd_mutex); 11601 d_id = pd->pd_port_id.port_id; 11602 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11603 mutex_exit(&pd->pd_mutex); 11604 11605 FP_TRACE(FP_NHEAD2(9, 0), 11606 "N_x Port with D_ID=%x, PWWN=%s logged out" 11607 " %d times in %d us; Giving up", d_id, ww_name, 11608 FC_LOGO_TOLERANCE_LIMIT, 11609 FC_LOGO_TOLERANCE_TIME_LIMIT); 11610 11611 fp_fillout_old_map(listptr, pd, 0); 11612 listptr->map_type = PORT_DEVICE_OLD; 11613 } 11614 11615 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11616 } 11617 } 11618 11619 11620 /* 11621 * Perform general purpose preparation of a response to an unsolicited request 11622 */ 11623 static void 11624 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11625 uchar_t r_ctl, uchar_t type) 11626 { 11627 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11628 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11629 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11630 pkt->pkt_cmd_fhdr.type = type; 11631 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11632 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11633 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11634 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11635 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11636 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11637 pkt->pkt_cmd_fhdr.ro = 0; 11638 pkt->pkt_cmd_fhdr.rsvd = 0; 11639 pkt->pkt_comp = fp_unsol_intr; 11640 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11641 pkt->pkt_ub_resp_token = (opaque_t)buf; 11642 } 11643 11644 /* 11645 * 
Immediate handling of unsolicited FLOGI and PLOGI requests. In the 11646 * early development days of public loop soc+ firmware, numerous problems 11647 * were encountered (the details are undocumented and history now) which 11648 * led to the birth of this function. 11649 * 11650 * If a pre-allocated unsolicited response packet is free, send out an 11651 * immediate response, otherwise submit the request to the port thread 11652 * to do the deferred processing. 11653 */ 11654 static void 11655 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11656 { 11657 int sent; 11658 int f_port; 11659 int do_acc; 11660 fp_cmd_t *cmd; 11661 la_els_logi_t *payload; 11662 fc_remote_port_t *pd; 11663 char dww_name[17]; 11664 11665 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11666 11667 cmd = port->fp_els_resp_pkt; 11668 11669 mutex_enter(&port->fp_mutex); 11670 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11671 mutex_exit(&port->fp_mutex); 11672 11673 switch (buf->ub_buffer[0]) { 11674 case LA_ELS_PLOGI: { 11675 int small; 11676 11677 payload = (la_els_logi_t *)buf->ub_buffer; 11678 11679 f_port = FP_IS_F_PORT(payload-> 11680 common_service.cmn_features) ? 1 : 0; 11681 11682 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11683 &payload->nport_ww_name); 11684 pd = fctl_get_remote_port_by_pwwn(port, 11685 &payload->nport_ww_name); 11686 if (pd) { 11687 mutex_enter(&pd->pd_mutex); 11688 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11689 /* 11690 * Most likely this means a cross login is in 11691 * progress or a device about to be yanked out. 11692 * Only accept the plogi if my wwn is smaller. 11693 */ 11694 if (pd->pd_type == PORT_DEVICE_OLD) { 11695 sent = 1; 11696 } 11697 /* 11698 * Stop plogi request (if any) 11699 * attempt from local side to speedup 11700 * the discovery progress. 11701 * Mark the pd as PD_PLOGI_RECEPIENT. 11702 */ 11703 if (f_port == 0 && small < 0) { 11704 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11705 } 11706 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11707 11708 mutex_exit(&pd->pd_mutex); 11709 11710 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11711 "Unsol PLOGI received. PD still exists in the " 11712 "PWWN list. pd=%p PWWN=%s, sent=%x", 11713 pd, dww_name, sent); 11714 11715 if (f_port == 0 && small < 0) { 11716 FP_TRACE(FP_NHEAD1(3, 0), 11717 "fp_i_handle_unsol_els: Mark the pd" 11718 " as plogi recipient, pd=%p, PWWN=%s" 11719 ", sent=%x", 11720 pd, dww_name, sent); 11721 } 11722 } else { 11723 sent = 0; 11724 } 11725 11726 /* 11727 * To avoid Login collisions, accept only if my WWN 11728 * is smaller than the requester (A curious side note 11729 * would be that this rule may not satisfy the PLOGIs 11730 * initiated by the switch from not-so-well known 11731 * ports such as 0xFFFC41) 11732 */ 11733 if ((f_port == 0 && small < 0) || 11734 (((small > 0 && do_acc) || 11735 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11736 if (fp_is_class_supported(port->fp_cos, 11737 buf->ub_class) == FC_FAILURE) { 11738 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11739 cmd->cmd_pkt.pkt_cmdlen = 11740 sizeof (la_els_rjt_t); 11741 cmd->cmd_pkt.pkt_rsplen = 0; 11742 fp_els_rjt_init(port, cmd, buf, 11743 FC_ACTION_NON_RETRYABLE, 11744 FC_REASON_CLASS_NOT_SUPP, NULL); 11745 FP_TRACE(FP_NHEAD1(3, 0), 11746 "fp_i_handle_unsol_els: " 11747 "Unsupported class. 
" 11748 "Rejecting PLOGI"); 11749 11750 } else { 11751 mutex_enter(&port->fp_mutex); 11752 port->fp_els_resp_pkt_busy = 0; 11753 mutex_exit(&port->fp_mutex); 11754 return; 11755 } 11756 } else { 11757 cmd->cmd_pkt.pkt_cmdlen = 11758 sizeof (la_els_logi_t); 11759 cmd->cmd_pkt.pkt_rsplen = 0; 11760 11761 /* 11762 * If fp_port_id is zero and topology is 11763 * Point-to-Point, get the local port id from 11764 * the d_id in the PLOGI request. 11765 * If the outgoing FLOGI hasn't been accepted, 11766 * the topology will be unknown here. But it's 11767 * still safe to save the d_id to fp_port_id, 11768 * just because it will be overwritten later 11769 * if the topology is not Point-to-Point. 11770 */ 11771 mutex_enter(&port->fp_mutex); 11772 if ((port->fp_port_id.port_id == 0) && 11773 (port->fp_topology == FC_TOP_PT_PT || 11774 port->fp_topology == FC_TOP_UNKNOWN)) { 11775 port->fp_port_id.port_id = 11776 buf->ub_frame.d_id; 11777 } 11778 mutex_exit(&port->fp_mutex); 11779 11780 /* 11781 * Sometime later, we should validate 11782 * the service parameters instead of 11783 * just accepting it. 11784 */ 11785 fp_login_acc_init(port, cmd, buf, NULL, 11786 KM_NOSLEEP); 11787 FP_TRACE(FP_NHEAD1(3, 0), 11788 "fp_i_handle_unsol_els: Accepting PLOGI," 11789 " f_port=%d, small=%d, do_acc=%d," 11790 " sent=%d.", f_port, small, do_acc, 11791 sent); 11792 } 11793 } else { 11794 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11795 port->fp_options & FP_SEND_RJT) { 11796 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11797 cmd->cmd_pkt.pkt_rsplen = 0; 11798 fp_els_rjt_init(port, cmd, buf, 11799 FC_ACTION_NON_RETRYABLE, 11800 FC_REASON_LOGICAL_BSY, NULL); 11801 FP_TRACE(FP_NHEAD1(3, 0), 11802 "fp_i_handle_unsol_els: " 11803 "Rejecting PLOGI with Logical Busy." 11804 "Possible Login collision."); 11805 } else { 11806 mutex_enter(&port->fp_mutex); 11807 port->fp_els_resp_pkt_busy = 0; 11808 mutex_exit(&port->fp_mutex); 11809 return; 11810 } 11811 } 11812 break; 11813 } 11814 11815 case LA_ELS_FLOGI: 11816 if (fp_is_class_supported(port->fp_cos, 11817 buf->ub_class) == FC_FAILURE) { 11818 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11819 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11820 cmd->cmd_pkt.pkt_rsplen = 0; 11821 fp_els_rjt_init(port, cmd, buf, 11822 FC_ACTION_NON_RETRYABLE, 11823 FC_REASON_CLASS_NOT_SUPP, NULL); 11824 FP_TRACE(FP_NHEAD1(3, 0), 11825 "fp_i_handle_unsol_els: " 11826 "Unsupported Class. Rejecting FLOGI."); 11827 } else { 11828 mutex_enter(&port->fp_mutex); 11829 port->fp_els_resp_pkt_busy = 0; 11830 mutex_exit(&port->fp_mutex); 11831 return; 11832 } 11833 } else { 11834 mutex_enter(&port->fp_mutex); 11835 if (FC_PORT_STATE_MASK(port->fp_state) != 11836 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11837 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11838 mutex_exit(&port->fp_mutex); 11839 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11840 cmd->cmd_pkt.pkt_cmdlen = 11841 sizeof (la_els_rjt_t); 11842 cmd->cmd_pkt.pkt_rsplen = 0; 11843 fp_els_rjt_init(port, cmd, buf, 11844 FC_ACTION_NON_RETRYABLE, 11845 FC_REASON_INVALID_LINK_CTRL, 11846 NULL); 11847 FP_TRACE(FP_NHEAD1(3, 0), 11848 "fp_i_handle_unsol_els: " 11849 "Invalid Link Ctrl. 
" 11850 "Rejecting FLOGI."); 11851 } else { 11852 mutex_enter(&port->fp_mutex); 11853 port->fp_els_resp_pkt_busy = 0; 11854 mutex_exit(&port->fp_mutex); 11855 return; 11856 } 11857 } else { 11858 mutex_exit(&port->fp_mutex); 11859 cmd->cmd_pkt.pkt_cmdlen = 11860 sizeof (la_els_logi_t); 11861 cmd->cmd_pkt.pkt_rsplen = 0; 11862 /* 11863 * Let's not aggressively validate the N_Port's 11864 * service parameters until PLOGI. Suffice it 11865 * to give a hint that we are an N_Port and we 11866 * are game to some serious stuff here. 11867 */ 11868 fp_login_acc_init(port, cmd, buf, 11869 NULL, KM_NOSLEEP); 11870 FP_TRACE(FP_NHEAD1(3, 0), 11871 "fp_i_handle_unsol_els: " 11872 "Accepting FLOGI."); 11873 } 11874 } 11875 break; 11876 11877 default: 11878 return; 11879 } 11880 11881 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11882 mutex_enter(&port->fp_mutex); 11883 port->fp_els_resp_pkt_busy = 0; 11884 mutex_exit(&port->fp_mutex); 11885 } 11886 } 11887 11888 11889 /* 11890 * Handle unsolicited PLOGI request 11891 */ 11892 static void 11893 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11894 job_request_t *job, int sleep) 11895 { 11896 int sent; 11897 int small; 11898 int f_port; 11899 int do_acc; 11900 fp_cmd_t *cmd; 11901 la_wwn_t *swwn; 11902 la_wwn_t *dwwn; 11903 la_els_logi_t *payload; 11904 fc_remote_port_t *pd; 11905 char dww_name[17]; 11906 11907 payload = (la_els_logi_t *)buf->ub_buffer; 11908 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11909 11910 mutex_enter(&port->fp_mutex); 11911 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11912 mutex_exit(&port->fp_mutex); 11913 11914 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11915 "type=%x, f_ctl=%x" 11916 " seq_id=%x, ox_id=%x, rx_id=%x" 11917 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11918 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11919 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11920 11921 swwn = &port->fp_service_params.nport_ww_name; 11922 dwwn = &payload->nport_ww_name; 11923 small = fctl_wwn_cmp(swwn, dwwn); 11924 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11925 if (pd) { 11926 mutex_enter(&pd->pd_mutex); 11927 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11928 /* 11929 * Most likely this means a cross login is in 11930 * progress or a device about to be yanked out. 11931 * Only accept the plogi if my wwn is smaller. 11932 */ 11933 11934 if (pd->pd_type == PORT_DEVICE_OLD) { 11935 sent = 1; 11936 } 11937 /* 11938 * Stop plogi request (if any) 11939 * attempt from local side to speedup 11940 * the discovery progress. 11941 * Mark the pd as PD_PLOGI_RECEPIENT. 11942 */ 11943 if (f_port == 0 && small < 0) { 11944 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11945 } 11946 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11947 11948 mutex_exit(&pd->pd_mutex); 11949 11950 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11951 " received. PD still exists in the PWWN list. pd=%p " 11952 "PWWN=%s, sent=%x", pd, dww_name, sent); 11953 11954 if (f_port == 0 && small < 0) { 11955 FP_TRACE(FP_NHEAD1(3, 0), 11956 "fp_handle_unsol_plogi: Mark the pd" 11957 " as plogi recipient, pd=%p, PWWN=%s" 11958 ", sent=%x", 11959 pd, dww_name, sent); 11960 } 11961 } else { 11962 sent = 0; 11963 } 11964 11965 /* 11966 * Avoid Login collisions by accepting only if my WWN is smaller. 
11967 * 11968 * A side note: There is no need to start a PLOGI from this end in 11969 * this context if login isn't going to be accepted for the 11970 * above reason as either a LIP (in private loop), RSCN (in 11971 * fabric topology), or an FLOGI (in point to point - Huh ? 11972 * check FC-PH) would normally drive the PLOGI from this end. 11973 * At this point of time there is no need for an inbound PLOGI 11974 * to kick an outbound PLOGI when it is going to be rejected 11975 * for the reason of WWN being smaller. However it isn't hard 11976 * to do that either (when such a need arises, start a timer 11977 * for a duration that extends beyond a normal device discovery 11978 * time and check if an outbound PLOGI did go before that, if 11979 * none fire one) 11980 * 11981 * Unfortunately, as it turned out, during booting, it is possible 11982 * to miss another initiator in the same loop as port driver 11983 * instances are serially attached. While preserving the above 11984 * comments for belly laughs, please kick an outbound PLOGI in 11985 * a non-switch environment (which is a pt pt between N_Ports or 11986 * a private loop) 11987 * 11988 * While preserving the above comments for amusement, send an 11989 * ACC if the PLOGI is going to be rejected for WWN being smaller 11990 * when no discovery is in progress at this end. Turn around 11991 * and make the port device as the PLOGI initiator, so that 11992 * during subsequent link/loop initialization, this end drives 11993 * the PLOGI (In fact both ends do in this particular case, but 11994 * only one wins) 11995 * 11996 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11997 * ports (such as 0xFFFC41) are accepted too. 11998 */ 11999 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 12000 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 12001 if (fp_is_class_supported(port->fp_cos, 12002 buf->ub_class) == FC_FAILURE) { 12003 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12004 cmd = fp_alloc_pkt(port, 12005 sizeof (la_els_logi_t), 0, sleep, pd); 12006 if (cmd == NULL) { 12007 return; 12008 } 12009 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12010 cmd->cmd_pkt.pkt_rsplen = 0; 12011 fp_els_rjt_init(port, cmd, buf, 12012 FC_ACTION_NON_RETRYABLE, 12013 FC_REASON_CLASS_NOT_SUPP, job); 12014 FP_TRACE(FP_NHEAD1(3, 0), 12015 "fp_handle_unsol_plogi: " 12016 "Unsupported class. rejecting PLOGI"); 12017 } 12018 } else { 12019 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12020 0, sleep, pd); 12021 if (cmd == NULL) { 12022 return; 12023 } 12024 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 12025 cmd->cmd_pkt.pkt_rsplen = 0; 12026 12027 /* 12028 * Sometime later, we should validate the service 12029 * parameters instead of just accepting it. 12030 */ 12031 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12032 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12033 "Accepting PLOGI, f_port=%d, small=%d, " 12034 "do_acc=%d, sent=%d.", f_port, small, do_acc, 12035 sent); 12036 12037 /* 12038 * If fp_port_id is zero and topology is 12039 * Point-to-Point, get the local port id from 12040 * the d_id in the PLOGI request. 12041 * If the outgoing FLOGI hasn't been accepted, 12042 * the topology will be unknown here. But it's 12043 * still safe to save the d_id to fp_port_id, 12044 * just because it will be overwritten later 12045 * if the topology is not Point-to-Point. 
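fp_i_handle_unsol_els() applies the same rule when it answers a PLOGI inline.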
12046 */ 12047 mutex_enter(&port->fp_mutex); 12048 if ((port->fp_port_id.port_id == 0) && 12049 (port->fp_topology == FC_TOP_PT_PT || 12050 port->fp_topology == FC_TOP_UNKNOWN)) { 12051 port->fp_port_id.port_id = 12052 buf->ub_frame.d_id; 12053 } 12054 mutex_exit(&port->fp_mutex); 12055 } 12056 } else { 12057 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 12058 port->fp_options & FP_SEND_RJT) { 12059 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12060 0, sleep, pd); 12061 if (cmd == NULL) { 12062 return; 12063 } 12064 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12065 cmd->cmd_pkt.pkt_rsplen = 0; 12066 /* 12067 * Send out Logical busy to indicate 12068 * the detection of PLOGI collision 12069 */ 12070 fp_els_rjt_init(port, cmd, buf, 12071 FC_ACTION_NON_RETRYABLE, 12072 FC_REASON_LOGICAL_BSY, job); 12073 12074 fc_wwn_to_str(dwwn, dww_name); 12075 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12076 "Rejecting Unsol PLOGI with Logical Busy." 12077 "possible PLOGI collision. PWWN=%s, sent=%x", 12078 dww_name, sent); 12079 } else { 12080 return; 12081 } 12082 } 12083 12084 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12085 fp_free_pkt(cmd); 12086 } 12087 } 12088 12089 12090 /* 12091 * Handle mischievous turning over of our own FLOGI requests back to 12092 * us by the SOC+ microcode. In other words, look at the class of such 12093 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 12094 * on the floor 12095 */ 12096 static void 12097 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 12098 job_request_t *job, int sleep) 12099 { 12100 uint32_t state; 12101 uint32_t s_id; 12102 fp_cmd_t *cmd; 12103 12104 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 12105 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12106 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12107 0, sleep, NULL); 12108 if (cmd == NULL) { 12109 return; 12110 } 12111 fp_els_rjt_init(port, cmd, buf, 12112 FC_ACTION_NON_RETRYABLE, 12113 FC_REASON_CLASS_NOT_SUPP, job); 12114 } else { 12115 return; 12116 } 12117 } else { 12118 12119 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12120 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12121 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12122 buf->ub_frame.s_id, buf->ub_frame.d_id, 12123 buf->ub_frame.type, buf->ub_frame.f_ctl, 12124 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12125 buf->ub_frame.rx_id, buf->ub_frame.ro); 12126 12127 mutex_enter(&port->fp_mutex); 12128 state = FC_PORT_STATE_MASK(port->fp_state); 12129 s_id = port->fp_port_id.port_id; 12130 mutex_exit(&port->fp_mutex); 12131 12132 if (state != FC_STATE_ONLINE || 12133 (s_id && buf->ub_frame.s_id == s_id)) { 12134 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12135 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12136 0, sleep, NULL); 12137 if (cmd == NULL) { 12138 return; 12139 } 12140 fp_els_rjt_init(port, cmd, buf, 12141 FC_ACTION_NON_RETRYABLE, 12142 FC_REASON_INVALID_LINK_CTRL, job); 12143 FP_TRACE(FP_NHEAD1(3, 0), 12144 "fp_handle_unsol_flogi: " 12145 "Rejecting PLOGI. Invalid Link CTRL"); 12146 } else { 12147 return; 12148 } 12149 } else { 12150 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12151 0, sleep, NULL); 12152 if (cmd == NULL) { 12153 return; 12154 } 12155 /* 12156 * Let's not aggressively validate the N_Port's 12157 * service parameters until PLOGI. Suffice it 12158 * to give a hint that we are an N_Port and we 12159 * are game to some serious stuff here. 
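The ACC payload is simply this port's own service parameters with the LS code changed to LA_ELS_ACC (see fp_login_acc_init()).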
12160 */ 12161 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12162 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12163 "Accepting FLOGI"); 12164 } 12165 } 12166 12167 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12168 fp_free_pkt(cmd); 12169 } 12170 } 12171 12172 12173 /* 12174 * Perform PLOGI (or FLOGI) accept 12175 */ 12176 static void 12177 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12178 job_request_t *job, int sleep) 12179 { 12180 fc_packet_t *pkt; 12181 fc_portmap_t *listptr; 12182 la_els_logi_t payload; 12183 12184 ASSERT(buf != NULL); 12185 12186 /* 12187 * If we are sending ACC to PLOGI and we haven't already 12188 * created port and node device handles, let's create them 12189 * here. 12190 */ 12191 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12192 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12193 int small; 12194 int do_acc; 12195 fc_remote_port_t *pd; 12196 la_els_logi_t *req; 12197 12198 req = (la_els_logi_t *)buf->ub_buffer; 12199 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12200 &req->nport_ww_name); 12201 12202 mutex_enter(&port->fp_mutex); 12203 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12204 mutex_exit(&port->fp_mutex); 12205 12206 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x", 12207 port->fp_port_id.port_id, buf->ub_frame.s_id); 12208 pd = fctl_create_remote_port(port, &req->node_ww_name, 12209 &req->nport_ww_name, buf->ub_frame.s_id, 12210 PD_PLOGI_RECEPIENT, sleep); 12211 if (pd == NULL) { 12212 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12213 "Couldn't create port device for d_id:0x%x", 12214 buf->ub_frame.s_id); 12215 12216 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12217 "couldn't create port device d_id=%x", 12218 buf->ub_frame.s_id); 12219 } else { 12220 /* 12221 * usoc currently returns PLOGIs inline and 12222 * the maximum buffer size is 60 bytes or so. 
12223 * So attempt not to look beyond what is in 12224 * the unsolicited buffer 12225 * 12226 * JNI also traverses this path sometimes 12227 */ 12228 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12229 fp_register_login(NULL, pd, req, buf->ub_class); 12230 } else { 12231 mutex_enter(&pd->pd_mutex); 12232 if (pd->pd_login_count == 0) { 12233 pd->pd_login_count++; 12234 } 12235 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12236 pd->pd_login_class = buf->ub_class; 12237 mutex_exit(&pd->pd_mutex); 12238 } 12239 12240 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12241 if (listptr != NULL) { 12242 fctl_copy_portmap(listptr, pd); 12243 (void) fp_ulp_devc_cb(port, listptr, 12244 1, 1, sleep, 0); 12245 } 12246 12247 if (small > 0 && do_acc) { 12248 mutex_enter(&pd->pd_mutex); 12249 pd->pd_recepient = PD_PLOGI_INITIATOR; 12250 mutex_exit(&pd->pd_mutex); 12251 } 12252 } 12253 } 12254 12255 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12256 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12257 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12258 cmd->cmd_retry_count = 1; 12259 cmd->cmd_ulp_pkt = NULL; 12260 12261 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12262 cmd->cmd_job = job; 12263 12264 pkt = &cmd->cmd_pkt; 12265 12266 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12267 12268 payload = port->fp_service_params; 12269 payload.ls_code.ls_code = LA_ELS_ACC; 12270 12271 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12272 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12273 12274 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12275 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12276 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12277 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id, 12278 buf->ub_bufsize, sizeof (la_els_logi_t), 12279 port->fp_service_params.nport_ww_name.w.naa_id, 12280 port->fp_service_params.nport_ww_name.w.nport_id, 12281 port->fp_service_params.nport_ww_name.w.wwn_hi, 12282 port->fp_service_params.nport_ww_name.w.wwn_lo, 12283 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12284 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12285 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12286 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12287 port->fp_statec_busy); 12288 } 12289 12290 12291 #define RSCN_EVENT_NAME_LEN 256 12292 12293 /* 12294 * Handle RSCNs 12295 */ 12296 static void 12297 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12298 job_request_t *job, int sleep) 12299 { 12300 uint32_t mask; 12301 fp_cmd_t *cmd; 12302 uint32_t count; 12303 int listindex; 12304 int16_t len; 12305 fc_rscn_t *payload; 12306 fc_portmap_t *listptr; 12307 fctl_ns_req_t *ns_cmd; 12308 fc_affected_id_t *page; 12309 caddr_t nvname; 12310 nvlist_t *attr_list = NULL; 12311 12312 mutex_enter(&port->fp_mutex); 12313 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12314 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12315 --port->fp_rscn_count; 12316 } 12317 mutex_exit(&port->fp_mutex); 12318 return; 12319 } 12320 mutex_exit(&port->fp_mutex); 12321 12322 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12323 if (cmd != NULL) { 12324 fp_els_acc_init(port, cmd, buf, job); 12325 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12326 fp_free_pkt(cmd); 12327 } 12328 } 12329 12330 payload = (fc_rscn_t *)buf->ub_buffer; 12331 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12332 ASSERT(payload->rscn_len == 
FP_PORT_IDENTIFIER_LEN); 12333 12334 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12335 12336 if (len <= 0) { 12337 mutex_enter(&port->fp_mutex); 12338 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12339 --port->fp_rscn_count; 12340 } 12341 mutex_exit(&port->fp_mutex); 12342 12343 return; 12344 } 12345 12346 ASSERT((len & 0x3) == 0); /* Must be a multiple of 4 */ 12347 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12348 12349 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12350 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12351 12352 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12353 12354 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12355 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12356 0, sleep); 12357 if (ns_cmd == NULL) { 12358 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12359 12360 mutex_enter(&port->fp_mutex); 12361 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12362 --port->fp_rscn_count; 12363 } 12364 mutex_exit(&port->fp_mutex); 12365 12366 return; 12367 } 12368 12369 ns_cmd->ns_cmd_code = NS_GPN_ID; 12370 12371 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12372 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12373 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12374 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12375 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12376 12377 /* Only proceed if we can allocate nvname and the nvlist */ 12378 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12379 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12380 KM_NOSLEEP) == DDI_SUCCESS) { 12381 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12382 port->fp_instance) == DDI_SUCCESS && 12383 nvlist_add_byte_array(attr_list, "port-wwn", 12384 port->fp_service_params.nport_ww_name.raw_wwn, 12385 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12386 nvlist_free(attr_list); 12387 attr_list = NULL; 12388 } 12389 } 12390 12391 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12392 /* Add affected page to the event payload */ 12393 if (attr_list != NULL) { 12394 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12395 "affected_page_%d", listindex); 12396 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12397 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12398 /* We don't send a partial event, so dump it */ 12399 nvlist_free(attr_list); 12400 attr_list = NULL; 12401 } 12402 } 12403 /* 12404 * Query the NS to get the Port WWN for this 12405 * affected D_ID. 12406 */ 12407 mask = 0; 12408 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12409 case FC_RSCN_PORT_ADDRESS: 12410 fp_validate_rscn_page(port, page, job, ns_cmd, 12411 listptr, &listindex, sleep); 12412 12413 if (listindex == 0) { 12414 /* 12415 * We essentially did not process this RSCN. 
So, 12416 * ULPs are not going to be called and so we 12417 * decrement the rscn_count 12418 */ 12419 mutex_enter(&port->fp_mutex); 12420 if (--port->fp_rscn_count == 12421 FC_INVALID_RSCN_COUNT) { 12422 --port->fp_rscn_count; 12423 } 12424 mutex_exit(&port->fp_mutex); 12425 } 12426 break; 12427 12428 case FC_RSCN_AREA_ADDRESS: 12429 mask = 0xFFFF00; 12430 /* FALLTHROUGH */ 12431 12432 case FC_RSCN_DOMAIN_ADDRESS: 12433 if (!mask) { 12434 mask = 0xFF0000; 12435 } 12436 fp_validate_area_domain(port, page->aff_d_id, mask, 12437 job, sleep); 12438 break; 12439 12440 case FC_RSCN_FABRIC_ADDRESS: 12441 /* 12442 * We need to discover all the devices on this 12443 * port. 12444 */ 12445 fp_validate_area_domain(port, 0, 0, job, sleep); 12446 break; 12447 12448 default: 12449 break; 12450 } 12451 } 12452 if (attr_list != NULL) { 12453 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12454 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12455 NULL, DDI_SLEEP); 12456 nvlist_free(attr_list); 12457 } else { 12458 FP_TRACE(FP_NHEAD1(9, 0), 12459 "RSCN handled, but event not sent to userland"); 12460 } 12461 if (nvname != NULL) { 12462 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12463 } 12464 12465 if (ns_cmd) { 12466 fctl_free_ns_cmd(ns_cmd); 12467 } 12468 12469 if (listindex) { 12470 #ifdef DEBUG 12471 page = (fc_affected_id_t *)(buf->ub_buffer + 12472 sizeof (fc_rscn_t)); 12473 12474 if (listptr->map_did.port_id != page->aff_d_id) { 12475 FP_TRACE(FP_NHEAD1(9, 0), 12476 "PORT RSCN: processed=%x, reporting=%x", 12477 listptr->map_did.port_id, page->aff_d_id); 12478 } 12479 #endif 12480 12481 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12482 sleep, 0); 12483 } else { 12484 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12485 } 12486 } 12487 12488 12489 /* 12490 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12491 */ 12492 static void 12493 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12494 { 12495 int is_switch; 12496 int initiator; 12497 fc_local_port_t *port; 12498 12499 port = pd->pd_port; 12500 12501 /* This function has the following bunch of assumptions */ 12502 ASSERT(port != NULL); 12503 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12504 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12505 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12506 12507 pd->pd_state = PORT_DEVICE_INVALID; 12508 pd->pd_type = PORT_DEVICE_OLD; 12509 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12510 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12511 12512 fctl_delist_did_table(port, pd); 12513 fctl_delist_pwwn_table(port, pd); 12514 12515 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12516 " removed the PD=%p from DID and PWWN tables", 12517 port, pd->pd_port_id.port_id, pd); 12518 12519 if ((!flag) && port && initiator && is_switch) { 12520 (void) fctl_add_orphan_held(port, pd); 12521 } 12522 fctl_copy_portmap_held(map, pd); 12523 map->map_pd = pd; 12524 } 12525 12526 /* 12527 * Fill out old map for ULPs 12528 */ 12529 static void 12530 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12531 { 12532 int is_switch; 12533 int initiator; 12534 fc_local_port_t *port; 12535 12536 mutex_enter(&pd->pd_mutex); 12537 port = pd->pd_port; 12538 mutex_exit(&pd->pd_mutex); 12539 12540 mutex_enter(&port->fp_mutex); 12541 mutex_enter(&pd->pd_mutex); 12542 12543 pd->pd_state = PORT_DEVICE_INVALID; 12544 pd->pd_type = PORT_DEVICE_OLD; 12545 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 12546 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12547 12548 fctl_delist_did_table(port, pd); 12549 fctl_delist_pwwn_table(port, pd); 12550 12551 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12552 " removed the PD=%p from DID and PWWN tables", 12553 port, pd->pd_port_id.port_id, pd); 12554 12555 mutex_exit(&pd->pd_mutex); 12556 mutex_exit(&port->fp_mutex); 12557 12558 ASSERT(port != NULL); 12559 if ((!flag) && port && initiator && is_switch) { 12560 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12561 } 12562 fctl_copy_portmap(map, pd); 12563 map->map_pd = pd; 12564 } 12565 12566 12567 /* 12568 * Fillout Changed Map for ULPs 12569 */ 12570 static void 12571 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12572 uint32_t *new_did, la_wwn_t *new_pwwn) 12573 { 12574 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12575 12576 pd->pd_type = PORT_DEVICE_CHANGED; 12577 if (new_did) { 12578 pd->pd_port_id.port_id = *new_did; 12579 } 12580 if (new_pwwn) { 12581 pd->pd_port_name = *new_pwwn; 12582 } 12583 mutex_exit(&pd->pd_mutex); 12584 12585 fctl_copy_portmap(map, pd); 12586 12587 mutex_enter(&pd->pd_mutex); 12588 pd->pd_type = PORT_DEVICE_NOCHANGE; 12589 } 12590 12591 12592 /* 12593 * Fillout New Name Server map 12594 */ 12595 static void 12596 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12597 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12598 { 12599 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12600 12601 if (handle) { 12602 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn, 12603 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12604 DDI_DEV_AUTOINCR); 12605 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn, 12606 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12607 DDI_DEV_AUTOINCR); 12608 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types, 12609 (uint8_t *)gan_resp->gan_fc4types, 12610 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12611 } else { 12612 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12613 sizeof (gan_resp->gan_pwwn)); 12614 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12615 sizeof (gan_resp->gan_nwwn)); 12616 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12617 sizeof (gan_resp->gan_fc4types)); 12618 } 12619 port_map->map_did.port_id = d_id; 12620 port_map->map_did.priv_lilp_posit = 0; 12621 port_map->map_hard_addr.hard_addr = 0; 12622 port_map->map_hard_addr.rsvd = 0; 12623 port_map->map_state = PORT_DEVICE_INVALID; 12624 port_map->map_type = PORT_DEVICE_NEW; 12625 port_map->map_flags = 0; 12626 port_map->map_pd = NULL; 12627 12628 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12629 12630 ASSERT(port != NULL); 12631 } 12632 12633 12634 /* 12635 * Perform LINIT ELS 12636 */ 12637 static int 12638 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12639 job_request_t *job) 12640 { 12641 int rval; 12642 uint32_t d_id; 12643 uint32_t s_id; 12644 uint32_t lfa; 12645 uchar_t class; 12646 uint32_t ret; 12647 fp_cmd_t *cmd; 12648 fc_porttype_t ptype; 12649 fc_packet_t *pkt; 12650 fc_linit_req_t payload; 12651 fc_remote_port_t *pd; 12652 12653 rval = 0; 12654 12655 ASSERT(job != NULL); 12656 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12657 12658 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12659 if (pd == NULL) { 12660 fctl_ns_req_t *ns_cmd; 12661 12662 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12663 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12664 0, sleep); 12665 12666 if (ns_cmd == NULL) { 
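/* No memory for the GID_PN query; give up on the remote LIP. */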
12667 return (FC_NOMEM); 12668 } 12669 job->job_result = FC_SUCCESS; 12670 ns_cmd->ns_cmd_code = NS_GID_PN; 12671 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12672 12673 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12674 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12675 fctl_free_ns_cmd(ns_cmd); 12676 return (FC_FAILURE); 12677 } 12678 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12679 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12680 12681 fctl_free_ns_cmd(ns_cmd); 12682 lfa = d_id & 0xFFFF00; 12683 12684 /* 12685 * Given this D_ID, get the port type to see if 12686 * we can do LINIT on the LFA 12687 */ 12688 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12689 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12690 0, sleep); 12691 12692 if (ns_cmd == NULL) { 12693 return (FC_NOMEM); 12694 } 12695 12696 job->job_result = FC_SUCCESS; 12697 ns_cmd->ns_cmd_code = NS_GPT_ID; 12698 12699 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12700 ((ns_req_gpt_id_t *) 12701 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12702 12703 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12704 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12705 fctl_free_ns_cmd(ns_cmd); 12706 return (FC_FAILURE); 12707 } 12708 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12709 12710 fctl_free_ns_cmd(ns_cmd); 12711 12712 switch (ptype.port_type) { 12713 case FC_NS_PORT_NL: 12714 case FC_NS_PORT_F_NL: 12715 case FC_NS_PORT_FL: 12716 break; 12717 12718 default: 12719 return (FC_FAILURE); 12720 } 12721 } else { 12722 mutex_enter(&pd->pd_mutex); 12723 ptype = pd->pd_porttype; 12724 12725 switch (pd->pd_porttype.port_type) { 12726 case FC_NS_PORT_NL: 12727 case FC_NS_PORT_F_NL: 12728 case FC_NS_PORT_FL: 12729 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12730 break; 12731 12732 default: 12733 mutex_exit(&pd->pd_mutex); 12734 return (FC_FAILURE); 12735 } 12736 mutex_exit(&pd->pd_mutex); 12737 } 12738 12739 mutex_enter(&port->fp_mutex); 12740 s_id = port->fp_port_id.port_id; 12741 class = port->fp_ns_login_class; 12742 mutex_exit(&port->fp_mutex); 12743 12744 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12745 sizeof (fc_linit_resp_t), sleep, pd); 12746 if (cmd == NULL) { 12747 return (FC_NOMEM); 12748 } 12749 12750 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12751 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12752 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12753 cmd->cmd_retry_count = fp_retry_count; 12754 cmd->cmd_ulp_pkt = NULL; 12755 12756 pkt = &cmd->cmd_pkt; 12757 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12758 12759 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12760 12761 /* 12762 * How does LIP work by the way ? 12763 * If the L_Port receives three consecutive identical ordered 12764 * sets whose first two characters (fully decoded) are equal to 12765 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12766 * recognize a Loop Initialization Primitive sequence. 
The 12767 * character 3 determines the type of lip: 12768 * LIP(F7) Normal LIP 12769 * LIP(F8) Loop Failure LIP 12770 * 12771 * The possible combination for the 3rd and 4th bytes are: 12772 * F7, F7 Normal Lip - No valid AL_PA 12773 * F8, F8 Loop Failure - No valid AL_PA 12774 * F7, AL_PS Normal Lip - Valid source AL_PA 12775 * F8, AL_PS Loop Failure - Valid source AL_PA 12776 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12777 * And Normal Lip for all other loop members 12778 * 0xFF AL_PS Vendor specific reset of all loop members 12779 * 12780 * Now, it may not always be that we, at the source, may have an 12781 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12782 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12783 * payload we are going to set: 12784 * lip_b3 = 0xF7; Normal LIP 12785 * lip_b4 = 0xF7; No valid source AL_PA 12786 */ 12787 payload.ls_code.ls_code = LA_ELS_LINIT; 12788 payload.ls_code.mbz = 0; 12789 payload.rsvd = 0; 12790 payload.func = 0; /* Let Fabric determine the best way */ 12791 payload.lip_b3 = 0xF7; /* Normal LIP */ 12792 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12793 12794 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12795 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12796 12797 job->job_counter = 1; 12798 12799 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12800 if (ret == FC_SUCCESS) { 12801 fp_jobwait(job); 12802 rval = job->job_result; 12803 } else { 12804 rval = FC_FAILURE; 12805 fp_free_pkt(cmd); 12806 } 12807 12808 return (rval); 12809 } 12810 12811 12812 /* 12813 * Fill out the device handles with GAN response 12814 */ 12815 static void 12816 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12817 ns_resp_gan_t *gan_resp) 12818 { 12819 fc_remote_node_t *node; 12820 fc_porttype_t type; 12821 fc_local_port_t *port; 12822 12823 ASSERT(pd != NULL); 12824 ASSERT(handle != NULL); 12825 12826 port = pd->pd_port; 12827 12828 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12829 " port_id=%x, sym_len=%d fc4-type=%x", 12830 pd, gan_resp->gan_type_id.rsvd, 12831 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12832 12833 mutex_enter(&pd->pd_mutex); 12834 12835 FC_GET_RSP(port, *handle, (uint8_t *)&type, 12836 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12837 12838 pd->pd_porttype.port_type = type.port_type; 12839 pd->pd_porttype.rsvd = 0; 12840 12841 pd->pd_spn_len = gan_resp->gan_spnlen; 12842 if (pd->pd_spn_len) { 12843 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn, 12844 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12845 DDI_DEV_AUTOINCR); 12846 } 12847 12848 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr, 12849 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12850 DDI_DEV_AUTOINCR); 12851 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos, 12852 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12853 DDI_DEV_AUTOINCR); 12854 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types, 12855 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12856 DDI_DEV_AUTOINCR); 12857 12858 node = pd->pd_remote_nodep; 12859 mutex_exit(&pd->pd_mutex); 12860 12861 mutex_enter(&node->fd_mutex); 12862 12863 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa, 12864 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12865 DDI_DEV_AUTOINCR); 12866 12867 node->fd_snn_len = gan_resp->gan_snnlen; 12868 if (node->fd_snn_len) { 12869 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn, 12870 (uint8_t *)gan_resp->gan_snname, 
node->fd_snn_len, 12871 DDI_DEV_AUTOINCR); 12872 } 12873 12874 mutex_exit(&node->fd_mutex); 12875 } 12876 12877 12878 /* 12879 * Handles all NS Queries (also means that this function 12880 * doesn't handle NS object registration) 12881 */ 12882 static int 12883 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12884 int polled, int sleep) 12885 { 12886 int rval; 12887 fp_cmd_t *cmd; 12888 12889 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12890 12891 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 12892 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x", 12893 port->fp_port_id.port_id, ns_cmd->ns_gan_sid); 12894 } 12895 12896 if (ns_cmd->ns_cmd_size == 0) { 12897 return (FC_FAILURE); 12898 } 12899 12900 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12901 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12902 ns_cmd->ns_resp_size, sleep, NULL); 12903 if (cmd == NULL) { 12904 return (FC_NOMEM); 12905 } 12906 12907 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12908 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12909 12910 if (polled) { 12911 job->job_counter = 1; 12912 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12913 } 12914 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12915 if (rval != FC_SUCCESS) { 12916 job->job_result = rval; 12917 fp_iodone(cmd); 12918 if (polled == 0) { 12919 /* 12920 * Return FC_SUCCESS to indicate that 12921 * fp_iodone is performed already. 12922 */ 12923 rval = FC_SUCCESS; 12924 } 12925 } 12926 12927 if (polled) { 12928 fp_jobwait(job); 12929 rval = job->job_result; 12930 } 12931 12932 return (rval); 12933 } 12934 12935 12936 /* 12937 * Initialize Common Transport request 12938 */ 12939 static void 12940 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12941 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12942 uint16_t resp_len, job_request_t *job) 12943 { 12944 uint32_t s_id; 12945 uchar_t class; 12946 fc_packet_t *pkt; 12947 fc_ct_header_t ct; 12948 12949 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12950 12951 mutex_enter(&port->fp_mutex); 12952 s_id = port->fp_port_id.port_id; 12953 class = port->fp_ns_login_class; 12954 mutex_exit(&port->fp_mutex); 12955 12956 cmd->cmd_job = job; 12957 cmd->cmd_private = ns_cmd; 12958 pkt = &cmd->cmd_pkt; 12959 12960 ct.ct_rev = CT_REV; 12961 ct.ct_inid = 0; 12962 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12963 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12964 ct.ct_options = 0; 12965 ct.ct_reserved1 = 0; 12966 ct.ct_cmdrsp = cmd_code; 12967 ct.ct_aiusize = resp_len >> 2; 12968 ct.ct_reserved2 = 0; 12969 ct.ct_reason = 0; 12970 ct.ct_expln = 0; 12971 ct.ct_vendor = 0; 12972 12973 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct, 12974 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR); 12975 12976 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12977 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12978 pkt->pkt_cmd_fhdr.s_id = s_id; 12979 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12980 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12981 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12982 pkt->pkt_cmd_fhdr.seq_id = 0; 12983 pkt->pkt_cmd_fhdr.df_ctl = 0; 12984 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12985 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12986 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12987 pkt->pkt_cmd_fhdr.ro = 0; 12988 pkt->pkt_cmd_fhdr.rsvd = 0; 12989 12990 pkt->pkt_comp = fp_ns_intr; 12991 pkt->pkt_ulp_private = (opaque_t)cmd; 12992 pkt->pkt_timeout = FP_NS_TIMEOUT; 12993 12994 if (cmd_buf) { 12995 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12996 (uint8_t 
*)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12997 cmd_len, DDI_DEV_AUTOINCR); 12998 } 12999 13000 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 13001 13002 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 13003 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13004 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 13005 cmd->cmd_retry_count = fp_retry_count; 13006 cmd->cmd_ulp_pkt = NULL; 13007 } 13008 13009 13010 /* 13011 * Name Server request interrupt routine 13012 */ 13013 static void 13014 fp_ns_intr(fc_packet_t *pkt) 13015 { 13016 fp_cmd_t *cmd; 13017 fc_local_port_t *port; 13018 fc_ct_header_t resp_hdr; 13019 fc_ct_header_t cmd_hdr; 13020 fctl_ns_req_t *ns_cmd; 13021 13022 cmd = pkt->pkt_ulp_private; 13023 port = cmd->cmd_port; 13024 13025 mutex_enter(&port->fp_mutex); 13026 port->fp_out_fpcmds--; 13027 mutex_exit(&port->fp_mutex); 13028 13029 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 13030 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 13031 ns_cmd = (fctl_ns_req_t *) 13032 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 13033 if (!FP_IS_PKT_ERROR(pkt)) { 13034 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 13035 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 13036 DDI_DEV_AUTOINCR); 13037 13038 /* 13039 * On x86 architectures, make sure the resp_hdr is big endian. 13040 * This macro is a NOP on sparc architectures mainly because 13041 * we don't want to end up wasting time since the end result 13042 * is going to be the same. 13043 */ 13044 MAKE_BE_32(&resp_hdr); 13045 13046 if (ns_cmd) { 13047 /* 13048 * Always copy out the response CT_HDR 13049 */ 13050 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 13051 sizeof (resp_hdr)); 13052 } 13053 13054 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 13055 pkt->pkt_state = FC_PKT_FS_RJT; 13056 pkt->pkt_reason = resp_hdr.ct_reason; 13057 pkt->pkt_expln = resp_hdr.ct_expln; 13058 } 13059 } 13060 13061 if (FP_IS_PKT_ERROR(pkt)) { 13062 if (ns_cmd) { 13063 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13064 ASSERT(ns_cmd->ns_pd != NULL); 13065 13066 /* Mark it OLD if not already done */ 13067 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13068 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 13069 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13070 } 13071 13072 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13073 fctl_free_ns_cmd(ns_cmd); 13074 ((fp_cmd_t *) 13075 (pkt->pkt_ulp_private))->cmd_private = NULL; 13076 } 13077 13078 } 13079 13080 FP_TRACE(FP_NHEAD2(9, 0), "%x NS failure pkt state=%x" 13081 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X", 13082 port->fp_port_id.port_id, pkt->pkt_state, 13083 pkt->pkt_reason, pkt->pkt_expln, 13084 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp); 13085 13086 (void) fp_common_intr(pkt, 1); 13087 13088 return; 13089 } 13090 13091 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 13092 uint32_t d_id; 13093 fc_local_port_t *port; 13094 fp_cmd_t *cmd; 13095 13096 d_id = pkt->pkt_cmd_fhdr.d_id; 13097 cmd = pkt->pkt_ulp_private; 13098 port = cmd->cmd_port; 13099 FP_TRACE(FP_NHEAD2(9, 0), 13100 "Bogus NS response received for D_ID=%x", d_id); 13101 } 13102 13103 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 13104 fp_gan_handler(pkt, ns_cmd); 13105 return; 13106 } 13107 13108 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 13109 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 13110 if (ns_cmd) { 13111 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 13112 fp_ns_query_handler(pkt, ns_cmd); 13113 return; 13114 } 13115 } 13116 } 13117 13118 fp_iodone(pkt->pkt_ulp_private); 13119 } 13120 13121 13122 /* 13123 * Process NS_GAN response 13124 */ 13125 static 
void 13126 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13127 { 13128 int my_did; 13129 fc_portid_t d_id; 13130 fp_cmd_t *cmd; 13131 fc_local_port_t *port; 13132 fc_remote_port_t *pd; 13133 ns_req_gan_t gan_req; 13134 ns_resp_gan_t *gan_resp; 13135 13136 ASSERT(ns_cmd != NULL); 13137 13138 cmd = pkt->pkt_ulp_private; 13139 port = cmd->cmd_port; 13140 13141 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13142 13143 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id, 13144 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13145 13146 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13147 13148 /* 13149 * Here the priv_lilp_posit field actually represents 13150 * the relative position on a private loop, so zero it 13151 * while dealing with Port Identifiers. 13152 */ 13153 d_id.priv_lilp_posit = 0; 13154 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13155 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13156 /* 13157 * We've come full circle; time to get out. 13158 */ 13159 fp_iodone(cmd); 13160 return; 13161 } 13162 13163 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13164 ns_cmd->ns_gan_sid = d_id.port_id; 13165 } 13166 13167 mutex_enter(&port->fp_mutex); 13168 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0; 13169 mutex_exit(&port->fp_mutex); 13170 13171 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port, 13172 port->fp_port_id.port_id, d_id.port_id); 13173 if (my_did == 0) { 13174 la_wwn_t pwwn; 13175 la_wwn_t nwwn; 13176 13177 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13178 "port=%p, d_id=%x, type_id=%x, " 13179 "pwwn=%x %x %x %x %x %x %x %x, " 13180 "nwwn=%x %x %x %x %x %x %x %x", 13181 port, d_id.port_id, gan_resp->gan_type_id, 13182 13183 gan_resp->gan_pwwn.raw_wwn[0], 13184 gan_resp->gan_pwwn.raw_wwn[1], 13185 gan_resp->gan_pwwn.raw_wwn[2], 13186 gan_resp->gan_pwwn.raw_wwn[3], 13187 gan_resp->gan_pwwn.raw_wwn[4], 13188 gan_resp->gan_pwwn.raw_wwn[5], 13189 gan_resp->gan_pwwn.raw_wwn[6], 13190 gan_resp->gan_pwwn.raw_wwn[7], 13191 13192 gan_resp->gan_nwwn.raw_wwn[0], 13193 gan_resp->gan_nwwn.raw_wwn[1], 13194 gan_resp->gan_nwwn.raw_wwn[2], 13195 gan_resp->gan_nwwn.raw_wwn[3], 13196 gan_resp->gan_nwwn.raw_wwn[4], 13197 gan_resp->gan_nwwn.raw_wwn[5], 13198 gan_resp->gan_nwwn.raw_wwn[6], 13199 gan_resp->gan_nwwn.raw_wwn[7]); 13200 13201 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13202 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13203 DDI_DEV_AUTOINCR); 13204 13205 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13206 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13207 DDI_DEV_AUTOINCR); 13208 13209 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13210 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_handler create" 13211 " pd %x", port->fp_port_id.port_id, d_id.port_id); 13212 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13213 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13214 } 13215 if (pd != NULL) { 13216 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13217 pd, gan_resp); 13218 } 13219 13220 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13221 *((int *)ns_cmd->ns_data_buf) += 1; 13222 } 13223 13224 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13225 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13226 13227 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13228 fc_port_dev_t *userbuf; 13229 13230 userbuf = ((fc_port_dev_t *) 13231 ns_cmd->ns_data_buf) + 13232 ns_cmd->ns_gan_index++; 13233 13234 userbuf->dev_did = d_id; 13235 13236
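/*
 * Descriptive note (added): the lines below fill the rest of the
 * caller-supplied fc_port_dev_t entry: the FC-4 type bitmap straight
 * from the GA_NXT response, the node and port WWNs decoded above, and
 * finally the device state/hard address if we already hold a remote
 * port handle for this D_ID.
 */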
FC_GET_RSP(port, pkt->pkt_resp_acc, 13237 (uint8_t *)userbuf->dev_type, 13238 (uint8_t *)gan_resp->gan_fc4types, 13239 sizeof (userbuf->dev_type), 13240 DDI_DEV_AUTOINCR); 13241 13242 userbuf->dev_nwwn = nwwn; 13243 userbuf->dev_pwwn = pwwn; 13244 13245 if (pd != NULL) { 13246 mutex_enter(&pd->pd_mutex); 13247 userbuf->dev_state = pd->pd_state; 13248 userbuf->dev_hard_addr = 13249 pd->pd_hard_addr; 13250 mutex_exit(&pd->pd_mutex); 13251 } else { 13252 userbuf->dev_state = 13253 PORT_DEVICE_INVALID; 13254 } 13255 } else if (ns_cmd->ns_flags & 13256 FCTL_NS_BUF_IS_FC_PORTMAP) { 13257 fc_portmap_t *map; 13258 13259 map = ((fc_portmap_t *) 13260 ns_cmd->ns_data_buf) + 13261 ns_cmd->ns_gan_index++; 13262 13263 /* 13264 * First fill it like any new map 13265 * and update the port device info 13266 * below. 13267 */ 13268 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13269 map, gan_resp, d_id.port_id); 13270 if (pd != NULL) { 13271 fctl_copy_portmap(map, pd); 13272 } else { 13273 map->map_state = PORT_DEVICE_INVALID; 13274 map->map_type = PORT_DEVICE_NOCHANGE; 13275 } 13276 } else { 13277 caddr_t dst_ptr; 13278 13279 dst_ptr = ns_cmd->ns_data_buf + 13280 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13281 13282 FC_GET_RSP(port, pkt->pkt_resp_acc, 13283 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13284 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13285 } 13286 } else { 13287 ns_cmd->ns_gan_index++; 13288 } 13289 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13290 fp_iodone(cmd); 13291 return; 13292 } 13293 } 13294 13295 gan_req.pid = d_id; 13296 13297 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13298 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13299 sizeof (gan_req), DDI_DEV_AUTOINCR); 13300 13301 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13302 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13303 fp_iodone(cmd); 13304 } else { 13305 mutex_enter(&port->fp_mutex); 13306 port->fp_out_fpcmds++; 13307 mutex_exit(&port->fp_mutex); 13308 } 13309 } 13310 13311 13312 /* 13313 * Handle NS Query interrupt 13314 */ 13315 static void 13316 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13317 { 13318 fp_cmd_t *cmd; 13319 fc_local_port_t *port; 13320 caddr_t src_ptr; 13321 uint32_t xfer_len; 13322 13323 cmd = pkt->pkt_ulp_private; 13324 port = cmd->cmd_port; 13325 13326 xfer_len = ns_cmd->ns_resp_size; 13327 13328 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13329 ns_cmd->ns_cmd_code, xfer_len); 13330 13331 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13332 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13333 13334 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13335 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13336 } 13337 13338 if (xfer_len <= ns_cmd->ns_data_len) { 13339 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13340 FC_GET_RSP(port, pkt->pkt_resp_acc, 13341 (uint8_t *)ns_cmd->ns_data_buf, 13342 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13343 } 13344 13345 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13346 ASSERT(ns_cmd->ns_pd != NULL); 13347 13348 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13349 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13350 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13351 } 13352 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13353 } 13354 13355 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13356 fctl_free_ns_cmd(ns_cmd); 13357 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13358 } 13359 fp_iodone(cmd); 13360 } 13361 13362 13363 /* 13364 * Handle 
unsolicited ADISC ELS request 13365 */ 13366 static void 13367 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13368 fc_remote_port_t *pd, job_request_t *job) 13369 { 13370 int rval; 13371 fp_cmd_t *cmd; 13372 13373 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13374 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13375 mutex_enter(&pd->pd_mutex); 13376 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13377 mutex_exit(&pd->pd_mutex); 13378 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13379 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13380 0, KM_SLEEP, pd); 13381 if (cmd != NULL) { 13382 fp_els_rjt_init(port, cmd, buf, 13383 FC_ACTION_NON_RETRYABLE, 13384 FC_REASON_INVALID_LINK_CTRL, job); 13385 13386 if (fp_sendcmd(port, cmd, 13387 port->fp_fca_handle) != FC_SUCCESS) { 13388 fp_free_pkt(cmd); 13389 } 13390 } 13391 } 13392 } else { 13393 mutex_exit(&pd->pd_mutex); 13394 /* 13395 * We don't have a hard address, but we should 13396 * still respond anyway. See section 21.19.2 of 13397 * FC-PH-2, which essentially says that if an 13398 * NL_Port doesn't have a hard address, or if a port 13399 * does not have FC-AL capability, it shall report 13400 * zeroes in this field. 13401 */ 13402 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13403 0, KM_SLEEP, pd); 13404 if (cmd == NULL) { 13405 return; 13406 } 13407 fp_adisc_acc_init(port, cmd, buf, job); 13408 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13409 if (rval != FC_SUCCESS) { 13410 fp_free_pkt(cmd); 13411 } 13412 } 13413 } 13414 13415 13416 /* 13417 * Initialize ADISC response. 13418 */ 13419 static void 13420 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13421 job_request_t *job) 13422 { 13423 fc_packet_t *pkt; 13424 la_els_adisc_t payload; 13425 13426 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13427 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13428 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13429 cmd->cmd_retry_count = 1; 13430 cmd->cmd_ulp_pkt = NULL; 13431 13432 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13433 cmd->cmd_job = job; 13434 13435 pkt = &cmd->cmd_pkt; 13436 13437 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13438 13439 payload.ls_code.ls_code = LA_ELS_ACC; 13440 payload.ls_code.mbz = 0; 13441 13442 mutex_enter(&port->fp_mutex); 13443 payload.nport_id = port->fp_port_id; 13444 payload.hard_addr = port->fp_hard_addr; 13445 mutex_exit(&port->fp_mutex); 13446 13447 payload.port_wwn = port->fp_service_params.nport_ww_name; 13448 payload.node_wwn = port->fp_service_params.node_ww_name; 13449 13450 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 13451 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13452 } 13453 13454 13455 /* 13456 * Hold and install the requested ULP drivers 13457 */ 13458 static void 13459 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13460 { 13461 int len; 13462 int count; 13463 int data_len; 13464 major_t ulp_major; 13465 caddr_t ulp_name; 13466 caddr_t data_ptr; 13467 caddr_t data_buf; 13468 13469 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13470 13471 data_buf = NULL; 13472 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13473 DDI_PROP_DONTPASS, "load-ulp-list", 13474 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13475 return; 13476 } 13477 13478 len = strlen(data_buf); 13479 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13480 13481 data_ptr = data_buf + len + 1; 13482 for (count = 0; count < port->fp_ulp_nload; count++) { 13483 len = strlen(data_ptr) + 1; 13484
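/*
 * Descriptive note (added): data_ptr now points at the next
 * NUL-terminated driver name from the "load-ulp-list" property (a
 * decimal count string followed by the names), and len includes the
 * terminating NUL so the copy below picks it up as well.
 */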
ulp_name = kmem_zalloc(len, KM_SLEEP); 13485 bcopy(data_ptr, ulp_name, len); 13486 13487 ulp_major = ddi_name_to_major(ulp_name); 13488 13489 if (ulp_major != (major_t)-1) { 13490 if (modload("drv", ulp_name) < 0) { 13491 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13492 0, NULL, "failed to load %s", 13493 ulp_name); 13494 } 13495 } else { 13496 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13497 "%s isn't a valid driver", ulp_name); 13498 } 13499 13500 kmem_free(ulp_name, len); 13501 data_ptr += len; /* Skip to next field */ 13502 } 13503 13504 /* 13505 * Free the memory allocated by DDI 13506 */ 13507 if (data_buf != NULL) { 13508 kmem_free(data_buf, data_len); 13509 } 13510 } 13511 13512 13513 /* 13514 * Perform LOGO operation 13515 */ 13516 static int 13517 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13518 { 13519 int rval; 13520 fp_cmd_t *cmd; 13521 13522 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13523 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13524 13525 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13526 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13527 13528 mutex_enter(&port->fp_mutex); 13529 mutex_enter(&pd->pd_mutex); 13530 13531 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13532 ASSERT(pd->pd_login_count == 1); 13533 13534 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13535 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13536 cmd->cmd_flags = 0; 13537 cmd->cmd_retry_count = 1; 13538 cmd->cmd_ulp_pkt = NULL; 13539 13540 fp_logo_init(pd, cmd, job); 13541 13542 mutex_exit(&pd->pd_mutex); 13543 mutex_exit(&port->fp_mutex); 13544 13545 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13546 if (rval != FC_SUCCESS) { 13547 fp_iodone(cmd); 13548 } 13549 13550 return (rval); 13551 } 13552 13553 13554 /* 13555 * Perform Port attach callbacks to registered ULPs 13556 */ 13557 static void 13558 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13559 { 13560 fp_soft_attach_t *att; 13561 13562 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13563 att->att_cmd = cmd; 13564 att->att_port = port; 13565 13566 /* 13567 * We need to remember whether or not fctl_busy_port 13568 * succeeded so we know whether or not to call 13569 * fctl_idle_port when the task is complete. 13570 */ 13571 13572 if (fctl_busy_port(port) == 0) { 13573 att->att_need_pm_idle = B_TRUE; 13574 } else { 13575 att->att_need_pm_idle = B_FALSE; 13576 } 13577 13578 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13579 att, KM_SLEEP); 13580 } 13581 13582 13583 /* 13584 * Forward state change notifications on to interested ULPs. 13585 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13586 * real work. 
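 * The notification carries only the new state and the current
 * topology: clist_map is NULL and clist_len/clist_size are zero, so
 * ULPs see a pure state change with no accompanying device list.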
13587 */ 13588 static int 13589 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13590 { 13591 fc_port_clist_t *clist; 13592 13593 clist = kmem_zalloc(sizeof (*clist), sleep); 13594 if (clist == NULL) { 13595 return (FC_NOMEM); 13596 } 13597 13598 clist->clist_state = statec; 13599 13600 mutex_enter(&port->fp_mutex); 13601 clist->clist_flags = port->fp_topology; 13602 mutex_exit(&port->fp_mutex); 13603 13604 clist->clist_port = (opaque_t)port; 13605 clist->clist_len = 0; 13606 clist->clist_size = 0; 13607 clist->clist_map = NULL; 13608 13609 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13610 clist, KM_SLEEP); 13611 13612 return (FC_SUCCESS); 13613 } 13614 13615 13616 /* 13617 * Get name server map 13618 */ 13619 static int 13620 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13621 uint32_t *len, uint32_t sid) 13622 { 13623 int ret; 13624 fctl_ns_req_t *ns_cmd; 13625 13626 /* 13627 * Don't let the allocator do anything for response; 13628 * we have have buffer ready to fillout. 13629 */ 13630 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13631 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13632 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13633 13634 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13635 ns_cmd->ns_data_buf = (caddr_t)*map; 13636 13637 ASSERT(ns_cmd != NULL); 13638 13639 ns_cmd->ns_gan_index = 0; 13640 ns_cmd->ns_gan_sid = sid; 13641 ns_cmd->ns_cmd_code = NS_GA_NXT; 13642 ns_cmd->ns_gan_max = *len; 13643 13644 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13645 13646 if (ns_cmd->ns_gan_index != *len) { 13647 *len = ns_cmd->ns_gan_index; 13648 } 13649 ns_cmd->ns_data_len = 0; 13650 ns_cmd->ns_data_buf = NULL; 13651 fctl_free_ns_cmd(ns_cmd); 13652 13653 return (ret); 13654 } 13655 13656 13657 /* 13658 * Create a remote port in Fabric topology by using NS services 13659 */ 13660 static fc_remote_port_t * 13661 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13662 { 13663 int rval; 13664 job_request_t *job; 13665 fctl_ns_req_t *ns_cmd; 13666 fc_remote_port_t *pd; 13667 13668 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13669 13670 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13671 port, d_id); 13672 13673 #ifdef DEBUG 13674 mutex_enter(&port->fp_mutex); 13675 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13676 mutex_exit(&port->fp_mutex); 13677 #endif 13678 13679 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13680 if (job == NULL) { 13681 return (NULL); 13682 } 13683 13684 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13685 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13686 FCTL_NS_NO_DATA_BUF), sleep); 13687 if (ns_cmd == NULL) { 13688 return (NULL); 13689 } 13690 13691 job->job_result = FC_SUCCESS; 13692 ns_cmd->ns_gan_max = 1; 13693 ns_cmd->ns_cmd_code = NS_GA_NXT; 13694 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13695 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13696 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13697 13698 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13699 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13700 fctl_free_ns_cmd(ns_cmd); 13701 13702 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13703 fctl_dealloc_job(job); 13704 return (NULL); 13705 } 13706 fctl_dealloc_job(job); 13707 13708 pd = fctl_get_remote_port_by_did(port, d_id); 13709 13710 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13711 port, d_id, pd); 13712 13713 return (pd); 
13714 } 13715 13716 13717 /* 13718 * Check for the permissions on an ioctl command. If it is required to have an 13719 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13720 * the ioctl command isn't in one of the list built, shut the door on that too. 13721 * 13722 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13723 * to be made sure that users open the port for an exclusive access while 13724 * performing those operations. 13725 * 13726 * This can prevent a casual user from inflicting damage on the port by 13727 * sending these ioctls from multiple processes/threads (there is no good 13728 * reason why one would need to do that) without actually realizing how 13729 * expensive such commands could turn out to be. 13730 * 13731 * It is also important to note that, even with an exclusive access, 13732 * multiple threads can share the same file descriptor and fire down 13733 * commands in parallel. To prevent that the driver needs to make sure 13734 * that such commands aren't in progress already. This is taken care of 13735 * in the FP_EXCL_BUSY bit of fp_flag. 13736 */ 13737 static int 13738 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13739 { 13740 int ret = FC_FAILURE; 13741 int count; 13742 13743 for (count = 0; 13744 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13745 count++) { 13746 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13747 if (fp_perm_list[count].fp_open_flag & open_flag) { 13748 ret = FC_SUCCESS; 13749 } 13750 break; 13751 } 13752 } 13753 13754 return (ret); 13755 } 13756 13757 13758 /* 13759 * Bind Port driver's unsolicited, state change callbacks 13760 */ 13761 static int 13762 fp_bind_callbacks(fc_local_port_t *port) 13763 { 13764 fc_fca_bind_info_t bind_info = {0}; 13765 fc_fca_port_info_t *port_info; 13766 int rval = DDI_SUCCESS; 13767 uint16_t class; 13768 int node_namelen, port_namelen; 13769 char *nname = NULL, *pname = NULL; 13770 13771 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13772 13773 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13774 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13775 "node-name", &nname) != DDI_PROP_SUCCESS) { 13776 FP_TRACE(FP_NHEAD1(1, 0), 13777 "fp_bind_callback fail to get node-name"); 13778 } 13779 if (nname) { 13780 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13781 } 13782 13783 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13784 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13785 "port-name", &pname) != DDI_PROP_SUCCESS) { 13786 FP_TRACE(FP_NHEAD1(1, 0), 13787 "fp_bind_callback fail to get port-name"); 13788 } 13789 if (pname) { 13790 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13791 } 13792 13793 if (port->fp_npiv_type == FC_NPIV_PORT) { 13794 bind_info.port_npiv = 1; 13795 } 13796 13797 /* 13798 * fca_bind_port returns the FCA driver's handle for the local 13799 * port instance. If the port number isn't supported it returns NULL. 13800 * It also sets up callback in the FCA for various 13801 * things like state change, ELS etc.. 
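 * On a successful bind the FCA also fills in the fc_fca_port_info_t
 * with its current port state, login (service) parameters, hard
 * address, HBA attributes and RNID parameters; those are copied into
 * the fc_local_port_t below while fp_mutex is held.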
13802 */ 13803 bind_info.port_statec_cb = fp_statec_cb; 13804 bind_info.port_unsol_cb = fp_unsol_cb; 13805 bind_info.port_num = port->fp_port_num; 13806 bind_info.port_handle = (opaque_t)port; 13807 13808 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13809 13810 /* 13811 * Hold the port driver mutex as the callbacks are bound until the 13812 * service parameters are properly filled in (in order to be able to 13813 * properly respond to unsolicited ELS requests) 13814 */ 13815 mutex_enter(&port->fp_mutex); 13816 13817 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13818 port->fp_fca_dip, port_info, &bind_info); 13819 13820 if (port->fp_fca_handle == NULL) { 13821 rval = DDI_FAILURE; 13822 goto exit; 13823 } 13824 13825 /* 13826 * Only fcoei will set this bit 13827 */ 13828 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) { 13829 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA; 13830 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA); 13831 } 13832 13833 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13834 port->fp_service_params = port_info->pi_login_params; 13835 port->fp_hard_addr = port_info->pi_hard_addr; 13836 13837 /* Copy from the FCA structure to the FP structure */ 13838 port->fp_hba_port_attrs = port_info->pi_attrs; 13839 13840 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13841 port->fp_rnid_init = 1; 13842 bcopy(&port_info->pi_rnid_params.params, 13843 &port->fp_rnid_params, 13844 sizeof (port->fp_rnid_params)); 13845 } else { 13846 port->fp_rnid_init = 0; 13847 } 13848 13849 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13850 if (node_namelen) { 13851 bcopy(&port_info->pi_attrs.sym_node_name, 13852 &port->fp_sym_node_name, 13853 node_namelen); 13854 port->fp_sym_node_namelen = node_namelen; 13855 } 13856 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13857 if (port_namelen) { 13858 bcopy(&port_info->pi_attrs.sym_port_name, 13859 &port->fp_sym_port_name, 13860 port_namelen); 13861 port->fp_sym_port_namelen = port_namelen; 13862 } 13863 13864 /* zero out the normally unused fields right away */ 13865 port->fp_service_params.ls_code.mbz = 0; 13866 port->fp_service_params.ls_code.ls_code = 0; 13867 bzero(&port->fp_service_params.reserved, 13868 sizeof (port->fp_service_params.reserved)); 13869 13870 class = port_info->pi_login_params.class_1.class_opt; 13871 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13872 13873 class = port_info->pi_login_params.class_2.class_opt; 13874 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13875 13876 class = port_info->pi_login_params.class_3.class_opt; 13877 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13878 13879 exit: 13880 if (nname) { 13881 ddi_prop_free(nname); 13882 } 13883 if (pname) { 13884 ddi_prop_free(pname); 13885 } 13886 mutex_exit(&port->fp_mutex); 13887 kmem_free(port_info, sizeof (*port_info)); 13888 13889 return (rval); 13890 } 13891 13892 13893 /* 13894 * Retrieve FCA capabilities 13895 */ 13896 static void 13897 fp_retrieve_caps(fc_local_port_t *port) 13898 { 13899 int rval; 13900 int ub_count; 13901 fc_fcp_dma_t fcp_dma; 13902 fc_reset_action_t action; 13903 fc_dma_behavior_t dma_behavior; 13904 13905 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13906 13907 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13908 FC_CAP_UNSOL_BUF, &ub_count); 13909 13910 switch (rval) { 13911 case FC_CAP_FOUND: 13912 case FC_CAP_SETTABLE: 13913 switch (ub_count) { 13914 case 0: 13915 break; 13916 13917 case -1: 13918 ub_count = fp_unsol_buf_count; 13919 break; 13920 13921 default: 13922 /* 1/4th of total buffers is my share */ 13923 ub_count = 13924 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13925 break; 13926 } 13927 break; 13928 13929 default: 13930 ub_count = 0; 13931 break; 13932 } 13933 13934 mutex_enter(&port->fp_mutex); 13935 port->fp_ub_count = ub_count; 13936 mutex_exit(&port->fp_mutex); 13937 13938 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13939 FC_CAP_POST_RESET_BEHAVIOR, &action); 13940 13941 switch (rval) { 13942 case FC_CAP_FOUND: 13943 case FC_CAP_SETTABLE: 13944 switch (action) { 13945 case FC_RESET_RETURN_NONE: 13946 case FC_RESET_RETURN_ALL: 13947 case FC_RESET_RETURN_OUTSTANDING: 13948 break; 13949 13950 default: 13951 action = FC_RESET_RETURN_NONE; 13952 break; 13953 } 13954 break; 13955 13956 default: 13957 action = FC_RESET_RETURN_NONE; 13958 break; 13959 } 13960 mutex_enter(&port->fp_mutex); 13961 port->fp_reset_action = action; 13962 mutex_exit(&port->fp_mutex); 13963 13964 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13965 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13966 13967 switch (rval) { 13968 case FC_CAP_FOUND: 13969 switch (dma_behavior) { 13970 case FC_ALLOW_STREAMING: 13971 /* FALLTHROUGH */ 13972 case FC_NO_STREAMING: 13973 break; 13974 13975 default: 13976 /* 13977 * If capability was found and the value 13978 * was incorrect assume the worst 13979 */ 13980 dma_behavior = FC_NO_STREAMING; 13981 break; 13982 } 13983 break; 13984 13985 default: 13986 /* 13987 * If capability was not defined - allow streaming; existing 13988 * FCAs should not be affected. 13989 */ 13990 dma_behavior = FC_ALLOW_STREAMING; 13991 break; 13992 } 13993 mutex_enter(&port->fp_mutex); 13994 port->fp_dma_behavior = dma_behavior; 13995 mutex_exit(&port->fp_mutex); 13996 13997 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13998 FC_CAP_FCP_DMA, &fcp_dma); 13999 14000 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 14001 fcp_dma != FC_DVMA_SPACE)) { 14002 fcp_dma = FC_DVMA_SPACE; 14003 } 14004 14005 mutex_enter(&port->fp_mutex); 14006 port->fp_fcp_dma = fcp_dma; 14007 mutex_exit(&port->fp_mutex); 14008 } 14009 14010 14011 /* 14012 * Handle Domain, Area changes in the Fabric. 
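 * 'id' and 'mask' select the affected address range: every remote
 * port whose (D_ID & mask) equals id is re-validated against the
 * name server. For example, an RSCN page affecting domain 0xAB would
 * use id=0xAB0000 with mask=0xFF0000, while an area page would also
 * cover the area byte (mask 0xFFFF00).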
14013 */ 14014 static void 14015 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 14016 job_request_t *job, int sleep) 14017 { 14018 #ifdef DEBUG 14019 uint32_t dcnt; 14020 #endif 14021 int rval; 14022 int send; 14023 int index; 14024 int listindex; 14025 int login; 14026 int job_flags; 14027 char ww_name[17]; 14028 uint32_t d_id; 14029 uint32_t count; 14030 fctl_ns_req_t *ns_cmd; 14031 fc_portmap_t *list; 14032 fc_orphan_t *orp; 14033 fc_orphan_t *norp; 14034 fc_orphan_t *prev; 14035 fc_remote_port_t *pd; 14036 fc_remote_port_t *npd; 14037 struct pwwn_hash *head; 14038 14039 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14040 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14041 0, sleep); 14042 if (ns_cmd == NULL) { 14043 mutex_enter(&port->fp_mutex); 14044 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14045 --port->fp_rscn_count; 14046 } 14047 mutex_exit(&port->fp_mutex); 14048 14049 return; 14050 } 14051 ns_cmd->ns_cmd_code = NS_GID_PN; 14052 14053 /* 14054 * We need to get a new count of devices from the 14055 * name server, which will also create any new devices 14056 * as needed. 14057 */ 14058 14059 (void) fp_ns_get_devcount(port, job, 1, sleep); 14060 14061 FP_TRACE(FP_NHEAD1(3, 0), 14062 "fp_validate_area_domain: get_devcount found %d devices", 14063 port->fp_total_devices); 14064 14065 mutex_enter(&port->fp_mutex); 14066 14067 for (count = index = 0; index < pwwn_table_size; index++) { 14068 head = &port->fp_pwwn_table[index]; 14069 pd = head->pwwn_head; 14070 while (pd != NULL) { 14071 mutex_enter(&pd->pd_mutex); 14072 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14073 if ((pd->pd_port_id.port_id & mask) == id && 14074 pd->pd_recepient == PD_PLOGI_INITIATOR) { 14075 count++; 14076 pd->pd_type = PORT_DEVICE_OLD; 14077 pd->pd_flags = PD_ELS_MARK; 14078 } 14079 } 14080 mutex_exit(&pd->pd_mutex); 14081 pd = pd->pd_wwn_hnext; 14082 } 14083 } 14084 14085 #ifdef DEBUG 14086 dcnt = count; 14087 #endif /* DEBUG */ 14088 14089 /* 14090 * Since port->fp_orphan_count is declared an 'int' it is 14091 * theoretically possible that the count could go negative. 14092 * 14093 * This would be bad and if that happens we really do want 14094 * to know. 14095 */ 14096 14097 ASSERT(port->fp_orphan_count >= 0); 14098 14099 count += port->fp_orphan_count; 14100 14101 /* 14102 * We add the port->fp_total_devices value to the count 14103 * in the case where our port is newly attached. This is 14104 * because we haven't done any discovery and we don't have 14105 * any orphans in the port's orphan list. If we do not do 14106 * this addition to count then we won't alloc enough kmem 14107 * to do discovery with. 
14108 */ 14109 14110 if (count == 0) { 14111 count += port->fp_total_devices; 14112 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 14113 "0x%x orphans found, using 0x%x", 14114 port->fp_orphan_count, count); 14115 } 14116 14117 mutex_exit(&port->fp_mutex); 14118 14119 /* 14120 * Allocate the change list 14121 */ 14122 14123 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 14124 if (list == NULL) { 14125 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 14126 " Not enough memory to service RSCNs" 14127 " for %d ports, continuing...", count); 14128 14129 fctl_free_ns_cmd(ns_cmd); 14130 14131 mutex_enter(&port->fp_mutex); 14132 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14133 --port->fp_rscn_count; 14134 } 14135 mutex_exit(&port->fp_mutex); 14136 14137 return; 14138 } 14139 14140 /* 14141 * Attempt to validate or invalidate the devices that were 14142 * already in the pwwn hash table. 14143 */ 14144 14145 mutex_enter(&port->fp_mutex); 14146 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14147 head = &port->fp_pwwn_table[index]; 14148 npd = head->pwwn_head; 14149 14150 while ((pd = npd) != NULL) { 14151 npd = pd->pd_wwn_hnext; 14152 14153 mutex_enter(&pd->pd_mutex); 14154 if ((pd->pd_port_id.port_id & mask) == id && 14155 pd->pd_flags == PD_ELS_MARK) { 14156 la_wwn_t *pwwn; 14157 14158 job->job_result = FC_SUCCESS; 14159 14160 ((ns_req_gid_pn_t *) 14161 (ns_cmd->ns_cmd_buf))->pwwn = 14162 pd->pd_port_name; 14163 14164 pwwn = &pd->pd_port_name; 14165 d_id = pd->pd_port_id.port_id; 14166 14167 mutex_exit(&pd->pd_mutex); 14168 mutex_exit(&port->fp_mutex); 14169 14170 rval = fp_ns_query(port, ns_cmd, job, 1, 14171 sleep); 14172 if (rval != FC_SUCCESS) { 14173 fc_wwn_to_str(pwwn, ww_name); 14174 14175 FP_TRACE(FP_NHEAD1(3, 0), 14176 "AREA RSCN: PD disappeared; " 14177 "d_id=%x, PWWN=%s", d_id, ww_name); 14178 14179 FP_TRACE(FP_NHEAD2(9, 0), 14180 "N_x Port with D_ID=%x," 14181 " PWWN=%s disappeared from fabric", 14182 d_id, ww_name); 14183 14184 fp_fillout_old_map(list + listindex++, 14185 pd, 1); 14186 } else { 14187 fctl_copy_portmap(list + listindex++, 14188 pd); 14189 14190 mutex_enter(&pd->pd_mutex); 14191 pd->pd_flags = PD_ELS_IN_PROGRESS; 14192 mutex_exit(&pd->pd_mutex); 14193 } 14194 14195 mutex_enter(&port->fp_mutex); 14196 } else { 14197 mutex_exit(&pd->pd_mutex); 14198 } 14199 } 14200 } 14201 14202 mutex_exit(&port->fp_mutex); 14203 14204 ASSERT(listindex == dcnt); 14205 14206 job->job_counter = listindex; 14207 job_flags = job->job_flags; 14208 job->job_flags |= JOB_TYPE_FP_ASYNC; 14209 14210 /* 14211 * Login (if we were the initiator) or validate devices in the 14212 * port map. 14213 */ 14214 14215 for (index = 0; index < listindex; index++) { 14216 pd = list[index].map_pd; 14217 14218 mutex_enter(&pd->pd_mutex); 14219 ASSERT((pd->pd_port_id.port_id & mask) == id); 14220 14221 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14222 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14223 mutex_exit(&pd->pd_mutex); 14224 fp_jobdone(job); 14225 continue; 14226 } 14227 14228 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14229 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14230 d_id = pd->pd_port_id.port_id; 14231 mutex_exit(&pd->pd_mutex); 14232 14233 if ((d_id & mask) == id && send) { 14234 if (login) { 14235 FP_TRACE(FP_NHEAD1(6, 0), 14236 "RSCN and PLOGI request;" 14237 " pd=%p, job=%p d_id=%x, index=%d", pd, 14238 job, d_id, index); 14239 14240 rval = fp_port_login(port, d_id, job, 14241 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14242 if (rval != FC_SUCCESS) { 14243 mutex_enter(&pd->pd_mutex); 14244 pd->pd_flags = PD_IDLE; 14245 mutex_exit(&pd->pd_mutex); 14246 14247 job->job_result = rval; 14248 fp_jobdone(job); 14249 } 14250 FP_TRACE(FP_NHEAD1(1, 0), 14251 "PLOGI succeeded:no skip(1) for " 14252 "D_ID %x", d_id); 14253 list[index].map_flags |= 14254 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14255 } else { 14256 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14257 " pd=%p, job=%p d_id=%x, index=%d", pd, 14258 job, d_id, index); 14259 14260 rval = fp_ns_validate_device(port, pd, job, 14261 0, sleep); 14262 if (rval != FC_SUCCESS) { 14263 fp_jobdone(job); 14264 } 14265 mutex_enter(&pd->pd_mutex); 14266 pd->pd_flags = PD_IDLE; 14267 mutex_exit(&pd->pd_mutex); 14268 } 14269 } else { 14270 FP_TRACE(FP_NHEAD1(6, 0), 14271 "RSCN and NO request sent; pd=%p," 14272 " d_id=%x, index=%d", pd, d_id, index); 14273 14274 mutex_enter(&pd->pd_mutex); 14275 pd->pd_flags = PD_IDLE; 14276 mutex_exit(&pd->pd_mutex); 14277 14278 fp_jobdone(job); 14279 } 14280 } 14281 14282 if (listindex) { 14283 fctl_jobwait(job); 14284 } 14285 job->job_flags = job_flags; 14286 14287 /* 14288 * Orphan list validation. 14289 */ 14290 mutex_enter(&port->fp_mutex); 14291 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14292 orp != NULL; orp = norp) { 14293 norp = orp->orp_next; 14294 mutex_exit(&port->fp_mutex); 14295 14296 job->job_counter = 1; 14297 job->job_result = FC_SUCCESS; 14298 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14299 14300 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14301 14302 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14303 ((ns_resp_gid_pn_t *) 14304 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14305 14306 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14307 if (rval == FC_SUCCESS) { 14308 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14309 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14310 if (pd != NULL) { 14311 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14312 14313 FP_TRACE(FP_NHEAD1(6, 0), 14314 "RSCN and ORPHAN list " 14315 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14316 14317 FP_TRACE(FP_NHEAD2(6, 0), 14318 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14319 " in fabric", d_id, ww_name); 14320 14321 mutex_enter(&port->fp_mutex); 14322 if (prev) { 14323 prev->orp_next = orp->orp_next; 14324 } else { 14325 ASSERT(orp == port->fp_orphan_list); 14326 port->fp_orphan_list = orp->orp_next; 14327 } 14328 port->fp_orphan_count--; 14329 mutex_exit(&port->fp_mutex); 14330 14331 kmem_free(orp, sizeof (*orp)); 14332 fctl_copy_portmap(list + listindex++, pd); 14333 } else { 14334 prev = orp; 14335 } 14336 } else { 14337 prev = orp; 14338 } 14339 mutex_enter(&port->fp_mutex); 14340 } 14341 mutex_exit(&port->fp_mutex); 14342 14343 /* 14344 * One more pass through the list to delist old devices from 14345 * the d_id and pwwn tables and possibly add to the orphan list. 
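 * Devices still marked PORT_DEVICE_OLD are invalidated and taken out
 * of both tables here; if we were the PLOGI initiator they are parked
 * on the orphan list so that a later RSCN can resurrect them.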
14346 */ 14347 14348 for (index = 0; index < listindex; index++) { 14349 pd = list[index].map_pd; 14350 ASSERT(pd != NULL); 14351 14352 /* 14353 * Update PLOGI results; For NS validation 14354 * of orphan list, it is redundant 14355 * 14356 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14357 * appropriate as fctl_copy_portmap() will clear map_flags. 14358 */ 14359 if (list[index].map_flags & 14360 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14361 fctl_copy_portmap(list + index, pd); 14362 list[index].map_flags |= 14363 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14364 } else { 14365 fctl_copy_portmap(list + index, pd); 14366 } 14367 14368 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14369 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14370 pd, pd->pd_port_id.port_id, 14371 pd->pd_port_name.raw_wwn[0], 14372 pd->pd_port_name.raw_wwn[1], 14373 pd->pd_port_name.raw_wwn[2], 14374 pd->pd_port_name.raw_wwn[3], 14375 pd->pd_port_name.raw_wwn[4], 14376 pd->pd_port_name.raw_wwn[5], 14377 pd->pd_port_name.raw_wwn[6], 14378 pd->pd_port_name.raw_wwn[7]); 14379 14380 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14381 "results continued, pd=%p type=%x, flags=%x, state=%x", 14382 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14383 14384 mutex_enter(&pd->pd_mutex); 14385 if (pd->pd_type == PORT_DEVICE_OLD) { 14386 int initiator; 14387 14388 pd->pd_flags = PD_IDLE; 14389 initiator = (pd->pd_recepient == 14390 PD_PLOGI_INITIATOR) ? 1 : 0; 14391 14392 mutex_exit(&pd->pd_mutex); 14393 14394 mutex_enter(&port->fp_mutex); 14395 mutex_enter(&pd->pd_mutex); 14396 14397 pd->pd_state = PORT_DEVICE_INVALID; 14398 fctl_delist_did_table(port, pd); 14399 fctl_delist_pwwn_table(port, pd); 14400 14401 mutex_exit(&pd->pd_mutex); 14402 mutex_exit(&port->fp_mutex); 14403 14404 if (initiator) { 14405 (void) fctl_add_orphan(port, pd, sleep); 14406 } 14407 list[index].map_pd = pd; 14408 } else { 14409 ASSERT(pd->pd_flags == PD_IDLE); 14410 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14411 /* 14412 * Reset LOGO tolerance to zero 14413 */ 14414 fctl_tc_reset(&pd->pd_logo_tc); 14415 } 14416 mutex_exit(&pd->pd_mutex); 14417 } 14418 } 14419 14420 if (ns_cmd) { 14421 fctl_free_ns_cmd(ns_cmd); 14422 } 14423 if (listindex) { 14424 (void) fp_ulp_devc_cb(port, list, listindex, count, 14425 sleep, 0); 14426 } else { 14427 kmem_free(list, sizeof (*list) * count); 14428 14429 mutex_enter(&port->fp_mutex); 14430 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14431 --port->fp_rscn_count; 14432 } 14433 mutex_exit(&port->fp_mutex); 14434 } 14435 } 14436 14437 14438 /* 14439 * Work hard to make sense out of an RSCN page. 
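 * The affected D_ID is first resolved to a Port WWN via GPN_ID and
 * then matched against the existing D_ID and PWWN tables; the
 * possible combinations (both map to the same device, neither is
 * known, the PWWN moved to a new D_ID, the D_ID now has a new PWWN,
 * or the two lookups disagree) are handled as the separate cases
 * below.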
14440 */ 14441 static void 14442 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14443 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14444 int *listindex, int sleep) 14445 { 14446 int rval; 14447 char ww_name[17]; 14448 la_wwn_t *pwwn; 14449 fc_remote_port_t *pwwn_pd; 14450 fc_remote_port_t *did_pd; 14451 14452 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14453 14454 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14455 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14456 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14457 14458 if (did_pd != NULL) { 14459 mutex_enter(&did_pd->pd_mutex); 14460 if (did_pd->pd_flags != PD_IDLE) { 14461 mutex_exit(&did_pd->pd_mutex); 14462 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14463 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14464 port, page->aff_d_id, did_pd); 14465 return; 14466 } 14467 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14468 mutex_exit(&did_pd->pd_mutex); 14469 } 14470 14471 job->job_counter = 1; 14472 14473 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14474 14475 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14476 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14477 14478 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14479 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14480 14481 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14482 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14483 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14484 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14485 ns_cmd->ns_resp_hdr.ct_expln); 14486 14487 job->job_counter = 1; 14488 14489 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14490 /* 14491 * What this means is that the D_ID 14492 * disappeared from the Fabric. 14493 */ 14494 if (did_pd == NULL) { 14495 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14496 " NULL PD disappeared, rval=%x", rval); 14497 return; 14498 } 14499 14500 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14501 14502 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14503 (uint32_t)(uintptr_t)job->job_cb_arg; 14504 14505 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14506 14507 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14508 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14509 14510 FP_TRACE(FP_NHEAD2(9, 0), 14511 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14512 14513 FP_TRACE(FP_NHEAD2(9, 0), 14514 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14515 " fabric", page->aff_d_id, ww_name); 14516 14517 mutex_enter(&did_pd->pd_mutex); 14518 did_pd->pd_flags = PD_IDLE; 14519 mutex_exit(&did_pd->pd_mutex); 14520 14521 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14522 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14523 14524 return; 14525 } 14526 14527 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14528 14529 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14530 /* 14531 * There is no change. Do PLOGI again and add it to 14532 * ULP portmap baggage and return. Note: When RSCNs 14533 * arrive with per page states, the need for PLOGI 14534 * can be determined correctly. 
14535 */ 14536 mutex_enter(&pwwn_pd->pd_mutex); 14537 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14538 mutex_exit(&pwwn_pd->pd_mutex); 14539 14540 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14541 (uint32_t)(uintptr_t)job->job_cb_arg; 14542 14543 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14544 14545 mutex_enter(&pwwn_pd->pd_mutex); 14546 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14547 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14548 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14549 mutex_exit(&pwwn_pd->pd_mutex); 14550 14551 rval = fp_port_login(port, page->aff_d_id, job, 14552 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14553 if (rval == FC_SUCCESS) { 14554 fp_jobwait(job); 14555 rval = job->job_result; 14556 14557 /* 14558 * Reset LOGO tolerance to zero 14559 * Also we are the PLOGI initiator now. 14560 */ 14561 mutex_enter(&pwwn_pd->pd_mutex); 14562 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14563 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14564 mutex_exit(&pwwn_pd->pd_mutex); 14565 } 14566 14567 if (rval == FC_SUCCESS) { 14568 struct fc_portmap *map = 14569 listptr + *listindex - 1; 14570 14571 FP_TRACE(FP_NHEAD1(1, 0), 14572 "PLOGI succeeded: no skip(2)" 14573 " for D_ID %x", page->aff_d_id); 14574 map->map_flags |= 14575 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14576 } else { 14577 FP_TRACE(FP_NHEAD2(9, rval), 14578 "PLOGI to D_ID=%x failed", page->aff_d_id); 14579 14580 FP_TRACE(FP_NHEAD2(9, 0), 14581 "N_x Port with D_ID=%x, PWWN=%s" 14582 " disappeared from fabric", 14583 page->aff_d_id, ww_name); 14584 14585 fp_fillout_old_map(listptr + 14586 *listindex - 1, pwwn_pd, 0); 14587 } 14588 } else { 14589 mutex_exit(&pwwn_pd->pd_mutex); 14590 } 14591 14592 mutex_enter(&did_pd->pd_mutex); 14593 did_pd->pd_flags = PD_IDLE; 14594 mutex_exit(&did_pd->pd_mutex); 14595 14596 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14597 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14598 job->job_result, pwwn_pd); 14599 14600 return; 14601 } 14602 14603 if (did_pd == NULL && pwwn_pd == NULL) { 14604 14605 fc_orphan_t *orp = NULL; 14606 fc_orphan_t *norp = NULL; 14607 fc_orphan_t *prev = NULL; 14608 14609 /* 14610 * Hunt down the orphan list before giving up. 
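 * If the PWWN is sitting on the orphan list, unhook it here and then
 * try to re-create the remote port through the name server.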
14611 */ 14612 14613 mutex_enter(&port->fp_mutex); 14614 if (port->fp_orphan_count) { 14615 14616 for (orp = port->fp_orphan_list; orp; orp = norp) { 14617 norp = orp->orp_next; 14618 14619 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14620 prev = orp; 14621 continue; 14622 } 14623 14624 if (prev) { 14625 prev->orp_next = orp->orp_next; 14626 } else { 14627 ASSERT(orp == 14628 port->fp_orphan_list); 14629 port->fp_orphan_list = 14630 orp->orp_next; 14631 } 14632 port->fp_orphan_count--; 14633 break; 14634 } 14635 } 14636 14637 mutex_exit(&port->fp_mutex); 14638 pwwn_pd = fp_create_remote_port_by_ns(port, 14639 page->aff_d_id, sleep); 14640 14641 if (pwwn_pd != NULL) { 14642 14643 if (orp) { 14644 fc_wwn_to_str(&orp->orp_pwwn, 14645 ww_name); 14646 14647 FP_TRACE(FP_NHEAD2(9, 0), 14648 "N_x Port with D_ID=%x," 14649 " PWWN=%s reappeared in fabric", 14650 page->aff_d_id, ww_name); 14651 14652 kmem_free(orp, sizeof (*orp)); 14653 } 14654 14655 (listptr + *listindex)-> 14656 map_rscn_info.ulp_rscn_count = 14657 (uint32_t)(uintptr_t)job->job_cb_arg; 14658 14659 fctl_copy_portmap(listptr + 14660 (*listindex)++, pwwn_pd); 14661 } 14662 14663 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14664 "Case TWO", page->aff_d_id); 14665 14666 return; 14667 } 14668 14669 if (pwwn_pd != NULL && did_pd == NULL) { 14670 uint32_t old_d_id; 14671 uint32_t d_id = page->aff_d_id; 14672 14673 /* 14674 * What this means is there is a new D_ID for this 14675 * Port WWN. Take out the port device off D_ID 14676 * list and put it back with a new D_ID. Perform 14677 * PLOGI if already logged in. 14678 */ 14679 mutex_enter(&port->fp_mutex); 14680 mutex_enter(&pwwn_pd->pd_mutex); 14681 14682 old_d_id = pwwn_pd->pd_port_id.port_id; 14683 14684 fctl_delist_did_table(port, pwwn_pd); 14685 14686 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14687 (uint32_t)(uintptr_t)job->job_cb_arg; 14688 14689 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14690 &d_id, NULL); 14691 fctl_enlist_did_table(port, pwwn_pd); 14692 14693 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14694 " Case THREE, pd=%p," 14695 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14696 14697 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14698 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14699 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14700 14701 mutex_exit(&pwwn_pd->pd_mutex); 14702 mutex_exit(&port->fp_mutex); 14703 14704 FP_TRACE(FP_NHEAD2(9, 0), 14705 "N_x Port with D_ID=%x, PWWN=%s has a new" 14706 " D_ID=%x now", old_d_id, ww_name, d_id); 14707 14708 rval = fp_port_login(port, page->aff_d_id, job, 14709 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14710 if (rval == FC_SUCCESS) { 14711 fp_jobwait(job); 14712 rval = job->job_result; 14713 } 14714 14715 if (rval != FC_SUCCESS) { 14716 fp_fillout_old_map(listptr + 14717 *listindex - 1, pwwn_pd, 0); 14718 } 14719 } else { 14720 mutex_exit(&pwwn_pd->pd_mutex); 14721 mutex_exit(&port->fp_mutex); 14722 } 14723 14724 return; 14725 } 14726 14727 if (pwwn_pd == NULL && did_pd != NULL) { 14728 fc_portmap_t *ptr; 14729 uint32_t len = 1; 14730 char old_ww_name[17]; 14731 14732 mutex_enter(&did_pd->pd_mutex); 14733 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14734 mutex_exit(&did_pd->pd_mutex); 14735 14736 fc_wwn_to_str(pwwn, ww_name); 14737 14738 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14739 (uint32_t)(uintptr_t)job->job_cb_arg; 14740 14741 /* 14742 * What this means is that there is a new Port WWN for 14743 * this D_ID; Mark the Port device as old and provide 
		 * the new PWWN and D_ID combination as new.
		 */
		fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
		    page->aff_d_id, old_ww_name, ww_name);

		(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
		    (uint32_t)(uintptr_t)job->job_cb_arg;

		ptr = listptr + (*listindex)++;

		job->job_counter = 1;

		if (fp_ns_getmap(port, job, &ptr, &len,
		    page->aff_d_id - 1) != FC_SUCCESS) {
			(*listindex)--;
		}

		mutex_enter(&did_pd->pd_mutex);
		did_pd->pd_flags = PD_IDLE;
		mutex_exit(&did_pd->pd_mutex);

		return;
	}

	/*
	 * A weird case: entries exist for both the Port WWN and the D_ID,
	 * but they do not refer to the same port device.  Take the port
	 * device handle off the Port WWN list, fix it up with the new Port
	 * WWN and put it back; in the meantime mark the port device
	 * corresponding to the old Port WWN as OLD.
	 */
	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
	    " did_pd=%p", pwwn_pd, did_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pwwn_pd->pd_mutex);

	pwwn_pd->pd_type = PORT_DEVICE_OLD;
	pwwn_pd->pd_state = PORT_DEVICE_INVALID;
	fctl_delist_did_table(port, pwwn_pd);
	fctl_delist_pwwn_table(port, pwwn_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
	    pwwn_pd->pd_port_id.port_id,
	    pwwn_pd->pd_port_name.raw_wwn[0],
	    pwwn_pd->pd_port_name.raw_wwn[1],
	    pwwn_pd->pd_port_name.raw_wwn[2],
	    pwwn_pd->pd_port_name.raw_wwn[3],
	    pwwn_pd->pd_port_name.raw_wwn[4],
	    pwwn_pd->pd_port_name.raw_wwn[5],
	    pwwn_pd->pd_port_name.raw_wwn[6],
	    pwwn_pd->pd_port_name.raw_wwn[7]);

	mutex_exit(&pwwn_pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&did_pd->pd_mutex);

	fctl_delist_pwwn_table(port, did_pd);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
	fctl_enlist_pwwn_table(port, did_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
	    did_pd->pd_port_id.port_id, did_pd->pd_state,
	    did_pd->pd_port_name.raw_wwn[0],
	    did_pd->pd_port_name.raw_wwn[1],
	    did_pd->pd_port_name.raw_wwn[2],
	    did_pd->pd_port_name.raw_wwn[3],
	    did_pd->pd_port_name.raw_wwn[4],
	    did_pd->pd_port_name.raw_wwn[5],
	    did_pd->pd_port_name.raw_wwn[6],
	    did_pd->pd_port_name.raw_wwn[7]);

	if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
	    (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);

		rval = fp_port_login(port, page->aff_d_id, job,
		    FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
		if (rval == FC_SUCCESS) {
			fp_jobwait(job);
			if (job->job_result != FC_SUCCESS) {
				fp_fillout_old_map(listptr +
				    *listindex - 1, did_pd, 0);
			}
		} else {
			fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
		}
	} else {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&did_pd->pd_mutex);
	did_pd->pd_flags = PD_IDLE;
	mutex_exit(&did_pd->pd_mutex);
}


/*
 * Check with the NS for the presence of this port WWN
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
	la_wwn_t	pwwn;
	uint32_t	flags;
	fctl_ns_req_t	*ns_cmd;

	flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
	    flags, sleep);
	if (ns_cmd == NULL) {
		return (FC_NOMEM);
	}

	mutex_enter(&pd->pd_mutex);
	pwwn = pd->pd_port_name;
	mutex_exit(&pd->pd_mutex);

	ns_cmd->ns_cmd_code = NS_GID_PN;
	ns_cmd->ns_pd = pd;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

	return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by the FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int	count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check whether the AL_PA is a valid address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int	count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}


/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
	fp_unsol_spec_t	*ub_spec = (fp_unsol_spec_t *)arg;

	fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
	    ub_spec->buf->ub_frame.type);
	kmem_free(ub_spec, sizeof (*ub_spec));
}


/*
 * Perform message reporting in a consistent manner.  Unless there is
 * a strong reason NOT to use this function (which is very rare),
 * all message reporting should go through it.
 */
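/*
 * Illustrative usage sketch (hypothetical; not taken from this file):
 * a caller holding a valid `port' could log a transport error to the
 * system log only with
 *
 *	fp_printf(port, CE_WARN, FP_LOG_ONLY, FC_TRANSPORT_ERROR, NULL,
 *	    "ELS to D_ID=%x failed", d_id);
 *
 * where `d_id' is an assumed local variable and the message is still
 * subject to the fp_verbose checks at the top of the function.
 * FP_LOG_ONLY is emitted below through cmn_err() with a '!' prefix,
 * which keeps the message off the console; FP_CONSOLE_ONLY uses the
 * '^' prefix for the opposite effect.
 */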
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
	caddr_t	buf;
	va_list	ap;

	switch (level) {
	case CE_NOTE:
		if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
			return;
		}
		break;

	case CE_WARN:
		if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
			return;
		}
		break;
	}

	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	(void) sprintf(buf, "fp(%d): ", port->fp_instance);

	va_start(ap, fmt);
	(void) vsprintf(buf + strlen(buf), fmt, ap);
	va_end(ap);

	if (fc_errno) {
		char *errmsg;

		(void) fc_ulp_error(fc_errno, &errmsg);
		(void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
	} else {
		if (pkt) {
			caddr_t state, reason, action, expln;

			(void) fc_ulp_pkt_error(pkt, &state, &reason,
			    &action, &expln);

			(void) sprintf(buf + strlen(buf),
			    " state=%s, reason=%s", state, reason);

			if (pkt->pkt_resp_resid) {
				(void) sprintf(buf + strlen(buf),
				    " resp resid=%x\n", pkt->pkt_resp_resid);
			}
		}
	}

	switch (dest) {
	case FP_CONSOLE_ONLY:
		cmn_err(level, "^%s", buf);
		break;

	case FP_LOG_ONLY:
		cmn_err(level, "!%s", buf);
		break;

	default:
		cmn_err(level, "%s", buf);
		break;
	}

	kmem_free(buf, 256);
}

/*
 * Log into the remote port identified by the user-supplied port WWN
 * (the user-initiated login ioctl path): resolve the PWWN to a D_ID,
 * via the name server on fabric topologies, and perform a PLOGI to
 * that remote port on behalf of the caller.
 */
static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int			ret;
	uint32_t		d_id;
	la_wwn_t		pwwn;
	fc_remote_port_t	*pd = NULL;
	fc_remote_port_t	*held_pd = NULL;
	fctl_ns_req_t		*ns_cmd;
	fc_portmap_t		*changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
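		/*
		 * No remote port is known for this D_ID yet.  On a fabric
		 * (switch) topology, try to build one from the name server
		 * data before attempting the PLOGI below; on other
		 * topologies simply fall through and let fp_port_login()
		 * proceed without an existing port device handle (pd stays
		 * NULL).
		 */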
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Log out of the remote port identified by the user-supplied port WWN
 * (the user-initiated logout ioctl path).  If other logins are still
 * outstanding only the login count is dropped; otherwise an explicit
 * LOGO ELS is sent to the remote port.
 */
static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t		pwwn;
	fp_cmd_t		*cmd;
	fc_portmap_t		*changelist;
	fc_remote_port_t	*pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		fctl_release_remote_port(pd);

		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

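	/*
	 * fp_logo_init() (a static helper elsewhere in this file) is
	 * expected to fill in the LOGO ELS payload and the completion
	 * handling for this packet before it is handed to the FCA via
	 * fp_sendcmd() below.
	 */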
	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Send a syslog event for adapter port level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t	*attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}
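
/*
 * Illustrative usage sketch (hypothetical; not taken from this file):
 * a port state transition could be reported to syseventd as, e.g.,
 *
 *	fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
 *
 * assuming ESC_SUNFC_PORT_ONLINE is one of the EC_SUNFC event
 * subclasses defined in <sys/sysevent/eventdefs.h>.  The nvlist built
 * above carries the driver instance number and the local port WWN as
 * event attributes.
 */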

/*
 * Send a syslog event for target (remote port) level events.
 */
static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t	*attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}


/*
 * Map a remote port (pd) state to the corresponding FC-HBA API
 * port state.
 */
static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}
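
/*
 * Illustrative usage sketch (hypothetical; not taken from this file):
 * an FC-HBA "get remote port attributes" path could translate the
 * internal pd state like this, assuming `pd' is held and `attrs' is
 * an attributes structure with a PortState field:
 *
 *	mutex_enter(&pd->pd_mutex);
 *	attrs->PortState = fp_map_remote_port_state(pd->pd_state);
 *	mutex_exit(&pd->pd_mutex);
 *
 * Note that only PORT_DEVICE_LOGGED_IN maps to ONLINE; every other
 * remote port state is collapsed to UNKNOWN.
 */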