1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * NOT a DDI compliant Sun Fibre Channel port driver(fp) 26 * 27 */ 28 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/param.h> 32 #include <sys/errno.h> 33 #include <sys/uio.h> 34 #include <sys/buf.h> 35 #include <sys/modctl.h> 36 #include <sys/open.h> 37 #include <sys/file.h> 38 #include <sys/kmem.h> 39 #include <sys/poll.h> 40 #include <sys/conf.h> 41 #include <sys/thread.h> 42 #include <sys/var.h> 43 #include <sys/cmn_err.h> 44 #include <sys/stat.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/promif.h> 48 #include <sys/nvpair.h> 49 #include <sys/byteorder.h> 50 #include <sys/scsi/scsi.h> 51 #include <sys/fibre-channel/fc.h> 52 #include <sys/fibre-channel/impl/fc_ulpif.h> 53 #include <sys/fibre-channel/impl/fc_fcaif.h> 54 #include <sys/fibre-channel/impl/fctl_private.h> 55 #include <sys/fibre-channel/impl/fc_portif.h> 56 #include <sys/fibre-channel/impl/fp.h> 57 58 /* These are defined in fctl.c! 
*/ 59 extern int did_table_size; 60 extern int pwwn_table_size; 61 62 static struct cb_ops fp_cb_ops = { 63 fp_open, /* open */ 64 fp_close, /* close */ 65 nodev, /* strategy */ 66 nodev, /* print */ 67 nodev, /* dump */ 68 nodev, /* read */ 69 nodev, /* write */ 70 fp_ioctl, /* ioctl */ 71 nodev, /* devmap */ 72 nodev, /* mmap */ 73 nodev, /* segmap */ 74 nochpoll, /* chpoll */ 75 ddi_prop_op, /* cb_prop_op */ 76 0, /* streamtab */ 77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 78 CB_REV, /* rev */ 79 nodev, /* aread */ 80 nodev /* awrite */ 81 }; 82 83 static struct dev_ops fp_ops = { 84 DEVO_REV, /* build revision */ 85 0, /* reference count */ 86 fp_getinfo, /* getinfo */ 87 nulldev, /* identify - Obsoleted */ 88 nulldev, /* probe */ 89 fp_attach, /* attach */ 90 fp_detach, /* detach */ 91 nodev, /* reset */ 92 &fp_cb_ops, /* cb_ops */ 93 NULL, /* bus_ops */ 94 fp_power /* power */ 95 }; 96 97 #define FP_VERSION "1.96" 98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION 99 100 char *fp_version = FP_NAME_VERSION; 101 102 static struct modldrv modldrv = { 103 &mod_driverops, /* Type of Module */ 104 FP_NAME_VERSION, /* Name/Version of fp */ 105 &fp_ops /* driver ops */ 106 }; 107 108 static struct modlinkage modlinkage = { 109 MODREV_1, /* Rev of the loadable modules system */ 110 &modldrv, /* NULL terminated list of */ 111 NULL /* Linkage structures */ 112 }; 113 114 115 116 static uint16_t ns_reg_cmds[] = { 117 NS_RPN_ID, 118 NS_RNN_ID, 119 NS_RCS_ID, 120 NS_RFT_ID, 121 NS_RPT_ID, 122 NS_RSPN_ID, 123 NS_RSNN_NN 124 }; 125 126 struct fp_xlat { 127 uchar_t xlat_state; 128 int xlat_rval; 129 } fp_xlat [] = { 130 { FC_PKT_SUCCESS, FC_SUCCESS }, 131 { FC_PKT_REMOTE_STOP, FC_FAILURE }, 132 { FC_PKT_LOCAL_RJT, FC_FAILURE }, 133 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT }, 134 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT }, 135 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY }, 136 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY }, 137 { FC_PKT_NPORT_BSY, FC_PBUSY }, 138 { FC_PKT_FABRIC_BSY, FC_FBUSY }, 139 { FC_PKT_LS_RJT, FC_FAILURE }, 140 { FC_PKT_BA_RJT, FC_FAILURE }, 141 { FC_PKT_TIMEOUT, FC_FAILURE }, 142 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR }, 143 { FC_PKT_FAILURE, FC_FAILURE }, 144 { FC_PKT_PORT_OFFLINE, FC_OFFLINE } 145 }; 146 147 static uchar_t fp_valid_alpas[] = { 148 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 149 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 150 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 151 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 152 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 153 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, 154 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73, 155 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82, 156 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 157 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, 158 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 159 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 160 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 161 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF 162 }; 163 164 static struct fp_perms { 165 uint16_t fp_ioctl_cmd; 166 uchar_t fp_open_flag; 167 } fp_perm_list [] = { 168 { FCIO_GET_NUM_DEVS, FP_OPEN }, 169 { FCIO_GET_DEV_LIST, FP_OPEN }, 170 { FCIO_GET_SYM_PNAME, FP_OPEN }, 171 { FCIO_GET_SYM_NNAME, FP_OPEN }, 172 { FCIO_SET_SYM_PNAME, FP_EXCL }, 173 { FCIO_SET_SYM_NNAME, FP_EXCL }, 174 { FCIO_GET_LOGI_PARAMS, FP_OPEN }, 175 { FCIO_DEV_LOGIN, FP_EXCL }, 176 { FCIO_DEV_LOGOUT, FP_EXCL }, 177 { FCIO_GET_STATE, FP_OPEN }, 
178 { FCIO_DEV_REMOVE, FP_EXCL }, 179 { FCIO_GET_FCODE_REV, FP_OPEN }, 180 { FCIO_GET_FW_REV, FP_OPEN }, 181 { FCIO_GET_DUMP_SIZE, FP_OPEN }, 182 { FCIO_FORCE_DUMP, FP_EXCL }, 183 { FCIO_GET_DUMP, FP_OPEN }, 184 { FCIO_GET_TOPOLOGY, FP_OPEN }, 185 { FCIO_RESET_LINK, FP_EXCL }, 186 { FCIO_RESET_HARD, FP_EXCL }, 187 { FCIO_RESET_HARD_CORE, FP_EXCL }, 188 { FCIO_DIAG, FP_OPEN }, 189 { FCIO_NS, FP_EXCL }, 190 { FCIO_DOWNLOAD_FW, FP_EXCL }, 191 { FCIO_DOWNLOAD_FCODE, FP_EXCL }, 192 { FCIO_LINK_STATUS, FP_OPEN }, 193 { FCIO_GET_HOST_PARAMS, FP_OPEN }, 194 { FCIO_GET_NODE_ID, FP_OPEN }, 195 { FCIO_SET_NODE_ID, FP_EXCL }, 196 { FCIO_SEND_NODE_ID, FP_OPEN }, 197 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN }, 198 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN }, 199 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN }, 200 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN }, 201 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN }, 202 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN }, 203 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN }, 204 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN }, 205 { FCIO_DELETE_NPIV_PORT, FP_OPEN }, 206 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN }, 207 { FCIO_CREATE_NPIV_PORT, FP_OPEN }, 208 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN } 209 }; 210 211 static char *fp_pm_comps[] = { 212 "NAME=FC Port", 213 "0=Port Down", 214 "1=Port Up" 215 }; 216 217 218 #ifdef _LITTLE_ENDIAN 219 #define MAKE_BE_32(x) { \ 220 uint32_t *ptr1, i; \ 221 ptr1 = (uint32_t *)(x); \ 222 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \ 223 *ptr1 = BE_32(*ptr1); \ 224 ptr1++; \ 225 } \ 226 } 227 #else 228 #define MAKE_BE_32(x) 229 #endif 230 231 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES); 232 static uint32_t fp_options = 0; 233 234 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY; 235 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */ 236 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */ 237 unsigned int fp_offline_ticker; /* seconds */ 238 239 /* 240 * Driver global variable to anchor the list of soft state structs for 241 * all fp driver instances. Used with the Solaris DDI soft state functions. 
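 *
 * As a consolidated illustration (the real calls are spread across
 * _init(), _fini(), fp_attach_handler() and fp_detach_handler() below),
 * the lifecycle of this anchor follows the usual DDI soft-state pattern;
 * a minimal sketch, with the "xx" names purely hypothetical:
 *
 *	static void *xx_state;
 *
 *	(void) ddi_soft_state_init(&xx_state,
 *	    sizeof (struct fc_local_port), 8);
 *	(void) ddi_soft_state_zalloc(xx_state, instance);
 *	port = ddi_get_soft_state(xx_state, instance);
 *	...
 *	ddi_soft_state_free(xx_state, instance);
 *	ddi_soft_state_fini(&xx_state);
 *
 * Error checks are omitted in the sketch; the real code below checks
 * each return value.
 *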
242 */ 243 static void *fp_driver_softstate; 244 245 static clock_t fp_retry_ticks; 246 static clock_t fp_offline_ticks; 247 248 static int fp_retry_ticker; 249 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 250 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 251 252 static int fp_log_size = FP_LOG_SIZE; 253 static int fp_trace = FP_TRACE_DEFAULT; 254 static fc_trace_logq_t *fp_logq = NULL; 255 256 int fp_get_adapter_paths(char *pathList, int count); 257 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 258 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 259 la_wwn_t tgt_pwwn, uint32_t port_id); 260 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 261 static void fp_init_symbolic_names(fc_local_port_t *port); 262 263 264 /* 265 * Perform global initialization 266 */ 267 int 268 _init(void) 269 { 270 int ret; 271 272 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 273 sizeof (struct fc_local_port), 8)) != 0) { 274 return (ret); 275 } 276 277 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 278 ddi_soft_state_fini(&fp_driver_softstate); 279 return (ret); 280 } 281 282 fp_logq = fc_trace_alloc_logq(fp_log_size); 283 284 if ((ret = mod_install(&modlinkage)) != 0) { 285 fc_trace_free_logq(fp_logq); 286 ddi_soft_state_fini(&fp_driver_softstate); 287 scsi_hba_fini(&modlinkage); 288 } 289 290 return (ret); 291 } 292 293 294 /* 295 * Prepare for driver unload 296 */ 297 int 298 _fini(void) 299 { 300 int ret; 301 302 if ((ret = mod_remove(&modlinkage)) == 0) { 303 fc_trace_free_logq(fp_logq); 304 ddi_soft_state_fini(&fp_driver_softstate); 305 scsi_hba_fini(&modlinkage); 306 } 307 308 return (ret); 309 } 310 311 312 /* 313 * Request mod_info() to handle all cases 314 */ 315 int 316 _info(struct modinfo *modinfo) 317 { 318 return (mod_info(&modlinkage, modinfo)); 319 } 320 321 322 /* 323 * fp_attach: 324 * 325 * The respective cmd handlers take care of performing 326 * ULP related invocations 327 */ 328 static int 329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 int rval; 332 333 /* 334 * We check the value of fp_offline_ticker at this 335 * point. The variable is global for the driver and 336 * not specific to an instance. 337 * 338 * If there is no user-defined value found in /etc/system 339 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 340 * The minimum setting for this offline timeout according 341 * to the FC-FS2 standard (Fibre Channel Framing and 342 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 343 * 344 * We do not recommend setting the value to less than 10 345 * seconds (RA_TOV) or more than 90 seconds. If this 346 * variable is greater than 90 seconds then drivers above 347 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 348 */ 349 350 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 351 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 352 FP_OFFLINE_TICKER); 353 354 if ((fp_offline_ticker < 10) || 355 (fp_offline_ticker > 90)) { 356 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 357 "%d second(s). This is outside the " 358 "recommended range of 10..90 seconds", 359 fp_offline_ticker); 360 } 361 362 /* 363 * Tick every second when there are commands to retry. 
364 * It should tick at the least granular value of pkt_timeout 365 * (which is one second) 366 */ 367 fp_retry_ticker = 1; 368 369 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000); 370 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000); 371 372 switch (cmd) { 373 case DDI_ATTACH: 374 rval = fp_attach_handler(dip); 375 break; 376 377 case DDI_RESUME: 378 rval = fp_resume_handler(dip); 379 break; 380 381 default: 382 rval = DDI_FAILURE; 383 break; 384 } 385 return (rval); 386 } 387 388 389 /* 390 * fp_detach: 391 * 392 * If a ULP fails to handle cmd request converse of 393 * cmd is invoked for ULPs that previously succeeded 394 * cmd request. 395 */ 396 static int 397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 398 { 399 int rval = DDI_FAILURE; 400 fc_local_port_t *port; 401 fc_attach_cmd_t converse; 402 uint8_t cnt; 403 404 if ((port = ddi_get_soft_state(fp_driver_softstate, 405 ddi_get_instance(dip))) == NULL) { 406 return (DDI_FAILURE); 407 } 408 409 mutex_enter(&port->fp_mutex); 410 411 if (port->fp_ulp_attach) { 412 mutex_exit(&port->fp_mutex); 413 return (DDI_FAILURE); 414 } 415 416 switch (cmd) { 417 case DDI_DETACH: 418 if (port->fp_task != FP_TASK_IDLE) { 419 mutex_exit(&port->fp_mutex); 420 return (DDI_FAILURE); 421 } 422 423 /* Let's attempt to quit the job handler gracefully */ 424 port->fp_soft_state |= FP_DETACH_INPROGRESS; 425 426 mutex_exit(&port->fp_mutex); 427 converse = FC_CMD_ATTACH; 428 if (fctl_detach_ulps(port, FC_CMD_DETACH, 429 &modlinkage) != FC_SUCCESS) { 430 mutex_enter(&port->fp_mutex); 431 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 432 mutex_exit(&port->fp_mutex); 433 rval = DDI_FAILURE; 434 break; 435 } 436 437 mutex_enter(&port->fp_mutex); 438 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt); 439 cnt++) { 440 mutex_exit(&port->fp_mutex); 441 delay(drv_usectohz(1000000)); 442 mutex_enter(&port->fp_mutex); 443 } 444 445 if (port->fp_job_head) { 446 mutex_exit(&port->fp_mutex); 447 rval = DDI_FAILURE; 448 break; 449 } 450 mutex_exit(&port->fp_mutex); 451 452 rval = fp_detach_handler(port); 453 break; 454 455 case DDI_SUSPEND: 456 mutex_exit(&port->fp_mutex); 457 converse = FC_CMD_RESUME; 458 if (fctl_detach_ulps(port, FC_CMD_SUSPEND, 459 &modlinkage) != FC_SUCCESS) { 460 rval = DDI_FAILURE; 461 break; 462 } 463 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) { 464 (void) callb_generic_cpr(&port->fp_cpr_info, 465 CB_CODE_CPR_RESUME); 466 } 467 break; 468 469 default: 470 mutex_exit(&port->fp_mutex); 471 break; 472 } 473 474 /* 475 * Use softint to perform reattach. Mark fp_ulp_attach so we 476 * don't attempt to do this repeatedly on behalf of some persistent 477 * caller. 478 */ 479 if (rval != DDI_SUCCESS) { 480 mutex_enter(&port->fp_mutex); 481 port->fp_ulp_attach = 1; 482 483 /* 484 * If the port is in the low power mode then there is 485 * possibility that fca too could be in low power mode. 486 * Try to raise the power before calling attach ulps. 
487 */ 488 489 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 490 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 491 mutex_exit(&port->fp_mutex); 492 (void) pm_raise_power(port->fp_port_dip, 493 FP_PM_COMPONENT, FP_PM_PORT_UP); 494 } else { 495 mutex_exit(&port->fp_mutex); 496 } 497 498 499 fp_attach_ulps(port, converse); 500 501 mutex_enter(&port->fp_mutex); 502 while (port->fp_ulp_attach) { 503 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 504 } 505 506 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 507 508 /* 509 * Mark state as detach failed so asynchronous ULP attach 510 * events (downstream, not the ones we're initiating with 511 * the call to fp_attach_ulps) are not honored. We're 512 * really still in pending detach. 513 */ 514 port->fp_soft_state |= FP_DETACH_FAILED; 515 516 mutex_exit(&port->fp_mutex); 517 } 518 519 return (rval); 520 } 521 522 523 /* 524 * fp_getinfo: 525 * Given the device number, return either the 526 * dev_info_t pointer or the instance number. 527 */ 528 529 /* ARGSUSED */ 530 static int 531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 532 { 533 int rval; 534 minor_t instance; 535 fc_local_port_t *port; 536 537 rval = DDI_SUCCESS; 538 instance = getminor((dev_t)arg); 539 540 switch (cmd) { 541 case DDI_INFO_DEVT2DEVINFO: 542 if ((port = ddi_get_soft_state(fp_driver_softstate, 543 instance)) == NULL) { 544 rval = DDI_FAILURE; 545 break; 546 } 547 *result = (void *)port->fp_port_dip; 548 break; 549 550 case DDI_INFO_DEVT2INSTANCE: 551 *result = (void *)(uintptr_t)instance; 552 break; 553 554 default: 555 rval = DDI_FAILURE; 556 break; 557 } 558 559 return (rval); 560 } 561 562 563 /* 564 * Entry point for power up and power down request from kernel 565 */ 566 static int 567 fp_power(dev_info_t *dip, int comp, int level) 568 { 569 int rval = DDI_FAILURE; 570 fc_local_port_t *port; 571 572 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 573 if (port == NULL || comp != FP_PM_COMPONENT) { 574 return (rval); 575 } 576 577 switch (level) { 578 case FP_PM_PORT_UP: 579 rval = DDI_SUCCESS; 580 581 /* 582 * If the port is DDI_SUSPENDed, let the DDI_RESUME 583 * code complete the rediscovery. 584 */ 585 mutex_enter(&port->fp_mutex); 586 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 587 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 588 port->fp_pm_level = FP_PM_PORT_UP; 589 mutex_exit(&port->fp_mutex); 590 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage); 591 break; 592 } 593 594 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 595 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 596 597 port->fp_pm_level = FP_PM_PORT_UP; 598 rval = fp_power_up(port); 599 if (rval != DDI_SUCCESS) { 600 port->fp_pm_level = FP_PM_PORT_DOWN; 601 } 602 } else { 603 port->fp_pm_level = FP_PM_PORT_UP; 604 } 605 mutex_exit(&port->fp_mutex); 606 break; 607 608 case FP_PM_PORT_DOWN: 609 mutex_enter(&port->fp_mutex); 610 611 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP)); 612 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) { 613 /* 614 * PM framework goofed up. We have don't 615 * have any PM components. Let's never go down. 
616 */ 617 mutex_exit(&port->fp_mutex); 618 break; 619 620 } 621 622 if (port->fp_ulp_attach) { 623 /* We shouldn't let the power go down */ 624 mutex_exit(&port->fp_mutex); 625 break; 626 } 627 628 /* 629 * Not a whole lot to do if we are detaching 630 */ 631 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 632 port->fp_pm_level = FP_PM_PORT_DOWN; 633 mutex_exit(&port->fp_mutex); 634 rval = DDI_SUCCESS; 635 break; 636 } 637 638 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 639 port->fp_pm_level = FP_PM_PORT_DOWN; 640 641 rval = fp_power_down(port); 642 if (rval != DDI_SUCCESS) { 643 port->fp_pm_level = FP_PM_PORT_UP; 644 ASSERT(!(port->fp_soft_state & 645 FP_SOFT_POWER_DOWN)); 646 } else { 647 ASSERT(port->fp_soft_state & 648 FP_SOFT_POWER_DOWN); 649 } 650 } 651 mutex_exit(&port->fp_mutex); 652 break; 653 654 default: 655 break; 656 } 657 658 return (rval); 659 } 660 661 662 /* 663 * Open FC port devctl node 664 */ 665 static int 666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 667 { 668 int instance; 669 fc_local_port_t *port; 670 671 if (otype != OTYP_CHR) { 672 return (EINVAL); 673 } 674 675 /* 676 * This is not a toy to play with. Allow only powerful 677 * users (hopefully knowledgeable) to access the port 678 * (A hacker potentially could download a sick binary 679 * file into FCA) 680 */ 681 if (drv_priv(credp)) { 682 return (EPERM); 683 } 684 685 instance = (int)getminor(*devp); 686 687 port = ddi_get_soft_state(fp_driver_softstate, instance); 688 if (port == NULL) { 689 return (ENXIO); 690 } 691 692 mutex_enter(&port->fp_mutex); 693 if (port->fp_flag & FP_EXCL) { 694 /* 695 * It is already open for exclusive access. 696 * So shut the door on this caller. 697 */ 698 mutex_exit(&port->fp_mutex); 699 return (EBUSY); 700 } 701 702 if (flag & FEXCL) { 703 if (port->fp_flag & FP_OPEN) { 704 /* 705 * Exclusive operation not possible 706 * as it is already opened 707 */ 708 mutex_exit(&port->fp_mutex); 709 return (EBUSY); 710 } 711 port->fp_flag |= FP_EXCL; 712 } 713 port->fp_flag |= FP_OPEN; 714 mutex_exit(&port->fp_mutex); 715 716 return (0); 717 } 718 719 720 /* 721 * The driver close entry point is called on the last close() 722 * of a device. So it is perfectly alright to just clobber the 723 * open flag and reset it to idle (instead of having to reset 724 * each flag bits). For any confusion, check out close(9E). 
725 */ 726 727 /* ARGSUSED */ 728 static int 729 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 730 { 731 int instance; 732 fc_local_port_t *port; 733 734 if (otype != OTYP_CHR) { 735 return (EINVAL); 736 } 737 738 instance = (int)getminor(dev); 739 740 port = ddi_get_soft_state(fp_driver_softstate, instance); 741 if (port == NULL) { 742 return (ENXIO); 743 } 744 745 mutex_enter(&port->fp_mutex); 746 if ((port->fp_flag & FP_OPEN) == 0) { 747 mutex_exit(&port->fp_mutex); 748 return (ENODEV); 749 } 750 port->fp_flag = FP_IDLE; 751 mutex_exit(&port->fp_mutex); 752 753 return (0); 754 } 755 756 /* 757 * Handle IOCTL requests 758 */ 759 760 /* ARGSUSED */ 761 static int 762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 763 { 764 int instance; 765 int ret = 0; 766 fcio_t fcio; 767 fc_local_port_t *port; 768 769 instance = (int)getminor(dev); 770 771 port = ddi_get_soft_state(fp_driver_softstate, instance); 772 if (port == NULL) { 773 return (ENXIO); 774 } 775 776 mutex_enter(&port->fp_mutex); 777 if ((port->fp_flag & FP_OPEN) == 0) { 778 mutex_exit(&port->fp_mutex); 779 return (ENXIO); 780 } 781 782 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 783 mutex_exit(&port->fp_mutex); 784 return (ENXIO); 785 } 786 787 mutex_exit(&port->fp_mutex); 788 789 /* this will raise power if necessary */ 790 ret = fctl_busy_port(port); 791 if (ret != 0) { 792 return (ret); 793 } 794 795 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 796 797 798 switch (cmd) { 799 case FCIO_CMD: { 800 #ifdef _MULTI_DATAMODEL 801 switch (ddi_model_convert_from(mode & FMODELS)) { 802 case DDI_MODEL_ILP32: { 803 struct fcio32 fcio32; 804 805 if (ddi_copyin((void *)data, (void *)&fcio32, 806 sizeof (struct fcio32), mode)) { 807 ret = EFAULT; 808 break; 809 } 810 fcio.fcio_xfer = fcio32.fcio_xfer; 811 fcio.fcio_cmd = fcio32.fcio_cmd; 812 fcio.fcio_flags = fcio32.fcio_flags; 813 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 814 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 815 fcio.fcio_ibuf = 816 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 817 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 818 fcio.fcio_obuf = 819 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 820 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 821 fcio.fcio_abuf = 822 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 823 fcio.fcio_errno = fcio32.fcio_errno; 824 break; 825 } 826 827 case DDI_MODEL_NONE: 828 if (ddi_copyin((void *)data, (void *)&fcio, 829 sizeof (fcio_t), mode)) { 830 ret = EFAULT; 831 } 832 break; 833 } 834 #else /* _MULTI_DATAMODEL */ 835 if (ddi_copyin((void *)data, (void *)&fcio, 836 sizeof (fcio_t), mode)) { 837 ret = EFAULT; 838 break; 839 } 840 #endif /* _MULTI_DATAMODEL */ 841 if (!ret) { 842 ret = fp_fciocmd(port, data, mode, &fcio); 843 } 844 break; 845 } 846 847 default: 848 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 849 mode, credp, rval); 850 } 851 852 fctl_idle_port(port); 853 854 return (ret); 855 } 856 857 858 /* 859 * Init Symbolic Port Name and Node Name 860 * LV will try to get symbolic names from FCA driver 861 * and register these to name server, 862 * if LV fails to get these, 863 * LV will register its default symbolic names to name server. 
864 * The Default symbolic node name format is : 865 * <hostname>:<hba driver name>(instance) 866 * The Default symbolic port name format is : 867 * <fp path name> 868 */ 869 static void 870 fp_init_symbolic_names(fc_local_port_t *port) 871 { 872 const char *vendorname = ddi_driver_name(port->fp_fca_dip); 873 char *sym_name; 874 char fcaname[50] = {0}; 875 int hostnlen, fcanlen; 876 877 if (port->fp_sym_node_namelen == 0) { 878 hostnlen = strlen(utsname.nodename); 879 (void) snprintf(fcaname, sizeof (fcaname), 880 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip)); 881 fcanlen = strlen(fcaname); 882 883 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP); 884 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname); 885 port->fp_sym_node_namelen = strlen(sym_name); 886 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) { 887 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN; 888 } 889 (void) strncpy(port->fp_sym_node_name, sym_name, 890 port->fp_sym_node_namelen); 891 kmem_free(sym_name, hostnlen + fcanlen + 2); 892 } 893 894 if (port->fp_sym_port_namelen == 0) { 895 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 896 897 (void) ddi_pathname(port->fp_port_dip, pathname); 898 port->fp_sym_port_namelen = strlen(pathname); 899 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) { 900 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN; 901 } 902 (void) strncpy(port->fp_sym_port_name, pathname, 903 port->fp_sym_port_namelen); 904 kmem_free(pathname, MAXPATHLEN); 905 } 906 } 907 908 909 /* 910 * Perform port attach 911 */ 912 static int 913 fp_attach_handler(dev_info_t *dip) 914 { 915 int rval; 916 int instance; 917 int port_num; 918 int port_len; 919 char name[30]; 920 char i_pwwn[17]; 921 fp_cmd_t *pkt; 922 uint32_t ub_count; 923 fc_local_port_t *port; 924 job_request_t *job; 925 fc_local_port_t *phyport = NULL; 926 int portpro1; 927 char pwwn[17], nwwn[17]; 928 929 instance = ddi_get_instance(dip); 930 931 port_len = sizeof (port_num); 932 933 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 934 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 935 (caddr_t)&port_num, &port_len); 936 937 if (rval != DDI_SUCCESS) { 938 cmn_err(CE_WARN, "fp(%d): No port property in devinfo", 939 instance); 940 return (DDI_FAILURE); 941 } 942 943 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance, 944 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 945 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node", 946 instance); 947 return (DDI_FAILURE); 948 } 949 950 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance, 951 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 952 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment" 953 " point minor node", instance); 954 ddi_remove_minor_node(dip, NULL); 955 return (DDI_FAILURE); 956 } 957 958 if (ddi_soft_state_zalloc(fp_driver_softstate, instance) 959 != DDI_SUCCESS) { 960 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state", 961 instance); 962 ddi_remove_minor_node(dip, NULL); 963 return (DDI_FAILURE); 964 } 965 port = ddi_get_soft_state(fp_driver_softstate, instance); 966 967 (void) sprintf(port->fp_ibuf, "fp(%d)", instance); 968 969 port->fp_instance = instance; 970 port->fp_ulp_attach = 1; 971 port->fp_port_num = port_num; 972 port->fp_verbose = fp_verbosity; 973 port->fp_options = fp_options; 974 975 port->fp_fca_dip = ddi_get_parent(dip); 976 port->fp_port_dip = dip; 977 port->fp_fca_tran = (fc_fca_tran_t *) 978 ddi_get_driver_private(port->fp_fca_dip); 979 980 port->fp_task = port->fp_last_task = FP_TASK_IDLE; 981 982 /* 983 * 
Init the starting value of fp_rscn_count. Note that if 984 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the 985 * actual # of RSCNs will be (fp_rscn_count - 1) 986 */ 987 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1; 988 989 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL); 990 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL); 991 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL); 992 993 (void) sprintf(name, "fp%d_cache", instance); 994 995 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY, 996 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 997 "phyport-instance", -1)) != -1) { 998 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1); 999 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn); 1000 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn); 1001 port->fp_npiv_type = FC_NPIV_PORT; 1002 } 1003 1004 /* 1005 * Allocate the pool of fc_packet_t structs to be used with 1006 * this fp instance. 1007 */ 1008 port->fp_pkt_cache = kmem_cache_create(name, 1009 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8, 1010 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port, 1011 NULL, 0); 1012 1013 if (port->fp_pkt_cache == NULL) { 1014 goto cache_alloc_failed; 1015 } 1016 1017 1018 /* 1019 * Allocate the d_id and pwwn hash tables for all remote ports 1020 * connected to this local port. 1021 */ 1022 port->fp_did_table = kmem_zalloc(did_table_size * 1023 sizeof (struct d_id_hash), KM_SLEEP); 1024 1025 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size * 1026 sizeof (struct pwwn_hash), KM_SLEEP); 1027 1028 port->fp_taskq = taskq_create("fp_ulp_callback", 1, 1029 MINCLSYSPRI, 1, 16, 0); 1030 1031 /* Indicate that don't have the pm components yet */ 1032 port->fp_soft_state |= FP_SOFT_NO_PMCOMP; 1033 1034 /* 1035 * Bind the callbacks with the FCA driver. This will open the gate 1036 * for asynchronous callbacks, so after this call the fp_mutex 1037 * must be held when updating the fc_local_port_t struct. 1038 * 1039 * This is done _before_ setting up the job thread so we can avoid 1040 * cleaning up after the thread_create() in the error path. This 1041 * also means fp will be operating with fp_els_resp_pkt set to NULL. 
1042 */ 1043 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1044 goto bind_callbacks_failed; 1045 } 1046 1047 if (phyport) { 1048 mutex_enter(&phyport->fp_mutex); 1049 if (phyport->fp_port_next) { 1050 phyport->fp_port_next->fp_port_prev = port; 1051 port->fp_port_next = phyport->fp_port_next; 1052 phyport->fp_port_next = port; 1053 port->fp_port_prev = phyport; 1054 } else { 1055 phyport->fp_port_next = port; 1056 phyport->fp_port_prev = port; 1057 port->fp_port_next = phyport; 1058 port->fp_port_prev = phyport; 1059 } 1060 mutex_exit(&phyport->fp_mutex); 1061 } 1062 1063 /* 1064 * Init Symbolic Names 1065 */ 1066 fp_init_symbolic_names(port); 1067 1068 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t), 1069 KM_SLEEP, NULL); 1070 1071 if (pkt == NULL) { 1072 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet", 1073 instance); 1074 goto alloc_els_packet_failed; 1075 } 1076 1077 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN, 1078 v.v_maxsyspri - 2); 1079 1080 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn); 1081 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port", 1082 i_pwwn) != DDI_PROP_SUCCESS) { 1083 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1084 "fp(%d): Updating 'initiator-port' property" 1085 " on fp dev_info node failed", instance); 1086 } 1087 1088 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn); 1089 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node", 1090 i_pwwn) != DDI_PROP_SUCCESS) { 1091 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1092 "fp(%d): Updating 'initiator-node' property" 1093 " on fp dev_info node failed", instance); 1094 } 1095 1096 mutex_enter(&port->fp_mutex); 1097 port->fp_els_resp_pkt = pkt; 1098 mutex_exit(&port->fp_mutex); 1099 1100 /* 1101 * Determine the count of unsolicited buffers this FCA can support 1102 */ 1103 fp_retrieve_caps(port); 1104 1105 /* 1106 * Allocate unsolicited buffer tokens 1107 */ 1108 if (port->fp_ub_count) { 1109 ub_count = port->fp_ub_count; 1110 port->fp_ub_tokens = kmem_zalloc(ub_count * 1111 sizeof (*port->fp_ub_tokens), KM_SLEEP); 1112 /* 1113 * Do not fail the attach if unsolicited buffer allocation 1114 * fails; Just try to get along with whatever the FCA can do. 1115 */ 1116 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size, 1117 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) != 1118 FC_SUCCESS || ub_count != port->fp_ub_count) { 1119 cmn_err(CE_WARN, "fp(%d): failed to allocate " 1120 " Unsolicited buffers. proceeding with attach...", 1121 instance); 1122 kmem_free(port->fp_ub_tokens, 1123 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1124 port->fp_ub_tokens = NULL; 1125 } 1126 } 1127 1128 fp_load_ulp_modules(dip, port); 1129 1130 /* 1131 * Enable DDI_SUSPEND and DDI_RESUME for this instance. 1132 */ 1133 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1134 "pm-hardware-state", "needs-suspend-resume", 1135 strlen("needs-suspend-resume") + 1); 1136 1137 /* 1138 * fctl maintains a list of all port handles, so 1139 * help fctl add this one to its list now. 
1140 */ 1141 mutex_enter(&port->fp_mutex); 1142 fctl_add_port(port); 1143 1144 /* 1145 * If a state change is already in progress, set the bind state t 1146 * OFFLINE as well, so further state change callbacks into ULPs 1147 * will pass the appropriate states 1148 */ 1149 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE || 1150 port->fp_statec_busy) { 1151 port->fp_bind_state = FC_STATE_OFFLINE; 1152 mutex_exit(&port->fp_mutex); 1153 1154 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 1155 } else { 1156 /* 1157 * Without dropping the mutex, ensure that the port 1158 * startup happens ahead of state change callback 1159 * processing 1160 */ 1161 ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL); 1162 1163 port->fp_last_task = port->fp_task; 1164 port->fp_task = FP_TASK_PORT_STARTUP; 1165 1166 job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC, 1167 fp_startup_done, (opaque_t)port, KM_SLEEP); 1168 1169 port->fp_job_head = port->fp_job_tail = job; 1170 1171 cv_signal(&port->fp_cv); 1172 1173 mutex_exit(&port->fp_mutex); 1174 } 1175 1176 mutex_enter(&port->fp_mutex); 1177 while (port->fp_ulp_attach) { 1178 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 1179 } 1180 mutex_exit(&port->fp_mutex); 1181 1182 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 1183 "pm-components", fp_pm_comps, 1184 sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) != 1185 DDI_PROP_SUCCESS) { 1186 FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM" 1187 " components property, PM disabled on this port."); 1188 mutex_enter(&port->fp_mutex); 1189 port->fp_pm_level = FP_PM_PORT_UP; 1190 mutex_exit(&port->fp_mutex); 1191 } else { 1192 if (pm_raise_power(dip, FP_PM_COMPONENT, 1193 FP_PM_PORT_UP) != DDI_SUCCESS) { 1194 FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise" 1195 " power level"); 1196 mutex_enter(&port->fp_mutex); 1197 port->fp_pm_level = FP_PM_PORT_UP; 1198 mutex_exit(&port->fp_mutex); 1199 } 1200 1201 /* 1202 * Don't unset the FP_SOFT_NO_PMCOMP flag until after 1203 * the call to pm_raise_power. The PM framework can't 1204 * handle multiple threads calling into it during attach. 1205 */ 1206 1207 mutex_enter(&port->fp_mutex); 1208 port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP; 1209 mutex_exit(&port->fp_mutex); 1210 } 1211 1212 ddi_report_dev(dip); 1213 1214 fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH); 1215 1216 return (DDI_SUCCESS); 1217 1218 /* 1219 * Unwind any/all preceeding allocations in the event of an error. 
1220 */ 1221 1222 alloc_els_packet_failed: 1223 1224 if (port->fp_fca_handle != NULL) { 1225 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1226 port->fp_fca_handle = NULL; 1227 } 1228 1229 if (port->fp_ub_tokens != NULL) { 1230 (void) fc_ulp_ubfree(port, port->fp_ub_count, 1231 port->fp_ub_tokens); 1232 kmem_free(port->fp_ub_tokens, 1233 port->fp_ub_count * sizeof (*port->fp_ub_tokens)); 1234 port->fp_ub_tokens = NULL; 1235 } 1236 1237 if (port->fp_els_resp_pkt != NULL) { 1238 fp_free_pkt(port->fp_els_resp_pkt); 1239 port->fp_els_resp_pkt = NULL; 1240 } 1241 1242 bind_callbacks_failed: 1243 1244 if (port->fp_taskq != NULL) { 1245 taskq_destroy(port->fp_taskq); 1246 } 1247 1248 if (port->fp_pwwn_table != NULL) { 1249 kmem_free(port->fp_pwwn_table, 1250 pwwn_table_size * sizeof (struct pwwn_hash)); 1251 port->fp_pwwn_table = NULL; 1252 } 1253 1254 if (port->fp_did_table != NULL) { 1255 kmem_free(port->fp_did_table, 1256 did_table_size * sizeof (struct d_id_hash)); 1257 port->fp_did_table = NULL; 1258 } 1259 1260 if (port->fp_pkt_cache != NULL) { 1261 kmem_cache_destroy(port->fp_pkt_cache); 1262 port->fp_pkt_cache = NULL; 1263 } 1264 1265 cache_alloc_failed: 1266 1267 cv_destroy(&port->fp_attach_cv); 1268 cv_destroy(&port->fp_cv); 1269 mutex_destroy(&port->fp_mutex); 1270 ddi_remove_minor_node(port->fp_port_dip, NULL); 1271 ddi_soft_state_free(fp_driver_softstate, instance); 1272 ddi_prop_remove_all(dip); 1273 1274 return (DDI_FAILURE); 1275 } 1276 1277 1278 /* 1279 * Handle DDI_RESUME request 1280 */ 1281 static int 1282 fp_resume_handler(dev_info_t *dip) 1283 { 1284 int rval; 1285 fc_local_port_t *port; 1286 1287 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 1288 1289 ASSERT(port != NULL); 1290 1291 #ifdef DEBUG 1292 mutex_enter(&port->fp_mutex); 1293 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND); 1294 mutex_exit(&port->fp_mutex); 1295 #endif 1296 1297 /* 1298 * If the port was power suspended, raise the power level 1299 */ 1300 mutex_enter(&port->fp_mutex); 1301 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 1302 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 1303 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 1304 1305 mutex_exit(&port->fp_mutex); 1306 if (pm_raise_power(dip, FP_PM_COMPONENT, 1307 FP_PM_PORT_UP) != DDI_SUCCESS) { 1308 FP_TRACE(FP_NHEAD2(9, 0), 1309 "Failed to raise the power level"); 1310 return (DDI_FAILURE); 1311 } 1312 mutex_enter(&port->fp_mutex); 1313 } 1314 port->fp_soft_state &= ~FP_SOFT_SUSPEND; 1315 mutex_exit(&port->fp_mutex); 1316 1317 /* 1318 * All the discovery is initiated and handled by per-port thread. 1319 * Further all the discovery is done in handled in callback mode 1320 * (not polled mode); In a specific case such as this, the discovery 1321 * is required to happen in polled mode. The easiest way out is 1322 * to bail out port thread and get started. Come back and fix this 1323 * to do on demand discovery initiated by ULPs. ULPs such as FCP 1324 * will do on-demand discovery during pre-power-up busctl handling 1325 * which will only be possible when SCSA provides a new HBA vector 1326 * for sending down the PM busctl requests. 
1327 */ 1328 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME); 1329 1330 rval = fp_resume_all(port, FC_CMD_RESUME); 1331 if (rval != DDI_SUCCESS) { 1332 mutex_enter(&port->fp_mutex); 1333 port->fp_soft_state |= FP_SOFT_SUSPEND; 1334 mutex_exit(&port->fp_mutex); 1335 (void) callb_generic_cpr(&port->fp_cpr_info, 1336 CB_CODE_CPR_CHKPT); 1337 } 1338 1339 return (rval); 1340 } 1341 1342 /* 1343 * Perform FC Port power on initialization 1344 */ 1345 static int 1346 fp_power_up(fc_local_port_t *port) 1347 { 1348 int rval; 1349 1350 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1351 1352 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0); 1353 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN); 1354 1355 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1356 1357 mutex_exit(&port->fp_mutex); 1358 1359 rval = fp_resume_all(port, FC_CMD_POWER_UP); 1360 if (rval != DDI_SUCCESS) { 1361 mutex_enter(&port->fp_mutex); 1362 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1363 } else { 1364 mutex_enter(&port->fp_mutex); 1365 } 1366 1367 return (rval); 1368 } 1369 1370 1371 /* 1372 * It is important to note that the power may possibly be removed between 1373 * SUSPEND and the ensuing RESUME operation. In such a context the underlying 1374 * FC port hardware would have gone through an OFFLINE to ONLINE transition 1375 * (hardware state). In this case, the port driver may need to rediscover the 1376 * topology, perform LOGINs, register with the name server again and perform 1377 * any such port initialization procedures. To perform LOGINs, the driver could 1378 * use the port device handle to see if a LOGIN needs to be performed and use 1379 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured 1380 * or removed) which will be reflected in the map the ULPs will see. 1381 */ 1382 static int 1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd) 1384 { 1385 1386 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1387 1388 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1389 return (DDI_FAILURE); 1390 } 1391 1392 mutex_enter(&port->fp_mutex); 1393 1394 /* 1395 * If there are commands queued for delayed retry, instead of 1396 * working the hard way to figure out which ones are good for 1397 * restart and which ones not (ELSs are definitely not good 1398 * as the port will have to go through a new spin of rediscovery 1399 * now), so just flush them out. 1400 */ 1401 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) { 1402 fp_cmd_t *cmd; 1403 1404 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT; 1405 1406 mutex_exit(&port->fp_mutex); 1407 while ((cmd = fp_deque_cmd(port)) != NULL) { 1408 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 1409 fp_iodone(cmd); 1410 } 1411 mutex_enter(&port->fp_mutex); 1412 } 1413 1414 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) { 1415 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) || 1416 port->fp_dev_count) { 1417 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1418 port->fp_offline_tid = timeout(fp_offline_timeout, 1419 (caddr_t)port, fp_offline_ticks); 1420 } 1421 if (port->fp_job_head) { 1422 cv_signal(&port->fp_cv); 1423 } 1424 mutex_exit(&port->fp_mutex); 1425 fctl_attach_ulps(port, cmd, &modlinkage); 1426 } else { 1427 struct job_request *job; 1428 1429 /* 1430 * If an OFFLINE timer was running at the time of 1431 * suspending, there is no need to restart it as 1432 * the port is ONLINE now. 
1433 */ 1434 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1435 if (port->fp_statec_busy == 0) { 1436 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 1437 } 1438 port->fp_statec_busy++; 1439 mutex_exit(&port->fp_mutex); 1440 1441 job = fctl_alloc_job(JOB_PORT_ONLINE, 1442 JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP); 1443 fctl_enque_job(port, job); 1444 1445 fctl_jobwait(job); 1446 fctl_remove_oldies(port); 1447 1448 fctl_attach_ulps(port, cmd, &modlinkage); 1449 fctl_dealloc_job(job); 1450 } 1451 1452 return (DDI_SUCCESS); 1453 } 1454 1455 1456 /* 1457 * At this time, there shouldn't be any I/O requests on this port. 1458 * But the unsolicited callbacks from the underlying FCA port need 1459 * to be handled very carefully. The steps followed to handle the 1460 * DDI_DETACH are: 1461 * + Grab the port driver mutex, check if the unsolicited 1462 * callback is currently under processing. If true, fail 1463 * the DDI_DETACH request by printing a message; If false 1464 * mark the DDI_DETACH as under progress, so that any 1465 * further unsolicited callbacks get bounced. 1466 * + Perform PRLO/LOGO if necessary, cleanup all the data 1467 * structures. 1468 * + Get the job_handler thread to gracefully exit. 1469 * + Unregister callbacks with the FCA port. 1470 * + Now that some peace is found, notify all the ULPs of 1471 * DDI_DETACH request (using ulp_port_detach entry point) 1472 * + Free all mutexes, semaphores, conditional variables. 1473 * + Free the soft state, return success. 1474 * 1475 * Important considerations: 1476 * Port driver de-registers state change and unsolicited 1477 * callbacks before taking up the task of notifying ULPs 1478 * and performing PRLO and LOGOs. 1479 * 1480 * A port may go offline at the time PRLO/LOGO is being 1481 * requested. It is expected of all FCA drivers to fail 1482 * such requests either immediately with a FC_OFFLINE 1483 * return code to fc_fca_transport() or return the packet 1484 * asynchronously with pkt state set to FC_PKT_PORT_OFFLINE 1485 */ 1486 static int 1487 fp_detach_handler(fc_local_port_t *port) 1488 { 1489 job_request_t *job; 1490 uint32_t delay_count; 1491 fc_orphan_t *orp, *tmporp; 1492 1493 /* 1494 * In a Fabric topology with many host ports connected to 1495 * a switch, another detaching instance of fp might have 1496 * triggered a LOGO (which is an unsolicited request to 1497 * this instance). So in order to be able to successfully 1498 * detach by taking care of such cases a delay of about 1499 * 30 seconds is introduced. 1500 */ 1501 delay_count = 0; 1502 mutex_enter(&port->fp_mutex); 1503 while ((port->fp_soft_state & 1504 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) && 1505 (delay_count < 30)) { 1506 mutex_exit(&port->fp_mutex); 1507 delay_count++; 1508 delay(drv_usectohz(1000000)); 1509 mutex_enter(&port->fp_mutex); 1510 } 1511 1512 if (port->fp_soft_state & 1513 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) { 1514 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1515 mutex_exit(&port->fp_mutex); 1516 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1517 " Failing detach", port->fp_instance); 1518 return (DDI_FAILURE); 1519 } 1520 1521 port->fp_soft_state |= FP_SOFT_IN_DETACH; 1522 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1523 mutex_exit(&port->fp_mutex); 1524 1525 /* 1526 * If we're powered down, we need to raise power prior to submitting 1527 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never 1528 * process the shutdown job. 
1529 */ 1530 if (fctl_busy_port(port) != 0) { 1531 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed", 1532 port->fp_instance); 1533 mutex_enter(&port->fp_mutex); 1534 port->fp_soft_state &= ~FP_SOFT_IN_DETACH; 1535 mutex_exit(&port->fp_mutex); 1536 return (DDI_FAILURE); 1537 } 1538 1539 /* 1540 * This will deallocate data structs and cause the "job" thread 1541 * to exit, in preparation for DDI_DETACH on the instance. 1542 * This can sleep for an arbitrary duration, since it waits for 1543 * commands over the wire, timeout(9F) callbacks, etc. 1544 * 1545 * CAUTION: There is still a race here, where the "job" thread 1546 * can still be executing code even tho the fctl_jobwait() call 1547 * below has returned to us. In theory the fp driver could even be 1548 * modunloaded even tho the job thread isn't done executing. 1549 * without creating the race condition. 1550 */ 1551 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL, 1552 (opaque_t)port, KM_SLEEP); 1553 fctl_enque_job(port, job); 1554 fctl_jobwait(job); 1555 fctl_dealloc_job(job); 1556 1557 1558 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT, 1559 FP_PM_PORT_DOWN); 1560 1561 if (port->fp_taskq) { 1562 taskq_destroy(port->fp_taskq); 1563 } 1564 1565 ddi_prop_remove_all(port->fp_port_dip); 1566 1567 ddi_remove_minor_node(port->fp_port_dip, NULL); 1568 1569 fctl_remove_port(port); 1570 1571 fp_free_pkt(port->fp_els_resp_pkt); 1572 1573 if (port->fp_ub_tokens) { 1574 if (fc_ulp_ubfree(port, port->fp_ub_count, 1575 port->fp_ub_tokens) != FC_SUCCESS) { 1576 cmn_err(CE_WARN, "fp(%d): couldn't free " 1577 " unsolicited buffers", port->fp_instance); 1578 } 1579 kmem_free(port->fp_ub_tokens, 1580 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1581 port->fp_ub_tokens = NULL; 1582 } 1583 1584 if (port->fp_pkt_cache != NULL) { 1585 kmem_cache_destroy(port->fp_pkt_cache); 1586 } 1587 1588 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1589 1590 mutex_enter(&port->fp_mutex); 1591 if (port->fp_did_table) { 1592 kmem_free(port->fp_did_table, did_table_size * 1593 sizeof (struct d_id_hash)); 1594 } 1595 1596 if (port->fp_pwwn_table) { 1597 kmem_free(port->fp_pwwn_table, pwwn_table_size * 1598 sizeof (struct pwwn_hash)); 1599 } 1600 orp = port->fp_orphan_list; 1601 while (orp) { 1602 tmporp = orp; 1603 orp = orp->orp_next; 1604 kmem_free(tmporp, sizeof (*orp)); 1605 } 1606 1607 mutex_exit(&port->fp_mutex); 1608 1609 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH); 1610 1611 mutex_destroy(&port->fp_mutex); 1612 cv_destroy(&port->fp_attach_cv); 1613 cv_destroy(&port->fp_cv); 1614 ddi_soft_state_free(fp_driver_softstate, port->fp_instance); 1615 1616 return (DDI_SUCCESS); 1617 } 1618 1619 1620 /* 1621 * Steps to perform DDI_SUSPEND operation on a FC port 1622 * 1623 * - If already suspended return DDI_FAILURE 1624 * - If already power-suspended return DDI_SUCCESS 1625 * - If an unsolicited callback or state change handling is in 1626 * in progress, throw a warning message, return DDI_FAILURE 1627 * - Cancel timeouts 1628 * - SUSPEND the job_handler thread (means do nothing as it is 1629 * taken care of by the CPR frame work) 1630 */ 1631 static int 1632 fp_suspend_handler(fc_local_port_t *port) 1633 { 1634 uint32_t delay_count; 1635 1636 mutex_enter(&port->fp_mutex); 1637 1638 /* 1639 * The following should never happen, but 1640 * let the driver be more defensive here 1641 */ 1642 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1643 mutex_exit(&port->fp_mutex); 1644 return (DDI_FAILURE); 1645 } 1646 1647 /* 1648 * If the port is already 
power suspended, there 1649 * is nothing else to do, So return DDI_SUCCESS, 1650 * but mark the SUSPEND bit in the soft state 1651 * before leaving. 1652 */ 1653 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1654 port->fp_soft_state |= FP_SOFT_SUSPEND; 1655 mutex_exit(&port->fp_mutex); 1656 return (DDI_SUCCESS); 1657 } 1658 1659 /* 1660 * Check if an unsolicited callback or state change handling is 1661 * in progress. If true, fail the suspend operation; also throw 1662 * a warning message notifying the failure. Note that Sun PCI 1663 * hotplug spec recommends messages in cases of failure (but 1664 * not flooding the console) 1665 * 1666 * Busy waiting for a short interval (500 millisecond ?) to see 1667 * if the callback processing completes may be another idea. Since 1668 * most of the callback processing involves a lot of work, it 1669 * is safe to just fail the SUSPEND operation. It is definitely 1670 * not bad to fail the SUSPEND operation if the driver is busy. 1671 */ 1672 delay_count = 0; 1673 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1674 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) { 1675 mutex_exit(&port->fp_mutex); 1676 delay_count++; 1677 delay(drv_usectohz(1000000)); 1678 mutex_enter(&port->fp_mutex); 1679 } 1680 1681 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1682 FP_SOFT_IN_UNSOL_CB)) { 1683 mutex_exit(&port->fp_mutex); 1684 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1685 " Failing suspend", port->fp_instance); 1686 return (DDI_FAILURE); 1687 } 1688 1689 /* 1690 * Check of FC port thread is busy 1691 */ 1692 if (port->fp_job_head) { 1693 mutex_exit(&port->fp_mutex); 1694 FP_TRACE(FP_NHEAD2(9, 0), 1695 "FC port thread is busy: Failing suspend"); 1696 return (DDI_FAILURE); 1697 } 1698 port->fp_soft_state |= FP_SOFT_SUSPEND; 1699 1700 fp_suspend_all(port); 1701 mutex_exit(&port->fp_mutex); 1702 1703 return (DDI_SUCCESS); 1704 } 1705 1706 1707 /* 1708 * Prepare for graceful power down of a FC port 1709 */ 1710 static int 1711 fp_power_down(fc_local_port_t *port) 1712 { 1713 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1714 1715 /* 1716 * Power down request followed by a DDI_SUSPEND should 1717 * never happen; If it does return DDI_SUCCESS 1718 */ 1719 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1720 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1721 return (DDI_SUCCESS); 1722 } 1723 1724 /* 1725 * If the port is already power suspended, there 1726 * is nothing else to do, So return DDI_SUCCESS, 1727 */ 1728 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1729 return (DDI_SUCCESS); 1730 } 1731 1732 /* 1733 * Check if an unsolicited callback or state change handling 1734 * is in progress. If true, fail the PM suspend operation. 1735 * But don't print a message unless the verbosity of the 1736 * driver desires otherwise. 
1737 */ 1738 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) || 1739 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) { 1740 FP_TRACE(FP_NHEAD2(9, 0), 1741 "Unsolicited callback in progress: Failing power down"); 1742 return (DDI_FAILURE); 1743 } 1744 1745 /* 1746 * Check of FC port thread is busy 1747 */ 1748 if (port->fp_job_head) { 1749 FP_TRACE(FP_NHEAD2(9, 0), 1750 "FC port thread is busy: Failing power down"); 1751 return (DDI_FAILURE); 1752 } 1753 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1754 1755 /* 1756 * check if the ULPs are ready for power down 1757 */ 1758 mutex_exit(&port->fp_mutex); 1759 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN, 1760 &modlinkage) != FC_SUCCESS) { 1761 mutex_enter(&port->fp_mutex); 1762 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1763 mutex_exit(&port->fp_mutex); 1764 1765 /* 1766 * Power back up the obedient ULPs that went down 1767 */ 1768 fp_attach_ulps(port, FC_CMD_POWER_UP); 1769 1770 FP_TRACE(FP_NHEAD2(9, 0), 1771 "ULP(s) busy, detach_ulps failed. Failing power down"); 1772 mutex_enter(&port->fp_mutex); 1773 return (DDI_FAILURE); 1774 } 1775 mutex_enter(&port->fp_mutex); 1776 1777 fp_suspend_all(port); 1778 1779 return (DDI_SUCCESS); 1780 } 1781 1782 1783 /* 1784 * Suspend the entire FC port 1785 */ 1786 static void 1787 fp_suspend_all(fc_local_port_t *port) 1788 { 1789 int index; 1790 struct pwwn_hash *head; 1791 fc_remote_port_t *pd; 1792 1793 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1794 1795 if (port->fp_wait_tid != 0) { 1796 timeout_id_t tid; 1797 1798 tid = port->fp_wait_tid; 1799 port->fp_wait_tid = (timeout_id_t)NULL; 1800 mutex_exit(&port->fp_mutex); 1801 (void) untimeout(tid); 1802 mutex_enter(&port->fp_mutex); 1803 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT; 1804 } 1805 1806 if (port->fp_offline_tid) { 1807 timeout_id_t tid; 1808 1809 tid = port->fp_offline_tid; 1810 port->fp_offline_tid = (timeout_id_t)NULL; 1811 mutex_exit(&port->fp_mutex); 1812 (void) untimeout(tid); 1813 mutex_enter(&port->fp_mutex); 1814 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT; 1815 } 1816 mutex_exit(&port->fp_mutex); 1817 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1818 mutex_enter(&port->fp_mutex); 1819 1820 /* 1821 * Mark all devices as OLD, and reset the LOGIN state as well 1822 * (this will force the ULPs to perform a LOGIN after calling 1823 * fc_portgetmap() during RESUME/PM_RESUME) 1824 */ 1825 for (index = 0; index < pwwn_table_size; index++) { 1826 head = &port->fp_pwwn_table[index]; 1827 pd = head->pwwn_head; 1828 while (pd != NULL) { 1829 mutex_enter(&pd->pd_mutex); 1830 fp_remote_port_offline(pd); 1831 fctl_delist_did_table(port, pd); 1832 pd->pd_state = PORT_DEVICE_VALID; 1833 pd->pd_login_count = 0; 1834 mutex_exit(&pd->pd_mutex); 1835 pd = pd->pd_wwn_hnext; 1836 } 1837 } 1838 } 1839 1840 1841 /* 1842 * fp_cache_constructor: Constructor function for kmem_cache_create(9F). 1843 * Performs intializations for fc_packet_t structs. 1844 * Returns 0 for success or -1 for failure. 1845 * 1846 * This function allocates DMA handles for both command and responses. 1847 * Most of the ELSs used have both command and responses so it is strongly 1848 * desired to move them to cache constructor routine. 1849 * 1850 * Context: Can sleep iff called with KM_SLEEP flag. 1851 */ 1852 static int 1853 fp_cache_constructor(void *buf, void *cdarg, int kmflags) 1854 { 1855 int (*cb) (caddr_t); 1856 fc_packet_t *pkt; 1857 fp_cmd_t *cmd = (fp_cmd_t *)buf; 1858 fc_local_port_t *port = (fc_local_port_t *)cdarg; 1859 1860 cb = (kmflags == KM_SLEEP) ? 
DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1861 1862 cmd->cmd_next = NULL; 1863 cmd->cmd_flags = 0; 1864 cmd->cmd_dflags = 0; 1865 cmd->cmd_job = NULL; 1866 cmd->cmd_port = port; 1867 pkt = &cmd->cmd_pkt; 1868 1869 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1870 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1871 &pkt->pkt_cmd_dma) != DDI_SUCCESS) { 1872 return (-1); 1873 } 1874 1875 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1876 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1877 &pkt->pkt_resp_dma) != DDI_SUCCESS) { 1878 ddi_dma_free_handle(&pkt->pkt_cmd_dma); 1879 return (-1); 1880 } 1881 1882 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL; 1883 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt = 1884 pkt->pkt_data_cookie_cnt = 0; 1885 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie = 1886 pkt->pkt_data_cookie = NULL; 1887 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t); 1888 1889 return (0); 1890 } 1891 1892 1893 /* 1894 * fp_cache_destructor: Destructor function for kmem_cache_create(). 1895 * Performs un-intializations for fc_packet_t structs. 1896 */ 1897 /* ARGSUSED */ 1898 static void 1899 fp_cache_destructor(void *buf, void *cdarg) 1900 { 1901 fp_cmd_t *cmd = (fp_cmd_t *)buf; 1902 fc_packet_t *pkt; 1903 1904 pkt = &cmd->cmd_pkt; 1905 if (pkt->pkt_cmd_dma) { 1906 ddi_dma_free_handle(&pkt->pkt_cmd_dma); 1907 } 1908 1909 if (pkt->pkt_resp_dma) { 1910 ddi_dma_free_handle(&pkt->pkt_resp_dma); 1911 } 1912 } 1913 1914 1915 /* 1916 * Packet allocation for ELS and any other port driver commands 1917 * 1918 * Some ELSs like FLOGI and PLOGI are critical for topology and 1919 * device discovery and a system's inability to allocate memory 1920 * or DVMA resources while performing some of these critical ELSs 1921 * cause a lot of problem. While memory allocation failures are 1922 * rare, DVMA resource failures are common as the applications 1923 * are becoming more and more powerful on huge servers. So it 1924 * is desirable to have a framework support to reserve a fragment 1925 * of DVMA. So until this is fixed the correct way, the suffering 1926 * is huge whenever a LIP happens at a time DVMA resources are 1927 * drained out completely - So an attempt needs to be made to 1928 * KM_SLEEP while requesting for these resources, hoping that 1929 * the requests won't hang forever. 1930 * 1931 * The fc_remote_port_t argument is stored into the pkt_pd field in the 1932 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This 1933 * ensures that the pd_ref_count for the fc_remote_port_t is valid. 1934 * If there is no fc_remote_port_t associated with the fc_packet_t, then 1935 * fp_alloc_pkt() must be called with pd set to NULL. 1936 */ 1937 1938 static fp_cmd_t * 1939 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags, 1940 fc_remote_port_t *pd) 1941 { 1942 int rval; 1943 ulong_t real_len; 1944 fp_cmd_t *cmd; 1945 fc_packet_t *pkt; 1946 int (*cb) (caddr_t); 1947 ddi_dma_cookie_t pkt_cookie; 1948 ddi_dma_cookie_t *cp; 1949 uint32_t cnt; 1950 1951 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1952 1953 cb = (kmflags == KM_SLEEP) ? 
DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1954 1955 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags); 1956 if (cmd == NULL) { 1957 return (cmd); 1958 } 1959 1960 cmd->cmd_ulp_pkt = NULL; 1961 cmd->cmd_flags = 0; 1962 pkt = &cmd->cmd_pkt; 1963 ASSERT(cmd->cmd_dflags == 0); 1964 1965 pkt->pkt_datalen = 0; 1966 pkt->pkt_data = NULL; 1967 pkt->pkt_state = 0; 1968 pkt->pkt_action = 0; 1969 pkt->pkt_reason = 0; 1970 pkt->pkt_expln = 0; 1971 1972 /* 1973 * Init pkt_pd with the given pointer; this must be done _before_ 1974 * the call to fc_ulp_init_packet(). 1975 */ 1976 pkt->pkt_pd = pd; 1977 1978 /* Now call the FCA driver to init its private, per-packet fields */ 1979 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1980 goto alloc_pkt_failed; 1981 } 1982 1983 if (cmd_len) { 1984 ASSERT(pkt->pkt_cmd_dma != NULL); 1985 1986 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 1987 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 1988 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 1989 &pkt->pkt_cmd_acc); 1990 1991 if (rval != DDI_SUCCESS) { 1992 goto alloc_pkt_failed; 1993 } 1994 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 1995 1996 if (real_len < cmd_len) { 1997 goto alloc_pkt_failed; 1998 } 1999 2000 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2001 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2002 DDI_DMA_CONSISTENT, cb, NULL, 2003 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2004 2005 if (rval != DDI_DMA_MAPPED) { 2006 goto alloc_pkt_failed; 2007 } 2008 2009 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2010 2011 if (pkt->pkt_cmd_cookie_cnt > 2012 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2013 goto alloc_pkt_failed; 2014 } 2015 2016 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2017 2018 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2019 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2020 KM_NOSLEEP); 2021 2022 if (cp == NULL) { 2023 goto alloc_pkt_failed; 2024 } 2025 2026 *cp = pkt_cookie; 2027 cp++; 2028 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2029 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2030 *cp = pkt_cookie; 2031 } 2032 } 2033 2034 if (resp_len) { 2035 ASSERT(pkt->pkt_resp_dma != NULL); 2036 2037 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2038 port->fp_fca_tran->fca_acc_attr, 2039 DDI_DMA_CONSISTENT, cb, NULL, 2040 (caddr_t *)&pkt->pkt_resp, &real_len, 2041 &pkt->pkt_resp_acc); 2042 2043 if (rval != DDI_SUCCESS) { 2044 goto alloc_pkt_failed; 2045 } 2046 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2047 2048 if (real_len < resp_len) { 2049 goto alloc_pkt_failed; 2050 } 2051 2052 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2053 pkt->pkt_resp, real_len, DDI_DMA_READ | 2054 DDI_DMA_CONSISTENT, cb, NULL, 2055 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2056 2057 if (rval != DDI_DMA_MAPPED) { 2058 goto alloc_pkt_failed; 2059 } 2060 2061 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2062 2063 if (pkt->pkt_resp_cookie_cnt > 2064 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2065 goto alloc_pkt_failed; 2066 } 2067 2068 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2069 2070 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2071 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2072 KM_NOSLEEP); 2073 2074 if (cp == NULL) { 2075 goto alloc_pkt_failed; 2076 } 2077 2078 *cp = pkt_cookie; 2079 cp++; 2080 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2081 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2082 *cp = pkt_cookie; 2083 } 2084 } 2085 2086 pkt->pkt_cmdlen = cmd_len; 2087 
pkt->pkt_rsplen = resp_len; 2088 pkt->pkt_ulp_private = cmd; 2089 2090 return (cmd); 2091 2092 alloc_pkt_failed: 2093 2094 fp_free_dma(cmd); 2095 2096 if (pkt->pkt_cmd_cookie != NULL) { 2097 kmem_free(pkt->pkt_cmd_cookie, 2098 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2099 pkt->pkt_cmd_cookie = NULL; 2100 } 2101 2102 if (pkt->pkt_resp_cookie != NULL) { 2103 kmem_free(pkt->pkt_resp_cookie, 2104 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2105 pkt->pkt_resp_cookie = NULL; 2106 } 2107 2108 kmem_cache_free(port->fp_pkt_cache, cmd); 2109 2110 return (NULL); 2111 } 2112 2113 2114 /* 2115 * Free FC packet 2116 */ 2117 static void 2118 fp_free_pkt(fp_cmd_t *cmd) 2119 { 2120 fc_local_port_t *port; 2121 fc_packet_t *pkt; 2122 2123 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2124 2125 cmd->cmd_next = NULL; 2126 cmd->cmd_job = NULL; 2127 pkt = &cmd->cmd_pkt; 2128 pkt->pkt_ulp_private = 0; 2129 pkt->pkt_tran_flags = 0; 2130 pkt->pkt_tran_type = 0; 2131 port = cmd->cmd_port; 2132 2133 if (pkt->pkt_cmd_cookie != NULL) { 2134 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2135 sizeof (ddi_dma_cookie_t)); 2136 pkt->pkt_cmd_cookie = NULL; 2137 } 2138 2139 if (pkt->pkt_resp_cookie != NULL) { 2140 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2141 sizeof (ddi_dma_cookie_t)); 2142 pkt->pkt_resp_cookie = NULL; 2143 } 2144 2145 fp_free_dma(cmd); 2146 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2147 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2148 } 2149 2150 2151 /* 2152 * Release DVMA resources 2153 */ 2154 static void 2155 fp_free_dma(fp_cmd_t *cmd) 2156 { 2157 fc_packet_t *pkt = &cmd->cmd_pkt; 2158 2159 pkt->pkt_cmdlen = 0; 2160 pkt->pkt_rsplen = 0; 2161 pkt->pkt_tran_type = 0; 2162 pkt->pkt_tran_flags = 0; 2163 2164 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2165 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2166 } 2167 2168 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2169 if (pkt->pkt_cmd_acc) { 2170 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2171 } 2172 } 2173 2174 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2175 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2176 } 2177 2178 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2179 if (pkt->pkt_resp_acc) { 2180 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2181 } 2182 } 2183 cmd->cmd_dflags = 0; 2184 } 2185 2186 2187 /* 2188 * Dedicated thread to perform various activities. One thread for 2189 * each fc_local_port_t (driver soft state) instance. 2190 * Note, this effectively works out to one thread for each local 2191 * port, but there are also some Solaris taskq threads in use on a per-local 2192 * port basis; these also need to be taken into consideration. 2193 */ 2194 static void 2195 fp_job_handler(fc_local_port_t *port) 2196 { 2197 int rval; 2198 uint32_t *d_id; 2199 fc_remote_port_t *pd; 2200 job_request_t *job; 2201 2202 #ifndef __lock_lint 2203 /* 2204 * Solaris-internal stuff for proper operation of kernel threads 2205 * with Solaris CPR. 2206 */ 2207 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2208 callb_generic_cpr, "fp_job_handler"); 2209 #endif 2210 2211 2212 /* Loop forever waiting for work to do */ 2213 for (;;) { 2214 2215 mutex_enter(&port->fp_mutex); 2216 2217 /* 2218 * Sleep if no work to do right now, or if we want 2219 * to suspend or power-down. 
2220 */ 2221 while (port->fp_job_head == NULL || 2222 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2223 FP_SOFT_SUSPEND))) { 2224 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2225 cv_wait(&port->fp_cv, &port->fp_mutex); 2226 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2227 } 2228 2229 /* 2230 * OK, we've just been woken up, so retrieve the next entry 2231 * from the head of the job queue for this local port. 2232 */ 2233 job = fctl_deque_job(port); 2234 2235 /* 2236 * Handle all the fp driver's supported job codes here 2237 * in this big honkin' switch. 2238 */ 2239 switch (job->job_code) { 2240 case JOB_PORT_SHUTDOWN: 2241 /* 2242 * fp_port_shutdown() is only called from here. This 2243 * will prepare the local port instance (softstate) 2244 * for detaching. This cancels timeout callbacks, 2245 * executes LOGOs with remote ports, cleans up tables, 2246 * and deallocates data structs. 2247 */ 2248 fp_port_shutdown(port, job); 2249 2250 /* 2251 * This will exit the job thread. 2252 */ 2253 #ifndef __lock_lint 2254 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2255 #else 2256 mutex_exit(&port->fp_mutex); 2257 #endif 2258 fctl_jobdone(job); 2259 thread_exit(); 2260 2261 /* NOTREACHED */ 2262 2263 case JOB_ATTACH_ULP: { 2264 /* 2265 * This job is spawned in response to a ULP calling 2266 * fc_ulp_add(). 2267 */ 2268 2269 boolean_t do_attach_ulps = B_TRUE; 2270 2271 /* 2272 * If fp is detaching, we don't want to call 2273 * fp_startup_done as this asynchronous 2274 * notification may interfere with the re-attach. 2275 */ 2276 2277 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2278 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2279 do_attach_ulps = B_FALSE; 2280 } else { 2281 /* 2282 * We are going to force the transport 2283 * to attach to the ULPs, so set 2284 * fp_ulp_attach. This will keep any 2285 * potential detach from occurring until 2286 * we are done. 2287 */ 2288 port->fp_ulp_attach = 1; 2289 } 2290 2291 mutex_exit(&port->fp_mutex); 2292 2293 /* 2294 * NOTE: Since we just dropped the mutex, there is now 2295 * a race window where the fp_soft_state check above 2296 * could change here. This race is covered because an 2297 * additional check was added in the functions hidden 2298 * under fp_startup_done(). 2299 */ 2300 if (do_attach_ulps == B_TRUE) { 2301 /* 2302 * This goes thru a bit of a convoluted call 2303 * chain before spawning off a DDI taskq 2304 * request to perform the actual attach 2305 * operations. Blocking can occur at a number 2306 * of points. 2307 */ 2308 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2309 } 2310 job->job_result = FC_SUCCESS; 2311 fctl_jobdone(job); 2312 break; 2313 } 2314 2315 case JOB_ULP_NOTIFY: { 2316 /* 2317 * Pass state change notifications up to any/all 2318 * registered ULPs. 2319 */ 2320 uint32_t statec; 2321 2322 statec = job->job_ulp_listlen; 2323 if (statec == FC_STATE_RESET_REQUESTED) { 2324 port->fp_last_task = port->fp_task; 2325 port->fp_task = FP_TASK_OFFLINE; 2326 fp_port_offline(port, 0); 2327 port->fp_task = port->fp_last_task; 2328 port->fp_last_task = FP_TASK_IDLE; 2329 } 2330 2331 if (--port->fp_statec_busy == 0) { 2332 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2333 } 2334 2335 mutex_exit(&port->fp_mutex); 2336 2337 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2338 fctl_jobdone(job); 2339 break; 2340 } 2341 2342 case JOB_PLOGI_ONE: 2343 /* 2344 * Issue a PLOGI to a single remote port. Multiple 2345 * PLOGIs to different remote ports may occur in 2346 * parallel. 
2347 * This can create the fc_remote_port_t if it does not 2348 * already exist. 2349 */ 2350 2351 mutex_exit(&port->fp_mutex); 2352 d_id = (uint32_t *)job->job_private; 2353 pd = fctl_get_remote_port_by_did(port, *d_id); 2354 2355 if (pd) { 2356 mutex_enter(&pd->pd_mutex); 2357 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2358 pd->pd_login_count++; 2359 mutex_exit(&pd->pd_mutex); 2360 job->job_result = FC_SUCCESS; 2361 fctl_jobdone(job); 2362 break; 2363 } 2364 mutex_exit(&pd->pd_mutex); 2365 } else { 2366 mutex_enter(&port->fp_mutex); 2367 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2368 mutex_exit(&port->fp_mutex); 2369 pd = fp_create_remote_port_by_ns(port, 2370 *d_id, KM_SLEEP); 2371 if (pd == NULL) { 2372 job->job_result = FC_FAILURE; 2373 fctl_jobdone(job); 2374 break; 2375 } 2376 } else { 2377 mutex_exit(&port->fp_mutex); 2378 } 2379 } 2380 2381 job->job_flags |= JOB_TYPE_FP_ASYNC; 2382 job->job_counter = 1; 2383 2384 rval = fp_port_login(port, *d_id, job, 2385 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2386 2387 if (rval != FC_SUCCESS) { 2388 job->job_result = rval; 2389 fctl_jobdone(job); 2390 } 2391 break; 2392 2393 case JOB_LOGO_ONE: { 2394 /* 2395 * Issue a PLOGO to a single remote port. Multiple 2396 * PLOGOs to different remote ports may occur in 2397 * parallel. 2398 */ 2399 fc_remote_port_t *pd; 2400 2401 #ifndef __lock_lint 2402 ASSERT(job->job_counter > 0); 2403 #endif 2404 2405 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2406 2407 mutex_enter(&pd->pd_mutex); 2408 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2409 mutex_exit(&pd->pd_mutex); 2410 job->job_result = FC_LOGINREQ; 2411 mutex_exit(&port->fp_mutex); 2412 fctl_jobdone(job); 2413 break; 2414 } 2415 if (pd->pd_login_count > 1) { 2416 pd->pd_login_count--; 2417 mutex_exit(&pd->pd_mutex); 2418 job->job_result = FC_SUCCESS; 2419 mutex_exit(&port->fp_mutex); 2420 fctl_jobdone(job); 2421 break; 2422 } 2423 mutex_exit(&pd->pd_mutex); 2424 mutex_exit(&port->fp_mutex); 2425 job->job_flags |= JOB_TYPE_FP_ASYNC; 2426 (void) fp_logout(port, pd, job); 2427 break; 2428 } 2429 2430 case JOB_FCIO_LOGIN: 2431 /* 2432 * PLOGI initiated at ioctl request. 2433 */ 2434 mutex_exit(&port->fp_mutex); 2435 job->job_result = 2436 fp_fcio_login(port, job->job_private, job); 2437 fctl_jobdone(job); 2438 break; 2439 2440 case JOB_FCIO_LOGOUT: 2441 /* 2442 * PLOGO initiated at ioctl request. 
2443 */ 2444 mutex_exit(&port->fp_mutex); 2445 job->job_result = 2446 fp_fcio_logout(port, job->job_private, job); 2447 fctl_jobdone(job); 2448 break; 2449 2450 case JOB_PORT_GETMAP: 2451 case JOB_PORT_GETMAP_PLOGI_ALL: { 2452 port->fp_last_task = port->fp_task; 2453 port->fp_task = FP_TASK_GETMAP; 2454 2455 switch (port->fp_topology) { 2456 case FC_TOP_PRIVATE_LOOP: 2457 job->job_counter = 1; 2458 2459 fp_get_loopmap(port, job); 2460 mutex_exit(&port->fp_mutex); 2461 fp_jobwait(job); 2462 fctl_fillout_map(port, 2463 (fc_portmap_t **)job->job_private, 2464 (uint32_t *)job->job_arg, 1, 0, 0); 2465 fctl_jobdone(job); 2466 mutex_enter(&port->fp_mutex); 2467 break; 2468 2469 case FC_TOP_PUBLIC_LOOP: 2470 case FC_TOP_FABRIC: 2471 mutex_exit(&port->fp_mutex); 2472 job->job_counter = 1; 2473 2474 job->job_result = fp_ns_getmap(port, 2475 job, (fc_portmap_t **)job->job_private, 2476 (uint32_t *)job->job_arg, 2477 FCTL_GAN_START_ID); 2478 fctl_jobdone(job); 2479 mutex_enter(&port->fp_mutex); 2480 break; 2481 2482 case FC_TOP_PT_PT: 2483 mutex_exit(&port->fp_mutex); 2484 fctl_fillout_map(port, 2485 (fc_portmap_t **)job->job_private, 2486 (uint32_t *)job->job_arg, 1, 0, 0); 2487 fctl_jobdone(job); 2488 mutex_enter(&port->fp_mutex); 2489 break; 2490 2491 default: 2492 mutex_exit(&port->fp_mutex); 2493 fctl_jobdone(job); 2494 mutex_enter(&port->fp_mutex); 2495 break; 2496 } 2497 port->fp_task = port->fp_last_task; 2498 port->fp_last_task = FP_TASK_IDLE; 2499 mutex_exit(&port->fp_mutex); 2500 break; 2501 } 2502 2503 case JOB_PORT_OFFLINE: { 2504 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2505 2506 port->fp_last_task = port->fp_task; 2507 port->fp_task = FP_TASK_OFFLINE; 2508 2509 if (port->fp_statec_busy > 2) { 2510 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2511 fp_port_offline(port, 0); 2512 if (--port->fp_statec_busy == 0) { 2513 port->fp_soft_state &= 2514 ~FP_SOFT_IN_STATEC_CB; 2515 } 2516 } else { 2517 fp_port_offline(port, 1); 2518 } 2519 2520 port->fp_task = port->fp_last_task; 2521 port->fp_last_task = FP_TASK_IDLE; 2522 2523 mutex_exit(&port->fp_mutex); 2524 2525 fctl_jobdone(job); 2526 break; 2527 } 2528 2529 case JOB_PORT_STARTUP: { 2530 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2531 if (port->fp_statec_busy > 1) { 2532 mutex_exit(&port->fp_mutex); 2533 break; 2534 } 2535 mutex_exit(&port->fp_mutex); 2536 2537 FP_TRACE(FP_NHEAD2(9, rval), 2538 "Topology discovery failed"); 2539 break; 2540 } 2541 2542 /* 2543 * Attempt building device handles in case 2544 * of private Loop. 
2545 */ 2546 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2547 job->job_counter = 1; 2548 2549 fp_get_loopmap(port, job); 2550 mutex_exit(&port->fp_mutex); 2551 fp_jobwait(job); 2552 mutex_enter(&port->fp_mutex); 2553 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2554 ASSERT(port->fp_total_devices == 0); 2555 port->fp_total_devices = 2556 port->fp_dev_count; 2557 } 2558 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2559 /* 2560 * Hack to avoid state changes going up early 2561 */ 2562 port->fp_statec_busy++; 2563 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2564 2565 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2566 fp_fabric_online(port, job); 2567 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2568 } 2569 mutex_exit(&port->fp_mutex); 2570 fctl_jobdone(job); 2571 break; 2572 } 2573 2574 case JOB_PORT_ONLINE: { 2575 char *newtop; 2576 char *oldtop; 2577 uint32_t old_top; 2578 2579 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2580 2581 /* 2582 * Bail out early if there are a lot of 2583 * state changes in the pipeline 2584 */ 2585 if (port->fp_statec_busy > 1) { 2586 --port->fp_statec_busy; 2587 mutex_exit(&port->fp_mutex); 2588 fctl_jobdone(job); 2589 break; 2590 } 2591 2592 switch (old_top = port->fp_topology) { 2593 case FC_TOP_PRIVATE_LOOP: 2594 oldtop = "Private Loop"; 2595 break; 2596 2597 case FC_TOP_PUBLIC_LOOP: 2598 oldtop = "Public Loop"; 2599 break; 2600 2601 case FC_TOP_PT_PT: 2602 oldtop = "Point to Point"; 2603 break; 2604 2605 case FC_TOP_FABRIC: 2606 oldtop = "Fabric"; 2607 break; 2608 2609 default: 2610 oldtop = NULL; 2611 break; 2612 } 2613 2614 port->fp_last_task = port->fp_task; 2615 port->fp_task = FP_TASK_ONLINE; 2616 2617 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2618 2619 port->fp_task = port->fp_last_task; 2620 port->fp_last_task = FP_TASK_IDLE; 2621 2622 if (port->fp_statec_busy > 1) { 2623 --port->fp_statec_busy; 2624 mutex_exit(&port->fp_mutex); 2625 break; 2626 } 2627 2628 port->fp_state = FC_STATE_OFFLINE; 2629 2630 FP_TRACE(FP_NHEAD2(9, rval), 2631 "Topology discovery failed"); 2632 2633 if (--port->fp_statec_busy == 0) { 2634 port->fp_soft_state &= 2635 ~FP_SOFT_IN_STATEC_CB; 2636 } 2637 2638 if (port->fp_offline_tid == NULL) { 2639 port->fp_offline_tid = 2640 timeout(fp_offline_timeout, 2641 (caddr_t)port, fp_offline_ticks); 2642 } 2643 2644 mutex_exit(&port->fp_mutex); 2645 break; 2646 } 2647 2648 switch (port->fp_topology) { 2649 case FC_TOP_PRIVATE_LOOP: 2650 newtop = "Private Loop"; 2651 break; 2652 2653 case FC_TOP_PUBLIC_LOOP: 2654 newtop = "Public Loop"; 2655 break; 2656 2657 case FC_TOP_PT_PT: 2658 newtop = "Point to Point"; 2659 break; 2660 2661 case FC_TOP_FABRIC: 2662 newtop = "Fabric"; 2663 break; 2664 2665 default: 2666 newtop = NULL; 2667 break; 2668 } 2669 2670 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2671 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2672 "Change in FC Topology old = %s new = %s", 2673 oldtop, newtop); 2674 } 2675 2676 switch (port->fp_topology) { 2677 case FC_TOP_PRIVATE_LOOP: { 2678 int orphan = (old_top == FC_TOP_FABRIC || 2679 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2680 2681 mutex_exit(&port->fp_mutex); 2682 fp_loop_online(port, job, orphan); 2683 break; 2684 } 2685 2686 case FC_TOP_PUBLIC_LOOP: 2687 /* FALLTHROUGH */ 2688 case FC_TOP_FABRIC: 2689 fp_fabric_online(port, job); 2690 mutex_exit(&port->fp_mutex); 2691 break; 2692 2693 case FC_TOP_PT_PT: 2694 fp_p2p_online(port, job); 2695 mutex_exit(&port->fp_mutex); 2696 break; 2697 2698 default: 2699 if (--port->fp_statec_busy != 0) { 2700 /* 2701 * Watch curiously at what the next 2702 * state transition can do. 2703 */ 2704 mutex_exit(&port->fp_mutex); 2705 break; 2706 } 2707 2708 FP_TRACE(FP_NHEAD2(9, 0), 2709 "Topology Unknown, Offlining the port.."); 2710 2711 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2712 port->fp_state = FC_STATE_OFFLINE; 2713 2714 if (port->fp_offline_tid == NULL) { 2715 port->fp_offline_tid = 2716 timeout(fp_offline_timeout, 2717 (caddr_t)port, fp_offline_ticks); 2718 } 2719 mutex_exit(&port->fp_mutex); 2720 break; 2721 } 2722 2723 mutex_enter(&port->fp_mutex); 2724 2725 port->fp_task = port->fp_last_task; 2726 port->fp_last_task = FP_TASK_IDLE; 2727 2728 mutex_exit(&port->fp_mutex); 2729 2730 fctl_jobdone(job); 2731 break; 2732 } 2733 2734 case JOB_PLOGI_GROUP: { 2735 mutex_exit(&port->fp_mutex); 2736 fp_plogi_group(port, job); 2737 break; 2738 } 2739 2740 case JOB_UNSOL_REQUEST: { 2741 mutex_exit(&port->fp_mutex); 2742 fp_handle_unsol_buf(port, 2743 (fc_unsol_buf_t *)job->job_private, job); 2744 fctl_dealloc_job(job); 2745 break; 2746 } 2747 2748 case JOB_NS_CMD: { 2749 fctl_ns_req_t *ns_cmd; 2750 2751 mutex_exit(&port->fp_mutex); 2752 2753 job->job_flags |= JOB_TYPE_FP_ASYNC; 2754 ns_cmd = (fctl_ns_req_t *)job->job_private; 2755 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2756 ns_cmd->ns_cmd_code > NS_DA_ID) { 2757 job->job_result = FC_BADCMD; 2758 fctl_jobdone(job); 2759 break; 2760 } 2761 2762 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2763 if (ns_cmd->ns_pd != NULL) { 2764 job->job_result = FC_BADOBJECT; 2765 fctl_jobdone(job); 2766 break; 2767 } 2768 2769 job->job_counter = 1; 2770 2771 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2772 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2773 2774 if (rval != FC_SUCCESS) { 2775 job->job_result = rval; 2776 fctl_jobdone(job); 2777 } 2778 break; 2779 } 2780 job->job_result = FC_SUCCESS; 2781 job->job_counter = 1; 2782 2783 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2784 if (rval != FC_SUCCESS) { 2785 fctl_jobdone(job); 2786 } 2787 break; 2788 } 2789 2790 case JOB_LINK_RESET: { 2791 la_wwn_t *pwwn; 2792 uint32_t topology; 2793 2794 pwwn = (la_wwn_t *)job->job_private; 2795 ASSERT(pwwn != NULL); 2796 2797 topology = port->fp_topology; 2798 mutex_exit(&port->fp_mutex); 2799 2800 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2801 topology == FC_TOP_PRIVATE_LOOP) { 2802 job->job_flags |= JOB_TYPE_FP_ASYNC; 2803 rval = port->fp_fca_tran->fca_reset( 2804 port->fp_fca_handle, FC_FCA_LINK_RESET); 2805 job->job_result = rval; 2806 fp_jobdone(job); 2807 } else { 2808 ASSERT((job->job_flags & 2809 JOB_TYPE_FP_ASYNC) == 0); 2810 2811 if (FC_IS_TOP_SWITCH(topology)) { 2812 rval = fp_remote_lip(port, pwwn, 2813 KM_SLEEP, job); 2814 } else { 2815 rval = FC_FAILURE; 2816 } 2817 if (rval != FC_SUCCESS) { 2818 job->job_result = rval; 2819 } 2820 fctl_jobdone(job); 2821 } 2822 break; 2823 } 2824 2825 default: 2826 mutex_exit(&port->fp_mutex); 2827 job->job_result = FC_BADCMD; 2828 fctl_jobdone(job); 2829 break; 2830 } 2831 } 2832 /* NOTREACHED */ 2833 } 2834 2835 2836 /* 2837 * Perform FC port bring up initialization 2838 */ 2839 static int 2840 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2841 { 2842 int rval; 2843 uint32_t state; 2844 uint32_t src_id; 2845 fc_lilpmap_t *lilp_map; 2846 2847 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2848 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2849 2850 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2851 " port=%p, job=%p", port, job); 2852 2853 port->fp_topology = FC_TOP_UNKNOWN; 2854 port->fp_port_id.port_id = 0; 2855 state = FC_PORT_STATE_MASK(port->fp_state); 2856 2857 if (state == FC_STATE_OFFLINE) { 2858 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2859 job->job_result = FC_OFFLINE; 2860 mutex_exit(&port->fp_mutex); 2861 fctl_jobdone(job); 2862 mutex_enter(&port->fp_mutex); 2863 return (FC_OFFLINE); 2864 } 2865 2866 if (state == FC_STATE_LOOP) { 2867 port->fp_port_type.port_type = FC_NS_PORT_NL; 2868 mutex_exit(&port->fp_mutex); 2869 2870 lilp_map = &port->fp_lilp_map; 2871 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2872 job->job_result = FC_FAILURE; 2873 fctl_jobdone(job); 2874 2875 FP_TRACE(FP_NHEAD1(9, rval), 2876 "LILP map Invalid or not present"); 2877 mutex_enter(&port->fp_mutex); 2878 return (FC_FAILURE); 2879 } 2880 2881 if (lilp_map->lilp_length == 0) { 2882 job->job_result = FC_NO_MAP; 2883 fctl_jobdone(job); 2884 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2885 "LILP map length zero"); 2886 mutex_enter(&port->fp_mutex); 2887 return (FC_NO_MAP); 2888 } 2889 src_id = lilp_map->lilp_myalpa & 0xFF; 2890 } else { 2891 fc_remote_port_t *pd; 2892 fc_fca_pm_t pm; 2893 fc_fca_p2p_info_t p2p_info; 2894 int pd_recepient; 2895 2896 /* 2897 * Get P2P remote port info if possible 2898 */ 2899 bzero((caddr_t)&pm, sizeof (pm)); 2900 2901 pm.pm_cmd_flags = FC_FCA_PM_READ; 2902 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2903 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2904 pm.pm_data_buf = (caddr_t)&p2p_info; 2905 2906 rval = port->fp_fca_tran->fca_port_manage( 2907 port->fp_fca_handle, &pm); 2908 2909 if (rval == FC_SUCCESS) { 2910 port->fp_port_id.port_id = p2p_info.fca_d_id; 2911 port->fp_port_type.port_type = FC_NS_PORT_N; 2912 port->fp_topology = FC_TOP_PT_PT; 2913 port->fp_total_devices = 1; 2914 pd_recepient = fctl_wwn_cmp( 2915 &port->fp_service_params.nport_ww_name, 2916 &p2p_info.pwwn) < 0 ? 
2917 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2918 mutex_exit(&port->fp_mutex); 2919 pd = fctl_create_remote_port(port, 2920 &p2p_info.nwwn, 2921 &p2p_info.pwwn, 2922 p2p_info.d_id, 2923 pd_recepient, KM_NOSLEEP); 2924 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2925 " P2P port=%p pd=%p", port, pd); 2926 mutex_enter(&port->fp_mutex); 2927 return (FC_SUCCESS); 2928 } 2929 port->fp_port_type.port_type = FC_NS_PORT_N; 2930 mutex_exit(&port->fp_mutex); 2931 src_id = 0; 2932 } 2933 2934 job->job_counter = 1; 2935 job->job_result = FC_SUCCESS; 2936 2937 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2938 KM_SLEEP)) != FC_SUCCESS) { 2939 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2940 job->job_result = FC_FAILURE; 2941 fctl_jobdone(job); 2942 2943 mutex_enter(&port->fp_mutex); 2944 if (port->fp_statec_busy <= 1) { 2945 mutex_exit(&port->fp_mutex); 2946 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 2947 "Couldn't transport FLOGI"); 2948 mutex_enter(&port->fp_mutex); 2949 } 2950 return (FC_FAILURE); 2951 } 2952 2953 fp_jobwait(job); 2954 2955 mutex_enter(&port->fp_mutex); 2956 if (job->job_result == FC_SUCCESS) { 2957 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2958 mutex_exit(&port->fp_mutex); 2959 fp_ns_init(port, job, KM_SLEEP); 2960 mutex_enter(&port->fp_mutex); 2961 } 2962 } else { 2963 if (state == FC_STATE_LOOP) { 2964 port->fp_topology = FC_TOP_PRIVATE_LOOP; 2965 port->fp_port_id.port_id = 2966 port->fp_lilp_map.lilp_myalpa & 0xFF; 2967 } 2968 } 2969 2970 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 2971 port, job); 2972 2973 return (FC_SUCCESS); 2974 } 2975 2976 2977 /* 2978 * Perform ULP invocations following FC port startup 2979 */ 2980 /* ARGSUSED */ 2981 static void 2982 fp_startup_done(opaque_t arg, uchar_t result) 2983 { 2984 fc_local_port_t *port = arg; 2985 2986 fp_attach_ulps(port, FC_CMD_ATTACH); 2987 2988 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 2989 } 2990 2991 2992 /* 2993 * Perform ULP port attach 2994 */ 2995 static void 2996 fp_ulp_port_attach(void *arg) 2997 { 2998 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 2999 fc_local_port_t *port = att->att_port; 3000 3001 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3002 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3003 3004 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3005 3006 if (att->att_need_pm_idle == B_TRUE) { 3007 fctl_idle_port(port); 3008 } 3009 3010 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3011 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3012 3013 mutex_enter(&att->att_port->fp_mutex); 3014 att->att_port->fp_ulp_attach = 0; 3015 3016 port->fp_task = port->fp_last_task; 3017 port->fp_last_task = FP_TASK_IDLE; 3018 3019 cv_signal(&att->att_port->fp_attach_cv); 3020 3021 mutex_exit(&att->att_port->fp_mutex); 3022 3023 kmem_free(att, sizeof (fp_soft_attach_t)); 3024 } 3025 3026 /* 3027 * Entry point to funnel all requests down to FCAs 3028 */ 3029 static int 3030 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3031 { 3032 int rval; 3033 3034 mutex_enter(&port->fp_mutex); 3035 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3036 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3037 FC_STATE_OFFLINE))) { 3038 /* 3039 * This means there is more than one state change pending 3040 * at this point in time. Since state changes are processed 3041 * serially, processing of the current one should simply be 3042 * failed so that we can move on to the next one. 3043 */
3044 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3045 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3046 if (cmd->cmd_job) { 3047 /* 3048 * A state change that is going to be invalidated 3049 * by another one already in the port driver's queue 3050 * need not go up to all ULPs. This will minimize 3051 * needless processing and ripples in ULP modules. 3052 */ 3053 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3054 } 3055 mutex_exit(&port->fp_mutex); 3056 return (FC_STATEC_BUSY); 3057 } 3058 3059 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3060 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3061 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3062 mutex_exit(&port->fp_mutex); 3063 3064 return (FC_OFFLINE); 3065 } 3066 mutex_exit(&port->fp_mutex); 3067 3068 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3069 if (rval != FC_SUCCESS) { 3070 if (rval == FC_TRAN_BUSY) { 3071 cmd->cmd_retry_interval = fp_retry_delay; 3072 rval = fp_retry_cmd(&cmd->cmd_pkt); 3073 if (rval == FC_FAILURE) { 3074 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3075 } 3076 } 3077 } 3078 3079 return (rval); 3080 } 3081 3082 3083 /* 3084 * Each time the retry timeout kicks in, walk the wait queue and 3085 * decrement the retry_interval; when it becomes less than 3086 * or equal to zero, re-transport the command. If the re-transport 3087 * fails with BUSY, enqueue the command in the wait queue again. 3088 * 3089 * In order to prevent looping forever because of commands enqueued 3090 * from within this function itself, save the current tail pointer 3091 * (in cur_tail) and exit the loop after serving this command. 3092 */ 3093 static void 3094 fp_resendcmd(void *port_handle) 3095 { 3096 int rval; 3097 fc_local_port_t *port; 3098 fp_cmd_t *cmd; 3099 fp_cmd_t *cur_tail; 3100 3101 port = port_handle; 3102 mutex_enter(&port->fp_mutex); 3103 cur_tail = port->fp_wait_tail; 3104 mutex_exit(&port->fp_mutex); 3105 3106 while ((cmd = fp_deque_cmd(port)) != NULL) { 3107 cmd->cmd_retry_interval -= fp_retry_ticker; 3108 /* Check if we are detaching */ 3109 if (port->fp_soft_state & 3110 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3111 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3112 cmd->cmd_pkt.pkt_reason = 0; 3113 fp_iodone(cmd); 3114 } else if (cmd->cmd_retry_interval <= 0) { 3115 rval = cmd->cmd_transport(port->fp_fca_handle, 3116 &cmd->cmd_pkt); 3117 3118 if (rval != FC_SUCCESS) { 3119 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3120 if (--cmd->cmd_retry_count) { 3121 fp_enque_cmd(port, cmd); 3122 if (cmd == cur_tail) { 3123 break; 3124 } 3125 continue; 3126 } 3127 cmd->cmd_pkt.pkt_state = 3128 FC_PKT_TRAN_BSY; 3129 } else { 3130 cmd->cmd_pkt.pkt_state = 3131 FC_PKT_TRAN_ERROR; 3132 } 3133 cmd->cmd_pkt.pkt_reason = 0; 3134 fp_iodone(cmd); 3135 } 3136 } else { 3137 fp_enque_cmd(port, cmd); 3138 } 3139 3140 if (cmd == cur_tail) { 3141 break; 3142 } 3143 } 3144 3145 mutex_enter(&port->fp_mutex); 3146 if (port->fp_wait_head) { 3147 timeout_id_t tid; 3148 3149 mutex_exit(&port->fp_mutex); 3150 tid = timeout(fp_resendcmd, (caddr_t)port, 3151 fp_retry_ticks); 3152 mutex_enter(&port->fp_mutex); 3153 port->fp_wait_tid = tid; 3154 } else { 3155 port->fp_wait_tid = NULL; 3156 } 3157 mutex_exit(&port->fp_mutex); 3158 } 3159 3160 3161 /* 3162 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here. 3163 * 3164 * Yes, as you can see below, cmd_retry_count is used here too.
That means 3165 * the retries for BUSY are less if there were transport failures (transport 3166 * failure means fca_transport failure). The goal is not to exceed overall 3167 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3168 * 3169 * Return Values: 3170 * FC_SUCCESS 3171 * FC_FAILURE 3172 */ 3173 static int 3174 fp_retry_cmd(fc_packet_t *pkt) 3175 { 3176 fp_cmd_t *cmd; 3177 3178 cmd = pkt->pkt_ulp_private; 3179 3180 if (--cmd->cmd_retry_count) { 3181 fp_enque_cmd(cmd->cmd_port, cmd); 3182 return (FC_SUCCESS); 3183 } else { 3184 return (FC_FAILURE); 3185 } 3186 } 3187 3188 3189 /* 3190 * Queue up FC packet for deferred retry 3191 */ 3192 static void 3193 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3194 { 3195 timeout_id_t tid; 3196 3197 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3198 3199 #ifdef DEBUG 3200 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3201 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3202 #endif 3203 3204 mutex_enter(&port->fp_mutex); 3205 if (port->fp_wait_tail) { 3206 port->fp_wait_tail->cmd_next = cmd; 3207 port->fp_wait_tail = cmd; 3208 } else { 3209 ASSERT(port->fp_wait_head == NULL); 3210 port->fp_wait_head = port->fp_wait_tail = cmd; 3211 if (port->fp_wait_tid == NULL) { 3212 mutex_exit(&port->fp_mutex); 3213 tid = timeout(fp_resendcmd, (caddr_t)port, 3214 fp_retry_ticks); 3215 mutex_enter(&port->fp_mutex); 3216 port->fp_wait_tid = tid; 3217 } 3218 } 3219 mutex_exit(&port->fp_mutex); 3220 } 3221 3222 3223 /* 3224 * Handle all RJT codes 3225 */ 3226 static int 3227 fp_handle_reject(fc_packet_t *pkt) 3228 { 3229 int rval = FC_FAILURE; 3230 uchar_t next_class; 3231 fp_cmd_t *cmd; 3232 3233 cmd = pkt->pkt_ulp_private; 3234 3235 if (pkt->pkt_reason != FC_REASON_CLASS_NOT_SUPP) { 3236 if (pkt->pkt_reason == FC_REASON_QFULL || 3237 pkt->pkt_reason == FC_REASON_LOGICAL_BSY) { 3238 cmd->cmd_retry_interval = fp_retry_delay; 3239 rval = fp_retry_cmd(pkt); 3240 } 3241 3242 return (rval); 3243 } 3244 3245 next_class = fp_get_nextclass(cmd->cmd_port, 3246 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3247 3248 if (next_class == FC_TRAN_CLASS_INVALID) { 3249 return (rval); 3250 } 3251 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3252 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3253 3254 rval = fp_sendcmd(cmd->cmd_port, cmd, 3255 cmd->cmd_port->fp_fca_handle); 3256 3257 if (rval != FC_SUCCESS) { 3258 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3259 } 3260 3261 return (rval); 3262 } 3263 3264 3265 /* 3266 * Return the next class of service supported by the FCA 3267 */ 3268 static uchar_t 3269 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3270 { 3271 uchar_t next_class; 3272 3273 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3274 3275 switch (cur_class) { 3276 case FC_TRAN_CLASS_INVALID: 3277 if (port->fp_cos & FC_NS_CLASS1) { 3278 next_class = FC_TRAN_CLASS1; 3279 break; 3280 } 3281 /* FALLTHROUGH */ 3282 3283 case FC_TRAN_CLASS1: 3284 if (port->fp_cos & FC_NS_CLASS2) { 3285 next_class = FC_TRAN_CLASS2; 3286 break; 3287 } 3288 /* FALLTHROUGH */ 3289 3290 case FC_TRAN_CLASS2: 3291 if (port->fp_cos & FC_NS_CLASS3) { 3292 next_class = FC_TRAN_CLASS3; 3293 break; 3294 } 3295 /* FALLTHROUGH */ 3296 3297 case FC_TRAN_CLASS3: 3298 default: 3299 next_class = FC_TRAN_CLASS_INVALID; 3300 break; 3301 } 3302 3303 return (next_class); 3304 } 3305 3306 3307 /* 3308 * Determine if a class of service is supported by the FCA 3309 */ 3310 static int 3311 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3312 { 3313 int rval; 3314 3315 switch (tran_class) 
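/*
 * Each case below simply tests the corresponding FC_NS_CLASSn bit in
 * the class-of-service mask; for example, fp_is_class_supported(cos,
 * FC_TRAN_CLASS3) returns FC_SUCCESS only when (cos & FC_NS_CLASS3) is
 * set. This mirrors the Class 1 -> 2 -> 3 fallback order used by
 * fp_get_nextclass() above.
 */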
{ 3316 case FC_TRAN_CLASS1: 3317 if (cos & FC_NS_CLASS1) { 3318 rval = FC_SUCCESS; 3319 } else { 3320 rval = FC_FAILURE; 3321 } 3322 break; 3323 3324 case FC_TRAN_CLASS2: 3325 if (cos & FC_NS_CLASS2) { 3326 rval = FC_SUCCESS; 3327 } else { 3328 rval = FC_FAILURE; 3329 } 3330 break; 3331 3332 case FC_TRAN_CLASS3: 3333 if (cos & FC_NS_CLASS3) { 3334 rval = FC_SUCCESS; 3335 } else { 3336 rval = FC_FAILURE; 3337 } 3338 break; 3339 3340 default: 3341 rval = FC_FAILURE; 3342 break; 3343 } 3344 3345 return (rval); 3346 } 3347 3348 3349 /* 3350 * Dequeue FC packet for retry 3351 */ 3352 static fp_cmd_t * 3353 fp_deque_cmd(fc_local_port_t *port) 3354 { 3355 fp_cmd_t *cmd; 3356 3357 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3358 3359 mutex_enter(&port->fp_mutex); 3360 3361 if (port->fp_wait_head == NULL) { 3362 /* 3363 * To avoid races, NULL the fp_wait_tid as 3364 * we are about to exit the timeout thread. 3365 */ 3366 port->fp_wait_tid = NULL; 3367 mutex_exit(&port->fp_mutex); 3368 return (NULL); 3369 } 3370 3371 cmd = port->fp_wait_head; 3372 port->fp_wait_head = cmd->cmd_next; 3373 cmd->cmd_next = NULL; 3374 3375 if (port->fp_wait_head == NULL) { 3376 port->fp_wait_tail = NULL; 3377 } 3378 mutex_exit(&port->fp_mutex); 3379 3380 return (cmd); 3381 } 3382 3383 3384 /* 3385 * Wait for job completion 3386 */ 3387 static void 3388 fp_jobwait(job_request_t *job) 3389 { 3390 sema_p(&job->job_port_sema); 3391 } 3392 3393 3394 /* 3395 * Convert FC packet state to FC errno 3396 */ 3397 int 3398 fp_state_to_rval(uchar_t state) 3399 { 3400 int count; 3401 3402 for (count = 0; count < sizeof (fp_xlat) / 3403 sizeof (fp_xlat[0]); count++) { 3404 if (fp_xlat[count].xlat_state == state) { 3405 return (fp_xlat[count].xlat_rval); 3406 } 3407 } 3408 3409 return (FC_FAILURE); 3410 } 3411 3412 3413 /* 3414 * For Synchronous I/O requests, the caller is 3415 * expected to do fctl_jobdone(if necessary) 3416 * 3417 * We want to preserve at least one failure in the 3418 * job_result if it happens. 
3419 * 3420 */ 3421 static void 3422 fp_iodone(fp_cmd_t *cmd) 3423 { 3424 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3425 job_request_t *job = cmd->cmd_job; 3426 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3427 3428 ASSERT(job != NULL); 3429 ASSERT(cmd->cmd_port != NULL); 3430 ASSERT(&cmd->cmd_pkt != NULL); 3431 3432 mutex_enter(&job->job_mutex); 3433 if (job->job_result == FC_SUCCESS) { 3434 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3435 } 3436 mutex_exit(&job->job_mutex); 3437 3438 if (pd) { 3439 mutex_enter(&pd->pd_mutex); 3440 pd->pd_flags = PD_IDLE; 3441 mutex_exit(&pd->pd_mutex); 3442 } 3443 3444 if (ulp_pkt) { 3445 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3446 FP_IS_PKT_ERROR(ulp_pkt)) { 3447 fc_local_port_t *port; 3448 fc_remote_node_t *node; 3449 3450 port = cmd->cmd_port; 3451 3452 mutex_enter(&pd->pd_mutex); 3453 pd->pd_state = PORT_DEVICE_INVALID; 3454 pd->pd_ref_count--; 3455 node = pd->pd_remote_nodep; 3456 mutex_exit(&pd->pd_mutex); 3457 3458 ASSERT(node != NULL); 3459 ASSERT(port != NULL); 3460 3461 if (fctl_destroy_remote_port(port, pd) == 0) { 3462 fctl_destroy_remote_node(node); 3463 } 3464 3465 ulp_pkt->pkt_pd = NULL; 3466 } 3467 3468 ulp_pkt->pkt_comp(ulp_pkt); 3469 } 3470 3471 fp_free_pkt(cmd); 3472 fp_jobdone(job); 3473 } 3474 3475 3476 /* 3477 * Job completion handler 3478 */ 3479 static void 3480 fp_jobdone(job_request_t *job) 3481 { 3482 mutex_enter(&job->job_mutex); 3483 ASSERT(job->job_counter > 0); 3484 3485 if (--job->job_counter != 0) { 3486 mutex_exit(&job->job_mutex); 3487 return; 3488 } 3489 3490 if (job->job_ulp_pkts) { 3491 ASSERT(job->job_ulp_listlen > 0); 3492 kmem_free(job->job_ulp_pkts, 3493 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3494 } 3495 3496 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3497 mutex_exit(&job->job_mutex); 3498 fctl_jobdone(job); 3499 } else { 3500 mutex_exit(&job->job_mutex); 3501 sema_v(&job->job_port_sema); 3502 } 3503 } 3504 3505 3506 /* 3507 * Try to perform shutdown of a port during a detach. No return 3508 * value since the detach should not fail because the port shutdown 3509 * failed. 3510 */ 3511 static void 3512 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3513 { 3514 int index; 3515 int count; 3516 int flags; 3517 fp_cmd_t *cmd; 3518 struct pwwn_hash *head; 3519 fc_remote_port_t *pd; 3520 3521 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3522 3523 job->job_result = FC_SUCCESS; 3524 3525 if (port->fp_taskq) { 3526 /* 3527 * We must release the mutex here to ensure that other 3528 * potential jobs can complete their processing. Many 3529 * also need this mutex. 3530 */ 3531 mutex_exit(&port->fp_mutex); 3532 taskq_wait(port->fp_taskq); 3533 mutex_enter(&port->fp_mutex); 3534 } 3535 3536 if (port->fp_offline_tid) { 3537 timeout_id_t tid; 3538 3539 tid = port->fp_offline_tid; 3540 port->fp_offline_tid = NULL; 3541 mutex_exit(&port->fp_mutex); 3542 (void) untimeout(tid); 3543 mutex_enter(&port->fp_mutex); 3544 } 3545 3546 if (port->fp_wait_tid) { 3547 timeout_id_t tid; 3548 3549 tid = port->fp_wait_tid; 3550 port->fp_wait_tid = NULL; 3551 mutex_exit(&port->fp_mutex); 3552 (void) untimeout(tid); 3553 } else { 3554 mutex_exit(&port->fp_mutex); 3555 } 3556 3557 /* 3558 * While we cancel the timeout, let's also return the 3559 * the outstanding requests back to the callers. 
3560 */ 3561 while ((cmd = fp_deque_cmd(port)) != NULL) { 3562 ASSERT(cmd->cmd_job != NULL); 3563 cmd->cmd_job->job_result = FC_OFFLINE; 3564 fp_iodone(cmd); 3565 } 3566 3567 /* 3568 * Gracefully LOGO with all the devices logged in. 3569 */ 3570 mutex_enter(&port->fp_mutex); 3571 3572 for (count = index = 0; index < pwwn_table_size; index++) { 3573 head = &port->fp_pwwn_table[index]; 3574 pd = head->pwwn_head; 3575 while (pd != NULL) { 3576 mutex_enter(&pd->pd_mutex); 3577 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3578 count++; 3579 } 3580 mutex_exit(&pd->pd_mutex); 3581 pd = pd->pd_wwn_hnext; 3582 } 3583 } 3584 3585 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3586 flags = job->job_flags; 3587 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3588 } else { 3589 flags = 0; 3590 } 3591 if (count) { 3592 job->job_counter = count; 3593 3594 for (index = 0; index < pwwn_table_size; index++) { 3595 head = &port->fp_pwwn_table[index]; 3596 pd = head->pwwn_head; 3597 while (pd != NULL) { 3598 mutex_enter(&pd->pd_mutex); 3599 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3600 ASSERT(pd->pd_login_count > 0); 3601 /* 3602 * Force the counter to ONE in order 3603 * for us to really send LOGO els. 3604 */ 3605 pd->pd_login_count = 1; 3606 mutex_exit(&pd->pd_mutex); 3607 mutex_exit(&port->fp_mutex); 3608 (void) fp_logout(port, pd, job); 3609 mutex_enter(&port->fp_mutex); 3610 } else { 3611 mutex_exit(&pd->pd_mutex); 3612 } 3613 pd = pd->pd_wwn_hnext; 3614 } 3615 } 3616 mutex_exit(&port->fp_mutex); 3617 fp_jobwait(job); 3618 } else { 3619 mutex_exit(&port->fp_mutex); 3620 } 3621 3622 if (job->job_result != FC_SUCCESS) { 3623 FP_TRACE(FP_NHEAD1(9, 0), 3624 "Can't logout all devices. Proceeding with" 3625 " port shutdown"); 3626 job->job_result = FC_SUCCESS; 3627 } 3628 3629 fctl_destroy_all_remote_ports(port); 3630 3631 mutex_enter(&port->fp_mutex); 3632 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3633 mutex_exit(&port->fp_mutex); 3634 fp_ns_fini(port, job); 3635 } else { 3636 mutex_exit(&port->fp_mutex); 3637 } 3638 3639 if (flags) { 3640 job->job_flags = flags; 3641 } 3642 3643 mutex_enter(&port->fp_mutex); 3644 3645 } 3646 3647 3648 /* 3649 * Build the port driver's data structures based on the AL_PA list 3650 */ 3651 static void 3652 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3653 { 3654 int rval; 3655 int flag; 3656 int count; 3657 uint32_t d_id; 3658 fc_remote_port_t *pd; 3659 fc_lilpmap_t *lilp_map; 3660 3661 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3662 3663 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3664 job->job_result = FC_OFFLINE; 3665 mutex_exit(&port->fp_mutex); 3666 fp_jobdone(job); 3667 mutex_enter(&port->fp_mutex); 3668 return; 3669 } 3670 3671 if (port->fp_lilp_map.lilp_length == 0) { 3672 mutex_exit(&port->fp_mutex); 3673 job->job_result = FC_NO_MAP; 3674 fp_jobdone(job); 3675 mutex_enter(&port->fp_mutex); 3676 return; 3677 } 3678 mutex_exit(&port->fp_mutex); 3679 3680 lilp_map = &port->fp_lilp_map; 3681 job->job_counter = lilp_map->lilp_length; 3682 3683 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3684 flag = FP_CMD_PLOGI_RETAIN; 3685 } else { 3686 flag = FP_CMD_PLOGI_DONT_CARE; 3687 } 3688 3689 for (count = 0; count < lilp_map->lilp_length; count++) { 3690 d_id = lilp_map->lilp_alpalist[count]; 3691 3692 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3693 fp_jobdone(job); 3694 continue; 3695 } 3696 3697 pd = fctl_get_remote_port_by_did(port, d_id); 3698 if (pd) { 3699 mutex_enter(&pd->pd_mutex); 3700 if (flag == FP_CMD_PLOGI_DONT_CARE || 3701 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3702 mutex_exit(&pd->pd_mutex); 3703 fp_jobdone(job); 3704 continue; 3705 } 3706 mutex_exit(&pd->pd_mutex); 3707 } 3708 3709 rval = fp_port_login(port, d_id, job, flag, 3710 KM_SLEEP, pd, NULL); 3711 if (rval != FC_SUCCESS) { 3712 fp_jobdone(job); 3713 } 3714 } 3715 3716 mutex_enter(&port->fp_mutex); 3717 } 3718 3719 3720 /* 3721 * Perform loop ONLINE processing 3722 */ 3723 static void 3724 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3725 { 3726 int count; 3727 int rval; 3728 uint32_t d_id; 3729 uint32_t listlen; 3730 fc_lilpmap_t *lilp_map; 3731 fc_remote_port_t *pd; 3732 fc_portmap_t *changelist; 3733 3734 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3735 3736 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3737 port, job); 3738 3739 lilp_map = &port->fp_lilp_map; 3740 3741 if (lilp_map->lilp_length) { 3742 mutex_enter(&port->fp_mutex); 3743 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3744 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3745 mutex_exit(&port->fp_mutex); 3746 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3747 } else { 3748 mutex_exit(&port->fp_mutex); 3749 } 3750 3751 job->job_counter = lilp_map->lilp_length; 3752 3753 for (count = 0; count < lilp_map->lilp_length; count++) { 3754 d_id = lilp_map->lilp_alpalist[count]; 3755 3756 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3757 fp_jobdone(job); 3758 continue; 3759 } 3760 3761 pd = fctl_get_remote_port_by_did(port, d_id); 3762 if (pd != NULL) { 3763 #ifdef DEBUG 3764 mutex_enter(&pd->pd_mutex); 3765 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3766 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3767 } 3768 mutex_exit(&pd->pd_mutex); 3769 #endif 3770 fp_jobdone(job); 3771 continue; 3772 } 3773 3774 rval = fp_port_login(port, d_id, job, 3775 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3776 3777 if (rval != FC_SUCCESS) { 3778 fp_jobdone(job); 3779 } 3780 } 3781 fp_jobwait(job); 3782 } 3783 listlen = 0; 3784 changelist = NULL; 3785 3786 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3787 mutex_enter(&port->fp_mutex); 3788 ASSERT(port->fp_statec_busy > 0); 3789 if (port->fp_statec_busy == 1) { 3790 mutex_exit(&port->fp_mutex); 3791 fctl_fillout_map(port, &changelist, &listlen, 3792 1, 0, orphan); 3793 3794 mutex_enter(&port->fp_mutex); 3795 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3796 ASSERT(port->fp_total_devices == 0); 3797 port->fp_total_devices = port->fp_dev_count; 3798 } 3799 } else { 3800 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3801 } 3802 mutex_exit(&port->fp_mutex); 3803 } 3804 3805 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3806 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3807 listlen, listlen, KM_SLEEP); 3808 } else { 3809 mutex_enter(&port->fp_mutex); 3810 if (--port->fp_statec_busy == 0) { 3811 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3812 } 3813 ASSERT(changelist == NULL && listlen == 0); 3814 mutex_exit(&port->fp_mutex); 3815 } 3816 3817 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3818 port, job); 3819 } 3820 3821 3822 /* 3823 * Get an Arbitrated Loop map from the underlying FCA 3824 */ 3825 static int 3826 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3827 { 3828 int rval; 3829 3830 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3831 port, lilp_map); 3832 3833 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3834 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3835 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3836 3837 if (rval != FC_SUCCESS) { 3838 rval = FC_NO_MAP; 3839 } else if (lilp_map->lilp_length == 0 && 3840 (lilp_map->lilp_magic >= MAGIC_LISM && 3841 lilp_map->lilp_magic < MAGIC_LIRP)) { 3842 uchar_t lilp_length; 3843 3844 /* 3845 * Since the map length is zero, provide all 3846 * the valid AL_PAs for NL_ports discovery. 3847 */ 3848 lilp_length = sizeof (fp_valid_alpas) / 3849 sizeof (fp_valid_alpas[0]); 3850 lilp_map->lilp_length = lilp_length; 3851 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3852 lilp_length); 3853 } else { 3854 rval = fp_validate_lilp_map(lilp_map); 3855 3856 if (rval == FC_SUCCESS) { 3857 mutex_enter(&port->fp_mutex); 3858 port->fp_total_devices = lilp_map->lilp_length - 1; 3859 mutex_exit(&port->fp_mutex); 3860 } 3861 } 3862 3863 mutex_enter(&port->fp_mutex); 3864 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3865 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3866 mutex_exit(&port->fp_mutex); 3867 3868 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3869 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3870 FP_TRACE(FP_NHEAD1(9, 0), 3871 "FCA reset failed after LILP map was found" 3872 " to be invalid"); 3873 } 3874 } else if (rval == FC_SUCCESS) { 3875 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3876 mutex_exit(&port->fp_mutex); 3877 } else { 3878 mutex_exit(&port->fp_mutex); 3879 } 3880 3881 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3882 lilp_map); 3883 3884 return (rval); 3885 } 3886 3887 3888 /* 3889 * Perform Fabric Login: 3890 * 3891 * Return Values: 3892 * FC_SUCCESS 3893 * FC_FAILURE 3894 * FC_NOMEM 3895 * FC_TRANSPORT_ERROR 3896 * and a lot others defined in fc_error.h 3897 */ 3898 static int 3899 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3900 int flag, int sleep) 3901 { 3902 int rval; 3903 fp_cmd_t *cmd; 3904 uchar_t class; 3905 3906 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3907 3908 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3909 port, job); 3910 3911 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3912 if (class == FC_TRAN_CLASS_INVALID) { 3913 return (FC_ELS_BAD); 3914 } 3915 3916 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3917 sizeof (la_els_logi_t), sleep, NULL); 3918 if (cmd == NULL) { 3919 return (FC_NOMEM); 3920 } 3921 3922 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3923 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3924 cmd->cmd_flags = flag; 3925 cmd->cmd_retry_count = fp_retry_count; 3926 cmd->cmd_ulp_pkt = NULL; 3927 3928 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3929 job, LA_ELS_FLOGI); 3930 3931 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3932 if (rval != FC_SUCCESS) { 3933 fp_free_pkt(cmd); 3934 } 3935 3936 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3937 port, job); 3938 3939 return (rval); 3940 } 3941 3942 3943 /* 3944 * In some scenarios such as private loop device discovery period 3945 * the fc_remote_port_t data structure isn't allocated. The allocation 3946 * is done when the PLOGI is successful. 
In some other scenarios 3947 * such as Fabric topology, the fc_remote_port_t is already created 3948 * and initialized with appropriate values (as the NS provides 3949 * them) 3950 */ 3951 static int 3952 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 3953 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 3954 { 3955 uchar_t class; 3956 fp_cmd_t *cmd; 3957 uint32_t src_id; 3958 fc_remote_port_t *tmp_pd; 3959 int relogin; 3960 int found = 0; 3961 3962 #ifdef DEBUG 3963 if (pd == NULL) { 3964 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 3965 } 3966 #endif 3967 ASSERT(job->job_counter > 0); 3968 3969 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3970 if (class == FC_TRAN_CLASS_INVALID) { 3971 return (FC_ELS_BAD); 3972 } 3973 3974 mutex_enter(&port->fp_mutex); 3975 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 3976 mutex_exit(&port->fp_mutex); 3977 3978 relogin = 1; 3979 if (tmp_pd) { 3980 mutex_enter(&tmp_pd->pd_mutex); 3981 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 3982 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 3983 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 3984 relogin = 0; 3985 } 3986 mutex_exit(&tmp_pd->pd_mutex); 3987 } 3988 3989 if (!relogin) { 3990 mutex_enter(&tmp_pd->pd_mutex); 3991 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3992 cmd_flag |= FP_CMD_PLOGI_RETAIN; 3993 } 3994 mutex_exit(&tmp_pd->pd_mutex); 3995 3996 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 3997 sizeof (la_els_adisc_t), sleep, tmp_pd); 3998 if (cmd == NULL) { 3999 return (FC_NOMEM); 4000 } 4001 4002 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4003 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4004 cmd->cmd_flags = cmd_flag; 4005 cmd->cmd_retry_count = fp_retry_count; 4006 cmd->cmd_ulp_pkt = ulp_pkt; 4007 4008 mutex_enter(&port->fp_mutex); 4009 mutex_enter(&tmp_pd->pd_mutex); 4010 fp_adisc_init(cmd, job); 4011 mutex_exit(&tmp_pd->pd_mutex); 4012 mutex_exit(&port->fp_mutex); 4013 4014 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4015 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4016 4017 } else { 4018 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4019 sizeof (la_els_logi_t), sleep, pd); 4020 if (cmd == NULL) { 4021 return (FC_NOMEM); 4022 } 4023 4024 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4025 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4026 cmd->cmd_flags = cmd_flag; 4027 cmd->cmd_retry_count = fp_retry_count; 4028 cmd->cmd_ulp_pkt = ulp_pkt; 4029 4030 mutex_enter(&port->fp_mutex); 4031 src_id = port->fp_port_id.port_id; 4032 mutex_exit(&port->fp_mutex); 4033 4034 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4035 job, LA_ELS_PLOGI); 4036 } 4037 4038 if (pd) { 4039 mutex_enter(&pd->pd_mutex); 4040 pd->pd_flags = PD_ELS_IN_PROGRESS; 4041 mutex_exit(&pd->pd_mutex); 4042 } 4043 4044 /* npiv check to make sure we don't log into ourself */ 4045 if (relogin && (port->fp_topology == FC_TOP_FABRIC)) { 4046 if ((d_id & 0xffff00) == 4047 (port->fp_port_id.port_id & 0xffff00)) { 4048 found = 1; 4049 } 4050 } 4051 4052 if (found || 4053 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4054 if (found) { 4055 fc_packet_t *pkt = &cmd->cmd_pkt; 4056 pkt->pkt_state = FC_PKT_NPORT_RJT; 4057 } 4058 if (pd) { 4059 mutex_enter(&pd->pd_mutex); 4060 pd->pd_flags = PD_IDLE; 4061 mutex_exit(&pd->pd_mutex); 4062 } 4063 4064 if (ulp_pkt) { 4065 fc_packet_t *pkt = &cmd->cmd_pkt; 4066 4067 ulp_pkt->pkt_state = pkt->pkt_state; 4068 ulp_pkt->pkt_reason = pkt->pkt_reason; 4069 ulp_pkt->pkt_action = 
pkt->pkt_action; 4070 ulp_pkt->pkt_expln = pkt->pkt_expln; 4071 } 4072 4073 fp_iodone(cmd); 4074 } 4075 4076 return (FC_SUCCESS); 4077 } 4078 4079 4080 /* 4081 * Register the LOGIN parameters with a port device 4082 */ 4083 static void 4084 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4085 la_els_logi_t *acc, uchar_t class) 4086 { 4087 fc_remote_node_t *node; 4088 4089 ASSERT(pd != NULL); 4090 4091 mutex_enter(&pd->pd_mutex); 4092 node = pd->pd_remote_nodep; 4093 if (pd->pd_login_count == 0) { 4094 pd->pd_login_count++; 4095 } 4096 4097 if (handle) { 4098 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4099 (uint8_t *)&acc->common_service, 4100 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4101 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4102 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4103 DDI_DEV_AUTOINCR); 4104 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4105 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4106 DDI_DEV_AUTOINCR); 4107 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4108 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4109 DDI_DEV_AUTOINCR); 4110 } else { 4111 pd->pd_csp = acc->common_service; 4112 pd->pd_clsp1 = acc->class_1; 4113 pd->pd_clsp2 = acc->class_2; 4114 pd->pd_clsp3 = acc->class_3; 4115 } 4116 4117 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4118 pd->pd_login_class = class; 4119 mutex_exit(&pd->pd_mutex); 4120 4121 #ifndef __lock_lint 4122 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4123 pd->pd_port_id.port_id) == pd); 4124 #endif 4125 4126 mutex_enter(&node->fd_mutex); 4127 if (handle) { 4128 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4129 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4130 DDI_DEV_AUTOINCR); 4131 } else { 4132 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4133 } 4134 mutex_exit(&node->fd_mutex); 4135 } 4136 4137 4138 /* 4139 * Mark the remote port as OFFLINE 4140 */ 4141 static void 4142 fp_remote_port_offline(fc_remote_port_t *pd) 4143 { 4144 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4145 if (pd->pd_login_count && 4146 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4147 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4148 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4149 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4150 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4151 pd->pd_login_class = 0; 4152 } 4153 pd->pd_type = PORT_DEVICE_OLD; 4154 pd->pd_flags = PD_IDLE; 4155 fctl_tc_reset(&pd->pd_logo_tc); 4156 } 4157 4158 4159 /* 4160 * Deregistration of a port device 4161 */ 4162 static void 4163 fp_unregister_login(fc_remote_port_t *pd) 4164 { 4165 fc_remote_node_t *node; 4166 4167 ASSERT(pd != NULL); 4168 4169 mutex_enter(&pd->pd_mutex); 4170 pd->pd_login_count = 0; 4171 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4172 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4173 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4174 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4175 4176 pd->pd_state = PORT_DEVICE_VALID; 4177 pd->pd_login_class = 0; 4178 node = pd->pd_remote_nodep; 4179 mutex_exit(&pd->pd_mutex); 4180 4181 mutex_enter(&node->fd_mutex); 4182 bzero(node->fd_vv, sizeof (node->fd_vv)); 4183 mutex_exit(&node->fd_mutex); 4184 } 4185 4186 4187 /* 4188 * Handle OFFLINE state of an FCA port 4189 */ 4190 static void 4191 fp_port_offline(fc_local_port_t *port, int notify) 4192 { 4193 int index; 4194 int statec; 4195 timeout_id_t tid; 4196 struct pwwn_hash *head; 4197 
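/*
 * The loop below walks every chain of the port's pwwn hash table,
 * marks each remote port offline and removes it from the D_ID table.
 * When 'notify' is set, ULPs are informed of FC_STATE_OFFLINE here;
 * the offline timer (fp_offline_timeout) is then armed unless more
 * state changes are still pending.
 */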
fc_remote_port_t *pd; 4198 4199 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4200 4201 for (index = 0; index < pwwn_table_size; index++) { 4202 head = &port->fp_pwwn_table[index]; 4203 pd = head->pwwn_head; 4204 while (pd != NULL) { 4205 mutex_enter(&pd->pd_mutex); 4206 fp_remote_port_offline(pd); 4207 fctl_delist_did_table(port, pd); 4208 mutex_exit(&pd->pd_mutex); 4209 pd = pd->pd_wwn_hnext; 4210 } 4211 } 4212 port->fp_total_devices = 0; 4213 4214 statec = 0; 4215 if (notify) { 4216 /* 4217 * Decrement the statec busy counter as we 4218 * are almost done with handling the state 4219 * change 4220 */ 4221 ASSERT(port->fp_statec_busy > 0); 4222 if (--port->fp_statec_busy == 0) { 4223 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4224 } 4225 mutex_exit(&port->fp_mutex); 4226 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4227 0, 0, KM_SLEEP); 4228 mutex_enter(&port->fp_mutex); 4229 4230 if (port->fp_statec_busy) { 4231 statec++; 4232 } 4233 } else if (port->fp_statec_busy > 1) { 4234 statec++; 4235 } 4236 4237 if ((tid = port->fp_offline_tid) != NULL) { 4238 mutex_exit(&port->fp_mutex); 4239 (void) untimeout(tid); 4240 mutex_enter(&port->fp_mutex); 4241 } 4242 4243 if (!statec) { 4244 port->fp_offline_tid = timeout(fp_offline_timeout, 4245 (caddr_t)port, fp_offline_ticks); 4246 } 4247 } 4248 4249 4250 /* 4251 * Offline devices and send up a state change notification to ULPs 4252 */ 4253 static void 4254 fp_offline_timeout(void *port_handle) 4255 { 4256 int ret; 4257 fc_local_port_t *port = port_handle; 4258 uint32_t listlen = 0; 4259 fc_portmap_t *changelist = NULL; 4260 4261 mutex_enter(&port->fp_mutex); 4262 4263 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4264 (port->fp_soft_state & 4265 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4266 port->fp_dev_count == 0 || port->fp_statec_busy) { 4267 port->fp_offline_tid = NULL; 4268 mutex_exit(&port->fp_mutex); 4269 return; 4270 } 4271 4272 mutex_exit(&port->fp_mutex); 4273 4274 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4275 4276 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4277 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4278 FC_FCA_CORE)) != FC_SUCCESS) { 4279 FP_TRACE(FP_NHEAD1(9, ret), 4280 "Failed to force adapter dump"); 4281 } else { 4282 FP_TRACE(FP_NHEAD1(9, 0), 4283 "Forced adapter dump successfully"); 4284 } 4285 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4286 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4287 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4288 FP_TRACE(FP_NHEAD1(9, ret), 4289 "Failed to force adapter dump and reset"); 4290 } else { 4291 FP_TRACE(FP_NHEAD1(9, 0), 4292 "Forced adapter dump and reset successfully"); 4293 } 4294 } 4295 4296 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4297 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4298 listlen, listlen, KM_SLEEP); 4299 4300 mutex_enter(&port->fp_mutex); 4301 port->fp_offline_tid = NULL; 4302 mutex_exit(&port->fp_mutex); 4303 } 4304 4305 4306 /* 4307 * Perform general purpose ELS request initialization 4308 */ 4309 static void 4310 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4311 void (*comp) (), job_request_t *job) 4312 { 4313 fc_packet_t *pkt; 4314 4315 pkt = &cmd->cmd_pkt; 4316 cmd->cmd_job = job; 4317 4318 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4319 pkt->pkt_cmd_fhdr.d_id = d_id; 4320 pkt->pkt_cmd_fhdr.s_id = s_id; 4321 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4322 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 
F_CTL_FIRST_SEQ; 4323 pkt->pkt_cmd_fhdr.seq_id = 0; 4324 pkt->pkt_cmd_fhdr.df_ctl = 0; 4325 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4326 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4327 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4328 pkt->pkt_cmd_fhdr.ro = 0; 4329 pkt->pkt_cmd_fhdr.rsvd = 0; 4330 pkt->pkt_comp = comp; 4331 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4332 } 4333 4334 4335 /* 4336 * Initialize PLOGI/FLOGI ELS request 4337 */ 4338 static void 4339 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4340 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4341 { 4342 ls_code_t payload; 4343 4344 fp_els_init(cmd, s_id, d_id, intr, job); 4345 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4346 4347 payload.ls_code = ls_code; 4348 payload.mbz = 0; 4349 4350 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4351 (uint8_t *)&port->fp_service_params, 4352 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4353 DDI_DEV_AUTOINCR); 4354 4355 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4356 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4357 DDI_DEV_AUTOINCR); 4358 } 4359 4360 4361 /* 4362 * Initialize LOGO ELS request 4363 */ 4364 static void 4365 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4366 { 4367 fc_local_port_t *port; 4368 fc_packet_t *pkt; 4369 la_els_logo_t payload; 4370 4371 port = pd->pd_port; 4372 pkt = &cmd->cmd_pkt; 4373 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4374 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4375 4376 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4377 fp_logo_intr, job); 4378 4379 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4380 4381 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4382 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4383 4384 payload.ls_code.ls_code = LA_ELS_LOGO; 4385 payload.ls_code.mbz = 0; 4386 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4387 payload.nport_id = port->fp_port_id; 4388 4389 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4390 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4391 } 4392 4393 /* 4394 * Initialize RNID ELS request 4395 */ 4396 static void 4397 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4398 { 4399 fc_local_port_t *port; 4400 fc_packet_t *pkt; 4401 la_els_rnid_t payload; 4402 fc_remote_port_t *pd; 4403 4404 pkt = &cmd->cmd_pkt; 4405 pd = pkt->pkt_pd; 4406 port = pd->pd_port; 4407 4408 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4409 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4410 4411 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4412 fp_rnid_intr, job); 4413 4414 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4415 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4416 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4417 4418 payload.ls_code.ls_code = LA_ELS_RNID; 4419 payload.ls_code.mbz = 0; 4420 payload.data_format = flag; 4421 4422 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4423 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4424 } 4425 4426 /* 4427 * Initialize RLS ELS request 4428 */ 4429 static void 4430 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4431 { 4432 fc_local_port_t *port; 4433 fc_packet_t *pkt; 4434 la_els_rls_t payload; 4435 fc_remote_port_t *pd; 4436 4437 pkt = &cmd->cmd_pkt; 4438 pd = pkt->pkt_pd; 4439 port = pd->pd_port; 4440 4441 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4442 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4443 4444 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4445 fp_rls_intr, job); 4446 4447 
cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4448 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4449 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4450 4451 payload.ls_code.ls_code = LA_ELS_RLS; 4452 payload.ls_code.mbz = 0; 4453 payload.rls_portid = port->fp_port_id; 4454 4455 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4456 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4457 } 4458 4459 4460 /* 4461 * Initialize an ADISC ELS request 4462 */ 4463 static void 4464 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4465 { 4466 fc_local_port_t *port; 4467 fc_packet_t *pkt; 4468 la_els_adisc_t payload; 4469 fc_remote_port_t *pd; 4470 4471 pkt = &cmd->cmd_pkt; 4472 pd = pkt->pkt_pd; 4473 port = pd->pd_port; 4474 4475 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4476 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4477 4478 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4479 fp_adisc_intr, job); 4480 4481 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4482 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4483 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4484 4485 payload.ls_code.ls_code = LA_ELS_ADISC; 4486 payload.ls_code.mbz = 0; 4487 payload.nport_id = port->fp_port_id; 4488 payload.port_wwn = port->fp_service_params.nport_ww_name; 4489 payload.node_wwn = port->fp_service_params.node_ww_name; 4490 payload.hard_addr = port->fp_hard_addr; 4491 4492 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4493 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4494 } 4495 4496 4497 /* 4498 * Send up a state change notification to ULPs. 4499 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4500 */ 4501 static int 4502 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4503 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4504 { 4505 fc_port_clist_t *clist; 4506 fc_remote_port_t *pd; 4507 int count; 4508 4509 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4510 4511 clist = kmem_zalloc(sizeof (*clist), sleep); 4512 if (clist == NULL) { 4513 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4514 return (FC_NOMEM); 4515 } 4516 4517 clist->clist_state = state; 4518 4519 mutex_enter(&port->fp_mutex); 4520 clist->clist_flags = port->fp_topology; 4521 mutex_exit(&port->fp_mutex); 4522 4523 clist->clist_port = (opaque_t)port; 4524 clist->clist_len = listlen; 4525 clist->clist_size = alloc_len; 4526 clist->clist_map = changelist; 4527 4528 /* 4529 * Bump the reference count of each fc_remote_port_t in this changelist. 4530 * This is necessary since these devices will be sitting in a taskq 4531 * and referenced later. When the state change notification is 4532 * complete, the reference counts will be decremented. 
4533 */ 4534 for (count = 0; count < clist->clist_len; count++) { 4535 pd = clist->clist_map[count].map_pd; 4536 4537 if (pd != NULL) { 4538 mutex_enter(&pd->pd_mutex); 4539 ASSERT((pd->pd_ref_count >= 0) || 4540 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4541 pd->pd_ref_count++; 4542 4543 if (clist->clist_map[count].map_state != 4544 PORT_DEVICE_INVALID) { 4545 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4546 } 4547 4548 mutex_exit(&pd->pd_mutex); 4549 } 4550 } 4551 4552 #ifdef DEBUG 4553 /* 4554 * Sanity check for presence of OLD devices in the hash lists 4555 */ 4556 if (clist->clist_size) { 4557 ASSERT(clist->clist_map != NULL); 4558 for (count = 0; count < clist->clist_len; count++) { 4559 if (clist->clist_map[count].map_state == 4560 PORT_DEVICE_INVALID) { 4561 la_wwn_t pwwn; 4562 fc_portid_t d_id; 4563 4564 pd = clist->clist_map[count].map_pd; 4565 ASSERT(pd != NULL); 4566 4567 mutex_enter(&pd->pd_mutex); 4568 pwwn = pd->pd_port_name; 4569 d_id = pd->pd_port_id; 4570 mutex_exit(&pd->pd_mutex); 4571 4572 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4573 ASSERT(pd != clist->clist_map[count].map_pd); 4574 4575 pd = fctl_get_remote_port_by_did(port, 4576 d_id.port_id); 4577 ASSERT(pd != clist->clist_map[count].map_pd); 4578 } 4579 } 4580 } 4581 #endif 4582 4583 mutex_enter(&port->fp_mutex); 4584 4585 if (state == FC_STATE_ONLINE) { 4586 if (--port->fp_statec_busy == 0) { 4587 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4588 } 4589 } 4590 mutex_exit(&port->fp_mutex); 4591 4592 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4593 clist, KM_SLEEP); 4594 4595 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4596 "state=%x, len=%d", port, state, listlen); 4597 4598 return (FC_SUCCESS); 4599 } 4600 4601 4602 /* 4603 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4604 */ 4605 static int 4606 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4607 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4608 { 4609 int ret; 4610 fc_port_clist_t *clist; 4611 4612 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4613 4614 clist = kmem_zalloc(sizeof (*clist), sleep); 4615 if (clist == NULL) { 4616 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4617 return (FC_NOMEM); 4618 } 4619 4620 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4621 4622 mutex_enter(&port->fp_mutex); 4623 clist->clist_flags = port->fp_topology; 4624 mutex_exit(&port->fp_mutex); 4625 4626 clist->clist_port = (opaque_t)port; 4627 clist->clist_len = listlen; 4628 clist->clist_size = alloc_len; 4629 clist->clist_map = changelist; 4630 4631 /* Send sysevents for target state changes */ 4632 4633 if (clist->clist_size) { 4634 int count; 4635 fc_remote_port_t *pd; 4636 4637 ASSERT(clist->clist_map != NULL); 4638 for (count = 0; count < clist->clist_len; count++) { 4639 pd = clist->clist_map[count].map_pd; 4640 4641 /* 4642 * Bump reference counts on all fc_remote_port_t 4643 * structs in this list. We don't know when the task 4644 * will fire, and we don't need these fc_remote_port_t 4645 * structs going away behind our back. 
4646 */ 4647 if (pd) { 4648 mutex_enter(&pd->pd_mutex); 4649 ASSERT((pd->pd_ref_count >= 0) || 4650 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4651 pd->pd_ref_count++; 4652 mutex_exit(&pd->pd_mutex); 4653 } 4654 4655 if (clist->clist_map[count].map_state == 4656 PORT_DEVICE_VALID) { 4657 if (clist->clist_map[count].map_type == 4658 PORT_DEVICE_NEW) { 4659 /* Update our state change counter */ 4660 mutex_enter(&port->fp_mutex); 4661 port->fp_last_change++; 4662 mutex_exit(&port->fp_mutex); 4663 4664 /* Additions */ 4665 fp_log_target_event(port, 4666 ESC_SUNFC_TARGET_ADD, 4667 clist->clist_map[count].map_pwwn, 4668 clist->clist_map[count].map_did. 4669 port_id); 4670 } 4671 4672 } else if ((clist->clist_map[count].map_type == 4673 PORT_DEVICE_OLD) && 4674 (clist->clist_map[count].map_state == 4675 PORT_DEVICE_INVALID)) { 4676 /* Update our state change counter */ 4677 mutex_enter(&port->fp_mutex); 4678 port->fp_last_change++; 4679 mutex_exit(&port->fp_mutex); 4680 4681 /* 4682 * For removals, we don't decrement 4683 * pd_ref_count until after the ULP's 4684 * state change callback function has 4685 * completed. 4686 */ 4687 4688 /* Removals */ 4689 fp_log_target_event(port, 4690 ESC_SUNFC_TARGET_REMOVE, 4691 clist->clist_map[count].map_pwwn, 4692 clist->clist_map[count].map_did.port_id); 4693 } 4694 4695 if (clist->clist_map[count].map_state != 4696 PORT_DEVICE_INVALID) { 4697 /* 4698 * Indicate that the ULPs are now aware of 4699 * this device. 4700 */ 4701 4702 mutex_enter(&pd->pd_mutex); 4703 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4704 mutex_exit(&pd->pd_mutex); 4705 } 4706 4707 #ifdef DEBUG 4708 /* 4709 * Sanity check for OLD devices in the hash lists 4710 */ 4711 if (pd && clist->clist_map[count].map_state == 4712 PORT_DEVICE_INVALID) { 4713 la_wwn_t pwwn; 4714 fc_portid_t d_id; 4715 4716 mutex_enter(&pd->pd_mutex); 4717 pwwn = pd->pd_port_name; 4718 d_id = pd->pd_port_id; 4719 mutex_exit(&pd->pd_mutex); 4720 4721 /* 4722 * This overwrites the 'pd' local variable. 4723 * Beware of this if 'pd' ever gets 4724 * referenced below this block. 
4725 */ 4726 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4727 ASSERT(pd != clist->clist_map[count].map_pd); 4728 4729 pd = fctl_get_remote_port_by_did(port, 4730 d_id.port_id); 4731 ASSERT(pd != clist->clist_map[count].map_pd); 4732 } 4733 #endif 4734 } 4735 } 4736 4737 if (sync) { 4738 clist->clist_wait = 1; 4739 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4740 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4741 } 4742 4743 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4744 if (sync && ret) { 4745 mutex_enter(&clist->clist_mutex); 4746 while (clist->clist_wait) { 4747 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4748 } 4749 mutex_exit(&clist->clist_mutex); 4750 4751 mutex_destroy(&clist->clist_mutex); 4752 cv_destroy(&clist->clist_cv); 4753 kmem_free(clist, sizeof (*clist)); 4754 } 4755 4756 if (!ret) { 4757 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4758 "port=%p", port); 4759 kmem_free(clist->clist_map, 4760 sizeof (*(clist->clist_map)) * clist->clist_size); 4761 kmem_free(clist, sizeof (*clist)); 4762 } else { 4763 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4764 port, listlen); 4765 } 4766 4767 return (FC_SUCCESS); 4768 } 4769 4770 4771 /* 4772 * Perform PLOGI to the group of devices for ULPs 4773 */ 4774 static void 4775 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4776 { 4777 int offline; 4778 int count; 4779 int rval; 4780 uint32_t listlen; 4781 uint32_t done; 4782 uint32_t d_id; 4783 fc_remote_node_t *node; 4784 fc_remote_port_t *pd; 4785 fc_remote_port_t *tmp_pd; 4786 fc_packet_t *ulp_pkt; 4787 la_els_logi_t *els_data; 4788 ls_code_t ls_code; 4789 4790 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4791 port, job); 4792 4793 done = 0; 4794 listlen = job->job_ulp_listlen; 4795 job->job_counter = job->job_ulp_listlen; 4796 4797 mutex_enter(&port->fp_mutex); 4798 offline = (port->fp_statec_busy || 4799 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4800 mutex_exit(&port->fp_mutex); 4801 4802 for (count = 0; count < listlen; count++) { 4803 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4804 sizeof (la_els_logi_t)); 4805 4806 ulp_pkt = job->job_ulp_pkts[count]; 4807 pd = ulp_pkt->pkt_pd; 4808 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4809 4810 if (offline) { 4811 done++; 4812 4813 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4814 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4815 ulp_pkt->pkt_pd = NULL; 4816 ulp_pkt->pkt_comp(ulp_pkt); 4817 4818 job->job_ulp_pkts[count] = NULL; 4819 4820 fp_jobdone(job); 4821 continue; 4822 } 4823 4824 if (pd == NULL) { 4825 pd = fctl_get_remote_port_by_did(port, d_id); 4826 if (pd == NULL) { /* reset later */ 4827 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4828 continue; 4829 } 4830 mutex_enter(&pd->pd_mutex); 4831 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4832 mutex_exit(&pd->pd_mutex); 4833 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4834 done++; 4835 ulp_pkt->pkt_comp(ulp_pkt); 4836 job->job_ulp_pkts[count] = NULL; 4837 fp_jobdone(job); 4838 } else { 4839 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4840 mutex_exit(&pd->pd_mutex); 4841 } 4842 continue; 4843 } 4844 4845 switch (ulp_pkt->pkt_state) { 4846 case FC_PKT_ELS_IN_PROGRESS: 4847 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4848 /* FALLTHRU */ 4849 case FC_PKT_LOCAL_RJT: 4850 done++; 4851 ulp_pkt->pkt_comp(ulp_pkt); 4852 job->job_ulp_pkts[count] = NULL; 4853 fp_jobdone(job); 4854 continue; 4855 default: 4856 break; 4857 } 4858 4859 /* 4860 * Validate the pd corresponding to the d_id passed 4861 * by the ULPs 4862 */ 4863 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4864 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4865 done++; 4866 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4867 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4868 ulp_pkt->pkt_pd = NULL; 4869 ulp_pkt->pkt_comp(ulp_pkt); 4870 job->job_ulp_pkts[count] = NULL; 4871 fp_jobdone(job); 4872 continue; 4873 } 4874 4875 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4876 "port=%p, pd=%p", port, pd); 4877 4878 mutex_enter(&pd->pd_mutex); 4879 4880 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4881 done++; 4882 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4883 4884 ls_code.ls_code = LA_ELS_ACC; 4885 ls_code.mbz = 0; 4886 4887 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4888 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4889 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4890 4891 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4892 (uint8_t *)&pd->pd_csp, 4893 (uint8_t *)&els_data->common_service, 4894 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4895 4896 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4897 (uint8_t *)&pd->pd_port_name, 4898 (uint8_t *)&els_data->nport_ww_name, 4899 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4900 4901 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4902 (uint8_t *)&pd->pd_clsp1, 4903 (uint8_t *)&els_data->class_1, 4904 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4905 4906 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4907 (uint8_t *)&pd->pd_clsp2, 4908 (uint8_t *)&els_data->class_2, 4909 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4910 4911 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4912 (uint8_t *)&pd->pd_clsp3, 4913 (uint8_t *)&els_data->class_3, 4914 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4915 4916 node = pd->pd_remote_nodep; 4917 pd->pd_login_count++; 4918 pd->pd_flags = PD_IDLE; 4919 ulp_pkt->pkt_pd = pd; 4920 mutex_exit(&pd->pd_mutex); 4921 4922 mutex_enter(&node->fd_mutex); 4923 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4924 (uint8_t *)&node->fd_node_name, 4925 (uint8_t *)(&els_data->node_ww_name), 4926 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4927 4928 4929 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4930 (uint8_t *)&node->fd_vv, 4931 (uint8_t *)(&els_data->vendor_version), 4932 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4933 4934 mutex_exit(&node->fd_mutex); 4935 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4936 } else { 4937 4938 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4939 mutex_exit(&pd->pd_mutex); 4940 } 4941 4942 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 4943 ulp_pkt->pkt_comp(ulp_pkt); 4944 job->job_ulp_pkts[count] = NULL; 4945 fp_jobdone(job); 4946 } 4947 } 4948 4949 if (done == listlen) { 4950 fp_jobwait(job); 4951 fctl_jobdone(job); 4952 return; 4953 } 4954 4955 job->job_counter = listlen - done; 4956 4957 for (count = 0; count < listlen; count++) { 4958 int cmd_flags; 4959 4960 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 4961 continue; 4962 } 4963 4964 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 4965 4966 cmd_flags = FP_CMD_PLOGI_RETAIN; 4967 4968 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4969 ASSERT(d_id != 0); 4970 4971 pd = fctl_get_remote_port_by_did(port, d_id); 4972 ulp_pkt->pkt_pd = pd; 4973 4974 if (pd != NULL) { 4975 mutex_enter(&pd->pd_mutex); 4976 d_id = pd->pd_port_id.port_id; 4977 pd->pd_flags = PD_ELS_IN_PROGRESS; 4978 mutex_exit(&pd->pd_mutex); 4979 } else { 4980 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4981 #ifdef DEBUG 4982 pd = fctl_get_remote_port_by_did(port, d_id); 4983 ASSERT(pd == NULL); 4984 #endif 4985 /* 4986 * In the Fabric topology, use NS to create 4987 * port device, and if that fails still try 4988 * with PLOGI - which will make yet another 4989 * attempt to create after successful PLOGI 4990 */ 4991 mutex_enter(&port->fp_mutex); 4992 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 4993 mutex_exit(&port->fp_mutex); 4994 pd = fp_create_remote_port_by_ns(port, 4995 d_id, KM_SLEEP); 4996 if (pd) { 4997 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 4998 4999 mutex_enter(&pd->pd_mutex); 5000 pd->pd_flags = PD_ELS_IN_PROGRESS; 5001 mutex_exit(&pd->pd_mutex); 5002 5003 FP_TRACE(FP_NHEAD1(3, 0), 5004 "fp_plogi_group;" 5005 " NS created PD port=%p, job=%p," 5006 " pd=%p", port, job, pd); 5007 } 5008 } else { 5009 mutex_exit(&port->fp_mutex); 5010 } 5011 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5012 FP_TRACE(FP_NHEAD1(3, 0), 5013 "fp_plogi_group;" 5014 "ulp_pkt's pd is NULL, get a pd %p", 5015 pd); 5016 mutex_enter(&pd->pd_mutex); 5017 pd->pd_ref_count++; 5018 mutex_exit(&pd->pd_mutex); 5019 } 5020 ulp_pkt->pkt_pd = pd; 5021 } 5022 5023 rval = fp_port_login(port, d_id, job, cmd_flags, 5024 KM_SLEEP, pd, ulp_pkt); 5025 5026 if (rval == FC_SUCCESS) { 5027 continue; 5028 } 5029 5030 if (rval == FC_STATEC_BUSY) { 5031 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5032 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5033 } else { 5034 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5035 } 5036 5037 if (pd) { 5038 mutex_enter(&pd->pd_mutex); 5039 pd->pd_flags = PD_IDLE; 5040 mutex_exit(&pd->pd_mutex); 5041 } 5042 5043 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5044 ASSERT(pd != NULL); 5045 5046 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5047 " PD removed; port=%p, job=%p", port, job); 5048 5049 mutex_enter(&pd->pd_mutex); 5050 pd->pd_ref_count--; 5051 node = pd->pd_remote_nodep; 5052 mutex_exit(&pd->pd_mutex); 5053 5054 ASSERT(node != NULL); 5055 5056 if (fctl_destroy_remote_port(port, pd) == 0) { 5057 fctl_destroy_remote_node(node); 5058 } 5059 ulp_pkt->pkt_pd = NULL; 5060 } 5061 ulp_pkt->pkt_comp(ulp_pkt); 5062 fp_jobdone(job); 5063 } 5064 5065 fp_jobwait(job); 5066 fctl_jobdone(job); 5067 5068 
FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5069 port, job); 5070 } 5071 5072 5073 /* 5074 * Name server request initialization 5075 */ 5076 static void 5077 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5078 { 5079 int rval; 5080 int count; 5081 int size; 5082 5083 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5084 5085 job->job_counter = 1; 5086 job->job_result = FC_SUCCESS; 5087 5088 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5089 KM_SLEEP, NULL, NULL); 5090 5091 if (rval != FC_SUCCESS) { 5092 mutex_enter(&port->fp_mutex); 5093 port->fp_topology = FC_TOP_NO_NS; 5094 mutex_exit(&port->fp_mutex); 5095 return; 5096 } 5097 5098 fp_jobwait(job); 5099 5100 if (job->job_result != FC_SUCCESS) { 5101 mutex_enter(&port->fp_mutex); 5102 port->fp_topology = FC_TOP_NO_NS; 5103 mutex_exit(&port->fp_mutex); 5104 return; 5105 } 5106 5107 /* 5108 * At this time, we'll do NS registration for objects in the 5109 * ns_reg_cmds (see top of this file) array. 5110 * 5111 * Each time a ULP module registers with the transport, the 5112 * appropriate bit is set in the fc4 types and registered with 5113 * the NS for that support. Also, ULPs and FC admin utilities 5114 * may do registration for objects like IP address, symbolic 5115 * port/node name, Initial process associator at run time. 5116 */ 5117 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5118 job->job_counter = size; 5119 job->job_result = FC_SUCCESS; 5120 5121 for (count = 0; count < size; count++) { 5122 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5123 job, 0, sleep) != FC_SUCCESS) { 5124 fp_jobdone(job); 5125 } 5126 } 5127 if (size) { 5128 fp_jobwait(job); 5129 } 5130 5131 job->job_result = FC_SUCCESS; 5132 5133 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5134 5135 if (port->fp_dev_count < FP_MAX_DEVICES) { 5136 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5137 } 5138 5139 job->job_counter = 1; 5140 5141 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5142 sleep) == FC_SUCCESS) { 5143 fp_jobwait(job); 5144 } 5145 } 5146 5147 5148 /* 5149 * Name server finish: 5150 * Unregister for RSCNs 5151 * Unregister all the host port objects in the Name Server 5152 * Perform LOGO with the NS. 5153 */ 5154 static void 5155 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5156 { 5157 fp_cmd_t *cmd; 5158 uchar_t class; 5159 uint32_t s_id; 5160 fc_packet_t *pkt; 5161 la_els_logo_t payload; 5162 5163 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5164 5165 job->job_counter = 1; 5166 5167 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5168 FC_SUCCESS) { 5169 fp_jobdone(job); 5170 } 5171 fp_jobwait(job); 5172 5173 job->job_counter = 1; 5174 5175 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5176 fp_jobdone(job); 5177 } 5178 fp_jobwait(job); 5179 5180 job->job_counter = 1; 5181 5182 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5183 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5184 pkt = &cmd->cmd_pkt; 5185 5186 mutex_enter(&port->fp_mutex); 5187 class = port->fp_ns_login_class; 5188 s_id = port->fp_port_id.port_id; 5189 payload.nport_id = port->fp_port_id; 5190 mutex_exit(&port->fp_mutex); 5191 5192 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5193 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5194 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5195 cmd->cmd_retry_count = 1; 5196 cmd->cmd_ulp_pkt = NULL; 5197 5198 if (port->fp_npiv_type == FC_NPIV_PORT) { 5199 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5200 } else { 5201
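/* Not an NPIV port: LOGO with the directory server at well-known address 0xFFFFFC (an NPIV port logs out through the fabric F_Port at 0xFFFFFE above). */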
fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5202 } 5203 5204 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5205 5206 payload.ls_code.ls_code = LA_ELS_LOGO; 5207 payload.ls_code.mbz = 0; 5208 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5209 5210 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5211 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5212 5213 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5214 fp_iodone(cmd); 5215 } 5216 fp_jobwait(job); 5217 } 5218 5219 5220 /* 5221 * NS Registration function. 5222 * 5223 * It should be seriously noted that FC-GS-2 currently doesn't support 5224 * an Object Registration by a D_ID other than the owner of the object. 5225 * What we are aiming at currently is to at least allow Symbolic Node/Port 5226 * Name registration for any N_Port Identifier by the host software. 5227 * 5228 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5229 * function treats the request as Host NS Object. 5230 */ 5231 static int 5232 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5233 job_request_t *job, int polled, int sleep) 5234 { 5235 int rval; 5236 fc_portid_t s_id; 5237 fc_packet_t *pkt; 5238 fp_cmd_t *cmd; 5239 5240 if (pd == NULL) { 5241 mutex_enter(&port->fp_mutex); 5242 s_id = port->fp_port_id; 5243 mutex_exit(&port->fp_mutex); 5244 } else { 5245 mutex_enter(&pd->pd_mutex); 5246 s_id = pd->pd_port_id; 5247 mutex_exit(&pd->pd_mutex); 5248 } 5249 5250 if (polled) { 5251 job->job_counter = 1; 5252 } 5253 5254 switch (cmd_code) { 5255 case NS_RPN_ID: 5256 case NS_RNN_ID: { 5257 ns_rxn_req_t rxn; 5258 5259 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5260 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5261 if (cmd == NULL) { 5262 return (FC_NOMEM); 5263 } 5264 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5265 pkt = &cmd->cmd_pkt; 5266 5267 if (pd == NULL) { 5268 rxn.rxn_xname = (cmd_code == NS_RPN_ID) ? 
5269 (port->fp_service_params.nport_ww_name) : 5270 (port->fp_service_params.node_ww_name); 5271 } else { 5272 if (cmd_code == NS_RPN_ID) { 5273 mutex_enter(&pd->pd_mutex); 5274 rxn.rxn_xname = pd->pd_port_name; 5275 mutex_exit(&pd->pd_mutex); 5276 } else { 5277 fc_remote_node_t *node; 5278 5279 mutex_enter(&pd->pd_mutex); 5280 node = pd->pd_remote_nodep; 5281 mutex_exit(&pd->pd_mutex); 5282 5283 mutex_enter(&node->fd_mutex); 5284 rxn.rxn_xname = node->fd_node_name; 5285 mutex_exit(&node->fd_mutex); 5286 } 5287 } 5288 rxn.rxn_port_id = s_id; 5289 5290 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5291 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5292 sizeof (rxn), DDI_DEV_AUTOINCR); 5293 5294 break; 5295 } 5296 5297 case NS_RCS_ID: { 5298 ns_rcos_t rcos; 5299 5300 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5301 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5302 if (cmd == NULL) { 5303 return (FC_NOMEM); 5304 } 5305 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5306 pkt = &cmd->cmd_pkt; 5307 5308 if (pd == NULL) { 5309 rcos.rcos_cos = port->fp_cos; 5310 } else { 5311 mutex_enter(&pd->pd_mutex); 5312 rcos.rcos_cos = pd->pd_cos; 5313 mutex_exit(&pd->pd_mutex); 5314 } 5315 rcos.rcos_port_id = s_id; 5316 5317 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5318 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5319 sizeof (rcos), DDI_DEV_AUTOINCR); 5320 5321 break; 5322 } 5323 5324 case NS_RFT_ID: { 5325 ns_rfc_type_t rfc; 5326 5327 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5328 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5329 NULL); 5330 if (cmd == NULL) { 5331 return (FC_NOMEM); 5332 } 5333 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5334 pkt = &cmd->cmd_pkt; 5335 5336 if (pd == NULL) { 5337 mutex_enter(&port->fp_mutex); 5338 bcopy(port->fp_fc4_types, rfc.rfc_types, 5339 sizeof (port->fp_fc4_types)); 5340 mutex_exit(&port->fp_mutex); 5341 } else { 5342 mutex_enter(&pd->pd_mutex); 5343 bcopy(pd->pd_fc4types, rfc.rfc_types, 5344 sizeof (pd->pd_fc4types)); 5345 mutex_exit(&pd->pd_mutex); 5346 } 5347 rfc.rfc_port_id = s_id; 5348 5349 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5350 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5351 sizeof (rfc), DDI_DEV_AUTOINCR); 5352 5353 break; 5354 } 5355 5356 case NS_RSPN_ID: { 5357 uchar_t name_len; 5358 int pl_size; 5359 fc_portid_t spn; 5360 5361 if (pd == NULL) { 5362 mutex_enter(&port->fp_mutex); 5363 name_len = port->fp_sym_port_namelen; 5364 mutex_exit(&port->fp_mutex); 5365 } else { 5366 mutex_enter(&pd->pd_mutex); 5367 name_len = pd->pd_spn_len; 5368 mutex_exit(&pd->pd_mutex); 5369 } 5370 5371 pl_size = sizeof (fc_portid_t) + name_len + 1; 5372 5373 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5374 sizeof (fc_reg_resp_t), sleep, NULL); 5375 if (cmd == NULL) { 5376 return (FC_NOMEM); 5377 } 5378 5379 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5380 5381 pkt = &cmd->cmd_pkt; 5382 5383 spn = s_id; 5384 5385 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5386 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5387 DDI_DEV_AUTOINCR); 5388 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5389 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5390 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5391 5392 if (pd == NULL) { 5393 mutex_enter(&port->fp_mutex); 5394 ddi_rep_put8(pkt->pkt_cmd_acc, 5395 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5396 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5397 sizeof (spn) + 1), name_len, 
DDI_DEV_AUTOINCR); 5398 mutex_exit(&port->fp_mutex); 5399 } else { 5400 mutex_enter(&pd->pd_mutex); 5401 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5402 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5403 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5404 mutex_exit(&pd->pd_mutex); 5405 } 5406 break; 5407 } 5408 5409 case NS_RPT_ID: { 5410 ns_rpt_t rpt; 5411 5412 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5413 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5414 if (cmd == NULL) { 5415 return (FC_NOMEM); 5416 } 5417 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5418 pkt = &cmd->cmd_pkt; 5419 5420 if (pd == NULL) { 5421 rpt.rpt_type = port->fp_port_type; 5422 } else { 5423 mutex_enter(&pd->pd_mutex); 5424 rpt.rpt_type = pd->pd_porttype; 5425 mutex_exit(&pd->pd_mutex); 5426 } 5427 rpt.rpt_port_id = s_id; 5428 5429 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5430 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5431 sizeof (rpt), DDI_DEV_AUTOINCR); 5432 5433 break; 5434 } 5435 5436 case NS_RIP_NN: { 5437 ns_rip_t rip; 5438 5439 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5440 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5441 if (cmd == NULL) { 5442 return (FC_NOMEM); 5443 } 5444 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5445 pkt = &cmd->cmd_pkt; 5446 5447 if (pd == NULL) { 5448 rip.rip_node_name = 5449 port->fp_service_params.node_ww_name; 5450 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5451 sizeof (port->fp_ip_addr)); 5452 } else { 5453 fc_remote_node_t *node; 5454 5455 /* 5456 * The most correct implementation should have the IP 5457 * address in the fc_remote_node_t structure; I believe 5458 * Node WWN and IP address should have one to one 5459 * correlation (but guess what this is changing in 5460 * FC-GS-2 latest draft) 5461 */ 5462 mutex_enter(&pd->pd_mutex); 5463 node = pd->pd_remote_nodep; 5464 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5465 sizeof (pd->pd_ip_addr)); 5466 mutex_exit(&pd->pd_mutex); 5467 5468 mutex_enter(&node->fd_mutex); 5469 rip.rip_node_name = node->fd_node_name; 5470 mutex_exit(&node->fd_mutex); 5471 } 5472 5473 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5474 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5475 sizeof (rip), DDI_DEV_AUTOINCR); 5476 5477 break; 5478 } 5479 5480 case NS_RIPA_NN: { 5481 ns_ipa_t ipa; 5482 5483 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5484 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5485 if (cmd == NULL) { 5486 return (FC_NOMEM); 5487 } 5488 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5489 pkt = &cmd->cmd_pkt; 5490 5491 if (pd == NULL) { 5492 ipa.ipa_node_name = 5493 port->fp_service_params.node_ww_name; 5494 bcopy(port->fp_ipa, ipa.ipa_value, 5495 sizeof (port->fp_ipa)); 5496 } else { 5497 fc_remote_node_t *node; 5498 5499 mutex_enter(&pd->pd_mutex); 5500 node = pd->pd_remote_nodep; 5501 mutex_exit(&pd->pd_mutex); 5502 5503 mutex_enter(&node->fd_mutex); 5504 ipa.ipa_node_name = node->fd_node_name; 5505 bcopy(node->fd_ipa, ipa.ipa_value, 5506 sizeof (node->fd_ipa)); 5507 mutex_exit(&node->fd_mutex); 5508 } 5509 5510 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5511 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5512 sizeof (ipa), DDI_DEV_AUTOINCR); 5513 5514 break; 5515 } 5516 5517 case NS_RSNN_NN: { 5518 uchar_t name_len; 5519 int pl_size; 5520 la_wwn_t snn; 5521 fc_remote_node_t *node = NULL; 5522 5523 if (pd == NULL) { 5524 mutex_enter(&port->fp_mutex); 5525 name_len = port->fp_sym_node_namelen; 5526 
mutex_exit(&port->fp_mutex); 5527 } else { 5528 mutex_enter(&pd->pd_mutex); 5529 node = pd->pd_remote_nodep; 5530 mutex_exit(&pd->pd_mutex); 5531 5532 mutex_enter(&node->fd_mutex); 5533 name_len = node->fd_snn_len; 5534 mutex_exit(&node->fd_mutex); 5535 } 5536 5537 pl_size = sizeof (la_wwn_t) + name_len + 1; 5538 5539 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5540 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5541 if (cmd == NULL) { 5542 return (FC_NOMEM); 5543 } 5544 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5545 5546 pkt = &cmd->cmd_pkt; 5547 5548 bcopy(&port->fp_service_params.node_ww_name, 5549 &snn, sizeof (la_wwn_t)); 5550 5551 if (pd == NULL) { 5552 mutex_enter(&port->fp_mutex); 5553 ddi_rep_put8(pkt->pkt_cmd_acc, 5554 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5555 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5556 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5557 mutex_exit(&port->fp_mutex); 5558 } else { 5559 ASSERT(node != NULL); 5560 mutex_enter(&node->fd_mutex); 5561 ddi_rep_put8(pkt->pkt_cmd_acc, 5562 (uint8_t *)node->fd_snn, 5563 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5564 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5565 mutex_exit(&node->fd_mutex); 5566 } 5567 5568 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5569 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5570 sizeof (snn), DDI_DEV_AUTOINCR); 5571 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5572 (uint8_t *)(pkt->pkt_cmd 5573 + sizeof (fc_ct_header_t) + sizeof (snn)), 5574 1, DDI_DEV_AUTOINCR); 5575 5576 break; 5577 } 5578 5579 case NS_DA_ID: { 5580 ns_remall_t rall; 5581 char tmp[4] = {0}; 5582 char *ptr; 5583 5584 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5585 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5586 5587 if (cmd == NULL) { 5588 return (FC_NOMEM); 5589 } 5590 5591 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5592 pkt = &cmd->cmd_pkt; 5593 5594 ptr = (char *)(&s_id); 5595 tmp[3] = *ptr++; 5596 tmp[2] = *ptr++; 5597 tmp[1] = *ptr++; 5598 tmp[0] = *ptr; 5599 #if defined(_BIT_FIELDS_LTOH) 5600 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5601 #else 5602 rall.rem_port_id = s_id; 5603 #endif 5604 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5605 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5606 sizeof (rall), DDI_DEV_AUTOINCR); 5607 5608 break; 5609 } 5610 5611 default: 5612 return (FC_FAILURE); 5613 } 5614 5615 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5616 5617 if (rval != FC_SUCCESS) { 5618 job->job_result = rval; 5619 fp_iodone(cmd); 5620 } 5621 5622 if (polled) { 5623 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5624 fp_jobwait(job); 5625 } else { 5626 rval = FC_SUCCESS; 5627 } 5628 5629 return (rval); 5630 } 5631 5632 5633 /* 5634 * Common interrupt handler 5635 */ 5636 static int 5637 fp_common_intr(fc_packet_t *pkt, int iodone) 5638 { 5639 int rval = FC_FAILURE; 5640 fp_cmd_t *cmd; 5641 fc_local_port_t *port; 5642 5643 cmd = pkt->pkt_ulp_private; 5644 port = cmd->cmd_port; 5645 5646 /* 5647 * Fail fast the upper layer requests if 5648 * a state change has occurred amidst. 
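 * (that is, while this command was still outstanding).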
5649 */ 5650 mutex_enter(&port->fp_mutex); 5651 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5652 mutex_exit(&port->fp_mutex); 5653 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5654 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5655 } else if (!(port->fp_soft_state & 5656 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5657 mutex_exit(&port->fp_mutex); 5658 5659 switch (pkt->pkt_state) { 5660 case FC_PKT_LOCAL_BSY: 5661 case FC_PKT_FABRIC_BSY: 5662 case FC_PKT_NPORT_BSY: 5663 case FC_PKT_TIMEOUT: 5664 cmd->cmd_retry_interval = (pkt->pkt_state == 5665 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5666 rval = fp_retry_cmd(pkt); 5667 break; 5668 5669 case FC_PKT_FABRIC_RJT: 5670 case FC_PKT_NPORT_RJT: 5671 case FC_PKT_LOCAL_RJT: 5672 case FC_PKT_LS_RJT: 5673 case FC_PKT_FS_RJT: 5674 rval = fp_handle_reject(pkt); 5675 break; 5676 5677 default: 5678 if (pkt->pkt_resp_resid) { 5679 cmd->cmd_retry_interval = 0; 5680 rval = fp_retry_cmd(pkt); 5681 } 5682 break; 5683 } 5684 } else { 5685 mutex_exit(&port->fp_mutex); 5686 } 5687 5688 if (rval != FC_SUCCESS && iodone) { 5689 fp_iodone(cmd); 5690 rval = FC_SUCCESS; 5691 } 5692 5693 return (rval); 5694 } 5695 5696 5697 /* 5698 * Some not so long winding theory on point to point topology: 5699 * 5700 * In the ACC payload, if the D_ID is ZERO and the common service 5701 * parameters indicate N_Port, then the topology is POINT TO POINT. 5702 * 5703 * In a point to point topology with an N_Port, during Fabric Login, 5704 * the destination N_Port will check with our WWN and decide if it 5705 * needs to issue PLOGI or not. That means, FLOGI could potentially 5706 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5707 * PLOGI creates the device handles. 5708 * 5709 * Assuming that the host port WWN is greater than the other N_Port 5710 * WWN, then we become the master (be aware that this isn't the word 5711 * used in the FC standards) and initiate the PLOGI. 5712 * 5713 */ 5714 static void 5715 fp_flogi_intr(fc_packet_t *pkt) 5716 { 5717 int state; 5718 int f_port; 5719 uint32_t s_id; 5720 uint32_t d_id; 5721 fp_cmd_t *cmd; 5722 fc_local_port_t *port; 5723 la_wwn_t *swwn; 5724 la_wwn_t dwwn; 5725 la_wwn_t nwwn; 5726 fc_remote_port_t *pd; 5727 la_els_logi_t *acc; 5728 com_svc_t csp; 5729 ls_code_t resp; 5730 5731 cmd = pkt->pkt_ulp_private; 5732 port = cmd->cmd_port; 5733 5734 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5735 port, pkt, pkt->pkt_state); 5736 5737 if (FP_IS_PKT_ERROR(pkt)) { 5738 (void) fp_common_intr(pkt, 1); 5739 return; 5740 } 5741 5742 /* 5743 * Currently, we don't need to swap bytes here because qlc is faking the 5744 * response for us and so endianness is getting taken care of. But we 5745 * have to fix this and generalize this at some point 5746 */ 5747 acc = (la_els_logi_t *)pkt->pkt_resp; 5748 5749 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5750 sizeof (resp), DDI_DEV_AUTOINCR); 5751 5752 ASSERT(resp.ls_code == LA_ELS_ACC); 5753 if (resp.ls_code != LA_ELS_ACC) { 5754 (void) fp_common_intr(pkt, 1); 5755 return; 5756 } 5757 5758 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5759 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5760 5761 f_port = FP_IS_F_PORT(csp.cmn_features) ? 
1 : 0; 5762 5763 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5764 5765 mutex_enter(&port->fp_mutex); 5766 state = FC_PORT_STATE_MASK(port->fp_state); 5767 mutex_exit(&port->fp_mutex); 5768 5769 if (pkt->pkt_resp_fhdr.d_id == 0) { 5770 if (f_port == 0 && state != FC_STATE_LOOP) { 5771 swwn = &port->fp_service_params.nport_ww_name; 5772 5773 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5774 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5775 DDI_DEV_AUTOINCR); 5776 5777 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5778 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5779 DDI_DEV_AUTOINCR); 5780 5781 mutex_enter(&port->fp_mutex); 5782 5783 port->fp_topology = FC_TOP_PT_PT; 5784 port->fp_total_devices = 1; 5785 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5786 port->fp_ptpt_master = 1; 5787 /* 5788 * Let us choose 'X' as S_ID and 'Y' 5789 * as D_ID and that'll work, hopefully. 5790 * If not, it will get changed. 5791 */ 5792 s_id = port->fp_instance + FP_DEFAULT_SID; 5793 d_id = port->fp_instance + FP_DEFAULT_DID; 5794 port->fp_port_id.port_id = s_id; 5795 mutex_exit(&port->fp_mutex); 5796 5797 pd = fctl_create_remote_port(port, 5798 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5799 KM_NOSLEEP); 5800 if (pd == NULL) { 5801 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5802 0, NULL, "couldn't create device" 5803 " d_id=%X", d_id); 5804 fp_iodone(cmd); 5805 return; 5806 } 5807 5808 cmd->cmd_pkt.pkt_tran_flags = 5809 pkt->pkt_tran_flags; 5810 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5811 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5812 cmd->cmd_retry_count = fp_retry_count; 5813 5814 fp_xlogi_init(port, cmd, s_id, d_id, 5815 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5816 5817 (&cmd->cmd_pkt)->pkt_pd = pd; 5818 5819 /* 5820 * We've just created this fc_remote_port_t, and 5821 * we're about to use it to send a PLOGI, so 5822 * bump the reference count right now. When 5823 * the packet is freed, the reference count will 5824 * be decremented. The ULP may also start using 5825 * it, so mark it as given away as well.
5826 */ 5827 pd->pd_ref_count++; 5828 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5829 5830 if (fp_sendcmd(port, cmd, 5831 port->fp_fca_handle) == FC_SUCCESS) { 5832 return; 5833 } 5834 } else { 5835 /* 5836 * The device handles will be created when the 5837 * unsolicited PLOGI is completed successfully 5838 */ 5839 port->fp_ptpt_master = 0; 5840 mutex_exit(&port->fp_mutex); 5841 } 5842 } 5843 pkt->pkt_state = FC_PKT_FAILURE; 5844 } else { 5845 if (f_port) { 5846 mutex_enter(&port->fp_mutex); 5847 if (state == FC_STATE_LOOP) { 5848 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5849 } else { 5850 port->fp_topology = FC_TOP_FABRIC; 5851 5852 ddi_rep_get8(pkt->pkt_resp_acc, 5853 (uint8_t *)&port->fp_fabric_name, 5854 (uint8_t *)&acc->node_ww_name, 5855 sizeof (la_wwn_t), 5856 DDI_DEV_AUTOINCR); 5857 } 5858 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5859 mutex_exit(&port->fp_mutex); 5860 } else { 5861 pkt->pkt_state = FC_PKT_FAILURE; 5862 } 5863 } 5864 fp_iodone(cmd); 5865 } 5866 5867 5868 /* 5869 * Handle solicited PLOGI response 5870 */ 5871 static void 5872 fp_plogi_intr(fc_packet_t *pkt) 5873 { 5874 int nl_port; 5875 int bailout; 5876 uint32_t d_id; 5877 fp_cmd_t *cmd; 5878 la_els_logi_t *acc; 5879 fc_local_port_t *port; 5880 fc_remote_port_t *pd; 5881 la_wwn_t nwwn; 5882 la_wwn_t pwwn; 5883 ls_code_t resp; 5884 5885 nl_port = 0; 5886 cmd = pkt->pkt_ulp_private; 5887 port = cmd->cmd_port; 5888 d_id = pkt->pkt_cmd_fhdr.d_id; 5889 5890 #ifndef __lock_lint 5891 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5892 #endif 5893 5894 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5895 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5896 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5897 5898 /* 5899 * Bail out early on ULP initiated requests if the 5900 * state change has occurred 5901 */ 5902 mutex_enter(&port->fp_mutex); 5903 bailout = ((port->fp_statec_busy || 5904 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 5905 cmd->cmd_ulp_pkt) ? 1 : 0; 5906 mutex_exit(&port->fp_mutex); 5907 5908 if (FP_IS_PKT_ERROR(pkt) || bailout) { 5909 int skip_msg = 0; 5910 int giveup = 0; 5911 5912 if (cmd->cmd_ulp_pkt) { 5913 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 5914 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 5915 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 5916 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 5917 } 5918 5919 /* 5920 * If an unsolicited cross login already created 5921 * a device speed up the discovery by not retrying 5922 * the command mindlessly. 5923 */ 5924 if (pkt->pkt_pd == NULL && 5925 fctl_get_remote_port_by_did(port, d_id) != NULL) { 5926 fp_iodone(cmd); 5927 return; 5928 } 5929 5930 if (pkt->pkt_pd != NULL) { 5931 giveup = (pkt->pkt_pd->pd_recepient == 5932 PD_PLOGI_RECEPIENT) ? 
1 : 0; 5933 if (giveup) { 5934 /* 5935 * This pd is marked as a PLOGI 5936 * recipient; stop retrying. 5937 */ 5938 FP_TRACE(FP_NHEAD1(3, 0), 5939 "fp_plogi_intr: stop retry as" 5940 " a cross login was accepted" 5941 " from d_id=%x, port=%p.", 5942 d_id, port); 5943 fp_iodone(cmd); 5944 return; 5945 } 5946 } 5947 5948 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 5949 return; 5950 } 5951 5952 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 5953 mutex_enter(&pd->pd_mutex); 5954 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 5955 skip_msg++; 5956 } 5957 mutex_exit(&pd->pd_mutex); 5958 } 5959 5960 mutex_enter(&port->fp_mutex); 5961 if (!bailout && !(skip_msg && port->fp_statec_busy) && 5962 port->fp_statec_busy <= 1 && 5963 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 5964 mutex_exit(&port->fp_mutex); 5965 /* 5966 * In case of login collisions, JNI HBAs return the 5967 * FC pkt back to the initiator with the state set to 5968 * FC_PKT_LS_RJT and the reason to FC_REASON_LOGICAL_ERROR. 5969 * QLC HBAs handle such cases in the FW and do not 5970 * return the LS_RJT with a logical error when a 5971 * login collision happens. 5972 */ 5973 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 5974 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 5975 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 5976 "PLOGI to %x failed", d_id); 5977 } 5978 FP_TRACE(FP_NHEAD2(9, 0), 5979 "PLOGI to %x failed. state=%x reason=%x.", 5980 d_id, pkt->pkt_state, pkt->pkt_reason); 5981 } else { 5982 mutex_exit(&port->fp_mutex); 5983 } 5984 5985 fp_iodone(cmd); 5986 return; 5987 } 5988 5989 acc = (la_els_logi_t *)pkt->pkt_resp; 5990 5991 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5992 sizeof (resp), DDI_DEV_AUTOINCR); 5993 5994 ASSERT(resp.ls_code == LA_ELS_ACC); 5995 if (resp.ls_code != LA_ELS_ACC) { 5996 (void) fp_common_intr(pkt, 1); 5997 return; 5998 } 5999 6000 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6001 mutex_enter(&port->fp_mutex); 6002 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6003 mutex_exit(&port->fp_mutex); 6004 fp_iodone(cmd); 6005 return; 6006 } 6007 6008 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6009 6010 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6011 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6012 DDI_DEV_AUTOINCR); 6013 6014 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6015 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6016 DDI_DEV_AUTOINCR); 6017 6018 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6019 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6020 6021 if ((pd = pkt->pkt_pd) == NULL) { 6022 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6023 if (pd == NULL) { 6024 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6025 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6026 if (pd == NULL) { 6027 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6028 "couldn't create port device handles" 6029 " d_id=%x", d_id); 6030 fp_iodone(cmd); 6031 return; 6032 } 6033 } else { 6034 fc_remote_port_t *tmp_pd; 6035 6036 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6037 if (tmp_pd != NULL) { 6038 fp_iodone(cmd); 6039 return; 6040 } 6041 6042 mutex_enter(&port->fp_mutex); 6043 mutex_enter(&pd->pd_mutex); 6044 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6045 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6046 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6047 } 6048 6049 if (pd->pd_type == PORT_DEVICE_OLD) { 6050 if (pd->pd_port_id.port_id != d_id) { 6051 fctl_delist_did_table(port, pd); 6052 pd->pd_type = PORT_DEVICE_CHANGED; 6053 pd->pd_port_id.port_id =
d_id; 6054 } else { 6055 pd->pd_type = PORT_DEVICE_NOCHANGE; 6056 } 6057 } 6058 6059 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6060 char ww_name[17]; 6061 6062 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6063 6064 mutex_exit(&pd->pd_mutex); 6065 mutex_exit(&port->fp_mutex); 6066 FP_TRACE(FP_NHEAD2(9, 0), 6067 "Possible Duplicate name or address" 6068 " identifiers in the PLOGI response" 6069 " D_ID=%x, PWWN=%s: Please check the" 6070 " configuration", d_id, ww_name); 6071 fp_iodone(cmd); 6072 return; 6073 } 6074 fctl_enlist_did_table(port, pd); 6075 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6076 mutex_exit(&pd->pd_mutex); 6077 mutex_exit(&port->fp_mutex); 6078 } 6079 } else { 6080 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6081 6082 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6083 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6084 6085 mutex_enter(&port->fp_mutex); 6086 mutex_enter(&pd->pd_mutex); 6087 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6088 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6089 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6090 pd->pd_type); 6091 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6092 pd->pd_type == PORT_DEVICE_OLD) || 6093 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6094 pd->pd_type = PORT_DEVICE_NOCHANGE; 6095 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6096 pd->pd_type = PORT_DEVICE_NEW; 6097 } 6098 } else { 6099 char old_name[17]; 6100 char new_name[17]; 6101 6102 fc_wwn_to_str(&pd->pd_port_name, old_name); 6103 fc_wwn_to_str(&pwwn, new_name); 6104 6105 FP_TRACE(FP_NHEAD1(9, 0), 6106 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6107 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6108 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6109 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6110 cmd->cmd_ulp_pkt, bailout); 6111 6112 FP_TRACE(FP_NHEAD2(9, 0), 6113 "PWWN of a device with D_ID=%x changed." 6114 " New PWWN = %s, OLD PWWN = %s", d_id, 6115 new_name, old_name); 6116 6117 if (cmd->cmd_ulp_pkt && !bailout) { 6118 fc_remote_node_t *rnodep; 6119 fc_portmap_t *changelist; 6120 fc_portmap_t *listptr; 6121 int len = 1; 6122 /* # entries in changelist */ 6123 6124 fctl_delist_pwwn_table(port, pd); 6125 6126 /* 6127 * Let's now check whether there is already a pd with 6128 * this new WWN in the table. If so, we'll mark 6129 * it as invalid. 6130 */ 6131 6132 if (new_wwn_pd) { 6133 /* 6134 * There is another pd in the pwwn 6135 * table with the same WWN that we got 6136 * in the PLOGI payload. We have to get 6137 * it out of the pwwn table, update the 6138 * pd's state (fp_fillout_old_map does 6139 * this for us) and add it to the 6140 * changelist that goes up to ULPs. 6141 * 6142 * len is the length of the changelist, so 6143 * increment it. 6144 */ 6145 len++; 6146 6147 if (tmp_pd != pd) { 6148 /* 6149 * Odd case where the pwwn and did 6150 * tables are out of sync, but 6151 * we will handle that too. See 6152 * more comments below. 6153 * 6154 * One more device that ULPs 6155 * should know about, and so len 6156 * gets incremented again. 6157 */ 6158 len++; 6159 } 6160 6161 listptr = changelist = kmem_zalloc(len * 6162 sizeof (*changelist), KM_SLEEP); 6163 6164 mutex_enter(&new_wwn_pd->pd_mutex); 6165 rnodep = new_wwn_pd->pd_remote_nodep; 6166 mutex_exit(&new_wwn_pd->pd_mutex); 6167 6168 /* 6169 * Hold the fd_mutex since 6170 * fctl_copy_portmap_held expects it.
6171 * Preserve lock hierarchy by grabbing 6172 * fd_mutex before pd_mutex 6173 */ 6174 if (rnodep) { 6175 mutex_enter(&rnodep->fd_mutex); 6176 } 6177 mutex_enter(&new_wwn_pd->pd_mutex); 6178 fp_fillout_old_map_held(listptr++, 6179 new_wwn_pd, 0); 6180 mutex_exit(&new_wwn_pd->pd_mutex); 6181 if (rnodep) { 6182 mutex_exit(&rnodep->fd_mutex); 6183 } 6184 6185 /* 6186 * Safety check : 6187 * Lets ensure that the pwwn and did 6188 * tables are in sync. Ideally, we 6189 * should not find that these two pd's 6190 * are different. 6191 */ 6192 if (tmp_pd != pd) { 6193 mutex_enter(&tmp_pd->pd_mutex); 6194 rnodep = 6195 tmp_pd->pd_remote_nodep; 6196 mutex_exit(&tmp_pd->pd_mutex); 6197 6198 /* As above grab fd_mutex */ 6199 if (rnodep) { 6200 mutex_enter(&rnodep-> 6201 fd_mutex); 6202 } 6203 mutex_enter(&tmp_pd->pd_mutex); 6204 6205 fp_fillout_old_map_held( 6206 listptr++, tmp_pd, 0); 6207 6208 mutex_exit(&tmp_pd->pd_mutex); 6209 if (rnodep) { 6210 mutex_exit(&rnodep-> 6211 fd_mutex); 6212 } 6213 6214 /* 6215 * Now add "pd" (not tmp_pd) 6216 * to fp_did_table to sync it up 6217 * with fp_pwwn_table 6218 * 6219 * pd->pd_mutex is already held 6220 * at this point 6221 */ 6222 fctl_enlist_did_table(port, pd); 6223 } 6224 } else { 6225 listptr = changelist = kmem_zalloc( 6226 sizeof (*changelist), KM_SLEEP); 6227 } 6228 6229 ASSERT(changelist != NULL); 6230 6231 fp_fillout_changed_map(listptr, pd, &d_id, 6232 &pwwn); 6233 fctl_enlist_pwwn_table(port, pd); 6234 6235 mutex_exit(&pd->pd_mutex); 6236 mutex_exit(&port->fp_mutex); 6237 6238 fp_iodone(cmd); 6239 6240 (void) fp_ulp_devc_cb(port, changelist, len, 6241 len, KM_NOSLEEP, 0); 6242 6243 return; 6244 } 6245 } 6246 6247 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6248 nl_port = 1; 6249 } 6250 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) 6251 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6252 6253 mutex_exit(&pd->pd_mutex); 6254 mutex_exit(&port->fp_mutex); 6255 6256 if (tmp_pd == NULL) { 6257 mutex_enter(&port->fp_mutex); 6258 mutex_enter(&pd->pd_mutex); 6259 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6260 char ww_name[17]; 6261 6262 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6263 mutex_exit(&pd->pd_mutex); 6264 mutex_exit(&port->fp_mutex); 6265 FP_TRACE(FP_NHEAD2(9, 0), 6266 "Possible Duplicate name or address" 6267 " identifiers in the PLOGI response" 6268 " D_ID=%x, PWWN=%s: Please check the" 6269 " configuration", d_id, ww_name); 6270 fp_iodone(cmd); 6271 return; 6272 } 6273 fctl_enlist_did_table(port, pd); 6274 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6275 mutex_exit(&pd->pd_mutex); 6276 mutex_exit(&port->fp_mutex); 6277 } 6278 } 6279 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6280 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6281 6282 if (cmd->cmd_ulp_pkt) { 6283 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6284 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6285 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6286 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6287 if (pd != NULL) { 6288 FP_TRACE(FP_NHEAD1(9, 0), 6289 "fp_plogi_intr;" 6290 "ulp_pkt's pd is NULL, get a pd %p", 6291 pd); 6292 mutex_enter(&pd->pd_mutex); 6293 pd->pd_ref_count++; 6294 mutex_exit(&pd->pd_mutex); 6295 } 6296 cmd->cmd_ulp_pkt->pkt_pd = pd; 6297 } 6298 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6299 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6300 sizeof (fc_frame_hdr_t)); 6301 bcopy((caddr_t)pkt->pkt_resp, 6302 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6303 sizeof (la_els_logi_t)); 6304 } 6305 6306 mutex_enter(&port->fp_mutex); 6307 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6308 
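/* On a private loop, or when the remote port is an NL_Port, follow the successful PLOGI with an ADISC to validate the remote port's hard address. */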
mutex_enter(&pd->pd_mutex); 6309 6310 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6311 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6312 cmd->cmd_retry_count = fp_retry_count; 6313 6314 /* 6315 * If the fc_remote_port_t pointer is not set in the given 6316 * fc_packet_t, then this fc_remote_port_t must have just 6317 * been created. Save the pointer and also increment the 6318 * fc_remote_port_t reference count. 6319 */ 6320 if (pkt->pkt_pd == NULL) { 6321 pkt->pkt_pd = pd; 6322 pd->pd_ref_count++; /* It's in use! */ 6323 } 6324 6325 fp_adisc_init(cmd, cmd->cmd_job); 6326 6327 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6328 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6329 6330 mutex_exit(&pd->pd_mutex); 6331 mutex_exit(&port->fp_mutex); 6332 6333 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6334 return; 6335 } 6336 } else { 6337 mutex_exit(&port->fp_mutex); 6338 } 6339 6340 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6341 mutex_enter(&port->fp_mutex); 6342 mutex_enter(&pd->pd_mutex); 6343 6344 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6345 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6346 cmd->cmd_retry_count = fp_retry_count; 6347 6348 fp_logo_init(pd, cmd, cmd->cmd_job); 6349 6350 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6351 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6352 6353 mutex_exit(&pd->pd_mutex); 6354 mutex_exit(&port->fp_mutex); 6355 6356 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6357 return; 6358 } 6359 6360 } 6361 fp_iodone(cmd); 6362 } 6363 6364 6365 /* 6366 * Handle solicited ADISC response 6367 */ 6368 static void 6369 fp_adisc_intr(fc_packet_t *pkt) 6370 { 6371 int rval; 6372 int bailout; 6373 fp_cmd_t *cmd; 6374 fc_local_port_t *port; 6375 fc_remote_port_t *pd; 6376 la_els_adisc_t *acc; 6377 ls_code_t resp; 6378 fc_hardaddr_t ha; 6379 fc_portmap_t *changelist; 6380 int initiator, adiscfail = 0; 6381 6382 pd = pkt->pkt_pd; 6383 cmd = pkt->pkt_ulp_private; 6384 port = cmd->cmd_port; 6385 6386 #ifndef __lock_lint 6387 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6388 #endif 6389 6390 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6391 6392 mutex_enter(&port->fp_mutex); 6393 bailout = ((port->fp_statec_busy || 6394 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6395 cmd->cmd_ulp_pkt) ? 1 : 0; 6396 mutex_exit(&port->fp_mutex); 6397 6398 if (bailout) { 6399 fp_iodone(cmd); 6400 return; 6401 } 6402 6403 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6404 acc = (la_els_adisc_t *)pkt->pkt_resp; 6405 6406 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6407 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6408 6409 if (resp.ls_code == LA_ELS_ACC) { 6410 int is_private; 6411 6412 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6413 (uint8_t *)&acc->hard_addr, sizeof (ha), 6414 DDI_DEV_AUTOINCR); 6415 6416 mutex_enter(&port->fp_mutex); 6417 6418 is_private = 6419 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6420 6421 mutex_enter(&pd->pd_mutex); 6422 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6423 fctl_enlist_did_table(port, pd); 6424 } 6425 mutex_exit(&pd->pd_mutex); 6426 6427 mutex_exit(&port->fp_mutex); 6428 6429 mutex_enter(&pd->pd_mutex); 6430 if (pd->pd_type != PORT_DEVICE_NEW) { 6431 if (is_private && (pd->pd_hard_addr.hard_addr != 6432 ha.hard_addr)) { 6433 pd->pd_type = PORT_DEVICE_CHANGED; 6434 } else { 6435 pd->pd_type = PORT_DEVICE_NOCHANGE; 6436 } 6437 } 6438 6439 if (is_private && (ha.hard_addr && 6440 pd->pd_port_id.port_id != ha.hard_addr)) { 6441 char ww_name[17]; 6442 6443 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6444 6445 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6446 "NL_Port Identifier %x doesn't match" 6447 " with Hard Address %x, Will use Port" 6448 " WWN %s", pd->pd_port_id.port_id, 6449 ha.hard_addr, ww_name); 6450 6451 pd->pd_hard_addr.hard_addr = 0; 6452 } else { 6453 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6454 } 6455 mutex_exit(&pd->pd_mutex); 6456 } else { 6457 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6458 return; 6459 } 6460 } 6461 } else { 6462 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6463 return; 6464 } 6465 6466 mutex_enter(&port->fp_mutex); 6467 if (port->fp_statec_busy <= 1) { 6468 mutex_exit(&port->fp_mutex); 6469 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6470 "ADISC to %x failed, cmd_flags=%x", 6471 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6472 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6473 adiscfail = 1; 6474 } else { 6475 mutex_exit(&port->fp_mutex); 6476 } 6477 } 6478 6479 if (cmd->cmd_ulp_pkt) { 6480 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6481 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6482 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6483 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6484 cmd->cmd_ulp_pkt->pkt_pd = pd; 6485 FP_TRACE(FP_NHEAD1(9, 0), 6486 "fp_adisc__intr;" 6487 "ulp_pkt's pd is NULL, get a pd %p", 6488 pd); 6489 6490 } 6491 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6492 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6493 sizeof (fc_frame_hdr_t)); 6494 bcopy((caddr_t)pkt->pkt_resp, 6495 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6496 sizeof (la_els_logi_t)); 6497 } 6498 6499 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6500 FP_TRACE(FP_NHEAD1(9, 0), 6501 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6502 "fp_retry_count=%x, ulp_pkt=%p", 6503 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6504 6505 mutex_enter(&port->fp_mutex); 6506 mutex_enter(&pd->pd_mutex); 6507 6508 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6509 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6510 cmd->cmd_retry_count = fp_retry_count; 6511 6512 fp_logo_init(pd, cmd, cmd->cmd_job); 6513 6514 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6515 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6516 6517 mutex_exit(&pd->pd_mutex); 6518 mutex_exit(&port->fp_mutex); 6519 6520 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6521 if (adiscfail) { 6522 mutex_enter(&pd->pd_mutex); 6523 initiator = 6524 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
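/*
 * Illustrative sketch (not part of fp): the private-loop hard-address check
 * made above, reduced to a single decision.  It returns the hard address
 * that ends up in the pd: zero when the responder reports a hard address
 * that disagrees with the assigned port identifier on a private loop,
 * otherwise whatever the ADISC accept reported.  The function name is
 * hypothetical.
 */
static uint32_t
fp_example_pick_hard_addr(int is_private, uint32_t assigned_d_id,
    uint32_t adisc_hard_addr)
{
	if (is_private && adisc_hard_addr != 0 &&
	    assigned_d_id != adisc_hard_addr) {
		return (0);		/* mismatch: ignore the hard address */
	}
	return (adisc_hard_addr);	/* consistent, or not a private loop */
}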
1 : 0; 6525 pd->pd_state = PORT_DEVICE_VALID; 6526 pd->pd_aux_flags |= PD_LOGGED_OUT; 6527 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) 6528 pd->pd_type = PORT_DEVICE_NEW; 6529 else 6530 pd->pd_type = PORT_DEVICE_NOCHANGE; 6531 mutex_exit(&pd->pd_mutex); 6532 6533 changelist = 6534 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6535 6536 if (initiator) { 6537 fp_unregister_login(pd); 6538 fctl_copy_portmap(changelist, pd); 6539 } else { 6540 fp_fillout_old_map(changelist, pd, 0); 6541 } 6542 6543 FP_TRACE(FP_NHEAD1(9, 0), 6544 "fp_adisc_intr: Dev change notification " 6545 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6546 "map_flags=%x initiator=%d", port, pd, 6547 changelist->map_type, changelist->map_state, 6548 changelist->map_flags, initiator); 6549 6550 (void) fp_ulp_devc_cb(port, changelist, 6551 1, 1, KM_SLEEP, 0); 6552 } 6553 if (rval == FC_SUCCESS) { 6554 return; 6555 } 6556 } 6557 fp_iodone(cmd); 6558 } 6559 6560 6561 /* 6562 * Handle solicited LOGO response 6563 */ 6564 static void 6565 fp_logo_intr(fc_packet_t *pkt) 6566 { 6567 ls_code_t resp; 6568 6569 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6570 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6571 6572 if (FP_IS_PKT_ERROR(pkt)) { 6573 (void) fp_common_intr(pkt, 1); 6574 return; 6575 } 6576 6577 ASSERT(resp.ls_code == LA_ELS_ACC); 6578 if (resp.ls_code != LA_ELS_ACC) { 6579 (void) fp_common_intr(pkt, 1); 6580 return; 6581 } 6582 6583 if (pkt->pkt_pd != NULL) { 6584 fp_unregister_login(pkt->pkt_pd); 6585 } 6586 fp_iodone(pkt->pkt_ulp_private); 6587 } 6588 6589 6590 /* 6591 * Handle solicited RNID response 6592 */ 6593 static void 6594 fp_rnid_intr(fc_packet_t *pkt) 6595 { 6596 ls_code_t resp; 6597 job_request_t *job; 6598 fp_cmd_t *cmd; 6599 la_els_rnid_acc_t *acc; 6600 6601 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6602 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6603 6604 cmd = pkt->pkt_ulp_private; 6605 job = cmd->cmd_job; 6606 ASSERT(job->job_private != NULL); 6607 6608 /* If failure or LS_RJT then retry the packet, if needed */ 6609 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6610 (void) fp_common_intr(pkt, 1); 6611 return; 6612 } 6613 6614 /* Save node_id memory allocated in ioctl code */ 6615 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6616 6617 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6618 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6619 6620 /* wakeup the ioctl thread and free the pkt */ 6621 fp_iodone(cmd); 6622 } 6623 6624 6625 /* 6626 * Handle solicited RLS response 6627 */ 6628 static void 6629 fp_rls_intr(fc_packet_t *pkt) 6630 { 6631 ls_code_t resp; 6632 job_request_t *job; 6633 fp_cmd_t *cmd; 6634 la_els_rls_acc_t *acc; 6635 6636 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6637 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6638 6639 cmd = pkt->pkt_ulp_private; 6640 job = cmd->cmd_job; 6641 ASSERT(job->job_private != NULL); 6642 6643 /* If failure or LS_RJT then retry the packet, if needed */ 6644 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6645 (void) fp_common_intr(pkt, 1); 6646 return; 6647 } 6648 6649 /* Save link error status block in memory allocated in ioctl code */ 6650 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6651 6652 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6653 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6654 DDI_DEV_AUTOINCR); 6655 6656 /* wakeup the ioctl thread and free the pkt */ 6657 fp_iodone(cmd); 6658 } 6659 6660 6661 
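/*
 * Illustrative sketch (not part of fp): the response-decoding step shared by
 * the LOGO, RNID and RLS completion handlers above.  The LS code is pulled
 * out of the DMA-mapped response buffer with ddi_rep_get8() and compared
 * against LA_ELS_ACC.  The helper name is hypothetical.
 */
static int
fp_example_resp_is_acc(fc_packet_t *pkt)
{
	ls_code_t resp;

	ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp,
	    (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);

	return (resp.ls_code == LA_ELS_ACC);
}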
/* 6662 * A solicited command completion interrupt (mostly for commands 6663 * that require almost no post processing such as SCR ELS) 6664 */ 6665 static void 6666 fp_intr(fc_packet_t *pkt) 6667 { 6668 if (FP_IS_PKT_ERROR(pkt)) { 6669 (void) fp_common_intr(pkt, 1); 6670 return; 6671 } 6672 fp_iodone(pkt->pkt_ulp_private); 6673 } 6674 6675 6676 /* 6677 * Handle the underlying port's state change 6678 */ 6679 static void 6680 fp_statec_cb(opaque_t port_handle, uint32_t state) 6681 { 6682 fc_local_port_t *port = port_handle; 6683 job_request_t *job; 6684 6685 /* 6686 * If it is not possible to process the callbacks 6687 * just drop the callback on the floor; Don't bother 6688 * to do something that isn't safe at this time 6689 */ 6690 mutex_enter(&port->fp_mutex); 6691 if ((port->fp_soft_state & 6692 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6693 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6694 mutex_exit(&port->fp_mutex); 6695 return; 6696 } 6697 6698 if (port->fp_statec_busy == 0) { 6699 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6700 #ifdef DEBUG 6701 } else { 6702 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6703 #endif 6704 } 6705 6706 port->fp_statec_busy++; 6707 6708 /* 6709 * For now, force the trusted method of device authentication (by 6710 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 6711 */ 6712 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6713 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6714 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6715 fp_port_offline(port, 0); 6716 } 6717 mutex_exit(&port->fp_mutex); 6718 6719 switch (FC_PORT_STATE_MASK(state)) { 6720 case FC_STATE_OFFLINE: 6721 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6722 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6723 if (job == NULL) { 6724 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6725 " fp_statec_cb() couldn't submit a job " 6726 " to the thread: failing.."); 6727 mutex_enter(&port->fp_mutex); 6728 if (--port->fp_statec_busy == 0) { 6729 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6730 } 6731 mutex_exit(&port->fp_mutex); 6732 return; 6733 } 6734 mutex_enter(&port->fp_mutex); 6735 /* 6736 * Zero out this field so that we do not retain 6737 * the fabric name as its no longer valid 6738 */ 6739 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6740 port->fp_state = state; 6741 mutex_exit(&port->fp_mutex); 6742 6743 fctl_enque_job(port, job); 6744 break; 6745 6746 case FC_STATE_ONLINE: 6747 case FC_STATE_LOOP: 6748 mutex_enter(&port->fp_mutex); 6749 port->fp_state = state; 6750 6751 if (port->fp_offline_tid) { 6752 timeout_id_t tid; 6753 6754 tid = port->fp_offline_tid; 6755 port->fp_offline_tid = NULL; 6756 mutex_exit(&port->fp_mutex); 6757 (void) untimeout(tid); 6758 } else { 6759 mutex_exit(&port->fp_mutex); 6760 } 6761 6762 job = fctl_alloc_job(JOB_PORT_ONLINE, 6763 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6764 if (job == NULL) { 6765 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6766 "fp_statec_cb() couldn't submit a job " 6767 "to the thread: failing.."); 6768 6769 mutex_enter(&port->fp_mutex); 6770 if (--port->fp_statec_busy == 0) { 6771 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6772 } 6773 mutex_exit(&port->fp_mutex); 6774 return; 6775 } 6776 fctl_enque_job(port, job); 6777 break; 6778 6779 case FC_STATE_RESET_REQUESTED: 6780 mutex_enter(&port->fp_mutex); 6781 port->fp_state = FC_STATE_OFFLINE; 6782 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6783 mutex_exit(&port->fp_mutex); 6784 /* 
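/*
 * Illustrative sketch (not part of fp): the bookkeeping used by
 * fp_statec_cb() above.  Every state change bumps fp_statec_busy, and every
 * bail-out or completion path decrements it again, clearing
 * FP_SOFT_IN_STATEC_CB once the count drops back to zero.  The helper name
 * is hypothetical.
 */
static void
fp_example_statec_done(fc_local_port_t *port)
{
	mutex_enter(&port->fp_mutex);
	if (--port->fp_statec_busy == 0) {
		port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB;
	}
	mutex_exit(&port->fp_mutex);
}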
FALLTHROUGH */ 6785 6786 case FC_STATE_RESET: 6787 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6788 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6789 if (job == NULL) { 6790 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6791 "fp_statec_cb() couldn't submit a job" 6792 " to the thread: failing.."); 6793 6794 mutex_enter(&port->fp_mutex); 6795 if (--port->fp_statec_busy == 0) { 6796 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6797 } 6798 mutex_exit(&port->fp_mutex); 6799 return; 6800 } 6801 6802 /* squeeze into some field in the job structure */ 6803 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6804 fctl_enque_job(port, job); 6805 break; 6806 6807 case FC_STATE_TARGET_PORT_RESET: 6808 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6809 /* FALLTHROUGH */ 6810 6811 case FC_STATE_NAMESERVICE: 6812 /* FALLTHROUGH */ 6813 6814 default: 6815 mutex_enter(&port->fp_mutex); 6816 if (--port->fp_statec_busy == 0) { 6817 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6818 } 6819 mutex_exit(&port->fp_mutex); 6820 break; 6821 } 6822 } 6823 6824 6825 /* 6826 * Register with the Name Server for RSCNs 6827 */ 6828 static int 6829 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6830 int sleep) 6831 { 6832 uint32_t s_id; 6833 uchar_t class; 6834 fc_scr_req_t payload; 6835 fp_cmd_t *cmd; 6836 fc_packet_t *pkt; 6837 6838 mutex_enter(&port->fp_mutex); 6839 s_id = port->fp_port_id.port_id; 6840 class = port->fp_ns_login_class; 6841 mutex_exit(&port->fp_mutex); 6842 6843 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6844 sizeof (fc_scr_resp_t), sleep, NULL); 6845 if (cmd == NULL) { 6846 return (FC_NOMEM); 6847 } 6848 6849 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6850 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6851 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6852 cmd->cmd_retry_count = fp_retry_count; 6853 cmd->cmd_ulp_pkt = NULL; 6854 6855 pkt = &cmd->cmd_pkt; 6856 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6857 6858 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6859 6860 payload.ls_code.ls_code = LA_ELS_SCR; 6861 payload.ls_code.mbz = 0; 6862 payload.scr_rsvd = 0; 6863 payload.scr_func = scr_func; 6864 6865 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6866 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6867 6868 job->job_counter = 1; 6869 6870 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6871 fp_iodone(cmd); 6872 } 6873 6874 return (FC_SUCCESS); 6875 } 6876 6877 6878 /* 6879 * There are basically two methods to determine the total number of 6880 * devices out in the NS database; Reading the details of the two 6881 * methods described below, it shouldn't be hard to identify which 6882 * of the two methods is better. 6883 * 6884 * Method 1. 6885 * Iteratively issue GANs until all ports identifiers are walked 6886 * 6887 * Method 2. 6888 * Issue GID_PT (get port Identifiers) with Maximum residual 6889 * field in the request CT HEADER set to accommodate only the 6890 * CT HEADER in the response frame. And if FC-GS2 has been 6891 * carefully read, the NS here has a chance to FS_ACC the 6892 * request and indicate the residual size in the FS_ACC. 
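/*
 * Illustrative sketch (not part of fp): how the SCR payload built in
 * fp_ns_scr() above is laid out and copied into the command buffer with
 * ddi_rep_put8().  The request itself is addressed to 0xFFFFFD, the
 * well-known Fabric Controller, as done above.  The helper name is
 * hypothetical.
 */
static void
fp_example_fill_scr(fc_packet_t *pkt, uchar_t scr_func)
{
	fc_scr_req_t payload;

	payload.ls_code.ls_code = LA_ELS_SCR;	/* State Change Registration */
	payload.ls_code.mbz = 0;
	payload.scr_rsvd = 0;
	payload.scr_func = scr_func;		/* which RSCNs to ask for */

	ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload,
	    (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
}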
6893 * 6894 * Method 2 is wonderful, although it's not mandatory for the NS 6895 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 6896 * (note with particular care the use of the auxiliary verb 'may') 6897 * 6898 */ 6899 static int 6900 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 6901 int sleep) 6902 { 6903 int flags; 6904 int rval; 6905 uint32_t src_id; 6906 fctl_ns_req_t *ns_cmd; 6907 6908 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 6909 6910 mutex_enter(&port->fp_mutex); 6911 src_id = port->fp_port_id.port_id; 6912 mutex_exit(&port->fp_mutex); 6913 6914 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 6915 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 6916 sizeof (ns_resp_gid_pt_t), 0, 6917 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 6918 6919 if (ns_cmd == NULL) { 6920 return (FC_NOMEM); 6921 } 6922 6923 ns_cmd->ns_cmd_code = NS_GID_PT; 6924 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 6925 = FC_NS_PORT_NX; /* All port types */ 6926 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 6927 6928 } else { 6929 uint32_t ns_flags; 6930 6931 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 6932 if (create) { 6933 ns_flags |= FCTL_NS_CREATE_DEVICE; 6934 } 6935 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 6936 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 6937 6938 if (ns_cmd == NULL) { 6939 return (FC_NOMEM); 6940 } 6941 ns_cmd->ns_gan_index = 0; 6942 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 6943 ns_cmd->ns_cmd_code = NS_GA_NXT; 6944 ns_cmd->ns_gan_max = 0xFFFF; 6945 6946 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 6947 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 6948 } 6949 6950 flags = job->job_flags; 6951 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 6952 job->job_counter = 1; 6953 6954 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 6955 job->job_flags = flags; 6956 6957 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 6958 uint16_t max_resid; 6959 6960 /* 6961 * Revert to scanning the NS if NS_GID_PT isn't 6962 * helping us figure out total number of devices. 6963 */ 6964 if (job->job_result != FC_SUCCESS || 6965 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 6966 mutex_enter(&port->fp_mutex); 6967 port->fp_options &= ~FP_NS_SMART_COUNT; 6968 mutex_exit(&port->fp_mutex); 6969 6970 fctl_free_ns_cmd(ns_cmd); 6971 return (fp_ns_get_devcount(port, job, create, sleep)); 6972 } 6973 6974 mutex_enter(&port->fp_mutex); 6975 port->fp_total_devices = 1; 6976 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 6977 if (max_resid) { 6978 /* 6979 * Since port identifier is 4 bytes and max_resid 6980 * is also in WORDS, max_resid simply indicates 6981 * the total number of port identifiers not 6982 * transferred 6983 */ 6984 port->fp_total_devices += max_resid; 6985 } 6986 mutex_exit(&port->fp_mutex); 6987 } 6988 mutex_enter(&port->fp_mutex); 6989 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 6990 mutex_exit(&port->fp_mutex); 6991 fctl_free_ns_cmd(ns_cmd); 6992 6993 return (rval); 6994 } 6995 6996 /* 6997 * One heck of a function to serve userland. 
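/*
 * Illustrative sketch (not part of fp): the "smart count" arithmetic used in
 * fp_ns_get_devcount() above.  With GID_PT sized so that only the CT header
 * fits in the response, ct_aiusize reports how many port identifiers (one
 * word each) were left untransferred; the code above starts the count at one
 * and adds that residual.  The helper name is hypothetical.
 */
static uint32_t
fp_example_smart_count(uint16_t max_resid)
{
	uint32_t total = 1;		/* starting count, as above */

	if (max_resid) {
		total += max_resid;	/* identifiers that did not fit */
	}
	return (total);
}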
6998 */ 6999 static int 7000 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7001 { 7002 int rval = 0; 7003 int jcode; 7004 uint32_t ret; 7005 uchar_t open_flag; 7006 fcio_t *kfcio; 7007 job_request_t *job; 7008 boolean_t use32 = B_FALSE; 7009 7010 #ifdef _MULTI_DATAMODEL 7011 switch (ddi_model_convert_from(mode & FMODELS)) { 7012 case DDI_MODEL_ILP32: 7013 use32 = B_TRUE; 7014 break; 7015 7016 case DDI_MODEL_NONE: 7017 default: 7018 break; 7019 } 7020 #endif 7021 7022 mutex_enter(&port->fp_mutex); 7023 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7024 FP_SOFT_IN_UNSOL_CB)) { 7025 fcio->fcio_errno = FC_STATEC_BUSY; 7026 mutex_exit(&port->fp_mutex); 7027 rval = EAGAIN; 7028 if (fp_fcio_copyout(fcio, data, mode)) { 7029 rval = EFAULT; 7030 } 7031 return (rval); 7032 } 7033 open_flag = port->fp_flag; 7034 mutex_exit(&port->fp_mutex); 7035 7036 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7037 fcio->fcio_errno = FC_FAILURE; 7038 rval = EACCES; 7039 if (fp_fcio_copyout(fcio, data, mode)) { 7040 rval = EFAULT; 7041 } 7042 return (rval); 7043 } 7044 7045 /* 7046 * If an exclusive open was demanded during open, don't let 7047 * either innocuous or devil threads to share the file 7048 * descriptor and fire down exclusive access commands 7049 */ 7050 mutex_enter(&port->fp_mutex); 7051 if (port->fp_flag & FP_EXCL) { 7052 if (port->fp_flag & FP_EXCL_BUSY) { 7053 mutex_exit(&port->fp_mutex); 7054 fcio->fcio_errno = FC_FAILURE; 7055 return (EBUSY); 7056 } 7057 port->fp_flag |= FP_EXCL_BUSY; 7058 } 7059 mutex_exit(&port->fp_mutex); 7060 7061 switch (fcio->fcio_cmd) { 7062 case FCIO_GET_HOST_PARAMS: { 7063 fc_port_dev_t *val; 7064 fc_port_dev32_t *val32; 7065 int index; 7066 int lilp_device_count; 7067 fc_lilpmap_t *lilp_map; 7068 uchar_t *alpa_list; 7069 7070 if (use32 == B_TRUE) { 7071 if (fcio->fcio_olen != sizeof (*val32) || 7072 fcio->fcio_xfer != FCIO_XFER_READ) { 7073 rval = EINVAL; 7074 break; 7075 } 7076 } else { 7077 if (fcio->fcio_olen != sizeof (*val) || 7078 fcio->fcio_xfer != FCIO_XFER_READ) { 7079 rval = EINVAL; 7080 break; 7081 } 7082 } 7083 7084 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7085 7086 mutex_enter(&port->fp_mutex); 7087 val->dev_did = port->fp_port_id; 7088 val->dev_hard_addr = port->fp_hard_addr; 7089 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7090 val->dev_nwwn = port->fp_service_params.node_ww_name; 7091 val->dev_state = port->fp_state; 7092 7093 lilp_map = &port->fp_lilp_map; 7094 alpa_list = &lilp_map->lilp_alpalist[0]; 7095 lilp_device_count = lilp_map->lilp_length; 7096 for (index = 0; index < lilp_device_count; index++) { 7097 uint32_t d_id; 7098 7099 d_id = alpa_list[index]; 7100 if (d_id == port->fp_port_id.port_id) { 7101 break; 7102 } 7103 } 7104 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7105 7106 bcopy(port->fp_fc4_types, val->dev_type, 7107 sizeof (port->fp_fc4_types)); 7108 mutex_exit(&port->fp_mutex); 7109 7110 if (use32 == B_TRUE) { 7111 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7112 7113 val32->dev_did = val->dev_did; 7114 val32->dev_hard_addr = val->dev_hard_addr; 7115 val32->dev_pwwn = val->dev_pwwn; 7116 val32->dev_nwwn = val->dev_nwwn; 7117 val32->dev_state = val->dev_state; 7118 val32->dev_did.priv_lilp_posit = 7119 val->dev_did.priv_lilp_posit; 7120 7121 bcopy(val->dev_type, val32->dev_type, 7122 sizeof (port->fp_fc4_types)); 7123 7124 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7125 fcio->fcio_olen, mode) == 0) { 7126 if (fp_fcio_copyout(fcio, data, mode)) { 
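/*
 * Illustrative sketch (not part of fp): the data-model handling fp_fciocmd()
 * performs above.  Under _MULTI_DATAMODEL the driver checks whether the
 * caller is a 32-bit process and, if so, copies results out through a
 * fixed-width 32-bit version of the structure instead of the native one.
 * The helper name and its arguments are hypothetical.
 */
static int
fp_example_copyout_model(void *native, void *ilp32, size_t native_len,
    size_t ilp32_len, void *ubuf, int mode)
{
	boolean_t use32 = B_FALSE;

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		use32 = B_TRUE;		/* 32-bit caller on a 64-bit kernel */
	}
#endif

	if (use32 == B_TRUE) {
		return (ddi_copyout(ilp32, ubuf, ilp32_len, mode));
	}
	return (ddi_copyout(native, ubuf, native_len, mode));
}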
7127 rval = EFAULT; 7128 } 7129 } else { 7130 rval = EFAULT; 7131 } 7132 7133 kmem_free(val32, sizeof (*val32)); 7134 } else { 7135 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7136 fcio->fcio_olen, mode) == 0) { 7137 if (fp_fcio_copyout(fcio, data, mode)) { 7138 rval = EFAULT; 7139 } 7140 } else { 7141 rval = EFAULT; 7142 } 7143 } 7144 7145 /* need to free "val" here */ 7146 kmem_free(val, sizeof (*val)); 7147 break; 7148 } 7149 7150 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7151 uint32_t index; 7152 char *tmpPath; 7153 fc_local_port_t *tmpPort; 7154 7155 if (fcio->fcio_olen < MAXPATHLEN || 7156 fcio->fcio_ilen != sizeof (uint32_t)) { 7157 rval = EINVAL; 7158 break; 7159 } 7160 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7161 rval = EFAULT; 7162 break; 7163 } 7164 7165 tmpPort = fctl_get_adapter_port_by_index(port, index); 7166 if (tmpPort == NULL) { 7167 FP_TRACE(FP_NHEAD1(9, 0), 7168 "User supplied index out of range"); 7169 fcio->fcio_errno = FC_BADPORT; 7170 rval = EFAULT; 7171 if (fp_fcio_copyout(fcio, data, mode)) { 7172 rval = EFAULT; 7173 } 7174 break; 7175 } 7176 7177 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7178 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7179 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7180 MAXPATHLEN, mode) == 0) { 7181 if (fp_fcio_copyout(fcio, data, mode)) { 7182 rval = EFAULT; 7183 } 7184 } else { 7185 rval = EFAULT; 7186 } 7187 kmem_free(tmpPath, MAXPATHLEN); 7188 break; 7189 } 7190 7191 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7192 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7193 fc_hba_adapter_attributes_t *val; 7194 fc_hba_adapter_attributes32_t *val32; 7195 7196 if (use32 == B_TRUE) { 7197 if (fcio->fcio_olen < sizeof (*val32) || 7198 fcio->fcio_xfer != FCIO_XFER_READ) { 7199 rval = EINVAL; 7200 break; 7201 } 7202 } else { 7203 if (fcio->fcio_olen < sizeof (*val) || 7204 fcio->fcio_xfer != FCIO_XFER_READ) { 7205 rval = EINVAL; 7206 break; 7207 } 7208 } 7209 7210 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7211 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7212 mutex_enter(&port->fp_mutex); 7213 bcopy(port->fp_hba_port_attrs.manufacturer, 7214 val->Manufacturer, 7215 sizeof (val->Manufacturer)); 7216 bcopy(port->fp_hba_port_attrs.serial_number, 7217 val->SerialNumber, 7218 sizeof (val->SerialNumber)); 7219 bcopy(port->fp_hba_port_attrs.model, 7220 val->Model, 7221 sizeof (val->Model)); 7222 bcopy(port->fp_hba_port_attrs.model_description, 7223 val->ModelDescription, 7224 sizeof (val->ModelDescription)); 7225 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7226 sizeof (val->NodeSymbolicName)); 7227 bcopy(port->fp_hba_port_attrs.hardware_version, 7228 val->HardwareVersion, 7229 sizeof (val->HardwareVersion)); 7230 bcopy(port->fp_hba_port_attrs.option_rom_version, 7231 val->OptionROMVersion, 7232 sizeof (val->OptionROMVersion)); 7233 bcopy(port->fp_hba_port_attrs.firmware_version, 7234 val->FirmwareVersion, 7235 sizeof (val->FirmwareVersion)); 7236 val->VendorSpecificID = 7237 port->fp_hba_port_attrs.vendor_specific_id; 7238 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7239 &val->NodeWWN.raw_wwn, 7240 sizeof (val->NodeWWN.raw_wwn)); 7241 7242 7243 bcopy(port->fp_hba_port_attrs.driver_name, 7244 val->DriverName, 7245 sizeof (val->DriverName)); 7246 bcopy(port->fp_hba_port_attrs.driver_version, 7247 val->DriverVersion, 7248 sizeof (val->DriverVersion)); 7249 mutex_exit(&port->fp_mutex); 7250 7251 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7252 val->NumberOfPorts = fctl_count_fru_ports(port, 
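/*
 * Illustrative sketch (not part of fp): the pattern used above by
 * FCIO_GET_OTHER_ADAPTER_PORTS to hand a device path back to userland:
 * allocate a MAXPATHLEN scratch buffer, let ddi_pathname() render the
 * dev_info node into it, copy it out, and free the scratch space.  The
 * helper name is hypothetical.
 */
static int
fp_example_copyout_path(dev_info_t *dip, void *ubuf, int mode)
{
	char	*path;
	int	rval = 0;

	path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);

	if (ddi_copyout(path, ubuf, MAXPATHLEN, mode) != 0) {
		rval = EFAULT;
	}

	kmem_free(path, MAXPATHLEN);
	return (rval);
}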
0); 7253 } else { 7254 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7255 } 7256 7257 if (use32 == B_TRUE) { 7258 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7259 val32->version = val->version; 7260 bcopy(val->Manufacturer, val32->Manufacturer, 7261 sizeof (val->Manufacturer)); 7262 bcopy(val->SerialNumber, val32->SerialNumber, 7263 sizeof (val->SerialNumber)); 7264 bcopy(val->Model, val32->Model, 7265 sizeof (val->Model)); 7266 bcopy(val->ModelDescription, val32->ModelDescription, 7267 sizeof (val->ModelDescription)); 7268 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7269 sizeof (val->NodeSymbolicName)); 7270 bcopy(val->HardwareVersion, val32->HardwareVersion, 7271 sizeof (val->HardwareVersion)); 7272 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7273 sizeof (val->OptionROMVersion)); 7274 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7275 sizeof (val->FirmwareVersion)); 7276 val32->VendorSpecificID = val->VendorSpecificID; 7277 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7278 sizeof (val->NodeWWN.raw_wwn)); 7279 bcopy(val->DriverName, val32->DriverName, 7280 sizeof (val->DriverName)); 7281 bcopy(val->DriverVersion, val32->DriverVersion, 7282 sizeof (val->DriverVersion)); 7283 7284 val32->NumberOfPorts = val->NumberOfPorts; 7285 7286 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7287 fcio->fcio_olen, mode) == 0) { 7288 if (fp_fcio_copyout(fcio, data, mode)) { 7289 rval = EFAULT; 7290 } 7291 } else { 7292 rval = EFAULT; 7293 } 7294 7295 kmem_free(val32, sizeof (*val32)); 7296 } else { 7297 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7298 fcio->fcio_olen, mode) == 0) { 7299 if (fp_fcio_copyout(fcio, data, mode)) { 7300 rval = EFAULT; 7301 } 7302 } else { 7303 rval = EFAULT; 7304 } 7305 } 7306 7307 kmem_free(val, sizeof (*val)); 7308 break; 7309 } 7310 7311 case FCIO_GET_NPIV_ATTRIBUTES: { 7312 fc_hba_npiv_attributes_t *attrs; 7313 7314 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7315 mutex_enter(&port->fp_mutex); 7316 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7317 &attrs->NodeWWN.raw_wwn, 7318 sizeof (attrs->NodeWWN.raw_wwn)); 7319 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7320 &attrs->PortWWN.raw_wwn, 7321 sizeof (attrs->PortWWN.raw_wwn)); 7322 mutex_exit(&port->fp_mutex); 7323 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7324 fcio->fcio_olen, mode) == 0) { 7325 if (fp_fcio_copyout(fcio, data, mode)) { 7326 rval = EFAULT; 7327 } 7328 } else { 7329 rval = EFAULT; 7330 } 7331 kmem_free(attrs, sizeof (*attrs)); 7332 break; 7333 } 7334 7335 case FCIO_DELETE_NPIV_PORT: { 7336 fc_local_port_t *tmpport; 7337 char ww_pname[17]; 7338 la_wwn_t vwwn[1]; 7339 7340 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7341 if (ddi_copyin(fcio->fcio_ibuf, 7342 &vwwn, sizeof (la_wwn_t), mode)) { 7343 rval = EFAULT; 7344 break; 7345 } 7346 7347 fc_wwn_to_str(&vwwn[0], ww_pname); 7348 FP_TRACE(FP_NHEAD1(3, 0), 7349 "Delete NPIV Port %s", ww_pname); 7350 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7351 if (tmpport == NULL) { 7352 FP_TRACE(FP_NHEAD1(3, 0), 7353 "Delete NPIV Port : no found"); 7354 rval = EFAULT; 7355 } else { 7356 fc_local_port_t *nextport = tmpport->fp_port_next; 7357 fc_local_port_t *prevport = tmpport->fp_port_prev; 7358 int portlen, portindex, ret; 7359 7360 portlen = sizeof (portindex); 7361 ret = ddi_prop_op(DDI_DEV_T_ANY, 7362 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7363 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7364 (caddr_t)&portindex, &portlen); 7365 if (ret != DDI_SUCCESS) { 7366 
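/*
 * Illustrative sketch (not part of fp): the property lookup done above when
 * deleting an NPIV port.  The integer "port" property of the child dev_info
 * node is fetched with ddi_prop_op() using PROP_LEN_AND_VAL_BUF, meaning the
 * caller supplies the buffer and its length.  The helper name is
 * hypothetical.
 */
static int
fp_example_get_port_index(dev_info_t *dip, int *indexp)
{
	int len = sizeof (*indexp);

	return (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
	    (caddr_t)indexp, &len));
}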
rval = EFAULT; 7367 break; 7368 } 7369 if (ndi_devi_offline(tmpport->fp_port_dip, 7370 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7371 FP_TRACE(FP_NHEAD1(1, 0), 7372 "Delete NPIV Port failed"); 7373 mutex_enter(&port->fp_mutex); 7374 tmpport->fp_npiv_state = 0; 7375 mutex_exit(&port->fp_mutex); 7376 rval = EFAULT; 7377 } else { 7378 mutex_enter(&port->fp_mutex); 7379 nextport->fp_port_prev = prevport; 7380 prevport->fp_port_next = nextport; 7381 if (port == port->fp_port_next) { 7382 port->fp_port_next = 7383 port->fp_port_prev = NULL; 7384 } 7385 port->fp_npiv_portnum--; 7386 FP_TRACE(FP_NHEAD1(3, 0), 7387 "Delete NPIV Port %d", portindex); 7388 port->fp_npiv_portindex[portindex-1] = 0; 7389 mutex_exit(&port->fp_mutex); 7390 } 7391 } 7392 break; 7393 } 7394 7395 case FCIO_CREATE_NPIV_PORT: { 7396 char ww_nname[17], ww_pname[17]; 7397 la_npiv_create_entry_t entrybuf; 7398 uint32_t vportindex = 0; 7399 int npiv_ret = 0; 7400 char *portname, *fcaname; 7401 7402 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7403 (void) ddi_pathname(port->fp_port_dip, portname); 7404 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7405 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7406 FP_TRACE(FP_NHEAD1(1, 0), 7407 "Create NPIV port %s %s %s", portname, fcaname, 7408 ddi_driver_name(port->fp_fca_dip)); 7409 kmem_free(portname, MAXPATHLEN); 7410 kmem_free(fcaname, MAXPATHLEN); 7411 if (ddi_copyin(fcio->fcio_ibuf, 7412 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7413 rval = EFAULT; 7414 break; 7415 } 7416 7417 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7418 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7419 vportindex = entrybuf.vindex; 7420 FP_TRACE(FP_NHEAD1(3, 0), 7421 "Create NPIV Port %s %s %d", 7422 ww_nname, ww_pname, vportindex); 7423 7424 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7425 rval = EFAULT; 7426 break; 7427 } 7428 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7429 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7430 if (npiv_ret == NDI_SUCCESS) { 7431 mutex_enter(&port->fp_mutex); 7432 port->fp_npiv_portnum++; 7433 mutex_exit(&port->fp_mutex); 7434 if (fp_copyout((void *)&vportindex, 7435 (void *)fcio->fcio_obuf, 7436 fcio->fcio_olen, mode) == 0) { 7437 if (fp_fcio_copyout(fcio, data, mode)) { 7438 rval = EFAULT; 7439 } 7440 } else { 7441 rval = EFAULT; 7442 } 7443 } else { 7444 rval = EFAULT; 7445 } 7446 FP_TRACE(FP_NHEAD1(3, 0), 7447 "Create NPIV Port %d %d", npiv_ret, vportindex); 7448 break; 7449 } 7450 7451 case FCIO_GET_NPIV_PORT_LIST: { 7452 fc_hba_npiv_port_list_t *list; 7453 int count; 7454 7455 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7456 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7457 rval = EINVAL; 7458 break; 7459 } 7460 7461 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7462 list->version = FC_HBA_LIST_VERSION; 7463 /* build npiv port list */ 7464 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7465 if (count < 0) { 7466 rval = ENXIO; 7467 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7468 kmem_free(list, fcio->fcio_olen); 7469 break; 7470 } 7471 list->numAdapters = count; 7472 7473 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7474 fcio->fcio_olen, mode) == 0) { 7475 if (fp_fcio_copyout(fcio, data, mode)) { 7476 FP_TRACE(FP_NHEAD1(1, 0), 7477 "Copy NPIV Port data error"); 7478 rval = EFAULT; 7479 } 7480 } else { 7481 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7482 rval = EFAULT; 7483 } 7484 kmem_free(list, fcio->fcio_olen); 7485 break; 7486 } 7487 7488 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: 
{ 7489 fc_hba_port_npiv_attributes_t *val; 7490 7491 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7492 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7493 7494 mutex_enter(&port->fp_mutex); 7495 val->npivflag = port->fp_npiv_flag; 7496 val->lastChange = port->fp_last_change; 7497 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7498 &val->PortWWN.raw_wwn, 7499 sizeof (val->PortWWN.raw_wwn)); 7500 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7501 &val->NodeWWN.raw_wwn, 7502 sizeof (val->NodeWWN.raw_wwn)); 7503 mutex_exit(&port->fp_mutex); 7504 7505 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7506 if (port->fp_npiv_type != FC_NPIV_PORT) { 7507 val->MaxNumberOfNPIVPorts = 7508 port->fp_fca_tran->fca_num_npivports; 7509 } else { 7510 val->MaxNumberOfNPIVPorts = 0; 7511 } 7512 7513 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7514 fcio->fcio_olen, mode) == 0) { 7515 if (fp_fcio_copyout(fcio, data, mode)) { 7516 rval = EFAULT; 7517 } 7518 } else { 7519 rval = EFAULT; 7520 } 7521 kmem_free(val, sizeof (*val)); 7522 break; 7523 } 7524 7525 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7526 fc_hba_port_attributes_t *val; 7527 fc_hba_port_attributes32_t *val32; 7528 7529 if (use32 == B_TRUE) { 7530 if (fcio->fcio_olen < sizeof (*val32) || 7531 fcio->fcio_xfer != FCIO_XFER_READ) { 7532 rval = EINVAL; 7533 break; 7534 } 7535 } else { 7536 if (fcio->fcio_olen < sizeof (*val) || 7537 fcio->fcio_xfer != FCIO_XFER_READ) { 7538 rval = EINVAL; 7539 break; 7540 } 7541 } 7542 7543 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7544 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7545 mutex_enter(&port->fp_mutex); 7546 val->lastChange = port->fp_last_change; 7547 val->fp_minor = port->fp_instance; 7548 7549 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7550 &val->PortWWN.raw_wwn, 7551 sizeof (val->PortWWN.raw_wwn)); 7552 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7553 &val->NodeWWN.raw_wwn, 7554 sizeof (val->NodeWWN.raw_wwn)); 7555 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7556 sizeof (val->FabricName.raw_wwn)); 7557 7558 val->PortFcId = port->fp_port_id.port_id; 7559 7560 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7561 case FC_STATE_OFFLINE: 7562 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7563 break; 7564 case FC_STATE_ONLINE: 7565 case FC_STATE_LOOP: 7566 case FC_STATE_NAMESERVICE: 7567 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7568 break; 7569 default: 7570 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7571 break; 7572 } 7573 7574 /* Translate from LV to FC-HBA port type codes */ 7575 switch (port->fp_port_type.port_type) { 7576 case FC_NS_PORT_N: 7577 val->PortType = FC_HBA_PORTTYPE_NPORT; 7578 break; 7579 case FC_NS_PORT_NL: /* Actually means loop for us */ 7580 val->PortType = FC_HBA_PORTTYPE_LPORT; 7581 break; 7582 case FC_NS_PORT_F: 7583 val->PortType = FC_HBA_PORTTYPE_FPORT; 7584 break; 7585 case FC_NS_PORT_FL: 7586 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7587 break; 7588 case FC_NS_PORT_E: 7589 val->PortType = FC_HBA_PORTTYPE_EPORT; 7590 break; 7591 default: 7592 val->PortType = FC_HBA_PORTTYPE_OTHER; 7593 break; 7594 } 7595 7596 7597 /* 7598 * If fp has decided that the topology is public loop, 7599 * we will indicate that using the appropriate 7600 * FC HBA API constant. 
7601 */ 7602 switch (port->fp_topology) { 7603 case FC_TOP_PUBLIC_LOOP: 7604 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7605 break; 7606 7607 case FC_TOP_PT_PT: 7608 val->PortType = FC_HBA_PORTTYPE_PTP; 7609 break; 7610 7611 case FC_TOP_UNKNOWN: 7612 /* 7613 * This should cover the case where nothing is connected 7614 * to the port. Crystal+ is p'bly an exception here. 7615 * For Crystal+, port 0 will come up as private loop 7616 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7617 * nothing is connected to it. 7618 * Current plan is to let userland handle this. 7619 */ 7620 if (port->fp_bind_state == FC_STATE_OFFLINE) 7621 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7622 break; 7623 7624 default: 7625 /* 7626 * Do Nothing. 7627 * Unused: 7628 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7629 */ 7630 break; 7631 } 7632 7633 val->PortSupportedClassofService = 7634 port->fp_hba_port_attrs.supported_cos; 7635 val->PortSupportedFc4Types[0] = 0; 7636 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7637 sizeof (val->PortActiveFc4Types)); 7638 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7639 sizeof (val->PortSymbolicName)); 7640 val->PortSupportedSpeed = 7641 port->fp_hba_port_attrs.supported_speed; 7642 7643 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7644 case FC_STATE_1GBIT_SPEED: 7645 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7646 break; 7647 case FC_STATE_2GBIT_SPEED: 7648 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7649 break; 7650 case FC_STATE_4GBIT_SPEED: 7651 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7652 break; 7653 case FC_STATE_8GBIT_SPEED: 7654 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7655 break; 7656 case FC_STATE_10GBIT_SPEED: 7657 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7658 break; 7659 case FC_STATE_16GBIT_SPEED: 7660 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7661 break; 7662 default: 7663 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7664 break; 7665 } 7666 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7667 val->NumberofDiscoveredPorts = port->fp_dev_count; 7668 mutex_exit(&port->fp_mutex); 7669 7670 if (use32 == B_TRUE) { 7671 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7672 val32->version = val->version; 7673 val32->lastChange = val->lastChange; 7674 val32->fp_minor = val->fp_minor; 7675 7676 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7677 sizeof (val->PortWWN.raw_wwn)); 7678 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7679 sizeof (val->NodeWWN.raw_wwn)); 7680 val32->PortFcId = val->PortFcId; 7681 val32->PortState = val->PortState; 7682 val32->PortType = val->PortType; 7683 7684 val32->PortSupportedClassofService = 7685 val->PortSupportedClassofService; 7686 bcopy(val->PortActiveFc4Types, 7687 val32->PortActiveFc4Types, 7688 sizeof (val->PortActiveFc4Types)); 7689 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7690 sizeof (val->PortSymbolicName)); 7691 bcopy(&val->FabricName, &val32->FabricName, 7692 sizeof (val->FabricName.raw_wwn)); 7693 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7694 val32->PortSpeed = val->PortSpeed; 7695 7696 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7697 val32->NumberofDiscoveredPorts = 7698 val->NumberofDiscoveredPorts; 7699 7700 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7701 fcio->fcio_olen, mode) == 0) { 7702 if (fp_fcio_copyout(fcio, data, mode)) { 7703 rval = EFAULT; 7704 } 7705 } else { 7706 rval = EFAULT; 7707 } 7708 7709 kmem_free(val32, sizeof (*val32)); 7710 } else { 7711 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7712 fcio->fcio_olen, mode) == 0) { 
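/*
 * Illustrative sketch (not part of fp): the link-speed translation performed
 * by the switch above, restated as a lookup table.  Both forms map the
 * FC_PORT_SPEED_MASK() bits of fp_state onto the FC-HBA API speed constants;
 * the table and function names are hypothetical.
 */
static struct {
	uint32_t	fp_speed;	/* FC_STATE_*_SPEED */
	uint32_t	hba_speed;	/* FC_HBA_PORTSPEED_* */
} fp_example_speed_map[] = {
	{ FC_STATE_1GBIT_SPEED,		FC_HBA_PORTSPEED_1GBIT },
	{ FC_STATE_2GBIT_SPEED,		FC_HBA_PORTSPEED_2GBIT },
	{ FC_STATE_4GBIT_SPEED,		FC_HBA_PORTSPEED_4GBIT },
	{ FC_STATE_8GBIT_SPEED,		FC_HBA_PORTSPEED_8GBIT },
	{ FC_STATE_10GBIT_SPEED,	FC_HBA_PORTSPEED_10GBIT },
	{ FC_STATE_16GBIT_SPEED,	FC_HBA_PORTSPEED_16GBIT }
};

static uint32_t
fp_example_map_speed(uint32_t fp_state)
{
	int i;

	for (i = 0; i < sizeof (fp_example_speed_map) /
	    sizeof (fp_example_speed_map[0]); i++) {
		if (FC_PORT_SPEED_MASK(fp_state) ==
		    fp_example_speed_map[i].fp_speed) {
			return (fp_example_speed_map[i].hba_speed);
		}
	}
	return (FC_HBA_PORTSPEED_UNKNOWN);
}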
7713 if (fp_fcio_copyout(fcio, data, mode)) { 7714 rval = EFAULT; 7715 } 7716 } else { 7717 rval = EFAULT; 7718 } 7719 } 7720 7721 kmem_free(val, sizeof (*val)); 7722 break; 7723 } 7724 7725 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7726 fc_hba_port_attributes_t *val; 7727 fc_hba_port_attributes32_t *val32; 7728 uint32_t index = 0; 7729 fc_remote_port_t *tmp_pd; 7730 7731 if (use32 == B_TRUE) { 7732 if (fcio->fcio_olen < sizeof (*val32) || 7733 fcio->fcio_xfer != FCIO_XFER_READ) { 7734 rval = EINVAL; 7735 break; 7736 } 7737 } else { 7738 if (fcio->fcio_olen < sizeof (*val) || 7739 fcio->fcio_xfer != FCIO_XFER_READ) { 7740 rval = EINVAL; 7741 break; 7742 } 7743 } 7744 7745 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7746 rval = EFAULT; 7747 break; 7748 } 7749 7750 if (index >= port->fp_dev_count) { 7751 FP_TRACE(FP_NHEAD1(9, 0), 7752 "User supplied index out of range"); 7753 fcio->fcio_errno = FC_OUTOFBOUNDS; 7754 rval = EINVAL; 7755 if (fp_fcio_copyout(fcio, data, mode)) { 7756 rval = EFAULT; 7757 } 7758 break; 7759 } 7760 7761 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7762 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7763 7764 mutex_enter(&port->fp_mutex); 7765 tmp_pd = fctl_lookup_pd_by_index(port, index); 7766 7767 if (tmp_pd == NULL) { 7768 fcio->fcio_errno = FC_BADPORT; 7769 rval = EINVAL; 7770 } else { 7771 val->lastChange = port->fp_last_change; 7772 val->fp_minor = port->fp_instance; 7773 7774 mutex_enter(&tmp_pd->pd_mutex); 7775 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7776 &val->PortWWN.raw_wwn, 7777 sizeof (val->PortWWN.raw_wwn)); 7778 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7779 &val->NodeWWN.raw_wwn, 7780 sizeof (val->NodeWWN.raw_wwn)); 7781 val->PortFcId = tmp_pd->pd_port_id.port_id; 7782 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7783 sizeof (val->PortSymbolicName)); 7784 val->PortSupportedClassofService = tmp_pd->pd_cos; 7785 /* 7786 * we will assume the sizeof these pd_fc4types and 7787 * portActiveFc4Types will remain the same. 
we could 7788 * add in a check for it, but we decided it was unneeded 7789 */ 7790 bcopy((caddr_t)tmp_pd->pd_fc4types, 7791 val->PortActiveFc4Types, 7792 sizeof (tmp_pd->pd_fc4types)); 7793 val->PortState = 7794 fp_map_remote_port_state(tmp_pd->pd_state); 7795 mutex_exit(&tmp_pd->pd_mutex); 7796 7797 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7798 val->PortSupportedFc4Types[0] = 0; 7799 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7800 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7801 val->PortMaxFrameSize = 0; 7802 val->NumberofDiscoveredPorts = 0; 7803 7804 if (use32 == B_TRUE) { 7805 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7806 val32->version = val->version; 7807 val32->lastChange = val->lastChange; 7808 val32->fp_minor = val->fp_minor; 7809 7810 bcopy(&val->PortWWN.raw_wwn, 7811 &val32->PortWWN.raw_wwn, 7812 sizeof (val->PortWWN.raw_wwn)); 7813 bcopy(&val->NodeWWN.raw_wwn, 7814 &val32->NodeWWN.raw_wwn, 7815 sizeof (val->NodeWWN.raw_wwn)); 7816 val32->PortFcId = val->PortFcId; 7817 bcopy(val->PortSymbolicName, 7818 val32->PortSymbolicName, 7819 sizeof (val->PortSymbolicName)); 7820 val32->PortSupportedClassofService = 7821 val->PortSupportedClassofService; 7822 bcopy(val->PortActiveFc4Types, 7823 val32->PortActiveFc4Types, 7824 sizeof (tmp_pd->pd_fc4types)); 7825 7826 val32->PortType = val->PortType; 7827 val32->PortState = val->PortState; 7828 val32->PortSupportedFc4Types[0] = 7829 val->PortSupportedFc4Types[0]; 7830 val32->PortSupportedSpeed = 7831 val->PortSupportedSpeed; 7832 val32->PortSpeed = val->PortSpeed; 7833 val32->PortMaxFrameSize = 7834 val->PortMaxFrameSize; 7835 val32->NumberofDiscoveredPorts = 7836 val->NumberofDiscoveredPorts; 7837 7838 if (fp_copyout((void *)val32, 7839 (void *)fcio->fcio_obuf, 7840 fcio->fcio_olen, mode) == 0) { 7841 if (fp_fcio_copyout(fcio, 7842 data, mode)) { 7843 rval = EFAULT; 7844 } 7845 } else { 7846 rval = EFAULT; 7847 } 7848 7849 kmem_free(val32, sizeof (*val32)); 7850 } else { 7851 if (fp_copyout((void *)val, 7852 (void *)fcio->fcio_obuf, 7853 fcio->fcio_olen, mode) == 0) { 7854 if (fp_fcio_copyout(fcio, data, mode)) { 7855 rval = EFAULT; 7856 } 7857 } else { 7858 rval = EFAULT; 7859 } 7860 } 7861 } 7862 7863 mutex_exit(&port->fp_mutex); 7864 kmem_free(val, sizeof (*val)); 7865 break; 7866 } 7867 7868 case FCIO_GET_PORT_ATTRIBUTES: { 7869 fc_hba_port_attributes_t *val; 7870 fc_hba_port_attributes32_t *val32; 7871 la_wwn_t wwn; 7872 fc_remote_port_t *tmp_pd; 7873 7874 if (use32 == B_TRUE) { 7875 if (fcio->fcio_olen < sizeof (*val32) || 7876 fcio->fcio_xfer != FCIO_XFER_READ) { 7877 rval = EINVAL; 7878 break; 7879 } 7880 } else { 7881 if (fcio->fcio_olen < sizeof (*val) || 7882 fcio->fcio_xfer != FCIO_XFER_READ) { 7883 rval = EINVAL; 7884 break; 7885 } 7886 } 7887 7888 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 7889 rval = EFAULT; 7890 break; 7891 } 7892 7893 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7894 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7895 7896 mutex_enter(&port->fp_mutex); 7897 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 7898 val->lastChange = port->fp_last_change; 7899 val->fp_minor = port->fp_instance; 7900 mutex_exit(&port->fp_mutex); 7901 7902 if (tmp_pd == NULL) { 7903 fcio->fcio_errno = FC_BADWWN; 7904 rval = EINVAL; 7905 } else { 7906 mutex_enter(&tmp_pd->pd_mutex); 7907 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7908 &val->PortWWN.raw_wwn, 7909 sizeof (val->PortWWN.raw_wwn)); 7910 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7911 &val->NodeWWN.raw_wwn, 7912 sizeof 
(val->NodeWWN.raw_wwn)); 7913 val->PortFcId = tmp_pd->pd_port_id.port_id; 7914 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7915 sizeof (val->PortSymbolicName)); 7916 val->PortSupportedClassofService = tmp_pd->pd_cos; 7917 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7918 val->PortState = 7919 fp_map_remote_port_state(tmp_pd->pd_state); 7920 val->PortSupportedFc4Types[0] = 0; 7921 /* 7922 * we will assume the sizeof these pd_fc4types and 7923 * portActiveFc4Types will remain the same. we could 7924 * add in a check for it, but we decided it was unneeded 7925 */ 7926 bcopy((caddr_t)tmp_pd->pd_fc4types, 7927 val->PortActiveFc4Types, 7928 sizeof (tmp_pd->pd_fc4types)); 7929 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7930 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7931 val->PortMaxFrameSize = 0; 7932 val->NumberofDiscoveredPorts = 0; 7933 mutex_exit(&tmp_pd->pd_mutex); 7934 7935 if (use32 == B_TRUE) { 7936 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7937 val32->version = val->version; 7938 val32->lastChange = val->lastChange; 7939 val32->fp_minor = val->fp_minor; 7940 bcopy(&val->PortWWN.raw_wwn, 7941 &val32->PortWWN.raw_wwn, 7942 sizeof (val->PortWWN.raw_wwn)); 7943 bcopy(&val->NodeWWN.raw_wwn, 7944 &val32->NodeWWN.raw_wwn, 7945 sizeof (val->NodeWWN.raw_wwn)); 7946 val32->PortFcId = val->PortFcId; 7947 bcopy(val->PortSymbolicName, 7948 val32->PortSymbolicName, 7949 sizeof (val->PortSymbolicName)); 7950 val32->PortSupportedClassofService = 7951 val->PortSupportedClassofService; 7952 val32->PortType = val->PortType; 7953 val32->PortState = val->PortState; 7954 val32->PortSupportedFc4Types[0] = 7955 val->PortSupportedFc4Types[0]; 7956 bcopy(val->PortActiveFc4Types, 7957 val32->PortActiveFc4Types, 7958 sizeof (tmp_pd->pd_fc4types)); 7959 val32->PortSupportedSpeed = 7960 val->PortSupportedSpeed; 7961 val32->PortSpeed = val->PortSpeed; 7962 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7963 val32->NumberofDiscoveredPorts = 7964 val->NumberofDiscoveredPorts; 7965 7966 if (fp_copyout((void *)val32, 7967 (void *)fcio->fcio_obuf, 7968 fcio->fcio_olen, mode) == 0) { 7969 if (fp_fcio_copyout(fcio, data, mode)) { 7970 rval = EFAULT; 7971 } 7972 } else { 7973 rval = EFAULT; 7974 } 7975 7976 kmem_free(val32, sizeof (*val32)); 7977 } else { 7978 if (fp_copyout((void *)val, 7979 (void *)fcio->fcio_obuf, 7980 fcio->fcio_olen, mode) == 0) { 7981 if (fp_fcio_copyout(fcio, data, mode)) { 7982 rval = EFAULT; 7983 } 7984 } else { 7985 rval = EFAULT; 7986 } 7987 } 7988 } 7989 kmem_free(val, sizeof (*val)); 7990 break; 7991 } 7992 7993 case FCIO_GET_NUM_DEVS: { 7994 int num_devices; 7995 7996 if (fcio->fcio_olen != sizeof (num_devices) || 7997 fcio->fcio_xfer != FCIO_XFER_READ) { 7998 rval = EINVAL; 7999 break; 8000 } 8001 8002 mutex_enter(&port->fp_mutex); 8003 switch (port->fp_topology) { 8004 case FC_TOP_PRIVATE_LOOP: 8005 case FC_TOP_PT_PT: 8006 num_devices = port->fp_total_devices; 8007 fcio->fcio_errno = FC_SUCCESS; 8008 break; 8009 8010 case FC_TOP_PUBLIC_LOOP: 8011 case FC_TOP_FABRIC: 8012 mutex_exit(&port->fp_mutex); 8013 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8014 NULL, KM_SLEEP); 8015 ASSERT(job != NULL); 8016 8017 /* 8018 * In FC-GS-2 the Name Server doesn't send out 8019 * RSCNs for any Name Server Database updates 8020 * When it is finally fixed there is no need 8021 * to probe as below and should be removed. 
8022 */ 8023 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8024 fctl_dealloc_job(job); 8025 8026 mutex_enter(&port->fp_mutex); 8027 num_devices = port->fp_total_devices; 8028 fcio->fcio_errno = FC_SUCCESS; 8029 break; 8030 8031 case FC_TOP_NO_NS: 8032 /* FALLTHROUGH */ 8033 case FC_TOP_UNKNOWN: 8034 /* FALLTHROUGH */ 8035 default: 8036 num_devices = 0; 8037 fcio->fcio_errno = FC_SUCCESS; 8038 break; 8039 } 8040 mutex_exit(&port->fp_mutex); 8041 8042 if (fp_copyout((void *)&num_devices, 8043 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8044 mode) == 0) { 8045 if (fp_fcio_copyout(fcio, data, mode)) { 8046 rval = EFAULT; 8047 } 8048 } else { 8049 rval = EFAULT; 8050 } 8051 break; 8052 } 8053 8054 case FCIO_GET_DEV_LIST: { 8055 int num_devices; 8056 int new_count; 8057 int map_size; 8058 8059 if (fcio->fcio_xfer != FCIO_XFER_READ || 8060 fcio->fcio_alen != sizeof (new_count)) { 8061 rval = EINVAL; 8062 break; 8063 } 8064 8065 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8066 8067 mutex_enter(&port->fp_mutex); 8068 if (num_devices < port->fp_total_devices) { 8069 fcio->fcio_errno = FC_TOOMANY; 8070 new_count = port->fp_total_devices; 8071 mutex_exit(&port->fp_mutex); 8072 8073 if (fp_copyout((void *)&new_count, 8074 (void *)fcio->fcio_abuf, 8075 sizeof (new_count), mode)) { 8076 rval = EFAULT; 8077 break; 8078 } 8079 8080 if (fp_fcio_copyout(fcio, data, mode)) { 8081 rval = EFAULT; 8082 break; 8083 } 8084 rval = EINVAL; 8085 break; 8086 } 8087 8088 if (port->fp_total_devices <= 0) { 8089 fcio->fcio_errno = FC_NO_MAP; 8090 new_count = port->fp_total_devices; 8091 mutex_exit(&port->fp_mutex); 8092 8093 if (fp_copyout((void *)&new_count, 8094 (void *)fcio->fcio_abuf, 8095 sizeof (new_count), mode)) { 8096 rval = EFAULT; 8097 break; 8098 } 8099 8100 if (fp_fcio_copyout(fcio, data, mode)) { 8101 rval = EFAULT; 8102 break; 8103 } 8104 rval = EINVAL; 8105 break; 8106 } 8107 8108 switch (port->fp_topology) { 8109 case FC_TOP_PRIVATE_LOOP: 8110 if (fp_fillout_loopmap(port, fcio, 8111 mode) != FC_SUCCESS) { 8112 rval = EFAULT; 8113 break; 8114 } 8115 if (fp_fcio_copyout(fcio, data, mode)) { 8116 rval = EFAULT; 8117 } 8118 break; 8119 8120 case FC_TOP_PT_PT: 8121 if (fp_fillout_p2pmap(port, fcio, 8122 mode) != FC_SUCCESS) { 8123 rval = EFAULT; 8124 break; 8125 } 8126 if (fp_fcio_copyout(fcio, data, mode)) { 8127 rval = EFAULT; 8128 } 8129 break; 8130 8131 case FC_TOP_PUBLIC_LOOP: 8132 case FC_TOP_FABRIC: { 8133 fctl_ns_req_t *ns_cmd; 8134 8135 map_size = 8136 sizeof (fc_port_dev_t) * port->fp_total_devices; 8137 8138 mutex_exit(&port->fp_mutex); 8139 8140 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8141 sizeof (ns_resp_gan_t), map_size, 8142 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8143 KM_SLEEP); 8144 ASSERT(ns_cmd != NULL); 8145 8146 ns_cmd->ns_gan_index = 0; 8147 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8148 ns_cmd->ns_cmd_code = NS_GA_NXT; 8149 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8150 8151 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8152 NULL, KM_SLEEP); 8153 ASSERT(job != NULL); 8154 8155 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8156 8157 if (ret != FC_SUCCESS || 8158 job->job_result != FC_SUCCESS) { 8159 fctl_free_ns_cmd(ns_cmd); 8160 8161 fcio->fcio_errno = job->job_result; 8162 new_count = 0; 8163 if (fp_copyout((void *)&new_count, 8164 (void *)fcio->fcio_abuf, 8165 sizeof (new_count), mode)) { 8166 fctl_dealloc_job(job); 8167 mutex_enter(&port->fp_mutex); 8168 rval = EFAULT; 8169 break; 8170 } 8171 8172 if (fp_fcio_copyout(fcio, data, 
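/*
 * Illustrative sketch (not part of fp): the capacity handshake used by
 * FCIO_GET_DEV_LIST above.  The caller's output buffer is measured in
 * fc_port_dev_t entries; if it cannot hold every known device the ioctl
 * fails with FC_TOOMANY but still copies the required count back through
 * the auxiliary buffer so userland can retry with a larger buffer.  The
 * helper name is hypothetical.
 */
static int
fp_example_check_capacity(fcio_t *fcio, int total_devices, int mode)
{
	int slots = fcio->fcio_olen / sizeof (fc_port_dev_t);
	int needed = total_devices;

	if (slots >= total_devices) {
		return (0);		/* buffer is big enough, proceed */
	}

	fcio->fcio_errno = FC_TOOMANY;
	if (ddi_copyout(&needed, (void *)fcio->fcio_abuf,
	    sizeof (needed), mode) != 0) {
		return (EFAULT);
	}
	return (EINVAL);		/* caller should retry with 'needed' */
}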
mode)) { 8173 fctl_dealloc_job(job); 8174 mutex_enter(&port->fp_mutex); 8175 rval = EFAULT; 8176 break; 8177 } 8178 rval = EIO; 8179 mutex_enter(&port->fp_mutex); 8180 break; 8181 } 8182 fctl_dealloc_job(job); 8183 8184 new_count = ns_cmd->ns_gan_index; 8185 if (fp_copyout((void *)&new_count, 8186 (void *)fcio->fcio_abuf, sizeof (new_count), 8187 mode)) { 8188 rval = EFAULT; 8189 fctl_free_ns_cmd(ns_cmd); 8190 mutex_enter(&port->fp_mutex); 8191 break; 8192 } 8193 8194 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8195 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8196 ns_cmd->ns_gan_index, mode)) { 8197 rval = EFAULT; 8198 fctl_free_ns_cmd(ns_cmd); 8199 mutex_enter(&port->fp_mutex); 8200 break; 8201 } 8202 fctl_free_ns_cmd(ns_cmd); 8203 8204 if (fp_fcio_copyout(fcio, data, mode)) { 8205 rval = EFAULT; 8206 } 8207 mutex_enter(&port->fp_mutex); 8208 break; 8209 } 8210 8211 case FC_TOP_NO_NS: 8212 /* FALLTHROUGH */ 8213 case FC_TOP_UNKNOWN: 8214 /* FALLTHROUGH */ 8215 default: 8216 fcio->fcio_errno = FC_NO_MAP; 8217 num_devices = port->fp_total_devices; 8218 8219 if (fp_copyout((void *)&new_count, 8220 (void *)fcio->fcio_abuf, 8221 sizeof (new_count), mode)) { 8222 rval = EFAULT; 8223 break; 8224 } 8225 8226 if (fp_fcio_copyout(fcio, data, mode)) { 8227 rval = EFAULT; 8228 break; 8229 } 8230 rval = EINVAL; 8231 break; 8232 } 8233 mutex_exit(&port->fp_mutex); 8234 break; 8235 } 8236 8237 case FCIO_GET_SYM_PNAME: { 8238 rval = ENOTSUP; 8239 break; 8240 } 8241 8242 case FCIO_GET_SYM_NNAME: { 8243 rval = ENOTSUP; 8244 break; 8245 } 8246 8247 case FCIO_SET_SYM_PNAME: { 8248 rval = ENOTSUP; 8249 break; 8250 } 8251 8252 case FCIO_SET_SYM_NNAME: { 8253 rval = ENOTSUP; 8254 break; 8255 } 8256 8257 case FCIO_GET_LOGI_PARAMS: { 8258 la_wwn_t pwwn; 8259 la_wwn_t *my_pwwn; 8260 la_els_logi_t *params; 8261 la_els_logi32_t *params32; 8262 fc_remote_node_t *node; 8263 fc_remote_port_t *pd; 8264 8265 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8266 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8267 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8268 rval = EINVAL; 8269 break; 8270 } 8271 8272 if (use32 == B_TRUE) { 8273 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8274 rval = EINVAL; 8275 break; 8276 } 8277 } else { 8278 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8279 rval = EINVAL; 8280 break; 8281 } 8282 } 8283 8284 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8285 rval = EFAULT; 8286 break; 8287 } 8288 8289 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8290 if (pd == NULL) { 8291 mutex_enter(&port->fp_mutex); 8292 my_pwwn = &port->fp_service_params.nport_ww_name; 8293 mutex_exit(&port->fp_mutex); 8294 8295 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8296 rval = ENXIO; 8297 break; 8298 } 8299 8300 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8301 mutex_enter(&port->fp_mutex); 8302 *params = port->fp_service_params; 8303 mutex_exit(&port->fp_mutex); 8304 } else { 8305 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8306 8307 mutex_enter(&pd->pd_mutex); 8308 params->ls_code.mbz = params->ls_code.ls_code = 0; 8309 params->common_service = pd->pd_csp; 8310 params->nport_ww_name = pd->pd_port_name; 8311 params->class_1 = pd->pd_clsp1; 8312 params->class_2 = pd->pd_clsp2; 8313 params->class_3 = pd->pd_clsp3; 8314 node = pd->pd_remote_nodep; 8315 mutex_exit(&pd->pd_mutex); 8316 8317 bzero(params->reserved, sizeof (params->reserved)); 8318 8319 mutex_enter(&node->fd_mutex); 8320 bcopy(node->fd_vv, params->vendor_version, 8321 sizeof (node->fd_vv)); 8322 params->node_ww_name = 
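/*
 * Illustrative sketch (not part of fp): the hold/release discipline used by
 * FCIO_GET_LOGI_PARAMS above.  fctl_hold_remote_port_by_pwwn() returns the
 * remote port with an extra reference (or NULL if it is unknown), the caller
 * snapshots what it needs under pd_mutex, and fctl_release_remote_port()
 * drops the reference afterwards.  The helper name is hypothetical.
 */
static int
fp_example_hold_remote(fc_local_port_t *port, la_wwn_t *pwwn,
    la_wwn_t *pname)
{
	fc_remote_port_t *pd;

	pd = fctl_hold_remote_port_by_pwwn(port, pwwn);
	if (pd == NULL) {
		return (ENXIO);			/* no such remote port */
	}

	mutex_enter(&pd->pd_mutex);
	*pname = pd->pd_port_name;		/* snapshot under pd_mutex */
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);		/* drop the hold */
	return (0);
}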
node->fd_node_name; 8323 mutex_exit(&node->fd_mutex); 8324 8325 fctl_release_remote_port(pd); 8326 } 8327 8328 if (use32 == B_TRUE) { 8329 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8330 8331 params32->ls_code.mbz = params->ls_code.mbz; 8332 params32->common_service = params->common_service; 8333 params32->nport_ww_name = params->nport_ww_name; 8334 params32->class_1 = params->class_1; 8335 params32->class_2 = params->class_2; 8336 params32->class_3 = params->class_3; 8337 bzero(params32->reserved, sizeof (params32->reserved)); 8338 bcopy(params->vendor_version, params32->vendor_version, 8339 sizeof (node->fd_vv)); 8340 params32->node_ww_name = params->node_ww_name; 8341 8342 if (ddi_copyout((void *)params32, 8343 (void *)fcio->fcio_obuf, 8344 sizeof (*params32), mode)) { 8345 rval = EFAULT; 8346 } 8347 8348 kmem_free(params32, sizeof (*params32)); 8349 } else { 8350 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8351 sizeof (*params), mode)) { 8352 rval = EFAULT; 8353 } 8354 } 8355 8356 kmem_free(params, sizeof (*params)); 8357 if (fp_fcio_copyout(fcio, data, mode)) { 8358 rval = EFAULT; 8359 } 8360 break; 8361 } 8362 8363 case FCIO_DEV_LOGOUT: 8364 case FCIO_DEV_LOGIN: 8365 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8366 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8367 rval = EINVAL; 8368 8369 if (fp_fcio_copyout(fcio, data, mode)) { 8370 rval = EFAULT; 8371 } 8372 break; 8373 } 8374 8375 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8376 jcode = JOB_FCIO_LOGIN; 8377 } else { 8378 jcode = JOB_FCIO_LOGOUT; 8379 } 8380 8381 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8382 bcopy(fcio, kfcio, sizeof (*fcio)); 8383 8384 if (kfcio->fcio_ilen) { 8385 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8386 KM_SLEEP); 8387 8388 if (ddi_copyin((void *)fcio->fcio_ibuf, 8389 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8390 mode)) { 8391 rval = EFAULT; 8392 8393 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8394 kmem_free(kfcio, sizeof (*kfcio)); 8395 fcio->fcio_errno = job->job_result; 8396 if (fp_fcio_copyout(fcio, data, mode)) { 8397 rval = EFAULT; 8398 } 8399 break; 8400 } 8401 } 8402 8403 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8404 job->job_private = kfcio; 8405 8406 fctl_enque_job(port, job); 8407 fctl_jobwait(job); 8408 8409 rval = job->job_result; 8410 8411 fcio->fcio_errno = kfcio->fcio_errno; 8412 if (fp_fcio_copyout(fcio, data, mode)) { 8413 rval = EFAULT; 8414 } 8415 8416 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8417 kmem_free(kfcio, sizeof (*kfcio)); 8418 fctl_dealloc_job(job); 8419 break; 8420 8421 case FCIO_GET_STATE: { 8422 la_wwn_t pwwn; 8423 uint32_t state; 8424 fc_remote_port_t *pd; 8425 fctl_ns_req_t *ns_cmd; 8426 8427 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8428 fcio->fcio_olen != sizeof (state) || 8429 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8430 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8431 rval = EINVAL; 8432 break; 8433 } 8434 8435 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8436 rval = EFAULT; 8437 break; 8438 } 8439 fcio->fcio_errno = 0; 8440 8441 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8442 if (pd == NULL) { 8443 mutex_enter(&port->fp_mutex); 8444 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8445 mutex_exit(&port->fp_mutex); 8446 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8447 NULL, NULL, KM_SLEEP); 8448 8449 job->job_counter = 1; 8450 job->job_result = FC_SUCCESS; 8451 8452 ns_cmd = fctl_alloc_ns_cmd( 8453 sizeof (ns_req_gid_pn_t), 8454 sizeof (ns_resp_gid_pn_t), 8455 sizeof (ns_resp_gid_pn_t), 8456 
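/*
 * Illustrative sketch (not part of fp): the synchronous job pattern used by
 * FCIO_DEV_LOGIN and FCIO_DEV_LOGOUT above.  A job_request_t is allocated,
 * queued to the port's job thread, and waited on; job_result carries the
 * outcome back to the caller.  The helper name is hypothetical and the
 * job_private payload is whatever the chosen job code expects.
 */
static int
fp_example_run_job(fc_local_port_t *port, int jcode, void *arg)
{
	job_request_t	*job;
	int		result;

	job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP);
	job->job_private = arg;		/* payload for the job handler */

	fctl_enque_job(port, job);	/* hand it to the job thread */
	fctl_jobwait(job);		/* block until it completes */

	result = job->job_result;
	fctl_dealloc_job(job);
	return (result);
}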
FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8457 ASSERT(ns_cmd != NULL); 8458 8459 ns_cmd->ns_cmd_code = NS_GID_PN; 8460 ((ns_req_gid_pn_t *) 8461 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8462 8463 ret = fp_ns_query(port, ns_cmd, job, 8464 1, KM_SLEEP); 8465 8466 if (ret != FC_SUCCESS || job->job_result != 8467 FC_SUCCESS) { 8468 if (ret != FC_SUCCESS) { 8469 fcio->fcio_errno = ret; 8470 } else { 8471 fcio->fcio_errno = 8472 job->job_result; 8473 } 8474 rval = EIO; 8475 } else { 8476 state = PORT_DEVICE_INVALID; 8477 } 8478 fctl_free_ns_cmd(ns_cmd); 8479 fctl_dealloc_job(job); 8480 } else { 8481 mutex_exit(&port->fp_mutex); 8482 fcio->fcio_errno = FC_BADWWN; 8483 rval = ENXIO; 8484 } 8485 } else { 8486 mutex_enter(&pd->pd_mutex); 8487 state = pd->pd_state; 8488 mutex_exit(&pd->pd_mutex); 8489 8490 fctl_release_remote_port(pd); 8491 } 8492 8493 if (!rval) { 8494 if (ddi_copyout((void *)&state, 8495 (void *)fcio->fcio_obuf, sizeof (state), 8496 mode)) { 8497 rval = EFAULT; 8498 } 8499 } 8500 if (fp_fcio_copyout(fcio, data, mode)) { 8501 rval = EFAULT; 8502 } 8503 break; 8504 } 8505 8506 case FCIO_DEV_REMOVE: { 8507 la_wwn_t pwwn; 8508 fc_portmap_t *changelist; 8509 fc_remote_port_t *pd; 8510 8511 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8512 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8513 rval = EINVAL; 8514 break; 8515 } 8516 8517 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8518 rval = EFAULT; 8519 break; 8520 } 8521 8522 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8523 if (pd == NULL) { 8524 rval = ENXIO; 8525 fcio->fcio_errno = FC_BADWWN; 8526 if (fp_fcio_copyout(fcio, data, mode)) { 8527 rval = EFAULT; 8528 } 8529 break; 8530 } 8531 8532 mutex_enter(&pd->pd_mutex); 8533 if (pd->pd_ref_count > 1) { 8534 mutex_exit(&pd->pd_mutex); 8535 8536 rval = EBUSY; 8537 fcio->fcio_errno = FC_FAILURE; 8538 fctl_release_remote_port(pd); 8539 8540 if (fp_fcio_copyout(fcio, data, mode)) { 8541 rval = EFAULT; 8542 } 8543 break; 8544 } 8545 mutex_exit(&pd->pd_mutex); 8546 8547 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8548 8549 fctl_copy_portmap(changelist, pd); 8550 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8551 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8552 8553 fctl_release_remote_port(pd); 8554 break; 8555 } 8556 8557 case FCIO_GET_FCODE_REV: { 8558 caddr_t fcode_rev; 8559 fc_fca_pm_t pm; 8560 8561 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8562 fcio->fcio_xfer != FCIO_XFER_READ) { 8563 rval = EINVAL; 8564 break; 8565 } 8566 bzero((caddr_t)&pm, sizeof (pm)); 8567 8568 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8569 8570 pm.pm_cmd_flags = FC_FCA_PM_READ; 8571 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8572 pm.pm_data_len = fcio->fcio_olen; 8573 pm.pm_data_buf = fcode_rev; 8574 8575 ret = port->fp_fca_tran->fca_port_manage( 8576 port->fp_fca_handle, &pm); 8577 8578 if (ret == FC_SUCCESS) { 8579 if (ddi_copyout((void *)fcode_rev, 8580 (void *)fcio->fcio_obuf, 8581 fcio->fcio_olen, mode) == 0) { 8582 if (fp_fcio_copyout(fcio, data, mode)) { 8583 rval = EFAULT; 8584 } 8585 } else { 8586 rval = EFAULT; 8587 } 8588 } else { 8589 /* 8590 * check if buffer was not large enough to obtain 8591 * FCODE version. 
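 * In that case pm_data_len, as updated by the FCA, exceeds the fcio_olen the caller supplied, and ENOMEM is returned, presumably so the caller can retry with a larger buffer.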
8592 */ 8593 if (pm.pm_data_len > fcio->fcio_olen) { 8594 rval = ENOMEM; 8595 } else { 8596 rval = EIO; 8597 } 8598 fcio->fcio_errno = ret; 8599 if (fp_fcio_copyout(fcio, data, mode)) { 8600 rval = EFAULT; 8601 } 8602 } 8603 kmem_free(fcode_rev, fcio->fcio_olen); 8604 break; 8605 } 8606 8607 case FCIO_GET_FW_REV: { 8608 caddr_t fw_rev; 8609 fc_fca_pm_t pm; 8610 8611 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8612 fcio->fcio_xfer != FCIO_XFER_READ) { 8613 rval = EINVAL; 8614 break; 8615 } 8616 bzero((caddr_t)&pm, sizeof (pm)); 8617 8618 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8619 8620 pm.pm_cmd_flags = FC_FCA_PM_READ; 8621 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8622 pm.pm_data_len = fcio->fcio_olen; 8623 pm.pm_data_buf = fw_rev; 8624 8625 ret = port->fp_fca_tran->fca_port_manage( 8626 port->fp_fca_handle, &pm); 8627 8628 if (ret == FC_SUCCESS) { 8629 if (ddi_copyout((void *)fw_rev, 8630 (void *)fcio->fcio_obuf, 8631 fcio->fcio_olen, mode) == 0) { 8632 if (fp_fcio_copyout(fcio, data, mode)) { 8633 rval = EFAULT; 8634 } 8635 } else { 8636 rval = EFAULT; 8637 } 8638 } else { 8639 if (fp_fcio_copyout(fcio, data, mode)) { 8640 rval = EFAULT; 8641 } 8642 rval = EIO; 8643 } 8644 kmem_free(fw_rev, fcio->fcio_olen); 8645 break; 8646 } 8647 8648 case FCIO_GET_DUMP_SIZE: { 8649 uint32_t dump_size; 8650 fc_fca_pm_t pm; 8651 8652 if (fcio->fcio_olen != sizeof (dump_size) || 8653 fcio->fcio_xfer != FCIO_XFER_READ) { 8654 rval = EINVAL; 8655 break; 8656 } 8657 bzero((caddr_t)&pm, sizeof (pm)); 8658 pm.pm_cmd_flags = FC_FCA_PM_READ; 8659 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8660 pm.pm_data_len = sizeof (dump_size); 8661 pm.pm_data_buf = (caddr_t)&dump_size; 8662 8663 ret = port->fp_fca_tran->fca_port_manage( 8664 port->fp_fca_handle, &pm); 8665 8666 if (ret == FC_SUCCESS) { 8667 if (ddi_copyout((void *)&dump_size, 8668 (void *)fcio->fcio_obuf, sizeof (dump_size), 8669 mode) == 0) { 8670 if (fp_fcio_copyout(fcio, data, mode)) { 8671 rval = EFAULT; 8672 } 8673 } else { 8674 rval = EFAULT; 8675 } 8676 } else { 8677 fcio->fcio_errno = ret; 8678 rval = EIO; 8679 if (fp_fcio_copyout(fcio, data, mode)) { 8680 rval = EFAULT; 8681 } 8682 } 8683 break; 8684 } 8685 8686 case FCIO_DOWNLOAD_FW: { 8687 caddr_t firmware; 8688 fc_fca_pm_t pm; 8689 8690 if (fcio->fcio_ilen <= 0 || 8691 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8692 rval = EINVAL; 8693 break; 8694 } 8695 8696 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8697 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8698 fcio->fcio_ilen, mode)) { 8699 rval = EFAULT; 8700 kmem_free(firmware, fcio->fcio_ilen); 8701 break; 8702 } 8703 8704 bzero((caddr_t)&pm, sizeof (pm)); 8705 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8706 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8707 pm.pm_data_len = fcio->fcio_ilen; 8708 pm.pm_data_buf = firmware; 8709 8710 ret = port->fp_fca_tran->fca_port_manage( 8711 port->fp_fca_handle, &pm); 8712 8713 kmem_free(firmware, fcio->fcio_ilen); 8714 8715 if (ret != FC_SUCCESS) { 8716 fcio->fcio_errno = ret; 8717 rval = EIO; 8718 if (fp_fcio_copyout(fcio, data, mode)) { 8719 rval = EFAULT; 8720 } 8721 } 8722 break; 8723 } 8724 8725 case FCIO_DOWNLOAD_FCODE: { 8726 caddr_t fcode; 8727 fc_fca_pm_t pm; 8728 8729 if (fcio->fcio_ilen <= 0 || 8730 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8731 rval = EINVAL; 8732 break; 8733 } 8734 8735 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8736 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8737 fcio->fcio_ilen, mode)) { 8738 rval = EFAULT; 8739 kmem_free(fcode, fcio->fcio_ilen); 8740 break; 8741 } 8742 8743 bzero((caddr_t)&pm, 
sizeof (pm)); 8744 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8745 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8746 pm.pm_data_len = fcio->fcio_ilen; 8747 pm.pm_data_buf = fcode; 8748 8749 ret = port->fp_fca_tran->fca_port_manage( 8750 port->fp_fca_handle, &pm); 8751 8752 kmem_free(fcode, fcio->fcio_ilen); 8753 8754 if (ret != FC_SUCCESS) { 8755 fcio->fcio_errno = ret; 8756 rval = EIO; 8757 if (fp_fcio_copyout(fcio, data, mode)) { 8758 rval = EFAULT; 8759 } 8760 } 8761 break; 8762 } 8763 8764 case FCIO_FORCE_DUMP: 8765 ret = port->fp_fca_tran->fca_reset( 8766 port->fp_fca_handle, FC_FCA_CORE); 8767 8768 if (ret != FC_SUCCESS) { 8769 fcio->fcio_errno = ret; 8770 rval = EIO; 8771 if (fp_fcio_copyout(fcio, data, mode)) { 8772 rval = EFAULT; 8773 } 8774 } 8775 break; 8776 8777 case FCIO_GET_DUMP: { 8778 caddr_t dump; 8779 uint32_t dump_size; 8780 fc_fca_pm_t pm; 8781 8782 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8783 rval = EINVAL; 8784 break; 8785 } 8786 bzero((caddr_t)&pm, sizeof (pm)); 8787 8788 pm.pm_cmd_flags = FC_FCA_PM_READ; 8789 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8790 pm.pm_data_len = sizeof (dump_size); 8791 pm.pm_data_buf = (caddr_t)&dump_size; 8792 8793 ret = port->fp_fca_tran->fca_port_manage( 8794 port->fp_fca_handle, &pm); 8795 8796 if (ret != FC_SUCCESS) { 8797 fcio->fcio_errno = ret; 8798 rval = EIO; 8799 if (fp_fcio_copyout(fcio, data, mode)) { 8800 rval = EFAULT; 8801 } 8802 break; 8803 } 8804 if (fcio->fcio_olen != dump_size) { 8805 fcio->fcio_errno = FC_NOMEM; 8806 rval = EINVAL; 8807 if (fp_fcio_copyout(fcio, data, mode)) { 8808 rval = EFAULT; 8809 } 8810 break; 8811 } 8812 8813 dump = kmem_zalloc(dump_size, KM_SLEEP); 8814 8815 bzero((caddr_t)&pm, sizeof (pm)); 8816 pm.pm_cmd_flags = FC_FCA_PM_READ; 8817 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8818 pm.pm_data_len = dump_size; 8819 pm.pm_data_buf = dump; 8820 8821 ret = port->fp_fca_tran->fca_port_manage( 8822 port->fp_fca_handle, &pm); 8823 8824 if (ret == FC_SUCCESS) { 8825 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8826 dump_size, mode) == 0) { 8827 if (fp_fcio_copyout(fcio, data, mode)) { 8828 rval = EFAULT; 8829 } 8830 } else { 8831 rval = EFAULT; 8832 } 8833 } else { 8834 fcio->fcio_errno = ret; 8835 rval = EIO; 8836 if (fp_fcio_copyout(fcio, data, mode)) { 8837 rval = EFAULT; 8838 } 8839 } 8840 kmem_free(dump, dump_size); 8841 break; 8842 } 8843 8844 case FCIO_GET_TOPOLOGY: { 8845 uint32_t user_topology; 8846 8847 if (fcio->fcio_xfer != FCIO_XFER_READ || 8848 fcio->fcio_olen != sizeof (user_topology)) { 8849 rval = EINVAL; 8850 break; 8851 } 8852 8853 mutex_enter(&port->fp_mutex); 8854 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8855 user_topology = FC_TOP_UNKNOWN; 8856 } else { 8857 user_topology = port->fp_topology; 8858 } 8859 mutex_exit(&port->fp_mutex); 8860 8861 if (ddi_copyout((void *)&user_topology, 8862 (void *)fcio->fcio_obuf, sizeof (user_topology), 8863 mode)) { 8864 rval = EFAULT; 8865 } 8866 break; 8867 } 8868 8869 case FCIO_RESET_LINK: { 8870 la_wwn_t pwwn; 8871 8872 /* 8873 * Look at the output buffer field; if this field has zero 8874 * bytes then attempt to reset the local link/loop. If the 8875 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 8876 * and if yes, determine the LFA and reset the remote LIP 8877 * by LINIT ELS. 
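 * (In other words, a zeroed WWN in fcio_ibuf presumably selects the local link/loop reset, while a non-zero NL_Port WWN selects the remote LINIT path; either way the copied-in WWN is handed to the JOB_LINK_RESET job below through job_private.)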
8878 */ 8879 8880 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 8881 fcio->fcio_ilen != sizeof (pwwn)) { 8882 rval = EINVAL; 8883 break; 8884 } 8885 8886 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 8887 sizeof (pwwn), mode)) { 8888 rval = EFAULT; 8889 break; 8890 } 8891 8892 mutex_enter(&port->fp_mutex); 8893 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 8894 mutex_exit(&port->fp_mutex); 8895 break; 8896 } 8897 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 8898 mutex_exit(&port->fp_mutex); 8899 8900 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 8901 if (job == NULL) { 8902 rval = ENOMEM; 8903 break; 8904 } 8905 job->job_counter = 1; 8906 job->job_private = (void *)&pwwn; 8907 8908 fctl_enque_job(port, job); 8909 fctl_jobwait(job); 8910 8911 mutex_enter(&port->fp_mutex); 8912 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 8913 mutex_exit(&port->fp_mutex); 8914 8915 if (job->job_result != FC_SUCCESS) { 8916 fcio->fcio_errno = job->job_result; 8917 rval = EIO; 8918 if (fp_fcio_copyout(fcio, data, mode)) { 8919 rval = EFAULT; 8920 } 8921 } 8922 fctl_dealloc_job(job); 8923 break; 8924 } 8925 8926 case FCIO_RESET_HARD: 8927 ret = port->fp_fca_tran->fca_reset( 8928 port->fp_fca_handle, FC_FCA_RESET); 8929 if (ret != FC_SUCCESS) { 8930 fcio->fcio_errno = ret; 8931 rval = EIO; 8932 if (fp_fcio_copyout(fcio, data, mode)) { 8933 rval = EFAULT; 8934 } 8935 } 8936 break; 8937 8938 case FCIO_RESET_HARD_CORE: 8939 ret = port->fp_fca_tran->fca_reset( 8940 port->fp_fca_handle, FC_FCA_RESET_CORE); 8941 if (ret != FC_SUCCESS) { 8942 rval = EIO; 8943 fcio->fcio_errno = ret; 8944 if (fp_fcio_copyout(fcio, data, mode)) { 8945 rval = EFAULT; 8946 } 8947 } 8948 break; 8949 8950 case FCIO_DIAG: { 8951 fc_fca_pm_t pm; 8952 8953 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 8954 8955 /* Validate user buffer from ioctl call. */ 8956 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 8957 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 8958 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 8959 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 8960 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 8961 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 8962 rval = EFAULT; 8963 break; 8964 } 8965 8966 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 8967 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8968 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 8969 fcio->fcio_ilen, mode)) { 8970 rval = EFAULT; 8971 goto fp_fcio_diag_cleanup; 8972 } 8973 } 8974 8975 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 8976 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 8977 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 8978 fcio->fcio_alen, mode)) { 8979 rval = EFAULT; 8980 goto fp_fcio_diag_cleanup; 8981 } 8982 } 8983 8984 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 8985 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8986 } 8987 8988 pm.pm_cmd_code = FC_PORT_DIAG; 8989 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 8990 8991 ret = port->fp_fca_tran->fca_port_manage( 8992 port->fp_fca_handle, &pm); 8993 8994 if (ret != FC_SUCCESS) { 8995 if (ret == FC_INVALID_REQUEST) { 8996 rval = ENOTTY; 8997 } else { 8998 rval = EIO; 8999 } 9000 9001 fcio->fcio_errno = ret; 9002 if (fp_fcio_copyout(fcio, data, mode)) { 9003 rval = EFAULT; 9004 } 9005 goto fp_fcio_diag_cleanup; 9006 } 9007 9008 /* 9009 * pm_stat_len will contain the number of status bytes 9010 * an FCA driver requires to return the complete status 9011 * of the requested diag operation. 
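 * (pm_stat_len is primed with fcio_olen before the call and updated by the FCA on return, so it may come back larger than the buffer the caller supplied.)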
If the user buffer 9012 * is not large enough to hold the entire status, we 9013 * copy only the portion of data that fits in the buffer and 9014 * return ENOMEM to the user application. 9015 */ 9016 if (pm.pm_stat_len > fcio->fcio_olen) { 9017 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9018 "fp:FCIO_DIAG:status buffer too small\n"); 9019 9020 rval = ENOMEM; 9021 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9022 fcio->fcio_olen, mode)) { 9023 rval = EFAULT; 9024 goto fp_fcio_diag_cleanup; 9025 } 9026 } else { 9027 /* 9028 * Copy only pm_stat_len bytes of data 9029 */ 9030 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9031 pm.pm_stat_len, mode)) { 9032 rval = EFAULT; 9033 goto fp_fcio_diag_cleanup; 9034 } 9035 } 9036 9037 if (fp_fcio_copyout(fcio, data, mode)) { 9038 rval = EFAULT; 9039 } 9040 9041 fp_fcio_diag_cleanup: 9042 if (pm.pm_cmd_buf != NULL) { 9043 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9044 } 9045 if (pm.pm_data_buf != NULL) { 9046 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9047 } 9048 if (pm.pm_stat_buf != NULL) { 9049 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9050 } 9051 9052 break; 9053 } 9054 9055 case FCIO_GET_NODE_ID: { 9056 /* validate parameters */ 9057 if (fcio->fcio_xfer != FCIO_XFER_READ || 9058 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9059 rval = EINVAL; 9060 break; 9061 } 9062 9063 rval = fp_get_rnid(port, data, mode, fcio); 9064 9065 /* ioctl handling is over */ 9066 break; 9067 } 9068 9069 case FCIO_SEND_NODE_ID: { 9070 la_wwn_t pwwn; 9071 9072 /* validate parameters */ 9073 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9074 fcio->fcio_xfer != FCIO_XFER_READ) { 9075 rval = EINVAL; 9076 break; 9077 } 9078 9079 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9080 sizeof (la_wwn_t), mode)) { 9081 rval = EFAULT; 9082 break; 9083 } 9084 9085 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9086 9087 /* ioctl handling is over */ 9088 break; 9089 } 9090 9091 case FCIO_SET_NODE_ID: { 9092 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9093 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9094 rval = EINVAL; 9095 break; 9096 } 9097 9098 rval = fp_set_rnid(port, data, mode, fcio); 9099 break; 9100 } 9101 9102 case FCIO_LINK_STATUS: { 9103 fc_portid_t rls_req; 9104 fc_rls_acc_t *rls_acc; 9105 fc_fca_pm_t pm; 9106 uint32_t dest, src_id; 9107 fp_cmd_t *cmd; 9108 fc_remote_port_t *pd; 9109 uchar_t pd_flags; 9110 9111 /* validate parameters */ 9112 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9113 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9114 fcio->fcio_xfer != FCIO_XFER_RW) { 9115 rval = EINVAL; 9116 break; 9117 } 9118 9119 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9120 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9121 rval = EINVAL; 9122 break; 9123 } 9124 9125 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9126 sizeof (fc_portid_t), mode)) { 9127 rval = EFAULT; 9128 break; 9129 } 9130 9131 9132 /* Determine the destination of the RLS frame */ 9133 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9134 dest = FS_FABRIC_F_PORT; 9135 } else { 9136 dest = rls_req.port_id; 9137 } 9138 9139 mutex_enter(&port->fp_mutex); 9140 src_id = port->fp_port_id.port_id; 9141 mutex_exit(&port->fp_mutex); 9142 9143 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9144 if (dest == 0 || dest == src_id) { 9145 9146 /* Allocate memory for link error status block */ 9147 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9148 ASSERT(rls_acc != NULL); 9149 9150 /* Prepare the port management structure */ 9151 bzero((caddr_t)&pm,
sizeof (pm)); 9152 9153 pm.pm_cmd_flags = FC_FCA_PM_READ; 9154 pm.pm_cmd_code = FC_PORT_RLS; 9155 pm.pm_data_len = sizeof (*rls_acc); 9156 pm.pm_data_buf = (caddr_t)rls_acc; 9157 9158 /* Get the adapter's link error status block */ 9159 ret = port->fp_fca_tran->fca_port_manage( 9160 port->fp_fca_handle, &pm); 9161 9162 if (ret == FC_SUCCESS) { 9163 /* xfer link status block to userland */ 9164 if (ddi_copyout((void *)rls_acc, 9165 (void *)fcio->fcio_obuf, 9166 sizeof (*rls_acc), mode) == 0) { 9167 if (fp_fcio_copyout(fcio, data, 9168 mode)) { 9169 rval = EFAULT; 9170 } 9171 } else { 9172 rval = EFAULT; 9173 } 9174 } else { 9175 rval = EIO; 9176 fcio->fcio_errno = ret; 9177 if (fp_fcio_copyout(fcio, data, mode)) { 9178 rval = EFAULT; 9179 } 9180 } 9181 9182 kmem_free(rls_acc, sizeof (*rls_acc)); 9183 9184 /* ioctl handling is over */ 9185 break; 9186 } 9187 9188 /* 9189 * Send RLS to the destination port. 9190 * Having the RLS frame destination as FPORT is not yet 9191 * supported and will be implemented in the future, if needed. 9192 * The following call to get "pd" will fail if dest is FPORT 9193 */ 9194 pd = fctl_hold_remote_port_by_did(port, dest); 9195 if (pd == NULL) { 9196 fcio->fcio_errno = FC_BADOBJECT; 9197 rval = ENXIO; 9198 if (fp_fcio_copyout(fcio, data, mode)) { 9199 rval = EFAULT; 9200 } 9201 break; 9202 } 9203 9204 mutex_enter(&pd->pd_mutex); 9205 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9206 mutex_exit(&pd->pd_mutex); 9207 fctl_release_remote_port(pd); 9208 9209 fcio->fcio_errno = FC_LOGINREQ; 9210 rval = EINVAL; 9211 if (fp_fcio_copyout(fcio, data, mode)) { 9212 rval = EFAULT; 9213 } 9214 break; 9215 } 9216 ASSERT(pd->pd_login_count >= 1); 9217 mutex_exit(&pd->pd_mutex); 9218 9219 /* 9220 * Allocate job structure and set job_code as DUMMY, 9221 * because we will not go through the job thread. 9222 * Instead fp_sendcmd() is called directly here. 9223 */ 9224 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9225 NULL, NULL, KM_SLEEP); 9226 ASSERT(job != NULL); 9227 9228 job->job_counter = 1; 9229 9230 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9231 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9232 if (cmd == NULL) { 9233 fcio->fcio_errno = FC_NOMEM; 9234 rval = ENOMEM; 9235 9236 fctl_release_remote_port(pd); 9237 9238 fctl_dealloc_job(job); 9239 if (fp_fcio_copyout(fcio, data, mode)) { 9240 rval = EFAULT; 9241 } 9242 break; 9243 } 9244 9245 /* Allocate memory for link error status block */ 9246 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9247 9248 mutex_enter(&port->fp_mutex); 9249 mutex_enter(&pd->pd_mutex); 9250 9251 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9252 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9253 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9254 cmd->cmd_retry_count = 1; 9255 cmd->cmd_ulp_pkt = NULL; 9256 9257 fp_rls_init(cmd, job); 9258 9259 job->job_private = (void *)rls_acc; 9260 9261 pd_flags = pd->pd_flags; 9262 pd->pd_flags = PD_ELS_IN_PROGRESS; 9263 9264 mutex_exit(&pd->pd_mutex); 9265 mutex_exit(&port->fp_mutex); 9266 9267 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9268 fctl_jobwait(job); 9269 9270 fcio->fcio_errno = job->job_result; 9271 if (job->job_result == FC_SUCCESS) { 9272 ASSERT(pd != NULL); 9273 /* 9274 * link error status block is now available.
9275 * Copy it to userland 9276 */ 9277 ASSERT(job->job_private == (void *)rls_acc); 9278 if (ddi_copyout((void *)rls_acc, 9279 (void *)fcio->fcio_obuf, 9280 sizeof (*rls_acc), mode) == 0) { 9281 if (fp_fcio_copyout(fcio, data, 9282 mode)) { 9283 rval = EFAULT; 9284 } 9285 } else { 9286 rval = EFAULT; 9287 } 9288 } else { 9289 rval = EIO; 9290 } 9291 } else { 9292 rval = EIO; 9293 fp_free_pkt(cmd); 9294 } 9295 9296 if (rval) { 9297 mutex_enter(&port->fp_mutex); 9298 mutex_enter(&pd->pd_mutex); 9299 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9300 pd->pd_flags = pd_flags; 9301 } 9302 mutex_exit(&pd->pd_mutex); 9303 mutex_exit(&port->fp_mutex); 9304 } 9305 9306 fctl_release_remote_port(pd); 9307 fctl_dealloc_job(job); 9308 kmem_free(rls_acc, sizeof (*rls_acc)); 9309 9310 if (fp_fcio_copyout(fcio, data, mode)) { 9311 rval = EFAULT; 9312 } 9313 break; 9314 } 9315 9316 case FCIO_NS: { 9317 fc_ns_cmd_t *ns_req; 9318 fc_ns_cmd32_t *ns_req32; 9319 fctl_ns_req_t *ns_cmd; 9320 9321 if (use32 == B_TRUE) { 9322 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9323 rval = EINVAL; 9324 break; 9325 } 9326 9327 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9328 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9329 9330 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9331 sizeof (*ns_req32), mode)) { 9332 rval = EFAULT; 9333 kmem_free(ns_req, sizeof (*ns_req)); 9334 kmem_free(ns_req32, sizeof (*ns_req32)); 9335 break; 9336 } 9337 9338 ns_req->ns_flags = ns_req32->ns_flags; 9339 ns_req->ns_cmd = ns_req32->ns_cmd; 9340 ns_req->ns_req_len = ns_req32->ns_req_len; 9341 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9342 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9343 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9344 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9345 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9346 9347 kmem_free(ns_req32, sizeof (*ns_req32)); 9348 } else { 9349 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9350 rval = EINVAL; 9351 break; 9352 } 9353 9354 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9355 9356 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9357 sizeof (fc_ns_cmd_t), mode)) { 9358 rval = EFAULT; 9359 kmem_free(ns_req, sizeof (*ns_req)); 9360 break; 9361 } 9362 } 9363 9364 if (ns_req->ns_req_len <= 0) { 9365 rval = EINVAL; 9366 kmem_free(ns_req, sizeof (*ns_req)); 9367 break; 9368 } 9369 9370 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9371 ASSERT(job != NULL); 9372 9373 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9374 ns_req->ns_resp_len, ns_req->ns_resp_len, 9375 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9376 ASSERT(ns_cmd != NULL); 9377 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9378 9379 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9380 ns_cmd->ns_gan_max = 1; 9381 ns_cmd->ns_gan_index = 0; 9382 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9383 } 9384 9385 if (ddi_copyin(ns_req->ns_req_payload, 9386 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9387 rval = EFAULT; 9388 fctl_free_ns_cmd(ns_cmd); 9389 fctl_dealloc_job(job); 9390 kmem_free(ns_req, sizeof (*ns_req)); 9391 break; 9392 } 9393 9394 job->job_private = (void *)ns_cmd; 9395 fctl_enque_job(port, job); 9396 fctl_jobwait(job); 9397 rval = job->job_result; 9398 9399 if (rval == FC_SUCCESS) { 9400 if (ns_req->ns_resp_len) { 9401 if (ddi_copyout(ns_cmd->ns_data_buf, 9402 ns_req->ns_resp_payload, 9403 ns_cmd->ns_data_len, mode)) { 9404 rval = EFAULT; 9405 fctl_free_ns_cmd(ns_cmd); 9406 fctl_dealloc_job(job); 9407 kmem_free(ns_req, sizeof (*ns_req)); 9408 break; 9409 } 9410 } 9411 } else { 9412 rval = EIO; 9413 } 9414 
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9415 fctl_free_ns_cmd(ns_cmd); 9416 fctl_dealloc_job(job); 9417 kmem_free(ns_req, sizeof (*ns_req)); 9418 9419 if (fp_fcio_copyout(fcio, data, mode)) { 9420 rval = EFAULT; 9421 } 9422 break; 9423 } 9424 9425 default: 9426 rval = ENOTTY; 9427 break; 9428 } 9429 9430 /* 9431 * If set, reset the EXCL busy bit to 9432 * receive other exclusive access commands 9433 */ 9434 mutex_enter(&port->fp_mutex); 9435 if (port->fp_flag & FP_EXCL_BUSY) { 9436 port->fp_flag &= ~FP_EXCL_BUSY; 9437 } 9438 mutex_exit(&port->fp_mutex); 9439 9440 return (rval); 9441 } 9442 9443 9444 /* 9445 * This function assumes that the response length 9446 * is same regardless of data model (LP32 or LP64) 9447 * which is true for all the ioctls currently 9448 * supported. 9449 */ 9450 static int 9451 fp_copyout(void *from, void *to, size_t len, int mode) 9452 { 9453 return (ddi_copyout(from, to, len, mode)); 9454 } 9455 9456 /* 9457 * This function does the set rnid 9458 */ 9459 static int 9460 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9461 { 9462 int rval = 0; 9463 fc_rnid_t *rnid; 9464 fc_fca_pm_t pm; 9465 9466 /* Allocate memory for node id block */ 9467 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9468 9469 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9470 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9471 kmem_free(rnid, sizeof (fc_rnid_t)); 9472 return (EFAULT); 9473 } 9474 9475 /* Prepare the port management structure */ 9476 bzero((caddr_t)&pm, sizeof (pm)); 9477 9478 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9479 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9480 pm.pm_data_len = sizeof (*rnid); 9481 pm.pm_data_buf = (caddr_t)rnid; 9482 9483 /* Get the adapter's node data */ 9484 rval = port->fp_fca_tran->fca_port_manage( 9485 port->fp_fca_handle, &pm); 9486 9487 if (rval != FC_SUCCESS) { 9488 fcio->fcio_errno = rval; 9489 rval = EIO; 9490 if (fp_fcio_copyout(fcio, data, mode)) { 9491 rval = EFAULT; 9492 } 9493 } else { 9494 mutex_enter(&port->fp_mutex); 9495 /* copy to the port structure */ 9496 bcopy(rnid, &port->fp_rnid_params, 9497 sizeof (port->fp_rnid_params)); 9498 mutex_exit(&port->fp_mutex); 9499 } 9500 9501 kmem_free(rnid, sizeof (fc_rnid_t)); 9502 9503 if (rval != FC_SUCCESS) { 9504 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9505 } 9506 9507 return (rval); 9508 } 9509 9510 /* 9511 * This function does the local pwwn get rnid 9512 */ 9513 static int 9514 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9515 { 9516 fc_rnid_t *rnid; 9517 fc_fca_pm_t pm; 9518 int rval = 0; 9519 uint32_t ret; 9520 9521 /* Allocate memory for rnid data block */ 9522 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9523 9524 mutex_enter(&port->fp_mutex); 9525 if (port->fp_rnid_init == 1) { 9526 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9527 mutex_exit(&port->fp_mutex); 9528 /* xfer node info to userland */ 9529 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9530 sizeof (*rnid), mode) == 0) { 9531 if (fp_fcio_copyout(fcio, data, mode)) { 9532 rval = EFAULT; 9533 } 9534 } else { 9535 rval = EFAULT; 9536 } 9537 9538 kmem_free(rnid, sizeof (fc_rnid_t)); 9539 9540 if (rval != FC_SUCCESS) { 9541 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9542 rval); 9543 } 9544 9545 return (rval); 9546 } 9547 mutex_exit(&port->fp_mutex); 9548 9549 /* Prepare the port management structure */ 9550 bzero((caddr_t)&pm, sizeof (pm)); 9551 9552 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9553 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9554 pm.pm_data_len = sizeof (fc_rnid_t); 9555 pm.pm_data_buf = (caddr_t)rnid; 9556 9557 /* Get the adapter's node data */ 9558 ret = port->fp_fca_tran->fca_port_manage( 9559 port->fp_fca_handle, 9560 &pm); 9561 9562 if (ret == FC_SUCCESS) { 9563 /* initialize in the port_info */ 9564 mutex_enter(&port->fp_mutex); 9565 port->fp_rnid_init = 1; 9566 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9567 mutex_exit(&port->fp_mutex); 9568 9569 /* xfer node info to userland */ 9570 if (ddi_copyout((void *)rnid, 9571 (void *)fcio->fcio_obuf, 9572 sizeof (*rnid), mode) == 0) { 9573 if (fp_fcio_copyout(fcio, data, 9574 mode)) { 9575 rval = EFAULT; 9576 } 9577 } else { 9578 rval = EFAULT; 9579 } 9580 } else { 9581 rval = EIO; 9582 fcio->fcio_errno = ret; 9583 if (fp_fcio_copyout(fcio, data, mode)) { 9584 rval = EFAULT; 9585 } 9586 } 9587 9588 kmem_free(rnid, sizeof (fc_rnid_t)); 9589 9590 if (rval != FC_SUCCESS) { 9591 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9592 } 9593 9594 return (rval); 9595 } 9596 9597 static int 9598 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9599 la_wwn_t *pwwn) 9600 { 9601 int rval = 0; 9602 fc_remote_port_t *pd; 9603 fp_cmd_t *cmd; 9604 job_request_t *job; 9605 la_els_rnid_acc_t *rnid_acc; 9606 9607 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9608 if (pd == NULL) { 9609 /* 9610 * We can safely assume that the destination port 9611 * is logged in. Either userland will explicitly 9612 * log in before issuing the RNID ioctl or the device would 9613 * have been configured, meaning it is already logged in. 9614 */ 9615 9616 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9617 9618 return (ENXIO); 9619 } 9620 /* 9621 * Allocate job structure and set job_code as DUMMY, 9622 * because we will not go through the job thread. 9623 * Instead fp_sendcmd() is called directly here. 9624 */ 9625 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9626 NULL, NULL, KM_SLEEP); 9627 9628 ASSERT(job != NULL); 9629 9630 job->job_counter = 1; 9631 9632 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9633 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9634 if (cmd == NULL) { 9635 fcio->fcio_errno = FC_NOMEM; 9636 rval = ENOMEM; 9637 9638 fctl_dealloc_job(job); 9639 if (fp_fcio_copyout(fcio, data, mode)) { 9640 rval = EFAULT; 9641 } 9642 9643 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9644 9645 return (rval); 9646 } 9647 9648 /* Allocate memory for node id accept block */ 9649 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9650 9651 mutex_enter(&port->fp_mutex); 9652 mutex_enter(&pd->pd_mutex); 9653 9654 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9655 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9656 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9657 cmd->cmd_retry_count = 1; 9658 cmd->cmd_ulp_pkt = NULL; 9659 9660 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9661 9662 job->job_private = (void *)rnid_acc; 9663 9664 pd->pd_flags = PD_ELS_IN_PROGRESS; 9665 9666 mutex_exit(&pd->pd_mutex); 9667 mutex_exit(&port->fp_mutex); 9668 9669 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9670 fctl_jobwait(job); 9671 fcio->fcio_errno = job->job_result; 9672 if (job->job_result == FC_SUCCESS) { 9673 int rnid_cnt; 9674 ASSERT(pd != NULL); 9675 /* 9676 * node id block is now available.
9677 * Copy it to userland 9678 */ 9679 ASSERT(job->job_private == (void *)rnid_acc); 9680 9681 /* get the response length */ 9682 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9683 rnid_acc->hdr.cmn_len + 9684 rnid_acc->hdr.specific_len; 9685 9686 if (fcio->fcio_olen < rnid_cnt) { 9687 rval = EINVAL; 9688 } else if (ddi_copyout((void *)rnid_acc, 9689 (void *)fcio->fcio_obuf, 9690 rnid_cnt, mode) == 0) { 9691 if (fp_fcio_copyout(fcio, data, 9692 mode)) { 9693 rval = EFAULT; 9694 } 9695 } else { 9696 rval = EFAULT; 9697 } 9698 } else { 9699 rval = EIO; 9700 } 9701 } else { 9702 rval = EIO; 9703 if (pd) { 9704 mutex_enter(&pd->pd_mutex); 9705 pd->pd_flags = PD_IDLE; 9706 mutex_exit(&pd->pd_mutex); 9707 } 9708 fp_free_pkt(cmd); 9709 } 9710 9711 fctl_dealloc_job(job); 9712 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9713 9714 if (fp_fcio_copyout(fcio, data, mode)) { 9715 rval = EFAULT; 9716 } 9717 9718 if (rval != FC_SUCCESS) { 9719 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9720 } 9721 9722 return (rval); 9723 } 9724 9725 /* 9726 * Copy out to userland 9727 */ 9728 static int 9729 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9730 { 9731 int rval; 9732 9733 #ifdef _MULTI_DATAMODEL 9734 switch (ddi_model_convert_from(mode & FMODELS)) { 9735 case DDI_MODEL_ILP32: { 9736 struct fcio32 fcio32; 9737 9738 fcio32.fcio_xfer = fcio->fcio_xfer; 9739 fcio32.fcio_cmd = fcio->fcio_cmd; 9740 fcio32.fcio_flags = fcio->fcio_flags; 9741 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9742 fcio32.fcio_ilen = fcio->fcio_ilen; 9743 fcio32.fcio_ibuf = 9744 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9745 fcio32.fcio_olen = fcio->fcio_olen; 9746 fcio32.fcio_obuf = 9747 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9748 fcio32.fcio_alen = fcio->fcio_alen; 9749 fcio32.fcio_abuf = 9750 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9751 fcio32.fcio_errno = fcio->fcio_errno; 9752 9753 rval = ddi_copyout((void *)&fcio32, (void *)data, 9754 sizeof (struct fcio32), mode); 9755 break; 9756 } 9757 case DDI_MODEL_NONE: 9758 rval = ddi_copyout((void *)fcio, (void *)data, 9759 sizeof (fcio_t), mode); 9760 break; 9761 } 9762 #else 9763 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9764 #endif 9765 9766 return (rval); 9767 } 9768 9769 9770 static void 9771 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9772 { 9773 uint32_t listlen; 9774 fc_portmap_t *changelist; 9775 9776 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9777 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9778 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9779 9780 listlen = 0; 9781 changelist = NULL; 9782 9783 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9784 if (port->fp_statec_busy > 1) { 9785 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9786 } 9787 } 9788 mutex_exit(&port->fp_mutex); 9789 9790 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9791 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9792 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9793 listlen, listlen, KM_SLEEP); 9794 9795 mutex_enter(&port->fp_mutex); 9796 } else { 9797 ASSERT(changelist == NULL && listlen == 0); 9798 mutex_enter(&port->fp_mutex); 9799 if (--port->fp_statec_busy == 0) { 9800 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9801 } 9802 } 9803 } 9804 9805 static int 9806 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9807 { 9808 int rval; 9809 int count; 9810 int index; 9811 int num_devices; 9812 fc_remote_node_t *node; 9813 fc_port_dev_t *devlist; 9814 struct pwwn_hash 
*head; 9815 fc_remote_port_t *pd; 9816 9817 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9818 9819 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9820 9821 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9822 9823 for (count = index = 0; index < pwwn_table_size; index++) { 9824 head = &port->fp_pwwn_table[index]; 9825 pd = head->pwwn_head; 9826 while (pd != NULL) { 9827 mutex_enter(&pd->pd_mutex); 9828 if (pd->pd_state == PORT_DEVICE_INVALID) { 9829 mutex_exit(&pd->pd_mutex); 9830 pd = pd->pd_wwn_hnext; 9831 continue; 9832 } 9833 9834 devlist[count].dev_state = pd->pd_state; 9835 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9836 devlist[count].dev_did = pd->pd_port_id; 9837 devlist[count].dev_did.priv_lilp_posit = 9838 (uint8_t)(index & 0xff); 9839 bcopy((caddr_t)pd->pd_fc4types, 9840 (caddr_t)devlist[count].dev_type, 9841 sizeof (pd->pd_fc4types)); 9842 9843 bcopy((caddr_t)&pd->pd_port_name, 9844 (caddr_t)&devlist[count].dev_pwwn, 9845 sizeof (la_wwn_t)); 9846 9847 node = pd->pd_remote_nodep; 9848 mutex_exit(&pd->pd_mutex); 9849 9850 if (node) { 9851 mutex_enter(&node->fd_mutex); 9852 bcopy((caddr_t)&node->fd_node_name, 9853 (caddr_t)&devlist[count].dev_nwwn, 9854 sizeof (la_wwn_t)); 9855 mutex_exit(&node->fd_mutex); 9856 } 9857 count++; 9858 if (count >= num_devices) { 9859 goto found; 9860 } 9861 } 9862 } 9863 found: 9864 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9865 sizeof (count), mode)) { 9866 rval = FC_FAILURE; 9867 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9868 sizeof (fc_port_dev_t) * num_devices, mode)) { 9869 rval = FC_FAILURE; 9870 } else { 9871 rval = FC_SUCCESS; 9872 } 9873 9874 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9875 9876 return (rval); 9877 } 9878 9879 9880 /* 9881 * Handle Fabric ONLINE 9882 */ 9883 static void 9884 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 9885 { 9886 int index; 9887 int rval; 9888 int dbg_count; 9889 int count = 0; 9890 char ww_name[17]; 9891 uint32_t d_id; 9892 uint32_t listlen; 9893 fctl_ns_req_t *ns_cmd; 9894 struct pwwn_hash *head; 9895 fc_remote_port_t *pd; 9896 fc_remote_port_t *npd; 9897 fc_portmap_t *changelist; 9898 9899 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9900 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 9901 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9902 9903 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 9904 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 9905 0, KM_SLEEP); 9906 9907 ASSERT(ns_cmd != NULL); 9908 9909 ns_cmd->ns_cmd_code = NS_GID_PN; 9910 9911 /* 9912 * Check if orphans are showing up now 9913 */ 9914 if (port->fp_orphan_count) { 9915 fc_orphan_t *orp; 9916 fc_orphan_t *norp = NULL; 9917 fc_orphan_t *prev = NULL; 9918 9919 for (orp = port->fp_orphan_list; orp; orp = norp) { 9920 norp = orp->orp_next; 9921 mutex_exit(&port->fp_mutex); 9922 orp->orp_nscan++; 9923 9924 job->job_counter = 1; 9925 job->job_result = FC_SUCCESS; 9926 9927 ((ns_req_gid_pn_t *) 9928 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 9929 ((ns_resp_gid_pn_t *) 9930 ns_cmd->ns_data_buf)->pid.port_id = 0; 9931 ((ns_resp_gid_pn_t *) 9932 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 9933 9934 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 9935 if (rval == FC_SUCCESS) { 9936 d_id = 9937 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 9938 pd = fp_create_remote_port_by_ns(port, 9939 d_id, KM_SLEEP); 9940 9941 if (pd != NULL) { 9942 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 9943 9944 fp_printf(port, CE_WARN, FP_LOG_ONLY, 9945 0, NULL, "N_x 
Port with D_ID=%x," 9946 " PWWN=%s reappeared in fabric", 9947 d_id, ww_name); 9948 9949 mutex_enter(&port->fp_mutex); 9950 if (prev) { 9951 prev->orp_next = orp->orp_next; 9952 } else { 9953 ASSERT(orp == 9954 port->fp_orphan_list); 9955 port->fp_orphan_list = 9956 orp->orp_next; 9957 } 9958 port->fp_orphan_count--; 9959 mutex_exit(&port->fp_mutex); 9960 kmem_free(orp, sizeof (*orp)); 9961 count++; 9962 9963 mutex_enter(&pd->pd_mutex); 9964 pd->pd_flags = PD_ELS_MARK; 9965 9966 mutex_exit(&pd->pd_mutex); 9967 } else { 9968 prev = orp; 9969 } 9970 } else { 9971 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 9972 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 9973 9974 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 9975 NULL, 9976 " Port WWN %s removed from orphan" 9977 " list after %d scans", ww_name, 9978 orp->orp_nscan); 9979 9980 mutex_enter(&port->fp_mutex); 9981 if (prev) { 9982 prev->orp_next = orp->orp_next; 9983 } else { 9984 ASSERT(orp == 9985 port->fp_orphan_list); 9986 port->fp_orphan_list = 9987 orp->orp_next; 9988 } 9989 port->fp_orphan_count--; 9990 mutex_exit(&port->fp_mutex); 9991 9992 kmem_free(orp, sizeof (*orp)); 9993 } else { 9994 prev = orp; 9995 } 9996 } 9997 mutex_enter(&port->fp_mutex); 9998 } 9999 } 10000 10001 /* 10002 * Walk the Port WWN hash table, reestablish LOGIN 10003 * if a LOGIN is already performed on a particular 10004 * device; Any failure to LOGIN should mark the 10005 * port device OLD. 10006 */ 10007 for (index = 0; index < pwwn_table_size; index++) { 10008 head = &port->fp_pwwn_table[index]; 10009 npd = head->pwwn_head; 10010 10011 while ((pd = npd) != NULL) { 10012 la_wwn_t *pwwn; 10013 10014 npd = pd->pd_wwn_hnext; 10015 10016 /* 10017 * Don't count in the port devices that are new 10018 * unless the total number of devices visible 10019 * through this port is less than FP_MAX_DEVICES 10020 */ 10021 mutex_enter(&pd->pd_mutex); 10022 if (port->fp_dev_count >= FP_MAX_DEVICES || 10023 (port->fp_options & FP_TARGET_MODE)) { 10024 if (pd->pd_type == PORT_DEVICE_NEW || 10025 pd->pd_flags == PD_ELS_MARK || 10026 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10027 mutex_exit(&pd->pd_mutex); 10028 continue; 10029 } 10030 } else { 10031 if (pd->pd_flags == PD_ELS_MARK || 10032 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10033 mutex_exit(&pd->pd_mutex); 10034 continue; 10035 } 10036 pd->pd_type = PORT_DEVICE_OLD; 10037 } 10038 count++; 10039 10040 /* 10041 * Consult with the name server about D_ID changes 10042 */ 10043 job->job_counter = 1; 10044 job->job_result = FC_SUCCESS; 10045 10046 ((ns_req_gid_pn_t *) 10047 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10048 ((ns_resp_gid_pn_t *) 10049 ns_cmd->ns_data_buf)->pid.port_id = 0; 10050 10051 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10052 pid.priv_lilp_posit = 0; 10053 10054 pwwn = &pd->pd_port_name; 10055 pd->pd_flags = PD_ELS_MARK; 10056 10057 mutex_exit(&pd->pd_mutex); 10058 mutex_exit(&port->fp_mutex); 10059 10060 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10061 if (rval != FC_SUCCESS) { 10062 fc_wwn_to_str(pwwn, ww_name); 10063 10064 mutex_enter(&pd->pd_mutex); 10065 d_id = pd->pd_port_id.port_id; 10066 pd->pd_type = PORT_DEVICE_DELETE; 10067 mutex_exit(&pd->pd_mutex); 10068 10069 FP_TRACE(FP_NHEAD1(3, 0), 10070 "fp_fabric_online: PD " 10071 "disappeared; d_id=%x, PWWN=%s", 10072 d_id, ww_name); 10073 10074 FP_TRACE(FP_NHEAD2(9, 0), 10075 "N_x Port with D_ID=%x, PWWN=%s" 10076 " disappeared from fabric", d_id, 10077 ww_name); 10078 10079 mutex_enter(&port->fp_mutex); 10080 continue; 10081 } 10082 10083 d_id 
= BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10084 10085 mutex_enter(&port->fp_mutex); 10086 mutex_enter(&pd->pd_mutex); 10087 if (d_id != pd->pd_port_id.port_id) { 10088 fctl_delist_did_table(port, pd); 10089 fc_wwn_to_str(pwwn, ww_name); 10090 10091 FP_TRACE(FP_NHEAD2(9, 0), 10092 "D_ID of a device with PWWN %s changed." 10093 " New D_ID = %x, OLD D_ID = %x", ww_name, 10094 d_id, pd->pd_port_id.port_id); 10095 10096 pd->pd_port_id.port_id = BE_32(d_id); 10097 pd->pd_type = PORT_DEVICE_CHANGED; 10098 fctl_enlist_did_table(port, pd); 10099 } 10100 mutex_exit(&pd->pd_mutex); 10101 10102 } 10103 } 10104 10105 if (ns_cmd) { 10106 fctl_free_ns_cmd(ns_cmd); 10107 } 10108 10109 listlen = 0; 10110 changelist = NULL; 10111 if (count) { 10112 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10113 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10114 mutex_exit(&port->fp_mutex); 10115 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10116 mutex_enter(&port->fp_mutex); 10117 } 10118 10119 dbg_count = 0; 10120 10121 job->job_counter = count; 10122 10123 for (index = 0; index < pwwn_table_size; index++) { 10124 head = &port->fp_pwwn_table[index]; 10125 npd = head->pwwn_head; 10126 10127 while ((pd = npd) != NULL) { 10128 npd = pd->pd_wwn_hnext; 10129 10130 mutex_enter(&pd->pd_mutex); 10131 if (pd->pd_flags != PD_ELS_MARK) { 10132 mutex_exit(&pd->pd_mutex); 10133 continue; 10134 } 10135 10136 dbg_count++; 10137 10138 /* 10139 * If it is already marked deletion, nothing 10140 * else to do. 10141 */ 10142 if (pd->pd_type == PORT_DEVICE_DELETE) { 10143 pd->pd_type = PORT_DEVICE_OLD; 10144 10145 mutex_exit(&pd->pd_mutex); 10146 mutex_exit(&port->fp_mutex); 10147 fp_jobdone(job); 10148 mutex_enter(&port->fp_mutex); 10149 10150 continue; 10151 } 10152 10153 /* 10154 * If it is freshly discovered out of 10155 * the orphan list, nothing else to do 10156 */ 10157 if (pd->pd_type == PORT_DEVICE_NEW) { 10158 pd->pd_flags = PD_IDLE; 10159 10160 mutex_exit(&pd->pd_mutex); 10161 mutex_exit(&port->fp_mutex); 10162 fp_jobdone(job); 10163 mutex_enter(&port->fp_mutex); 10164 10165 continue; 10166 } 10167 10168 pd->pd_flags = PD_IDLE; 10169 d_id = pd->pd_port_id.port_id; 10170 10171 /* 10172 * Explicitly mark all devices OLD; successful 10173 * PLOGI should reset this to either NO_CHANGE 10174 * or CHANGED. 
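 * A device whose PLOGI fails therefore stays marked OLD, which is presumably how stale entries are told apart from surviving ones when the changelist is filled out for the ULPs further below.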
10175 */ 10176 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10177 pd->pd_type = PORT_DEVICE_OLD; 10178 } 10179 10180 mutex_exit(&pd->pd_mutex); 10181 mutex_exit(&port->fp_mutex); 10182 10183 rval = fp_port_login(port, d_id, job, 10184 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10185 10186 if (rval != FC_SUCCESS) { 10187 fp_jobdone(job); 10188 } 10189 mutex_enter(&port->fp_mutex); 10190 } 10191 } 10192 mutex_exit(&port->fp_mutex); 10193 10194 ASSERT(dbg_count == count); 10195 fp_jobwait(job); 10196 10197 mutex_enter(&port->fp_mutex); 10198 10199 ASSERT(port->fp_statec_busy > 0); 10200 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10201 if (port->fp_statec_busy > 1) { 10202 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10203 } 10204 } 10205 mutex_exit(&port->fp_mutex); 10206 } else { 10207 ASSERT(port->fp_statec_busy > 0); 10208 if (port->fp_statec_busy > 1) { 10209 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10210 } 10211 mutex_exit(&port->fp_mutex); 10212 } 10213 10214 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10215 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10216 10217 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10218 listlen, listlen, KM_SLEEP); 10219 10220 mutex_enter(&port->fp_mutex); 10221 } else { 10222 ASSERT(changelist == NULL && listlen == 0); 10223 mutex_enter(&port->fp_mutex); 10224 if (--port->fp_statec_busy == 0) { 10225 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10226 } 10227 } 10228 } 10229 10230 10231 /* 10232 * Fill out device list for userland ioctl in private loop 10233 */ 10234 static int 10235 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10236 { 10237 int rval; 10238 int count; 10239 int index; 10240 int num_devices; 10241 fc_remote_node_t *node; 10242 fc_port_dev_t *devlist; 10243 int lilp_device_count; 10244 fc_lilpmap_t *lilp_map; 10245 uchar_t *alpa_list; 10246 10247 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10248 10249 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10250 if (port->fp_total_devices > port->fp_dev_count && 10251 num_devices >= port->fp_total_devices) { 10252 job_request_t *job; 10253 10254 mutex_exit(&port->fp_mutex); 10255 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10256 job->job_counter = 1; 10257 10258 mutex_enter(&port->fp_mutex); 10259 fp_get_loopmap(port, job); 10260 mutex_exit(&port->fp_mutex); 10261 10262 fp_jobwait(job); 10263 fctl_dealloc_job(job); 10264 } else { 10265 mutex_exit(&port->fp_mutex); 10266 } 10267 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10268 10269 mutex_enter(&port->fp_mutex); 10270 10271 /* 10272 * Applications are accustomed to getting the device list in 10273 * LILP map order. The HBA firmware usually returns the device 10274 * map in the LILP map order and diagnostic applications would 10275 * prefer to receive in the device list in that order too 10276 */ 10277 lilp_map = &port->fp_lilp_map; 10278 alpa_list = &lilp_map->lilp_alpalist[0]; 10279 10280 /* 10281 * the length field corresponds to the offset in the LILP frame 10282 * which begins with 1. The thing to note here is that the 10283 * lilp_device_count is 1 more than fp->fp_total_devices since 10284 * the host adapter's alpa also shows up in the lilp map. We 10285 * don't however return details of the host adapter since 10286 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10287 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10288 * ioctl to obtain details about the host adapter port. 
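 * For example, per the note above, a private loop with three remote devices would have a LILP map length of 4 (the host adapter's own AL_PA is included), while at most three entries are returned to the caller here.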
10289 */ 10290 lilp_device_count = lilp_map->lilp_length; 10291 10292 for (count = index = 0; index < lilp_device_count && 10293 count < num_devices; index++) { 10294 uint32_t d_id; 10295 fc_remote_port_t *pd; 10296 10297 d_id = alpa_list[index]; 10298 10299 mutex_exit(&port->fp_mutex); 10300 pd = fctl_get_remote_port_by_did(port, d_id); 10301 mutex_enter(&port->fp_mutex); 10302 10303 if (pd != NULL) { 10304 mutex_enter(&pd->pd_mutex); 10305 10306 if (pd->pd_state == PORT_DEVICE_INVALID) { 10307 mutex_exit(&pd->pd_mutex); 10308 continue; 10309 } 10310 10311 devlist[count].dev_state = pd->pd_state; 10312 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10313 devlist[count].dev_did = pd->pd_port_id; 10314 devlist[count].dev_did.priv_lilp_posit = 10315 (uint8_t)(index & 0xff); 10316 bcopy((caddr_t)pd->pd_fc4types, 10317 (caddr_t)devlist[count].dev_type, 10318 sizeof (pd->pd_fc4types)); 10319 10320 bcopy((caddr_t)&pd->pd_port_name, 10321 (caddr_t)&devlist[count].dev_pwwn, 10322 sizeof (la_wwn_t)); 10323 10324 node = pd->pd_remote_nodep; 10325 mutex_exit(&pd->pd_mutex); 10326 10327 if (node) { 10328 mutex_enter(&node->fd_mutex); 10329 bcopy((caddr_t)&node->fd_node_name, 10330 (caddr_t)&devlist[count].dev_nwwn, 10331 sizeof (la_wwn_t)); 10332 mutex_exit(&node->fd_mutex); 10333 } 10334 count++; 10335 } 10336 } 10337 10338 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10339 sizeof (count), mode)) { 10340 rval = FC_FAILURE; 10341 } 10342 10343 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10344 sizeof (fc_port_dev_t) * num_devices, mode)) { 10345 rval = FC_FAILURE; 10346 } else { 10347 rval = FC_SUCCESS; 10348 } 10349 10350 kmem_free(devlist, sizeof (*devlist) * num_devices); 10351 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10352 10353 return (rval); 10354 } 10355 10356 10357 /* 10358 * Completion function for responses to unsolicited commands 10359 */ 10360 static void 10361 fp_unsol_intr(fc_packet_t *pkt) 10362 { 10363 fp_cmd_t *cmd; 10364 fc_local_port_t *port; 10365 10366 cmd = pkt->pkt_ulp_private; 10367 port = cmd->cmd_port; 10368 10369 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10370 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10371 "couldn't post response to unsolicited request;" 10372 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10373 pkt->pkt_resp_fhdr.rx_id); 10374 } 10375 10376 if (cmd == port->fp_els_resp_pkt) { 10377 mutex_enter(&port->fp_mutex); 10378 port->fp_els_resp_pkt_busy = 0; 10379 mutex_exit(&port->fp_mutex); 10380 return; 10381 } 10382 10383 fp_free_pkt(cmd); 10384 } 10385 10386 10387 /* 10388 * solicited LINIT ELS completion function 10389 */ 10390 static void 10391 fp_linit_intr(fc_packet_t *pkt) 10392 { 10393 fp_cmd_t *cmd; 10394 job_request_t *job; 10395 fc_linit_resp_t acc; 10396 10397 if (FP_IS_PKT_ERROR(pkt)) { 10398 (void) fp_common_intr(pkt, 1); 10399 return; 10400 } 10401 10402 cmd = pkt->pkt_ulp_private; 10403 job = cmd->cmd_job; 10404 10405 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10406 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10407 10408 if (acc.status != FC_LINIT_SUCCESS) { 10409 job->job_result = FC_FAILURE; 10410 } else { 10411 job->job_result = FC_SUCCESS; 10412 } 10413 fp_iodone(cmd); 10414 } 10415 10416 10417 /* 10418 * Decode the unsolicited request; For FC-4 Device and Link data frames 10419 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10420 * ELS requests, submit a request to the job_handler thread to work on it. 
10421 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10422 * and save much of the interrupt time processing of unsolicited ELS requests 10423 * and hand it off to the job_handler thread. 10424 */ 10425 static void 10426 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10427 { 10428 uchar_t r_ctl; 10429 uchar_t ls_code; 10430 uint32_t s_id; 10431 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10432 uint32_t cb_arg; 10433 fp_cmd_t *cmd; 10434 fc_local_port_t *port; 10435 job_request_t *job; 10436 fc_remote_port_t *pd; 10437 10438 port = port_handle; 10439 10440 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10441 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10442 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10443 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10444 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10445 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10446 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10447 buf->ub_buffer[0]); 10448 10449 if (type & 0x80000000) { 10450 /* 10451 * Huh ? Nothing much can be done without 10452 * a valid buffer. So just exit. 10453 */ 10454 return; 10455 } 10456 /* 10457 * If the unsolicited interrupts arrive while it isn't 10458 * safe to handle unsolicited callbacks; Drop them, yes, 10459 * drop them on the floor 10460 */ 10461 mutex_enter(&port->fp_mutex); 10462 port->fp_active_ubs++; 10463 if ((port->fp_soft_state & 10464 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10465 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10466 10467 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10468 "not ONLINE. s_id=%x, d_id=%x, type=%x, " 10469 "seq_id=%x, ox_id=%x, rx_id=%x" 10470 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10471 buf->ub_frame.type, buf->ub_frame.seq_id, 10472 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10473 10474 ASSERT(port->fp_active_ubs > 0); 10475 if (--(port->fp_active_ubs) == 0) { 10476 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10477 } 10478 10479 mutex_exit(&port->fp_mutex); 10480 10481 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10482 1, &buf->ub_token); 10483 10484 return; 10485 } 10486 10487 r_ctl = buf->ub_frame.r_ctl; 10488 s_id = buf->ub_frame.s_id; 10489 if (port->fp_active_ubs == 1) { 10490 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10491 } 10492 10493 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10494 port->fp_statec_busy) { 10495 mutex_exit(&port->fp_mutex); 10496 pd = fctl_get_remote_port_by_did(port, s_id); 10497 if (pd) { 10498 mutex_enter(&pd->pd_mutex); 10499 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10500 FP_TRACE(FP_NHEAD1(3, 0), 10501 "LOGO for LOGGED IN D_ID %x", 10502 buf->ub_frame.s_id); 10503 pd->pd_state = PORT_DEVICE_VALID; 10504 } 10505 mutex_exit(&pd->pd_mutex); 10506 } 10507 10508 mutex_enter(&port->fp_mutex); 10509 ASSERT(port->fp_active_ubs > 0); 10510 if (--(port->fp_active_ubs) == 0) { 10511 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10512 } 10513 mutex_exit(&port->fp_mutex); 10514 10515 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10516 1, &buf->ub_token); 10517 10518 FP_TRACE(FP_NHEAD1(3, 0), 10519 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10520 buf->ub_frame.s_id); 10521 return; 10522 } 10523 10524 if (port->fp_els_resp_pkt_busy == 0) { 10525 if (r_ctl == R_CTL_ELS_REQ) { 10526 ls_code = buf->ub_buffer[0]; 10527 10528 switch (ls_code) { 10529 case LA_ELS_PLOGI: 10530 
case LA_ELS_FLOGI: 10531 port->fp_els_resp_pkt_busy = 1; 10532 mutex_exit(&port->fp_mutex); 10533 fp_i_handle_unsol_els(port, buf); 10534 10535 mutex_enter(&port->fp_mutex); 10536 ASSERT(port->fp_active_ubs > 0); 10537 if (--(port->fp_active_ubs) == 0) { 10538 port->fp_soft_state &= 10539 ~FP_SOFT_IN_UNSOL_CB; 10540 } 10541 mutex_exit(&port->fp_mutex); 10542 port->fp_fca_tran->fca_ub_release( 10543 port->fp_fca_handle, 1, &buf->ub_token); 10544 10545 return; 10546 case LA_ELS_RSCN: 10547 if (++(port)->fp_rscn_count == 10548 FC_INVALID_RSCN_COUNT) { 10549 ++(port)->fp_rscn_count; 10550 } 10551 rscn_count = port->fp_rscn_count; 10552 break; 10553 10554 default: 10555 break; 10556 } 10557 } 10558 } else if ((r_ctl == R_CTL_ELS_REQ) && 10559 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10560 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10561 ++port->fp_rscn_count; 10562 } 10563 rscn_count = port->fp_rscn_count; 10564 } 10565 10566 mutex_exit(&port->fp_mutex); 10567 10568 switch (r_ctl & R_CTL_ROUTING) { 10569 case R_CTL_DEVICE_DATA: 10570 /* 10571 * If the unsolicited buffer is a CT IU, 10572 * have the job_handler thread work on it. 10573 */ 10574 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10575 break; 10576 } 10577 /* FALLTHROUGH */ 10578 10579 case R_CTL_FC4_SVC: { 10580 int sendup = 0; 10581 10582 /* 10583 * If a LOGIN isn't performed before this request, 10584 * shut the door on this port with a reply that a 10585 * LOGIN is required. We make an exception however 10586 * for IP broadcast packets and pass them through 10587 * to the IP ULP(s) to handle broadcast requests. 10588 * This is not a problem for private loop devices 10589 * but for fabric topologies we don't log into the 10590 * remote ports during port initialization and 10591 * the ULPs need to log into requesting ports on 10592 * demand. 10593 */ 10594 pd = fctl_get_remote_port_by_did(port, s_id); 10595 if (pd) { 10596 mutex_enter(&pd->pd_mutex); 10597 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10598 sendup++; 10599 } 10600 mutex_exit(&pd->pd_mutex); 10601 } else if ((pd == NULL) && 10602 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10603 (buf->ub_frame.d_id == 0xffffff || 10604 buf->ub_frame.d_id == 0x00)) { 10605 /* broadcast IP frame - so send it up via the job thread */ 10606 break; 10607 } 10608 10609 /* 10610 * Send all FC4 services via job thread too 10611 */ 10612 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10613 break; 10614 } 10615 10616 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10617 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10618 return; 10619 } 10620 10621 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10622 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10623 0, KM_NOSLEEP, pd); 10624 if (cmd != NULL) { 10625 fp_els_rjt_init(port, cmd, buf, 10626 FC_ACTION_NON_RETRYABLE, 10627 FC_REASON_LOGIN_REQUIRED, NULL); 10628 10629 if (fp_sendcmd(port, cmd, 10630 port->fp_fca_handle) != FC_SUCCESS) { 10631 fp_free_pkt(cmd); 10632 } 10633 } 10634 } 10635 10636 mutex_enter(&port->fp_mutex); 10637 ASSERT(port->fp_active_ubs > 0); 10638 if (--(port->fp_active_ubs) == 0) { 10639 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10640 } 10641 mutex_exit(&port->fp_mutex); 10642 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10643 1, &buf->ub_token); 10644 10645 return; 10646 } 10647 10648 default: 10649 break; 10650 } 10651 10652 /* 10653 * Submit a request to the job_handler thread to work 10654 * on the unsolicited request.
The potential side effect
	 * of this is that the unsolicited buffer takes a little
	 * longer to get released but we save interrupt time in
	 * the bargain.
	 */
	cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count;

	/*
	 * One way that the rscn_count will get used is described below:
	 *
	 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count.
	 * 2. Before mutex is released, a copy of it is stored in rscn_count.
	 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below)
	 *    by overloading the job_cb_arg to pass the rscn_count
	 * 4. When one of the routines processing the RSCN picks it up (ex:
	 *    fp_validate_rscn_page()), it passes this count in the map
	 *    structure (as part of the map_rscn_info structure member) to the
	 *    ULPs.
	 * 5. When ULPs make calls back to the transport (example interfaces for
	 *    this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they
	 *    can now pass back this count as part of the fc_packet's
	 *    pkt_ulp_rscn_count member. fcp does this currently.
	 * 6. When transport gets a call to transport a command on the wire, it
	 *    will check to see if there is a valid pkt_ulp_rsvd1 field in the
	 *    fc_packet. If there is, it will match that info with the current
	 *    rscn_count on that instance of the port. If they don't match up
	 *    then there was a newer RSCN. The ULP gets back an error code which
	 *    informs it about it - FC_DEVICE_BUSY_NEW_RSCN.
	 * 7. At this point the ULP is free to make up its own mind as to how to
	 *    handle this. Currently, fcp will reset its retry counters and keep
	 *    retrying the operation it was doing in anticipation of getting a
	 *    new state change call back for the new RSCN.
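	 *
	 * As an illustration of step 6, the check amounts to the following
	 * sketch (fp_rscn_is_stale() and ulp_count are made-up names used
	 * only for this comment; the real comparison is done inline in the
	 * transport code):
	 *
	 *	static int
	 *	fp_rscn_is_stale(fc_local_port_t *port, uint32_t ulp_count)
	 *	{
	 *		int	stale;
	 *
	 *		mutex_enter(&port->fp_mutex);
	 *		stale = (ulp_count != FC_INVALID_RSCN_COUNT &&
	 *		    ulp_count != port->fp_rscn_count);
	 *		mutex_exit(&port->fp_mutex);
	 *
	 *		return (stale);
	 *	}
	 *
	 * When such a check reports a stale count, the ULP request is failed
	 * with FC_DEVICE_BUSY_NEW_RSCN as described in step 6 above.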
10686 */ 10687 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10688 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10689 if (job == NULL) { 10690 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10691 "couldn't submit a job to the thread, failing.."); 10692 10693 mutex_enter(&port->fp_mutex); 10694 10695 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10696 --port->fp_rscn_count; 10697 } 10698 10699 ASSERT(port->fp_active_ubs > 0); 10700 if (--(port->fp_active_ubs) == 0) { 10701 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10702 } 10703 10704 mutex_exit(&port->fp_mutex); 10705 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10706 1, &buf->ub_token); 10707 10708 return; 10709 } 10710 job->job_private = (void *)buf; 10711 fctl_enque_job(port, job); 10712 } 10713 10714 10715 /* 10716 * Handle unsolicited requests 10717 */ 10718 static void 10719 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10720 job_request_t *job) 10721 { 10722 uchar_t r_ctl; 10723 uchar_t ls_code; 10724 uint32_t s_id; 10725 fp_cmd_t *cmd; 10726 fc_remote_port_t *pd; 10727 fp_unsol_spec_t *ub_spec; 10728 10729 r_ctl = buf->ub_frame.r_ctl; 10730 s_id = buf->ub_frame.s_id; 10731 10732 switch (r_ctl & R_CTL_ROUTING) { 10733 case R_CTL_EXTENDED_SVC: 10734 if (r_ctl != R_CTL_ELS_REQ) { 10735 break; 10736 } 10737 10738 ls_code = buf->ub_buffer[0]; 10739 switch (ls_code) { 10740 case LA_ELS_LOGO: 10741 case LA_ELS_ADISC: 10742 case LA_ELS_PRLO: 10743 pd = fctl_get_remote_port_by_did(port, s_id); 10744 if (pd == NULL) { 10745 if (!FC_IS_REAL_DEVICE(s_id)) { 10746 break; 10747 } 10748 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10749 break; 10750 } 10751 if ((cmd = fp_alloc_pkt(port, 10752 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10753 NULL)) == NULL) { 10754 /* 10755 * Can this actually fail when 10756 * given KM_SLEEP? (Could be used 10757 * this way in a number of places.) 10758 */ 10759 break; 10760 } 10761 10762 fp_els_rjt_init(port, cmd, buf, 10763 FC_ACTION_NON_RETRYABLE, 10764 FC_REASON_INVALID_LINK_CTRL, job); 10765 10766 if (fp_sendcmd(port, cmd, 10767 port->fp_fca_handle) != FC_SUCCESS) { 10768 fp_free_pkt(cmd); 10769 } 10770 10771 break; 10772 } 10773 if (ls_code == LA_ELS_LOGO) { 10774 fp_handle_unsol_logo(port, buf, pd, job); 10775 } else if (ls_code == LA_ELS_ADISC) { 10776 fp_handle_unsol_adisc(port, buf, pd, job); 10777 } else { 10778 fp_handle_unsol_prlo(port, buf, pd, job); 10779 } 10780 break; 10781 10782 case LA_ELS_PLOGI: 10783 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10784 break; 10785 10786 case LA_ELS_FLOGI: 10787 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10788 break; 10789 10790 case LA_ELS_RSCN: 10791 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10792 break; 10793 10794 default: 10795 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10796 ub_spec->port = port; 10797 ub_spec->buf = buf; 10798 10799 (void) taskq_dispatch(port->fp_taskq, 10800 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10801 return; 10802 } 10803 break; 10804 10805 case R_CTL_BASIC_SVC: 10806 /* 10807 * The unsolicited basic link services could be ABTS 10808 * and RMC (Or even a NOP). Just BA_RJT them until 10809 * such time there arises a need to handle them more 10810 * carefully. 
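		 * Note that a BA_RJT is only attempted below for Class 1 and
		 * Class 2 unsolicited buffers; Class 3 basic link service
		 * frames are simply dropped, with the buffer released by the
		 * common code at the end of this routine.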
10811 */ 10812 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10813 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10814 0, KM_SLEEP, NULL); 10815 if (cmd != NULL) { 10816 fp_ba_rjt_init(port, cmd, buf, job); 10817 if (fp_sendcmd(port, cmd, 10818 port->fp_fca_handle) != FC_SUCCESS) { 10819 fp_free_pkt(cmd); 10820 } 10821 } 10822 } 10823 break; 10824 10825 case R_CTL_DEVICE_DATA: 10826 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10827 /* 10828 * Mostly this is of type FC_TYPE_FC_SERVICES. 10829 * As we don't like any Unsolicited FC services 10830 * requests, we would do well to RJT them as 10831 * well. 10832 */ 10833 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10834 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10835 0, KM_SLEEP, NULL); 10836 if (cmd != NULL) { 10837 fp_els_rjt_init(port, cmd, buf, 10838 FC_ACTION_NON_RETRYABLE, 10839 FC_REASON_INVALID_LINK_CTRL, job); 10840 10841 if (fp_sendcmd(port, cmd, 10842 port->fp_fca_handle) != 10843 FC_SUCCESS) { 10844 fp_free_pkt(cmd); 10845 } 10846 } 10847 } 10848 break; 10849 } 10850 /* FALLTHROUGH */ 10851 10852 case R_CTL_FC4_SVC: 10853 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10854 ub_spec->port = port; 10855 ub_spec->buf = buf; 10856 10857 (void) taskq_dispatch(port->fp_taskq, 10858 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10859 return; 10860 10861 case R_CTL_LINK_CTL: 10862 /* 10863 * Turn deaf ear on unsolicited link control frames. 10864 * Typical unsolicited link control Frame is an LCR 10865 * (to reset End to End credit to the default login 10866 * value and abort current sequences for all classes) 10867 * An intelligent microcode/firmware should handle 10868 * this transparently at its level and not pass all 10869 * the way up here. 10870 * 10871 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 10872 * or F_BSY. P_RJT is chosen to be the most appropriate 10873 * at this time. 10874 */ 10875 /* FALLTHROUGH */ 10876 10877 default: 10878 /* 10879 * Just reject everything else as an invalid request. 10880 */ 10881 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10882 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10883 0, KM_SLEEP, NULL); 10884 if (cmd != NULL) { 10885 fp_els_rjt_init(port, cmd, buf, 10886 FC_ACTION_NON_RETRYABLE, 10887 FC_REASON_INVALID_LINK_CTRL, job); 10888 10889 if (fp_sendcmd(port, cmd, 10890 port->fp_fca_handle) != FC_SUCCESS) { 10891 fp_free_pkt(cmd); 10892 } 10893 } 10894 } 10895 break; 10896 } 10897 10898 mutex_enter(&port->fp_mutex); 10899 ASSERT(port->fp_active_ubs > 0); 10900 if (--(port->fp_active_ubs) == 0) { 10901 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10902 } 10903 mutex_exit(&port->fp_mutex); 10904 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10905 1, &buf->ub_token); 10906 } 10907 10908 10909 /* 10910 * Prepare a BA_RJT and send it over. 
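 * (Strictly speaking, this routine only builds the BA_RJT response; the
 * caller remains responsible for transmitting it with fp_sendcmd() and for
 * freeing it with fp_free_pkt() if the send fails, as fp_handle_unsol_buf()
 * does above.)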
10911 */ 10912 static void 10913 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 10914 job_request_t *job) 10915 { 10916 fc_packet_t *pkt; 10917 la_ba_rjt_t payload; 10918 10919 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 10920 10921 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 10922 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 10923 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 10924 cmd->cmd_retry_count = 1; 10925 cmd->cmd_ulp_pkt = NULL; 10926 10927 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 10928 cmd->cmd_job = job; 10929 10930 pkt = &cmd->cmd_pkt; 10931 10932 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 10933 10934 payload.reserved = 0; 10935 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 10936 payload.explanation = FC_EXPLN_NONE; 10937 payload.vendor = 0; 10938 10939 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 10940 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 10941 } 10942 10943 10944 /* 10945 * Prepare an LS_RJT and send it over 10946 */ 10947 static void 10948 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 10949 uchar_t action, uchar_t reason, job_request_t *job) 10950 { 10951 fc_packet_t *pkt; 10952 la_els_rjt_t payload; 10953 10954 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 10955 10956 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 10957 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 10958 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 10959 cmd->cmd_retry_count = 1; 10960 cmd->cmd_ulp_pkt = NULL; 10961 10962 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 10963 cmd->cmd_job = job; 10964 10965 pkt = &cmd->cmd_pkt; 10966 10967 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 10968 10969 payload.ls_code.ls_code = LA_ELS_RJT; 10970 payload.ls_code.mbz = 0; 10971 payload.action = action; 10972 payload.reason = reason; 10973 payload.reserved = 0; 10974 payload.vu = 0; 10975 10976 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 10977 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 10978 } 10979 10980 /* 10981 * Function: fp_prlo_acc_init 10982 * 10983 * Description: Initializes an Link Service Accept for a PRLO. 10984 * 10985 * Arguments: *port Local port through which the PRLO was 10986 * received. 10987 * cmd Command that will carry the accept. 10988 * *buf Unsolicited buffer containing the PRLO 10989 * request. 10990 * job Job request. 10991 * sleep Allocation mode. 10992 * 10993 * Return Value: *cmd Command containing the response. 10994 * 10995 * Context: Depends on the parameter sleep. 10996 */ 10997 fp_cmd_t * 10998 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 10999 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11000 { 11001 fp_cmd_t *cmd; 11002 fc_packet_t *pkt; 11003 la_els_prlo_t *req; 11004 size_t len; 11005 uint16_t flags; 11006 11007 req = (la_els_prlo_t *)buf->ub_buffer; 11008 len = (size_t)ntohs(req->payload_length); 11009 11010 /* 11011 * The payload of the accept to a PRLO has to be the exact match of 11012 * the payload of the request (at the exception of the code). 11013 */ 11014 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11015 11016 if (cmd) { 11017 /* 11018 * The fp command was successfully allocated. 
11019 */ 11020 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11021 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11022 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11023 cmd->cmd_retry_count = 1; 11024 cmd->cmd_ulp_pkt = NULL; 11025 11026 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11027 cmd->cmd_job = job; 11028 11029 pkt = &cmd->cmd_pkt; 11030 11031 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11032 FC_TYPE_EXTENDED_LS); 11033 11034 /* The code is overwritten for the copy. */ 11035 req->ls_code = LA_ELS_ACC; 11036 /* Response code is set. */ 11037 flags = ntohs(req->flags); 11038 flags &= ~SP_RESP_CODE_MASK; 11039 flags |= SP_RESP_CODE_REQ_EXECUTED; 11040 req->flags = htons(flags); 11041 11042 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11043 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11044 } 11045 return (cmd); 11046 } 11047 11048 /* 11049 * Prepare an ACC response to an ELS request 11050 */ 11051 static void 11052 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11053 job_request_t *job) 11054 { 11055 fc_packet_t *pkt; 11056 ls_code_t payload; 11057 11058 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11059 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11060 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11061 cmd->cmd_retry_count = 1; 11062 cmd->cmd_ulp_pkt = NULL; 11063 11064 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11065 cmd->cmd_job = job; 11066 11067 pkt = &cmd->cmd_pkt; 11068 11069 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11070 11071 payload.ls_code = LA_ELS_ACC; 11072 payload.mbz = 0; 11073 11074 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11075 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11076 } 11077 11078 /* 11079 * Unsolicited PRLO handler 11080 * 11081 * A Process Logout should be handled by the ULP that established it. However, 11082 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11083 * when a device implicitly logs out an initiator (for whatever reason) and 11084 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11085 * The logical thing to do for the device would be to send a LOGO in response 11086 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11087 * a PRLO instead. 11088 * 11089 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11090 * think that the Port Login has been lost. If we follow the Fibre Channel 11091 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11092 * the Port Login has also been lost, the remote port will reject the PRLI 11093 * indicating that we must PLOGI first. The initiator will then turn around and 11094 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11095 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11096 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11097 * needed would be received by FCP. FCP would have, then, to tell the transport 11098 * (fp) to PLOGI. The problem is, the transport would still think the Port 11099 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11100 * if you think it's not necessary". To work around that difficulty, the PRLO 11101 * is treated by the transport as a LOGO. The downside to it is a Port Login 11102 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11103 * has nothing to do with the PRLO) may be impacted. 
However, this is a
 * scenario very unlikely to happen. As of today the only ULP in Leadville
 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be
 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very
 * unlikely).
 */
static void
fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf,
    fc_remote_port_t *pd, job_request_t *job)
{
	int		busy;
	int		rval;
	int		retain;
	fp_cmd_t	*cmd;
	fc_portmap_t	*listptr;
	boolean_t	tolerance;
	la_els_prlo_t	*req;

	req = (la_els_prlo_t *)buf->ub_buffer;

	if ((ntohs(req->payload_length) !=
	    (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) ||
	    (req->page_length != sizeof (service_parameter_page_t))) {
		/*
		 * We are being very restrictive: only one page per payload
		 * is accepted. If that is not the case we reject the ELS,
		 * although ideally we should reply indicating that we handle
		 * only a single page per PRLO.
		 */
		goto fp_reject_prlo;
	}

	if (ntohs(req->payload_length) > buf->ub_bufsize) {
		/*
		 * This is in case the payload advertises a size bigger than
		 * what it really is.
		 */
		goto fp_reject_prlo;
	}

	mutex_enter(&port->fp_mutex);
	busy = port->fp_statec_busy;
	mutex_exit(&port->fp_mutex);

	mutex_enter(&pd->pd_mutex);
	tolerance = fctl_tc_increment(&pd->pd_logo_tc);
	if (!busy) {
		if (pd->pd_state != PORT_DEVICE_LOGGED_IN ||
		    pd->pd_state == PORT_DEVICE_INVALID ||
		    pd->pd_flags == PD_ELS_IN_PROGRESS ||
		    pd->pd_type == PORT_DEVICE_OLD) {
			busy++;
		}
	}

	if (busy) {
		mutex_exit(&pd->pd_mutex);

		FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x,"
		    "pd=%p - busy",
		    pd->pd_port_id.port_id, pd);

		if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
			goto fp_reject_prlo;
		}
	} else {
		retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ?
1 : 0; 11170 11171 if (tolerance) { 11172 fctl_tc_reset(&pd->pd_logo_tc); 11173 retain = 0; 11174 pd->pd_state = PORT_DEVICE_INVALID; 11175 } 11176 11177 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11178 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11179 tolerance, retain); 11180 11181 pd->pd_aux_flags |= PD_LOGGED_OUT; 11182 mutex_exit(&pd->pd_mutex); 11183 11184 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11185 if (cmd == NULL) { 11186 return; 11187 } 11188 11189 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11190 if (rval != FC_SUCCESS) { 11191 fp_free_pkt(cmd); 11192 return; 11193 } 11194 11195 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11196 11197 if (retain) { 11198 fp_unregister_login(pd); 11199 fctl_copy_portmap(listptr, pd); 11200 } else { 11201 uint32_t d_id; 11202 char ww_name[17]; 11203 11204 mutex_enter(&pd->pd_mutex); 11205 d_id = pd->pd_port_id.port_id; 11206 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11207 mutex_exit(&pd->pd_mutex); 11208 11209 FP_TRACE(FP_NHEAD2(9, 0), 11210 "N_x Port with D_ID=%x, PWWN=%s logged out" 11211 " %d times in %d us; Giving up", d_id, ww_name, 11212 FC_LOGO_TOLERANCE_LIMIT, 11213 FC_LOGO_TOLERANCE_TIME_LIMIT); 11214 11215 fp_fillout_old_map(listptr, pd, 0); 11216 listptr->map_type = PORT_DEVICE_OLD; 11217 } 11218 11219 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11220 return; 11221 } 11222 11223 fp_reject_prlo: 11224 11225 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11226 if (cmd != NULL) { 11227 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11228 FC_REASON_INVALID_LINK_CTRL, job); 11229 11230 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11231 fp_free_pkt(cmd); 11232 } 11233 } 11234 } 11235 11236 /* 11237 * Unsolicited LOGO handler 11238 */ 11239 static void 11240 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11241 fc_remote_port_t *pd, job_request_t *job) 11242 { 11243 int busy; 11244 int rval; 11245 int retain; 11246 fp_cmd_t *cmd; 11247 fc_portmap_t *listptr; 11248 boolean_t tolerance; 11249 11250 mutex_enter(&port->fp_mutex); 11251 busy = port->fp_statec_busy; 11252 mutex_exit(&port->fp_mutex); 11253 11254 mutex_enter(&pd->pd_mutex); 11255 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11256 if (!busy) { 11257 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11258 pd->pd_state == PORT_DEVICE_INVALID || 11259 pd->pd_flags == PD_ELS_IN_PROGRESS || 11260 pd->pd_type == PORT_DEVICE_OLD) { 11261 busy++; 11262 } 11263 } 11264 11265 if (busy) { 11266 mutex_exit(&pd->pd_mutex); 11267 11268 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11269 "pd=%p - busy", 11270 pd->pd_port_id.port_id, pd); 11271 11272 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11273 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11274 0, KM_SLEEP, pd); 11275 if (cmd != NULL) { 11276 fp_els_rjt_init(port, cmd, buf, 11277 FC_ACTION_NON_RETRYABLE, 11278 FC_REASON_INVALID_LINK_CTRL, job); 11279 11280 if (fp_sendcmd(port, cmd, 11281 port->fp_fca_handle) != FC_SUCCESS) { 11282 fp_free_pkt(cmd); 11283 } 11284 } 11285 } 11286 } else { 11287 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11288 11289 if (tolerance) { 11290 fctl_tc_reset(&pd->pd_logo_tc); 11291 retain = 0; 11292 pd->pd_state = PORT_DEVICE_INVALID; 11293 } 11294 11295 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11296 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11297 tolerance, retain); 11298 11299 pd->pd_aux_flags |= PD_LOGGED_OUT; 11300 mutex_exit(&pd->pd_mutex); 11301 11302 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11303 KM_SLEEP, pd); 11304 if (cmd == NULL) { 11305 return; 11306 } 11307 11308 fp_els_acc_init(port, cmd, buf, job); 11309 11310 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11311 if (rval != FC_SUCCESS) { 11312 fp_free_pkt(cmd); 11313 return; 11314 } 11315 11316 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11317 11318 if (retain) { 11319 job_request_t *job; 11320 fctl_ns_req_t *ns_cmd; 11321 11322 /* 11323 * when get LOGO, first try to get PID from nameserver 11324 * if failed, then we do not need 11325 * send PLOGI to that remote port 11326 */ 11327 job = fctl_alloc_job( 11328 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11329 11330 if (job != NULL) { 11331 ns_cmd = fctl_alloc_ns_cmd( 11332 sizeof (ns_req_gid_pn_t), 11333 sizeof (ns_resp_gid_pn_t), 11334 sizeof (ns_resp_gid_pn_t), 11335 0, KM_SLEEP); 11336 if (ns_cmd != NULL) { 11337 int ret; 11338 job->job_result = FC_SUCCESS; 11339 ns_cmd->ns_cmd_code = NS_GID_PN; 11340 ((ns_req_gid_pn_t *) 11341 (ns_cmd->ns_cmd_buf))->pwwn = 11342 pd->pd_port_name; 11343 ret = fp_ns_query( 11344 port, ns_cmd, job, 1, KM_SLEEP); 11345 if ((ret != FC_SUCCESS) || 11346 (job->job_result != FC_SUCCESS)) { 11347 fctl_free_ns_cmd(ns_cmd); 11348 fctl_dealloc_job(job); 11349 FP_TRACE(FP_NHEAD2(9, 0), 11350 "NS query failed,", 11351 " delete pd"); 11352 goto delete_pd; 11353 } 11354 fctl_free_ns_cmd(ns_cmd); 11355 } 11356 fctl_dealloc_job(job); 11357 } 11358 fp_unregister_login(pd); 11359 fctl_copy_portmap(listptr, pd); 11360 } else { 11361 uint32_t d_id; 11362 char ww_name[17]; 11363 11364 delete_pd: 11365 mutex_enter(&pd->pd_mutex); 11366 d_id = pd->pd_port_id.port_id; 11367 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11368 mutex_exit(&pd->pd_mutex); 11369 11370 FP_TRACE(FP_NHEAD2(9, 0), 11371 "N_x Port with D_ID=%x, PWWN=%s logged out" 11372 " %d times in %d us; Giving up", d_id, ww_name, 11373 FC_LOGO_TOLERANCE_LIMIT, 11374 FC_LOGO_TOLERANCE_TIME_LIMIT); 11375 11376 fp_fillout_old_map(listptr, pd, 0); 11377 listptr->map_type = PORT_DEVICE_OLD; 11378 } 11379 11380 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11381 } 11382 } 11383 11384 11385 /* 11386 * Perform general purpose preparation of a response to an unsolicited request 11387 */ 11388 static void 11389 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11390 uchar_t r_ctl, uchar_t type) 11391 { 11392 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11393 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11394 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11395 pkt->pkt_cmd_fhdr.type = type; 11396 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11397 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11398 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11399 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11400 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11401 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11402 pkt->pkt_cmd_fhdr.ro = 0; 11403 pkt->pkt_cmd_fhdr.rsvd = 0; 11404 pkt->pkt_comp = fp_unsol_intr; 11405 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11406 } 11407 11408 /* 11409 * Immediate handling of unsolicited FLOGI and PLOGI 
requests. In the 11410 * early development days of public loop soc+ firmware, numerous problems 11411 * were encountered (the details are undocumented and history now) which 11412 * led to the birth of this function. 11413 * 11414 * If a pre-allocated unsolicited response packet is free, send out an 11415 * immediate response, otherwise submit the request to the port thread 11416 * to do the deferred processing. 11417 */ 11418 static void 11419 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11420 { 11421 int sent; 11422 int f_port; 11423 int do_acc; 11424 fp_cmd_t *cmd; 11425 la_els_logi_t *payload; 11426 fc_remote_port_t *pd; 11427 char dww_name[17]; 11428 11429 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11430 11431 cmd = port->fp_els_resp_pkt; 11432 11433 mutex_enter(&port->fp_mutex); 11434 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11435 mutex_exit(&port->fp_mutex); 11436 11437 switch (buf->ub_buffer[0]) { 11438 case LA_ELS_PLOGI: { 11439 int small; 11440 11441 payload = (la_els_logi_t *)buf->ub_buffer; 11442 11443 f_port = FP_IS_F_PORT(payload-> 11444 common_service.cmn_features) ? 1 : 0; 11445 11446 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11447 &payload->nport_ww_name); 11448 pd = fctl_get_remote_port_by_pwwn(port, 11449 &payload->nport_ww_name); 11450 if (pd) { 11451 mutex_enter(&pd->pd_mutex); 11452 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11453 /* 11454 * Most likely this means a cross login is in 11455 * progress or a device about to be yanked out. 11456 * Only accept the plogi if my wwn is smaller. 11457 */ 11458 if (pd->pd_type == PORT_DEVICE_OLD) { 11459 sent = 1; 11460 } 11461 /* 11462 * Stop plogi request (if any) 11463 * attempt from local side to speedup 11464 * the discovery progress. 11465 * Mark the pd as PD_PLOGI_RECEPIENT. 11466 */ 11467 if (f_port == 0 && small < 0) { 11468 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11469 } 11470 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11471 11472 mutex_exit(&pd->pd_mutex); 11473 11474 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11475 "Unsol PLOGI received. PD still exists in the " 11476 "PWWN list. pd=%p PWWN=%s, sent=%x", 11477 pd, dww_name, sent); 11478 11479 if (f_port == 0 && small < 0) { 11480 FP_TRACE(FP_NHEAD1(3, 0), 11481 "fp_i_handle_unsol_els: Mark the pd" 11482 " as plogi recipient, pd=%p, PWWN=%s" 11483 ", sent=%x", 11484 pd, dww_name, sent); 11485 } 11486 } else { 11487 sent = 0; 11488 } 11489 11490 /* 11491 * To avoid Login collisions, accept only if my WWN 11492 * is smaller than the requester (A curious side note 11493 * would be that this rule may not satisfy the PLOGIs 11494 * initiated by the switch from not-so-well known 11495 * ports such as 0xFFFC41) 11496 */ 11497 if ((f_port == 0 && small < 0) || 11498 (((small > 0 && do_acc) || 11499 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11500 if (fp_is_class_supported(port->fp_cos, 11501 buf->ub_class) == FC_FAILURE) { 11502 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11503 cmd->cmd_pkt.pkt_cmdlen = 11504 sizeof (la_els_rjt_t); 11505 cmd->cmd_pkt.pkt_rsplen = 0; 11506 fp_els_rjt_init(port, cmd, buf, 11507 FC_ACTION_NON_RETRYABLE, 11508 FC_REASON_CLASS_NOT_SUPP, NULL); 11509 FP_TRACE(FP_NHEAD1(3, 0), 11510 "fp_i_handle_unsol_els: " 11511 "Unsupported class. 
" 11512 "Rejecting PLOGI"); 11513 11514 } else { 11515 mutex_enter(&port->fp_mutex); 11516 port->fp_els_resp_pkt_busy = 0; 11517 mutex_exit(&port->fp_mutex); 11518 return; 11519 } 11520 } else { 11521 cmd->cmd_pkt.pkt_cmdlen = 11522 sizeof (la_els_logi_t); 11523 cmd->cmd_pkt.pkt_rsplen = 0; 11524 11525 /* 11526 * Sometime later, we should validate 11527 * the service parameters instead of 11528 * just accepting it. 11529 */ 11530 fp_login_acc_init(port, cmd, buf, NULL, 11531 KM_NOSLEEP); 11532 FP_TRACE(FP_NHEAD1(3, 0), 11533 "fp_i_handle_unsol_els: Accepting PLOGI," 11534 " f_port=%d, small=%d, do_acc=%d," 11535 " sent=%d.", f_port, small, do_acc, 11536 sent); 11537 /* 11538 * If fp_port_id is zero and topology is 11539 * Point-to-Point, get the local port id from 11540 * the d_id in the PLOGI request. 11541 * If the outgoing FLOGI hasn't been accepted, 11542 * the topology will be unknown here. But it's 11543 * still safe to save the d_id to fp_port_id, 11544 * just because it will be overwritten later 11545 * if the topology is not Point-to-Point. 11546 */ 11547 mutex_enter(&port->fp_mutex); 11548 if ((port->fp_port_id.port_id == 0) && 11549 (port->fp_topology == FC_TOP_PT_PT || 11550 port->fp_topology == FC_TOP_UNKNOWN)) { 11551 port->fp_port_id.port_id = 11552 buf->ub_frame.d_id; 11553 } 11554 mutex_exit(&port->fp_mutex); 11555 } 11556 } else { 11557 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11558 port->fp_options & FP_SEND_RJT) { 11559 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11560 cmd->cmd_pkt.pkt_rsplen = 0; 11561 fp_els_rjt_init(port, cmd, buf, 11562 FC_ACTION_NON_RETRYABLE, 11563 FC_REASON_LOGICAL_BSY, NULL); 11564 FP_TRACE(FP_NHEAD1(3, 0), 11565 "fp_i_handle_unsol_els: " 11566 "Rejecting PLOGI with Logical Busy." 11567 "Possible Login collision."); 11568 } else { 11569 mutex_enter(&port->fp_mutex); 11570 port->fp_els_resp_pkt_busy = 0; 11571 mutex_exit(&port->fp_mutex); 11572 return; 11573 } 11574 } 11575 break; 11576 } 11577 11578 case LA_ELS_FLOGI: 11579 if (fp_is_class_supported(port->fp_cos, 11580 buf->ub_class) == FC_FAILURE) { 11581 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11582 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11583 cmd->cmd_pkt.pkt_rsplen = 0; 11584 fp_els_rjt_init(port, cmd, buf, 11585 FC_ACTION_NON_RETRYABLE, 11586 FC_REASON_CLASS_NOT_SUPP, NULL); 11587 FP_TRACE(FP_NHEAD1(3, 0), 11588 "fp_i_handle_unsol_els: " 11589 "Unsupported Class. Rejecting FLOGI."); 11590 } else { 11591 mutex_enter(&port->fp_mutex); 11592 port->fp_els_resp_pkt_busy = 0; 11593 mutex_exit(&port->fp_mutex); 11594 return; 11595 } 11596 } else { 11597 mutex_enter(&port->fp_mutex); 11598 if (FC_PORT_STATE_MASK(port->fp_state) != 11599 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11600 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11601 mutex_exit(&port->fp_mutex); 11602 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11603 cmd->cmd_pkt.pkt_cmdlen = 11604 sizeof (la_els_rjt_t); 11605 cmd->cmd_pkt.pkt_rsplen = 0; 11606 fp_els_rjt_init(port, cmd, buf, 11607 FC_ACTION_NON_RETRYABLE, 11608 FC_REASON_INVALID_LINK_CTRL, 11609 NULL); 11610 FP_TRACE(FP_NHEAD1(3, 0), 11611 "fp_i_handle_unsol_els: " 11612 "Invalid Link Ctrl. 
" 11613 "Rejecting FLOGI."); 11614 } else { 11615 mutex_enter(&port->fp_mutex); 11616 port->fp_els_resp_pkt_busy = 0; 11617 mutex_exit(&port->fp_mutex); 11618 return; 11619 } 11620 } else { 11621 mutex_exit(&port->fp_mutex); 11622 cmd->cmd_pkt.pkt_cmdlen = 11623 sizeof (la_els_logi_t); 11624 cmd->cmd_pkt.pkt_rsplen = 0; 11625 /* 11626 * Let's not aggressively validate the N_Port's 11627 * service parameters until PLOGI. Suffice it 11628 * to give a hint that we are an N_Port and we 11629 * are game to some serious stuff here. 11630 */ 11631 fp_login_acc_init(port, cmd, buf, 11632 NULL, KM_NOSLEEP); 11633 FP_TRACE(FP_NHEAD1(3, 0), 11634 "fp_i_handle_unsol_els: " 11635 "Accepting FLOGI."); 11636 } 11637 } 11638 break; 11639 11640 default: 11641 return; 11642 } 11643 11644 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11645 mutex_enter(&port->fp_mutex); 11646 port->fp_els_resp_pkt_busy = 0; 11647 mutex_exit(&port->fp_mutex); 11648 } 11649 } 11650 11651 11652 /* 11653 * Handle unsolicited PLOGI request 11654 */ 11655 static void 11656 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11657 job_request_t *job, int sleep) 11658 { 11659 int sent; 11660 int small; 11661 int f_port; 11662 int do_acc; 11663 fp_cmd_t *cmd; 11664 la_wwn_t *swwn; 11665 la_wwn_t *dwwn; 11666 la_els_logi_t *payload; 11667 fc_remote_port_t *pd; 11668 char dww_name[17]; 11669 11670 payload = (la_els_logi_t *)buf->ub_buffer; 11671 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11672 11673 mutex_enter(&port->fp_mutex); 11674 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11675 mutex_exit(&port->fp_mutex); 11676 11677 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11678 "type=%x, f_ctl=%x" 11679 " seq_id=%x, ox_id=%x, rx_id=%x" 11680 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11681 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11682 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11683 11684 swwn = &port->fp_service_params.nport_ww_name; 11685 dwwn = &payload->nport_ww_name; 11686 small = fctl_wwn_cmp(swwn, dwwn); 11687 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11688 if (pd) { 11689 mutex_enter(&pd->pd_mutex); 11690 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11691 /* 11692 * Most likely this means a cross login is in 11693 * progress or a device about to be yanked out. 11694 * Only accept the plogi if my wwn is smaller. 11695 */ 11696 11697 if (pd->pd_type == PORT_DEVICE_OLD) { 11698 sent = 1; 11699 } 11700 /* 11701 * Stop plogi request (if any) 11702 * attempt from local side to speedup 11703 * the discovery progress. 11704 * Mark the pd as PD_PLOGI_RECEPIENT. 11705 */ 11706 if (f_port == 0 && small < 0) { 11707 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11708 } 11709 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11710 11711 mutex_exit(&pd->pd_mutex); 11712 11713 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11714 " received. PD still exists in the PWWN list. pd=%p " 11715 "PWWN=%s, sent=%x", pd, dww_name, sent); 11716 11717 if (f_port == 0 && small < 0) { 11718 FP_TRACE(FP_NHEAD1(3, 0), 11719 "fp_handle_unsol_plogi: Mark the pd" 11720 " as plogi recipient, pd=%p, PWWN=%s" 11721 ", sent=%x", 11722 pd, dww_name, sent); 11723 } 11724 } else { 11725 sent = 0; 11726 } 11727 11728 /* 11729 * Avoid Login collisions by accepting only if my WWN is smaller. 
11730 * 11731 * A side note: There is no need to start a PLOGI from this end in 11732 * this context if login isn't going to be accepted for the 11733 * above reason as either a LIP (in private loop), RSCN (in 11734 * fabric topology), or an FLOGI (in point to point - Huh ? 11735 * check FC-PH) would normally drive the PLOGI from this end. 11736 * At this point of time there is no need for an inbound PLOGI 11737 * to kick an outbound PLOGI when it is going to be rejected 11738 * for the reason of WWN being smaller. However it isn't hard 11739 * to do that either (when such a need arises, start a timer 11740 * for a duration that extends beyond a normal device discovery 11741 * time and check if an outbound PLOGI did go before that, if 11742 * none fire one) 11743 * 11744 * Unfortunately, as it turned out, during booting, it is possible 11745 * to miss another initiator in the same loop as port driver 11746 * instances are serially attached. While preserving the above 11747 * comments for belly laughs, please kick an outbound PLOGI in 11748 * a non-switch environment (which is a pt pt between N_Ports or 11749 * a private loop) 11750 * 11751 * While preserving the above comments for amusement, send an 11752 * ACC if the PLOGI is going to be rejected for WWN being smaller 11753 * when no discovery is in progress at this end. Turn around 11754 * and make the port device as the PLOGI initiator, so that 11755 * during subsequent link/loop initialization, this end drives 11756 * the PLOGI (In fact both ends do in this particular case, but 11757 * only one wins) 11758 * 11759 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11760 * ports (such as 0xFFFC41) are accepted too. 11761 */ 11762 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11763 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11764 if (fp_is_class_supported(port->fp_cos, 11765 buf->ub_class) == FC_FAILURE) { 11766 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11767 cmd = fp_alloc_pkt(port, 11768 sizeof (la_els_logi_t), 0, sleep, pd); 11769 if (cmd == NULL) { 11770 return; 11771 } 11772 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11773 cmd->cmd_pkt.pkt_rsplen = 0; 11774 fp_els_rjt_init(port, cmd, buf, 11775 FC_ACTION_NON_RETRYABLE, 11776 FC_REASON_CLASS_NOT_SUPP, job); 11777 FP_TRACE(FP_NHEAD1(3, 0), 11778 "fp_handle_unsol_plogi: " 11779 "Unsupported class. rejecting PLOGI"); 11780 } 11781 } else { 11782 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11783 0, sleep, pd); 11784 if (cmd == NULL) { 11785 return; 11786 } 11787 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11788 cmd->cmd_pkt.pkt_rsplen = 0; 11789 11790 /* 11791 * Sometime later, we should validate the service 11792 * parameters instead of just accepting it. 11793 */ 11794 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11795 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11796 "Accepting PLOGI, f_port=%d, small=%d, " 11797 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11798 sent); 11799 11800 /* 11801 * If fp_port_id is zero and topology is 11802 * Point-to-Point, get the local port id from 11803 * the d_id in the PLOGI request. 11804 * If the outgoing FLOGI hasn't been accepted, 11805 * the topology will be unknown here. But it's 11806 * still safe to save the d_id to fp_port_id, 11807 * just because it will be overwritten later 11808 * if the topology is not Point-to-Point. 
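		 * (The immediate-response path in fp_i_handle_unsol_els()
		 * above performs the same fp_port_id fix-up.)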
11809 */ 11810 mutex_enter(&port->fp_mutex); 11811 if ((port->fp_port_id.port_id == 0) && 11812 (port->fp_topology == FC_TOP_PT_PT || 11813 port->fp_topology == FC_TOP_UNKNOWN)) { 11814 port->fp_port_id.port_id = 11815 buf->ub_frame.d_id; 11816 } 11817 mutex_exit(&port->fp_mutex); 11818 } 11819 } else { 11820 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11821 port->fp_options & FP_SEND_RJT) { 11822 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11823 0, sleep, pd); 11824 if (cmd == NULL) { 11825 return; 11826 } 11827 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11828 cmd->cmd_pkt.pkt_rsplen = 0; 11829 /* 11830 * Send out Logical busy to indicate 11831 * the detection of PLOGI collision 11832 */ 11833 fp_els_rjt_init(port, cmd, buf, 11834 FC_ACTION_NON_RETRYABLE, 11835 FC_REASON_LOGICAL_BSY, job); 11836 11837 fc_wwn_to_str(dwwn, dww_name); 11838 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11839 "Rejecting Unsol PLOGI with Logical Busy." 11840 "possible PLOGI collision. PWWN=%s, sent=%x", 11841 dww_name, sent); 11842 } else { 11843 return; 11844 } 11845 } 11846 11847 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11848 fp_free_pkt(cmd); 11849 } 11850 } 11851 11852 11853 /* 11854 * Handle mischievous turning over of our own FLOGI requests back to 11855 * us by the SOC+ microcode. In other words, look at the class of such 11856 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11857 * on the floor 11858 */ 11859 static void 11860 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11861 job_request_t *job, int sleep) 11862 { 11863 uint32_t state; 11864 uint32_t s_id; 11865 fp_cmd_t *cmd; 11866 11867 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 11868 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11869 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11870 0, sleep, NULL); 11871 if (cmd == NULL) { 11872 return; 11873 } 11874 fp_els_rjt_init(port, cmd, buf, 11875 FC_ACTION_NON_RETRYABLE, 11876 FC_REASON_CLASS_NOT_SUPP, job); 11877 } else { 11878 return; 11879 } 11880 } else { 11881 11882 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 11883 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 11884 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 11885 buf->ub_frame.s_id, buf->ub_frame.d_id, 11886 buf->ub_frame.type, buf->ub_frame.f_ctl, 11887 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 11888 buf->ub_frame.rx_id, buf->ub_frame.ro); 11889 11890 mutex_enter(&port->fp_mutex); 11891 state = FC_PORT_STATE_MASK(port->fp_state); 11892 s_id = port->fp_port_id.port_id; 11893 mutex_exit(&port->fp_mutex); 11894 11895 if (state != FC_STATE_ONLINE || 11896 (s_id && buf->ub_frame.s_id == s_id)) { 11897 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11898 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11899 0, sleep, NULL); 11900 if (cmd == NULL) { 11901 return; 11902 } 11903 fp_els_rjt_init(port, cmd, buf, 11904 FC_ACTION_NON_RETRYABLE, 11905 FC_REASON_INVALID_LINK_CTRL, job); 11906 FP_TRACE(FP_NHEAD1(3, 0), 11907 "fp_handle_unsol_flogi: " 11908 "Rejecting PLOGI. Invalid Link CTRL"); 11909 } else { 11910 return; 11911 } 11912 } else { 11913 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11914 0, sleep, NULL); 11915 if (cmd == NULL) { 11916 return; 11917 } 11918 /* 11919 * Let's not aggressively validate the N_Port's 11920 * service parameters until PLOGI. Suffice it 11921 * to give a hint that we are an N_Port and we 11922 * are game to some serious stuff here. 
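		 * (The ACC built by fp_login_acc_init() is essentially this
		 * port's login service parameters with the ls_code field
		 * rewritten to LA_ELS_ACC; see that routine below.)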
11923 */ 11924 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11925 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 11926 "Accepting PLOGI"); 11927 } 11928 } 11929 11930 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11931 fp_free_pkt(cmd); 11932 } 11933 } 11934 11935 11936 /* 11937 * Perform PLOGI accept 11938 */ 11939 static void 11940 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11941 job_request_t *job, int sleep) 11942 { 11943 fc_packet_t *pkt; 11944 fc_portmap_t *listptr; 11945 la_els_logi_t payload; 11946 11947 ASSERT(buf != NULL); 11948 11949 /* 11950 * If we are sending ACC to PLOGI and we haven't already 11951 * create port and node device handles, let's create them 11952 * here. 11953 */ 11954 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 11955 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 11956 int small; 11957 int do_acc; 11958 fc_remote_port_t *pd; 11959 la_els_logi_t *req; 11960 11961 req = (la_els_logi_t *)buf->ub_buffer; 11962 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11963 &req->nport_ww_name); 11964 11965 mutex_enter(&port->fp_mutex); 11966 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11967 mutex_exit(&port->fp_mutex); 11968 11969 pd = fctl_create_remote_port(port, &req->node_ww_name, 11970 &req->nport_ww_name, buf->ub_frame.s_id, 11971 PD_PLOGI_RECEPIENT, sleep); 11972 if (pd == NULL) { 11973 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 11974 "Couldn't create port device for d_id:0x%x", 11975 buf->ub_frame.s_id); 11976 11977 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 11978 "couldn't create port device d_id=%x", 11979 buf->ub_frame.s_id); 11980 } else { 11981 /* 11982 * usoc currently returns PLOGIs inline and 11983 * the maximum buffer size is 60 bytes or so. 11984 * So attempt not to look beyond what is in 11985 * the unsolicited buffer 11986 * 11987 * JNI also traverses this path sometimes 11988 */ 11989 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 11990 fp_register_login(NULL, pd, req, buf->ub_class); 11991 } else { 11992 mutex_enter(&pd->pd_mutex); 11993 if (pd->pd_login_count == 0) { 11994 pd->pd_login_count++; 11995 } 11996 pd->pd_state = PORT_DEVICE_LOGGED_IN; 11997 pd->pd_login_class = buf->ub_class; 11998 mutex_exit(&pd->pd_mutex); 11999 } 12000 12001 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12002 if (listptr != NULL) { 12003 fctl_copy_portmap(listptr, pd); 12004 (void) fp_ulp_devc_cb(port, listptr, 12005 1, 1, sleep, 0); 12006 } 12007 12008 if (small > 0 && do_acc) { 12009 mutex_enter(&pd->pd_mutex); 12010 pd->pd_recepient = PD_PLOGI_INITIATOR; 12011 mutex_exit(&pd->pd_mutex); 12012 } 12013 } 12014 } 12015 12016 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12017 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12018 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12019 cmd->cmd_retry_count = 1; 12020 cmd->cmd_ulp_pkt = NULL; 12021 12022 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12023 cmd->cmd_job = job; 12024 12025 pkt = &cmd->cmd_pkt; 12026 12027 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12028 12029 payload = port->fp_service_params; 12030 payload.ls_code.ls_code = LA_ELS_ACC; 12031 12032 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12033 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12034 12035 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12036 "bufsize:0x%x sizeof(la_els_logi):0x%x " 12037 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12038 "statec_busy:0x%x", buf->ub_buffer[0], 
buf->ub_frame.s_id, 12039 buf->ub_bufsize, sizeof (la_els_logi_t), 12040 port->fp_service_params.nport_ww_name.w.naa_id, 12041 port->fp_service_params.nport_ww_name.w.nport_id, 12042 port->fp_service_params.nport_ww_name.w.wwn_hi, 12043 port->fp_service_params.nport_ww_name.w.wwn_lo, 12044 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12045 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12046 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12047 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12048 port->fp_statec_busy); 12049 } 12050 12051 12052 #define RSCN_EVENT_NAME_LEN 256 12053 12054 /* 12055 * Handle RSCNs 12056 */ 12057 static void 12058 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12059 job_request_t *job, int sleep) 12060 { 12061 uint32_t mask; 12062 fp_cmd_t *cmd; 12063 uint32_t count; 12064 int listindex; 12065 int16_t len; 12066 fc_rscn_t *payload; 12067 fc_portmap_t *listptr; 12068 fctl_ns_req_t *ns_cmd; 12069 fc_affected_id_t *page; 12070 caddr_t nvname; 12071 nvlist_t *attr_list = NULL; 12072 12073 mutex_enter(&port->fp_mutex); 12074 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12075 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12076 --port->fp_rscn_count; 12077 } 12078 mutex_exit(&port->fp_mutex); 12079 return; 12080 } 12081 mutex_exit(&port->fp_mutex); 12082 12083 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12084 if (cmd != NULL) { 12085 fp_els_acc_init(port, cmd, buf, job); 12086 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12087 fp_free_pkt(cmd); 12088 } 12089 } 12090 12091 payload = (fc_rscn_t *)buf->ub_buffer; 12092 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12093 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN); 12094 12095 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12096 12097 if (len <= 0) { 12098 mutex_enter(&port->fp_mutex); 12099 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12100 --port->fp_rscn_count; 12101 } 12102 mutex_exit(&port->fp_mutex); 12103 12104 return; 12105 } 12106 12107 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12108 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12109 12110 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12111 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12112 12113 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12114 12115 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12116 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12117 0, sleep); 12118 if (ns_cmd == NULL) { 12119 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12120 12121 mutex_enter(&port->fp_mutex); 12122 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12123 --port->fp_rscn_count; 12124 } 12125 mutex_exit(&port->fp_mutex); 12126 12127 return; 12128 } 12129 12130 ns_cmd->ns_cmd_code = NS_GPN_ID; 12131 12132 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12133 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12134 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12135 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12136 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12137 12138 /* Only proceed if we can allocate nvname and the nvlist */ 12139 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12140 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12141 KM_NOSLEEP) == DDI_SUCCESS) { 12142 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12143 port->fp_instance) == 
DDI_SUCCESS && 12144 nvlist_add_byte_array(attr_list, "port-wwn", 12145 port->fp_service_params.nport_ww_name.raw_wwn, 12146 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12147 nvlist_free(attr_list); 12148 attr_list = NULL; 12149 } 12150 } 12151 12152 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12153 /* Add affected page to the event payload */ 12154 if (attr_list != NULL) { 12155 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12156 "affected_page_%d", listindex); 12157 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12158 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12159 /* We don't want to send a partial event, so dump it */ 12160 nvlist_free(attr_list); 12161 attr_list = NULL; 12162 } 12163 } 12164 /* 12165 * Query the NS to get the Port WWN for this 12166 * affected D_ID. 12167 */ 12168 mask = 0; 12169 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12170 case FC_RSCN_PORT_ADDRESS: 12171 fp_validate_rscn_page(port, page, job, ns_cmd, 12172 listptr, &listindex, sleep); 12173 12174 if (listindex == 0) { 12175 /* 12176 * We essentially did not process this RSCN. So, 12177 * ULPs are not going to be called and so we 12178 * decrement the rscn_count 12179 */ 12180 mutex_enter(&port->fp_mutex); 12181 if (--port->fp_rscn_count == 12182 FC_INVALID_RSCN_COUNT) { 12183 --port->fp_rscn_count; 12184 } 12185 mutex_exit(&port->fp_mutex); 12186 } 12187 break; 12188 12189 case FC_RSCN_AREA_ADDRESS: 12190 mask = 0xFFFF00; 12191 /* FALLTHROUGH */ 12192 12193 case FC_RSCN_DOMAIN_ADDRESS: 12194 if (!mask) { 12195 mask = 0xFF0000; 12196 } 12197 fp_validate_area_domain(port, page->aff_d_id, mask, 12198 job, sleep); 12199 break; 12200 12201 case FC_RSCN_FABRIC_ADDRESS: 12202 /* 12203 * We need to discover all the devices on this 12204 * port. 12205 */ 12206 fp_validate_area_domain(port, 0, 0, job, sleep); 12207 break; 12208 12209 default: 12210 break; 12211 } 12212 } 12213 if (attr_list != NULL) { 12214 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12215 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12216 NULL, DDI_SLEEP); 12217 nvlist_free(attr_list); 12218 } else { 12219 FP_TRACE(FP_NHEAD1(9, 0), 12220 "RSCN handled, but event not sent to userland"); 12221 } 12222 if (nvname != NULL) { 12223 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12224 } 12225 12226 if (ns_cmd) { 12227 fctl_free_ns_cmd(ns_cmd); 12228 } 12229 12230 if (listindex) { 12231 #ifdef DEBUG 12232 page = (fc_affected_id_t *)(buf->ub_buffer + 12233 sizeof (fc_rscn_t)); 12234 12235 if (listptr->map_did.port_id != page->aff_d_id) { 12236 FP_TRACE(FP_NHEAD1(9, 0), 12237 "PORT RSCN: processed=%x, reporting=%x", 12238 listptr->map_did.port_id, page->aff_d_id); 12239 } 12240 #endif 12241 12242 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12243 sleep, 0); 12244 } else { 12245 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12246 } 12247 } 12248 12249 12250 /* 12251 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12252 */ 12253 static void 12254 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12255 { 12256 int is_switch; 12257 int initiator; 12258 fc_local_port_t *port; 12259 12260 port = pd->pd_port; 12261 12262 /* This function has the following bunch of assumptions */ 12263 ASSERT(port != NULL); 12264 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12265 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12266 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12267 12268 pd->pd_state = PORT_DEVICE_INVALID; 12269 pd->pd_type = PORT_DEVICE_OLD; 12270 initiator = (pd->pd_recepient == 
PD_PLOGI_INITIATOR) ? 1 : 0; 12271 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12272 12273 fctl_delist_did_table(port, pd); 12274 fctl_delist_pwwn_table(port, pd); 12275 12276 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12277 " removed the PD=%p from DID and PWWN tables", 12278 port, pd->pd_port_id.port_id, pd); 12279 12280 if ((!flag) && port && initiator && is_switch) { 12281 (void) fctl_add_orphan_held(port, pd); 12282 } 12283 fctl_copy_portmap_held(map, pd); 12284 map->map_pd = pd; 12285 } 12286 12287 /* 12288 * Fill out old map for ULPs 12289 */ 12290 static void 12291 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12292 { 12293 int is_switch; 12294 int initiator; 12295 fc_local_port_t *port; 12296 12297 mutex_enter(&pd->pd_mutex); 12298 port = pd->pd_port; 12299 mutex_exit(&pd->pd_mutex); 12300 12301 mutex_enter(&port->fp_mutex); 12302 mutex_enter(&pd->pd_mutex); 12303 12304 pd->pd_state = PORT_DEVICE_INVALID; 12305 pd->pd_type = PORT_DEVICE_OLD; 12306 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12307 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12308 12309 fctl_delist_did_table(port, pd); 12310 fctl_delist_pwwn_table(port, pd); 12311 12312 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12313 " removed the PD=%p from DID and PWWN tables", 12314 port, pd->pd_port_id.port_id, pd); 12315 12316 mutex_exit(&pd->pd_mutex); 12317 mutex_exit(&port->fp_mutex); 12318 12319 ASSERT(port != NULL); 12320 if ((!flag) && port && initiator && is_switch) { 12321 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12322 } 12323 fctl_copy_portmap(map, pd); 12324 map->map_pd = pd; 12325 } 12326 12327 12328 /* 12329 * Fillout Changed Map for ULPs 12330 */ 12331 static void 12332 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12333 uint32_t *new_did, la_wwn_t *new_pwwn) 12334 { 12335 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12336 12337 pd->pd_type = PORT_DEVICE_CHANGED; 12338 if (new_did) { 12339 pd->pd_port_id.port_id = *new_did; 12340 } 12341 if (new_pwwn) { 12342 pd->pd_port_name = *new_pwwn; 12343 } 12344 mutex_exit(&pd->pd_mutex); 12345 12346 fctl_copy_portmap(map, pd); 12347 12348 mutex_enter(&pd->pd_mutex); 12349 pd->pd_type = PORT_DEVICE_NOCHANGE; 12350 } 12351 12352 12353 /* 12354 * Fillout New Name Server map 12355 */ 12356 static void 12357 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12358 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12359 { 12360 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12361 12362 if (handle) { 12363 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12364 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12365 DDI_DEV_AUTOINCR); 12366 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12367 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12368 DDI_DEV_AUTOINCR); 12369 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12370 (uint8_t *)gan_resp->gan_fc4types, 12371 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12372 } else { 12373 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12374 sizeof (gan_resp->gan_pwwn)); 12375 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12376 sizeof (gan_resp->gan_nwwn)); 12377 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12378 sizeof (gan_resp->gan_fc4types)); 12379 } 12380 port_map->map_did.port_id = d_id; 12381 port_map->map_did.priv_lilp_posit = 0; 12382 port_map->map_hard_addr.hard_addr = 0; 12383 port_map->map_hard_addr.rsvd = 0; 12384 
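	/*
	 * Flag the entry as a newly discovered device; no remote port
	 * structure (map_pd) is attached to it yet.
	 */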
port_map->map_state = PORT_DEVICE_INVALID; 12385 port_map->map_type = PORT_DEVICE_NEW; 12386 port_map->map_flags = 0; 12387 port_map->map_pd = NULL; 12388 12389 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12390 12391 ASSERT(port != NULL); 12392 } 12393 12394 12395 /* 12396 * Perform LINIT ELS 12397 */ 12398 static int 12399 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12400 job_request_t *job) 12401 { 12402 int rval; 12403 uint32_t d_id; 12404 uint32_t s_id; 12405 uint32_t lfa; 12406 uchar_t class; 12407 uint32_t ret; 12408 fp_cmd_t *cmd; 12409 fc_porttype_t ptype; 12410 fc_packet_t *pkt; 12411 fc_linit_req_t payload; 12412 fc_remote_port_t *pd; 12413 12414 rval = 0; 12415 12416 ASSERT(job != NULL); 12417 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12418 12419 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12420 if (pd == NULL) { 12421 fctl_ns_req_t *ns_cmd; 12422 12423 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12424 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12425 0, sleep); 12426 12427 if (ns_cmd == NULL) { 12428 return (FC_NOMEM); 12429 } 12430 job->job_result = FC_SUCCESS; 12431 ns_cmd->ns_cmd_code = NS_GID_PN; 12432 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12433 12434 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12435 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12436 fctl_free_ns_cmd(ns_cmd); 12437 return (FC_FAILURE); 12438 } 12439 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12440 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12441 12442 fctl_free_ns_cmd(ns_cmd); 12443 lfa = d_id & 0xFFFF00; 12444 12445 /* 12446 * Given this D_ID, get the port type to see if 12447 * we can do LINIT on the LFA 12448 */ 12449 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12450 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12451 0, sleep); 12452 12453 if (ns_cmd == NULL) { 12454 return (FC_NOMEM); 12455 } 12456 12457 job->job_result = FC_SUCCESS; 12458 ns_cmd->ns_cmd_code = NS_GPT_ID; 12459 12460 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12461 ((ns_req_gpt_id_t *) 12462 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12463 12464 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12465 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12466 fctl_free_ns_cmd(ns_cmd); 12467 return (FC_FAILURE); 12468 } 12469 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12470 12471 fctl_free_ns_cmd(ns_cmd); 12472 12473 switch (ptype.port_type) { 12474 case FC_NS_PORT_NL: 12475 case FC_NS_PORT_F_NL: 12476 case FC_NS_PORT_FL: 12477 break; 12478 12479 default: 12480 return (FC_FAILURE); 12481 } 12482 } else { 12483 mutex_enter(&pd->pd_mutex); 12484 ptype = pd->pd_porttype; 12485 12486 switch (pd->pd_porttype.port_type) { 12487 case FC_NS_PORT_NL: 12488 case FC_NS_PORT_F_NL: 12489 case FC_NS_PORT_FL: 12490 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12491 break; 12492 12493 default: 12494 mutex_exit(&pd->pd_mutex); 12495 return (FC_FAILURE); 12496 } 12497 mutex_exit(&pd->pd_mutex); 12498 } 12499 12500 mutex_enter(&port->fp_mutex); 12501 s_id = port->fp_port_id.port_id; 12502 class = port->fp_ns_login_class; 12503 mutex_exit(&port->fp_mutex); 12504 12505 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12506 sizeof (fc_linit_resp_t), sleep, pd); 12507 if (cmd == NULL) { 12508 return (FC_NOMEM); 12509 } 12510 12511 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12512 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12513 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 
12514 cmd->cmd_retry_count = fp_retry_count; 12515 cmd->cmd_ulp_pkt = NULL; 12516 12517 pkt = &cmd->cmd_pkt; 12518 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12519 12520 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12521 12522 /* 12523 * How does LIP work by the way ? 12524 * If the L_Port receives three consecutive identical ordered 12525 * sets whose first two characters (fully decoded) are equal to 12526 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12527 * recognize a Loop Initialization Primitive sequence. The 12528 * character 3 determines the type of lip: 12529 * LIP(F7) Normal LIP 12530 * LIP(F8) Loop Failure LIP 12531 * 12532 * The possible combination for the 3rd and 4th bytes are: 12533 * F7, F7 Normal Lip - No valid AL_PA 12534 * F8, F8 Loop Failure - No valid AL_PA 12535 * F7, AL_PS Normal Lip - Valid source AL_PA 12536 * F8, AL_PS Loop Failure - Valid source AL_PA 12537 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12538 * And Normal Lip for all other loop members 12539 * 0xFF AL_PS Vendor specific reset of all loop members 12540 * 12541 * Now, it may not always be that we, at the source, may have an 12542 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12543 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12544 * payload we are going to set: 12545 * lip_b3 = 0xF7; Normal LIP 12546 * lip_b4 = 0xF7; No valid source AL_PA 12547 */ 12548 payload.ls_code.ls_code = LA_ELS_LINIT; 12549 payload.ls_code.mbz = 0; 12550 payload.rsvd = 0; 12551 payload.func = 0; /* Let Fabric determine the best way */ 12552 payload.lip_b3 = 0xF7; /* Normal LIP */ 12553 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12554 12555 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12556 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12557 12558 job->job_counter = 1; 12559 12560 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12561 if (ret == FC_SUCCESS) { 12562 fp_jobwait(job); 12563 rval = job->job_result; 12564 } else { 12565 rval = FC_FAILURE; 12566 fp_free_pkt(cmd); 12567 } 12568 12569 return (rval); 12570 } 12571 12572 12573 /* 12574 * Fill out the device handles with GAN response 12575 */ 12576 static void 12577 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12578 ns_resp_gan_t *gan_resp) 12579 { 12580 fc_remote_node_t *node; 12581 fc_porttype_t type; 12582 fc_local_port_t *port; 12583 12584 ASSERT(pd != NULL); 12585 ASSERT(handle != NULL); 12586 12587 port = pd->pd_port; 12588 12589 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12590 " port_id=%x, sym_len=%d fc4-type=%x", 12591 pd, gan_resp->gan_type_id.rsvd, 12592 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12593 12594 mutex_enter(&pd->pd_mutex); 12595 12596 ddi_rep_get8(*handle, (uint8_t *)&type, 12597 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12598 12599 pd->pd_porttype.port_type = type.port_type; 12600 pd->pd_porttype.rsvd = 0; 12601 12602 pd->pd_spn_len = gan_resp->gan_spnlen; 12603 if (pd->pd_spn_len) { 12604 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12605 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12606 DDI_DEV_AUTOINCR); 12607 } 12608 12609 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12610 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12611 DDI_DEV_AUTOINCR); 12612 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12613 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12614 DDI_DEV_AUTOINCR); 12615 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12616 (uint8_t 
*)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12617 DDI_DEV_AUTOINCR); 12618 12619 node = pd->pd_remote_nodep; 12620 mutex_exit(&pd->pd_mutex); 12621 12622 mutex_enter(&node->fd_mutex); 12623 12624 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12625 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12626 DDI_DEV_AUTOINCR); 12627 12628 node->fd_snn_len = gan_resp->gan_snnlen; 12629 if (node->fd_snn_len) { 12630 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12631 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12632 DDI_DEV_AUTOINCR); 12633 } 12634 12635 mutex_exit(&node->fd_mutex); 12636 } 12637 12638 12639 /* 12640 * Handles all NS Queries (also means that this function 12641 * doesn't handle NS object registration) 12642 */ 12643 static int 12644 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12645 int polled, int sleep) 12646 { 12647 int rval; 12648 fp_cmd_t *cmd; 12649 12650 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12651 12652 if (ns_cmd->ns_cmd_size == 0) { 12653 return (FC_FAILURE); 12654 } 12655 12656 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12657 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12658 ns_cmd->ns_resp_size, sleep, NULL); 12659 if (cmd == NULL) { 12660 return (FC_NOMEM); 12661 } 12662 12663 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12664 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12665 12666 if (polled) { 12667 job->job_counter = 1; 12668 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12669 } 12670 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12671 if (rval != FC_SUCCESS) { 12672 job->job_result = rval; 12673 fp_iodone(cmd); 12674 if (polled == 0) { 12675 /* 12676 * Return FC_SUCCESS to indicate that 12677 * fp_iodone is performed already. 
12678 */ 12679 rval = FC_SUCCESS; 12680 } 12681 } 12682 12683 if (polled) { 12684 fp_jobwait(job); 12685 rval = job->job_result; 12686 } 12687 12688 return (rval); 12689 } 12690 12691 12692 /* 12693 * Initialize Common Transport request 12694 */ 12695 static void 12696 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12697 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12698 uint16_t resp_len, job_request_t *job) 12699 { 12700 uint32_t s_id; 12701 uchar_t class; 12702 fc_packet_t *pkt; 12703 fc_ct_header_t ct; 12704 12705 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12706 12707 mutex_enter(&port->fp_mutex); 12708 s_id = port->fp_port_id.port_id; 12709 class = port->fp_ns_login_class; 12710 mutex_exit(&port->fp_mutex); 12711 12712 cmd->cmd_job = job; 12713 cmd->cmd_private = ns_cmd; 12714 pkt = &cmd->cmd_pkt; 12715 12716 ct.ct_rev = CT_REV; 12717 ct.ct_inid = 0; 12718 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12719 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12720 ct.ct_options = 0; 12721 ct.ct_reserved1 = 0; 12722 ct.ct_cmdrsp = cmd_code; 12723 ct.ct_aiusize = resp_len >> 2; 12724 ct.ct_reserved2 = 0; 12725 ct.ct_reason = 0; 12726 ct.ct_expln = 0; 12727 ct.ct_vendor = 0; 12728 12729 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12730 sizeof (ct), DDI_DEV_AUTOINCR); 12731 12732 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12733 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12734 pkt->pkt_cmd_fhdr.s_id = s_id; 12735 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12736 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12737 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12738 pkt->pkt_cmd_fhdr.seq_id = 0; 12739 pkt->pkt_cmd_fhdr.df_ctl = 0; 12740 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12741 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12742 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12743 pkt->pkt_cmd_fhdr.ro = 0; 12744 pkt->pkt_cmd_fhdr.rsvd = 0; 12745 12746 pkt->pkt_comp = fp_ns_intr; 12747 pkt->pkt_ulp_private = (opaque_t)cmd; 12748 pkt->pkt_timeout = FP_NS_TIMEOUT; 12749 12750 if (cmd_buf) { 12751 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12752 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12753 cmd_len, DDI_DEV_AUTOINCR); 12754 } 12755 12756 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12757 12758 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12759 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12760 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12761 cmd->cmd_retry_count = fp_retry_count; 12762 cmd->cmd_ulp_pkt = NULL; 12763 } 12764 12765 12766 /* 12767 * Name Server request interrupt routine 12768 */ 12769 static void 12770 fp_ns_intr(fc_packet_t *pkt) 12771 { 12772 fp_cmd_t *cmd; 12773 fc_local_port_t *port; 12774 fc_ct_header_t resp_hdr; 12775 fc_ct_header_t cmd_hdr; 12776 fctl_ns_req_t *ns_cmd; 12777 12778 cmd = pkt->pkt_ulp_private; 12779 port = cmd->cmd_port; 12780 12781 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12782 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12783 12784 ns_cmd = (fctl_ns_req_t *) 12785 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12786 12787 if (!FP_IS_PKT_ERROR(pkt)) { 12788 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12789 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12790 DDI_DEV_AUTOINCR); 12791 12792 /* 12793 * On x86 architectures, make sure the resp_hdr is big endian. 12794 * This macro is a NOP on sparc architectures mainly because 12795 * we don't want to end up wasting time since the end result 12796 * is going to be the same. 
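		 *
		 * A worked byte-order example (values are illustrative,
		 * not taken from a trace): a 16-bit CT response code that
		 * is 0x8002 on the wire arrives as the bytes 0x80 0x02.
		 * Read raw on a little-endian host it would look like
		 * 0x0280, and the FS_RJT_IU/FS_ACC_IU comparisons below
		 * would misfire; after MAKE_BE_32() the header fields
		 * compare correctly on either endianness.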
12797 */ 12798 MAKE_BE_32(&resp_hdr); 12799 12800 if (ns_cmd) { 12801 /* 12802 * Always copy out the response CT_HDR 12803 */ 12804 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12805 sizeof (resp_hdr)); 12806 } 12807 12808 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12809 pkt->pkt_state = FC_PKT_FS_RJT; 12810 pkt->pkt_reason = resp_hdr.ct_reason; 12811 pkt->pkt_expln = resp_hdr.ct_expln; 12812 } 12813 } 12814 12815 if (FP_IS_PKT_ERROR(pkt)) { 12816 if (ns_cmd) { 12817 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12818 ASSERT(ns_cmd->ns_pd != NULL); 12819 12820 /* Mark it OLD if not already done */ 12821 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12822 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12823 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12824 } 12825 12826 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12827 fctl_free_ns_cmd(ns_cmd); 12828 ((fp_cmd_t *) 12829 (pkt->pkt_ulp_private))->cmd_private = NULL; 12830 } 12831 12832 } 12833 12834 FP_TRACE(FP_NHEAD1(4, 0), "NS failure; pkt state=%x reason=%x", 12835 pkt->pkt_state, pkt->pkt_reason); 12836 12837 (void) fp_common_intr(pkt, 1); 12838 12839 return; 12840 } 12841 12842 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12843 uint32_t d_id; 12844 fc_local_port_t *port; 12845 fp_cmd_t *cmd; 12846 12847 d_id = pkt->pkt_cmd_fhdr.d_id; 12848 cmd = pkt->pkt_ulp_private; 12849 port = cmd->cmd_port; 12850 FP_TRACE(FP_NHEAD2(9, 0), 12851 "Bogus NS response received for D_ID=%x", d_id); 12852 } 12853 12854 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12855 fp_gan_handler(pkt, ns_cmd); 12856 return; 12857 } 12858 12859 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 12860 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 12861 if (ns_cmd) { 12862 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 12863 fp_ns_query_handler(pkt, ns_cmd); 12864 return; 12865 } 12866 } 12867 } 12868 12869 fp_iodone(pkt->pkt_ulp_private); 12870 } 12871 12872 12873 /* 12874 * Process NS_GAN response 12875 */ 12876 static void 12877 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 12878 { 12879 int my_did; 12880 fc_portid_t d_id; 12881 fp_cmd_t *cmd; 12882 fc_local_port_t *port; 12883 fc_remote_port_t *pd; 12884 ns_req_gan_t gan_req; 12885 ns_resp_gan_t *gan_resp; 12886 12887 ASSERT(ns_cmd != NULL); 12888 12889 cmd = pkt->pkt_ulp_private; 12890 port = cmd->cmd_port; 12891 12892 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 12893 12894 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 12895 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 12896 12897 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 12898 12899 /* 12900 * In this case the priv_lilp_posit field in reality 12901 * is actually represents the relative position on a private loop. 12902 * So zero it while dealing with Port Identifiers. 12903 */ 12904 d_id.priv_lilp_posit = 0; 12905 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 12906 if (ns_cmd->ns_gan_sid == d_id.port_id) { 12907 /* 12908 * We've come a full circle; time to get out. 12909 */ 12910 fp_iodone(cmd); 12911 return; 12912 } 12913 12914 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 12915 ns_cmd->ns_gan_sid = d_id.port_id; 12916 } 12917 12918 mutex_enter(&port->fp_mutex); 12919 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 
1 : 0; 12920 mutex_exit(&port->fp_mutex); 12921 12922 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, d_id=%x", port, 12923 d_id.port_id); 12924 12925 if (my_did == 0) { 12926 la_wwn_t pwwn; 12927 la_wwn_t nwwn; 12928 12929 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 12930 "port=%p, d_id=%x, type_id=%x, " 12931 "pwwn=%x %x %x %x %x %x %x %x, " 12932 "nwwn=%x %x %x %x %x %x %x %x", 12933 port, d_id.port_id, gan_resp->gan_type_id, 12934 12935 gan_resp->gan_pwwn.raw_wwn[0], 12936 gan_resp->gan_pwwn.raw_wwn[1], 12937 gan_resp->gan_pwwn.raw_wwn[2], 12938 gan_resp->gan_pwwn.raw_wwn[3], 12939 gan_resp->gan_pwwn.raw_wwn[4], 12940 gan_resp->gan_pwwn.raw_wwn[5], 12941 gan_resp->gan_pwwn.raw_wwn[6], 12942 gan_resp->gan_pwwn.raw_wwn[7], 12943 12944 gan_resp->gan_nwwn.raw_wwn[0], 12945 gan_resp->gan_nwwn.raw_wwn[1], 12946 gan_resp->gan_nwwn.raw_wwn[2], 12947 gan_resp->gan_nwwn.raw_wwn[3], 12948 gan_resp->gan_nwwn.raw_wwn[4], 12949 gan_resp->gan_nwwn.raw_wwn[5], 12950 gan_resp->gan_nwwn.raw_wwn[6], 12951 gan_resp->gan_nwwn.raw_wwn[7]); 12952 12953 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 12954 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 12955 DDI_DEV_AUTOINCR); 12956 12957 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 12958 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 12959 DDI_DEV_AUTOINCR); 12960 12961 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 12962 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 12963 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 12964 } 12965 if (pd != NULL) { 12966 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 12967 pd, gan_resp); 12968 } 12969 12970 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 12971 *((int *)ns_cmd->ns_data_buf) += 1; 12972 } 12973 12974 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 12975 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 12976 12977 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 12978 fc_port_dev_t *userbuf; 12979 12980 userbuf = ((fc_port_dev_t *) 12981 ns_cmd->ns_data_buf) + 12982 ns_cmd->ns_gan_index++; 12983 12984 userbuf->dev_did = d_id; 12985 12986 ddi_rep_get8(pkt->pkt_resp_acc, 12987 (uint8_t *)userbuf->dev_type, 12988 (uint8_t *)gan_resp->gan_fc4types, 12989 sizeof (userbuf->dev_type), 12990 DDI_DEV_AUTOINCR); 12991 12992 userbuf->dev_nwwn = nwwn; 12993 userbuf->dev_pwwn = pwwn; 12994 12995 if (pd != NULL) { 12996 mutex_enter(&pd->pd_mutex); 12997 userbuf->dev_state = pd->pd_state; 12998 userbuf->dev_hard_addr = 12999 pd->pd_hard_addr; 13000 mutex_exit(&pd->pd_mutex); 13001 } else { 13002 userbuf->dev_state = 13003 PORT_DEVICE_INVALID; 13004 } 13005 } else if (ns_cmd->ns_flags & 13006 FCTL_NS_BUF_IS_FC_PORTMAP) { 13007 fc_portmap_t *map; 13008 13009 map = ((fc_portmap_t *) 13010 ns_cmd->ns_data_buf) + 13011 ns_cmd->ns_gan_index++; 13012 13013 /* 13014 * First fill it like any new map 13015 * and update the port device info 13016 * below. 
13017 */ 13018 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13019 map, gan_resp, d_id.port_id); 13020 if (pd != NULL) { 13021 fctl_copy_portmap(map, pd); 13022 } else { 13023 map->map_state = PORT_DEVICE_INVALID; 13024 map->map_type = PORT_DEVICE_NOCHANGE; 13025 } 13026 } else { 13027 caddr_t dst_ptr; 13028 13029 dst_ptr = ns_cmd->ns_data_buf + 13030 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13031 13032 ddi_rep_get8(pkt->pkt_resp_acc, 13033 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13034 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13035 } 13036 } else { 13037 ns_cmd->ns_gan_index++; 13038 } 13039 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13040 fp_iodone(cmd); 13041 return; 13042 } 13043 } 13044 13045 gan_req.pid = d_id; 13046 13047 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13048 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13049 sizeof (gan_req), DDI_DEV_AUTOINCR); 13050 13051 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13052 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13053 fp_iodone(cmd); 13054 } 13055 } 13056 13057 13058 /* 13059 * Handle NS Query interrupt 13060 */ 13061 static void 13062 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13063 { 13064 fp_cmd_t *cmd; 13065 fc_local_port_t *port; 13066 caddr_t src_ptr; 13067 uint32_t xfer_len; 13068 13069 cmd = pkt->pkt_ulp_private; 13070 port = cmd->cmd_port; 13071 13072 xfer_len = ns_cmd->ns_resp_size; 13073 13074 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13075 ns_cmd->ns_cmd_code, xfer_len); 13076 13077 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13078 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13079 13080 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13081 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13082 } 13083 13084 if (xfer_len <= ns_cmd->ns_data_len) { 13085 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13086 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13087 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13088 } 13089 13090 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13091 ASSERT(ns_cmd->ns_pd != NULL); 13092 13093 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13094 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13095 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13096 } 13097 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13098 } 13099 13100 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13101 fctl_free_ns_cmd(ns_cmd); 13102 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13103 } 13104 fp_iodone(cmd); 13105 } 13106 13107 13108 /* 13109 * Handle unsolicited ADISC ELS request 13110 */ 13111 static void 13112 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13113 fc_remote_port_t *pd, job_request_t *job) 13114 { 13115 int rval; 13116 fp_cmd_t *cmd; 13117 13118 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13119 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13120 mutex_enter(&pd->pd_mutex); 13121 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13122 mutex_exit(&pd->pd_mutex); 13123 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13124 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13125 0, KM_SLEEP, pd); 13126 if (cmd != NULL) { 13127 fp_els_rjt_init(port, cmd, buf, 13128 FC_ACTION_NON_RETRYABLE, 13129 FC_REASON_INVALID_LINK_CTRL, job); 13130 13131 if (fp_sendcmd(port, cmd, 13132 port->fp_fca_handle) != FC_SUCCESS) { 13133 fp_free_pkt(cmd); 13134 } 13135 } 13136 } 13137 } else { 13138 mutex_exit(&pd->pd_mutex); 13139 /* 13140 * Yes, yes, we 
don't have a hard address, but we 13141 * should still respond; section 21.19.2 13142 * of FC-PH-2 essentially says that if an 13143 * NL_Port doesn't have a hard address, or if a port 13144 * does not have FC-AL capability, it shall report 13145 * zeroes in this field. 13146 */ 13147 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13148 0, KM_SLEEP, pd); 13149 if (cmd == NULL) { 13150 return; 13151 } 13152 fp_adisc_acc_init(port, cmd, buf, job); 13153 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13154 if (rval != FC_SUCCESS) { 13155 fp_free_pkt(cmd); 13156 } 13157 } 13158 } 13159 13160 13161 /* 13162 * Initialize the ADISC response. 13163 */ 13164 static void 13165 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13166 job_request_t *job) 13167 { 13168 fc_packet_t *pkt; 13169 la_els_adisc_t payload; 13170 13171 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13172 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13173 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13174 cmd->cmd_retry_count = 1; 13175 cmd->cmd_ulp_pkt = NULL; 13176 13177 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13178 cmd->cmd_job = job; 13179 13180 pkt = &cmd->cmd_pkt; 13181 13182 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13183 13184 payload.ls_code.ls_code = LA_ELS_ACC; 13185 payload.ls_code.mbz = 0; 13186 13187 mutex_enter(&port->fp_mutex); 13188 payload.nport_id = port->fp_port_id; 13189 payload.hard_addr = port->fp_hard_addr; 13190 mutex_exit(&port->fp_mutex); 13191 13192 payload.port_wwn = port->fp_service_params.nport_ww_name; 13193 payload.node_wwn = port->fp_service_params.node_ww_name; 13194 13195 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 13196 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13197 } 13198 13199 13200 /* 13201 * Hold and install the requested ULP drivers 13202 */ 13203 static void 13204 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13205 { 13206 int len; 13207 int count; 13208 int data_len; 13209 major_t ulp_major; 13210 caddr_t ulp_name; 13211 caddr_t data_ptr; 13212 caddr_t data_buf; 13213 13214 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13215 13216 data_buf = NULL; 13217 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13218 DDI_PROP_DONTPASS, "load-ulp-list", 13219 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13220 return; 13221 } 13222 13223 len = strlen(data_buf); 13224 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13225 13226 data_ptr = data_buf + len + 1; 13227 for (count = 0; count < port->fp_ulp_nload; count++) { 13228 len = strlen(data_ptr) + 1; 13229 ulp_name = kmem_zalloc(len, KM_SLEEP); 13230 bcopy(data_ptr, ulp_name, len); 13231 13232 ulp_major = ddi_name_to_major(ulp_name); 13233 13234 if (ulp_major != (major_t)-1) { 13235 if (modload("drv", ulp_name) < 0) { 13236 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13237 0, NULL, "failed to load %s", 13238 ulp_name); 13239 } 13240 } else { 13241 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13242 "%s isn't a valid driver", ulp_name); 13243 } 13244 13245 kmem_free(ulp_name, len); 13246 data_ptr += len; /* Skip to next field */ 13247 } 13248 13249 /* 13250 * Free the memory allocated by DDI 13251 */ 13252 if (data_buf != NULL) { 13253 kmem_free(data_buf, data_len); 13254 } 13255 } 13256 13257 13258 /* 13259 * Perform LOGO operation 13260 */ 13261 static int 13262 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13263 { 13264 int rval; 13265 fp_cmd_t *cmd; 13266 13267 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13268
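	/*
	 * Lock-ordering sketch (as used throughout this function and the
	 * rest of the file, not a formal statement of the locking
	 * contract): when both locks are needed, fp_mutex is taken before
	 * pd_mutex and released in the reverse order, e.g.
	 *
	 *	mutex_enter(&port->fp_mutex);
	 *	mutex_enter(&pd->pd_mutex);
	 *	...
	 *	mutex_exit(&pd->pd_mutex);
	 *	mutex_exit(&port->fp_mutex);
	 *
	 * which is why the caller must enter fp_logout() holding neither.
	 */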
ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13269 13270 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13271 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13272 13273 mutex_enter(&port->fp_mutex); 13274 mutex_enter(&pd->pd_mutex); 13275 13276 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13277 ASSERT(pd->pd_login_count == 1); 13278 13279 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13280 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13281 cmd->cmd_flags = 0; 13282 cmd->cmd_retry_count = 1; 13283 cmd->cmd_ulp_pkt = NULL; 13284 13285 fp_logo_init(pd, cmd, job); 13286 13287 mutex_exit(&pd->pd_mutex); 13288 mutex_exit(&port->fp_mutex); 13289 13290 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13291 if (rval != FC_SUCCESS) { 13292 fp_iodone(cmd); 13293 } 13294 13295 return (rval); 13296 } 13297 13298 13299 /* 13300 * Perform Port attach callbacks to registered ULPs 13301 */ 13302 static void 13303 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13304 { 13305 fp_soft_attach_t *att; 13306 13307 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13308 att->att_cmd = cmd; 13309 att->att_port = port; 13310 13311 /* 13312 * We need to remember whether or not fctl_busy_port 13313 * succeeded so we know whether or not to call 13314 * fctl_idle_port when the task is complete. 13315 */ 13316 13317 if (fctl_busy_port(port) == 0) { 13318 att->att_need_pm_idle = B_TRUE; 13319 } else { 13320 att->att_need_pm_idle = B_FALSE; 13321 } 13322 13323 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13324 att, KM_SLEEP); 13325 } 13326 13327 13328 /* 13329 * Forward state change notifications on to interested ULPs. 13330 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13331 * real work. 13332 */ 13333 static int 13334 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13335 { 13336 fc_port_clist_t *clist; 13337 13338 clist = kmem_zalloc(sizeof (*clist), sleep); 13339 if (clist == NULL) { 13340 return (FC_NOMEM); 13341 } 13342 13343 clist->clist_state = statec; 13344 13345 mutex_enter(&port->fp_mutex); 13346 clist->clist_flags = port->fp_topology; 13347 mutex_exit(&port->fp_mutex); 13348 13349 clist->clist_port = (opaque_t)port; 13350 clist->clist_len = 0; 13351 clist->clist_size = 0; 13352 clist->clist_map = NULL; 13353 13354 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13355 clist, KM_SLEEP); 13356 13357 return (FC_SUCCESS); 13358 } 13359 13360 13361 /* 13362 * Get name server map 13363 */ 13364 static int 13365 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13366 uint32_t *len, uint32_t sid) 13367 { 13368 int ret; 13369 fctl_ns_req_t *ns_cmd; 13370 13371 /* 13372 * Don't let the allocator do anything for the response; 13373 * we have a buffer ready to fill out.
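	 *
	 * A usage sketch (the count is illustrative): with *len == 16 the
	 * walk below issues GA_NXT starting from sid, fp_gan_handler()
	 * copies each response into map[ns_gan_index++], and the loop ends
	 * either when 16 entries have been filled (ns_gan_max) or when the
	 * name server comes back around to the starting D_ID.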
13374 */ 13375 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13376 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13377 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13378 13379 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13380 ns_cmd->ns_data_buf = (caddr_t)*map; 13381 13382 ASSERT(ns_cmd != NULL); 13383 13384 ns_cmd->ns_gan_index = 0; 13385 ns_cmd->ns_gan_sid = sid; 13386 ns_cmd->ns_cmd_code = NS_GA_NXT; 13387 ns_cmd->ns_gan_max = *len; 13388 13389 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13390 13391 if (ns_cmd->ns_gan_index != *len) { 13392 *len = ns_cmd->ns_gan_index; 13393 } 13394 ns_cmd->ns_data_len = 0; 13395 ns_cmd->ns_data_buf = NULL; 13396 fctl_free_ns_cmd(ns_cmd); 13397 13398 return (ret); 13399 } 13400 13401 13402 /* 13403 * Create a remote port in Fabric topology by using NS services 13404 */ 13405 static fc_remote_port_t * 13406 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13407 { 13408 int rval; 13409 job_request_t *job; 13410 fctl_ns_req_t *ns_cmd; 13411 fc_remote_port_t *pd; 13412 13413 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13414 13415 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13416 port, d_id); 13417 13418 #ifdef DEBUG 13419 mutex_enter(&port->fp_mutex); 13420 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13421 mutex_exit(&port->fp_mutex); 13422 #endif 13423 13424 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13425 if (job == NULL) { 13426 return (NULL); 13427 } 13428 13429 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13430 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13431 FCTL_NS_NO_DATA_BUF), sleep); 13432 if (ns_cmd == NULL) { 13433 return (NULL); 13434 } 13435 13436 job->job_result = FC_SUCCESS; 13437 ns_cmd->ns_gan_max = 1; 13438 ns_cmd->ns_cmd_code = NS_GA_NXT; 13439 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13440 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13441 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13442 13443 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13444 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13445 fctl_free_ns_cmd(ns_cmd); 13446 13447 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13448 fctl_dealloc_job(job); 13449 return (NULL); 13450 } 13451 fctl_dealloc_job(job); 13452 13453 pd = fctl_get_remote_port_by_did(port, d_id); 13454 13455 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13456 port, d_id, pd); 13457 13458 return (pd); 13459 } 13460 13461 13462 /* 13463 * Check for the permissions on an ioctl command. If it is required to have an 13464 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13465 * the ioctl command isn't in one of the list built, shut the door on that too. 13466 * 13467 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13468 * to be made sure that users open the port for an exclusive access while 13469 * performing those operations. 13470 * 13471 * This can prevent a casual user from inflicting damage on the port by 13472 * sending these ioctls from multiple processes/threads (there is no good 13473 * reason why one would need to do that) without actually realizing how 13474 * expensive such commands could turn out to be. 13475 * 13476 * It is also important to note that, even with an exclusive access, 13477 * multiple threads can share the same file descriptor and fire down 13478 * commands in parallel. 
To prevent that, the driver needs to make sure 13479 * that such commands aren't already in progress. This is taken care of 13480 * by the FP_EXCL_BUSY bit of fp_flag. 13481 */ 13482 static int 13483 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13484 { 13485 int ret = FC_FAILURE; 13486 int count; 13487 13488 for (count = 0; 13489 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13490 count++) { 13491 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13492 if (fp_perm_list[count].fp_open_flag & open_flag) { 13493 ret = FC_SUCCESS; 13494 } 13495 break; 13496 } 13497 } 13498 13499 return (ret); 13500 } 13501 13502 13503 /* 13504 * Bind the port driver's unsolicited and state-change callbacks 13505 */ 13506 static int 13507 fp_bind_callbacks(fc_local_port_t *port) 13508 { 13509 fc_fca_bind_info_t bind_info = {0}; 13510 fc_fca_port_info_t *port_info; 13511 int rval = DDI_SUCCESS; 13512 uint16_t class; 13513 int node_namelen, port_namelen; 13514 char *nname = NULL, *pname = NULL; 13515 13516 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13517 13518 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13519 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13520 "node-name", &nname) != DDI_PROP_SUCCESS) { 13521 FP_TRACE(FP_NHEAD1(1, 0), 13522 "fp_bind_callback failed to get node-name"); 13523 } 13524 if (nname) { 13525 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13526 } 13527 13528 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13529 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13530 "port-name", &pname) != DDI_PROP_SUCCESS) { 13531 FP_TRACE(FP_NHEAD1(1, 0), 13532 "fp_bind_callback failed to get port-name"); 13533 } 13534 if (pname) { 13535 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13536 } 13537 13538 if (port->fp_npiv_type == FC_NPIV_PORT) { 13539 bind_info.port_npiv = 1; 13540 } 13541 13542 /* 13543 * fca_bind_port returns the FCA driver's handle for the local 13544 * port instance. If the port number isn't supported it returns NULL. 13545 * It also sets up callbacks in the FCA for various 13546 * things like state changes, ELS, etc.
13547 */ 13548 bind_info.port_statec_cb = fp_statec_cb; 13549 bind_info.port_unsol_cb = fp_unsol_cb; 13550 bind_info.port_num = port->fp_port_num; 13551 bind_info.port_handle = (opaque_t)port; 13552 13553 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13554 13555 /* 13556 * Hold the port driver mutex as the callbacks are bound until the 13557 * service parameters are properly filled in (in order to be able to 13558 * properly respond to unsolicited ELS requests) 13559 */ 13560 mutex_enter(&port->fp_mutex); 13561 13562 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13563 port->fp_fca_dip, port_info, &bind_info); 13564 13565 if (port->fp_fca_handle == NULL) { 13566 rval = DDI_FAILURE; 13567 goto exit; 13568 } 13569 13570 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13571 port->fp_service_params = port_info->pi_login_params; 13572 port->fp_hard_addr = port_info->pi_hard_addr; 13573 13574 /* Copy from the FCA structure to the FP structure */ 13575 port->fp_hba_port_attrs = port_info->pi_attrs; 13576 13577 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13578 port->fp_rnid_init = 1; 13579 bcopy(&port_info->pi_rnid_params.params, 13580 &port->fp_rnid_params, 13581 sizeof (port->fp_rnid_params)); 13582 } else { 13583 port->fp_rnid_init = 0; 13584 } 13585 13586 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13587 if (node_namelen) { 13588 bcopy(&port_info->pi_attrs.sym_node_name, 13589 &port->fp_sym_node_name, 13590 node_namelen); 13591 port->fp_sym_node_namelen = node_namelen; 13592 } 13593 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13594 if (port_namelen) { 13595 bcopy(&port_info->pi_attrs.sym_port_name, 13596 &port->fp_sym_port_name, 13597 port_namelen); 13598 port->fp_sym_port_namelen = port_namelen; 13599 } 13600 13601 /* zero out the normally unused fields right away */ 13602 port->fp_service_params.ls_code.mbz = 0; 13603 port->fp_service_params.ls_code.ls_code = 0; 13604 bzero(&port->fp_service_params.reserved, 13605 sizeof (port->fp_service_params.reserved)); 13606 13607 class = port_info->pi_login_params.class_1.class_opt; 13608 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13609 13610 class = port_info->pi_login_params.class_2.class_opt; 13611 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13612 13613 class = port_info->pi_login_params.class_3.class_opt; 13614 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13615 13616 exit: 13617 if (nname) { 13618 ddi_prop_free(nname); 13619 } 13620 if (pname) { 13621 ddi_prop_free(pname); 13622 } 13623 mutex_exit(&port->fp_mutex); 13624 kmem_free(port_info, sizeof (*port_info)); 13625 13626 return (rval); 13627 } 13628 13629 13630 /* 13631 * Retrieve FCA capabilities 13632 */ 13633 static void 13634 fp_retrieve_caps(fc_local_port_t *port) 13635 { 13636 int rval; 13637 int ub_count; 13638 fc_fcp_dma_t fcp_dma; 13639 fc_reset_action_t action; 13640 fc_dma_behavior_t dma_behavior; 13641 13642 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13643 13644 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13645 FC_CAP_UNSOL_BUF, &ub_count); 13646 13647 switch (rval) { 13648 case FC_CAP_FOUND: 13649 case FC_CAP_SETTABLE: 13650 switch (ub_count) { 13651 case 0: 13652 break; 13653 13654 case -1: 13655 ub_count = fp_unsol_buf_count; 13656 break; 13657 13658 default: 13659 /* 1/4th of total buffers is my share */ 13660 ub_count = 13661 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13662 break; 13663 } 13664 break; 13665 13666 default: 13667 ub_count = 0; 13668 break; 13669 } 13670 13671 mutex_enter(&port->fp_mutex); 13672 port->fp_ub_count = ub_count; 13673 mutex_exit(&port->fp_mutex); 13674 13675 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13676 FC_CAP_POST_RESET_BEHAVIOR, &action); 13677 13678 switch (rval) { 13679 case FC_CAP_FOUND: 13680 case FC_CAP_SETTABLE: 13681 switch (action) { 13682 case FC_RESET_RETURN_NONE: 13683 case FC_RESET_RETURN_ALL: 13684 case FC_RESET_RETURN_OUTSTANDING: 13685 break; 13686 13687 default: 13688 action = FC_RESET_RETURN_NONE; 13689 break; 13690 } 13691 break; 13692 13693 default: 13694 action = FC_RESET_RETURN_NONE; 13695 break; 13696 } 13697 mutex_enter(&port->fp_mutex); 13698 port->fp_reset_action = action; 13699 mutex_exit(&port->fp_mutex); 13700 13701 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13702 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13703 13704 switch (rval) { 13705 case FC_CAP_FOUND: 13706 switch (dma_behavior) { 13707 case FC_ALLOW_STREAMING: 13708 /* FALLTHROUGH */ 13709 case FC_NO_STREAMING: 13710 break; 13711 13712 default: 13713 /* 13714 * If capability was found and the value 13715 * was incorrect assume the worst 13716 */ 13717 dma_behavior = FC_NO_STREAMING; 13718 break; 13719 } 13720 break; 13721 13722 default: 13723 /* 13724 * If capability was not defined - allow streaming; existing 13725 * FCAs should not be affected. 13726 */ 13727 dma_behavior = FC_ALLOW_STREAMING; 13728 break; 13729 } 13730 mutex_enter(&port->fp_mutex); 13731 port->fp_dma_behavior = dma_behavior; 13732 mutex_exit(&port->fp_mutex); 13733 13734 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13735 FC_CAP_FCP_DMA, &fcp_dma); 13736 13737 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13738 fcp_dma != FC_DVMA_SPACE)) { 13739 fcp_dma = FC_DVMA_SPACE; 13740 } 13741 13742 mutex_enter(&port->fp_mutex); 13743 port->fp_fcp_dma = fcp_dma; 13744 mutex_exit(&port->fp_mutex); 13745 } 13746 13747 13748 /* 13749 * Handle Domain, Area changes in the Fabric. 
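 *
 * The id/mask pair selects the affected remote ports: a port matches
 * when (pd_port_id.port_id & mask) == id.  For illustration only (the
 * actual values are supplied by the caller and are assumed here): an
 * area-level RSCN covering 0x0A1Bxx would be handled with id = 0x0A1B00
 * and mask = 0xFFFF00, while a domain-level RSCN for domain 0x0A would
 * use id = 0x0A0000 and mask = 0xFF0000.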
13750 */ 13751 static void 13752 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13753 job_request_t *job, int sleep) 13754 { 13755 #ifdef DEBUG 13756 uint32_t dcnt; 13757 #endif 13758 int rval; 13759 int send; 13760 int index; 13761 int listindex; 13762 int login; 13763 int job_flags; 13764 char ww_name[17]; 13765 uint32_t d_id; 13766 uint32_t count; 13767 fctl_ns_req_t *ns_cmd; 13768 fc_portmap_t *list; 13769 fc_orphan_t *orp; 13770 fc_orphan_t *norp; 13771 fc_orphan_t *prev; 13772 fc_remote_port_t *pd; 13773 fc_remote_port_t *npd; 13774 struct pwwn_hash *head; 13775 13776 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13777 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13778 0, sleep); 13779 if (ns_cmd == NULL) { 13780 mutex_enter(&port->fp_mutex); 13781 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13782 --port->fp_rscn_count; 13783 } 13784 mutex_exit(&port->fp_mutex); 13785 13786 return; 13787 } 13788 ns_cmd->ns_cmd_code = NS_GID_PN; 13789 13790 /* 13791 * We need to get a new count of devices from the 13792 * name server, which will also create any new devices 13793 * as needed. 13794 */ 13795 13796 (void) fp_ns_get_devcount(port, job, 1, sleep); 13797 13798 FP_TRACE(FP_NHEAD1(3, 0), 13799 "fp_validate_area_domain: get_devcount found %d devices", 13800 port->fp_total_devices); 13801 13802 mutex_enter(&port->fp_mutex); 13803 13804 for (count = index = 0; index < pwwn_table_size; index++) { 13805 head = &port->fp_pwwn_table[index]; 13806 pd = head->pwwn_head; 13807 while (pd != NULL) { 13808 mutex_enter(&pd->pd_mutex); 13809 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13810 if ((pd->pd_port_id.port_id & mask) == id && 13811 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13812 count++; 13813 pd->pd_type = PORT_DEVICE_OLD; 13814 pd->pd_flags = PD_ELS_MARK; 13815 } 13816 } 13817 mutex_exit(&pd->pd_mutex); 13818 pd = pd->pd_wwn_hnext; 13819 } 13820 } 13821 13822 #ifdef DEBUG 13823 dcnt = count; 13824 #endif /* DEBUG */ 13825 13826 /* 13827 * Since port->fp_orphan_count is declared an 'int' it is 13828 * theoretically possible that the count could go negative. 13829 * 13830 * This would be bad and if that happens we really do want 13831 * to know. 13832 */ 13833 13834 ASSERT(port->fp_orphan_count >= 0); 13835 13836 count += port->fp_orphan_count; 13837 13838 /* 13839 * We add the port->fp_total_devices value to the count 13840 * in the case where our port is newly attached. This is 13841 * because we haven't done any discovery and we don't have 13842 * any orphans in the port's orphan list. If we do not do 13843 * this addition to count then we won't alloc enough kmem 13844 * to do discovery with. 
13845 */ 13846 13847 if (count == 0) { 13848 count += port->fp_total_devices; 13849 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13850 "0x%x orphans found, using 0x%x", 13851 port->fp_orphan_count, count); 13852 } 13853 13854 mutex_exit(&port->fp_mutex); 13855 13856 /* 13857 * Allocate the change list 13858 */ 13859 13860 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 13861 if (list == NULL) { 13862 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13863 " Not enough memory to service RSCNs" 13864 " for %d ports, continuing...", count); 13865 13866 fctl_free_ns_cmd(ns_cmd); 13867 13868 mutex_enter(&port->fp_mutex); 13869 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13870 --port->fp_rscn_count; 13871 } 13872 mutex_exit(&port->fp_mutex); 13873 13874 return; 13875 } 13876 13877 /* 13878 * Attempt to validate or invalidate the devices that were 13879 * already in the pwwn hash table. 13880 */ 13881 13882 mutex_enter(&port->fp_mutex); 13883 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 13884 head = &port->fp_pwwn_table[index]; 13885 npd = head->pwwn_head; 13886 13887 while ((pd = npd) != NULL) { 13888 npd = pd->pd_wwn_hnext; 13889 13890 mutex_enter(&pd->pd_mutex); 13891 if ((pd->pd_port_id.port_id & mask) == id && 13892 pd->pd_flags == PD_ELS_MARK) { 13893 la_wwn_t *pwwn; 13894 13895 job->job_result = FC_SUCCESS; 13896 13897 ((ns_req_gid_pn_t *) 13898 (ns_cmd->ns_cmd_buf))->pwwn = 13899 pd->pd_port_name; 13900 13901 pwwn = &pd->pd_port_name; 13902 d_id = pd->pd_port_id.port_id; 13903 13904 mutex_exit(&pd->pd_mutex); 13905 mutex_exit(&port->fp_mutex); 13906 13907 rval = fp_ns_query(port, ns_cmd, job, 1, 13908 sleep); 13909 if (rval != FC_SUCCESS) { 13910 fc_wwn_to_str(pwwn, ww_name); 13911 13912 FP_TRACE(FP_NHEAD1(3, 0), 13913 "AREA RSCN: PD disappeared; " 13914 "d_id=%x, PWWN=%s", d_id, ww_name); 13915 13916 FP_TRACE(FP_NHEAD2(9, 0), 13917 "N_x Port with D_ID=%x," 13918 " PWWN=%s disappeared from fabric", 13919 d_id, ww_name); 13920 13921 fp_fillout_old_map(list + listindex++, 13922 pd, 1); 13923 } else { 13924 fctl_copy_portmap(list + listindex++, 13925 pd); 13926 13927 mutex_enter(&pd->pd_mutex); 13928 pd->pd_flags = PD_ELS_IN_PROGRESS; 13929 mutex_exit(&pd->pd_mutex); 13930 } 13931 13932 mutex_enter(&port->fp_mutex); 13933 } else { 13934 mutex_exit(&pd->pd_mutex); 13935 } 13936 } 13937 } 13938 13939 mutex_exit(&port->fp_mutex); 13940 13941 ASSERT(listindex == dcnt); 13942 13943 job->job_counter = listindex; 13944 job_flags = job->job_flags; 13945 job->job_flags |= JOB_TYPE_FP_ASYNC; 13946 13947 /* 13948 * Login (if we were the initiator) or validate devices in the 13949 * port map. 13950 */ 13951 13952 for (index = 0; index < listindex; index++) { 13953 pd = list[index].map_pd; 13954 13955 mutex_enter(&pd->pd_mutex); 13956 ASSERT((pd->pd_port_id.port_id & mask) == id); 13957 13958 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13959 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 13960 mutex_exit(&pd->pd_mutex); 13961 fp_jobdone(job); 13962 continue; 13963 } 13964 13965 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 13966 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 13967 d_id = pd->pd_port_id.port_id; 13968 mutex_exit(&pd->pd_mutex); 13969 13970 if ((d_id & mask) == id && send) { 13971 if (login) { 13972 FP_TRACE(FP_NHEAD1(6, 0), 13973 "RSCN and PLOGI request;" 13974 " pd=%p, job=%p d_id=%x, index=%d", pd, 13975 job, d_id, index); 13976 13977 rval = fp_port_login(port, d_id, job, 13978 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 13979 if (rval != FC_SUCCESS) { 13980 mutex_enter(&pd->pd_mutex); 13981 pd->pd_flags = PD_IDLE; 13982 mutex_exit(&pd->pd_mutex); 13983 13984 job->job_result = rval; 13985 fp_jobdone(job); 13986 } 13987 13988 FP_TRACE(FP_NHEAD2(4, 0), 13989 "PLOGI succeeded:no skip(1) for " 13990 "D_ID %x", d_id); 13991 list[index].map_flags |= 13992 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 13993 } else { 13994 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 13995 " pd=%p, job=%p d_id=%x, index=%d", pd, 13996 job, d_id, index); 13997 13998 rval = fp_ns_validate_device(port, pd, job, 13999 0, sleep); 14000 if (rval != FC_SUCCESS) { 14001 fp_jobdone(job); 14002 } 14003 mutex_enter(&pd->pd_mutex); 14004 pd->pd_flags = PD_IDLE; 14005 mutex_exit(&pd->pd_mutex); 14006 } 14007 } else { 14008 FP_TRACE(FP_NHEAD1(6, 0), 14009 "RSCN and NO request sent; pd=%p," 14010 " d_id=%x, index=%d", pd, d_id, index); 14011 14012 mutex_enter(&pd->pd_mutex); 14013 pd->pd_flags = PD_IDLE; 14014 mutex_exit(&pd->pd_mutex); 14015 14016 fp_jobdone(job); 14017 } 14018 } 14019 14020 if (listindex) { 14021 fctl_jobwait(job); 14022 } 14023 job->job_flags = job_flags; 14024 14025 /* 14026 * Orphan list validation. 14027 */ 14028 mutex_enter(&port->fp_mutex); 14029 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14030 orp != NULL; orp = norp) { 14031 norp = orp->orp_next; 14032 mutex_exit(&port->fp_mutex); 14033 14034 job->job_counter = 1; 14035 job->job_result = FC_SUCCESS; 14036 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14037 14038 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14039 14040 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14041 ((ns_resp_gid_pn_t *) 14042 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14043 14044 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14045 if (rval == FC_SUCCESS) { 14046 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14047 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14048 if (pd != NULL) { 14049 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14050 14051 FP_TRACE(FP_NHEAD1(6, 0), 14052 "RSCN and ORPHAN list " 14053 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14054 14055 FP_TRACE(FP_NHEAD2(6, 0), 14056 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14057 " in fabric", d_id, ww_name); 14058 14059 mutex_enter(&port->fp_mutex); 14060 if (prev) { 14061 prev->orp_next = orp->orp_next; 14062 } else { 14063 ASSERT(orp == port->fp_orphan_list); 14064 port->fp_orphan_list = orp->orp_next; 14065 } 14066 port->fp_orphan_count--; 14067 mutex_exit(&port->fp_mutex); 14068 14069 kmem_free(orp, sizeof (*orp)); 14070 fctl_copy_portmap(list + listindex++, pd); 14071 } else { 14072 prev = orp; 14073 } 14074 } else { 14075 prev = orp; 14076 } 14077 mutex_enter(&port->fp_mutex); 14078 } 14079 mutex_exit(&port->fp_mutex); 14080 14081 /* 14082 * One more pass through the list to delist old devices from 14083 * the d_id and pwwn tables and possibly add to the orphan list. 
14084 */ 14085 14086 for (index = 0; index < listindex; index++) { 14087 pd = list[index].map_pd; 14088 ASSERT(pd != NULL); 14089 14090 /* 14091 * Update PLOGI results; For NS validation 14092 * of orphan list, it is redundant 14093 * 14094 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14095 * appropriate as fctl_copy_portmap() will clear map_flags. 14096 */ 14097 if (list[index].map_flags & 14098 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14099 fctl_copy_portmap(list + index, pd); 14100 list[index].map_flags |= 14101 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14102 } else { 14103 fctl_copy_portmap(list + index, pd); 14104 } 14105 14106 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14107 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14108 pd, pd->pd_port_id.port_id, 14109 pd->pd_port_name.raw_wwn[0], 14110 pd->pd_port_name.raw_wwn[1], 14111 pd->pd_port_name.raw_wwn[2], 14112 pd->pd_port_name.raw_wwn[3], 14113 pd->pd_port_name.raw_wwn[4], 14114 pd->pd_port_name.raw_wwn[5], 14115 pd->pd_port_name.raw_wwn[6], 14116 pd->pd_port_name.raw_wwn[7]); 14117 14118 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14119 "results continued, pd=%p type=%x, flags=%x, state=%x", 14120 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14121 14122 mutex_enter(&pd->pd_mutex); 14123 if (pd->pd_type == PORT_DEVICE_OLD) { 14124 int initiator; 14125 14126 pd->pd_flags = PD_IDLE; 14127 initiator = (pd->pd_recepient == 14128 PD_PLOGI_INITIATOR) ? 1 : 0; 14129 14130 mutex_exit(&pd->pd_mutex); 14131 14132 mutex_enter(&port->fp_mutex); 14133 mutex_enter(&pd->pd_mutex); 14134 14135 pd->pd_state = PORT_DEVICE_INVALID; 14136 fctl_delist_did_table(port, pd); 14137 fctl_delist_pwwn_table(port, pd); 14138 14139 mutex_exit(&pd->pd_mutex); 14140 mutex_exit(&port->fp_mutex); 14141 14142 if (initiator) { 14143 (void) fctl_add_orphan(port, pd, sleep); 14144 } 14145 list[index].map_pd = pd; 14146 } else { 14147 ASSERT(pd->pd_flags == PD_IDLE); 14148 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14149 /* 14150 * Reset LOGO tolerance to zero 14151 */ 14152 fctl_tc_reset(&pd->pd_logo_tc); 14153 } 14154 mutex_exit(&pd->pd_mutex); 14155 } 14156 } 14157 14158 if (ns_cmd) { 14159 fctl_free_ns_cmd(ns_cmd); 14160 } 14161 if (listindex) { 14162 (void) fp_ulp_devc_cb(port, list, listindex, count, 14163 sleep, 0); 14164 } else { 14165 kmem_free(list, sizeof (*list) * count); 14166 14167 mutex_enter(&port->fp_mutex); 14168 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14169 --port->fp_rscn_count; 14170 } 14171 mutex_exit(&port->fp_mutex); 14172 } 14173 } 14174 14175 14176 /* 14177 * Work hard to make sense out of an RSCN page. 
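 *
 * A rough summary of the cases handled below, keyed on whether the
 * affected D_ID and the PWWN returned by GPN_ID resolve to known
 * remote ports:
 *
 *	GPN_ID fails/zero WWN	- device left the fabric; mark it OLD
 *	did_pd == pwwn_pd	- same device; PLOGI again if needed
 *	neither found		- new device (check the orphan list first)
 *	pwwn_pd only		- same PWWN, new D_ID; relist by D_ID
 *	did_pd only		- same D_ID, new PWWN; old entry plus new map
 *	both, but different	- mismatch; fix up both entries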
14178 */ 14179 static void 14180 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14181 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14182 int *listindex, int sleep) 14183 { 14184 int rval; 14185 char ww_name[17]; 14186 la_wwn_t *pwwn; 14187 fc_remote_port_t *pwwn_pd; 14188 fc_remote_port_t *did_pd; 14189 14190 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14191 14192 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14193 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14194 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14195 14196 if (did_pd != NULL) { 14197 mutex_enter(&did_pd->pd_mutex); 14198 if (did_pd->pd_flags != PD_IDLE) { 14199 mutex_exit(&did_pd->pd_mutex); 14200 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14201 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14202 port, page->aff_d_id, did_pd); 14203 return; 14204 } 14205 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14206 mutex_exit(&did_pd->pd_mutex); 14207 } 14208 14209 job->job_counter = 1; 14210 14211 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14212 14213 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14214 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14215 14216 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14217 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14218 14219 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14220 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14221 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14222 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14223 ns_cmd->ns_resp_hdr.ct_expln); 14224 14225 job->job_counter = 1; 14226 14227 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14228 /* 14229 * What this means is that the D_ID 14230 * disappeared from the Fabric. 14231 */ 14232 if (did_pd == NULL) { 14233 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14234 " NULL PD disappeared, rval=%x", rval); 14235 return; 14236 } 14237 14238 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14239 14240 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14241 (uint32_t)(uintptr_t)job->job_cb_arg; 14242 14243 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14244 14245 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14246 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14247 14248 FP_TRACE(FP_NHEAD2(9, 0), 14249 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14250 14251 FP_TRACE(FP_NHEAD2(9, 0), 14252 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14253 " fabric", page->aff_d_id, ww_name); 14254 14255 mutex_enter(&did_pd->pd_mutex); 14256 did_pd->pd_flags = PD_IDLE; 14257 mutex_exit(&did_pd->pd_mutex); 14258 14259 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14260 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14261 14262 return; 14263 } 14264 14265 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14266 14267 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14268 /* 14269 * There is no change. Do PLOGI again and add it to 14270 * ULP portmap baggage and return. Note: When RSCNs 14271 * arrive with per page states, the need for PLOGI 14272 * can be determined correctly. 
14273 */ 14274 mutex_enter(&pwwn_pd->pd_mutex); 14275 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14276 mutex_exit(&pwwn_pd->pd_mutex); 14277 14278 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14279 (uint32_t)(uintptr_t)job->job_cb_arg; 14280 14281 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14282 14283 mutex_enter(&pwwn_pd->pd_mutex); 14284 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14285 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14286 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14287 mutex_exit(&pwwn_pd->pd_mutex); 14288 14289 rval = fp_port_login(port, page->aff_d_id, job, 14290 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14291 if (rval == FC_SUCCESS) { 14292 fp_jobwait(job); 14293 rval = job->job_result; 14294 14295 /* 14296 * Reset LOGO tolerance to zero 14297 * Also we are the PLOGI initiator now. 14298 */ 14299 mutex_enter(&pwwn_pd->pd_mutex); 14300 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14301 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14302 mutex_exit(&pwwn_pd->pd_mutex); 14303 } 14304 14305 if (rval == FC_SUCCESS) { 14306 struct fc_portmap *map = 14307 listptr + *listindex - 1; 14308 14309 FP_TRACE(FP_NHEAD2(4, 0), 14310 "PLOGI succeeded: no skip(2)" 14311 " for D_ID %x", page->aff_d_id); 14312 map->map_flags |= 14313 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14314 } else { 14315 FP_TRACE(FP_NHEAD2(9, rval), 14316 "PLOGI to D_ID=%x failed", page->aff_d_id); 14317 14318 FP_TRACE(FP_NHEAD2(9, 0), 14319 "N_x Port with D_ID=%x, PWWN=%s" 14320 " disappeared from fabric", 14321 page->aff_d_id, ww_name); 14322 14323 fp_fillout_old_map(listptr + 14324 *listindex - 1, pwwn_pd, 0); 14325 } 14326 } else { 14327 mutex_exit(&pwwn_pd->pd_mutex); 14328 } 14329 14330 mutex_enter(&did_pd->pd_mutex); 14331 did_pd->pd_flags = PD_IDLE; 14332 mutex_exit(&did_pd->pd_mutex); 14333 14334 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14335 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14336 job->job_result, pwwn_pd); 14337 14338 return; 14339 } 14340 14341 if (did_pd == NULL && pwwn_pd == NULL) { 14342 14343 fc_orphan_t *orp = NULL; 14344 fc_orphan_t *norp = NULL; 14345 fc_orphan_t *prev = NULL; 14346 14347 /* 14348 * Hunt down the orphan list before giving up. 
14349 */ 14350 14351 mutex_enter(&port->fp_mutex); 14352 if (port->fp_orphan_count) { 14353 14354 for (orp = port->fp_orphan_list; orp; orp = norp) { 14355 norp = orp->orp_next; 14356 14357 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14358 prev = orp; 14359 continue; 14360 } 14361 14362 if (prev) { 14363 prev->orp_next = orp->orp_next; 14364 } else { 14365 ASSERT(orp == 14366 port->fp_orphan_list); 14367 port->fp_orphan_list = 14368 orp->orp_next; 14369 } 14370 port->fp_orphan_count--; 14371 break; 14372 } 14373 } 14374 14375 mutex_exit(&port->fp_mutex); 14376 pwwn_pd = fp_create_remote_port_by_ns(port, 14377 page->aff_d_id, sleep); 14378 14379 if (pwwn_pd != NULL) { 14380 14381 if (orp) { 14382 fc_wwn_to_str(&orp->orp_pwwn, 14383 ww_name); 14384 14385 FP_TRACE(FP_NHEAD2(9, 0), 14386 "N_x Port with D_ID=%x," 14387 " PWWN=%s reappeared in fabric", 14388 page->aff_d_id, ww_name); 14389 14390 kmem_free(orp, sizeof (*orp)); 14391 } 14392 14393 (listptr + *listindex)-> 14394 map_rscn_info.ulp_rscn_count = 14395 (uint32_t)(uintptr_t)job->job_cb_arg; 14396 14397 fctl_copy_portmap(listptr + 14398 (*listindex)++, pwwn_pd); 14399 } 14400 14401 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14402 "Case TWO", page->aff_d_id); 14403 14404 return; 14405 } 14406 14407 if (pwwn_pd != NULL && did_pd == NULL) { 14408 uint32_t old_d_id; 14409 uint32_t d_id = page->aff_d_id; 14410 14411 /* 14412 * What this means is there is a new D_ID for this 14413 * Port WWN. Take out the port device off D_ID 14414 * list and put it back with a new D_ID. Perform 14415 * PLOGI if already logged in. 14416 */ 14417 mutex_enter(&port->fp_mutex); 14418 mutex_enter(&pwwn_pd->pd_mutex); 14419 14420 old_d_id = pwwn_pd->pd_port_id.port_id; 14421 14422 fctl_delist_did_table(port, pwwn_pd); 14423 14424 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14425 (uint32_t)(uintptr_t)job->job_cb_arg; 14426 14427 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14428 &d_id, NULL); 14429 fctl_enlist_did_table(port, pwwn_pd); 14430 14431 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14432 " Case THREE, pd=%p," 14433 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14434 14435 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14436 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14437 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14438 14439 mutex_exit(&pwwn_pd->pd_mutex); 14440 mutex_exit(&port->fp_mutex); 14441 14442 FP_TRACE(FP_NHEAD2(9, 0), 14443 "N_x Port with D_ID=%x, PWWN=%s has a new" 14444 " D_ID=%x now", old_d_id, ww_name, d_id); 14445 14446 rval = fp_port_login(port, page->aff_d_id, job, 14447 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14448 if (rval == FC_SUCCESS) { 14449 fp_jobwait(job); 14450 rval = job->job_result; 14451 } 14452 14453 if (rval != FC_SUCCESS) { 14454 fp_fillout_old_map(listptr + 14455 *listindex - 1, pwwn_pd, 0); 14456 } 14457 } else { 14458 mutex_exit(&pwwn_pd->pd_mutex); 14459 mutex_exit(&port->fp_mutex); 14460 } 14461 14462 return; 14463 } 14464 14465 if (pwwn_pd == NULL && did_pd != NULL) { 14466 fc_portmap_t *ptr; 14467 uint32_t len = 1; 14468 char old_ww_name[17]; 14469 14470 mutex_enter(&did_pd->pd_mutex); 14471 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14472 mutex_exit(&did_pd->pd_mutex); 14473 14474 fc_wwn_to_str(pwwn, ww_name); 14475 14476 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14477 (uint32_t)(uintptr_t)job->job_cb_arg; 14478 14479 /* 14480 * What this means is that there is a new Port WWN for 14481 * this D_ID; Mark the Port device as old and provide 
14482 * the new PWWN and D_ID combination as new. 14483 */ 14484 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14485 14486 FP_TRACE(FP_NHEAD2(9, 0), 14487 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now", 14488 page->aff_d_id, old_ww_name, ww_name); 14489 14490 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14491 (uint32_t)(uintptr_t)job->job_cb_arg; 14492 14493 ptr = listptr + (*listindex)++; 14494 14495 job->job_counter = 1; 14496 14497 if (fp_ns_getmap(port, job, &ptr, &len, 14498 page->aff_d_id - 1) != FC_SUCCESS) { 14499 (*listindex)--; 14500 } 14501 14502 mutex_enter(&did_pd->pd_mutex); 14503 did_pd->pd_flags = PD_IDLE; 14504 mutex_exit(&did_pd->pd_mutex); 14505 14506 return; 14507 } 14508 14509 /* 14510 * A weird case of Port WWN and D_ID existence but not matching up 14511 * between them. Trust your instincts - Take the port device handle 14512 * off Port WWN list, fix it with new Port WWN and put it back, In 14513 * the mean time mark the port device corresponding to the old port 14514 * WWN as OLD. 14515 */ 14516 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p," 14517 " did_pd=%p", pwwn_pd, did_pd); 14518 14519 mutex_enter(&port->fp_mutex); 14520 mutex_enter(&pwwn_pd->pd_mutex); 14521 14522 pwwn_pd->pd_type = PORT_DEVICE_OLD; 14523 pwwn_pd->pd_state = PORT_DEVICE_INVALID; 14524 fctl_delist_did_table(port, pwwn_pd); 14525 fctl_delist_pwwn_table(port, pwwn_pd); 14526 14527 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14528 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x", 14529 pwwn_pd->pd_port_id.port_id, 14530 14531 pwwn_pd->pd_port_name.raw_wwn[0], 14532 pwwn_pd->pd_port_name.raw_wwn[1], 14533 pwwn_pd->pd_port_name.raw_wwn[2], 14534 pwwn_pd->pd_port_name.raw_wwn[3], 14535 pwwn_pd->pd_port_name.raw_wwn[4], 14536 pwwn_pd->pd_port_name.raw_wwn[5], 14537 pwwn_pd->pd_port_name.raw_wwn[6], 14538 pwwn_pd->pd_port_name.raw_wwn[7]); 14539 14540 mutex_exit(&pwwn_pd->pd_mutex); 14541 mutex_exit(&port->fp_mutex); 14542 14543 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14544 (uint32_t)(uintptr_t)job->job_cb_arg; 14545 14546 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14547 14548 mutex_enter(&port->fp_mutex); 14549 mutex_enter(&did_pd->pd_mutex); 14550 14551 fctl_delist_pwwn_table(port, did_pd); 14552 14553 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14554 (uint32_t)(uintptr_t)job->job_cb_arg; 14555 14556 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn); 14557 fctl_enlist_pwwn_table(port, did_pd); 14558 14559 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14560 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x", 14561 did_pd->pd_port_id.port_id, did_pd->pd_state, 14562 14563 did_pd->pd_port_name.raw_wwn[0], 14564 did_pd->pd_port_name.raw_wwn[1], 14565 did_pd->pd_port_name.raw_wwn[2], 14566 did_pd->pd_port_name.raw_wwn[3], 14567 did_pd->pd_port_name.raw_wwn[4], 14568 did_pd->pd_port_name.raw_wwn[5], 14569 did_pd->pd_port_name.raw_wwn[6], 14570 did_pd->pd_port_name.raw_wwn[7]); 14571 14572 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14573 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14574 mutex_exit(&did_pd->pd_mutex); 14575 mutex_exit(&port->fp_mutex); 14576 14577 rval = fp_port_login(port, page->aff_d_id, job, 14578 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL); 14579 if (rval == FC_SUCCESS) { 14580 fp_jobwait(job); 14581 if (job->job_result != FC_SUCCESS) { 14582 fp_fillout_old_map(listptr + 14583 *listindex - 1, did_pd, 0); 14584 } 14585 } 

/*
 * Check with NS for the presence of this port WWN
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
	la_wwn_t	pwwn;
	uint32_t	flags;
	fctl_ns_req_t	*ns_cmd;

	flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
	    flags, sleep);
	if (ns_cmd == NULL) {
		return (FC_NOMEM);
	}

	mutex_enter(&pd->pd_mutex);
	pwwn = pd->pd_port_name;
	mutex_exit(&pd->pd_mutex);

	/* Query the name server for the port ID bound to this port WWN */
	ns_cmd->ns_cmd_code = NS_GID_PN;
	ns_cmd->ns_pd = pd;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

	return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by the FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int	count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check that the AL_PA is a valid arbitrated loop address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int	count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}

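/*
 * Usage sketch (illustrative only; the values below are hypothetical): a
 * LILP map passes fp_validate_lilp_map() only if it is non-empty and every
 * entry is either 0 (always accepted by the check above) or one of the
 * AL_PA values listed in fp_valid_alpas[].
 *
 *	fc_lilpmap_t	map;
 *
 *	map.lilp_length = 3;
 *	map.lilp_alpalist[0] = 0x01;	(valid AL_PA)
 *	map.lilp_alpalist[1] = 0xE8;	(valid AL_PA)
 *	map.lilp_alpalist[2] = 0x03;	(not a valid AL_PA)
 *
 * fp_validate_lilp_map(&map) would return FC_FAILURE here because of the
 * third entry; with that entry removed (lilp_length = 2) it would return
 * FC_SUCCESS.  A zero-length map is always rejected.
 */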

/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
	fp_unsol_spec_t	*ub_spec = (fp_unsol_spec_t *)arg;

	fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
	    ub_spec->buf->ub_frame.type);
	kmem_free(ub_spec, sizeof (*ub_spec));
}


/*
 * Perform message reporting in a consistent manner. Unless there is
 * a strong reason not to use this function (which is very rare), all
 * message reporting should go through it.
 */
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
	caddr_t	buf;
	va_list	ap;

	switch (level) {
	case CE_NOTE:
		if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
			return;
		}
		break;

	case CE_WARN:
		if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
			return;
		}
		break;
	}

	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	(void) sprintf(buf, "fp(%d): ", port->fp_instance);

	va_start(ap, fmt);
	(void) vsprintf(buf + strlen(buf), fmt, ap);
	va_end(ap);

	if (fc_errno) {
		char *errmsg;

		(void) fc_ulp_error(fc_errno, &errmsg);
		(void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
	} else {
		if (pkt) {
			caddr_t state, reason, action, expln;

			(void) fc_ulp_pkt_error(pkt, &state, &reason,
			    &action, &expln);

			(void) sprintf(buf + strlen(buf),
			    " state=%s, reason=%s", state, reason);

			if (pkt->pkt_resp_resid) {
				(void) sprintf(buf + strlen(buf),
				    " resp resid=%x\n", pkt->pkt_resp_resid);
			}
		}
	}

	switch (dest) {
	case FP_CONSOLE_ONLY:
		cmn_err(level, "^%s", buf);
		break;

	case FP_LOG_ONLY:
		cmn_err(level, "!%s", buf);
		break;

	default:
		cmn_err(level, "%s", buf);
		break;
	}

	kmem_free(buf, 256);
}

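/*
 * Usage sketch (illustrative only; rval, pkt and d_id are hypothetical
 * locals): callers hand fp_printf() the severity, the destination, an
 * optional FC error and/or packet, and a printf-style message; the
 * instance prefix and decoded error text are added automatically.
 *
 *	fp_printf(port, CE_WARN, FP_LOG_ONLY, rval, pkt,
 *	    "ELS command to D_ID=%x failed", d_id);
 *
 * Note that CE_NOTE messages are emitted only when FP_WARNING_MESSAGES is
 * set in fp_verbose, CE_WARN messages only when FP_FATAL_MESSAGES is set,
 * and a nonzero fc_errno takes precedence over the packet diagnostics.
 */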

static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int			ret;
	uint32_t		d_id;
	la_wwn_t		pwwn;
	fc_remote_port_t	*pd = NULL;
	fc_remote_port_t	*held_pd = NULL;
	fctl_ns_req_t		*ns_cmd;
	fc_portmap_t		*changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		/*
		 * Fabric topology: resolve the D_ID for this port WWN
		 * with a GID_PN query to the name server.
		 */
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		/*
		 * Non-fabric topology: the remote port must already be
		 * known by its port WWN.
		 */
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}

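/*
 * Note (illustrative, not from the original source): fp_fcio_login() above
 * and fp_fcio_logout() below back the FCIO_DEV_LOGIN and FCIO_DEV_LOGOUT
 * ioctls.  Both expect fcio_ibuf to carry the 8-byte port WWN of the remote
 * port (sizeof (la_wwn_t) bytes are copied out of it); a hypothetical
 * caller-side setup might look like:
 *
 *	fcio_t	fcio;
 *	uchar_t	wwn[8] = { 0x21, 0x00, 0x00, 0xe0, 0x8b, 0x01, 0x02, 0x03 };
 *
 *	fcio.fcio_cmd = FCIO_DEV_LOGIN;
 *	fcio.fcio_ilen = sizeof (wwn);
 *	fcio.fcio_ibuf = (caddr_t)wwn;
 *
 * On failure the FC-specific cause is returned through fcio_errno while the
 * ioctl itself fails with EIO, ENXIO, ENODEV, or similar.
 */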

static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t		pwwn;
	fp_cmd_t		*cmd;
	fc_portmap_t		*changelist;
	fc_remote_port_t	*pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		/*
		 * Other logins are still outstanding; just drop the login
		 * count and notify the ULPs.
		 */
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;

		/* Clear the ELS state while we still hold our reference */
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Send a syslog event for adapter port level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

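/*
 * Usage sketch (illustrative; the ESC_SUNFC_* subclass names are assumed to
 * come from the sysevent headers, and tgt_pwwn/d_id are hypothetical):
 * adapter level changes are reported with fp_log_port_event() above and
 * per-target changes with fp_log_target_event() below, e.g.
 *
 *	fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE);
 *	fp_log_target_event(port, ESC_SUNFC_TARGET_ADD, tgt_pwwn, d_id);
 *
 * Both post an EC_SUNFC class event carrying the local port instance number
 * and WWN; the target variant additionally carries the remote port WWN and
 * port ID.
 */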

static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}