/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
 *
 */

#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/thread.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/promif.h>
#include <sys/nvpair.h>
#include <sys/byteorder.h>
#include <sys/scsi/scsi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/impl/fc_fcaif.h>
#include <sys/fibre-channel/impl/fctl_private.h>
#include <sys/fibre-channel/impl/fc_portif.h>
#include <sys/fibre-channel/impl/fp.h>

/* These are defined in fctl.c! */
extern int did_table_size;
extern int pwwn_table_size;

static struct cb_ops fp_cb_ops = {
    fp_open,                    /* open */
    fp_close,                   /* close */
    nodev,                      /* strategy */
    nodev,                      /* print */
    nodev,                      /* dump */
    nodev,                      /* read */
    nodev,                      /* write */
    fp_ioctl,                   /* ioctl */
    nodev,                      /* devmap */
    nodev,                      /* mmap */
    nodev,                      /* segmap */
    nochpoll,                   /* chpoll */
    ddi_prop_op,                /* cb_prop_op */
    0,                          /* streamtab */
    D_NEW | D_MP | D_HOTPLUG,   /* cb_flag */
    CB_REV,                     /* rev */
    nodev,                      /* aread */
    nodev                       /* awrite */
};

static struct dev_ops fp_ops = {
    DEVO_REV,                   /* build revision */
    0,                          /* reference count */
    fp_getinfo,                 /* getinfo */
    nulldev,                    /* identify - Obsoleted */
    nulldev,                    /* probe */
    fp_attach,                  /* attach */
    fp_detach,                  /* detach */
    nodev,                      /* reset */
    &fp_cb_ops,                 /* cb_ops */
    NULL,                       /* bus_ops */
    fp_power,                   /* power */
    ddi_quiesce_not_needed      /* quiesce */
};

#define FP_VERSION          "1.99"
#define FP_NAME_VERSION     "SunFC Port v" FP_VERSION

char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
    &mod_driverops,             /* Type of Module */
    FP_NAME_VERSION,            /* Name/Version of fp */
    &fp_ops                     /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,                   /* Rev of the loadable modules system */
    &modldrv,                   /* NULL terminated list of */
    NULL                        /* Linkage structures */
};


/* Name Server object registration command codes */
static uint16_t ns_reg_cmds[] = {
    NS_RPN_ID,
    NS_RNN_ID,
    NS_RCS_ID,
    NS_RFT_ID,
    NS_RPT_ID,
    NS_RSPN_ID,
    NS_RSNN_NN
};

struct fp_xlat {
    uchar_t     xlat_state;
    int         xlat_rval;
} fp_xlat [] = {
    { FC_PKT_SUCCESS,       FC_SUCCESS },
    { FC_PKT_REMOTE_STOP,   FC_FAILURE },
    { FC_PKT_LOCAL_RJT,     FC_FAILURE },
    { FC_PKT_NPORT_RJT,     FC_ELS_PREJECT },
    { FC_PKT_FABRIC_RJT,    FC_ELS_FREJECT },
    { FC_PKT_LOCAL_BSY,     FC_TRAN_BUSY },
    { FC_PKT_TRAN_BSY,      FC_TRAN_BUSY },
    { FC_PKT_NPORT_BSY,     FC_PBUSY },
    { FC_PKT_FABRIC_BSY,    FC_FBUSY },
    { FC_PKT_LS_RJT,        FC_FAILURE },
    { FC_PKT_BA_RJT,        FC_FAILURE },
    { FC_PKT_TIMEOUT,       FC_FAILURE },
    { FC_PKT_TRAN_ERROR,    FC_TRANSPORT_ERROR },
    { FC_PKT_FAILURE,       FC_FAILURE },
    { FC_PKT_PORT_OFFLINE,  FC_OFFLINE }
};
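
/*
 * Illustrative sketch (an assumption about usage, not a definition taken
 * from this excerpt): a table like fp_xlat is typically scanned linearly
 * to map a packet completion state (pkt_state) to the FC_* return value
 * handed back to ULPs, e.g.:
 *
 *	for (i = 0; i < sizeof (fp_xlat) / sizeof (fp_xlat[0]); i++) {
 *		if (fp_xlat[i].xlat_state == state)
 *			return (fp_xlat[i].xlat_rval);
 *	}
 *	return (FC_FAILURE);
 */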

/* Valid AL_PA (arbitrated loop physical address) values, in ascending order */
static uchar_t fp_valid_alpas[] = {
    0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
    0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
    0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
    0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
    0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
    0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
    0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
    0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
    0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
    0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
    0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
    0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
    0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
    0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};

static struct fp_perms {
    uint16_t    fp_ioctl_cmd;
    uchar_t     fp_open_flag;
} fp_perm_list [] = {
    { FCIO_GET_NUM_DEVS,        FP_OPEN },
    { FCIO_GET_DEV_LIST,        FP_OPEN },
    { FCIO_GET_SYM_PNAME,       FP_OPEN },
    { FCIO_GET_SYM_NNAME,       FP_OPEN },
    { FCIO_SET_SYM_PNAME,       FP_EXCL },
    { FCIO_SET_SYM_NNAME,       FP_EXCL },
    { FCIO_GET_LOGI_PARAMS,     FP_OPEN },
    { FCIO_DEV_LOGIN,           FP_EXCL },
    { FCIO_DEV_LOGOUT,          FP_EXCL },
    { FCIO_GET_STATE,           FP_OPEN },
    { FCIO_DEV_REMOVE,          FP_EXCL },
    { FCIO_GET_FCODE_REV,       FP_OPEN },
    { FCIO_GET_FW_REV,          FP_OPEN },
    { FCIO_GET_DUMP_SIZE,       FP_OPEN },
    { FCIO_FORCE_DUMP,          FP_EXCL },
    { FCIO_GET_DUMP,            FP_OPEN },
    { FCIO_GET_TOPOLOGY,        FP_OPEN },
    { FCIO_RESET_LINK,          FP_EXCL },
    { FCIO_RESET_HARD,          FP_EXCL },
    { FCIO_RESET_HARD_CORE,     FP_EXCL },
    { FCIO_DIAG,                FP_OPEN },
    { FCIO_NS,                  FP_EXCL },
    { FCIO_DOWNLOAD_FW,         FP_EXCL },
    { FCIO_DOWNLOAD_FCODE,      FP_EXCL },
    { FCIO_LINK_STATUS,         FP_OPEN },
    { FCIO_GET_HOST_PARAMS,     FP_OPEN },
    { FCIO_GET_NODE_ID,         FP_OPEN },
    { FCIO_SET_NODE_ID,         FP_EXCL },
    { FCIO_SEND_NODE_ID,        FP_OPEN },
    { FCIO_GET_ADAPTER_ATTRIBUTES,              FP_OPEN },
    { FCIO_GET_OTHER_ADAPTER_PORTS,             FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_ATTRIBUTES,         FP_OPEN },
    { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,      FP_OPEN },
    { FCIO_GET_PORT_ATTRIBUTES,                 FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_STATS,              FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES,    FP_OPEN },
    { FCIO_GET_NPIV_PORT_LIST,                  FP_OPEN },
    { FCIO_DELETE_NPIV_PORT,                    FP_OPEN },
    { FCIO_GET_NPIV_ATTRIBUTES,                 FP_OPEN },
    { FCIO_CREATE_NPIV_PORT,                    FP_OPEN },
    { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES,         FP_OPEN }
};

static char *fp_pm_comps[] = {
    "NAME=FC Port",
    "0=Port Down",
    "1=Port Up"
};


#ifdef _LITTLE_ENDIAN
#define MAKE_BE_32(x)   {                                               \
        uint32_t        *ptr1, i;                                       \
        ptr1 = (uint32_t *)(x);                                         \
        for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) {       \
                *ptr1 = BE_32(*ptr1);                                   \
                ptr1++;                                                 \
        }                                                               \
}
#else
#define MAKE_BE_32(x)
#endif
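
/*
 * Illustrative sketch (assumed usage, not taken from this excerpt):
 * MAKE_BE_32 is meant for structures laid out as a sequence of uint32_t
 * words.  A payload is built in host byte order and then converted in
 * place to big-endian wire order, e.g. for a hypothetical four-word
 * payload:
 *
 *	uint32_t payload[4];
 *	(build payload in host order)
 *	MAKE_BE_32(&payload);
 *
 * On big-endian hosts the macro expands to nothing, since the host order
 * already matches the wire order.
 */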

static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
static uint32_t fp_options = 0;

static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;     /* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;     /* number of retries */
unsigned int fp_offline_ticker;                 /* seconds */

/*
 * Driver global variable to anchor the list of soft state structs for
 * all fp driver instances.  Used with the Solaris DDI soft state functions.
 */
static void *fp_driver_softstate;

static clock_t fp_retry_ticks;
static clock_t fp_offline_ticks;

static int fp_retry_ticker;
static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT;
static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE;

static int fp_log_size = FP_LOG_SIZE;
static int fp_trace = FP_TRACE_DEFAULT;
static fc_trace_logq_t *fp_logq = NULL;

int fp_get_adapter_paths(char *pathList, int count);
static void fp_log_port_event(fc_local_port_t *port, char *subclass);
static void fp_log_target_event(fc_local_port_t *port, char *subclass,
    la_wwn_t tgt_pwwn, uint32_t port_id);
static uint32_t fp_map_remote_port_state(uint32_t rm_state);
static void fp_init_symbolic_names(fc_local_port_t *port);


/*
 * Perform global initialization
 */
int
_init(void)
{
    int ret;

    if ((ret = ddi_soft_state_init(&fp_driver_softstate,
        sizeof (struct fc_local_port), 8)) != 0) {
        return (ret);
    }

    if ((ret = scsi_hba_init(&modlinkage)) != 0) {
        ddi_soft_state_fini(&fp_driver_softstate);
        return (ret);
    }

    fp_logq = fc_trace_alloc_logq(fp_log_size);

    if ((ret = mod_install(&modlinkage)) != 0) {
        fc_trace_free_logq(fp_logq);
        ddi_soft_state_fini(&fp_driver_softstate);
        scsi_hba_fini(&modlinkage);
    }

    return (ret);
}


/*
 * Prepare for driver unload
 */
int
_fini(void)
{
    int ret;

    if ((ret = mod_remove(&modlinkage)) == 0) {
        fc_trace_free_logq(fp_logq);
        ddi_soft_state_fini(&fp_driver_softstate);
        scsi_hba_fini(&modlinkage);
    }

    return (ret);
}


/*
 * Request mod_info() to handle all cases
 */
int
_info(struct modinfo *modinfo)
{
    return (mod_info(&modlinkage, modinfo));
}


/*
 * fp_attach:
 *
 * The respective cmd handlers take care of performing
 * ULP related invocations
 */
static int
fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    int rval;

    /*
     * We check the value of fp_offline_ticker at this
     * point.  The variable is global for the driver and
     * not specific to an instance.
     *
     * If there is no user-defined value found in /etc/system
     * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER).
     * The minimum setting for this offline timeout according
     * to the FC-FS2 standard (Fibre Channel Framing and
     * Signalling-2, see www.t11.org) is R_T_TOV == 100msec.
     *
     * We do not recommend setting the value to less than 10
     * seconds (RA_TOV) or more than 90 seconds.  If this
     * variable is greater than 90 seconds then drivers above
     * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain.
     */

    fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY,
        dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker",
        FP_OFFLINE_TICKER);

    if ((fp_offline_ticker < 10) ||
        (fp_offline_ticker > 90)) {
        cmn_err(CE_WARN, "Setting fp_offline_ticker to "
            "%d second(s). This is outside the "
            "recommended range of 10..90 seconds",
            fp_offline_ticker);
    }

    /*
     * Tick every second when there are commands to retry.
     * It should tick at the least granular value of pkt_timeout
     * (which is one second)
     */
    fp_retry_ticker = 1;

    fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
    fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);

    switch (cmd) {
    case DDI_ATTACH:
        rval = fp_attach_handler(dip);
        break;

    case DDI_RESUME:
        rval = fp_resume_handler(dip);
        break;

    default:
        rval = DDI_FAILURE;
        break;
    }
    return (rval);
}


/*
 * fp_detach:
 *
 * If a ULP fails to handle cmd request converse of
 * cmd is invoked for ULPs that previously succeeded
 * cmd request.
 */
static int
fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int rval = DDI_FAILURE;
    fc_local_port_t *port;
    fc_attach_cmd_t converse;
    uint8_t cnt;

    if ((port = ddi_get_soft_state(fp_driver_softstate,
        ddi_get_instance(dip))) == NULL) {
        return (DDI_FAILURE);
    }

    mutex_enter(&port->fp_mutex);

    if (port->fp_ulp_attach) {
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    switch (cmd) {
    case DDI_DETACH:
        if (port->fp_task != FP_TASK_IDLE) {
            mutex_exit(&port->fp_mutex);
            return (DDI_FAILURE);
        }

        /* Let's attempt to quit the job handler gracefully */
        port->fp_soft_state |= FP_DETACH_INPROGRESS;

        mutex_exit(&port->fp_mutex);
        converse = FC_CMD_ATTACH;
        if (fctl_detach_ulps(port, FC_CMD_DETACH,
            &modlinkage) != FC_SUCCESS) {
            mutex_enter(&port->fp_mutex);
            port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
            mutex_exit(&port->fp_mutex);
            rval = DDI_FAILURE;
            break;
        }

        mutex_enter(&port->fp_mutex);
        for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
            cnt++) {
            mutex_exit(&port->fp_mutex);
            delay(drv_usectohz(1000000));
            mutex_enter(&port->fp_mutex);
        }

        if (port->fp_job_head) {
            mutex_exit(&port->fp_mutex);
            rval = DDI_FAILURE;
            break;
        }
        mutex_exit(&port->fp_mutex);

        rval = fp_detach_handler(port);
        break;

    case DDI_SUSPEND:
        mutex_exit(&port->fp_mutex);
        converse = FC_CMD_RESUME;
        if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
            &modlinkage) != FC_SUCCESS) {
            rval = DDI_FAILURE;
            break;
        }
        if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
            (void) callb_generic_cpr(&port->fp_cpr_info,
                CB_CODE_CPR_RESUME);
        }
        break;

    default:
        mutex_exit(&port->fp_mutex);
        break;
    }

    /*
     * Use softint to perform reattach.  Mark fp_ulp_attach so we
     * don't attempt to do this repeatedly on behalf of some persistent
     * caller.
     */
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_ulp_attach = 1;

        /*
         * If the port is in the low power mode then there is
         * possibility that fca too could be in low power mode.
         * Try to raise the power before calling attach ulps.
         */

        if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
            (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
            mutex_exit(&port->fp_mutex);
            (void) pm_raise_power(port->fp_port_dip,
                FP_PM_COMPONENT, FP_PM_PORT_UP);
        } else {
            mutex_exit(&port->fp_mutex);
        }


        fp_attach_ulps(port, converse);

        mutex_enter(&port->fp_mutex);
        while (port->fp_ulp_attach) {
            cv_wait(&port->fp_attach_cv, &port->fp_mutex);
        }

        port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

        /*
         * Mark state as detach failed so asynchronous ULP attach
         * events (downstream, not the ones we're initiating with
         * the call to fp_attach_ulps) are not honored.  We're
         * really still in pending detach.
         */
        port->fp_soft_state |= FP_DETACH_FAILED;

        mutex_exit(&port->fp_mutex);
    }

    return (rval);
}


/*
 * fp_getinfo:
 *   Given the device number, return either the
 *   dev_info_t pointer or the instance number.
 */

/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
    int rval;
    minor_t instance;
    fc_local_port_t *port;

    rval = DDI_SUCCESS;
    instance = getminor((dev_t)arg);

    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        if ((port = ddi_get_soft_state(fp_driver_softstate,
            instance)) == NULL) {
            rval = DDI_FAILURE;
            break;
        }
        *result = (void *)port->fp_port_dip;
        break;

    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)(uintptr_t)instance;
        break;

    default:
        rval = DDI_FAILURE;
        break;
    }

    return (rval);
}


/*
 * Entry point for power up and power down request from kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
    int rval = DDI_FAILURE;
    fc_local_port_t *port;

    port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
    if (port == NULL || comp != FP_PM_COMPONENT) {
        return (rval);
    }

    switch (level) {
    case FP_PM_PORT_UP:
        rval = DDI_SUCCESS;

        /*
         * If the port is DDI_SUSPENDed, let the DDI_RESUME
         * code complete the rediscovery.
         */
        mutex_enter(&port->fp_mutex);
        if (port->fp_soft_state & FP_SOFT_SUSPEND) {
            port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
            port->fp_pm_level = FP_PM_PORT_UP;
            mutex_exit(&port->fp_mutex);
            fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
            break;
        }

        if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
            ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

            port->fp_pm_level = FP_PM_PORT_UP;
            rval = fp_power_up(port);
            if (rval != DDI_SUCCESS) {
                port->fp_pm_level = FP_PM_PORT_DOWN;
            }
        } else {
            port->fp_pm_level = FP_PM_PORT_UP;
        }
        mutex_exit(&port->fp_mutex);
        break;

    case FP_PM_PORT_DOWN:
        mutex_enter(&port->fp_mutex);

        ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
        if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
            /*
             * PM framework goofed up.  We don't have
             * any PM components.  Let's never go down.
             */
            mutex_exit(&port->fp_mutex);
            break;

        }

        if (port->fp_ulp_attach) {
            /* We shouldn't let the power go down */
            mutex_exit(&port->fp_mutex);
            break;
        }

        /*
         * Not a whole lot to do if we are detaching
         */
        if (port->fp_soft_state & FP_SOFT_IN_DETACH) {
            port->fp_pm_level = FP_PM_PORT_DOWN;
            mutex_exit(&port->fp_mutex);
            rval = DDI_SUCCESS;
            break;
        }

        if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) {
            port->fp_pm_level = FP_PM_PORT_DOWN;

            rval = fp_power_down(port);
            if (rval != DDI_SUCCESS) {
                port->fp_pm_level = FP_PM_PORT_UP;
                ASSERT(!(port->fp_soft_state &
                    FP_SOFT_POWER_DOWN));
            } else {
                ASSERT(port->fp_soft_state &
                    FP_SOFT_POWER_DOWN);
            }
        }
        mutex_exit(&port->fp_mutex);
        break;

    default:
        break;
    }

    return (rval);
}


/*
 * Open FC port devctl node
 */
static int
fp_open(dev_t *devp, int flag, int otype, cred_t *credp)
{
    int instance;
    fc_local_port_t *port;

    if (otype != OTYP_CHR) {
        return (EINVAL);
    }

    /*
     * This is not a toy to play with.  Allow only powerful
     * users (hopefully knowledgeable) to access the port
     * (A hacker potentially could download a sick binary
     * file into FCA)
     */
    if (drv_priv(credp)) {
        return (EPERM);
    }

    instance = (int)getminor(*devp);

    port = ddi_get_soft_state(fp_driver_softstate, instance);
    if (port == NULL) {
        return (ENXIO);
    }

    mutex_enter(&port->fp_mutex);
    if (port->fp_flag & FP_EXCL) {
        /*
         * It is already open for exclusive access.
         * So shut the door on this caller.
         */
        mutex_exit(&port->fp_mutex);
        return (EBUSY);
    }

    if (flag & FEXCL) {
        if (port->fp_flag & FP_OPEN) {
            /*
             * Exclusive operation not possible
             * as it is already opened
             */
            mutex_exit(&port->fp_mutex);
            return (EBUSY);
        }
        port->fp_flag |= FP_EXCL;
    }
    port->fp_flag |= FP_OPEN;
    mutex_exit(&port->fp_mutex);

    return (0);
}


/*
 * The driver close entry point is called on the last close()
 * of a device.  So it is perfectly alright to just clobber the
 * open flag and reset it to idle (instead of having to reset
 * each flag bits).  For any confusion, check out close(9E).
 */

/* ARGSUSED */
static int
fp_close(dev_t dev, int flag, int otype, cred_t *credp)
{
    int instance;
    fc_local_port_t *port;

    if (otype != OTYP_CHR) {
        return (EINVAL);
    }

    instance = (int)getminor(dev);

    port = ddi_get_soft_state(fp_driver_softstate, instance);
    if (port == NULL) {
        return (ENXIO);
    }

    mutex_enter(&port->fp_mutex);
    if ((port->fp_flag & FP_OPEN) == 0) {
        mutex_exit(&port->fp_mutex);
        return (ENODEV);
    }
    port->fp_flag = FP_IDLE;
    mutex_exit(&port->fp_mutex);

    return (0);
}

/*
 * Handle IOCTL requests
 */

/* ARGSUSED */
static int
fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval)
{
    int instance;
    int ret = 0;
    fcio_t fcio;
    fc_local_port_t *port;

    instance = (int)getminor(dev);

    port = ddi_get_soft_state(fp_driver_softstate, instance);
    if (port == NULL) {
        return (ENXIO);
    }

    mutex_enter(&port->fp_mutex);
    if ((port->fp_flag & FP_OPEN) == 0) {
        mutex_exit(&port->fp_mutex);
        return (ENXIO);
    }

    if (port->fp_soft_state & FP_SOFT_SUSPEND) {
        mutex_exit(&port->fp_mutex);
        return (ENXIO);
    }

    mutex_exit(&port->fp_mutex);

    /* this will raise power if necessary */
    ret = fctl_busy_port(port);
    if (ret != 0) {
        return (ret);
    }

    ASSERT(port->fp_pm_level == FP_PM_PORT_UP);


    switch (cmd) {
    case FCIO_CMD: {
#ifdef _MULTI_DATAMODEL
        switch (ddi_model_convert_from(mode & FMODELS)) {
        case DDI_MODEL_ILP32: {
            struct fcio32 fcio32;

            if (ddi_copyin((void *)data, (void *)&fcio32,
                sizeof (struct fcio32), mode)) {
                ret = EFAULT;
                break;
            }
            fcio.fcio_xfer = fcio32.fcio_xfer;
            fcio.fcio_cmd = fcio32.fcio_cmd;
            fcio.fcio_flags = fcio32.fcio_flags;
            fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags;
            fcio.fcio_ilen = (size_t)fcio32.fcio_ilen;
            fcio.fcio_ibuf =
                (caddr_t)(uintptr_t)fcio32.fcio_ibuf;
            fcio.fcio_olen = (size_t)fcio32.fcio_olen;
            fcio.fcio_obuf =
                (caddr_t)(uintptr_t)fcio32.fcio_obuf;
            fcio.fcio_alen = (size_t)fcio32.fcio_alen;
            fcio.fcio_abuf =
                (caddr_t)(uintptr_t)fcio32.fcio_abuf;
            fcio.fcio_errno = fcio32.fcio_errno;
            break;
        }

        case DDI_MODEL_NONE:
            if (ddi_copyin((void *)data, (void *)&fcio,
                sizeof (fcio_t), mode)) {
                ret = EFAULT;
            }
            break;
        }
#else   /* _MULTI_DATAMODEL */
        if (ddi_copyin((void *)data, (void *)&fcio,
            sizeof (fcio_t), mode)) {
            ret = EFAULT;
            break;
        }
#endif  /* _MULTI_DATAMODEL */
        if (!ret) {
            ret = fp_fciocmd(port, data, mode, &fcio);
        }
        break;
    }

    default:
        ret = fctl_ulp_port_ioctl(port, dev, cmd, data,
            mode, credp, rval);
    }

    fctl_idle_port(port);

    return (ret);
}


/*
 * Init Symbolic Port Name and Node Name
 * LV will try to get symbolic names from FCA driver
 * and register these to name server,
 * if LV fails to get these,
 * LV will register its default symbolic names to name server.
 * The Default symbolic node name format is :
 *	<hostname>:<hba driver name>(instance)
 * The Default symbolic port name format is :
 *	<fp path name>
 */
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
    const char *vendorname = ddi_driver_name(port->fp_fca_dip);
    char *sym_name;
    char fcaname[50] = {0};
    int hostnlen, fcanlen;

    if (port->fp_sym_node_namelen == 0) {
        hostnlen = strlen(utsname.nodename);
        (void) snprintf(fcaname, sizeof (fcaname),
            "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
        fcanlen = strlen(fcaname);

        sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
        (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
        port->fp_sym_node_namelen = strlen(sym_name);
        if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
            port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
        }
        (void) strncpy(port->fp_sym_node_name, sym_name,
            port->fp_sym_node_namelen);
        kmem_free(sym_name, hostnlen + fcanlen + 2);
    }

    if (port->fp_sym_port_namelen == 0) {
        char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

        (void) ddi_pathname(port->fp_port_dip, pathname);
        port->fp_sym_port_namelen = strlen(pathname);
        if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
            port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
        }
        (void) strncpy(port->fp_sym_port_name, pathname,
            port->fp_sym_port_namelen);
        kmem_free(pathname, MAXPATHLEN);
    }
}


/*
 * Perform port attach
 */
static int
fp_attach_handler(dev_info_t *dip)
{
    int rval;
    int instance;
    int port_num;
    int port_len;
    char name[30];
    char i_pwwn[17];
    fp_cmd_t *pkt;
    uint32_t ub_count;
    fc_local_port_t *port;
    job_request_t *job;
    fc_local_port_t *phyport = NULL;
    int portpro1;
    char pwwn[17], nwwn[17];

    instance = ddi_get_instance(dip);

    port_len = sizeof (port_num);

    rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
        (caddr_t)&port_num, &port_len);

    if (rval != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
            instance);
        return (DDI_FAILURE);
    }

    if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
        DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
            instance);
        return (DDI_FAILURE);
    }

    if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
        DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
            " point minor node", instance);
        ddi_remove_minor_node(dip, NULL);
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
        != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
            instance);
        ddi_remove_minor_node(dip, NULL);
        return (DDI_FAILURE);
    }
    port = ddi_get_soft_state(fp_driver_softstate, instance);

    (void) sprintf(port->fp_ibuf, "fp(%d)", instance);

    port->fp_instance = instance;
    port->fp_ulp_attach = 1;
    port->fp_port_num = port_num;
    port->fp_verbose = fp_verbosity;
    port->fp_options = fp_options;

    port->fp_fca_dip = ddi_get_parent(dip);
    port->fp_port_dip = dip;
    port->fp_fca_tran = (fc_fca_tran_t *)
        ddi_get_driver_private(port->fp_fca_dip);

    port->fp_task = port->fp_last_task = FP_TASK_IDLE;

    /*
     * Init the starting value of fp_rscn_count.  Note that if
     * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
     * actual # of RSCNs will be (fp_rscn_count - 1)
     */
    port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;

    mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
    cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);

    (void) sprintf(name, "fp%d_cache", instance);

    if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
        dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "phyport-instance", -1)) != -1) {
        phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
        fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
        fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
        port->fp_npiv_type = FC_NPIV_PORT;
    }

    /*
     * Allocate the pool of fc_packet_t structs to be used with
     * this fp instance.
     */
    port->fp_pkt_cache = kmem_cache_create(name,
        (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
        fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
        NULL, 0);
    port->fp_out_fpcmds = 0;
    if (port->fp_pkt_cache == NULL) {
        goto cache_alloc_failed;
    }


    /*
     * Allocate the d_id and pwwn hash tables for all remote ports
     * connected to this local port.
     */
    port->fp_did_table = kmem_zalloc(did_table_size *
        sizeof (struct d_id_hash), KM_SLEEP);

    port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
        sizeof (struct pwwn_hash), KM_SLEEP);

    port->fp_taskq = taskq_create("fp_ulp_callback", 1,
        MINCLSYSPRI, 1, 16, 0);

    /* Indicate that we don't have the pm components yet */
    port->fp_soft_state |= FP_SOFT_NO_PMCOMP;

    /*
     * Bind the callbacks with the FCA driver.  This will open the gate
     * for asynchronous callbacks, so after this call the fp_mutex
     * must be held when updating the fc_local_port_t struct.
     *
     * This is done _before_ setting up the job thread so we can avoid
     * cleaning up after the thread_create() in the error path.  This
     * also means fp will be operating with fp_els_resp_pkt set to NULL.
     */
    if (fp_bind_callbacks(port) != DDI_SUCCESS) {
        goto bind_callbacks_failed;
    }

    if (phyport) {
        mutex_enter(&phyport->fp_mutex);
        if (phyport->fp_port_next) {
            phyport->fp_port_next->fp_port_prev = port;
            port->fp_port_next = phyport->fp_port_next;
            phyport->fp_port_next = port;
            port->fp_port_prev = phyport;
        } else {
            phyport->fp_port_next = port;
            phyport->fp_port_prev = port;
            port->fp_port_next = phyport;
            port->fp_port_prev = phyport;
        }
        mutex_exit(&phyport->fp_mutex);
    }

    /*
     * Init Symbolic Names
     */
    fp_init_symbolic_names(port);

    pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
        KM_SLEEP, NULL);

    if (pkt == NULL) {
        cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
            instance);
        goto alloc_els_packet_failed;
    }

    (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
        v.v_maxsyspri - 2);

    fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
        i_pwwn) != DDI_PROP_SUCCESS) {
        fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
            "fp(%d): Updating 'initiator-port' property"
            " on fp dev_info node failed", instance);
    }

    fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
        i_pwwn) != DDI_PROP_SUCCESS) {
        fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
            "fp(%d): Updating 'initiator-node' property"
            " on fp dev_info node failed", instance);
    }

    mutex_enter(&port->fp_mutex);
    port->fp_els_resp_pkt = pkt;
    mutex_exit(&port->fp_mutex);

    /*
     * Determine the count of unsolicited buffers this FCA can support
     */
    fp_retrieve_caps(port);

    /*
     * Allocate unsolicited buffer tokens
     */
    if (port->fp_ub_count) {
        ub_count = port->fp_ub_count;
        port->fp_ub_tokens = kmem_zalloc(ub_count *
            sizeof (*port->fp_ub_tokens), KM_SLEEP);
        /*
         * Do not fail the attach if unsolicited buffer allocation
         * fails; Just try to get along with whatever the FCA can do.
         */
        if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
            FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
            FC_SUCCESS || ub_count != port->fp_ub_count) {
            cmn_err(CE_WARN, "fp(%d): failed to allocate "
                " Unsolicited buffers. proceeding with attach...",
                instance);
            kmem_free(port->fp_ub_tokens,
                sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
            port->fp_ub_tokens = NULL;
        }
    }

    fp_load_ulp_modules(dip, port);

    /*
     * Enable DDI_SUSPEND and DDI_RESUME for this instance.
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
        "pm-hardware-state", "needs-suspend-resume",
        strlen("needs-suspend-resume") + 1);

    /*
     * fctl maintains a list of all port handles, so
     * help fctl add this one to its list now.
     */
    mutex_enter(&port->fp_mutex);
    fctl_add_port(port);

    /*
     * If a state change is already in progress, set the bind state to
     * OFFLINE as well, so further state change callbacks into ULPs
     * will pass the appropriate states
     */
    if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
        port->fp_statec_busy) {
        port->fp_bind_state = FC_STATE_OFFLINE;
        mutex_exit(&port->fp_mutex);

        fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
    } else {
        /*
         * Without dropping the mutex, ensure that the port
         * startup happens ahead of state change callback
         * processing
         */
        ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

        port->fp_last_task = port->fp_task;
        port->fp_task = FP_TASK_PORT_STARTUP;

        job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
            fp_startup_done, (opaque_t)port, KM_SLEEP);

        port->fp_job_head = port->fp_job_tail = job;

        cv_signal(&port->fp_cv);

        mutex_exit(&port->fp_mutex);
    }

    mutex_enter(&port->fp_mutex);
    while (port->fp_ulp_attach) {
        cv_wait(&port->fp_attach_cv, &port->fp_mutex);
    }
    mutex_exit(&port->fp_mutex);

    if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
        "pm-components", fp_pm_comps,
        sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
        DDI_PROP_SUCCESS) {
        FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
            " components property, PM disabled on this port.");
        mutex_enter(&port->fp_mutex);
        port->fp_pm_level = FP_PM_PORT_UP;
        mutex_exit(&port->fp_mutex);
    } else {
        if (pm_raise_power(dip, FP_PM_COMPONENT,
            FP_PM_PORT_UP) != DDI_SUCCESS) {
            FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
                " power level");
            mutex_enter(&port->fp_mutex);
            port->fp_pm_level = FP_PM_PORT_UP;
            mutex_exit(&port->fp_mutex);
        }

        /*
         * Don't unset the FP_SOFT_NO_PMCOMP flag until after
         * the call to pm_raise_power.  The PM framework can't
         * handle multiple threads calling into it during attach.
         */

        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
        mutex_exit(&port->fp_mutex);
    }

    ddi_report_dev(dip);

    fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

    return (DDI_SUCCESS);

    /*
     * Unwind any/all preceding allocations in the event of an error.
     */

alloc_els_packet_failed:

    if (port->fp_fca_handle != NULL) {
        port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
        port->fp_fca_handle = NULL;
    }

    if (port->fp_ub_tokens != NULL) {
        (void) fc_ulp_ubfree(port, port->fp_ub_count,
            port->fp_ub_tokens);
        kmem_free(port->fp_ub_tokens,
            port->fp_ub_count * sizeof (*port->fp_ub_tokens));
        port->fp_ub_tokens = NULL;
    }

    if (port->fp_els_resp_pkt != NULL) {
        fp_free_pkt(port->fp_els_resp_pkt);
        port->fp_els_resp_pkt = NULL;
    }

bind_callbacks_failed:

    if (port->fp_taskq != NULL) {
        taskq_destroy(port->fp_taskq);
    }

    if (port->fp_pwwn_table != NULL) {
        kmem_free(port->fp_pwwn_table,
            pwwn_table_size * sizeof (struct pwwn_hash));
        port->fp_pwwn_table = NULL;
    }

    if (port->fp_did_table != NULL) {
        kmem_free(port->fp_did_table,
            did_table_size * sizeof (struct d_id_hash));
        port->fp_did_table = NULL;
    }

    if (port->fp_pkt_cache != NULL) {
        kmem_cache_destroy(port->fp_pkt_cache);
        port->fp_pkt_cache = NULL;
    }

cache_alloc_failed:

    cv_destroy(&port->fp_attach_cv);
    cv_destroy(&port->fp_cv);
    mutex_destroy(&port->fp_mutex);
    ddi_remove_minor_node(port->fp_port_dip, NULL);
    ddi_soft_state_free(fp_driver_softstate, instance);
    ddi_prop_remove_all(dip);

    return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
    int rval;
    fc_local_port_t *port;

    port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

    ASSERT(port != NULL);

#ifdef DEBUG
    mutex_enter(&port->fp_mutex);
    ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
    mutex_exit(&port->fp_mutex);
#endif

    /*
     * If the port was power suspended, raise the power level
     */
    mutex_enter(&port->fp_mutex);
    if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
        (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
        ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

        mutex_exit(&port->fp_mutex);
        if (pm_raise_power(dip, FP_PM_COMPONENT,
            FP_PM_PORT_UP) != DDI_SUCCESS) {
            FP_TRACE(FP_NHEAD2(9, 0),
                "Failed to raise the power level");
            return (DDI_FAILURE);
        }
        mutex_enter(&port->fp_mutex);
    }
    port->fp_soft_state &= ~FP_SOFT_SUSPEND;
    mutex_exit(&port->fp_mutex);

    /*
     * All the discovery is initiated and handled by per-port thread.
     * Further, all the discovery is handled in callback mode
     * (not polled mode); In a specific case such as this, the discovery
     * is required to happen in polled mode.  The easiest way out is
     * to bail out port thread and get started.  Come back and fix this
     * to do on demand discovery initiated by ULPs.  ULPs such as FCP
     * will do on-demand discovery during pre-power-up busctl handling
     * which will only be possible when SCSA provides a new HBA vector
     * for sending down the PM busctl requests.
     */
    (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

    rval = fp_resume_all(port, FC_CMD_RESUME);
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state |= FP_SOFT_SUSPEND;
        mutex_exit(&port->fp_mutex);
        (void) callb_generic_cpr(&port->fp_cpr_info,
            CB_CODE_CPR_CHKPT);
    }

    return (rval);
}

/*
 * Perform FC Port power on initialization
 */
static int
fp_power_up(fc_local_port_t *port)
{
    int rval;

    ASSERT(MUTEX_HELD(&port->fp_mutex));

    ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
    ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);

    port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;

    mutex_exit(&port->fp_mutex);

    rval = fp_resume_all(port, FC_CMD_POWER_UP);
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state |= FP_SOFT_POWER_DOWN;
    } else {
        mutex_enter(&port->fp_mutex);
    }

    return (rval);
}


/*
 * It is important to note that the power may possibly be removed between
 * SUSPEND and the ensuing RESUME operation.  In such a context the underlying
 * FC port hardware would have gone through an OFFLINE to ONLINE transition
 * (hardware state).  In this case, the port driver may need to rediscover the
 * topology, perform LOGINs, register with the name server again and perform
 * any such port initialization procedures.  To perform LOGINs, the driver could
 * use the port device handle to see if a LOGIN needs to be performed and use
 * the D_ID and WWN in it.  The LOGINs may fail (if the hardware is reconfigured
 * or removed) which will be reflected in the map the ULPs will see.
 */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

    ASSERT(!MUTEX_HELD(&port->fp_mutex));

    if (fp_bind_callbacks(port) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    mutex_enter(&port->fp_mutex);

    /*
     * If there are commands queued for delayed retry, instead of
     * working the hard way to figure out which ones are good for
     * restart and which ones are not (ELSs are definitely not good
     * as the port will have to go through a new spin of rediscovery
     * now), just flush them out.
     */
    if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
        fp_cmd_t *cmd;

        port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

        mutex_exit(&port->fp_mutex);
        while ((cmd = fp_deque_cmd(port)) != NULL) {
            cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
            fp_iodone(cmd);
        }
        mutex_enter(&port->fp_mutex);
    }

    if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
        if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
            port->fp_dev_count) {
            port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
            port->fp_offline_tid = timeout(fp_offline_timeout,
                (caddr_t)port, fp_offline_ticks);
        }
        if (port->fp_job_head) {
            cv_signal(&port->fp_cv);
        }
        mutex_exit(&port->fp_mutex);
        fctl_attach_ulps(port, cmd, &modlinkage);
    } else {
        struct job_request *job;

        /*
         * If an OFFLINE timer was running at the time of
         * suspending, there is no need to restart it as
         * the port is ONLINE now.
         */
        port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
        if (port->fp_statec_busy == 0) {
            port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
        }
        port->fp_statec_busy++;
        mutex_exit(&port->fp_mutex);

        job = fctl_alloc_job(JOB_PORT_ONLINE,
            JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
        fctl_enque_job(port, job);

        fctl_jobwait(job);
        fctl_remove_oldies(port);

        fctl_attach_ulps(port, cmd, &modlinkage);
        fctl_dealloc_job(job);
    }

    return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully.  The steps followed to handle the
 * DDI_DETACH are:
 *	+	Grab the port driver mutex, check if the unsolicited
 *		callback is currently under processing.  If true, fail
 *		the DDI_DETACH request by printing a message; If false
 *		mark the DDI_DETACH as under progress, so that any
 *		further unsolicited callbacks get bounced.
 *	+	Perform PRLO/LOGO if necessary, cleanup all the data
 *		structures.
 *	+	Get the job_handler thread to gracefully exit.
 *	+	Unregister callbacks with the FCA port.
 *	+	Now that some peace is found, notify all the ULPs of
 *		DDI_DETACH request (using ulp_port_detach entry point)
 *	+	Free all mutexes, semaphores, conditional variables.
 *	+	Free the soft state, return success.
 *
 * Important considerations:
 *	Port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested.  It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
    job_request_t *job;
    uint32_t delay_count;
    fc_orphan_t *orp, *tmporp;

    /*
     * In a Fabric topology with many host ports connected to
     * a switch, another detaching instance of fp might have
     * triggered a LOGO (which is an unsolicited request to
     * this instance).  So in order to be able to successfully
     * detach by taking care of such cases a delay of about
     * 30 seconds is introduced.
     */
    delay_count = 0;
    mutex_enter(&port->fp_mutex);
    if (port->fp_out_fpcmds != 0) {
        /*
         * At this time we can only check fp internal commands, because
         * sd/ssd/scsi_vhci should have finished all their commands,
         * fcp/fcip/fcsm should have finished all their commands.
         *
         * It seems that all fp internal commands are asynchronous now.
         */
        port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
        mutex_exit(&port->fp_mutex);

        cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
            " Failing detach", port->fp_instance, port->fp_out_fpcmds);
        return (DDI_FAILURE);
    }

    while ((port->fp_soft_state &
        (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
        (delay_count < 30)) {
        mutex_exit(&port->fp_mutex);
        delay_count++;
        delay(drv_usectohz(1000000));
        mutex_enter(&port->fp_mutex);
    }

    if (port->fp_soft_state &
        (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
        port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
        mutex_exit(&port->fp_mutex);

        cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
            " Failing detach", port->fp_instance);
        return (DDI_FAILURE);
    }

    port->fp_soft_state |= FP_SOFT_IN_DETACH;
    port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
    mutex_exit(&port->fp_mutex);

    /*
     * If we're powered down, we need to raise power prior to submitting
     * the JOB_PORT_SHUTDOWN job.  Otherwise, the job handler will never
     * process the shutdown job.
     */
    if (fctl_busy_port(port) != 0) {
        cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
            port->fp_instance);
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    /*
     * This will deallocate data structs and cause the "job" thread
     * to exit, in preparation for DDI_DETACH on the instance.
     * This can sleep for an arbitrary duration, since it waits for
     * commands over the wire, timeout(9F) callbacks, etc.
     *
     * CAUTION: There is still a race here, where the "job" thread
     * can still be executing code even tho the fctl_jobwait() call
     * below has returned to us.  In theory the fp driver could even be
     * modunloaded even tho the job thread isn't done executing.
     * without creating the race condition.
     */
    job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
        (opaque_t)port, KM_SLEEP);
    fctl_enque_job(port, job);
    fctl_jobwait(job);
    fctl_dealloc_job(job);


    (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
        FP_PM_PORT_DOWN);

    if (port->fp_taskq) {
        taskq_destroy(port->fp_taskq);
    }

    ddi_prop_remove_all(port->fp_port_dip);

    ddi_remove_minor_node(port->fp_port_dip, NULL);

    fctl_remove_port(port);

    fp_free_pkt(port->fp_els_resp_pkt);

    if (port->fp_ub_tokens) {
        if (fc_ulp_ubfree(port, port->fp_ub_count,
            port->fp_ub_tokens) != FC_SUCCESS) {
            cmn_err(CE_WARN, "fp(%d): couldn't free "
                " unsolicited buffers", port->fp_instance);
        }
        kmem_free(port->fp_ub_tokens,
            sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
        port->fp_ub_tokens = NULL;
    }

    if (port->fp_pkt_cache != NULL) {
        kmem_cache_destroy(port->fp_pkt_cache);
    }

    port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

    mutex_enter(&port->fp_mutex);
    if (port->fp_did_table) {
        kmem_free(port->fp_did_table, did_table_size *
            sizeof (struct d_id_hash));
    }

    if (port->fp_pwwn_table) {
        kmem_free(port->fp_pwwn_table, pwwn_table_size *
            sizeof (struct pwwn_hash));
    }
    orp = port->fp_orphan_list;
    while (orp) {
        tmporp = orp;
        orp = orp->orp_next;
        kmem_free(tmporp, sizeof (*orp));
    }

    mutex_exit(&port->fp_mutex);

    fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

    mutex_destroy(&port->fp_mutex);
    cv_destroy(&port->fp_attach_cv);
    cv_destroy(&port->fp_cv);
    ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

    return (DDI_SUCCESS);
}


/*
 * Steps to perform DDI_SUSPEND operation on a FC port
 *
 *	- If already suspended return DDI_FAILURE
 *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	  in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	  taken care of by the CPR frame work)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
    uint32_t delay_count;

    mutex_enter(&port->fp_mutex);

    /*
     * The following should never happen, but
     * let the driver be more defensive here
     */
    if (port->fp_soft_state & FP_SOFT_SUSPEND) {
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    /*
     * If the port is already power suspended, there
     * is nothing else to do, So return DDI_SUCCESS,
     * but mark the SUSPEND bit in the soft state
     * before leaving.
     */
    if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
        port->fp_soft_state |= FP_SOFT_SUSPEND;
        mutex_exit(&port->fp_mutex);
        return (DDI_SUCCESS);
    }

    /*
     * Check if an unsolicited callback or state change handling is
     * in progress.  If true, fail the suspend operation; also throw
     * a warning message notifying the failure.  Note that Sun PCI
     * hotplug spec recommends messages in cases of failure (but
     * not flooding the console)
     *
     * Busy waiting for a short interval (500 millisecond ?) to see
     * if the callback processing completes may be another idea.  Since
     * most of the callback processing involves a lot of work, it
     * is safe to just fail the SUSPEND operation.  It is definitely
     * not bad to fail the SUSPEND operation if the driver is busy.
     */
    delay_count = 0;
    while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
        FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
        mutex_exit(&port->fp_mutex);
        delay_count++;
        delay(drv_usectohz(1000000));
        mutex_enter(&port->fp_mutex);
    }

    if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
        FP_SOFT_IN_UNSOL_CB)) {
        mutex_exit(&port->fp_mutex);
        cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
            " Failing suspend", port->fp_instance);
        return (DDI_FAILURE);
    }

    /*
     * Check if the FC port thread is busy
     */
    if (port->fp_job_head) {
        mutex_exit(&port->fp_mutex);
        FP_TRACE(FP_NHEAD2(9, 0),
            "FC port thread is busy: Failing suspend");
        return (DDI_FAILURE);
    }
    port->fp_soft_state |= FP_SOFT_SUSPEND;

    fp_suspend_all(port);
    mutex_exit(&port->fp_mutex);

    return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
    ASSERT(MUTEX_HELD(&port->fp_mutex));

    /*
     * Power down request followed by a DDI_SUSPEND should
     * never happen; If it does return DDI_SUCCESS
     */
    if (port->fp_soft_state & FP_SOFT_SUSPEND) {
        port->fp_soft_state |= FP_SOFT_POWER_DOWN;
        return (DDI_SUCCESS);
    }

    /*
     * If the port is already power suspended, there
     * is nothing else to do, So return DDI_SUCCESS,
     */
    if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
        return (DDI_SUCCESS);
    }

    /*
     * Check if an unsolicited callback or state change handling
     * is in progress.  If true, fail the PM suspend operation.
     * But don't print a message unless the verbosity of the
     * driver desires otherwise.
     */
    if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
        (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
        FP_TRACE(FP_NHEAD2(9, 0),
            "Unsolicited callback in progress: Failing power down");
        return (DDI_FAILURE);
    }

    /*
     * Check if the FC port thread is busy
     */
    if (port->fp_job_head) {
        FP_TRACE(FP_NHEAD2(9, 0),
            "FC port thread is busy: Failing power down");
        return (DDI_FAILURE);
    }
    port->fp_soft_state |= FP_SOFT_POWER_DOWN;

    /*
     * check if the ULPs are ready for power down
     */
    mutex_exit(&port->fp_mutex);
    if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
        &modlinkage) != FC_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
        mutex_exit(&port->fp_mutex);

        /*
         * Power back up the obedient ULPs that went down
         */
        fp_attach_ulps(port, FC_CMD_POWER_UP);

        FP_TRACE(FP_NHEAD2(9, 0),
            "ULP(s) busy, detach_ulps failed. Failing power down");
        mutex_enter(&port->fp_mutex);
        return (DDI_FAILURE);
    }
    mutex_enter(&port->fp_mutex);

    fp_suspend_all(port);

    return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
    int index;
    struct pwwn_hash *head;
    fc_remote_port_t *pd;

    ASSERT(MUTEX_HELD(&port->fp_mutex));

    if (port->fp_wait_tid != 0) {
        timeout_id_t tid;

        tid = port->fp_wait_tid;
        port->fp_wait_tid = (timeout_id_t)NULL;
        mutex_exit(&port->fp_mutex);
        (void) untimeout(tid);
        mutex_enter(&port->fp_mutex);
        port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
    }

    if (port->fp_offline_tid) {
        timeout_id_t tid;

        tid = port->fp_offline_tid;
        port->fp_offline_tid = (timeout_id_t)NULL;
        mutex_exit(&port->fp_mutex);
        (void) untimeout(tid);
        mutex_enter(&port->fp_mutex);
        port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
    }
    mutex_exit(&port->fp_mutex);
    port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
    mutex_enter(&port->fp_mutex);

    /*
     * Mark all devices as OLD, and reset the LOGIN state as well
     * (this will force the ULPs to perform a LOGIN after calling
     * fc_portgetmap() during RESUME/PM_RESUME)
     */
    for (index = 0; index < pwwn_table_size; index++) {
        head = &port->fp_pwwn_table[index];
        pd = head->pwwn_head;
        while (pd != NULL) {
            mutex_enter(&pd->pd_mutex);
            fp_remote_port_offline(pd);
            fctl_delist_did_table(port, pd);
            pd->pd_state = PORT_DEVICE_VALID;
            pd->pd_login_count = 0;
            mutex_exit(&pd->pd_mutex);
            pd = pd->pd_wwn_hnext;
        }
    }
}


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
    int (*cb) (caddr_t);
    fc_packet_t *pkt;
    fp_cmd_t *cmd = (fp_cmd_t *)buf;
    fc_local_port_t *port = (fc_local_port_t *)cdarg;

    cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

    cmd->cmd_next = NULL;
    cmd->cmd_flags = 0;
    cmd->cmd_dflags = 0;
    cmd->cmd_job = NULL;
    cmd->cmd_port = port;
    pkt = &cmd->cmd_pkt;

    if (ddi_dma_alloc_handle(port->fp_fca_dip,
        port->fp_fca_tran->fca_dma_attr, cb, NULL,
        &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
        return (-1);
    }

    if (ddi_dma_alloc_handle(port->fp_fca_dip,
        port->fp_fca_tran->fca_dma_attr, cb, NULL,
        &pkt->pkt_resp_dma) != DDI_SUCCESS) {
        ddi_dma_free_handle(&pkt->pkt_cmd_dma);
        return (-1);
    }

    pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
    pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
        pkt->pkt_data_cookie_cnt = 0;
    pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
        pkt->pkt_data_cookie = NULL;
    pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

    return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
    fp_cmd_t *cmd = (fp_cmd_t *)buf;
    fc_packet_t *pkt;

    pkt = &cmd->cmd_pkt;
    if (pkt->pkt_cmd_dma) {
        ddi_dma_free_handle(&pkt->pkt_cmd_dma);
    }

    if (pkt->pkt_resp_dma) {
        ddi_dma_free_handle(&pkt->pkt_resp_dma);
    }
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery and a system's inability to allocate memory
 * or DVMA resources while performing some of these critical ELSs
 * cause a lot of problem.  While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers.  So it
 * is desirable to have a framework support to reserve a fragment
 * of DVMA.  So until this is fixed the correct way, the suffering
 * is huge whenever a LIP happens at a time DVMA resources are
 * drained out completely - So an attempt needs to be made to
 * KM_SLEEP while requesting for these resources, hoping that
 * the requests won't hang forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call.  This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 */

static fp_cmd_t *
fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
    fc_remote_port_t *pd)
{
    int rval;
    ulong_t real_len;
    fp_cmd_t *cmd;
    fc_packet_t *pkt;
    int (*cb) (caddr_t);
    ddi_dma_cookie_t pkt_cookie;
    ddi_dma_cookie_t *cp;
    uint32_t cnt;

    ASSERT(!MUTEX_HELD(&port->fp_mutex));

    cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

    cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
    if (cmd == NULL) {
        return (cmd);
    }

    cmd->cmd_ulp_pkt = NULL;
    cmd->cmd_flags = 0;
    pkt = &cmd->cmd_pkt;
    ASSERT(cmd->cmd_dflags == 0);

    pkt->pkt_datalen = 0;
    pkt->pkt_data = NULL;
    pkt->pkt_state = 0;
    pkt->pkt_action = 0;
    pkt->pkt_reason = 0;
    pkt->pkt_expln = 0;

    /*
     * Init pkt_pd with the given pointer; this must be done _before_
     * the call to fc_ulp_init_packet().
     */
1993 */ 1994 pkt->pkt_pd = pd; 1995 1996 /* Now call the FCA driver to init its private, per-packet fields */ 1997 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1998 goto alloc_pkt_failed; 1999 } 2000 2001 if (cmd_len) { 2002 ASSERT(pkt->pkt_cmd_dma != NULL); 2003 2004 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2005 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2006 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2007 &pkt->pkt_cmd_acc); 2008 2009 if (rval != DDI_SUCCESS) { 2010 goto alloc_pkt_failed; 2011 } 2012 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2013 2014 if (real_len < cmd_len) { 2015 goto alloc_pkt_failed; 2016 } 2017 2018 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2019 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2020 DDI_DMA_CONSISTENT, cb, NULL, 2021 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2022 2023 if (rval != DDI_DMA_MAPPED) { 2024 goto alloc_pkt_failed; 2025 } 2026 2027 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2028 2029 if (pkt->pkt_cmd_cookie_cnt > 2030 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2031 goto alloc_pkt_failed; 2032 } 2033 2034 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2035 2036 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2037 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2038 KM_NOSLEEP); 2039 2040 if (cp == NULL) { 2041 goto alloc_pkt_failed; 2042 } 2043 2044 *cp = pkt_cookie; 2045 cp++; 2046 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2047 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2048 *cp = pkt_cookie; 2049 } 2050 } 2051 2052 if (resp_len) { 2053 ASSERT(pkt->pkt_resp_dma != NULL); 2054 2055 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2056 port->fp_fca_tran->fca_acc_attr, 2057 DDI_DMA_CONSISTENT, cb, NULL, 2058 (caddr_t *)&pkt->pkt_resp, &real_len, 2059 &pkt->pkt_resp_acc); 2060 2061 if (rval != DDI_SUCCESS) { 2062 goto alloc_pkt_failed; 2063 } 2064 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2065 2066 if (real_len < resp_len) { 2067 goto alloc_pkt_failed; 2068 } 2069 2070 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2071 pkt->pkt_resp, real_len, DDI_DMA_READ | 2072 DDI_DMA_CONSISTENT, cb, NULL, 2073 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2074 2075 if (rval != DDI_DMA_MAPPED) { 2076 goto alloc_pkt_failed; 2077 } 2078 2079 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2080 2081 if (pkt->pkt_resp_cookie_cnt > 2082 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2083 goto alloc_pkt_failed; 2084 } 2085 2086 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2087 2088 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2089 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2090 KM_NOSLEEP); 2091 2092 if (cp == NULL) { 2093 goto alloc_pkt_failed; 2094 } 2095 2096 *cp = pkt_cookie; 2097 cp++; 2098 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2099 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2100 *cp = pkt_cookie; 2101 } 2102 } 2103 2104 pkt->pkt_cmdlen = cmd_len; 2105 pkt->pkt_rsplen = resp_len; 2106 pkt->pkt_ulp_private = cmd; 2107 2108 return (cmd); 2109 2110 alloc_pkt_failed: 2111 2112 fp_free_dma(cmd); 2113 2114 if (pkt->pkt_cmd_cookie != NULL) { 2115 kmem_free(pkt->pkt_cmd_cookie, 2116 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2117 pkt->pkt_cmd_cookie = NULL; 2118 } 2119 2120 if (pkt->pkt_resp_cookie != NULL) { 2121 kmem_free(pkt->pkt_resp_cookie, 2122 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2123 pkt->pkt_resp_cookie = NULL; 2124 } 2125 2126 kmem_cache_free(port->fp_pkt_cache, cmd); 2127 2128 return 
(NULL); 2129 } 2130 2131 2132 /* 2133 * Free FC packet 2134 */ 2135 static void 2136 fp_free_pkt(fp_cmd_t *cmd) 2137 { 2138 fc_local_port_t *port; 2139 fc_packet_t *pkt; 2140 2141 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2142 2143 cmd->cmd_next = NULL; 2144 cmd->cmd_job = NULL; 2145 pkt = &cmd->cmd_pkt; 2146 pkt->pkt_ulp_private = 0; 2147 pkt->pkt_tran_flags = 0; 2148 pkt->pkt_tran_type = 0; 2149 port = cmd->cmd_port; 2150 2151 if (pkt->pkt_cmd_cookie != NULL) { 2152 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2153 sizeof (ddi_dma_cookie_t)); 2154 pkt->pkt_cmd_cookie = NULL; 2155 } 2156 2157 if (pkt->pkt_resp_cookie != NULL) { 2158 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2159 sizeof (ddi_dma_cookie_t)); 2160 pkt->pkt_resp_cookie = NULL; 2161 } 2162 2163 fp_free_dma(cmd); 2164 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2165 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2166 } 2167 2168 2169 /* 2170 * Release DVMA resources 2171 */ 2172 static void 2173 fp_free_dma(fp_cmd_t *cmd) 2174 { 2175 fc_packet_t *pkt = &cmd->cmd_pkt; 2176 2177 pkt->pkt_cmdlen = 0; 2178 pkt->pkt_rsplen = 0; 2179 pkt->pkt_tran_type = 0; 2180 pkt->pkt_tran_flags = 0; 2181 2182 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2183 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2184 } 2185 2186 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2187 if (pkt->pkt_cmd_acc) { 2188 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2189 } 2190 } 2191 2192 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2193 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2194 } 2195 2196 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2197 if (pkt->pkt_resp_acc) { 2198 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2199 } 2200 } 2201 cmd->cmd_dflags = 0; 2202 } 2203 2204 2205 /* 2206 * Dedicated thread to perform various activities. One thread for 2207 * each fc_local_port_t (driver soft state) instance. 2208 * Note, this effectively works out to one thread for each local 2209 * port, but there are also some Solaris taskq threads in use on a per-local 2210 * port basis; these also need to be taken into consideration. 2211 */ 2212 static void 2213 fp_job_handler(fc_local_port_t *port) 2214 { 2215 int rval; 2216 uint32_t *d_id; 2217 fc_remote_port_t *pd; 2218 job_request_t *job; 2219 2220 #ifndef __lock_lint 2221 /* 2222 * Solaris-internal stuff for proper operation of kernel threads 2223 * with Solaris CPR. 2224 */ 2225 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2226 callb_generic_cpr, "fp_job_handler"); 2227 #endif 2228 2229 2230 /* Loop forever waiting for work to do */ 2231 for (;;) { 2232 2233 mutex_enter(&port->fp_mutex); 2234 2235 /* 2236 * Sleep if no work to do right now, or if we want 2237 * to suspend or power-down. 2238 */ 2239 while (port->fp_job_head == NULL || 2240 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2241 FP_SOFT_SUSPEND))) { 2242 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2243 cv_wait(&port->fp_cv, &port->fp_mutex); 2244 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2245 } 2246 2247 /* 2248 * OK, we've just been woken up, so retrieve the next entry 2249 * from the head of the job queue for this local port. 2250 */ 2251 job = fctl_deque_job(port); 2252 2253 /* 2254 * Handle all the fp driver's supported job codes here 2255 * in this big honkin' switch. 2256 */ 2257 switch (job->job_code) { 2258 case JOB_PORT_SHUTDOWN: 2259 /* 2260 * fp_port_shutdown() is only called from here. This 2261 * will prepare the local port instance (softstate) 2262 * for detaching. 
This cancels timeout callbacks, 2263 * executes LOGOs with remote ports, cleans up tables, 2264 * and deallocates data structs. 2265 */ 2266 fp_port_shutdown(port, job); 2267 2268 /* 2269 * This will exit the job thread. 2270 */ 2271 #ifndef __lock_lint 2272 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2273 #else 2274 mutex_exit(&port->fp_mutex); 2275 #endif 2276 fctl_jobdone(job); 2277 thread_exit(); 2278 2279 /* NOTREACHED */ 2280 2281 case JOB_ATTACH_ULP: { 2282 /* 2283 * This job is spawned in response to a ULP calling 2284 * fc_ulp_add(). 2285 */ 2286 2287 boolean_t do_attach_ulps = B_TRUE; 2288 2289 /* 2290 * If fp is detaching, we don't want to call 2291 * fp_startup_done as this asynchronous 2292 * notification may interfere with the re-attach. 2293 */ 2294 2295 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2296 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2297 do_attach_ulps = B_FALSE; 2298 } else { 2299 /* 2300 * We are going to force the transport 2301 * to attach to the ULPs, so set 2302 * fp_ulp_attach. This will keep any 2303 * potential detach from occurring until 2304 * we are done. 2305 */ 2306 port->fp_ulp_attach = 1; 2307 } 2308 2309 mutex_exit(&port->fp_mutex); 2310 2311 /* 2312 * NOTE: Since we just dropped the mutex, there is now 2313 * a race window where the fp_soft_state check above 2314 * could change here. This race is covered because an 2315 * additional check was added in the functions hidden 2316 * under fp_startup_done(). 2317 */ 2318 if (do_attach_ulps == B_TRUE) { 2319 /* 2320 * This goes thru a bit of a convoluted call 2321 * chain before spawning off a DDI taskq 2322 * request to perform the actual attach 2323 * operations. Blocking can occur at a number 2324 * of points. 2325 */ 2326 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2327 } 2328 job->job_result = FC_SUCCESS; 2329 fctl_jobdone(job); 2330 break; 2331 } 2332 2333 case JOB_ULP_NOTIFY: { 2334 /* 2335 * Pass state change notifications up to any/all 2336 * registered ULPs. 2337 */ 2338 uint32_t statec; 2339 2340 statec = job->job_ulp_listlen; 2341 if (statec == FC_STATE_RESET_REQUESTED) { 2342 port->fp_last_task = port->fp_task; 2343 port->fp_task = FP_TASK_OFFLINE; 2344 fp_port_offline(port, 0); 2345 port->fp_task = port->fp_last_task; 2346 port->fp_last_task = FP_TASK_IDLE; 2347 } 2348 2349 if (--port->fp_statec_busy == 0) { 2350 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2351 } 2352 2353 mutex_exit(&port->fp_mutex); 2354 2355 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2356 fctl_jobdone(job); 2357 break; 2358 } 2359 2360 case JOB_PLOGI_ONE: 2361 /* 2362 * Issue a PLOGI to a single remote port. Multiple 2363 * PLOGIs to different remote ports may occur in 2364 * parallel. 2365 * This can create the fc_remote_port_t if it does not 2366 * already exist. 
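		 *
		 * (On switched/fabric topologies, the code below first tries
		 * to build the fc_remote_port_t from the name server, via
		 * fp_create_remote_port_by_ns(), before attempting the PLOGI.)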
2367 */ 2368 2369 mutex_exit(&port->fp_mutex); 2370 d_id = (uint32_t *)job->job_private; 2371 pd = fctl_get_remote_port_by_did(port, *d_id); 2372 2373 if (pd) { 2374 mutex_enter(&pd->pd_mutex); 2375 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2376 pd->pd_login_count++; 2377 mutex_exit(&pd->pd_mutex); 2378 job->job_result = FC_SUCCESS; 2379 fctl_jobdone(job); 2380 break; 2381 } 2382 mutex_exit(&pd->pd_mutex); 2383 } else { 2384 mutex_enter(&port->fp_mutex); 2385 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2386 mutex_exit(&port->fp_mutex); 2387 pd = fp_create_remote_port_by_ns(port, 2388 *d_id, KM_SLEEP); 2389 if (pd == NULL) { 2390 job->job_result = FC_FAILURE; 2391 fctl_jobdone(job); 2392 break; 2393 } 2394 } else { 2395 mutex_exit(&port->fp_mutex); 2396 } 2397 } 2398 2399 job->job_flags |= JOB_TYPE_FP_ASYNC; 2400 job->job_counter = 1; 2401 2402 rval = fp_port_login(port, *d_id, job, 2403 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2404 2405 if (rval != FC_SUCCESS) { 2406 job->job_result = rval; 2407 fctl_jobdone(job); 2408 } 2409 break; 2410 2411 case JOB_LOGO_ONE: { 2412 /* 2413 * Issue a PLOGO to a single remote port. Multiple 2414 * PLOGOs to different remote ports may occur in 2415 * parallel. 2416 */ 2417 fc_remote_port_t *pd; 2418 2419 #ifndef __lock_lint 2420 ASSERT(job->job_counter > 0); 2421 #endif 2422 2423 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2424 2425 mutex_enter(&pd->pd_mutex); 2426 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2427 mutex_exit(&pd->pd_mutex); 2428 job->job_result = FC_LOGINREQ; 2429 mutex_exit(&port->fp_mutex); 2430 fctl_jobdone(job); 2431 break; 2432 } 2433 if (pd->pd_login_count > 1) { 2434 pd->pd_login_count--; 2435 mutex_exit(&pd->pd_mutex); 2436 job->job_result = FC_SUCCESS; 2437 mutex_exit(&port->fp_mutex); 2438 fctl_jobdone(job); 2439 break; 2440 } 2441 mutex_exit(&pd->pd_mutex); 2442 mutex_exit(&port->fp_mutex); 2443 job->job_flags |= JOB_TYPE_FP_ASYNC; 2444 (void) fp_logout(port, pd, job); 2445 break; 2446 } 2447 2448 case JOB_FCIO_LOGIN: 2449 /* 2450 * PLOGI initiated at ioctl request. 2451 */ 2452 mutex_exit(&port->fp_mutex); 2453 job->job_result = 2454 fp_fcio_login(port, job->job_private, job); 2455 fctl_jobdone(job); 2456 break; 2457 2458 case JOB_FCIO_LOGOUT: 2459 /* 2460 * PLOGO initiated at ioctl request. 
2461 */ 2462 mutex_exit(&port->fp_mutex); 2463 job->job_result = 2464 fp_fcio_logout(port, job->job_private, job); 2465 fctl_jobdone(job); 2466 break; 2467 2468 case JOB_PORT_GETMAP: 2469 case JOB_PORT_GETMAP_PLOGI_ALL: { 2470 port->fp_last_task = port->fp_task; 2471 port->fp_task = FP_TASK_GETMAP; 2472 2473 switch (port->fp_topology) { 2474 case FC_TOP_PRIVATE_LOOP: 2475 job->job_counter = 1; 2476 2477 fp_get_loopmap(port, job); 2478 mutex_exit(&port->fp_mutex); 2479 fp_jobwait(job); 2480 fctl_fillout_map(port, 2481 (fc_portmap_t **)job->job_private, 2482 (uint32_t *)job->job_arg, 1, 0, 0); 2483 fctl_jobdone(job); 2484 mutex_enter(&port->fp_mutex); 2485 break; 2486 2487 case FC_TOP_PUBLIC_LOOP: 2488 case FC_TOP_FABRIC: 2489 mutex_exit(&port->fp_mutex); 2490 job->job_counter = 1; 2491 2492 job->job_result = fp_ns_getmap(port, 2493 job, (fc_portmap_t **)job->job_private, 2494 (uint32_t *)job->job_arg, 2495 FCTL_GAN_START_ID); 2496 fctl_jobdone(job); 2497 mutex_enter(&port->fp_mutex); 2498 break; 2499 2500 case FC_TOP_PT_PT: 2501 mutex_exit(&port->fp_mutex); 2502 fctl_fillout_map(port, 2503 (fc_portmap_t **)job->job_private, 2504 (uint32_t *)job->job_arg, 1, 0, 0); 2505 fctl_jobdone(job); 2506 mutex_enter(&port->fp_mutex); 2507 break; 2508 2509 default: 2510 mutex_exit(&port->fp_mutex); 2511 fctl_jobdone(job); 2512 mutex_enter(&port->fp_mutex); 2513 break; 2514 } 2515 port->fp_task = port->fp_last_task; 2516 port->fp_last_task = FP_TASK_IDLE; 2517 mutex_exit(&port->fp_mutex); 2518 break; 2519 } 2520 2521 case JOB_PORT_OFFLINE: { 2522 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2523 2524 port->fp_last_task = port->fp_task; 2525 port->fp_task = FP_TASK_OFFLINE; 2526 2527 if (port->fp_statec_busy > 2) { 2528 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2529 fp_port_offline(port, 0); 2530 if (--port->fp_statec_busy == 0) { 2531 port->fp_soft_state &= 2532 ~FP_SOFT_IN_STATEC_CB; 2533 } 2534 } else { 2535 fp_port_offline(port, 1); 2536 } 2537 2538 port->fp_task = port->fp_last_task; 2539 port->fp_last_task = FP_TASK_IDLE; 2540 2541 mutex_exit(&port->fp_mutex); 2542 2543 fctl_jobdone(job); 2544 break; 2545 } 2546 2547 case JOB_PORT_STARTUP: { 2548 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2549 if (port->fp_statec_busy > 1) { 2550 mutex_exit(&port->fp_mutex); 2551 break; 2552 } 2553 mutex_exit(&port->fp_mutex); 2554 2555 FP_TRACE(FP_NHEAD2(9, rval), 2556 "Topology discovery failed"); 2557 break; 2558 } 2559 2560 /* 2561 * Attempt building device handles in case 2562 * of private Loop. 
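			 *
			 * (fp_get_loopmap(), below, primes job_counter with
			 * the LILP map length and issues a PLOGI to every
			 * AL_PA in the map that still needs one; the
			 * fp_jobwait() that follows blocks until all of
			 * those logins have completed.)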
2563 */ 2564 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2565 job->job_counter = 1; 2566 2567 fp_get_loopmap(port, job); 2568 mutex_exit(&port->fp_mutex); 2569 fp_jobwait(job); 2570 mutex_enter(&port->fp_mutex); 2571 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2572 ASSERT(port->fp_total_devices == 0); 2573 port->fp_total_devices = 2574 port->fp_dev_count; 2575 } 2576 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2577 /* 2578 * Hack to avoid state changes going up early 2579 */ 2580 port->fp_statec_busy++; 2581 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2582 2583 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2584 fp_fabric_online(port, job); 2585 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2586 } 2587 mutex_exit(&port->fp_mutex); 2588 fctl_jobdone(job); 2589 break; 2590 } 2591 2592 case JOB_PORT_ONLINE: { 2593 char *newtop; 2594 char *oldtop; 2595 uint32_t old_top; 2596 2597 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2598 2599 /* 2600 * Bail out early if there are a lot of 2601 * state changes in the pipeline 2602 */ 2603 if (port->fp_statec_busy > 1) { 2604 --port->fp_statec_busy; 2605 mutex_exit(&port->fp_mutex); 2606 fctl_jobdone(job); 2607 break; 2608 } 2609 2610 switch (old_top = port->fp_topology) { 2611 case FC_TOP_PRIVATE_LOOP: 2612 oldtop = "Private Loop"; 2613 break; 2614 2615 case FC_TOP_PUBLIC_LOOP: 2616 oldtop = "Public Loop"; 2617 break; 2618 2619 case FC_TOP_PT_PT: 2620 oldtop = "Point to Point"; 2621 break; 2622 2623 case FC_TOP_FABRIC: 2624 oldtop = "Fabric"; 2625 break; 2626 2627 default: 2628 oldtop = NULL; 2629 break; 2630 } 2631 2632 port->fp_last_task = port->fp_task; 2633 port->fp_task = FP_TASK_ONLINE; 2634 2635 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2636 2637 port->fp_task = port->fp_last_task; 2638 port->fp_last_task = FP_TASK_IDLE; 2639 2640 if (port->fp_statec_busy > 1) { 2641 --port->fp_statec_busy; 2642 mutex_exit(&port->fp_mutex); 2643 break; 2644 } 2645 2646 port->fp_state = FC_STATE_OFFLINE; 2647 2648 FP_TRACE(FP_NHEAD2(9, rval), 2649 "Topology discovery failed"); 2650 2651 if (--port->fp_statec_busy == 0) { 2652 port->fp_soft_state &= 2653 ~FP_SOFT_IN_STATEC_CB; 2654 } 2655 2656 if (port->fp_offline_tid == NULL) { 2657 port->fp_offline_tid = 2658 timeout(fp_offline_timeout, 2659 (caddr_t)port, fp_offline_ticks); 2660 } 2661 2662 mutex_exit(&port->fp_mutex); 2663 break; 2664 } 2665 2666 switch (port->fp_topology) { 2667 case FC_TOP_PRIVATE_LOOP: 2668 newtop = "Private Loop"; 2669 break; 2670 2671 case FC_TOP_PUBLIC_LOOP: 2672 newtop = "Public Loop"; 2673 break; 2674 2675 case FC_TOP_PT_PT: 2676 newtop = "Point to Point"; 2677 break; 2678 2679 case FC_TOP_FABRIC: 2680 newtop = "Fabric"; 2681 break; 2682 2683 default: 2684 newtop = NULL; 2685 break; 2686 } 2687 2688 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2689 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2690 "Change in FC Topology old = %s new = %s", 2691 oldtop, newtop); 2692 } 2693 2694 switch (port->fp_topology) { 2695 case FC_TOP_PRIVATE_LOOP: { 2696 int orphan = (old_top == FC_TOP_FABRIC || 2697 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2698 2699 mutex_exit(&port->fp_mutex); 2700 fp_loop_online(port, job, orphan); 2701 break; 2702 } 2703 2704 case FC_TOP_PUBLIC_LOOP: 2705 /* FALLTHROUGH */ 2706 case FC_TOP_FABRIC: 2707 fp_fabric_online(port, job); 2708 mutex_exit(&port->fp_mutex); 2709 break; 2710 2711 case FC_TOP_PT_PT: 2712 fp_p2p_online(port, job); 2713 mutex_exit(&port->fp_mutex); 2714 break; 2715 2716 default: 2717 if (--port->fp_statec_busy != 0) { 2718 /* 2719 * Watch curiously at what the next 2720 * state transition can do. 2721 */ 2722 mutex_exit(&port->fp_mutex); 2723 break; 2724 } 2725 2726 FP_TRACE(FP_NHEAD2(9, 0), 2727 "Topology Unknown, Offlining the port.."); 2728 2729 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2730 port->fp_state = FC_STATE_OFFLINE; 2731 2732 if (port->fp_offline_tid == NULL) { 2733 port->fp_offline_tid = 2734 timeout(fp_offline_timeout, 2735 (caddr_t)port, fp_offline_ticks); 2736 } 2737 mutex_exit(&port->fp_mutex); 2738 break; 2739 } 2740 2741 mutex_enter(&port->fp_mutex); 2742 2743 port->fp_task = port->fp_last_task; 2744 port->fp_last_task = FP_TASK_IDLE; 2745 2746 mutex_exit(&port->fp_mutex); 2747 2748 fctl_jobdone(job); 2749 break; 2750 } 2751 2752 case JOB_PLOGI_GROUP: { 2753 mutex_exit(&port->fp_mutex); 2754 fp_plogi_group(port, job); 2755 break; 2756 } 2757 2758 case JOB_UNSOL_REQUEST: { 2759 mutex_exit(&port->fp_mutex); 2760 fp_handle_unsol_buf(port, 2761 (fc_unsol_buf_t *)job->job_private, job); 2762 fctl_dealloc_job(job); 2763 break; 2764 } 2765 2766 case JOB_NS_CMD: { 2767 fctl_ns_req_t *ns_cmd; 2768 2769 mutex_exit(&port->fp_mutex); 2770 2771 job->job_flags |= JOB_TYPE_FP_ASYNC; 2772 ns_cmd = (fctl_ns_req_t *)job->job_private; 2773 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2774 ns_cmd->ns_cmd_code > NS_DA_ID) { 2775 job->job_result = FC_BADCMD; 2776 fctl_jobdone(job); 2777 break; 2778 } 2779 2780 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2781 if (ns_cmd->ns_pd != NULL) { 2782 job->job_result = FC_BADOBJECT; 2783 fctl_jobdone(job); 2784 break; 2785 } 2786 2787 job->job_counter = 1; 2788 2789 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2790 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2791 2792 if (rval != FC_SUCCESS) { 2793 job->job_result = rval; 2794 fctl_jobdone(job); 2795 } 2796 break; 2797 } 2798 job->job_result = FC_SUCCESS; 2799 job->job_counter = 1; 2800 2801 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2802 if (rval != FC_SUCCESS) { 2803 fctl_jobdone(job); 2804 } 2805 break; 2806 } 2807 2808 case JOB_LINK_RESET: { 2809 la_wwn_t *pwwn; 2810 uint32_t topology; 2811 2812 pwwn = (la_wwn_t *)job->job_private; 2813 ASSERT(pwwn != NULL); 2814 2815 topology = port->fp_topology; 2816 mutex_exit(&port->fp_mutex); 2817 2818 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2819 topology == FC_TOP_PRIVATE_LOOP) { 2820 job->job_flags |= JOB_TYPE_FP_ASYNC; 2821 rval = port->fp_fca_tran->fca_reset( 2822 port->fp_fca_handle, FC_FCA_LINK_RESET); 2823 job->job_result = rval; 2824 fp_jobdone(job); 2825 } else { 2826 ASSERT((job->job_flags & 2827 JOB_TYPE_FP_ASYNC) == 0); 2828 2829 if (FC_IS_TOP_SWITCH(topology)) { 2830 rval = fp_remote_lip(port, pwwn, 2831 KM_SLEEP, job); 2832 } else { 2833 rval = FC_FAILURE; 2834 } 2835 if (rval != FC_SUCCESS) { 2836 job->job_result = rval; 2837 } 2838 fctl_jobdone(job); 2839 } 2840 break; 2841 } 2842 2843 default: 2844 mutex_exit(&port->fp_mutex); 2845 job->job_result = FC_BADCMD; 2846 fctl_jobdone(job); 2847 break; 2848 } 2849 } 2850 /* NOTREACHED */ 2851 } 2852 2853 2854 /* 2855 * Perform FC port bring up initialization 2856 */ 2857 static int 2858 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2859 { 2860 int rval; 2861 uint32_t state; 2862 uint32_t src_id; 2863 fc_lilpmap_t *lilp_map; 2864 2865 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2866 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2867 2868 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2869 " port=%p, job=%p", port, job); 2870 2871 port->fp_topology = FC_TOP_UNKNOWN; 2872 port->fp_port_id.port_id = 0; 2873 state = FC_PORT_STATE_MASK(port->fp_state); 2874 2875 if (state == FC_STATE_OFFLINE) { 2876 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2877 job->job_result = FC_OFFLINE; 2878 mutex_exit(&port->fp_mutex); 2879 fctl_jobdone(job); 2880 mutex_enter(&port->fp_mutex); 2881 return (FC_OFFLINE); 2882 } 2883 2884 if (state == FC_STATE_LOOP) { 2885 port->fp_port_type.port_type = FC_NS_PORT_NL; 2886 mutex_exit(&port->fp_mutex); 2887 2888 lilp_map = &port->fp_lilp_map; 2889 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2890 job->job_result = FC_FAILURE; 2891 fctl_jobdone(job); 2892 2893 FP_TRACE(FP_NHEAD1(9, rval), 2894 "LILP map Invalid or not present"); 2895 mutex_enter(&port->fp_mutex); 2896 return (FC_FAILURE); 2897 } 2898 2899 if (lilp_map->lilp_length == 0) { 2900 job->job_result = FC_NO_MAP; 2901 fctl_jobdone(job); 2902 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2903 "LILP map length zero"); 2904 mutex_enter(&port->fp_mutex); 2905 return (FC_NO_MAP); 2906 } 2907 src_id = lilp_map->lilp_myalpa & 0xFF; 2908 } else { 2909 fc_remote_port_t *pd; 2910 fc_fca_pm_t pm; 2911 fc_fca_p2p_info_t p2p_info; 2912 int pd_recepient; 2913 2914 /* 2915 * Get P2P remote port info if possible 2916 */ 2917 bzero((caddr_t)&pm, sizeof (pm)); 2918 2919 pm.pm_cmd_flags = FC_FCA_PM_READ; 2920 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2921 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2922 pm.pm_data_buf = (caddr_t)&p2p_info; 2923 2924 rval = port->fp_fca_tran->fca_port_manage( 2925 port->fp_fca_handle, &pm); 2926 2927 if (rval == FC_SUCCESS) { 2928 port->fp_port_id.port_id = p2p_info.fca_d_id; 2929 port->fp_port_type.port_type = FC_NS_PORT_N; 2930 port->fp_topology = FC_TOP_PT_PT; 2931 port->fp_total_devices = 1; 2932 pd_recepient = fctl_wwn_cmp( 2933 &port->fp_service_params.nport_ww_name, 2934 &p2p_info.pwwn) < 0 ? 
2935	    PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2936			mutex_exit(&port->fp_mutex);
2937			pd = fctl_create_remote_port(port,
2938			    &p2p_info.nwwn,
2939			    &p2p_info.pwwn,
2940			    p2p_info.d_id,
2941			    pd_recepient, KM_NOSLEEP);
2942			FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2943			    " P2P port=%p pd=%p", port, pd);
2944			mutex_enter(&port->fp_mutex);
2945			return (FC_SUCCESS);
2946		}
2947		port->fp_port_type.port_type = FC_NS_PORT_N;
2948		mutex_exit(&port->fp_mutex);
2949		src_id = 0;
2950	}
2951
2952	job->job_counter = 1;
2953	job->job_result = FC_SUCCESS;
2954
2955	if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2956	    KM_SLEEP)) != FC_SUCCESS) {
2957		port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2958		job->job_result = FC_FAILURE;
2959		fctl_jobdone(job);
2960
2961		mutex_enter(&port->fp_mutex);
2962		if (port->fp_statec_busy <= 1) {
2963			mutex_exit(&port->fp_mutex);
2964			fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2965			    "Couldn't transport FLOGI");
2966			mutex_enter(&port->fp_mutex);
2967		}
2968		return (FC_FAILURE);
2969	}
2970
2971	fp_jobwait(job);
2972
2973	mutex_enter(&port->fp_mutex);
2974	if (job->job_result == FC_SUCCESS) {
2975		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2976			mutex_exit(&port->fp_mutex);
2977			fp_ns_init(port, job, KM_SLEEP);
2978			mutex_enter(&port->fp_mutex);
2979		}
2980	} else {
2981		if (state == FC_STATE_LOOP) {
2982			port->fp_topology = FC_TOP_PRIVATE_LOOP;
2983			port->fp_port_id.port_id =
2984			    port->fp_lilp_map.lilp_myalpa & 0xFF;
2985		}
2986	}
2987
2988	FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2989	    port, job);
2990
2991	return (FC_SUCCESS);
2992	}
2993
2994
2995	/*
2996	 * Perform ULP invocations following FC port startup
2997	 */
2998	/* ARGSUSED */
2999	static void
3000	fp_startup_done(opaque_t arg, uchar_t result)
3001	{
3002		fc_local_port_t *port = arg;
3003
3004		fp_attach_ulps(port, FC_CMD_ATTACH);
3005
3006		FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3007	}
3008
3009
3010	/*
3011	 * Perform ULP port attach
3012	 */
3013	static void
3014	fp_ulp_port_attach(void *arg)
3015	{
3016		fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3017		fc_local_port_t *port = att->att_port;
3018
3019		FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3020		    " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3021
3022		fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3023
3024		if (att->att_need_pm_idle == B_TRUE) {
3025			fctl_idle_port(port);
3026		}
3027
3028		FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3029		    " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3030
3031		mutex_enter(&att->att_port->fp_mutex);
3032		att->att_port->fp_ulp_attach = 0;
3033
3034		port->fp_task = port->fp_last_task;
3035		port->fp_last_task = FP_TASK_IDLE;
3036
3037		cv_signal(&att->att_port->fp_attach_cv);
3038
3039		mutex_exit(&att->att_port->fp_mutex);
3040
3041		kmem_free(att, sizeof (fp_soft_attach_t));
3042	}
3043
3044	/*
3045	 * Entry point to funnel all requests down to FCAs
3046	 */
3047	static int
3048	fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3049	{
3050		int rval;
3051
3052		mutex_enter(&port->fp_mutex);
3053		if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3054		    (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3055		    FC_STATE_OFFLINE))) {
3056			/*
3057			 * More than one state change is pending at this point.
3058			 * Since state changes are processed serially, any work
3059			 * on behalf of the current one should simply be failed
3060			 * so that processing can move on to the next one.
3061			 */
3062 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3063 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3064 if (cmd->cmd_job) { 3065 /* 3066 * A state change that is going to be invalidated 3067 * by another one already in the port driver's queue 3068 * need not go up to all ULPs. This will minimize 3069 * needless processing and ripples in ULP modules 3070 */ 3071 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3072 } 3073 mutex_exit(&port->fp_mutex); 3074 return (FC_STATEC_BUSY); 3075 } 3076 3077 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3078 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3079 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3080 mutex_exit(&port->fp_mutex); 3081 3082 return (FC_OFFLINE); 3083 } 3084 mutex_exit(&port->fp_mutex); 3085 3086 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3087 if (rval != FC_SUCCESS) { 3088 if (rval == FC_TRAN_BUSY) { 3089 cmd->cmd_retry_interval = fp_retry_delay; 3090 rval = fp_retry_cmd(&cmd->cmd_pkt); 3091 if (rval == FC_FAILURE) { 3092 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3093 } 3094 } 3095 } else { 3096 mutex_enter(&port->fp_mutex); 3097 port->fp_out_fpcmds++; 3098 mutex_exit(&port->fp_mutex); 3099 } 3100 3101 return (rval); 3102 } 3103 3104 3105 /* 3106 * Each time a timeout kicks in, walk the wait queue, decrement the 3107 * the retry_interval, when the retry_interval becomes less than 3108 * or equal to zero, re-transport the command: If the re-transport 3109 * fails with BUSY, enqueue the command in the wait queue. 3110 * 3111 * In order to prevent looping forever because of commands enqueued 3112 * from within this function itself, save the current tail pointer 3113 * (in cur_tail) and exit the loop after serving this command. 3114 */ 3115 static void 3116 fp_resendcmd(void *port_handle) 3117 { 3118 int rval; 3119 fc_local_port_t *port; 3120 fp_cmd_t *cmd; 3121 fp_cmd_t *cur_tail; 3122 3123 port = port_handle; 3124 mutex_enter(&port->fp_mutex); 3125 cur_tail = port->fp_wait_tail; 3126 mutex_exit(&port->fp_mutex); 3127 3128 while ((cmd = fp_deque_cmd(port)) != NULL) { 3129 cmd->cmd_retry_interval -= fp_retry_ticker; 3130 /* Check if we are detaching */ 3131 if (port->fp_soft_state & 3132 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3133 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3134 cmd->cmd_pkt.pkt_reason = 0; 3135 fp_iodone(cmd); 3136 } else if (cmd->cmd_retry_interval <= 0) { 3137 rval = cmd->cmd_transport(port->fp_fca_handle, 3138 &cmd->cmd_pkt); 3139 3140 if (rval != FC_SUCCESS) { 3141 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3142 if (--cmd->cmd_retry_count) { 3143 fp_enque_cmd(port, cmd); 3144 if (cmd == cur_tail) { 3145 break; 3146 } 3147 continue; 3148 } 3149 cmd->cmd_pkt.pkt_state = 3150 FC_PKT_TRAN_BSY; 3151 } else { 3152 cmd->cmd_pkt.pkt_state = 3153 FC_PKT_TRAN_ERROR; 3154 } 3155 cmd->cmd_pkt.pkt_reason = 0; 3156 fp_iodone(cmd); 3157 } else { 3158 mutex_enter(&port->fp_mutex); 3159 port->fp_out_fpcmds++; 3160 mutex_exit(&port->fp_mutex); 3161 } 3162 } else { 3163 fp_enque_cmd(port, cmd); 3164 } 3165 3166 if (cmd == cur_tail) { 3167 break; 3168 } 3169 } 3170 3171 mutex_enter(&port->fp_mutex); 3172 if (port->fp_wait_head) { 3173 timeout_id_t tid; 3174 3175 mutex_exit(&port->fp_mutex); 3176 tid = timeout(fp_resendcmd, (caddr_t)port, 3177 fp_retry_ticks); 3178 mutex_enter(&port->fp_mutex); 3179 port->fp_wait_tid = tid; 3180 } else { 3181 port->fp_wait_tid = NULL; 3182 } 3183 mutex_exit(&port->fp_mutex); 3184 } 3185 3186 3187 /* 3188 * Handle Local, Fabric, N_Port, Transport 
(whatever that means) BUSY here. 3189 * 3190 * Yes, as you can see below, cmd_retry_count is used here too. That means 3191 * the retries for BUSY are less if there were transport failures (transport 3192 * failure means fca_transport failure). The goal is not to exceed overall 3193 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3194 * 3195 * Return Values: 3196 * FC_SUCCESS 3197 * FC_FAILURE 3198 */ 3199 static int 3200 fp_retry_cmd(fc_packet_t *pkt) 3201 { 3202 fp_cmd_t *cmd; 3203 3204 cmd = pkt->pkt_ulp_private; 3205 3206 if (--cmd->cmd_retry_count) { 3207 fp_enque_cmd(cmd->cmd_port, cmd); 3208 return (FC_SUCCESS); 3209 } else { 3210 return (FC_FAILURE); 3211 } 3212 } 3213 3214 3215 /* 3216 * Queue up FC packet for deferred retry 3217 */ 3218 static void 3219 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3220 { 3221 timeout_id_t tid; 3222 3223 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3224 3225 #ifdef DEBUG 3226 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3227 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3228 #endif 3229 3230 mutex_enter(&port->fp_mutex); 3231 if (port->fp_wait_tail) { 3232 port->fp_wait_tail->cmd_next = cmd; 3233 port->fp_wait_tail = cmd; 3234 } else { 3235 ASSERT(port->fp_wait_head == NULL); 3236 port->fp_wait_head = port->fp_wait_tail = cmd; 3237 if (port->fp_wait_tid == NULL) { 3238 mutex_exit(&port->fp_mutex); 3239 tid = timeout(fp_resendcmd, (caddr_t)port, 3240 fp_retry_ticks); 3241 mutex_enter(&port->fp_mutex); 3242 port->fp_wait_tid = tid; 3243 } 3244 } 3245 mutex_exit(&port->fp_mutex); 3246 } 3247 3248 3249 /* 3250 * Handle all RJT codes 3251 */ 3252 static int 3253 fp_handle_reject(fc_packet_t *pkt) 3254 { 3255 int rval = FC_FAILURE; 3256 uchar_t next_class; 3257 fp_cmd_t *cmd; 3258 fc_local_port_t *port; 3259 3260 cmd = pkt->pkt_ulp_private; 3261 port = cmd->cmd_port; 3262 3263 switch (pkt->pkt_state) { 3264 case FC_PKT_FABRIC_RJT: 3265 case FC_PKT_NPORT_RJT: 3266 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3267 next_class = fp_get_nextclass(cmd->cmd_port, 3268 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3269 3270 if (next_class == FC_TRAN_CLASS_INVALID) { 3271 return (rval); 3272 } 3273 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3274 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3275 3276 rval = fp_sendcmd(cmd->cmd_port, cmd, 3277 cmd->cmd_port->fp_fca_handle); 3278 3279 if (rval != FC_SUCCESS) { 3280 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3281 } 3282 } 3283 break; 3284 3285 case FC_PKT_LS_RJT: 3286 case FC_PKT_BA_RJT: 3287 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3288 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3289 cmd->cmd_retry_interval = fp_retry_delay; 3290 rval = fp_retry_cmd(pkt); 3291 } 3292 break; 3293 3294 case FC_PKT_FS_RJT: 3295 if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) { 3296 cmd->cmd_retry_interval = fp_retry_delay; 3297 rval = fp_retry_cmd(pkt); 3298 } 3299 break; 3300 3301 case FC_PKT_LOCAL_RJT: 3302 if (pkt->pkt_reason == FC_REASON_QFULL) { 3303 cmd->cmd_retry_interval = fp_retry_delay; 3304 rval = fp_retry_cmd(pkt); 3305 } 3306 break; 3307 3308 default: 3309 FP_TRACE(FP_NHEAD1(1, 0), 3310 "fp_handle_reject(): Invalid pkt_state"); 3311 break; 3312 } 3313 3314 return (rval); 3315 } 3316 3317 3318 /* 3319 * Return the next class of service supported by the FCA 3320 */ 3321 static uchar_t 3322 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3323 { 3324 uchar_t next_class; 3325 3326 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3327 3328 switch (cur_class) { 3329 case 
FC_TRAN_CLASS_INVALID: 3330 if (port->fp_cos & FC_NS_CLASS1) { 3331 next_class = FC_TRAN_CLASS1; 3332 break; 3333 } 3334 /* FALLTHROUGH */ 3335 3336 case FC_TRAN_CLASS1: 3337 if (port->fp_cos & FC_NS_CLASS2) { 3338 next_class = FC_TRAN_CLASS2; 3339 break; 3340 } 3341 /* FALLTHROUGH */ 3342 3343 case FC_TRAN_CLASS2: 3344 if (port->fp_cos & FC_NS_CLASS3) { 3345 next_class = FC_TRAN_CLASS3; 3346 break; 3347 } 3348 /* FALLTHROUGH */ 3349 3350 case FC_TRAN_CLASS3: 3351 default: 3352 next_class = FC_TRAN_CLASS_INVALID; 3353 break; 3354 } 3355 3356 return (next_class); 3357 } 3358 3359 3360 /* 3361 * Determine if a class of service is supported by the FCA 3362 */ 3363 static int 3364 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3365 { 3366 int rval; 3367 3368 switch (tran_class) { 3369 case FC_TRAN_CLASS1: 3370 if (cos & FC_NS_CLASS1) { 3371 rval = FC_SUCCESS; 3372 } else { 3373 rval = FC_FAILURE; 3374 } 3375 break; 3376 3377 case FC_TRAN_CLASS2: 3378 if (cos & FC_NS_CLASS2) { 3379 rval = FC_SUCCESS; 3380 } else { 3381 rval = FC_FAILURE; 3382 } 3383 break; 3384 3385 case FC_TRAN_CLASS3: 3386 if (cos & FC_NS_CLASS3) { 3387 rval = FC_SUCCESS; 3388 } else { 3389 rval = FC_FAILURE; 3390 } 3391 break; 3392 3393 default: 3394 rval = FC_FAILURE; 3395 break; 3396 } 3397 3398 return (rval); 3399 } 3400 3401 3402 /* 3403 * Dequeue FC packet for retry 3404 */ 3405 static fp_cmd_t * 3406 fp_deque_cmd(fc_local_port_t *port) 3407 { 3408 fp_cmd_t *cmd; 3409 3410 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3411 3412 mutex_enter(&port->fp_mutex); 3413 3414 if (port->fp_wait_head == NULL) { 3415 /* 3416 * To avoid races, NULL the fp_wait_tid as 3417 * we are about to exit the timeout thread. 3418 */ 3419 port->fp_wait_tid = NULL; 3420 mutex_exit(&port->fp_mutex); 3421 return (NULL); 3422 } 3423 3424 cmd = port->fp_wait_head; 3425 port->fp_wait_head = cmd->cmd_next; 3426 cmd->cmd_next = NULL; 3427 3428 if (port->fp_wait_head == NULL) { 3429 port->fp_wait_tail = NULL; 3430 } 3431 mutex_exit(&port->fp_mutex); 3432 3433 return (cmd); 3434 } 3435 3436 3437 /* 3438 * Wait for job completion 3439 */ 3440 static void 3441 fp_jobwait(job_request_t *job) 3442 { 3443 sema_p(&job->job_port_sema); 3444 } 3445 3446 3447 /* 3448 * Convert FC packet state to FC errno 3449 */ 3450 int 3451 fp_state_to_rval(uchar_t state) 3452 { 3453 int count; 3454 3455 for (count = 0; count < sizeof (fp_xlat) / 3456 sizeof (fp_xlat[0]); count++) { 3457 if (fp_xlat[count].xlat_state == state) { 3458 return (fp_xlat[count].xlat_rval); 3459 } 3460 } 3461 3462 return (FC_FAILURE); 3463 } 3464 3465 3466 /* 3467 * For Synchronous I/O requests, the caller is 3468 * expected to do fctl_jobdone(if necessary) 3469 * 3470 * We want to preserve at least one failure in the 3471 * job_result if it happens. 
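 *
 * Concretely, each completing command folds its packet state into the
 * shared job result only while that result is still FC_SUCCESS,
 * roughly:
 *
 *	if (job->job_result == FC_SUCCESS)
 *		job->job_result = fp_state_to_rval(pkt_state);
 *
 * so the first failure recorded against a multi-command job is the
 * one the waiter eventually sees.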
3472 * 3473 */ 3474 static void 3475 fp_iodone(fp_cmd_t *cmd) 3476 { 3477 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3478 job_request_t *job = cmd->cmd_job; 3479 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3480 3481 ASSERT(job != NULL); 3482 ASSERT(cmd->cmd_port != NULL); 3483 ASSERT(&cmd->cmd_pkt != NULL); 3484 3485 mutex_enter(&job->job_mutex); 3486 if (job->job_result == FC_SUCCESS) { 3487 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3488 } 3489 mutex_exit(&job->job_mutex); 3490 3491 if (pd) { 3492 mutex_enter(&pd->pd_mutex); 3493 pd->pd_flags = PD_IDLE; 3494 mutex_exit(&pd->pd_mutex); 3495 } 3496 3497 if (ulp_pkt) { 3498 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3499 FP_IS_PKT_ERROR(ulp_pkt)) { 3500 fc_local_port_t *port; 3501 fc_remote_node_t *node; 3502 3503 port = cmd->cmd_port; 3504 3505 mutex_enter(&pd->pd_mutex); 3506 pd->pd_state = PORT_DEVICE_INVALID; 3507 pd->pd_ref_count--; 3508 node = pd->pd_remote_nodep; 3509 mutex_exit(&pd->pd_mutex); 3510 3511 ASSERT(node != NULL); 3512 ASSERT(port != NULL); 3513 3514 if (fctl_destroy_remote_port(port, pd) == 0) { 3515 fctl_destroy_remote_node(node); 3516 } 3517 3518 ulp_pkt->pkt_pd = NULL; 3519 } 3520 3521 ulp_pkt->pkt_comp(ulp_pkt); 3522 } 3523 3524 fp_free_pkt(cmd); 3525 fp_jobdone(job); 3526 } 3527 3528 3529 /* 3530 * Job completion handler 3531 */ 3532 static void 3533 fp_jobdone(job_request_t *job) 3534 { 3535 mutex_enter(&job->job_mutex); 3536 ASSERT(job->job_counter > 0); 3537 3538 if (--job->job_counter != 0) { 3539 mutex_exit(&job->job_mutex); 3540 return; 3541 } 3542 3543 if (job->job_ulp_pkts) { 3544 ASSERT(job->job_ulp_listlen > 0); 3545 kmem_free(job->job_ulp_pkts, 3546 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3547 } 3548 3549 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3550 mutex_exit(&job->job_mutex); 3551 fctl_jobdone(job); 3552 } else { 3553 mutex_exit(&job->job_mutex); 3554 sema_v(&job->job_port_sema); 3555 } 3556 } 3557 3558 3559 /* 3560 * Try to perform shutdown of a port during a detach. No return 3561 * value since the detach should not fail because the port shutdown 3562 * failed. 3563 */ 3564 static void 3565 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3566 { 3567 int index; 3568 int count; 3569 int flags; 3570 fp_cmd_t *cmd; 3571 struct pwwn_hash *head; 3572 fc_remote_port_t *pd; 3573 3574 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3575 3576 job->job_result = FC_SUCCESS; 3577 3578 if (port->fp_taskq) { 3579 /* 3580 * We must release the mutex here to ensure that other 3581 * potential jobs can complete their processing. Many 3582 * also need this mutex. 3583 */ 3584 mutex_exit(&port->fp_mutex); 3585 taskq_wait(port->fp_taskq); 3586 mutex_enter(&port->fp_mutex); 3587 } 3588 3589 if (port->fp_offline_tid) { 3590 timeout_id_t tid; 3591 3592 tid = port->fp_offline_tid; 3593 port->fp_offline_tid = NULL; 3594 mutex_exit(&port->fp_mutex); 3595 (void) untimeout(tid); 3596 mutex_enter(&port->fp_mutex); 3597 } 3598 3599 if (port->fp_wait_tid) { 3600 timeout_id_t tid; 3601 3602 tid = port->fp_wait_tid; 3603 port->fp_wait_tid = NULL; 3604 mutex_exit(&port->fp_mutex); 3605 (void) untimeout(tid); 3606 } else { 3607 mutex_exit(&port->fp_mutex); 3608 } 3609 3610 /* 3611 * While we cancel the timeout, let's also return the 3612 * the outstanding requests back to the callers. 
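 *
 * Each command pulled off the wait queue below is completed with
 * FC_OFFLINE, so any caller blocked on its job can unwind during
 * the shutdown.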
3613 */ 3614 while ((cmd = fp_deque_cmd(port)) != NULL) { 3615 ASSERT(cmd->cmd_job != NULL); 3616 cmd->cmd_job->job_result = FC_OFFLINE; 3617 fp_iodone(cmd); 3618 } 3619 3620 /* 3621 * Gracefully LOGO with all the devices logged in. 3622 */ 3623 mutex_enter(&port->fp_mutex); 3624 3625 for (count = index = 0; index < pwwn_table_size; index++) { 3626 head = &port->fp_pwwn_table[index]; 3627 pd = head->pwwn_head; 3628 while (pd != NULL) { 3629 mutex_enter(&pd->pd_mutex); 3630 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3631 count++; 3632 } 3633 mutex_exit(&pd->pd_mutex); 3634 pd = pd->pd_wwn_hnext; 3635 } 3636 } 3637 3638 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3639 flags = job->job_flags; 3640 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3641 } else { 3642 flags = 0; 3643 } 3644 if (count) { 3645 job->job_counter = count; 3646 3647 for (index = 0; index < pwwn_table_size; index++) { 3648 head = &port->fp_pwwn_table[index]; 3649 pd = head->pwwn_head; 3650 while (pd != NULL) { 3651 mutex_enter(&pd->pd_mutex); 3652 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3653 ASSERT(pd->pd_login_count > 0); 3654 /* 3655 * Force the counter to ONE in order 3656 * for us to really send LOGO els. 3657 */ 3658 pd->pd_login_count = 1; 3659 mutex_exit(&pd->pd_mutex); 3660 mutex_exit(&port->fp_mutex); 3661 (void) fp_logout(port, pd, job); 3662 mutex_enter(&port->fp_mutex); 3663 } else { 3664 mutex_exit(&pd->pd_mutex); 3665 } 3666 pd = pd->pd_wwn_hnext; 3667 } 3668 } 3669 mutex_exit(&port->fp_mutex); 3670 fp_jobwait(job); 3671 } else { 3672 mutex_exit(&port->fp_mutex); 3673 } 3674 3675 if (job->job_result != FC_SUCCESS) { 3676 FP_TRACE(FP_NHEAD1(9, 0), 3677 "Can't logout all devices. Proceeding with" 3678 " port shutdown"); 3679 job->job_result = FC_SUCCESS; 3680 } 3681 3682 fctl_destroy_all_remote_ports(port); 3683 3684 mutex_enter(&port->fp_mutex); 3685 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3686 mutex_exit(&port->fp_mutex); 3687 fp_ns_fini(port, job); 3688 } else { 3689 mutex_exit(&port->fp_mutex); 3690 } 3691 3692 if (flags) { 3693 job->job_flags = flags; 3694 } 3695 3696 mutex_enter(&port->fp_mutex); 3697 3698 } 3699 3700 3701 /* 3702 * Build the port driver's data structures based on the AL_PA list 3703 */ 3704 static void 3705 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3706 { 3707 int rval; 3708 int flag; 3709 int count; 3710 uint32_t d_id; 3711 fc_remote_port_t *pd; 3712 fc_lilpmap_t *lilp_map; 3713 3714 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3715 3716 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3717 job->job_result = FC_OFFLINE; 3718 mutex_exit(&port->fp_mutex); 3719 fp_jobdone(job); 3720 mutex_enter(&port->fp_mutex); 3721 return; 3722 } 3723 3724 if (port->fp_lilp_map.lilp_length == 0) { 3725 mutex_exit(&port->fp_mutex); 3726 job->job_result = FC_NO_MAP; 3727 fp_jobdone(job); 3728 mutex_enter(&port->fp_mutex); 3729 return; 3730 } 3731 mutex_exit(&port->fp_mutex); 3732 3733 lilp_map = &port->fp_lilp_map; 3734 job->job_counter = lilp_map->lilp_length; 3735 3736 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3737 flag = FP_CMD_PLOGI_RETAIN; 3738 } else { 3739 flag = FP_CMD_PLOGI_DONT_CARE; 3740 } 3741 3742 for (count = 0; count < lilp_map->lilp_length; count++) { 3743 d_id = lilp_map->lilp_alpalist[count]; 3744 3745 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3746 fp_jobdone(job); 3747 continue; 3748 } 3749 3750 pd = fctl_get_remote_port_by_did(port, d_id); 3751 if (pd) { 3752 mutex_enter(&pd->pd_mutex); 3753 if (flag == FP_CMD_PLOGI_DONT_CARE || 3754 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3755 mutex_exit(&pd->pd_mutex); 3756 fp_jobdone(job); 3757 continue; 3758 } 3759 mutex_exit(&pd->pd_mutex); 3760 } 3761 3762 rval = fp_port_login(port, d_id, job, flag, 3763 KM_SLEEP, pd, NULL); 3764 if (rval != FC_SUCCESS) { 3765 fp_jobdone(job); 3766 } 3767 } 3768 3769 mutex_enter(&port->fp_mutex); 3770 } 3771 3772 3773 /* 3774 * Perform loop ONLINE processing 3775 */ 3776 static void 3777 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3778 { 3779 int count; 3780 int rval; 3781 uint32_t d_id; 3782 uint32_t listlen; 3783 fc_lilpmap_t *lilp_map; 3784 fc_remote_port_t *pd; 3785 fc_portmap_t *changelist; 3786 3787 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3788 3789 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3790 port, job); 3791 3792 lilp_map = &port->fp_lilp_map; 3793 3794 if (lilp_map->lilp_length) { 3795 mutex_enter(&port->fp_mutex); 3796 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3797 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3798 mutex_exit(&port->fp_mutex); 3799 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3800 } else { 3801 mutex_exit(&port->fp_mutex); 3802 } 3803 3804 job->job_counter = lilp_map->lilp_length; 3805 3806 for (count = 0; count < lilp_map->lilp_length; count++) { 3807 d_id = lilp_map->lilp_alpalist[count]; 3808 3809 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3810 fp_jobdone(job); 3811 continue; 3812 } 3813 3814 pd = fctl_get_remote_port_by_did(port, d_id); 3815 if (pd != NULL) { 3816 #ifdef DEBUG 3817 mutex_enter(&pd->pd_mutex); 3818 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3819 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3820 } 3821 mutex_exit(&pd->pd_mutex); 3822 #endif 3823 fp_jobdone(job); 3824 continue; 3825 } 3826 3827 rval = fp_port_login(port, d_id, job, 3828 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3829 3830 if (rval != FC_SUCCESS) { 3831 fp_jobdone(job); 3832 } 3833 } 3834 fp_jobwait(job); 3835 } 3836 listlen = 0; 3837 changelist = NULL; 3838 3839 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3840 mutex_enter(&port->fp_mutex); 3841 ASSERT(port->fp_statec_busy > 0); 3842 if (port->fp_statec_busy == 1) { 3843 mutex_exit(&port->fp_mutex); 3844 fctl_fillout_map(port, &changelist, &listlen, 3845 1, 0, orphan); 3846 3847 mutex_enter(&port->fp_mutex); 3848 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3849 ASSERT(port->fp_total_devices == 0); 3850 port->fp_total_devices = port->fp_dev_count; 3851 } 3852 } else { 3853 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3854 } 3855 mutex_exit(&port->fp_mutex); 3856 } 3857 3858 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3859 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3860 listlen, listlen, KM_SLEEP); 3861 } else { 3862 mutex_enter(&port->fp_mutex); 3863 if (--port->fp_statec_busy == 0) { 3864 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3865 } 3866 ASSERT(changelist == NULL && listlen == 0); 3867 mutex_exit(&port->fp_mutex); 3868 } 3869 3870 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3871 port, job); 3872 } 3873 3874 3875 /* 3876 * Get an Arbitrated Loop map from the underlying FCA 3877 */ 3878 static int 3879 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3880 { 3881 int rval; 3882 3883 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3884 port, lilp_map); 3885 3886 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3887 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3888 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3889 3890 if (rval != FC_SUCCESS) { 3891 rval = FC_NO_MAP; 3892 } else if (lilp_map->lilp_length == 0 && 3893 (lilp_map->lilp_magic >= MAGIC_LISM && 3894 lilp_map->lilp_magic < MAGIC_LIRP)) { 3895 uchar_t lilp_length; 3896 3897 /* 3898 * Since the map length is zero, provide all 3899 * the valid AL_PAs for NL_ports discovery. 3900 */ 3901 lilp_length = sizeof (fp_valid_alpas) / 3902 sizeof (fp_valid_alpas[0]); 3903 lilp_map->lilp_length = lilp_length; 3904 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3905 lilp_length); 3906 } else { 3907 rval = fp_validate_lilp_map(lilp_map); 3908 3909 if (rval == FC_SUCCESS) { 3910 mutex_enter(&port->fp_mutex); 3911 port->fp_total_devices = lilp_map->lilp_length - 1; 3912 mutex_exit(&port->fp_mutex); 3913 } 3914 } 3915 3916 mutex_enter(&port->fp_mutex); 3917 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3918 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3919 mutex_exit(&port->fp_mutex); 3920 3921 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3922 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3923 FP_TRACE(FP_NHEAD1(9, 0), 3924 "FCA reset failed after LILP map was found" 3925 " to be invalid"); 3926 } 3927 } else if (rval == FC_SUCCESS) { 3928 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3929 mutex_exit(&port->fp_mutex); 3930 } else { 3931 mutex_exit(&port->fp_mutex); 3932 } 3933 3934 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3935 lilp_map); 3936 3937 return (rval); 3938 } 3939 3940 3941 /* 3942 * Perform Fabric Login: 3943 * 3944 * Return Values: 3945 * FC_SUCCESS 3946 * FC_FAILURE 3947 * FC_NOMEM 3948 * FC_TRANSPORT_ERROR 3949 * and a lot others defined in fc_error.h 3950 */ 3951 static int 3952 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3953 int flag, int sleep) 3954 { 3955 int rval; 3956 fp_cmd_t *cmd; 3957 uchar_t class; 3958 3959 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3960 3961 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3962 port, job); 3963 3964 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3965 if (class == FC_TRAN_CLASS_INVALID) { 3966 return (FC_ELS_BAD); 3967 } 3968 3969 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3970 sizeof (la_els_logi_t), sleep, NULL); 3971 if (cmd == NULL) { 3972 return (FC_NOMEM); 3973 } 3974 3975 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3976 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3977 cmd->cmd_flags = flag; 3978 cmd->cmd_retry_count = fp_retry_count; 3979 cmd->cmd_ulp_pkt = NULL; 3980 3981 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3982 job, LA_ELS_FLOGI); 3983 3984 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3985 if (rval != FC_SUCCESS) { 3986 fp_free_pkt(cmd); 3987 } 3988 3989 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3990 port, job); 3991 3992 return (rval); 3993 } 3994 3995 3996 /* 3997 * In some scenarios such as private loop device discovery period 3998 * the fc_remote_port_t data structure isn't allocated. The allocation 3999 * is done when the PLOGI is successful. 
In some other scenarios 4000 * such as Fabric topology, the fc_remote_port_t is already created 4001 * and initialized with appropriate values (as the NS provides 4002 * them) 4003 */ 4004 static int 4005 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4006 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4007 { 4008 uchar_t class; 4009 fp_cmd_t *cmd; 4010 uint32_t src_id; 4011 fc_remote_port_t *tmp_pd; 4012 int relogin; 4013 int found = 0; 4014 4015 #ifdef DEBUG 4016 if (pd == NULL) { 4017 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4018 } 4019 #endif 4020 ASSERT(job->job_counter > 0); 4021 4022 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4023 if (class == FC_TRAN_CLASS_INVALID) { 4024 return (FC_ELS_BAD); 4025 } 4026 4027 mutex_enter(&port->fp_mutex); 4028 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4029 mutex_exit(&port->fp_mutex); 4030 4031 relogin = 1; 4032 if (tmp_pd) { 4033 mutex_enter(&tmp_pd->pd_mutex); 4034 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4035 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4036 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4037 relogin = 0; 4038 } 4039 mutex_exit(&tmp_pd->pd_mutex); 4040 } 4041 4042 if (!relogin) { 4043 mutex_enter(&tmp_pd->pd_mutex); 4044 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4045 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4046 } 4047 mutex_exit(&tmp_pd->pd_mutex); 4048 4049 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4050 sizeof (la_els_adisc_t), sleep, tmp_pd); 4051 if (cmd == NULL) { 4052 return (FC_NOMEM); 4053 } 4054 4055 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4056 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4057 cmd->cmd_flags = cmd_flag; 4058 cmd->cmd_retry_count = fp_retry_count; 4059 cmd->cmd_ulp_pkt = ulp_pkt; 4060 4061 mutex_enter(&port->fp_mutex); 4062 mutex_enter(&tmp_pd->pd_mutex); 4063 fp_adisc_init(cmd, job); 4064 mutex_exit(&tmp_pd->pd_mutex); 4065 mutex_exit(&port->fp_mutex); 4066 4067 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4068 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4069 4070 } else { 4071 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4072 sizeof (la_els_logi_t), sleep, pd); 4073 if (cmd == NULL) { 4074 return (FC_NOMEM); 4075 } 4076 4077 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4078 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4079 cmd->cmd_flags = cmd_flag; 4080 cmd->cmd_retry_count = fp_retry_count; 4081 cmd->cmd_ulp_pkt = ulp_pkt; 4082 4083 mutex_enter(&port->fp_mutex); 4084 src_id = port->fp_port_id.port_id; 4085 mutex_exit(&port->fp_mutex); 4086 4087 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4088 job, LA_ELS_PLOGI); 4089 } 4090 4091 if (pd) { 4092 mutex_enter(&pd->pd_mutex); 4093 pd->pd_flags = PD_ELS_IN_PROGRESS; 4094 mutex_exit(&pd->pd_mutex); 4095 } 4096 4097 /* npiv check to make sure we don't log into ourself */ 4098 if (relogin && 4099 ((port->fp_npiv_type == FC_NPIV_PORT) || 4100 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4101 if ((d_id & 0xffff00) == 4102 (port->fp_port_id.port_id & 0xffff00)) { 4103 found = 1; 4104 } 4105 } 4106 4107 if (found || 4108 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4109 if (found) { 4110 fc_packet_t *pkt = &cmd->cmd_pkt; 4111 pkt->pkt_state = FC_PKT_NPORT_RJT; 4112 } 4113 if (pd) { 4114 mutex_enter(&pd->pd_mutex); 4115 pd->pd_flags = PD_IDLE; 4116 mutex_exit(&pd->pd_mutex); 4117 } 4118 4119 if (ulp_pkt) { 4120 fc_packet_t *pkt = &cmd->cmd_pkt; 4121 4122 ulp_pkt->pkt_state = pkt->pkt_state; 4123 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4124 ulp_pkt->pkt_action = pkt->pkt_action; 4125 ulp_pkt->pkt_expln = pkt->pkt_expln; 4126 } 4127 4128 fp_iodone(cmd); 4129 } 4130 4131 return (FC_SUCCESS); 4132 } 4133 4134 4135 /* 4136 * Register the LOGIN parameters with a port device 4137 */ 4138 static void 4139 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4140 la_els_logi_t *acc, uchar_t class) 4141 { 4142 fc_remote_node_t *node; 4143 4144 ASSERT(pd != NULL); 4145 4146 mutex_enter(&pd->pd_mutex); 4147 node = pd->pd_remote_nodep; 4148 if (pd->pd_login_count == 0) { 4149 pd->pd_login_count++; 4150 } 4151 4152 if (handle) { 4153 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4154 (uint8_t *)&acc->common_service, 4155 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4156 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4157 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4158 DDI_DEV_AUTOINCR); 4159 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4160 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4161 DDI_DEV_AUTOINCR); 4162 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4163 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4164 DDI_DEV_AUTOINCR); 4165 } else { 4166 pd->pd_csp = acc->common_service; 4167 pd->pd_clsp1 = acc->class_1; 4168 pd->pd_clsp2 = acc->class_2; 4169 pd->pd_clsp3 = acc->class_3; 4170 } 4171 4172 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4173 pd->pd_login_class = class; 4174 mutex_exit(&pd->pd_mutex); 4175 4176 #ifndef __lock_lint 4177 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4178 pd->pd_port_id.port_id) == pd); 4179 #endif 4180 4181 mutex_enter(&node->fd_mutex); 4182 if (handle) { 4183 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4184 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4185 DDI_DEV_AUTOINCR); 4186 } else { 4187 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4188 } 4189 mutex_exit(&node->fd_mutex); 4190 } 4191 4192 4193 /* 4194 * Mark the remote port as OFFLINE 4195 */ 4196 static void 4197 fp_remote_port_offline(fc_remote_port_t *pd) 4198 { 4199 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4200 if (pd->pd_login_count && 4201 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4202 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4203 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4204 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4205 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4206 pd->pd_login_class = 0; 4207 } 4208 pd->pd_type = PORT_DEVICE_OLD; 4209 pd->pd_flags = PD_IDLE; 4210 fctl_tc_reset(&pd->pd_logo_tc); 4211 } 4212 4213 4214 /* 4215 * Deregistration of a port device 4216 */ 4217 static void 4218 fp_unregister_login(fc_remote_port_t *pd) 4219 { 4220 fc_remote_node_t *node; 4221 4222 ASSERT(pd != NULL); 4223 4224 mutex_enter(&pd->pd_mutex); 4225 pd->pd_login_count = 0; 4226 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4227 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4228 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4229 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4230 4231 pd->pd_state = PORT_DEVICE_VALID; 4232 pd->pd_login_class = 0; 4233 node = pd->pd_remote_nodep; 4234 mutex_exit(&pd->pd_mutex); 4235 4236 mutex_enter(&node->fd_mutex); 4237 bzero(node->fd_vv, sizeof (node->fd_vv)); 4238 mutex_exit(&node->fd_mutex); 4239 } 4240 4241 4242 /* 4243 * Handle OFFLINE state of an FCA port 4244 */ 4245 static void 4246 fp_port_offline(fc_local_port_t *port, int notify) 4247 { 4248 int index; 4249 int statec; 4250 
timeout_id_t tid; 4251 struct pwwn_hash *head; 4252 fc_remote_port_t *pd; 4253 4254 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4255 4256 for (index = 0; index < pwwn_table_size; index++) { 4257 head = &port->fp_pwwn_table[index]; 4258 pd = head->pwwn_head; 4259 while (pd != NULL) { 4260 mutex_enter(&pd->pd_mutex); 4261 fp_remote_port_offline(pd); 4262 fctl_delist_did_table(port, pd); 4263 mutex_exit(&pd->pd_mutex); 4264 pd = pd->pd_wwn_hnext; 4265 } 4266 } 4267 port->fp_total_devices = 0; 4268 4269 statec = 0; 4270 if (notify) { 4271 /* 4272 * Decrement the statec busy counter as we 4273 * are almost done with handling the state 4274 * change 4275 */ 4276 ASSERT(port->fp_statec_busy > 0); 4277 if (--port->fp_statec_busy == 0) { 4278 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4279 } 4280 mutex_exit(&port->fp_mutex); 4281 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4282 0, 0, KM_SLEEP); 4283 mutex_enter(&port->fp_mutex); 4284 4285 if (port->fp_statec_busy) { 4286 statec++; 4287 } 4288 } else if (port->fp_statec_busy > 1) { 4289 statec++; 4290 } 4291 4292 if ((tid = port->fp_offline_tid) != NULL) { 4293 mutex_exit(&port->fp_mutex); 4294 (void) untimeout(tid); 4295 mutex_enter(&port->fp_mutex); 4296 } 4297 4298 if (!statec) { 4299 port->fp_offline_tid = timeout(fp_offline_timeout, 4300 (caddr_t)port, fp_offline_ticks); 4301 } 4302 } 4303 4304 4305 /* 4306 * Offline devices and send up a state change notification to ULPs 4307 */ 4308 static void 4309 fp_offline_timeout(void *port_handle) 4310 { 4311 int ret; 4312 fc_local_port_t *port = port_handle; 4313 uint32_t listlen = 0; 4314 fc_portmap_t *changelist = NULL; 4315 4316 mutex_enter(&port->fp_mutex); 4317 4318 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4319 (port->fp_soft_state & 4320 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4321 port->fp_dev_count == 0 || port->fp_statec_busy) { 4322 port->fp_offline_tid = NULL; 4323 mutex_exit(&port->fp_mutex); 4324 return; 4325 } 4326 4327 mutex_exit(&port->fp_mutex); 4328 4329 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4330 4331 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4332 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4333 FC_FCA_CORE)) != FC_SUCCESS) { 4334 FP_TRACE(FP_NHEAD1(9, ret), 4335 "Failed to force adapter dump"); 4336 } else { 4337 FP_TRACE(FP_NHEAD1(9, 0), 4338 "Forced adapter dump successfully"); 4339 } 4340 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4341 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4342 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4343 FP_TRACE(FP_NHEAD1(9, ret), 4344 "Failed to force adapter dump and reset"); 4345 } else { 4346 FP_TRACE(FP_NHEAD1(9, 0), 4347 "Forced adapter dump and reset successfully"); 4348 } 4349 } 4350 4351 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4352 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4353 listlen, listlen, KM_SLEEP); 4354 4355 mutex_enter(&port->fp_mutex); 4356 port->fp_offline_tid = NULL; 4357 mutex_exit(&port->fp_mutex); 4358 } 4359 4360 4361 /* 4362 * Perform general purpose ELS request initialization 4363 */ 4364 static void 4365 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4366 void (*comp) (), job_request_t *job) 4367 { 4368 fc_packet_t *pkt; 4369 4370 pkt = &cmd->cmd_pkt; 4371 cmd->cmd_job = job; 4372 4373 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4374 pkt->pkt_cmd_fhdr.d_id = d_id; 4375 pkt->pkt_cmd_fhdr.s_id = s_id; 4376 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4377 
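	/*
	 * The remaining header fields describe the first sequence of a
	 * new exchange; sequence initiative is passed along with the
	 * request and OX_ID/RX_ID are simply left unassigned (0xffff)
	 * here.
	 */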
pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4378 pkt->pkt_cmd_fhdr.seq_id = 0; 4379 pkt->pkt_cmd_fhdr.df_ctl = 0; 4380 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4381 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4382 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4383 pkt->pkt_cmd_fhdr.ro = 0; 4384 pkt->pkt_cmd_fhdr.rsvd = 0; 4385 pkt->pkt_comp = comp; 4386 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4387 } 4388 4389 4390 /* 4391 * Initialize PLOGI/FLOGI ELS request 4392 */ 4393 static void 4394 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4395 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4396 { 4397 ls_code_t payload; 4398 4399 fp_els_init(cmd, s_id, d_id, intr, job); 4400 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4401 4402 payload.ls_code = ls_code; 4403 payload.mbz = 0; 4404 4405 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4406 (uint8_t *)&port->fp_service_params, 4407 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4408 DDI_DEV_AUTOINCR); 4409 4410 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4411 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4412 DDI_DEV_AUTOINCR); 4413 } 4414 4415 4416 /* 4417 * Initialize LOGO ELS request 4418 */ 4419 static void 4420 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4421 { 4422 fc_local_port_t *port; 4423 fc_packet_t *pkt; 4424 la_els_logo_t payload; 4425 4426 port = pd->pd_port; 4427 pkt = &cmd->cmd_pkt; 4428 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4429 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4430 4431 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4432 fp_logo_intr, job); 4433 4434 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4435 4436 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4437 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4438 4439 payload.ls_code.ls_code = LA_ELS_LOGO; 4440 payload.ls_code.mbz = 0; 4441 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4442 payload.nport_id = port->fp_port_id; 4443 4444 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4445 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4446 } 4447 4448 /* 4449 * Initialize RNID ELS request 4450 */ 4451 static void 4452 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4453 { 4454 fc_local_port_t *port; 4455 fc_packet_t *pkt; 4456 la_els_rnid_t payload; 4457 fc_remote_port_t *pd; 4458 4459 pkt = &cmd->cmd_pkt; 4460 pd = pkt->pkt_pd; 4461 port = pd->pd_port; 4462 4463 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4464 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4465 4466 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4467 fp_rnid_intr, job); 4468 4469 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4470 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4471 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4472 4473 payload.ls_code.ls_code = LA_ELS_RNID; 4474 payload.ls_code.mbz = 0; 4475 payload.data_format = flag; 4476 4477 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4478 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4479 } 4480 4481 /* 4482 * Initialize RLS ELS request 4483 */ 4484 static void 4485 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4486 { 4487 fc_local_port_t *port; 4488 fc_packet_t *pkt; 4489 la_els_rls_t payload; 4490 fc_remote_port_t *pd; 4491 4492 pkt = &cmd->cmd_pkt; 4493 pd = pkt->pkt_pd; 4494 port = pd->pd_port; 4495 4496 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4497 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4498 4499 fp_els_init(cmd, port->fp_port_id.port_id, 
pd->pd_port_id.port_id, 4500 fp_rls_intr, job); 4501 4502 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4503 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4504 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4505 4506 payload.ls_code.ls_code = LA_ELS_RLS; 4507 payload.ls_code.mbz = 0; 4508 payload.rls_portid = port->fp_port_id; 4509 4510 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4511 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4512 } 4513 4514 4515 /* 4516 * Initialize an ADISC ELS request 4517 */ 4518 static void 4519 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4520 { 4521 fc_local_port_t *port; 4522 fc_packet_t *pkt; 4523 la_els_adisc_t payload; 4524 fc_remote_port_t *pd; 4525 4526 pkt = &cmd->cmd_pkt; 4527 pd = pkt->pkt_pd; 4528 port = pd->pd_port; 4529 4530 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4531 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4532 4533 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4534 fp_adisc_intr, job); 4535 4536 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4537 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4538 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4539 4540 payload.ls_code.ls_code = LA_ELS_ADISC; 4541 payload.ls_code.mbz = 0; 4542 payload.nport_id = port->fp_port_id; 4543 payload.port_wwn = port->fp_service_params.nport_ww_name; 4544 payload.node_wwn = port->fp_service_params.node_ww_name; 4545 payload.hard_addr = port->fp_hard_addr; 4546 4547 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4548 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4549 } 4550 4551 4552 /* 4553 * Send up a state change notification to ULPs. 4554 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4555 */ 4556 static int 4557 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4558 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4559 { 4560 fc_port_clist_t *clist; 4561 fc_remote_port_t *pd; 4562 int count; 4563 4564 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4565 4566 clist = kmem_zalloc(sizeof (*clist), sleep); 4567 if (clist == NULL) { 4568 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4569 return (FC_NOMEM); 4570 } 4571 4572 clist->clist_state = state; 4573 4574 mutex_enter(&port->fp_mutex); 4575 clist->clist_flags = port->fp_topology; 4576 mutex_exit(&port->fp_mutex); 4577 4578 clist->clist_port = (opaque_t)port; 4579 clist->clist_len = listlen; 4580 clist->clist_size = alloc_len; 4581 clist->clist_map = changelist; 4582 4583 /* 4584 * Bump the reference count of each fc_remote_port_t in this changelist. 4585 * This is necessary since these devices will be sitting in a taskq 4586 * and referenced later. When the state change notification is 4587 * complete, the reference counts will be decremented. 
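	 * Entries that are not being reported as PORT_DEVICE_INVALID are
	 * also flagged PD_GIVEN_TO_ULPS so that later cleanup knows the
	 * ULPs have seen them.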
4588 */ 4589 for (count = 0; count < clist->clist_len; count++) { 4590 pd = clist->clist_map[count].map_pd; 4591 4592 if (pd != NULL) { 4593 mutex_enter(&pd->pd_mutex); 4594 ASSERT((pd->pd_ref_count >= 0) || 4595 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4596 pd->pd_ref_count++; 4597 4598 if (clist->clist_map[count].map_state != 4599 PORT_DEVICE_INVALID) { 4600 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4601 } 4602 4603 mutex_exit(&pd->pd_mutex); 4604 } 4605 } 4606 4607 #ifdef DEBUG 4608 /* 4609 * Sanity check for presence of OLD devices in the hash lists 4610 */ 4611 if (clist->clist_size) { 4612 ASSERT(clist->clist_map != NULL); 4613 for (count = 0; count < clist->clist_len; count++) { 4614 if (clist->clist_map[count].map_state == 4615 PORT_DEVICE_INVALID) { 4616 la_wwn_t pwwn; 4617 fc_portid_t d_id; 4618 4619 pd = clist->clist_map[count].map_pd; 4620 ASSERT(pd != NULL); 4621 4622 mutex_enter(&pd->pd_mutex); 4623 pwwn = pd->pd_port_name; 4624 d_id = pd->pd_port_id; 4625 mutex_exit(&pd->pd_mutex); 4626 4627 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4628 ASSERT(pd != clist->clist_map[count].map_pd); 4629 4630 pd = fctl_get_remote_port_by_did(port, 4631 d_id.port_id); 4632 ASSERT(pd != clist->clist_map[count].map_pd); 4633 } 4634 } 4635 } 4636 #endif 4637 4638 mutex_enter(&port->fp_mutex); 4639 4640 if (state == FC_STATE_ONLINE) { 4641 if (--port->fp_statec_busy == 0) { 4642 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4643 } 4644 } 4645 mutex_exit(&port->fp_mutex); 4646 4647 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4648 clist, KM_SLEEP); 4649 4650 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4651 "state=%x, len=%d", port, state, listlen); 4652 4653 return (FC_SUCCESS); 4654 } 4655 4656 4657 /* 4658 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4659 */ 4660 static int 4661 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4662 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4663 { 4664 int ret; 4665 fc_port_clist_t *clist; 4666 4667 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4668 4669 clist = kmem_zalloc(sizeof (*clist), sleep); 4670 if (clist == NULL) { 4671 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4672 return (FC_NOMEM); 4673 } 4674 4675 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4676 4677 mutex_enter(&port->fp_mutex); 4678 clist->clist_flags = port->fp_topology; 4679 mutex_exit(&port->fp_mutex); 4680 4681 clist->clist_port = (opaque_t)port; 4682 clist->clist_len = listlen; 4683 clist->clist_size = alloc_len; 4684 clist->clist_map = changelist; 4685 4686 /* Send sysevents for target state changes */ 4687 4688 if (clist->clist_size) { 4689 int count; 4690 fc_remote_port_t *pd; 4691 4692 ASSERT(clist->clist_map != NULL); 4693 for (count = 0; count < clist->clist_len; count++) { 4694 pd = clist->clist_map[count].map_pd; 4695 4696 /* 4697 * Bump reference counts on all fc_remote_port_t 4698 * structs in this list. We don't know when the task 4699 * will fire, and we don't need these fc_remote_port_t 4700 * structs going away behind our back. 
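			 * The counts are dropped again once the ULP
			 * notification has completed, as described for
			 * fp_ulp_statec_cb() above.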
4701 */ 4702 if (pd) { 4703 mutex_enter(&pd->pd_mutex); 4704 ASSERT((pd->pd_ref_count >= 0) || 4705 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4706 pd->pd_ref_count++; 4707 mutex_exit(&pd->pd_mutex); 4708 } 4709 4710 if (clist->clist_map[count].map_state == 4711 PORT_DEVICE_VALID) { 4712 if (clist->clist_map[count].map_type == 4713 PORT_DEVICE_NEW) { 4714 /* Update our state change counter */ 4715 mutex_enter(&port->fp_mutex); 4716 port->fp_last_change++; 4717 mutex_exit(&port->fp_mutex); 4718 4719 /* Additions */ 4720 fp_log_target_event(port, 4721 ESC_SUNFC_TARGET_ADD, 4722 clist->clist_map[count].map_pwwn, 4723 clist->clist_map[count].map_did. 4724 port_id); 4725 } 4726 4727 } else if ((clist->clist_map[count].map_type == 4728 PORT_DEVICE_OLD) && 4729 (clist->clist_map[count].map_state == 4730 PORT_DEVICE_INVALID)) { 4731 /* Update our state change counter */ 4732 mutex_enter(&port->fp_mutex); 4733 port->fp_last_change++; 4734 mutex_exit(&port->fp_mutex); 4735 4736 /* 4737 * For removals, we don't decrement 4738 * pd_ref_count until after the ULP's 4739 * state change callback function has 4740 * completed. 4741 */ 4742 4743 /* Removals */ 4744 fp_log_target_event(port, 4745 ESC_SUNFC_TARGET_REMOVE, 4746 clist->clist_map[count].map_pwwn, 4747 clist->clist_map[count].map_did.port_id); 4748 } 4749 4750 if (clist->clist_map[count].map_state != 4751 PORT_DEVICE_INVALID) { 4752 /* 4753 * Indicate that the ULPs are now aware of 4754 * this device. 4755 */ 4756 4757 mutex_enter(&pd->pd_mutex); 4758 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4759 mutex_exit(&pd->pd_mutex); 4760 } 4761 4762 #ifdef DEBUG 4763 /* 4764 * Sanity check for OLD devices in the hash lists 4765 */ 4766 if (pd && clist->clist_map[count].map_state == 4767 PORT_DEVICE_INVALID) { 4768 la_wwn_t pwwn; 4769 fc_portid_t d_id; 4770 4771 mutex_enter(&pd->pd_mutex); 4772 pwwn = pd->pd_port_name; 4773 d_id = pd->pd_port_id; 4774 mutex_exit(&pd->pd_mutex); 4775 4776 /* 4777 * This overwrites the 'pd' local variable. 4778 * Beware of this if 'pd' ever gets 4779 * referenced below this block. 
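			 * The lookups that follow must not find the device
			 * being reported as removed, i.e. it should already
			 * have been delisted from both the PWWN and D_ID
			 * hash tables.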
4780 */ 4781 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4782 ASSERT(pd != clist->clist_map[count].map_pd); 4783 4784 pd = fctl_get_remote_port_by_did(port, 4785 d_id.port_id); 4786 ASSERT(pd != clist->clist_map[count].map_pd); 4787 } 4788 #endif 4789 } 4790 } 4791 4792 if (sync) { 4793 clist->clist_wait = 1; 4794 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4795 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4796 } 4797 4798 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4799 if (sync && ret) { 4800 mutex_enter(&clist->clist_mutex); 4801 while (clist->clist_wait) { 4802 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4803 } 4804 mutex_exit(&clist->clist_mutex); 4805 4806 mutex_destroy(&clist->clist_mutex); 4807 cv_destroy(&clist->clist_cv); 4808 kmem_free(clist, sizeof (*clist)); 4809 } 4810 4811 if (!ret) { 4812 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4813 "port=%p", port); 4814 kmem_free(clist->clist_map, 4815 sizeof (*(clist->clist_map)) * clist->clist_size); 4816 kmem_free(clist, sizeof (*clist)); 4817 } else { 4818 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4819 port, listlen); 4820 } 4821 4822 return (FC_SUCCESS); 4823 } 4824 4825 4826 /* 4827 * Perform PLOGI to the group of devices for ULPs 4828 */ 4829 static void 4830 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4831 { 4832 int offline; 4833 int count; 4834 int rval; 4835 uint32_t listlen; 4836 uint32_t done; 4837 uint32_t d_id; 4838 fc_remote_node_t *node; 4839 fc_remote_port_t *pd; 4840 fc_remote_port_t *tmp_pd; 4841 fc_packet_t *ulp_pkt; 4842 la_els_logi_t *els_data; 4843 ls_code_t ls_code; 4844 4845 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4846 port, job); 4847 4848 done = 0; 4849 listlen = job->job_ulp_listlen; 4850 job->job_counter = job->job_ulp_listlen; 4851 4852 mutex_enter(&port->fp_mutex); 4853 offline = (port->fp_statec_busy || 4854 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4855 mutex_exit(&port->fp_mutex); 4856 4857 for (count = 0; count < listlen; count++) { 4858 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4859 sizeof (la_els_logi_t)); 4860 4861 ulp_pkt = job->job_ulp_pkts[count]; 4862 pd = ulp_pkt->pkt_pd; 4863 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4864 4865 if (offline) { 4866 done++; 4867 4868 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4869 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4870 ulp_pkt->pkt_pd = NULL; 4871 ulp_pkt->pkt_comp(ulp_pkt); 4872 4873 job->job_ulp_pkts[count] = NULL; 4874 4875 fp_jobdone(job); 4876 continue; 4877 } 4878 4879 if (pd == NULL) { 4880 pd = fctl_get_remote_port_by_did(port, d_id); 4881 if (pd == NULL) { 4882 /* reset later */ 4883 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4884 continue; 4885 } 4886 mutex_enter(&pd->pd_mutex); 4887 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4888 mutex_exit(&pd->pd_mutex); 4889 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4890 done++; 4891 ulp_pkt->pkt_comp(ulp_pkt); 4892 job->job_ulp_pkts[count] = NULL; 4893 fp_jobdone(job); 4894 } else { 4895 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4896 mutex_exit(&pd->pd_mutex); 4897 } 4898 continue; 4899 } 4900 4901 switch (ulp_pkt->pkt_state) { 4902 case FC_PKT_ELS_IN_PROGRESS: 4903 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4904 /* FALLTHRU */ 4905 case FC_PKT_LOCAL_RJT: 4906 done++; 4907 ulp_pkt->pkt_comp(ulp_pkt); 4908 job->job_ulp_pkts[count] = NULL; 4909 fp_jobdone(job); 4910 continue; 4911 default: 4912 break; 4913 } 4914 4915 /* 4916 * Validate the pd corresponding to the d_id passed 4917 * by the ULPs 4918 */ 4919 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4920 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4921 done++; 4922 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4923 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4924 ulp_pkt->pkt_pd = NULL; 4925 ulp_pkt->pkt_comp(ulp_pkt); 4926 job->job_ulp_pkts[count] = NULL; 4927 fp_jobdone(job); 4928 continue; 4929 } 4930 4931 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4932 "port=%p, pd=%p", port, pd); 4933 4934 mutex_enter(&pd->pd_mutex); 4935 4936 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4937 done++; 4938 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4939 4940 ls_code.ls_code = LA_ELS_ACC; 4941 ls_code.mbz = 0; 4942 4943 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4944 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4945 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4946 4947 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4948 (uint8_t *)&pd->pd_csp, 4949 (uint8_t *)&els_data->common_service, 4950 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4951 4952 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4953 (uint8_t *)&pd->pd_port_name, 4954 (uint8_t *)&els_data->nport_ww_name, 4955 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4956 4957 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4958 (uint8_t *)&pd->pd_clsp1, 4959 (uint8_t *)&els_data->class_1, 4960 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4961 4962 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4963 (uint8_t *)&pd->pd_clsp2, 4964 (uint8_t *)&els_data->class_2, 4965 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4966 4967 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4968 (uint8_t *)&pd->pd_clsp3, 4969 (uint8_t *)&els_data->class_3, 4970 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4971 4972 node = pd->pd_remote_nodep; 4973 pd->pd_login_count++; 4974 pd->pd_flags = PD_IDLE; 4975 ulp_pkt->pkt_pd = pd; 4976 mutex_exit(&pd->pd_mutex); 4977 4978 mutex_enter(&node->fd_mutex); 4979 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4980 (uint8_t *)&node->fd_node_name, 4981 (uint8_t *)(&els_data->node_ww_name), 4982 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4983 4984 4985 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4986 (uint8_t *)&node->fd_vv, 4987 (uint8_t *)(&els_data->vendor_version), 4988 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4989 4990 mutex_exit(&node->fd_mutex); 4991 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4992 } else { 4993 4994 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4995 mutex_exit(&pd->pd_mutex); 4996 } 4997 4998 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 4999 ulp_pkt->pkt_comp(ulp_pkt); 5000 job->job_ulp_pkts[count] = NULL; 5001 fp_jobdone(job); 5002 } 5003 } 5004 5005 if (done == listlen) { 5006 fp_jobwait(job); 5007 fctl_jobdone(job); 5008 return; 5009 } 5010 5011 job->job_counter = listlen - done; 5012 5013 for (count = 0; count < listlen; count++) { 5014 int cmd_flags; 5015 5016 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5017 continue; 5018 } 5019 5020 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5021 5022 cmd_flags = FP_CMD_PLOGI_RETAIN; 5023 5024 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5025 ASSERT(d_id != 0); 5026 5027 pd = fctl_get_remote_port_by_did(port, d_id); 5028 5029 /* 5030 * We need to properly adjust the port device 5031 * reference counter before we assign the pd 5032 * to the ULP packets port device pointer. 5033 */ 5034 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5035 mutex_enter(&pd->pd_mutex); 5036 pd->pd_ref_count++; 5037 mutex_exit(&pd->pd_mutex); 5038 FP_TRACE(FP_NHEAD1(3, 0), 5039 "fp_plogi_group: DID = 0x%x using new pd %p \ 5040 old pd NULL\n", d_id, pd); 5041 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5042 ulp_pkt->pkt_pd != pd) { 5043 mutex_enter(&pd->pd_mutex); 5044 pd->pd_ref_count++; 5045 mutex_exit(&pd->pd_mutex); 5046 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5047 ulp_pkt->pkt_pd->pd_ref_count--; 5048 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5049 FP_TRACE(FP_NHEAD1(3, 0), 5050 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5051 d_id, ulp_pkt->pkt_pd, pd); 5052 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5053 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5054 ulp_pkt->pkt_pd->pd_ref_count--; 5055 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5056 FP_TRACE(FP_NHEAD1(3, 0), 5057 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5058 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5059 } 5060 5061 ulp_pkt->pkt_pd = pd; 5062 5063 if (pd != NULL) { 5064 mutex_enter(&pd->pd_mutex); 5065 d_id = pd->pd_port_id.port_id; 5066 pd->pd_flags = PD_ELS_IN_PROGRESS; 5067 mutex_exit(&pd->pd_mutex); 5068 } else { 5069 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5070 #ifdef DEBUG 5071 pd = fctl_get_remote_port_by_did(port, d_id); 5072 ASSERT(pd == NULL); 5073 #endif 5074 /* 5075 * In the Fabric topology, use NS to create 5076 * port device, and if that fails still try 5077 * with PLOGI - which will make yet another 5078 * attempt to create after successful PLOGI 5079 */ 5080 mutex_enter(&port->fp_mutex); 5081 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5082 mutex_exit(&port->fp_mutex); 5083 pd = fp_create_remote_port_by_ns(port, 5084 d_id, KM_SLEEP); 5085 if (pd) { 5086 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5087 5088 mutex_enter(&pd->pd_mutex); 5089 pd->pd_flags = PD_ELS_IN_PROGRESS; 5090 mutex_exit(&pd->pd_mutex); 5091 5092 FP_TRACE(FP_NHEAD1(3, 0), 5093 "fp_plogi_group;" 5094 " NS created PD port=%p, job=%p," 5095 " pd=%p", port, job, pd); 5096 } 5097 } else { 5098 mutex_exit(&port->fp_mutex); 5099 } 5100 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5101 FP_TRACE(FP_NHEAD1(3, 0), 5102 "fp_plogi_group;" 5103 "ulp_pkt's pd is NULL, get a pd %p", 5104 pd); 5105 mutex_enter(&pd->pd_mutex); 5106 
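				/* ulp_pkt is about to take a reference on this pd */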
pd->pd_ref_count++; 5107 mutex_exit(&pd->pd_mutex); 5108 } 5109 ulp_pkt->pkt_pd = pd; 5110 } 5111 5112 rval = fp_port_login(port, d_id, job, cmd_flags, 5113 KM_SLEEP, pd, ulp_pkt); 5114 5115 if (rval == FC_SUCCESS) { 5116 continue; 5117 } 5118 5119 if (rval == FC_STATEC_BUSY) { 5120 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5121 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5122 } else { 5123 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5124 } 5125 5126 if (pd) { 5127 mutex_enter(&pd->pd_mutex); 5128 pd->pd_flags = PD_IDLE; 5129 mutex_exit(&pd->pd_mutex); 5130 } 5131 5132 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5133 ASSERT(pd != NULL); 5134 5135 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5136 " PD removed; port=%p, job=%p", port, job); 5137 5138 mutex_enter(&pd->pd_mutex); 5139 pd->pd_ref_count--; 5140 node = pd->pd_remote_nodep; 5141 mutex_exit(&pd->pd_mutex); 5142 5143 ASSERT(node != NULL); 5144 5145 if (fctl_destroy_remote_port(port, pd) == 0) { 5146 fctl_destroy_remote_node(node); 5147 } 5148 ulp_pkt->pkt_pd = NULL; 5149 } 5150 ulp_pkt->pkt_comp(ulp_pkt); 5151 fp_jobdone(job); 5152 } 5153 5154 fp_jobwait(job); 5155 fctl_jobdone(job); 5156 5157 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5158 port, job); 5159 } 5160 5161 5162 /* 5163 * Name server request initialization 5164 */ 5165 static void 5166 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5167 { 5168 int rval; 5169 int count; 5170 int size; 5171 5172 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5173 5174 job->job_counter = 1; 5175 job->job_result = FC_SUCCESS; 5176 5177 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5178 KM_SLEEP, NULL, NULL); 5179 5180 if (rval != FC_SUCCESS) { 5181 mutex_enter(&port->fp_mutex); 5182 port->fp_topology = FC_TOP_NO_NS; 5183 mutex_exit(&port->fp_mutex); 5184 return; 5185 } 5186 5187 fp_jobwait(job); 5188 5189 if (job->job_result != FC_SUCCESS) { 5190 mutex_enter(&port->fp_mutex); 5191 port->fp_topology = FC_TOP_NO_NS; 5192 mutex_exit(&port->fp_mutex); 5193 return; 5194 } 5195 5196 /* 5197 * At this time, we'll do NS registration for objects in the 5198 * ns_reg_cmds (see top of this file) array. 5199 * 5200 * Each time a ULP module registers with the transport, the 5201 * appropriate fc4 bit is set fc4 types and registered with 5202 * the NS for this support. Also, ULPs and FC admin utilities 5203 * may do registration for objects like IP address, symbolic 5204 * port/node name, Initial process associator at run time. 
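	 *
	 * The loop below simply walks ns_reg_cmds[] and issues one
	 * fp_ns_reg() per object; a failed submission just drops the job
	 * counter so the job can still be waited on.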
5205 */ 5206 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5207 job->job_counter = size; 5208 job->job_result = FC_SUCCESS; 5209 5210 for (count = 0; count < size; count++) { 5211 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5212 job, 0, sleep) != FC_SUCCESS) { 5213 fp_jobdone(job); 5214 } 5215 } 5216 if (size) { 5217 fp_jobwait(job); 5218 } 5219 5220 job->job_result = FC_SUCCESS; 5221 5222 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5223 5224 if (port->fp_dev_count < FP_MAX_DEVICES) { 5225 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5226 } 5227 5228 job->job_counter = 1; 5229 5230 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5231 sleep) == FC_SUCCESS) { 5232 fp_jobwait(job); 5233 } 5234 } 5235 5236 5237 /* 5238 * Name server finish: 5239 * Unregister for RSCNs 5240 * Unregister all the host port objects in the Name Server 5241 * Perform LOGO with the NS; 5242 */ 5243 static void 5244 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5245 { 5246 fp_cmd_t *cmd; 5247 uchar_t class; 5248 uint32_t s_id; 5249 fc_packet_t *pkt; 5250 la_els_logo_t payload; 5251 5252 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5253 5254 job->job_counter = 1; 5255 5256 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5257 FC_SUCCESS) { 5258 fp_jobdone(job); 5259 } 5260 fp_jobwait(job); 5261 5262 job->job_counter = 1; 5263 5264 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5265 fp_jobdone(job); 5266 } 5267 fp_jobwait(job); 5268 5269 job->job_counter = 1; 5270 5271 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5272 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5273 pkt = &cmd->cmd_pkt; 5274 5275 mutex_enter(&port->fp_mutex); 5276 class = port->fp_ns_login_class; 5277 s_id = port->fp_port_id.port_id; 5278 payload.nport_id = port->fp_port_id; 5279 mutex_exit(&port->fp_mutex); 5280 5281 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5282 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5283 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5284 cmd->cmd_retry_count = 1; 5285 cmd->cmd_ulp_pkt = NULL; 5286 5287 if (port->fp_npiv_type == FC_NPIV_PORT) { 5288 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5289 } else { 5290 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5291 } 5292 5293 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5294 5295 payload.ls_code.ls_code = LA_ELS_LOGO; 5296 payload.ls_code.mbz = 0; 5297 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5298 5299 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5300 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5301 5302 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5303 fp_iodone(cmd); 5304 } 5305 fp_jobwait(job); 5306 } 5307 5308 5309 /* 5310 * NS Registration function. 5311 * 5312 * It should be seriously noted that FC-GS-2 currently doesn't support 5313 * an Object Registration by a D_ID other than the owner of the object. 5314 * What we are aiming at currently is to at least allow Symbolic Node/Port 5315 * Name registration for any N_Port Identifier by the host software. 5316 * 5317 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5318 * function treats the request as Host NS Object. 
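 * The S_ID placed in the registration payload is therefore taken from
 * the local port when pd is NULL, and from the given remote port
 * otherwise.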
5319 */ 5320 static int 5321 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5322 job_request_t *job, int polled, int sleep) 5323 { 5324 int rval; 5325 fc_portid_t s_id; 5326 fc_packet_t *pkt; 5327 fp_cmd_t *cmd; 5328 5329 if (pd == NULL) { 5330 mutex_enter(&port->fp_mutex); 5331 s_id = port->fp_port_id; 5332 mutex_exit(&port->fp_mutex); 5333 } else { 5334 mutex_enter(&pd->pd_mutex); 5335 s_id = pd->pd_port_id; 5336 mutex_exit(&pd->pd_mutex); 5337 } 5338 5339 if (polled) { 5340 job->job_counter = 1; 5341 } 5342 5343 switch (cmd_code) { 5344 case NS_RPN_ID: 5345 case NS_RNN_ID: { 5346 ns_rxn_req_t rxn; 5347 5348 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5349 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5350 if (cmd == NULL) { 5351 return (FC_NOMEM); 5352 } 5353 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5354 pkt = &cmd->cmd_pkt; 5355 5356 if (pd == NULL) { 5357 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5358 (port->fp_service_params.nport_ww_name) : 5359 (port->fp_service_params.node_ww_name)); 5360 } else { 5361 if (cmd_code == NS_RPN_ID) { 5362 mutex_enter(&pd->pd_mutex); 5363 rxn.rxn_xname = pd->pd_port_name; 5364 mutex_exit(&pd->pd_mutex); 5365 } else { 5366 fc_remote_node_t *node; 5367 5368 mutex_enter(&pd->pd_mutex); 5369 node = pd->pd_remote_nodep; 5370 mutex_exit(&pd->pd_mutex); 5371 5372 mutex_enter(&node->fd_mutex); 5373 rxn.rxn_xname = node->fd_node_name; 5374 mutex_exit(&node->fd_mutex); 5375 } 5376 } 5377 rxn.rxn_port_id = s_id; 5378 5379 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5380 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5381 sizeof (rxn), DDI_DEV_AUTOINCR); 5382 5383 break; 5384 } 5385 5386 case NS_RCS_ID: { 5387 ns_rcos_t rcos; 5388 5389 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5390 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5391 if (cmd == NULL) { 5392 return (FC_NOMEM); 5393 } 5394 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5395 pkt = &cmd->cmd_pkt; 5396 5397 if (pd == NULL) { 5398 rcos.rcos_cos = port->fp_cos; 5399 } else { 5400 mutex_enter(&pd->pd_mutex); 5401 rcos.rcos_cos = pd->pd_cos; 5402 mutex_exit(&pd->pd_mutex); 5403 } 5404 rcos.rcos_port_id = s_id; 5405 5406 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5407 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5408 sizeof (rcos), DDI_DEV_AUTOINCR); 5409 5410 break; 5411 } 5412 5413 case NS_RFT_ID: { 5414 ns_rfc_type_t rfc; 5415 5416 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5417 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5418 NULL); 5419 if (cmd == NULL) { 5420 return (FC_NOMEM); 5421 } 5422 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5423 pkt = &cmd->cmd_pkt; 5424 5425 if (pd == NULL) { 5426 mutex_enter(&port->fp_mutex); 5427 bcopy(port->fp_fc4_types, rfc.rfc_types, 5428 sizeof (port->fp_fc4_types)); 5429 mutex_exit(&port->fp_mutex); 5430 } else { 5431 mutex_enter(&pd->pd_mutex); 5432 bcopy(pd->pd_fc4types, rfc.rfc_types, 5433 sizeof (pd->pd_fc4types)); 5434 mutex_exit(&pd->pd_mutex); 5435 } 5436 rfc.rfc_port_id = s_id; 5437 5438 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5439 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5440 sizeof (rfc), DDI_DEV_AUTOINCR); 5441 5442 break; 5443 } 5444 5445 case NS_RSPN_ID: { 5446 uchar_t name_len; 5447 int pl_size; 5448 fc_portid_t spn; 5449 5450 if (pd == NULL) { 5451 mutex_enter(&port->fp_mutex); 5452 name_len = port->fp_sym_port_namelen; 5453 mutex_exit(&port->fp_mutex); 5454 } else { 5455 
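			/*
			 * Registering on behalf of a remote port: use its
			 * symbolic port name length.
			 */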
mutex_enter(&pd->pd_mutex); 5456 name_len = pd->pd_spn_len; 5457 mutex_exit(&pd->pd_mutex); 5458 } 5459 5460 pl_size = sizeof (fc_portid_t) + name_len + 1; 5461 5462 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5463 sizeof (fc_reg_resp_t), sleep, NULL); 5464 if (cmd == NULL) { 5465 return (FC_NOMEM); 5466 } 5467 5468 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5469 5470 pkt = &cmd->cmd_pkt; 5471 5472 spn = s_id; 5473 5474 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5475 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5476 DDI_DEV_AUTOINCR); 5477 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5478 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5479 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5480 5481 if (pd == NULL) { 5482 mutex_enter(&port->fp_mutex); 5483 ddi_rep_put8(pkt->pkt_cmd_acc, 5484 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5485 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5486 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5487 mutex_exit(&port->fp_mutex); 5488 } else { 5489 mutex_enter(&pd->pd_mutex); 5490 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5491 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5492 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5493 mutex_exit(&pd->pd_mutex); 5494 } 5495 break; 5496 } 5497 5498 case NS_RPT_ID: { 5499 ns_rpt_t rpt; 5500 5501 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5502 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5503 if (cmd == NULL) { 5504 return (FC_NOMEM); 5505 } 5506 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5507 pkt = &cmd->cmd_pkt; 5508 5509 if (pd == NULL) { 5510 rpt.rpt_type = port->fp_port_type; 5511 } else { 5512 mutex_enter(&pd->pd_mutex); 5513 rpt.rpt_type = pd->pd_porttype; 5514 mutex_exit(&pd->pd_mutex); 5515 } 5516 rpt.rpt_port_id = s_id; 5517 5518 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5519 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5520 sizeof (rpt), DDI_DEV_AUTOINCR); 5521 5522 break; 5523 } 5524 5525 case NS_RIP_NN: { 5526 ns_rip_t rip; 5527 5528 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5529 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5530 if (cmd == NULL) { 5531 return (FC_NOMEM); 5532 } 5533 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5534 pkt = &cmd->cmd_pkt; 5535 5536 if (pd == NULL) { 5537 rip.rip_node_name = 5538 port->fp_service_params.node_ww_name; 5539 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5540 sizeof (port->fp_ip_addr)); 5541 } else { 5542 fc_remote_node_t *node; 5543 5544 /* 5545 * The most correct implementation should have the IP 5546 * address in the fc_remote_node_t structure; I believe 5547 * Node WWN and IP address should have one to one 5548 * correlation (but guess what this is changing in 5549 * FC-GS-2 latest draft) 5550 */ 5551 mutex_enter(&pd->pd_mutex); 5552 node = pd->pd_remote_nodep; 5553 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5554 sizeof (pd->pd_ip_addr)); 5555 mutex_exit(&pd->pd_mutex); 5556 5557 mutex_enter(&node->fd_mutex); 5558 rip.rip_node_name = node->fd_node_name; 5559 mutex_exit(&node->fd_mutex); 5560 } 5561 5562 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5563 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5564 sizeof (rip), DDI_DEV_AUTOINCR); 5565 5566 break; 5567 } 5568 5569 case NS_RIPA_NN: { 5570 ns_ipa_t ipa; 5571 5572 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5573 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5574 if (cmd == NULL) { 5575 return (FC_NOMEM); 5576 } 5577 
fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5578 pkt = &cmd->cmd_pkt; 5579 5580 if (pd == NULL) { 5581 ipa.ipa_node_name = 5582 port->fp_service_params.node_ww_name; 5583 bcopy(port->fp_ipa, ipa.ipa_value, 5584 sizeof (port->fp_ipa)); 5585 } else { 5586 fc_remote_node_t *node; 5587 5588 mutex_enter(&pd->pd_mutex); 5589 node = pd->pd_remote_nodep; 5590 mutex_exit(&pd->pd_mutex); 5591 5592 mutex_enter(&node->fd_mutex); 5593 ipa.ipa_node_name = node->fd_node_name; 5594 bcopy(node->fd_ipa, ipa.ipa_value, 5595 sizeof (node->fd_ipa)); 5596 mutex_exit(&node->fd_mutex); 5597 } 5598 5599 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5600 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5601 sizeof (ipa), DDI_DEV_AUTOINCR); 5602 5603 break; 5604 } 5605 5606 case NS_RSNN_NN: { 5607 uchar_t name_len; 5608 int pl_size; 5609 la_wwn_t snn; 5610 fc_remote_node_t *node = NULL; 5611 5612 if (pd == NULL) { 5613 mutex_enter(&port->fp_mutex); 5614 name_len = port->fp_sym_node_namelen; 5615 mutex_exit(&port->fp_mutex); 5616 } else { 5617 mutex_enter(&pd->pd_mutex); 5618 node = pd->pd_remote_nodep; 5619 mutex_exit(&pd->pd_mutex); 5620 5621 mutex_enter(&node->fd_mutex); 5622 name_len = node->fd_snn_len; 5623 mutex_exit(&node->fd_mutex); 5624 } 5625 5626 pl_size = sizeof (la_wwn_t) + name_len + 1; 5627 5628 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5629 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5630 if (cmd == NULL) { 5631 return (FC_NOMEM); 5632 } 5633 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5634 5635 pkt = &cmd->cmd_pkt; 5636 5637 bcopy(&port->fp_service_params.node_ww_name, 5638 &snn, sizeof (la_wwn_t)); 5639 5640 if (pd == NULL) { 5641 mutex_enter(&port->fp_mutex); 5642 ddi_rep_put8(pkt->pkt_cmd_acc, 5643 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5644 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5645 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5646 mutex_exit(&port->fp_mutex); 5647 } else { 5648 ASSERT(node != NULL); 5649 mutex_enter(&node->fd_mutex); 5650 ddi_rep_put8(pkt->pkt_cmd_acc, 5651 (uint8_t *)node->fd_snn, 5652 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5653 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5654 mutex_exit(&node->fd_mutex); 5655 } 5656 5657 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5658 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5659 sizeof (snn), DDI_DEV_AUTOINCR); 5660 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5661 (uint8_t *)(pkt->pkt_cmd 5662 + sizeof (fc_ct_header_t) + sizeof (snn)), 5663 1, DDI_DEV_AUTOINCR); 5664 5665 break; 5666 } 5667 5668 case NS_DA_ID: { 5669 ns_remall_t rall; 5670 char tmp[4] = {0}; 5671 char *ptr; 5672 5673 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5674 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5675 5676 if (cmd == NULL) { 5677 return (FC_NOMEM); 5678 } 5679 5680 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5681 pkt = &cmd->cmd_pkt; 5682 5683 ptr = (char *)(&s_id); 5684 tmp[3] = *ptr++; 5685 tmp[2] = *ptr++; 5686 tmp[1] = *ptr++; 5687 tmp[0] = *ptr; 5688 #if defined(_BIT_FIELDS_LTOH) 5689 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5690 #else 5691 rall.rem_port_id = s_id; 5692 #endif 5693 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5694 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5695 sizeof (rall), DDI_DEV_AUTOINCR); 5696 5697 break; 5698 } 5699 5700 default: 5701 return (FC_FAILURE); 5702 } 5703 5704 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5705 5706 if (rval != FC_SUCCESS) { 5707 
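		/*
		 * The submission itself failed, so no completion callback is
		 * expected; record the error on the job and finish the
		 * command here.
		 */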
job->job_result = rval; 5708 fp_iodone(cmd); 5709 } 5710 5711 if (polled) { 5712 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5713 fp_jobwait(job); 5714 } else { 5715 rval = FC_SUCCESS; 5716 } 5717 5718 return (rval); 5719 } 5720 5721 5722 /* 5723 * Common interrupt handler 5724 */ 5725 static int 5726 fp_common_intr(fc_packet_t *pkt, int iodone) 5727 { 5728 int rval = FC_FAILURE; 5729 fp_cmd_t *cmd; 5730 fc_local_port_t *port; 5731 5732 cmd = pkt->pkt_ulp_private; 5733 port = cmd->cmd_port; 5734 5735 /* 5736 * Fail fast the upper layer requests if 5737 * a state change has occurred amidst. 5738 */ 5739 mutex_enter(&port->fp_mutex); 5740 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5741 mutex_exit(&port->fp_mutex); 5742 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5743 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5744 } else if (!(port->fp_soft_state & 5745 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5746 mutex_exit(&port->fp_mutex); 5747 5748 switch (pkt->pkt_state) { 5749 case FC_PKT_LOCAL_BSY: 5750 case FC_PKT_FABRIC_BSY: 5751 case FC_PKT_NPORT_BSY: 5752 case FC_PKT_TIMEOUT: 5753 cmd->cmd_retry_interval = (pkt->pkt_state == 5754 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5755 rval = fp_retry_cmd(pkt); 5756 break; 5757 5758 case FC_PKT_FABRIC_RJT: 5759 case FC_PKT_NPORT_RJT: 5760 case FC_PKT_LOCAL_RJT: 5761 case FC_PKT_LS_RJT: 5762 case FC_PKT_FS_RJT: 5763 case FC_PKT_BA_RJT: 5764 rval = fp_handle_reject(pkt); 5765 break; 5766 5767 default: 5768 if (pkt->pkt_resp_resid) { 5769 cmd->cmd_retry_interval = 0; 5770 rval = fp_retry_cmd(pkt); 5771 } 5772 break; 5773 } 5774 } else { 5775 mutex_exit(&port->fp_mutex); 5776 } 5777 5778 if (rval != FC_SUCCESS && iodone) { 5779 fp_iodone(cmd); 5780 rval = FC_SUCCESS; 5781 } 5782 5783 return (rval); 5784 } 5785 5786 5787 /* 5788 * Some not so long winding theory on point to point topology: 5789 * 5790 * In the ACC payload, if the D_ID is ZERO and the common service 5791 * parameters indicate N_Port, then the topology is POINT TO POINT. 5792 * 5793 * In a point to point topology with an N_Port, during Fabric Login, 5794 * the destination N_Port will check with our WWN and decide if it 5795 * needs to issue PLOGI or not. That means, FLOGI could potentially 5796 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5797 * PLOGI creates the device handles. 5798 * 5799 * Assuming that the host port WWN is greater than the other N_Port 5800 * WWN, then we become the master (be aware that this isn't the word 5801 * used in the FC standards) and initiate the PLOGI. 5802 * 5803 */ 5804 static void 5805 fp_flogi_intr(fc_packet_t *pkt) 5806 { 5807 int state; 5808 int f_port; 5809 uint32_t s_id; 5810 uint32_t d_id; 5811 fp_cmd_t *cmd; 5812 fc_local_port_t *port; 5813 la_wwn_t *swwn; 5814 la_wwn_t dwwn; 5815 la_wwn_t nwwn; 5816 fc_remote_port_t *pd; 5817 la_els_logi_t *acc; 5818 com_svc_t csp; 5819 ls_code_t resp; 5820 5821 cmd = pkt->pkt_ulp_private; 5822 port = cmd->cmd_port; 5823 5824 mutex_enter(&port->fp_mutex); 5825 port->fp_out_fpcmds--; 5826 mutex_exit(&port->fp_mutex); 5827 5828 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5829 port, pkt, pkt->pkt_state); 5830 5831 if (FP_IS_PKT_ERROR(pkt)) { 5832 (void) fp_common_intr(pkt, 1); 5833 return; 5834 } 5835 5836 /* 5837 * Currently, we don't need to swap bytes here because qlc is faking the 5838 * response for us and so endianness is getting taken care of. 
But we 5839 * have to fix this and generalize this at some point 5840 */ 5841 acc = (la_els_logi_t *)pkt->pkt_resp; 5842 5843 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5844 sizeof (resp), DDI_DEV_AUTOINCR); 5845 5846 ASSERT(resp.ls_code == LA_ELS_ACC); 5847 if (resp.ls_code != LA_ELS_ACC) { 5848 (void) fp_common_intr(pkt, 1); 5849 return; 5850 } 5851 5852 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5853 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5854 5855 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5856 5857 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5858 5859 mutex_enter(&port->fp_mutex); 5860 state = FC_PORT_STATE_MASK(port->fp_state); 5861 mutex_exit(&port->fp_mutex); 5862 5863 if (pkt->pkt_resp_fhdr.d_id == 0) { 5864 if (f_port == 0 && state != FC_STATE_LOOP) { 5865 swwn = &port->fp_service_params.nport_ww_name; 5866 5867 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5868 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5869 DDI_DEV_AUTOINCR); 5870 5871 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5872 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5873 DDI_DEV_AUTOINCR); 5874 5875 mutex_enter(&port->fp_mutex); 5876 5877 port->fp_topology = FC_TOP_PT_PT; 5878 port->fp_total_devices = 1; 5879 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5880 port->fp_ptpt_master = 1; 5881 /* 5882 * Let us choose 'X' as S_ID and 'Y' 5883 * as D_ID and that'll work; hopefully 5884 * If not, it will get changed. 5885 */ 5886 s_id = port->fp_instance + FP_DEFAULT_SID; 5887 d_id = port->fp_instance + FP_DEFAULT_DID; 5888 port->fp_port_id.port_id = s_id; 5889 mutex_exit(&port->fp_mutex); 5890 5891 pd = fctl_create_remote_port(port, 5892 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5893 KM_NOSLEEP); 5894 if (pd == NULL) { 5895 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5896 0, NULL, "couldn't create device" 5897 " d_id=%X", d_id); 5898 fp_iodone(cmd); 5899 return; 5900 } 5901 5902 cmd->cmd_pkt.pkt_tran_flags = 5903 pkt->pkt_tran_flags; 5904 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5905 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5906 cmd->cmd_retry_count = fp_retry_count; 5907 5908 fp_xlogi_init(port, cmd, s_id, d_id, 5909 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5910 5911 (&cmd->cmd_pkt)->pkt_pd = pd; 5912 5913 /* 5914 * We've just created this fc_remote_port_t, and 5915 * we're about to use it to send a PLOGI, so 5916 * bump the reference count right now. When 5917 * the packet is freed, the reference count will 5918 * be decremented. The ULP may also start using 5919 * it, so mark it as given away as well. 
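				 * At this point the local port has elected
				 * itself the PLOGI initiator for the
				 * point-to-point link, using the provisional
				 * S_ID/D_ID chosen above.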
5920 */ 5921 pd->pd_ref_count++; 5922 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5923 5924 if (fp_sendcmd(port, cmd, 5925 port->fp_fca_handle) == FC_SUCCESS) { 5926 return; 5927 } 5928 } else { 5929 /* 5930 * The device handles will be created when the 5931 * unsolicited PLOGI is completed successfully 5932 */ 5933 port->fp_ptpt_master = 0; 5934 mutex_exit(&port->fp_mutex); 5935 } 5936 } 5937 pkt->pkt_state = FC_PKT_FAILURE; 5938 } else { 5939 if (f_port) { 5940 mutex_enter(&port->fp_mutex); 5941 if (state == FC_STATE_LOOP) { 5942 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5943 } else { 5944 port->fp_topology = FC_TOP_FABRIC; 5945 5946 ddi_rep_get8(pkt->pkt_resp_acc, 5947 (uint8_t *)&port->fp_fabric_name, 5948 (uint8_t *)&acc->node_ww_name, 5949 sizeof (la_wwn_t), 5950 DDI_DEV_AUTOINCR); 5951 } 5952 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5953 mutex_exit(&port->fp_mutex); 5954 } else { 5955 pkt->pkt_state = FC_PKT_FAILURE; 5956 } 5957 } 5958 fp_iodone(cmd); 5959 } 5960 5961 5962 /* 5963 * Handle solicited PLOGI response 5964 */ 5965 static void 5966 fp_plogi_intr(fc_packet_t *pkt) 5967 { 5968 int nl_port; 5969 int bailout; 5970 uint32_t d_id; 5971 fp_cmd_t *cmd; 5972 la_els_logi_t *acc; 5973 fc_local_port_t *port; 5974 fc_remote_port_t *pd; 5975 la_wwn_t nwwn; 5976 la_wwn_t pwwn; 5977 ls_code_t resp; 5978 5979 nl_port = 0; 5980 cmd = pkt->pkt_ulp_private; 5981 port = cmd->cmd_port; 5982 d_id = pkt->pkt_cmd_fhdr.d_id; 5983 5984 #ifndef __lock_lint 5985 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5986 #endif 5987 5988 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5989 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5990 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5991 5992 /* 5993 * Bail out early on ULP initiated requests if the 5994 * state change has occurred 5995 */ 5996 mutex_enter(&port->fp_mutex); 5997 port->fp_out_fpcmds--; 5998 bailout = ((port->fp_statec_busy || 5999 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6000 cmd->cmd_ulp_pkt) ? 1 : 0; 6001 mutex_exit(&port->fp_mutex); 6002 6003 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6004 int skip_msg = 0; 6005 int giveup = 0; 6006 6007 if (cmd->cmd_ulp_pkt) { 6008 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6009 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6010 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6011 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6012 } 6013 6014 /* 6015 * If an unsolicited cross login already created 6016 * a device speed up the discovery by not retrying 6017 * the command mindlessly. 6018 */ 6019 if (pkt->pkt_pd == NULL && 6020 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6021 fp_iodone(cmd); 6022 return; 6023 } 6024 6025 if (pkt->pkt_pd != NULL) { 6026 giveup = (pkt->pkt_pd->pd_recepient == 6027 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6028 if (giveup) { 6029 /* 6030 * This pd is marked as plogi 6031 * recipient, stop retrying 6032 */ 6033 FP_TRACE(FP_NHEAD1(3, 0), 6034 "fp_plogi_intr: stop retry as" 6035 " a cross login was accepted" 6036 " from d_id=%x, port=%p.", 6037 d_id, port); 6038 fp_iodone(cmd); 6039 return; 6040 } 6041 } 6042 6043 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6044 return; 6045 } 6046 6047 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6048 mutex_enter(&pd->pd_mutex); 6049 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6050 skip_msg++; 6051 } 6052 mutex_exit(&pd->pd_mutex); 6053 } 6054 6055 mutex_enter(&port->fp_mutex); 6056 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6057 port->fp_statec_busy <= 1 && 6058 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6059 mutex_exit(&port->fp_mutex); 6060 /* 6061 * In case of Login Collisions, JNI HBAs returns the 6062 * FC pkt back to the Initiator with the state set to 6063 * FC_PKT_LS_RJT and reason to FC_REASON_LOGICAL_ERROR. 6064 * QLC HBAs handles such cases in the FW and doesnot 6065 * return the LS_RJT with Logical error when 6066 * login collision happens. 6067 */ 6068 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6069 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6070 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6071 "PLOGI to %x failed", d_id); 6072 } 6073 FP_TRACE(FP_NHEAD2(9, 0), 6074 "PLOGI to %x failed. state=%x reason=%x.", 6075 d_id, pkt->pkt_state, pkt->pkt_reason); 6076 } else { 6077 mutex_exit(&port->fp_mutex); 6078 } 6079 6080 fp_iodone(cmd); 6081 return; 6082 } 6083 6084 acc = (la_els_logi_t *)pkt->pkt_resp; 6085 6086 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6087 sizeof (resp), DDI_DEV_AUTOINCR); 6088 6089 ASSERT(resp.ls_code == LA_ELS_ACC); 6090 if (resp.ls_code != LA_ELS_ACC) { 6091 (void) fp_common_intr(pkt, 1); 6092 return; 6093 } 6094 6095 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6096 mutex_enter(&port->fp_mutex); 6097 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6098 mutex_exit(&port->fp_mutex); 6099 fp_iodone(cmd); 6100 return; 6101 } 6102 6103 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6104 6105 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6106 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6107 DDI_DEV_AUTOINCR); 6108 6109 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6110 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6111 DDI_DEV_AUTOINCR); 6112 6113 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6114 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6115 6116 if ((pd = pkt->pkt_pd) == NULL) { 6117 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6118 if (pd == NULL) { 6119 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6120 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6121 if (pd == NULL) { 6122 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6123 "couldn't create port device handles" 6124 " d_id=%x", d_id); 6125 fp_iodone(cmd); 6126 return; 6127 } 6128 } else { 6129 fc_remote_port_t *tmp_pd; 6130 6131 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6132 if (tmp_pd != NULL) { 6133 fp_iodone(cmd); 6134 return; 6135 } 6136 6137 mutex_enter(&port->fp_mutex); 6138 mutex_enter(&pd->pd_mutex); 6139 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6140 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6141 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6142 } 6143 6144 if (pd->pd_type == PORT_DEVICE_OLD) { 6145 if (pd->pd_port_id.port_id != d_id) { 6146 fctl_delist_did_table(port, pd); 6147 pd->pd_type = PORT_DEVICE_CHANGED; 6148 pd->pd_port_id.port_id = 
d_id; 6149 } else { 6150 pd->pd_type = PORT_DEVICE_NOCHANGE; 6151 } 6152 } 6153 6154 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6155 char ww_name[17]; 6156 6157 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6158 6159 mutex_exit(&pd->pd_mutex); 6160 mutex_exit(&port->fp_mutex); 6161 FP_TRACE(FP_NHEAD2(9, 0), 6162 "Possible Duplicate name or address" 6163 " identifiers in the PLOGI response" 6164 " D_ID=%x, PWWN=%s: Please check the" 6165 " configuration", d_id, ww_name); 6166 fp_iodone(cmd); 6167 return; 6168 } 6169 fctl_enlist_did_table(port, pd); 6170 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6171 mutex_exit(&pd->pd_mutex); 6172 mutex_exit(&port->fp_mutex); 6173 } 6174 } else { 6175 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6176 6177 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6178 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6179 6180 mutex_enter(&port->fp_mutex); 6181 mutex_enter(&pd->pd_mutex); 6182 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6183 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6184 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6185 pd->pd_type); 6186 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6187 pd->pd_type == PORT_DEVICE_OLD) || 6188 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6189 pd->pd_type = PORT_DEVICE_NOCHANGE; 6190 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6191 pd->pd_type = PORT_DEVICE_NEW; 6192 } 6193 } else { 6194 char old_name[17]; 6195 char new_name[17]; 6196 6197 fc_wwn_to_str(&pd->pd_port_name, old_name); 6198 fc_wwn_to_str(&pwwn, new_name); 6199 6200 FP_TRACE(FP_NHEAD1(9, 0), 6201 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6202 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6203 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6204 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6205 cmd->cmd_ulp_pkt, bailout); 6206 6207 FP_TRACE(FP_NHEAD2(9, 0), 6208 "PWWN of a device with D_ID=%x changed." 6209 " New PWWN = %s, OLD PWWN = %s", d_id, 6210 new_name, old_name); 6211 6212 if (cmd->cmd_ulp_pkt && !bailout) { 6213 fc_remote_node_t *rnodep; 6214 fc_portmap_t *changelist; 6215 fc_portmap_t *listptr; 6216 int len = 1; 6217 /* # entries in changelist */ 6218 6219 fctl_delist_pwwn_table(port, pd); 6220 6221 /* 6222 * Lets now check if there already is a pd with 6223 * this new WWN in the table. If so, we'll mark 6224 * it as invalid 6225 */ 6226 6227 if (new_wwn_pd) { 6228 /* 6229 * There is another pd with in the pwwn 6230 * table with the same WWN that we got 6231 * in the PLOGI payload. We have to get 6232 * it out of the pwwn table, update the 6233 * pd's state (fp_fillout_old_map does 6234 * this for us) and add it to the 6235 * changelist that goes up to ULPs. 6236 * 6237 * len is length of changelist and so 6238 * increment it. 6239 */ 6240 len++; 6241 6242 if (tmp_pd != pd) { 6243 /* 6244 * Odd case where pwwn and did 6245 * tables are out of sync but 6246 * we will handle that too. See 6247 * more comments below. 6248 * 6249 * One more device that ULPs 6250 * should know about and so len 6251 * gets incremented again. 6252 */ 6253 len++; 6254 } 6255 6256 listptr = changelist = kmem_zalloc(len * 6257 sizeof (*changelist), KM_SLEEP); 6258 6259 mutex_enter(&new_wwn_pd->pd_mutex); 6260 rnodep = new_wwn_pd->pd_remote_nodep; 6261 mutex_exit(&new_wwn_pd->pd_mutex); 6262 6263 /* 6264 * Hold the fd_mutex since 6265 * fctl_copy_portmap_held expects it. 
6266 * Preserve lock hierarchy by grabbing 6267 * fd_mutex before pd_mutex 6268 */ 6269 if (rnodep) { 6270 mutex_enter(&rnodep->fd_mutex); 6271 } 6272 mutex_enter(&new_wwn_pd->pd_mutex); 6273 fp_fillout_old_map_held(listptr++, 6274 new_wwn_pd, 0); 6275 mutex_exit(&new_wwn_pd->pd_mutex); 6276 if (rnodep) { 6277 mutex_exit(&rnodep->fd_mutex); 6278 } 6279 6280 /* 6281 * Safety check : 6282 * Lets ensure that the pwwn and did 6283 * tables are in sync. Ideally, we 6284 * should not find that these two pd's 6285 * are different. 6286 */ 6287 if (tmp_pd != pd) { 6288 mutex_enter(&tmp_pd->pd_mutex); 6289 rnodep = 6290 tmp_pd->pd_remote_nodep; 6291 mutex_exit(&tmp_pd->pd_mutex); 6292 6293 /* As above grab fd_mutex */ 6294 if (rnodep) { 6295 mutex_enter(&rnodep-> 6296 fd_mutex); 6297 } 6298 mutex_enter(&tmp_pd->pd_mutex); 6299 6300 fp_fillout_old_map_held( 6301 listptr++, tmp_pd, 0); 6302 6303 mutex_exit(&tmp_pd->pd_mutex); 6304 if (rnodep) { 6305 mutex_exit(&rnodep-> 6306 fd_mutex); 6307 } 6308 6309 /* 6310 * Now add "pd" (not tmp_pd) 6311 * to fp_did_table to sync it up 6312 * with fp_pwwn_table 6313 * 6314 * pd->pd_mutex is already held 6315 * at this point 6316 */ 6317 fctl_enlist_did_table(port, pd); 6318 } 6319 } else { 6320 listptr = changelist = kmem_zalloc( 6321 sizeof (*changelist), KM_SLEEP); 6322 } 6323 6324 ASSERT(changelist != NULL); 6325 6326 fp_fillout_changed_map(listptr, pd, &d_id, 6327 &pwwn); 6328 fctl_enlist_pwwn_table(port, pd); 6329 6330 mutex_exit(&pd->pd_mutex); 6331 mutex_exit(&port->fp_mutex); 6332 6333 fp_iodone(cmd); 6334 6335 (void) fp_ulp_devc_cb(port, changelist, len, 6336 len, KM_NOSLEEP, 0); 6337 6338 return; 6339 } 6340 } 6341 6342 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6343 nl_port = 1; 6344 } 6345 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6346 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6347 } 6348 6349 mutex_exit(&pd->pd_mutex); 6350 mutex_exit(&port->fp_mutex); 6351 6352 if (tmp_pd == NULL) { 6353 mutex_enter(&port->fp_mutex); 6354 mutex_enter(&pd->pd_mutex); 6355 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6356 char ww_name[17]; 6357 6358 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6359 mutex_exit(&pd->pd_mutex); 6360 mutex_exit(&port->fp_mutex); 6361 FP_TRACE(FP_NHEAD2(9, 0), 6362 "Possible Duplicate name or address" 6363 " identifiers in the PLOGI response" 6364 " D_ID=%x, PWWN=%s: Please check the" 6365 " configuration", d_id, ww_name); 6366 fp_iodone(cmd); 6367 return; 6368 } 6369 fctl_enlist_did_table(port, pd); 6370 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6371 mutex_exit(&pd->pd_mutex); 6372 mutex_exit(&port->fp_mutex); 6373 } 6374 } 6375 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6376 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6377 6378 if (cmd->cmd_ulp_pkt) { 6379 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6380 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6381 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6382 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6383 if (pd != NULL) { 6384 FP_TRACE(FP_NHEAD1(9, 0), 6385 "fp_plogi_intr;" 6386 "ulp_pkt's pd is NULL, get a pd %p", 6387 pd); 6388 mutex_enter(&pd->pd_mutex); 6389 pd->pd_ref_count++; 6390 mutex_exit(&pd->pd_mutex); 6391 } 6392 cmd->cmd_ulp_pkt->pkt_pd = pd; 6393 } 6394 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6395 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6396 sizeof (fc_frame_hdr_t)); 6397 bcopy((caddr_t)pkt->pkt_resp, 6398 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6399 sizeof (la_els_logi_t)); 6400 } 6401 6402 mutex_enter(&port->fp_mutex); 6403 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6404 
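		/*
		 * On a private loop (or when the peer is an NL_Port) follow
		 * the PLOGI with an ADISC, reusing this command packet; the
		 * ADISC completion handler checks the hard address reported
		 * by the device.
		 */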
mutex_enter(&pd->pd_mutex); 6405 6406 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6407 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6408 cmd->cmd_retry_count = fp_retry_count; 6409 6410 /* 6411 * If the fc_remote_port_t pointer is not set in the given 6412 * fc_packet_t, then this fc_remote_port_t must have just 6413 * been created. Save the pointer and also increment the 6414 * fc_remote_port_t reference count. 6415 */ 6416 if (pkt->pkt_pd == NULL) { 6417 pkt->pkt_pd = pd; 6418 pd->pd_ref_count++; /* It's in use! */ 6419 } 6420 6421 fp_adisc_init(cmd, cmd->cmd_job); 6422 6423 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6424 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6425 6426 mutex_exit(&pd->pd_mutex); 6427 mutex_exit(&port->fp_mutex); 6428 6429 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6430 return; 6431 } 6432 } else { 6433 mutex_exit(&port->fp_mutex); 6434 } 6435 6436 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6437 mutex_enter(&port->fp_mutex); 6438 mutex_enter(&pd->pd_mutex); 6439 6440 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6441 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6442 cmd->cmd_retry_count = fp_retry_count; 6443 6444 fp_logo_init(pd, cmd, cmd->cmd_job); 6445 6446 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6447 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6448 6449 mutex_exit(&pd->pd_mutex); 6450 mutex_exit(&port->fp_mutex); 6451 6452 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6453 return; 6454 } 6455 6456 } 6457 fp_iodone(cmd); 6458 } 6459 6460 6461 /* 6462 * Handle solicited ADISC response 6463 */ 6464 static void 6465 fp_adisc_intr(fc_packet_t *pkt) 6466 { 6467 int rval; 6468 int bailout; 6469 fp_cmd_t *cmd; 6470 fc_local_port_t *port; 6471 fc_remote_port_t *pd; 6472 la_els_adisc_t *acc; 6473 ls_code_t resp; 6474 fc_hardaddr_t ha; 6475 fc_portmap_t *changelist; 6476 int initiator, adiscfail = 0; 6477 6478 pd = pkt->pkt_pd; 6479 cmd = pkt->pkt_ulp_private; 6480 port = cmd->cmd_port; 6481 6482 #ifndef __lock_lint 6483 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6484 #endif 6485 6486 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6487 6488 mutex_enter(&port->fp_mutex); 6489 port->fp_out_fpcmds--; 6490 bailout = ((port->fp_statec_busy || 6491 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6492 cmd->cmd_ulp_pkt) ? 1 : 0; 6493 mutex_exit(&port->fp_mutex); 6494 6495 if (bailout) { 6496 fp_iodone(cmd); 6497 return; 6498 } 6499 6500 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6501 acc = (la_els_adisc_t *)pkt->pkt_resp; 6502 6503 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6504 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6505 6506 if (resp.ls_code == LA_ELS_ACC) { 6507 int is_private; 6508 6509 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6510 (uint8_t *)&acc->hard_addr, sizeof (ha), 6511 DDI_DEV_AUTOINCR); 6512 6513 mutex_enter(&port->fp_mutex); 6514 6515 is_private = 6516 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6517 6518 mutex_enter(&pd->pd_mutex); 6519 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6520 fctl_enlist_did_table(port, pd); 6521 } 6522 mutex_exit(&pd->pd_mutex); 6523 6524 mutex_exit(&port->fp_mutex); 6525 6526 mutex_enter(&pd->pd_mutex); 6527 if (pd->pd_type != PORT_DEVICE_NEW) { 6528 if (is_private && (pd->pd_hard_addr.hard_addr != 6529 ha.hard_addr)) { 6530 pd->pd_type = PORT_DEVICE_CHANGED; 6531 } else { 6532 pd->pd_type = PORT_DEVICE_NOCHANGE; 6533 } 6534 } 6535 6536 if (is_private && (ha.hard_addr && 6537 pd->pd_port_id.port_id != ha.hard_addr)) { 6538 char ww_name[17]; 6539 6540 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6541 6542 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6543 "NL_Port Identifier %x doesn't match" 6544 " with Hard Address %x, Will use Port" 6545 " WWN %s", pd->pd_port_id.port_id, 6546 ha.hard_addr, ww_name); 6547 6548 pd->pd_hard_addr.hard_addr = 0; 6549 } else { 6550 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6551 } 6552 mutex_exit(&pd->pd_mutex); 6553 } else { 6554 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6555 return; 6556 } 6557 } 6558 } else { 6559 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6560 return; 6561 } 6562 6563 mutex_enter(&port->fp_mutex); 6564 if (port->fp_statec_busy <= 1) { 6565 mutex_exit(&port->fp_mutex); 6566 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6567 "ADISC to %x failed, cmd_flags=%x", 6568 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6569 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6570 adiscfail = 1; 6571 } else { 6572 mutex_exit(&port->fp_mutex); 6573 } 6574 } 6575 6576 if (cmd->cmd_ulp_pkt) { 6577 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6578 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6579 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6580 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6581 cmd->cmd_ulp_pkt->pkt_pd = pd; 6582 FP_TRACE(FP_NHEAD1(9, 0), 6583 "fp_adisc__intr;" 6584 "ulp_pkt's pd is NULL, get a pd %p", 6585 pd); 6586 6587 } 6588 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6589 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6590 sizeof (fc_frame_hdr_t)); 6591 bcopy((caddr_t)pkt->pkt_resp, 6592 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6593 sizeof (la_els_adisc_t)); 6594 } 6595 6596 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6597 FP_TRACE(FP_NHEAD1(9, 0), 6598 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6599 "fp_retry_count=%x, ulp_pkt=%p", 6600 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6601 6602 mutex_enter(&port->fp_mutex); 6603 mutex_enter(&pd->pd_mutex); 6604 6605 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6606 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6607 cmd->cmd_retry_count = fp_retry_count; 6608 6609 fp_logo_init(pd, cmd, cmd->cmd_job); 6610 6611 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6612 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6613 6614 mutex_exit(&pd->pd_mutex); 6615 mutex_exit(&port->fp_mutex); 6616 6617 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6618 if (adiscfail) { 6619 mutex_enter(&pd->pd_mutex); 6620 initiator = 6621 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 6622 pd->pd_state = PORT_DEVICE_VALID; 6623 pd->pd_aux_flags |= PD_LOGGED_OUT; 6624 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6625 pd->pd_type = PORT_DEVICE_NEW; 6626 } else { 6627 pd->pd_type = PORT_DEVICE_NOCHANGE; 6628 } 6629 mutex_exit(&pd->pd_mutex); 6630 6631 changelist = 6632 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6633 6634 if (initiator) { 6635 fp_unregister_login(pd); 6636 fctl_copy_portmap(changelist, pd); 6637 } else { 6638 fp_fillout_old_map(changelist, pd, 0); 6639 } 6640 6641 FP_TRACE(FP_NHEAD1(9, 0), 6642 "fp_adisc_intr: Dev change notification " 6643 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6644 "map_flags=%x initiator=%d", port, pd, 6645 changelist->map_type, changelist->map_state, 6646 changelist->map_flags, initiator); 6647 6648 (void) fp_ulp_devc_cb(port, changelist, 6649 1, 1, KM_SLEEP, 0); 6650 } 6651 if (rval == FC_SUCCESS) { 6652 return; 6653 } 6654 } 6655 fp_iodone(cmd); 6656 } 6657 6658 6659 /* 6660 * Handle solicited LOGO response 6661 */ 6662 static void 6663 fp_logo_intr(fc_packet_t *pkt) 6664 { 6665 ls_code_t resp; 6666 6667 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6668 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6669 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6670 6671 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6672 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6673 6674 if (FP_IS_PKT_ERROR(pkt)) { 6675 (void) fp_common_intr(pkt, 1); 6676 return; 6677 } 6678 6679 ASSERT(resp.ls_code == LA_ELS_ACC); 6680 if (resp.ls_code != LA_ELS_ACC) { 6681 (void) fp_common_intr(pkt, 1); 6682 return; 6683 } 6684 6685 if (pkt->pkt_pd != NULL) { 6686 fp_unregister_login(pkt->pkt_pd); 6687 } 6688 6689 fp_iodone(pkt->pkt_ulp_private); 6690 } 6691 6692 6693 /* 6694 * Handle solicited RNID response 6695 */ 6696 static void 6697 fp_rnid_intr(fc_packet_t *pkt) 6698 { 6699 ls_code_t resp; 6700 job_request_t *job; 6701 fp_cmd_t *cmd; 6702 la_els_rnid_acc_t *acc; 6703 6704 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6705 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6706 cmd = pkt->pkt_ulp_private; 6707 6708 mutex_enter(&cmd->cmd_port->fp_mutex); 6709 cmd->cmd_port->fp_out_fpcmds--; 6710 mutex_exit(&cmd->cmd_port->fp_mutex); 6711 6712 job = cmd->cmd_job; 6713 ASSERT(job->job_private != NULL); 6714 6715 /* If failure or LS_RJT then retry the packet, if needed */ 6716 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6717 (void) fp_common_intr(pkt, 1); 6718 return; 6719 } 6720 6721 /* Save node_id memory allocated in ioctl code */ 6722 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6723 6724 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6725 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6726 6727 /* wakeup the ioctl thread and free the pkt */ 6728 fp_iodone(cmd); 6729 } 6730 6731 6732 /* 6733 * Handle solicited RLS response 6734 */ 6735 static void 6736 fp_rls_intr(fc_packet_t *pkt) 6737 { 6738 ls_code_t resp; 6739 job_request_t *job; 6740 fp_cmd_t *cmd; 6741 la_els_rls_acc_t *acc; 6742 6743 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6744 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6745 cmd = pkt->pkt_ulp_private; 6746 6747 mutex_enter(&cmd->cmd_port->fp_mutex); 6748 cmd->cmd_port->fp_out_fpcmds--; 6749 mutex_exit(&cmd->cmd_port->fp_mutex); 6750 6751 job = cmd->cmd_job; 6752 ASSERT(job->job_private != NULL); 6753 6754 /* If failure or LS_RJT then retry the packet, if needed */ 
6755 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6756 (void) fp_common_intr(pkt, 1); 6757 return; 6758 } 6759 6760 /* Save link error status block in memory allocated in ioctl code */ 6761 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6762 6763 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6764 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6765 DDI_DEV_AUTOINCR); 6766 6767 /* wakeup the ioctl thread and free the pkt */ 6768 fp_iodone(cmd); 6769 } 6770 6771 6772 /* 6773 * A solicited command completion interrupt (mostly for commands 6774 * that require almost no post processing such as SCR ELS) 6775 */ 6776 static void 6777 fp_intr(fc_packet_t *pkt) 6778 { 6779 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6780 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6781 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6782 6783 if (FP_IS_PKT_ERROR(pkt)) { 6784 (void) fp_common_intr(pkt, 1); 6785 return; 6786 } 6787 fp_iodone(pkt->pkt_ulp_private); 6788 } 6789 6790 6791 /* 6792 * Handle the underlying port's state change 6793 */ 6794 static void 6795 fp_statec_cb(opaque_t port_handle, uint32_t state) 6796 { 6797 fc_local_port_t *port = port_handle; 6798 job_request_t *job; 6799 6800 /* 6801 * If it is not possible to process the callbacks 6802 * just drop the callback on the floor; Don't bother 6803 * to do something that isn't safe at this time 6804 */ 6805 mutex_enter(&port->fp_mutex); 6806 if ((port->fp_soft_state & 6807 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6808 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6809 mutex_exit(&port->fp_mutex); 6810 return; 6811 } 6812 6813 if (port->fp_statec_busy == 0) { 6814 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6815 #ifdef DEBUG 6816 } else { 6817 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6818 #endif 6819 } 6820 6821 port->fp_statec_busy++; 6822 6823 /* 6824 * For now, force the trusted method of device authentication (by 6825 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
6826 */ 6827 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6828 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6829 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6830 fp_port_offline(port, 0); 6831 } 6832 mutex_exit(&port->fp_mutex); 6833 6834 switch (FC_PORT_STATE_MASK(state)) { 6835 case FC_STATE_OFFLINE: 6836 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6837 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6838 if (job == NULL) { 6839 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6840 " fp_statec_cb() couldn't submit a job " 6841 " to the thread: failing.."); 6842 mutex_enter(&port->fp_mutex); 6843 if (--port->fp_statec_busy == 0) { 6844 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6845 } 6846 mutex_exit(&port->fp_mutex); 6847 return; 6848 } 6849 mutex_enter(&port->fp_mutex); 6850 /* 6851 * Zero out this field so that we do not retain 6852 * the fabric name as its no longer valid 6853 */ 6854 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6855 port->fp_state = state; 6856 mutex_exit(&port->fp_mutex); 6857 6858 fctl_enque_job(port, job); 6859 break; 6860 6861 case FC_STATE_ONLINE: 6862 case FC_STATE_LOOP: 6863 mutex_enter(&port->fp_mutex); 6864 port->fp_state = state; 6865 6866 if (port->fp_offline_tid) { 6867 timeout_id_t tid; 6868 6869 tid = port->fp_offline_tid; 6870 port->fp_offline_tid = NULL; 6871 mutex_exit(&port->fp_mutex); 6872 (void) untimeout(tid); 6873 } else { 6874 mutex_exit(&port->fp_mutex); 6875 } 6876 6877 job = fctl_alloc_job(JOB_PORT_ONLINE, 6878 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6879 if (job == NULL) { 6880 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6881 "fp_statec_cb() couldn't submit a job " 6882 "to the thread: failing.."); 6883 6884 mutex_enter(&port->fp_mutex); 6885 if (--port->fp_statec_busy == 0) { 6886 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6887 } 6888 mutex_exit(&port->fp_mutex); 6889 return; 6890 } 6891 fctl_enque_job(port, job); 6892 break; 6893 6894 case FC_STATE_RESET_REQUESTED: 6895 mutex_enter(&port->fp_mutex); 6896 port->fp_state = FC_STATE_OFFLINE; 6897 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6898 mutex_exit(&port->fp_mutex); 6899 /* FALLTHROUGH */ 6900 6901 case FC_STATE_RESET: 6902 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6903 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6904 if (job == NULL) { 6905 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6906 "fp_statec_cb() couldn't submit a job" 6907 " to the thread: failing.."); 6908 6909 mutex_enter(&port->fp_mutex); 6910 if (--port->fp_statec_busy == 0) { 6911 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6912 } 6913 mutex_exit(&port->fp_mutex); 6914 return; 6915 } 6916 6917 /* squeeze into some field in the job structure */ 6918 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6919 fctl_enque_job(port, job); 6920 break; 6921 6922 case FC_STATE_TARGET_PORT_RESET: 6923 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6924 /* FALLTHROUGH */ 6925 6926 case FC_STATE_NAMESERVICE: 6927 /* FALLTHROUGH */ 6928 6929 default: 6930 mutex_enter(&port->fp_mutex); 6931 if (--port->fp_statec_busy == 0) { 6932 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6933 } 6934 mutex_exit(&port->fp_mutex); 6935 break; 6936 } 6937 } 6938 6939 6940 /* 6941 * Register with the Name Server for RSCNs 6942 */ 6943 static int 6944 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6945 int sleep) 6946 { 6947 uint32_t s_id; 6948 uchar_t class; 6949 fc_scr_req_t payload; 6950 fp_cmd_t *cmd; 6951 fc_packet_t *pkt; 6952 6953 mutex_enter(&port->fp_mutex); 6954 s_id = 
port->fp_port_id.port_id; 6955 class = port->fp_ns_login_class; 6956 mutex_exit(&port->fp_mutex); 6957 6958 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6959 sizeof (fc_scr_resp_t), sleep, NULL); 6960 if (cmd == NULL) { 6961 return (FC_NOMEM); 6962 } 6963 6964 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6965 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6966 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6967 cmd->cmd_retry_count = fp_retry_count; 6968 cmd->cmd_ulp_pkt = NULL; 6969 6970 pkt = &cmd->cmd_pkt; 6971 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6972 6973 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6974 6975 payload.ls_code.ls_code = LA_ELS_SCR; 6976 payload.ls_code.mbz = 0; 6977 payload.scr_rsvd = 0; 6978 payload.scr_func = scr_func; 6979 6980 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6981 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6982 6983 job->job_counter = 1; 6984 6985 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6986 fp_iodone(cmd); 6987 } 6988 6989 return (FC_SUCCESS); 6990 } 6991 6992 6993 /* 6994 * There are basically two methods to determine the total number of 6995 * devices out in the NS database; Reading the details of the two 6996 * methods described below, it shouldn't be hard to identify which 6997 * of the two methods is better. 6998 * 6999 * Method 1. 7000 * Iteratively issue GANs until all ports identifiers are walked 7001 * 7002 * Method 2. 7003 * Issue GID_PT (get port Identifiers) with Maximum residual 7004 * field in the request CT HEADER set to accommodate only the 7005 * CT HEADER in the response frame. And if FC-GS2 has been 7006 * carefully read, the NS here has a chance to FS_ACC the 7007 * request and indicate the residual size in the FS_ACC. 7008 * 7009 * Method 2 is wonderful, although it's not mandatory for the NS 7010 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7011 * (note with particular care the use of the auxiliary verb 'may') 7012 * 7013 */ 7014 static int 7015 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7016 int sleep) 7017 { 7018 int flags; 7019 int rval; 7020 uint32_t src_id; 7021 fctl_ns_req_t *ns_cmd; 7022 7023 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7024 7025 mutex_enter(&port->fp_mutex); 7026 src_id = port->fp_port_id.port_id; 7027 mutex_exit(&port->fp_mutex); 7028 7029 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7030 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7031 sizeof (ns_resp_gid_pt_t), 0, 7032 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7033 7034 if (ns_cmd == NULL) { 7035 return (FC_NOMEM); 7036 } 7037 7038 ns_cmd->ns_cmd_code = NS_GID_PT; 7039 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7040 = FC_NS_PORT_NX; /* All port types */ 7041 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7042 7043 } else { 7044 uint32_t ns_flags; 7045 7046 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7047 if (create) { 7048 ns_flags |= FCTL_NS_CREATE_DEVICE; 7049 } 7050 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7051 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7052 7053 if (ns_cmd == NULL) { 7054 return (FC_NOMEM); 7055 } 7056 ns_cmd->ns_gan_index = 0; 7057 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7058 ns_cmd->ns_cmd_code = NS_GA_NXT; 7059 ns_cmd->ns_gan_max = 0xFFFF; 7060 7061 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7062 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7063 } 7064 7065 flags = 
job->job_flags; 7066 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7067 job->job_counter = 1; 7068 7069 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7070 job->job_flags = flags; 7071 7072 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7073 uint16_t max_resid; 7074 7075 /* 7076 * Revert to scanning the NS if NS_GID_PT isn't 7077 * helping us figure out total number of devices. 7078 */ 7079 if (job->job_result != FC_SUCCESS || 7080 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7081 mutex_enter(&port->fp_mutex); 7082 port->fp_options &= ~FP_NS_SMART_COUNT; 7083 mutex_exit(&port->fp_mutex); 7084 7085 fctl_free_ns_cmd(ns_cmd); 7086 return (fp_ns_get_devcount(port, job, create, sleep)); 7087 } 7088 7089 mutex_enter(&port->fp_mutex); 7090 port->fp_total_devices = 1; 7091 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7092 if (max_resid) { 7093 /* 7094 * Since port identifier is 4 bytes and max_resid 7095 * is also in WORDS, max_resid simply indicates 7096 * the total number of port identifiers not 7097 * transferred 7098 */ 7099 port->fp_total_devices += max_resid; 7100 } 7101 mutex_exit(&port->fp_mutex); 7102 } 7103 mutex_enter(&port->fp_mutex); 7104 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7105 mutex_exit(&port->fp_mutex); 7106 fctl_free_ns_cmd(ns_cmd); 7107 7108 return (rval); 7109 } 7110 7111 /* 7112 * One heck of a function to serve userland. 7113 */ 7114 static int 7115 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7116 { 7117 int rval = 0; 7118 int jcode; 7119 uint32_t ret; 7120 uchar_t open_flag; 7121 fcio_t *kfcio; 7122 job_request_t *job; 7123 boolean_t use32 = B_FALSE; 7124 7125 #ifdef _MULTI_DATAMODEL 7126 switch (ddi_model_convert_from(mode & FMODELS)) { 7127 case DDI_MODEL_ILP32: 7128 use32 = B_TRUE; 7129 break; 7130 7131 case DDI_MODEL_NONE: 7132 default: 7133 break; 7134 } 7135 #endif 7136 7137 mutex_enter(&port->fp_mutex); 7138 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7139 FP_SOFT_IN_UNSOL_CB)) { 7140 fcio->fcio_errno = FC_STATEC_BUSY; 7141 mutex_exit(&port->fp_mutex); 7142 rval = EAGAIN; 7143 if (fp_fcio_copyout(fcio, data, mode)) { 7144 rval = EFAULT; 7145 } 7146 return (rval); 7147 } 7148 open_flag = port->fp_flag; 7149 mutex_exit(&port->fp_mutex); 7150 7151 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7152 fcio->fcio_errno = FC_FAILURE; 7153 rval = EACCES; 7154 if (fp_fcio_copyout(fcio, data, mode)) { 7155 rval = EFAULT; 7156 } 7157 return (rval); 7158 } 7159 7160 /* 7161 * If an exclusive open was demanded during open, don't let 7162 * either innocuous or devil threads to share the file 7163 * descriptor and fire down exclusive access commands 7164 */ 7165 mutex_enter(&port->fp_mutex); 7166 if (port->fp_flag & FP_EXCL) { 7167 if (port->fp_flag & FP_EXCL_BUSY) { 7168 mutex_exit(&port->fp_mutex); 7169 fcio->fcio_errno = FC_FAILURE; 7170 return (EBUSY); 7171 } 7172 port->fp_flag |= FP_EXCL_BUSY; 7173 } 7174 mutex_exit(&port->fp_mutex); 7175 7176 switch (fcio->fcio_cmd) { 7177 case FCIO_GET_HOST_PARAMS: { 7178 fc_port_dev_t *val; 7179 fc_port_dev32_t *val32; 7180 int index; 7181 int lilp_device_count; 7182 fc_lilpmap_t *lilp_map; 7183 uchar_t *alpa_list; 7184 7185 if (use32 == B_TRUE) { 7186 if (fcio->fcio_olen != sizeof (*val32) || 7187 fcio->fcio_xfer != FCIO_XFER_READ) { 7188 rval = EINVAL; 7189 break; 7190 } 7191 } else { 7192 if (fcio->fcio_olen != sizeof (*val) || 7193 fcio->fcio_xfer != FCIO_XFER_READ) { 7194 rval = EINVAL; 7195 break; 7196 } 7197 } 7198 7199 val = kmem_zalloc(sizeof (*val), 
KM_SLEEP); 7200 7201 mutex_enter(&port->fp_mutex); 7202 val->dev_did = port->fp_port_id; 7203 val->dev_hard_addr = port->fp_hard_addr; 7204 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7205 val->dev_nwwn = port->fp_service_params.node_ww_name; 7206 val->dev_state = port->fp_state; 7207 7208 lilp_map = &port->fp_lilp_map; 7209 alpa_list = &lilp_map->lilp_alpalist[0]; 7210 lilp_device_count = lilp_map->lilp_length; 7211 for (index = 0; index < lilp_device_count; index++) { 7212 uint32_t d_id; 7213 7214 d_id = alpa_list[index]; 7215 if (d_id == port->fp_port_id.port_id) { 7216 break; 7217 } 7218 } 7219 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7220 7221 bcopy(port->fp_fc4_types, val->dev_type, 7222 sizeof (port->fp_fc4_types)); 7223 mutex_exit(&port->fp_mutex); 7224 7225 if (use32 == B_TRUE) { 7226 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7227 7228 val32->dev_did = val->dev_did; 7229 val32->dev_hard_addr = val->dev_hard_addr; 7230 val32->dev_pwwn = val->dev_pwwn; 7231 val32->dev_nwwn = val->dev_nwwn; 7232 val32->dev_state = val->dev_state; 7233 val32->dev_did.priv_lilp_posit = 7234 val->dev_did.priv_lilp_posit; 7235 7236 bcopy(val->dev_type, val32->dev_type, 7237 sizeof (port->fp_fc4_types)); 7238 7239 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7240 fcio->fcio_olen, mode) == 0) { 7241 if (fp_fcio_copyout(fcio, data, mode)) { 7242 rval = EFAULT; 7243 } 7244 } else { 7245 rval = EFAULT; 7246 } 7247 7248 kmem_free(val32, sizeof (*val32)); 7249 } else { 7250 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7251 fcio->fcio_olen, mode) == 0) { 7252 if (fp_fcio_copyout(fcio, data, mode)) { 7253 rval = EFAULT; 7254 } 7255 } else { 7256 rval = EFAULT; 7257 } 7258 } 7259 7260 /* need to free "val" here */ 7261 kmem_free(val, sizeof (*val)); 7262 break; 7263 } 7264 7265 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7266 uint32_t index; 7267 char *tmpPath; 7268 fc_local_port_t *tmpPort; 7269 7270 if (fcio->fcio_olen < MAXPATHLEN || 7271 fcio->fcio_ilen != sizeof (uint32_t)) { 7272 rval = EINVAL; 7273 break; 7274 } 7275 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7276 rval = EFAULT; 7277 break; 7278 } 7279 7280 tmpPort = fctl_get_adapter_port_by_index(port, index); 7281 if (tmpPort == NULL) { 7282 FP_TRACE(FP_NHEAD1(9, 0), 7283 "User supplied index out of range"); 7284 fcio->fcio_errno = FC_BADPORT; 7285 rval = EFAULT; 7286 if (fp_fcio_copyout(fcio, data, mode)) { 7287 rval = EFAULT; 7288 } 7289 break; 7290 } 7291 7292 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7293 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7294 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7295 MAXPATHLEN, mode) == 0) { 7296 if (fp_fcio_copyout(fcio, data, mode)) { 7297 rval = EFAULT; 7298 } 7299 } else { 7300 rval = EFAULT; 7301 } 7302 kmem_free(tmpPath, MAXPATHLEN); 7303 break; 7304 } 7305 7306 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7307 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7308 fc_hba_adapter_attributes_t *val; 7309 fc_hba_adapter_attributes32_t *val32; 7310 7311 if (use32 == B_TRUE) { 7312 if (fcio->fcio_olen < sizeof (*val32) || 7313 fcio->fcio_xfer != FCIO_XFER_READ) { 7314 rval = EINVAL; 7315 break; 7316 } 7317 } else { 7318 if (fcio->fcio_olen < sizeof (*val) || 7319 fcio->fcio_xfer != FCIO_XFER_READ) { 7320 rval = EINVAL; 7321 break; 7322 } 7323 } 7324 7325 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7326 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7327 mutex_enter(&port->fp_mutex); 7328 bcopy(port->fp_hba_port_attrs.manufacturer, 7329 
val->Manufacturer, 7330 sizeof (val->Manufacturer)); 7331 bcopy(port->fp_hba_port_attrs.serial_number, 7332 val->SerialNumber, 7333 sizeof (val->SerialNumber)); 7334 bcopy(port->fp_hba_port_attrs.model, 7335 val->Model, 7336 sizeof (val->Model)); 7337 bcopy(port->fp_hba_port_attrs.model_description, 7338 val->ModelDescription, 7339 sizeof (val->ModelDescription)); 7340 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7341 sizeof (val->NodeSymbolicName)); 7342 bcopy(port->fp_hba_port_attrs.hardware_version, 7343 val->HardwareVersion, 7344 sizeof (val->HardwareVersion)); 7345 bcopy(port->fp_hba_port_attrs.option_rom_version, 7346 val->OptionROMVersion, 7347 sizeof (val->OptionROMVersion)); 7348 bcopy(port->fp_hba_port_attrs.firmware_version, 7349 val->FirmwareVersion, 7350 sizeof (val->FirmwareVersion)); 7351 val->VendorSpecificID = 7352 port->fp_hba_port_attrs.vendor_specific_id; 7353 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7354 &val->NodeWWN.raw_wwn, 7355 sizeof (val->NodeWWN.raw_wwn)); 7356 7357 7358 bcopy(port->fp_hba_port_attrs.driver_name, 7359 val->DriverName, 7360 sizeof (val->DriverName)); 7361 bcopy(port->fp_hba_port_attrs.driver_version, 7362 val->DriverVersion, 7363 sizeof (val->DriverVersion)); 7364 mutex_exit(&port->fp_mutex); 7365 7366 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7367 val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7368 } else { 7369 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7370 } 7371 7372 if (use32 == B_TRUE) { 7373 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7374 val32->version = val->version; 7375 bcopy(val->Manufacturer, val32->Manufacturer, 7376 sizeof (val->Manufacturer)); 7377 bcopy(val->SerialNumber, val32->SerialNumber, 7378 sizeof (val->SerialNumber)); 7379 bcopy(val->Model, val32->Model, 7380 sizeof (val->Model)); 7381 bcopy(val->ModelDescription, val32->ModelDescription, 7382 sizeof (val->ModelDescription)); 7383 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7384 sizeof (val->NodeSymbolicName)); 7385 bcopy(val->HardwareVersion, val32->HardwareVersion, 7386 sizeof (val->HardwareVersion)); 7387 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7388 sizeof (val->OptionROMVersion)); 7389 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7390 sizeof (val->FirmwareVersion)); 7391 val32->VendorSpecificID = val->VendorSpecificID; 7392 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7393 sizeof (val->NodeWWN.raw_wwn)); 7394 bcopy(val->DriverName, val32->DriverName, 7395 sizeof (val->DriverName)); 7396 bcopy(val->DriverVersion, val32->DriverVersion, 7397 sizeof (val->DriverVersion)); 7398 7399 val32->NumberOfPorts = val->NumberOfPorts; 7400 7401 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7402 fcio->fcio_olen, mode) == 0) { 7403 if (fp_fcio_copyout(fcio, data, mode)) { 7404 rval = EFAULT; 7405 } 7406 } else { 7407 rval = EFAULT; 7408 } 7409 7410 kmem_free(val32, sizeof (*val32)); 7411 } else { 7412 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7413 fcio->fcio_olen, mode) == 0) { 7414 if (fp_fcio_copyout(fcio, data, mode)) { 7415 rval = EFAULT; 7416 } 7417 } else { 7418 rval = EFAULT; 7419 } 7420 } 7421 7422 kmem_free(val, sizeof (*val)); 7423 break; 7424 } 7425 7426 case FCIO_GET_NPIV_ATTRIBUTES: { 7427 fc_hba_npiv_attributes_t *attrs; 7428 7429 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7430 mutex_enter(&port->fp_mutex); 7431 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7432 &attrs->NodeWWN.raw_wwn, 7433 sizeof (attrs->NodeWWN.raw_wwn)); 7434 
bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7435 &attrs->PortWWN.raw_wwn, 7436 sizeof (attrs->PortWWN.raw_wwn)); 7437 mutex_exit(&port->fp_mutex); 7438 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7439 fcio->fcio_olen, mode) == 0) { 7440 if (fp_fcio_copyout(fcio, data, mode)) { 7441 rval = EFAULT; 7442 } 7443 } else { 7444 rval = EFAULT; 7445 } 7446 kmem_free(attrs, sizeof (*attrs)); 7447 break; 7448 } 7449 7450 case FCIO_DELETE_NPIV_PORT: { 7451 fc_local_port_t *tmpport; 7452 char ww_pname[17]; 7453 la_wwn_t vwwn[1]; 7454 7455 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7456 if (ddi_copyin(fcio->fcio_ibuf, 7457 &vwwn, sizeof (la_wwn_t), mode)) { 7458 rval = EFAULT; 7459 break; 7460 } 7461 7462 fc_wwn_to_str(&vwwn[0], ww_pname); 7463 FP_TRACE(FP_NHEAD1(3, 0), 7464 "Delete NPIV Port %s", ww_pname); 7465 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7466 if (tmpport == NULL) { 7467 FP_TRACE(FP_NHEAD1(3, 0), 7468 "Delete NPIV Port : no found"); 7469 rval = EFAULT; 7470 } else { 7471 fc_local_port_t *nextport = tmpport->fp_port_next; 7472 fc_local_port_t *prevport = tmpport->fp_port_prev; 7473 int portlen, portindex, ret; 7474 7475 portlen = sizeof (portindex); 7476 ret = ddi_prop_op(DDI_DEV_T_ANY, 7477 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7478 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7479 (caddr_t)&portindex, &portlen); 7480 if (ret != DDI_SUCCESS) { 7481 rval = EFAULT; 7482 break; 7483 } 7484 if (ndi_devi_offline(tmpport->fp_port_dip, 7485 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7486 FP_TRACE(FP_NHEAD1(1, 0), 7487 "Delete NPIV Port failed"); 7488 mutex_enter(&port->fp_mutex); 7489 tmpport->fp_npiv_state = 0; 7490 mutex_exit(&port->fp_mutex); 7491 rval = EFAULT; 7492 } else { 7493 mutex_enter(&port->fp_mutex); 7494 nextport->fp_port_prev = prevport; 7495 prevport->fp_port_next = nextport; 7496 if (port == port->fp_port_next) { 7497 port->fp_port_next = 7498 port->fp_port_prev = NULL; 7499 } 7500 port->fp_npiv_portnum--; 7501 FP_TRACE(FP_NHEAD1(3, 0), 7502 "Delete NPIV Port %d", portindex); 7503 port->fp_npiv_portindex[portindex-1] = 0; 7504 mutex_exit(&port->fp_mutex); 7505 } 7506 } 7507 break; 7508 } 7509 7510 case FCIO_CREATE_NPIV_PORT: { 7511 char ww_nname[17], ww_pname[17]; 7512 la_npiv_create_entry_t entrybuf; 7513 uint32_t vportindex = 0; 7514 int npiv_ret = 0; 7515 char *portname, *fcaname; 7516 7517 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7518 (void) ddi_pathname(port->fp_port_dip, portname); 7519 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7520 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7521 FP_TRACE(FP_NHEAD1(1, 0), 7522 "Create NPIV port %s %s %s", portname, fcaname, 7523 ddi_driver_name(port->fp_fca_dip)); 7524 kmem_free(portname, MAXPATHLEN); 7525 kmem_free(fcaname, MAXPATHLEN); 7526 if (ddi_copyin(fcio->fcio_ibuf, 7527 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7528 rval = EFAULT; 7529 break; 7530 } 7531 7532 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7533 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7534 vportindex = entrybuf.vindex; 7535 FP_TRACE(FP_NHEAD1(3, 0), 7536 "Create NPIV Port %s %s %d", 7537 ww_nname, ww_pname, vportindex); 7538 7539 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7540 rval = EFAULT; 7541 break; 7542 } 7543 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7544 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7545 if (npiv_ret == NDI_SUCCESS) { 7546 mutex_enter(&port->fp_mutex); 7547 port->fp_npiv_portnum++; 7548 mutex_exit(&port->fp_mutex); 7549 if (fp_copyout((void *)&vportindex, 
7550 (void *)fcio->fcio_obuf, 7551 fcio->fcio_olen, mode) == 0) { 7552 if (fp_fcio_copyout(fcio, data, mode)) { 7553 rval = EFAULT; 7554 } 7555 } else { 7556 rval = EFAULT; 7557 } 7558 } else { 7559 rval = EFAULT; 7560 } 7561 FP_TRACE(FP_NHEAD1(3, 0), 7562 "Create NPIV Port %d %d", npiv_ret, vportindex); 7563 break; 7564 } 7565 7566 case FCIO_GET_NPIV_PORT_LIST: { 7567 fc_hba_npiv_port_list_t *list; 7568 int count; 7569 7570 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7571 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7572 rval = EINVAL; 7573 break; 7574 } 7575 7576 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7577 list->version = FC_HBA_LIST_VERSION; 7578 /* build npiv port list */ 7579 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7580 if (count < 0) { 7581 rval = ENXIO; 7582 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7583 kmem_free(list, fcio->fcio_olen); 7584 break; 7585 } 7586 list->numAdapters = count; 7587 7588 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7589 fcio->fcio_olen, mode) == 0) { 7590 if (fp_fcio_copyout(fcio, data, mode)) { 7591 FP_TRACE(FP_NHEAD1(1, 0), 7592 "Copy NPIV Port data error"); 7593 rval = EFAULT; 7594 } 7595 } else { 7596 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7597 rval = EFAULT; 7598 } 7599 kmem_free(list, fcio->fcio_olen); 7600 break; 7601 } 7602 7603 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7604 fc_hba_port_npiv_attributes_t *val; 7605 7606 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7607 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7608 7609 mutex_enter(&port->fp_mutex); 7610 val->npivflag = port->fp_npiv_flag; 7611 val->lastChange = port->fp_last_change; 7612 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7613 &val->PortWWN.raw_wwn, 7614 sizeof (val->PortWWN.raw_wwn)); 7615 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7616 &val->NodeWWN.raw_wwn, 7617 sizeof (val->NodeWWN.raw_wwn)); 7618 mutex_exit(&port->fp_mutex); 7619 7620 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7621 if (port->fp_npiv_type != FC_NPIV_PORT) { 7622 val->MaxNumberOfNPIVPorts = 7623 port->fp_fca_tran->fca_num_npivports; 7624 } else { 7625 val->MaxNumberOfNPIVPorts = 0; 7626 } 7627 7628 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7629 fcio->fcio_olen, mode) == 0) { 7630 if (fp_fcio_copyout(fcio, data, mode)) { 7631 rval = EFAULT; 7632 } 7633 } else { 7634 rval = EFAULT; 7635 } 7636 kmem_free(val, sizeof (*val)); 7637 break; 7638 } 7639 7640 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7641 fc_hba_port_attributes_t *val; 7642 fc_hba_port_attributes32_t *val32; 7643 7644 if (use32 == B_TRUE) { 7645 if (fcio->fcio_olen < sizeof (*val32) || 7646 fcio->fcio_xfer != FCIO_XFER_READ) { 7647 rval = EINVAL; 7648 break; 7649 } 7650 } else { 7651 if (fcio->fcio_olen < sizeof (*val) || 7652 fcio->fcio_xfer != FCIO_XFER_READ) { 7653 rval = EINVAL; 7654 break; 7655 } 7656 } 7657 7658 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7659 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7660 mutex_enter(&port->fp_mutex); 7661 val->lastChange = port->fp_last_change; 7662 val->fp_minor = port->fp_instance; 7663 7664 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7665 &val->PortWWN.raw_wwn, 7666 sizeof (val->PortWWN.raw_wwn)); 7667 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7668 &val->NodeWWN.raw_wwn, 7669 sizeof (val->NodeWWN.raw_wwn)); 7670 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7671 sizeof (val->FabricName.raw_wwn)); 7672 7673 val->PortFcId = port->fp_port_id.port_id; 
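		/*
		 * The switches below translate the transport's internal
		 * port state, port type and link speed encodings into the
		 * FC-HBA API constants expected by userland; values that
		 * do not map cleanly are reported as the corresponding
		 * *_UNKNOWN (or *_OTHER) constant.
		 */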
7674 7675 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7676 case FC_STATE_OFFLINE: 7677 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7678 break; 7679 case FC_STATE_ONLINE: 7680 case FC_STATE_LOOP: 7681 case FC_STATE_NAMESERVICE: 7682 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7683 break; 7684 default: 7685 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7686 break; 7687 } 7688 7689 /* Translate from LV to FC-HBA port type codes */ 7690 switch (port->fp_port_type.port_type) { 7691 case FC_NS_PORT_N: 7692 val->PortType = FC_HBA_PORTTYPE_NPORT; 7693 break; 7694 case FC_NS_PORT_NL: 7695 /* Actually means loop for us */ 7696 val->PortType = FC_HBA_PORTTYPE_LPORT; 7697 break; 7698 case FC_NS_PORT_F: 7699 val->PortType = FC_HBA_PORTTYPE_FPORT; 7700 break; 7701 case FC_NS_PORT_FL: 7702 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7703 break; 7704 case FC_NS_PORT_E: 7705 val->PortType = FC_HBA_PORTTYPE_EPORT; 7706 break; 7707 default: 7708 val->PortType = FC_HBA_PORTTYPE_OTHER; 7709 break; 7710 } 7711 7712 7713 /* 7714 * If fp has decided that the topology is public loop, 7715 * we will indicate that using the appropriate 7716 * FC HBA API constant. 7717 */ 7718 switch (port->fp_topology) { 7719 case FC_TOP_PUBLIC_LOOP: 7720 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7721 break; 7722 7723 case FC_TOP_PT_PT: 7724 val->PortType = FC_HBA_PORTTYPE_PTP; 7725 break; 7726 7727 case FC_TOP_UNKNOWN: 7728 /* 7729 * This should cover the case where nothing is connected 7730 * to the port. Crystal+ is p'bly an exception here. 7731 * For Crystal+, port 0 will come up as private loop 7732 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7733 * nothing is connected to it. 7734 * Current plan is to let userland handle this. 7735 */ 7736 if (port->fp_bind_state == FC_STATE_OFFLINE) { 7737 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7738 } 7739 break; 7740 7741 default: 7742 /* 7743 * Do Nothing. 
7744 * Unused: 7745 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7746 */ 7747 break; 7748 } 7749 7750 val->PortSupportedClassofService = 7751 port->fp_hba_port_attrs.supported_cos; 7752 val->PortSupportedFc4Types[0] = 0; 7753 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7754 sizeof (val->PortActiveFc4Types)); 7755 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7756 sizeof (val->PortSymbolicName)); 7757 val->PortSupportedSpeed = 7758 port->fp_hba_port_attrs.supported_speed; 7759 7760 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7761 case FC_STATE_1GBIT_SPEED: 7762 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7763 break; 7764 case FC_STATE_2GBIT_SPEED: 7765 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7766 break; 7767 case FC_STATE_4GBIT_SPEED: 7768 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7769 break; 7770 case FC_STATE_8GBIT_SPEED: 7771 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7772 break; 7773 case FC_STATE_10GBIT_SPEED: 7774 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7775 break; 7776 case FC_STATE_16GBIT_SPEED: 7777 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7778 break; 7779 default: 7780 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7781 break; 7782 } 7783 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7784 val->NumberofDiscoveredPorts = port->fp_dev_count; 7785 mutex_exit(&port->fp_mutex); 7786 7787 if (use32 == B_TRUE) { 7788 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7789 val32->version = val->version; 7790 val32->lastChange = val->lastChange; 7791 val32->fp_minor = val->fp_minor; 7792 7793 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7794 sizeof (val->PortWWN.raw_wwn)); 7795 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7796 sizeof (val->NodeWWN.raw_wwn)); 7797 val32->PortFcId = val->PortFcId; 7798 val32->PortState = val->PortState; 7799 val32->PortType = val->PortType; 7800 7801 val32->PortSupportedClassofService = 7802 val->PortSupportedClassofService; 7803 bcopy(val->PortActiveFc4Types, 7804 val32->PortActiveFc4Types, 7805 sizeof (val->PortActiveFc4Types)); 7806 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7807 sizeof (val->PortSymbolicName)); 7808 bcopy(&val->FabricName, &val32->FabricName, 7809 sizeof (val->FabricName.raw_wwn)); 7810 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7811 val32->PortSpeed = val->PortSpeed; 7812 7813 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7814 val32->NumberofDiscoveredPorts = 7815 val->NumberofDiscoveredPorts; 7816 7817 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7818 fcio->fcio_olen, mode) == 0) { 7819 if (fp_fcio_copyout(fcio, data, mode)) { 7820 rval = EFAULT; 7821 } 7822 } else { 7823 rval = EFAULT; 7824 } 7825 7826 kmem_free(val32, sizeof (*val32)); 7827 } else { 7828 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7829 fcio->fcio_olen, mode) == 0) { 7830 if (fp_fcio_copyout(fcio, data, mode)) { 7831 rval = EFAULT; 7832 } 7833 } else { 7834 rval = EFAULT; 7835 } 7836 } 7837 7838 kmem_free(val, sizeof (*val)); 7839 break; 7840 } 7841 7842 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7843 fc_hba_port_attributes_t *val; 7844 fc_hba_port_attributes32_t *val32; 7845 uint32_t index = 0; 7846 fc_remote_port_t *tmp_pd; 7847 7848 if (use32 == B_TRUE) { 7849 if (fcio->fcio_olen < sizeof (*val32) || 7850 fcio->fcio_xfer != FCIO_XFER_READ) { 7851 rval = EINVAL; 7852 break; 7853 } 7854 } else { 7855 if (fcio->fcio_olen < sizeof (*val) || 7856 fcio->fcio_xfer != FCIO_XFER_READ) { 7857 rval = EINVAL; 7858 break; 7859 } 7860 } 7861 7862 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof 
(index), mode)) { 7863 rval = EFAULT; 7864 break; 7865 } 7866 7867 if (index >= port->fp_dev_count) { 7868 FP_TRACE(FP_NHEAD1(9, 0), 7869 "User supplied index out of range"); 7870 fcio->fcio_errno = FC_OUTOFBOUNDS; 7871 rval = EINVAL; 7872 if (fp_fcio_copyout(fcio, data, mode)) { 7873 rval = EFAULT; 7874 } 7875 break; 7876 } 7877 7878 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7879 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7880 7881 mutex_enter(&port->fp_mutex); 7882 tmp_pd = fctl_lookup_pd_by_index(port, index); 7883 7884 if (tmp_pd == NULL) { 7885 fcio->fcio_errno = FC_BADPORT; 7886 rval = EINVAL; 7887 } else { 7888 val->lastChange = port->fp_last_change; 7889 val->fp_minor = port->fp_instance; 7890 7891 mutex_enter(&tmp_pd->pd_mutex); 7892 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7893 &val->PortWWN.raw_wwn, 7894 sizeof (val->PortWWN.raw_wwn)); 7895 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7896 &val->NodeWWN.raw_wwn, 7897 sizeof (val->NodeWWN.raw_wwn)); 7898 val->PortFcId = tmp_pd->pd_port_id.port_id; 7899 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7900 sizeof (val->PortSymbolicName)); 7901 val->PortSupportedClassofService = tmp_pd->pd_cos; 7902 /* 7903 * we will assume the sizeof these pd_fc4types and 7904 * portActiveFc4Types will remain the same. we could 7905 * add in a check for it, but we decided it was unneeded 7906 */ 7907 bcopy((caddr_t)tmp_pd->pd_fc4types, 7908 val->PortActiveFc4Types, 7909 sizeof (tmp_pd->pd_fc4types)); 7910 val->PortState = 7911 fp_map_remote_port_state(tmp_pd->pd_state); 7912 mutex_exit(&tmp_pd->pd_mutex); 7913 7914 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7915 val->PortSupportedFc4Types[0] = 0; 7916 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7917 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7918 val->PortMaxFrameSize = 0; 7919 val->NumberofDiscoveredPorts = 0; 7920 7921 if (use32 == B_TRUE) { 7922 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7923 val32->version = val->version; 7924 val32->lastChange = val->lastChange; 7925 val32->fp_minor = val->fp_minor; 7926 7927 bcopy(&val->PortWWN.raw_wwn, 7928 &val32->PortWWN.raw_wwn, 7929 sizeof (val->PortWWN.raw_wwn)); 7930 bcopy(&val->NodeWWN.raw_wwn, 7931 &val32->NodeWWN.raw_wwn, 7932 sizeof (val->NodeWWN.raw_wwn)); 7933 val32->PortFcId = val->PortFcId; 7934 bcopy(val->PortSymbolicName, 7935 val32->PortSymbolicName, 7936 sizeof (val->PortSymbolicName)); 7937 val32->PortSupportedClassofService = 7938 val->PortSupportedClassofService; 7939 bcopy(val->PortActiveFc4Types, 7940 val32->PortActiveFc4Types, 7941 sizeof (tmp_pd->pd_fc4types)); 7942 7943 val32->PortType = val->PortType; 7944 val32->PortState = val->PortState; 7945 val32->PortSupportedFc4Types[0] = 7946 val->PortSupportedFc4Types[0]; 7947 val32->PortSupportedSpeed = 7948 val->PortSupportedSpeed; 7949 val32->PortSpeed = val->PortSpeed; 7950 val32->PortMaxFrameSize = 7951 val->PortMaxFrameSize; 7952 val32->NumberofDiscoveredPorts = 7953 val->NumberofDiscoveredPorts; 7954 7955 if (fp_copyout((void *)val32, 7956 (void *)fcio->fcio_obuf, 7957 fcio->fcio_olen, mode) == 0) { 7958 if (fp_fcio_copyout(fcio, 7959 data, mode)) { 7960 rval = EFAULT; 7961 } 7962 } else { 7963 rval = EFAULT; 7964 } 7965 7966 kmem_free(val32, sizeof (*val32)); 7967 } else { 7968 if (fp_copyout((void *)val, 7969 (void *)fcio->fcio_obuf, 7970 fcio->fcio_olen, mode) == 0) { 7971 if (fp_fcio_copyout(fcio, data, mode)) { 7972 rval = EFAULT; 7973 } 7974 } else { 7975 rval = EFAULT; 7976 } 7977 } 7978 } 7979 7980 mutex_exit(&port->fp_mutex); 7981 kmem_free(val, sizeof 
(*val)); 7982 break; 7983 } 7984 7985 case FCIO_GET_PORT_ATTRIBUTES: { 7986 fc_hba_port_attributes_t *val; 7987 fc_hba_port_attributes32_t *val32; 7988 la_wwn_t wwn; 7989 fc_remote_port_t *tmp_pd; 7990 7991 if (use32 == B_TRUE) { 7992 if (fcio->fcio_olen < sizeof (*val32) || 7993 fcio->fcio_xfer != FCIO_XFER_READ) { 7994 rval = EINVAL; 7995 break; 7996 } 7997 } else { 7998 if (fcio->fcio_olen < sizeof (*val) || 7999 fcio->fcio_xfer != FCIO_XFER_READ) { 8000 rval = EINVAL; 8001 break; 8002 } 8003 } 8004 8005 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8006 rval = EFAULT; 8007 break; 8008 } 8009 8010 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8011 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8012 8013 mutex_enter(&port->fp_mutex); 8014 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8015 val->lastChange = port->fp_last_change; 8016 val->fp_minor = port->fp_instance; 8017 mutex_exit(&port->fp_mutex); 8018 8019 if (tmp_pd == NULL) { 8020 fcio->fcio_errno = FC_BADWWN; 8021 rval = EINVAL; 8022 } else { 8023 mutex_enter(&tmp_pd->pd_mutex); 8024 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8025 &val->PortWWN.raw_wwn, 8026 sizeof (val->PortWWN.raw_wwn)); 8027 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8028 &val->NodeWWN.raw_wwn, 8029 sizeof (val->NodeWWN.raw_wwn)); 8030 val->PortFcId = tmp_pd->pd_port_id.port_id; 8031 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8032 sizeof (val->PortSymbolicName)); 8033 val->PortSupportedClassofService = tmp_pd->pd_cos; 8034 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8035 val->PortState = 8036 fp_map_remote_port_state(tmp_pd->pd_state); 8037 val->PortSupportedFc4Types[0] = 0; 8038 /* 8039 * we will assume the sizeof these pd_fc4types and 8040 * portActiveFc4Types will remain the same. we could 8041 * add in a check for it, but we decided it was unneeded 8042 */ 8043 bcopy((caddr_t)tmp_pd->pd_fc4types, 8044 val->PortActiveFc4Types, 8045 sizeof (tmp_pd->pd_fc4types)); 8046 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8047 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8048 val->PortMaxFrameSize = 0; 8049 val->NumberofDiscoveredPorts = 0; 8050 mutex_exit(&tmp_pd->pd_mutex); 8051 8052 if (use32 == B_TRUE) { 8053 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8054 val32->version = val->version; 8055 val32->lastChange = val->lastChange; 8056 val32->fp_minor = val->fp_minor; 8057 bcopy(&val->PortWWN.raw_wwn, 8058 &val32->PortWWN.raw_wwn, 8059 sizeof (val->PortWWN.raw_wwn)); 8060 bcopy(&val->NodeWWN.raw_wwn, 8061 &val32->NodeWWN.raw_wwn, 8062 sizeof (val->NodeWWN.raw_wwn)); 8063 val32->PortFcId = val->PortFcId; 8064 bcopy(val->PortSymbolicName, 8065 val32->PortSymbolicName, 8066 sizeof (val->PortSymbolicName)); 8067 val32->PortSupportedClassofService = 8068 val->PortSupportedClassofService; 8069 val32->PortType = val->PortType; 8070 val32->PortState = val->PortState; 8071 val32->PortSupportedFc4Types[0] = 8072 val->PortSupportedFc4Types[0]; 8073 bcopy(val->PortActiveFc4Types, 8074 val32->PortActiveFc4Types, 8075 sizeof (tmp_pd->pd_fc4types)); 8076 val32->PortSupportedSpeed = 8077 val->PortSupportedSpeed; 8078 val32->PortSpeed = val->PortSpeed; 8079 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8080 val32->NumberofDiscoveredPorts = 8081 val->NumberofDiscoveredPorts; 8082 8083 if (fp_copyout((void *)val32, 8084 (void *)fcio->fcio_obuf, 8085 fcio->fcio_olen, mode) == 0) { 8086 if (fp_fcio_copyout(fcio, data, mode)) { 8087 rval = EFAULT; 8088 } 8089 } else { 8090 rval = EFAULT; 8091 } 8092 8093 kmem_free(val32, sizeof (*val32)); 8094 } else { 8095 if 
(fp_copyout((void *)val, 8096 (void *)fcio->fcio_obuf, 8097 fcio->fcio_olen, mode) == 0) { 8098 if (fp_fcio_copyout(fcio, data, mode)) { 8099 rval = EFAULT; 8100 } 8101 } else { 8102 rval = EFAULT; 8103 } 8104 } 8105 } 8106 kmem_free(val, sizeof (*val)); 8107 break; 8108 } 8109 8110 case FCIO_GET_NUM_DEVS: { 8111 int num_devices; 8112 8113 if (fcio->fcio_olen != sizeof (num_devices) || 8114 fcio->fcio_xfer != FCIO_XFER_READ) { 8115 rval = EINVAL; 8116 break; 8117 } 8118 8119 mutex_enter(&port->fp_mutex); 8120 switch (port->fp_topology) { 8121 case FC_TOP_PRIVATE_LOOP: 8122 case FC_TOP_PT_PT: 8123 num_devices = port->fp_total_devices; 8124 fcio->fcio_errno = FC_SUCCESS; 8125 break; 8126 8127 case FC_TOP_PUBLIC_LOOP: 8128 case FC_TOP_FABRIC: 8129 mutex_exit(&port->fp_mutex); 8130 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8131 NULL, KM_SLEEP); 8132 ASSERT(job != NULL); 8133 8134 /* 8135 * In FC-GS-2 the Name Server doesn't send out 8136 * RSCNs for any Name Server Database updates 8137 * When it is finally fixed there is no need 8138 * to probe as below and should be removed. 8139 */ 8140 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8141 fctl_dealloc_job(job); 8142 8143 mutex_enter(&port->fp_mutex); 8144 num_devices = port->fp_total_devices; 8145 fcio->fcio_errno = FC_SUCCESS; 8146 break; 8147 8148 case FC_TOP_NO_NS: 8149 /* FALLTHROUGH */ 8150 case FC_TOP_UNKNOWN: 8151 /* FALLTHROUGH */ 8152 default: 8153 num_devices = 0; 8154 fcio->fcio_errno = FC_SUCCESS; 8155 break; 8156 } 8157 mutex_exit(&port->fp_mutex); 8158 8159 if (fp_copyout((void *)&num_devices, 8160 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8161 mode) == 0) { 8162 if (fp_fcio_copyout(fcio, data, mode)) { 8163 rval = EFAULT; 8164 } 8165 } else { 8166 rval = EFAULT; 8167 } 8168 break; 8169 } 8170 8171 case FCIO_GET_DEV_LIST: { 8172 int num_devices; 8173 int new_count; 8174 int map_size; 8175 8176 if (fcio->fcio_xfer != FCIO_XFER_READ || 8177 fcio->fcio_alen != sizeof (new_count)) { 8178 rval = EINVAL; 8179 break; 8180 } 8181 8182 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8183 8184 mutex_enter(&port->fp_mutex); 8185 if (num_devices < port->fp_total_devices) { 8186 fcio->fcio_errno = FC_TOOMANY; 8187 new_count = port->fp_total_devices; 8188 mutex_exit(&port->fp_mutex); 8189 8190 if (fp_copyout((void *)&new_count, 8191 (void *)fcio->fcio_abuf, 8192 sizeof (new_count), mode)) { 8193 rval = EFAULT; 8194 break; 8195 } 8196 8197 if (fp_fcio_copyout(fcio, data, mode)) { 8198 rval = EFAULT; 8199 break; 8200 } 8201 rval = EINVAL; 8202 break; 8203 } 8204 8205 if (port->fp_total_devices <= 0) { 8206 fcio->fcio_errno = FC_NO_MAP; 8207 new_count = port->fp_total_devices; 8208 mutex_exit(&port->fp_mutex); 8209 8210 if (fp_copyout((void *)&new_count, 8211 (void *)fcio->fcio_abuf, 8212 sizeof (new_count), mode)) { 8213 rval = EFAULT; 8214 break; 8215 } 8216 8217 if (fp_fcio_copyout(fcio, data, mode)) { 8218 rval = EFAULT; 8219 break; 8220 } 8221 rval = EINVAL; 8222 break; 8223 } 8224 8225 switch (port->fp_topology) { 8226 case FC_TOP_PRIVATE_LOOP: 8227 if (fp_fillout_loopmap(port, fcio, 8228 mode) != FC_SUCCESS) { 8229 rval = EFAULT; 8230 break; 8231 } 8232 if (fp_fcio_copyout(fcio, data, mode)) { 8233 rval = EFAULT; 8234 } 8235 break; 8236 8237 case FC_TOP_PT_PT: 8238 if (fp_fillout_p2pmap(port, fcio, 8239 mode) != FC_SUCCESS) { 8240 rval = EFAULT; 8241 break; 8242 } 8243 if (fp_fcio_copyout(fcio, data, mode)) { 8244 rval = EFAULT; 8245 } 8246 break; 8247 8248 case FC_TOP_PUBLIC_LOOP: 8249 case FC_TOP_FABRIC: { 8250 
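#if 0
		/*
		 * Editor's illustrative sketch (never compiled here) of the
		 * userland side of this interface: size the list with
		 * FCIO_GET_NUM_DEVS, then fetch it with FCIO_GET_DEV_LIST.
		 * The <sys/fcio.h> header, the FCIO_CMD ioctl number and
		 * the fp device path are assumptions supplied by the
		 * caller; none of them are defined in this file.  On
		 * FC_TOOMANY the required count comes back through
		 * fcio_abuf and the call should simply be retried with a
		 * larger buffer.
		 */
#include <sys/types.h>
#include <sys/fcio.h>		/* assumed userland header for fcio_t */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static fc_port_dev_t *
example_get_dev_list(const char *fp_path, int *countp)
{
	fcio_t		fcio;
	fc_port_dev_t	*list;
	int		fd, ndevs, actual = 0;

	if ((fd = open(fp_path, O_RDONLY | O_NDELAY)) < 0)
		return (NULL);

	/* How many devices does the port currently know about? */
	(void) memset(&fcio, 0, sizeof (fcio));
	fcio.fcio_cmd = FCIO_GET_NUM_DEVS;
	fcio.fcio_xfer = FCIO_XFER_READ;
	fcio.fcio_olen = sizeof (ndevs);
	fcio.fcio_obuf = (caddr_t)&ndevs;
	if (ioctl(fd, FCIO_CMD, &fcio) != 0 || ndevs <= 0) {
		(void) close(fd);
		return (NULL);
	}

	/* Fetch the device list sized from the count above. */
	list = calloc(ndevs, sizeof (fc_port_dev_t));
	(void) memset(&fcio, 0, sizeof (fcio));
	fcio.fcio_cmd = FCIO_GET_DEV_LIST;
	fcio.fcio_xfer = FCIO_XFER_READ;
	fcio.fcio_olen = ndevs * sizeof (fc_port_dev_t);
	fcio.fcio_obuf = (caddr_t)list;
	fcio.fcio_alen = sizeof (actual);
	fcio.fcio_abuf = (caddr_t)&actual;
	if (ioctl(fd, FCIO_CMD, &fcio) != 0) {
		free(list);
		list = NULL;
	}
	(void) close(fd);
	*countp = actual;
	return (list);
}
#endif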
fctl_ns_req_t *ns_cmd; 8251 8252 map_size = 8253 sizeof (fc_port_dev_t) * port->fp_total_devices; 8254 8255 mutex_exit(&port->fp_mutex); 8256 8257 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8258 sizeof (ns_resp_gan_t), map_size, 8259 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8260 KM_SLEEP); 8261 ASSERT(ns_cmd != NULL); 8262 8263 ns_cmd->ns_gan_index = 0; 8264 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8265 ns_cmd->ns_cmd_code = NS_GA_NXT; 8266 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8267 8268 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8269 NULL, KM_SLEEP); 8270 ASSERT(job != NULL); 8271 8272 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8273 8274 if (ret != FC_SUCCESS || 8275 job->job_result != FC_SUCCESS) { 8276 fctl_free_ns_cmd(ns_cmd); 8277 8278 fcio->fcio_errno = job->job_result; 8279 new_count = 0; 8280 if (fp_copyout((void *)&new_count, 8281 (void *)fcio->fcio_abuf, 8282 sizeof (new_count), mode)) { 8283 fctl_dealloc_job(job); 8284 mutex_enter(&port->fp_mutex); 8285 rval = EFAULT; 8286 break; 8287 } 8288 8289 if (fp_fcio_copyout(fcio, data, mode)) { 8290 fctl_dealloc_job(job); 8291 mutex_enter(&port->fp_mutex); 8292 rval = EFAULT; 8293 break; 8294 } 8295 rval = EIO; 8296 mutex_enter(&port->fp_mutex); 8297 break; 8298 } 8299 fctl_dealloc_job(job); 8300 8301 new_count = ns_cmd->ns_gan_index; 8302 if (fp_copyout((void *)&new_count, 8303 (void *)fcio->fcio_abuf, sizeof (new_count), 8304 mode)) { 8305 rval = EFAULT; 8306 fctl_free_ns_cmd(ns_cmd); 8307 mutex_enter(&port->fp_mutex); 8308 break; 8309 } 8310 8311 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8312 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8313 ns_cmd->ns_gan_index, mode)) { 8314 rval = EFAULT; 8315 fctl_free_ns_cmd(ns_cmd); 8316 mutex_enter(&port->fp_mutex); 8317 break; 8318 } 8319 fctl_free_ns_cmd(ns_cmd); 8320 8321 if (fp_fcio_copyout(fcio, data, mode)) { 8322 rval = EFAULT; 8323 } 8324 mutex_enter(&port->fp_mutex); 8325 break; 8326 } 8327 8328 case FC_TOP_NO_NS: 8329 /* FALLTHROUGH */ 8330 case FC_TOP_UNKNOWN: 8331 /* FALLTHROUGH */ 8332 default: 8333 fcio->fcio_errno = FC_NO_MAP; 8334 num_devices = port->fp_total_devices; 8335 8336 if (fp_copyout((void *)&new_count, 8337 (void *)fcio->fcio_abuf, 8338 sizeof (new_count), mode)) { 8339 rval = EFAULT; 8340 break; 8341 } 8342 8343 if (fp_fcio_copyout(fcio, data, mode)) { 8344 rval = EFAULT; 8345 break; 8346 } 8347 rval = EINVAL; 8348 break; 8349 } 8350 mutex_exit(&port->fp_mutex); 8351 break; 8352 } 8353 8354 case FCIO_GET_SYM_PNAME: { 8355 rval = ENOTSUP; 8356 break; 8357 } 8358 8359 case FCIO_GET_SYM_NNAME: { 8360 rval = ENOTSUP; 8361 break; 8362 } 8363 8364 case FCIO_SET_SYM_PNAME: { 8365 rval = ENOTSUP; 8366 break; 8367 } 8368 8369 case FCIO_SET_SYM_NNAME: { 8370 rval = ENOTSUP; 8371 break; 8372 } 8373 8374 case FCIO_GET_LOGI_PARAMS: { 8375 la_wwn_t pwwn; 8376 la_wwn_t *my_pwwn; 8377 la_els_logi_t *params; 8378 la_els_logi32_t *params32; 8379 fc_remote_node_t *node; 8380 fc_remote_port_t *pd; 8381 8382 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8383 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8384 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8385 rval = EINVAL; 8386 break; 8387 } 8388 8389 if (use32 == B_TRUE) { 8390 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8391 rval = EINVAL; 8392 break; 8393 } 8394 } else { 8395 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8396 rval = EINVAL; 8397 break; 8398 } 8399 } 8400 8401 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8402 rval = EFAULT; 8403 break; 8404 } 8405 
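		/*
		 * Look the remote port up by its port WWN.  If no matching
		 * remote port is known, the request is honored only when
		 * the WWN names this local port itself, in which case the
		 * port's own service parameters are returned instead.
		 */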
8406 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8407 if (pd == NULL) { 8408 mutex_enter(&port->fp_mutex); 8409 my_pwwn = &port->fp_service_params.nport_ww_name; 8410 mutex_exit(&port->fp_mutex); 8411 8412 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8413 rval = ENXIO; 8414 break; 8415 } 8416 8417 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8418 mutex_enter(&port->fp_mutex); 8419 *params = port->fp_service_params; 8420 mutex_exit(&port->fp_mutex); 8421 } else { 8422 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8423 8424 mutex_enter(&pd->pd_mutex); 8425 params->ls_code.mbz = params->ls_code.ls_code = 0; 8426 params->common_service = pd->pd_csp; 8427 params->nport_ww_name = pd->pd_port_name; 8428 params->class_1 = pd->pd_clsp1; 8429 params->class_2 = pd->pd_clsp2; 8430 params->class_3 = pd->pd_clsp3; 8431 node = pd->pd_remote_nodep; 8432 mutex_exit(&pd->pd_mutex); 8433 8434 bzero(params->reserved, sizeof (params->reserved)); 8435 8436 mutex_enter(&node->fd_mutex); 8437 bcopy(node->fd_vv, params->vendor_version, 8438 sizeof (node->fd_vv)); 8439 params->node_ww_name = node->fd_node_name; 8440 mutex_exit(&node->fd_mutex); 8441 8442 fctl_release_remote_port(pd); 8443 } 8444 8445 if (use32 == B_TRUE) { 8446 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8447 8448 params32->ls_code.mbz = params->ls_code.mbz; 8449 params32->common_service = params->common_service; 8450 params32->nport_ww_name = params->nport_ww_name; 8451 params32->class_1 = params->class_1; 8452 params32->class_2 = params->class_2; 8453 params32->class_3 = params->class_3; 8454 bzero(params32->reserved, sizeof (params32->reserved)); 8455 bcopy(params->vendor_version, params32->vendor_version, 8456 sizeof (node->fd_vv)); 8457 params32->node_ww_name = params->node_ww_name; 8458 8459 if (ddi_copyout((void *)params32, 8460 (void *)fcio->fcio_obuf, 8461 sizeof (*params32), mode)) { 8462 rval = EFAULT; 8463 } 8464 8465 kmem_free(params32, sizeof (*params32)); 8466 } else { 8467 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8468 sizeof (*params), mode)) { 8469 rval = EFAULT; 8470 } 8471 } 8472 8473 kmem_free(params, sizeof (*params)); 8474 if (fp_fcio_copyout(fcio, data, mode)) { 8475 rval = EFAULT; 8476 } 8477 break; 8478 } 8479 8480 case FCIO_DEV_LOGOUT: 8481 case FCIO_DEV_LOGIN: 8482 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8483 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8484 rval = EINVAL; 8485 8486 if (fp_fcio_copyout(fcio, data, mode)) { 8487 rval = EFAULT; 8488 } 8489 break; 8490 } 8491 8492 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8493 jcode = JOB_FCIO_LOGIN; 8494 } else { 8495 jcode = JOB_FCIO_LOGOUT; 8496 } 8497 8498 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8499 bcopy(fcio, kfcio, sizeof (*fcio)); 8500 8501 if (kfcio->fcio_ilen) { 8502 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8503 KM_SLEEP); 8504 8505 if (ddi_copyin((void *)fcio->fcio_ibuf, 8506 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8507 mode)) { 8508 rval = EFAULT; 8509 8510 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8511 kmem_free(kfcio, sizeof (*kfcio)); 8512 fcio->fcio_errno = job->job_result; 8513 if (fp_fcio_copyout(fcio, data, mode)) { 8514 rval = EFAULT; 8515 } 8516 break; 8517 } 8518 } 8519 8520 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8521 job->job_private = kfcio; 8522 8523 fctl_enque_job(port, job); 8524 fctl_jobwait(job); 8525 8526 rval = job->job_result; 8527 8528 fcio->fcio_errno = kfcio->fcio_errno; 8529 if (fp_fcio_copyout(fcio, data, mode)) { 8530 rval = EFAULT; 8531 } 8532 8533 
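		/*
		 * Release the kernel copies of the caller's fcio and its
		 * copied-in input buffer, then the job.
		 */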
kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8534 kmem_free(kfcio, sizeof (*kfcio)); 8535 fctl_dealloc_job(job); 8536 break; 8537 8538 case FCIO_GET_STATE: { 8539 la_wwn_t pwwn; 8540 uint32_t state; 8541 fc_remote_port_t *pd; 8542 fctl_ns_req_t *ns_cmd; 8543 8544 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8545 fcio->fcio_olen != sizeof (state) || 8546 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8547 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8548 rval = EINVAL; 8549 break; 8550 } 8551 8552 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8553 rval = EFAULT; 8554 break; 8555 } 8556 fcio->fcio_errno = 0; 8557 8558 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8559 if (pd == NULL) { 8560 mutex_enter(&port->fp_mutex); 8561 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8562 mutex_exit(&port->fp_mutex); 8563 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8564 NULL, NULL, KM_SLEEP); 8565 8566 job->job_counter = 1; 8567 job->job_result = FC_SUCCESS; 8568 8569 ns_cmd = fctl_alloc_ns_cmd( 8570 sizeof (ns_req_gid_pn_t), 8571 sizeof (ns_resp_gid_pn_t), 8572 sizeof (ns_resp_gid_pn_t), 8573 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8574 ASSERT(ns_cmd != NULL); 8575 8576 ns_cmd->ns_cmd_code = NS_GID_PN; 8577 ((ns_req_gid_pn_t *) 8578 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8579 8580 ret = fp_ns_query(port, ns_cmd, job, 8581 1, KM_SLEEP); 8582 8583 if (ret != FC_SUCCESS || job->job_result != 8584 FC_SUCCESS) { 8585 if (ret != FC_SUCCESS) { 8586 fcio->fcio_errno = ret; 8587 } else { 8588 fcio->fcio_errno = 8589 job->job_result; 8590 } 8591 rval = EIO; 8592 } else { 8593 state = PORT_DEVICE_INVALID; 8594 } 8595 fctl_free_ns_cmd(ns_cmd); 8596 fctl_dealloc_job(job); 8597 } else { 8598 mutex_exit(&port->fp_mutex); 8599 fcio->fcio_errno = FC_BADWWN; 8600 rval = ENXIO; 8601 } 8602 } else { 8603 mutex_enter(&pd->pd_mutex); 8604 state = pd->pd_state; 8605 mutex_exit(&pd->pd_mutex); 8606 8607 fctl_release_remote_port(pd); 8608 } 8609 8610 if (!rval) { 8611 if (ddi_copyout((void *)&state, 8612 (void *)fcio->fcio_obuf, sizeof (state), 8613 mode)) { 8614 rval = EFAULT; 8615 } 8616 } 8617 if (fp_fcio_copyout(fcio, data, mode)) { 8618 rval = EFAULT; 8619 } 8620 break; 8621 } 8622 8623 case FCIO_DEV_REMOVE: { 8624 la_wwn_t pwwn; 8625 fc_portmap_t *changelist; 8626 fc_remote_port_t *pd; 8627 8628 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8629 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8630 rval = EINVAL; 8631 break; 8632 } 8633 8634 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8635 rval = EFAULT; 8636 break; 8637 } 8638 8639 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8640 if (pd == NULL) { 8641 rval = ENXIO; 8642 fcio->fcio_errno = FC_BADWWN; 8643 if (fp_fcio_copyout(fcio, data, mode)) { 8644 rval = EFAULT; 8645 } 8646 break; 8647 } 8648 8649 mutex_enter(&pd->pd_mutex); 8650 if (pd->pd_ref_count > 1) { 8651 mutex_exit(&pd->pd_mutex); 8652 8653 rval = EBUSY; 8654 fcio->fcio_errno = FC_FAILURE; 8655 fctl_release_remote_port(pd); 8656 8657 if (fp_fcio_copyout(fcio, data, mode)) { 8658 rval = EFAULT; 8659 } 8660 break; 8661 } 8662 mutex_exit(&pd->pd_mutex); 8663 8664 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8665 8666 fctl_copy_portmap(changelist, pd); 8667 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8668 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8669 8670 fctl_release_remote_port(pd); 8671 break; 8672 } 8673 8674 case FCIO_GET_FCODE_REV: { 8675 caddr_t fcode_rev; 8676 fc_fca_pm_t pm; 8677 8678 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8679 fcio->fcio_xfer != 
FCIO_XFER_READ) { 8680 rval = EINVAL; 8681 break; 8682 } 8683 bzero((caddr_t)&pm, sizeof (pm)); 8684 8685 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8686 8687 pm.pm_cmd_flags = FC_FCA_PM_READ; 8688 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8689 pm.pm_data_len = fcio->fcio_olen; 8690 pm.pm_data_buf = fcode_rev; 8691 8692 ret = port->fp_fca_tran->fca_port_manage( 8693 port->fp_fca_handle, &pm); 8694 8695 if (ret == FC_SUCCESS) { 8696 if (ddi_copyout((void *)fcode_rev, 8697 (void *)fcio->fcio_obuf, 8698 fcio->fcio_olen, mode) == 0) { 8699 if (fp_fcio_copyout(fcio, data, mode)) { 8700 rval = EFAULT; 8701 } 8702 } else { 8703 rval = EFAULT; 8704 } 8705 } else { 8706 /* 8707 * check if buffer was not large enough to obtain 8708 * FCODE version. 8709 */ 8710 if (pm.pm_data_len > fcio->fcio_olen) { 8711 rval = ENOMEM; 8712 } else { 8713 rval = EIO; 8714 } 8715 fcio->fcio_errno = ret; 8716 if (fp_fcio_copyout(fcio, data, mode)) { 8717 rval = EFAULT; 8718 } 8719 } 8720 kmem_free(fcode_rev, fcio->fcio_olen); 8721 break; 8722 } 8723 8724 case FCIO_GET_FW_REV: { 8725 caddr_t fw_rev; 8726 fc_fca_pm_t pm; 8727 8728 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8729 fcio->fcio_xfer != FCIO_XFER_READ) { 8730 rval = EINVAL; 8731 break; 8732 } 8733 bzero((caddr_t)&pm, sizeof (pm)); 8734 8735 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8736 8737 pm.pm_cmd_flags = FC_FCA_PM_READ; 8738 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8739 pm.pm_data_len = fcio->fcio_olen; 8740 pm.pm_data_buf = fw_rev; 8741 8742 ret = port->fp_fca_tran->fca_port_manage( 8743 port->fp_fca_handle, &pm); 8744 8745 if (ret == FC_SUCCESS) { 8746 if (ddi_copyout((void *)fw_rev, 8747 (void *)fcio->fcio_obuf, 8748 fcio->fcio_olen, mode) == 0) { 8749 if (fp_fcio_copyout(fcio, data, mode)) { 8750 rval = EFAULT; 8751 } 8752 } else { 8753 rval = EFAULT; 8754 } 8755 } else { 8756 if (fp_fcio_copyout(fcio, data, mode)) { 8757 rval = EFAULT; 8758 } 8759 rval = EIO; 8760 } 8761 kmem_free(fw_rev, fcio->fcio_olen); 8762 break; 8763 } 8764 8765 case FCIO_GET_DUMP_SIZE: { 8766 uint32_t dump_size; 8767 fc_fca_pm_t pm; 8768 8769 if (fcio->fcio_olen != sizeof (dump_size) || 8770 fcio->fcio_xfer != FCIO_XFER_READ) { 8771 rval = EINVAL; 8772 break; 8773 } 8774 bzero((caddr_t)&pm, sizeof (pm)); 8775 pm.pm_cmd_flags = FC_FCA_PM_READ; 8776 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8777 pm.pm_data_len = sizeof (dump_size); 8778 pm.pm_data_buf = (caddr_t)&dump_size; 8779 8780 ret = port->fp_fca_tran->fca_port_manage( 8781 port->fp_fca_handle, &pm); 8782 8783 if (ret == FC_SUCCESS) { 8784 if (ddi_copyout((void *)&dump_size, 8785 (void *)fcio->fcio_obuf, sizeof (dump_size), 8786 mode) == 0) { 8787 if (fp_fcio_copyout(fcio, data, mode)) { 8788 rval = EFAULT; 8789 } 8790 } else { 8791 rval = EFAULT; 8792 } 8793 } else { 8794 fcio->fcio_errno = ret; 8795 rval = EIO; 8796 if (fp_fcio_copyout(fcio, data, mode)) { 8797 rval = EFAULT; 8798 } 8799 } 8800 break; 8801 } 8802 8803 case FCIO_DOWNLOAD_FW: { 8804 caddr_t firmware; 8805 fc_fca_pm_t pm; 8806 8807 if (fcio->fcio_ilen <= 0 || 8808 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8809 rval = EINVAL; 8810 break; 8811 } 8812 8813 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8814 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8815 fcio->fcio_ilen, mode)) { 8816 rval = EFAULT; 8817 kmem_free(firmware, fcio->fcio_ilen); 8818 break; 8819 } 8820 8821 bzero((caddr_t)&pm, sizeof (pm)); 8822 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8823 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8824 pm.pm_data_len = fcio->fcio_ilen; 8825 pm.pm_data_buf = firmware; 
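		/*
		 * Hand the staged firmware image to the FCA through its
		 * port_manage entry point; the call is synchronous, so the
		 * staging buffer can be freed as soon as it returns.
		 */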
8826 8827 ret = port->fp_fca_tran->fca_port_manage( 8828 port->fp_fca_handle, &pm); 8829 8830 kmem_free(firmware, fcio->fcio_ilen); 8831 8832 if (ret != FC_SUCCESS) { 8833 fcio->fcio_errno = ret; 8834 rval = EIO; 8835 if (fp_fcio_copyout(fcio, data, mode)) { 8836 rval = EFAULT; 8837 } 8838 } 8839 break; 8840 } 8841 8842 case FCIO_DOWNLOAD_FCODE: { 8843 caddr_t fcode; 8844 fc_fca_pm_t pm; 8845 8846 if (fcio->fcio_ilen <= 0 || 8847 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8848 rval = EINVAL; 8849 break; 8850 } 8851 8852 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8853 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8854 fcio->fcio_ilen, mode)) { 8855 rval = EFAULT; 8856 kmem_free(fcode, fcio->fcio_ilen); 8857 break; 8858 } 8859 8860 bzero((caddr_t)&pm, sizeof (pm)); 8861 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8862 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8863 pm.pm_data_len = fcio->fcio_ilen; 8864 pm.pm_data_buf = fcode; 8865 8866 ret = port->fp_fca_tran->fca_port_manage( 8867 port->fp_fca_handle, &pm); 8868 8869 kmem_free(fcode, fcio->fcio_ilen); 8870 8871 if (ret != FC_SUCCESS) { 8872 fcio->fcio_errno = ret; 8873 rval = EIO; 8874 if (fp_fcio_copyout(fcio, data, mode)) { 8875 rval = EFAULT; 8876 } 8877 } 8878 break; 8879 } 8880 8881 case FCIO_FORCE_DUMP: 8882 ret = port->fp_fca_tran->fca_reset( 8883 port->fp_fca_handle, FC_FCA_CORE); 8884 8885 if (ret != FC_SUCCESS) { 8886 fcio->fcio_errno = ret; 8887 rval = EIO; 8888 if (fp_fcio_copyout(fcio, data, mode)) { 8889 rval = EFAULT; 8890 } 8891 } 8892 break; 8893 8894 case FCIO_GET_DUMP: { 8895 caddr_t dump; 8896 uint32_t dump_size; 8897 fc_fca_pm_t pm; 8898 8899 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8900 rval = EINVAL; 8901 break; 8902 } 8903 bzero((caddr_t)&pm, sizeof (pm)); 8904 8905 pm.pm_cmd_flags = FC_FCA_PM_READ; 8906 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8907 pm.pm_data_len = sizeof (dump_size); 8908 pm.pm_data_buf = (caddr_t)&dump_size; 8909 8910 ret = port->fp_fca_tran->fca_port_manage( 8911 port->fp_fca_handle, &pm); 8912 8913 if (ret != FC_SUCCESS) { 8914 fcio->fcio_errno = ret; 8915 rval = EIO; 8916 if (fp_fcio_copyout(fcio, data, mode)) { 8917 rval = EFAULT; 8918 } 8919 break; 8920 } 8921 if (fcio->fcio_olen != dump_size) { 8922 fcio->fcio_errno = FC_NOMEM; 8923 rval = EINVAL; 8924 if (fp_fcio_copyout(fcio, data, mode)) { 8925 rval = EFAULT; 8926 } 8927 break; 8928 } 8929 8930 dump = kmem_zalloc(dump_size, KM_SLEEP); 8931 8932 bzero((caddr_t)&pm, sizeof (pm)); 8933 pm.pm_cmd_flags = FC_FCA_PM_READ; 8934 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8935 pm.pm_data_len = dump_size; 8936 pm.pm_data_buf = dump; 8937 8938 ret = port->fp_fca_tran->fca_port_manage( 8939 port->fp_fca_handle, &pm); 8940 8941 if (ret == FC_SUCCESS) { 8942 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8943 dump_size, mode) == 0) { 8944 if (fp_fcio_copyout(fcio, data, mode)) { 8945 rval = EFAULT; 8946 } 8947 } else { 8948 rval = EFAULT; 8949 } 8950 } else { 8951 fcio->fcio_errno = ret; 8952 rval = EIO; 8953 if (fp_fcio_copyout(fcio, data, mode)) { 8954 rval = EFAULT; 8955 } 8956 } 8957 kmem_free(dump, dump_size); 8958 break; 8959 } 8960 8961 case FCIO_GET_TOPOLOGY: { 8962 uint32_t user_topology; 8963 8964 if (fcio->fcio_xfer != FCIO_XFER_READ || 8965 fcio->fcio_olen != sizeof (user_topology)) { 8966 rval = EINVAL; 8967 break; 8968 } 8969 8970 mutex_enter(&port->fp_mutex); 8971 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8972 user_topology = FC_TOP_UNKNOWN; 8973 } else { 8974 user_topology = port->fp_topology; 8975 } 8976 mutex_exit(&port->fp_mutex); 
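		/*
		 * Copy the topology value snapshotted under fp_mutex out to
		 * the caller's buffer.
		 */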
8977 8978 if (ddi_copyout((void *)&user_topology, 8979 (void *)fcio->fcio_obuf, sizeof (user_topology), 8980 mode)) { 8981 rval = EFAULT; 8982 } 8983 break; 8984 } 8985 8986 case FCIO_RESET_LINK: { 8987 la_wwn_t pwwn; 8988 8989 /* 8990 * Look at the output buffer field; if this field has zero 8991 * bytes then attempt to reset the local link/loop. If the 8992 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 8993 * and if yes, determine the LFA and reset the remote LIP 8994 * by LINIT ELS. 8995 */ 8996 8997 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 8998 fcio->fcio_ilen != sizeof (pwwn)) { 8999 rval = EINVAL; 9000 break; 9001 } 9002 9003 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9004 sizeof (pwwn), mode)) { 9005 rval = EFAULT; 9006 break; 9007 } 9008 9009 mutex_enter(&port->fp_mutex); 9010 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9011 mutex_exit(&port->fp_mutex); 9012 break; 9013 } 9014 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9015 mutex_exit(&port->fp_mutex); 9016 9017 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9018 if (job == NULL) { 9019 rval = ENOMEM; 9020 break; 9021 } 9022 job->job_counter = 1; 9023 job->job_private = (void *)&pwwn; 9024 9025 fctl_enque_job(port, job); 9026 fctl_jobwait(job); 9027 9028 mutex_enter(&port->fp_mutex); 9029 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9030 mutex_exit(&port->fp_mutex); 9031 9032 if (job->job_result != FC_SUCCESS) { 9033 fcio->fcio_errno = job->job_result; 9034 rval = EIO; 9035 if (fp_fcio_copyout(fcio, data, mode)) { 9036 rval = EFAULT; 9037 } 9038 } 9039 fctl_dealloc_job(job); 9040 break; 9041 } 9042 9043 case FCIO_RESET_HARD: 9044 ret = port->fp_fca_tran->fca_reset( 9045 port->fp_fca_handle, FC_FCA_RESET); 9046 if (ret != FC_SUCCESS) { 9047 fcio->fcio_errno = ret; 9048 rval = EIO; 9049 if (fp_fcio_copyout(fcio, data, mode)) { 9050 rval = EFAULT; 9051 } 9052 } 9053 break; 9054 9055 case FCIO_RESET_HARD_CORE: 9056 ret = port->fp_fca_tran->fca_reset( 9057 port->fp_fca_handle, FC_FCA_RESET_CORE); 9058 if (ret != FC_SUCCESS) { 9059 rval = EIO; 9060 fcio->fcio_errno = ret; 9061 if (fp_fcio_copyout(fcio, data, mode)) { 9062 rval = EFAULT; 9063 } 9064 } 9065 break; 9066 9067 case FCIO_DIAG: { 9068 fc_fca_pm_t pm; 9069 9070 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9071 9072 /* Validate user buffer from ioctl call. 
*/ 9073 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9074 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9075 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9076 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9077 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9078 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9079 rval = EFAULT; 9080 break; 9081 } 9082 9083 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9084 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9085 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9086 fcio->fcio_ilen, mode)) { 9087 rval = EFAULT; 9088 goto fp_fcio_diag_cleanup; 9089 } 9090 } 9091 9092 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9093 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9094 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9095 fcio->fcio_alen, mode)) { 9096 rval = EFAULT; 9097 goto fp_fcio_diag_cleanup; 9098 } 9099 } 9100 9101 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9102 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9103 } 9104 9105 pm.pm_cmd_code = FC_PORT_DIAG; 9106 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9107 9108 ret = port->fp_fca_tran->fca_port_manage( 9109 port->fp_fca_handle, &pm); 9110 9111 if (ret != FC_SUCCESS) { 9112 if (ret == FC_INVALID_REQUEST) { 9113 rval = ENOTTY; 9114 } else { 9115 rval = EIO; 9116 } 9117 9118 fcio->fcio_errno = ret; 9119 if (fp_fcio_copyout(fcio, data, mode)) { 9120 rval = EFAULT; 9121 } 9122 goto fp_fcio_diag_cleanup; 9123 } 9124 9125 /* 9126 * pm_stat_len will contain the number of status bytes 9127 * an FCA driver requires to return the complete status 9128 * of the requested diag operation. If the user buffer 9129 * is not large enough to hold the entire status, we 9130 * copy only the portion of data that fits in the buffer and 9131 * return ENOMEM to the user application.
9132 */ 9133 if (pm.pm_stat_len > fcio->fcio_olen) { 9134 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9135 "fp:FCIO_DIAG:status buffer too small\n"); 9136 9137 rval = ENOMEM; 9138 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9139 fcio->fcio_olen, mode)) { 9140 rval = EFAULT; 9141 goto fp_fcio_diag_cleanup; 9142 } 9143 } else { 9144 /* 9145 * Copy only data pm_stat_len bytes of data 9146 */ 9147 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9148 pm.pm_stat_len, mode)) { 9149 rval = EFAULT; 9150 goto fp_fcio_diag_cleanup; 9151 } 9152 } 9153 9154 if (fp_fcio_copyout(fcio, data, mode)) { 9155 rval = EFAULT; 9156 } 9157 9158 fp_fcio_diag_cleanup: 9159 if (pm.pm_cmd_buf != NULL) { 9160 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9161 } 9162 if (pm.pm_data_buf != NULL) { 9163 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9164 } 9165 if (pm.pm_stat_buf != NULL) { 9166 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9167 } 9168 9169 break; 9170 } 9171 9172 case FCIO_GET_NODE_ID: { 9173 /* validate parameters */ 9174 if (fcio->fcio_xfer != FCIO_XFER_READ || 9175 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9176 rval = EINVAL; 9177 break; 9178 } 9179 9180 rval = fp_get_rnid(port, data, mode, fcio); 9181 9182 /* ioctl handling is over */ 9183 break; 9184 } 9185 9186 case FCIO_SEND_NODE_ID: { 9187 la_wwn_t pwwn; 9188 9189 /* validate parameters */ 9190 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9191 fcio->fcio_xfer != FCIO_XFER_READ) { 9192 rval = EINVAL; 9193 break; 9194 } 9195 9196 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9197 sizeof (la_wwn_t), mode)) { 9198 rval = EFAULT; 9199 break; 9200 } 9201 9202 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9203 9204 /* ioctl handling is over */ 9205 break; 9206 } 9207 9208 case FCIO_SET_NODE_ID: { 9209 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9210 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9211 rval = EINVAL; 9212 break; 9213 } 9214 9215 rval = fp_set_rnid(port, data, mode, fcio); 9216 break; 9217 } 9218 9219 case FCIO_LINK_STATUS: { 9220 fc_portid_t rls_req; 9221 fc_rls_acc_t *rls_acc; 9222 fc_fca_pm_t pm; 9223 uint32_t dest, src_id; 9224 fp_cmd_t *cmd; 9225 fc_remote_port_t *pd; 9226 uchar_t pd_flags; 9227 9228 /* validate parameters */ 9229 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9230 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9231 fcio->fcio_xfer != FCIO_XFER_RW) { 9232 rval = EINVAL; 9233 break; 9234 } 9235 9236 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9237 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9238 rval = EINVAL; 9239 break; 9240 } 9241 9242 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9243 sizeof (fc_portid_t), mode)) { 9244 rval = EFAULT; 9245 break; 9246 } 9247 9248 9249 /* Determine the destination of the RLS frame */ 9250 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9251 dest = FS_FABRIC_F_PORT; 9252 } else { 9253 dest = rls_req.port_id; 9254 } 9255 9256 mutex_enter(&port->fp_mutex); 9257 src_id = port->fp_port_id.port_id; 9258 mutex_exit(&port->fp_mutex); 9259 9260 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9261 if (dest == 0 || dest == src_id) { 9262 9263 /* Allocate memory for link error status block */ 9264 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9265 ASSERT(rls_acc != NULL); 9266 9267 /* Prepare the port management structure */ 9268 bzero((caddr_t)&pm, sizeof (pm)); 9269 9270 pm.pm_cmd_flags = FC_FCA_PM_READ; 9271 pm.pm_cmd_code = FC_PORT_RLS; 9272 pm.pm_data_len = sizeof (*rls_acc); 9273 pm.pm_data_buf = (caddr_t)rls_acc; 9274 9275 /* 
Get the adapter's link error status block */ 9276 ret = port->fp_fca_tran->fca_port_manage( 9277 port->fp_fca_handle, &pm); 9278 9279 if (ret == FC_SUCCESS) { 9280 /* xfer link status block to userland */ 9281 if (ddi_copyout((void *)rls_acc, 9282 (void *)fcio->fcio_obuf, 9283 sizeof (*rls_acc), mode) == 0) { 9284 if (fp_fcio_copyout(fcio, data, 9285 mode)) { 9286 rval = EFAULT; 9287 } 9288 } else { 9289 rval = EFAULT; 9290 } 9291 } else { 9292 rval = EIO; 9293 fcio->fcio_errno = ret; 9294 if (fp_fcio_copyout(fcio, data, mode)) { 9295 rval = EFAULT; 9296 } 9297 } 9298 9299 kmem_free(rls_acc, sizeof (*rls_acc)); 9300 9301 /* ioctl handling is over */ 9302 break; 9303 } 9304 9305 /* 9306 * Send RLS to the destination port. 9307 * Having RLS frame destination is as FPORT is not yet 9308 * supported and will be implemented in future, if needed. 9309 * Following call to get "pd" will fail if dest is FPORT 9310 */ 9311 pd = fctl_hold_remote_port_by_did(port, dest); 9312 if (pd == NULL) { 9313 fcio->fcio_errno = FC_BADOBJECT; 9314 rval = ENXIO; 9315 if (fp_fcio_copyout(fcio, data, mode)) { 9316 rval = EFAULT; 9317 } 9318 break; 9319 } 9320 9321 mutex_enter(&pd->pd_mutex); 9322 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9323 mutex_exit(&pd->pd_mutex); 9324 fctl_release_remote_port(pd); 9325 9326 fcio->fcio_errno = FC_LOGINREQ; 9327 rval = EINVAL; 9328 if (fp_fcio_copyout(fcio, data, mode)) { 9329 rval = EFAULT; 9330 } 9331 break; 9332 } 9333 ASSERT(pd->pd_login_count >= 1); 9334 mutex_exit(&pd->pd_mutex); 9335 9336 /* 9337 * Allocate job structure and set job_code as DUMMY, 9338 * because we will not go through the job thread. 9339 * Instead fp_sendcmd() is called directly here. 9340 */ 9341 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9342 NULL, NULL, KM_SLEEP); 9343 ASSERT(job != NULL); 9344 9345 job->job_counter = 1; 9346 9347 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9348 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9349 if (cmd == NULL) { 9350 fcio->fcio_errno = FC_NOMEM; 9351 rval = ENOMEM; 9352 9353 fctl_release_remote_port(pd); 9354 9355 fctl_dealloc_job(job); 9356 if (fp_fcio_copyout(fcio, data, mode)) { 9357 rval = EFAULT; 9358 } 9359 break; 9360 } 9361 9362 /* Allocate memory for link error status block */ 9363 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9364 9365 mutex_enter(&port->fp_mutex); 9366 mutex_enter(&pd->pd_mutex); 9367 9368 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9369 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9370 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9371 cmd->cmd_retry_count = 1; 9372 cmd->cmd_ulp_pkt = NULL; 9373 9374 fp_rls_init(cmd, job); 9375 9376 job->job_private = (void *)rls_acc; 9377 9378 pd_flags = pd->pd_flags; 9379 pd->pd_flags = PD_ELS_IN_PROGRESS; 9380 9381 mutex_exit(&pd->pd_mutex); 9382 mutex_exit(&port->fp_mutex); 9383 9384 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9385 fctl_jobwait(job); 9386 9387 fcio->fcio_errno = job->job_result; 9388 if (job->job_result == FC_SUCCESS) { 9389 ASSERT(pd != NULL); 9390 /* 9391 * link error status block is now available. 
9392 * Copy it to userland 9393 */ 9394 ASSERT(job->job_private == (void *)rls_acc); 9395 if (ddi_copyout((void *)rls_acc, 9396 (void *)fcio->fcio_obuf, 9397 sizeof (*rls_acc), mode) == 0) { 9398 if (fp_fcio_copyout(fcio, data, 9399 mode)) { 9400 rval = EFAULT; 9401 } 9402 } else { 9403 rval = EFAULT; 9404 } 9405 } else { 9406 rval = EIO; 9407 } 9408 } else { 9409 rval = EIO; 9410 fp_free_pkt(cmd); 9411 } 9412 9413 if (rval) { 9414 mutex_enter(&port->fp_mutex); 9415 mutex_enter(&pd->pd_mutex); 9416 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9417 pd->pd_flags = pd_flags; 9418 } 9419 mutex_exit(&pd->pd_mutex); 9420 mutex_exit(&port->fp_mutex); 9421 } 9422 9423 fctl_release_remote_port(pd); 9424 fctl_dealloc_job(job); 9425 kmem_free(rls_acc, sizeof (*rls_acc)); 9426 9427 if (fp_fcio_copyout(fcio, data, mode)) { 9428 rval = EFAULT; 9429 } 9430 break; 9431 } 9432 9433 case FCIO_NS: { 9434 fc_ns_cmd_t *ns_req; 9435 fc_ns_cmd32_t *ns_req32; 9436 fctl_ns_req_t *ns_cmd; 9437 9438 if (use32 == B_TRUE) { 9439 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9440 rval = EINVAL; 9441 break; 9442 } 9443 9444 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9445 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9446 9447 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9448 sizeof (*ns_req32), mode)) { 9449 rval = EFAULT; 9450 kmem_free(ns_req, sizeof (*ns_req)); 9451 kmem_free(ns_req32, sizeof (*ns_req32)); 9452 break; 9453 } 9454 9455 ns_req->ns_flags = ns_req32->ns_flags; 9456 ns_req->ns_cmd = ns_req32->ns_cmd; 9457 ns_req->ns_req_len = ns_req32->ns_req_len; 9458 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9459 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9460 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9461 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9462 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9463 9464 kmem_free(ns_req32, sizeof (*ns_req32)); 9465 } else { 9466 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9467 rval = EINVAL; 9468 break; 9469 } 9470 9471 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9472 9473 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9474 sizeof (fc_ns_cmd_t), mode)) { 9475 rval = EFAULT; 9476 kmem_free(ns_req, sizeof (*ns_req)); 9477 break; 9478 } 9479 } 9480 9481 if (ns_req->ns_req_len <= 0) { 9482 rval = EINVAL; 9483 kmem_free(ns_req, sizeof (*ns_req)); 9484 break; 9485 } 9486 9487 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9488 ASSERT(job != NULL); 9489 9490 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9491 ns_req->ns_resp_len, ns_req->ns_resp_len, 9492 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9493 ASSERT(ns_cmd != NULL); 9494 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9495 9496 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9497 ns_cmd->ns_gan_max = 1; 9498 ns_cmd->ns_gan_index = 0; 9499 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9500 } 9501 9502 if (ddi_copyin(ns_req->ns_req_payload, 9503 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9504 rval = EFAULT; 9505 fctl_free_ns_cmd(ns_cmd); 9506 fctl_dealloc_job(job); 9507 kmem_free(ns_req, sizeof (*ns_req)); 9508 break; 9509 } 9510 9511 job->job_private = (void *)ns_cmd; 9512 fctl_enque_job(port, job); 9513 fctl_jobwait(job); 9514 rval = job->job_result; 9515 9516 if (rval == FC_SUCCESS) { 9517 if (ns_req->ns_resp_len) { 9518 if (ddi_copyout(ns_cmd->ns_data_buf, 9519 ns_req->ns_resp_payload, 9520 ns_cmd->ns_data_len, mode)) { 9521 rval = EFAULT; 9522 fctl_free_ns_cmd(ns_cmd); 9523 fctl_dealloc_job(job); 9524 kmem_free(ns_req, sizeof (*ns_req)); 9525 break; 9526 } 9527 } 9528 } else { 9529 rval = EIO; 9530 } 9531 
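		/*
		 * Record the name server response header in the kernel copy
		 * of the request, then release the NS command, the job and
		 * the request buffer before copying the updated fcio back to
		 * the caller.
		 */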
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9532 fctl_free_ns_cmd(ns_cmd); 9533 fctl_dealloc_job(job); 9534 kmem_free(ns_req, sizeof (*ns_req)); 9535 9536 if (fp_fcio_copyout(fcio, data, mode)) { 9537 rval = EFAULT; 9538 } 9539 break; 9540 } 9541 9542 default: 9543 rval = ENOTTY; 9544 break; 9545 } 9546 9547 /* 9548 * If set, reset the EXCL busy bit to 9549 * receive other exclusive access commands 9550 */ 9551 mutex_enter(&port->fp_mutex); 9552 if (port->fp_flag & FP_EXCL_BUSY) { 9553 port->fp_flag &= ~FP_EXCL_BUSY; 9554 } 9555 mutex_exit(&port->fp_mutex); 9556 9557 return (rval); 9558 } 9559 9560 9561 /* 9562 * This function assumes that the response length 9563 * is same regardless of data model (LP32 or LP64) 9564 * which is true for all the ioctls currently 9565 * supported. 9566 */ 9567 static int 9568 fp_copyout(void *from, void *to, size_t len, int mode) 9569 { 9570 return (ddi_copyout(from, to, len, mode)); 9571 } 9572 9573 /* 9574 * This function does the set rnid 9575 */ 9576 static int 9577 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9578 { 9579 int rval = 0; 9580 fc_rnid_t *rnid; 9581 fc_fca_pm_t pm; 9582 9583 /* Allocate memory for node id block */ 9584 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9585 9586 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9587 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9588 kmem_free(rnid, sizeof (fc_rnid_t)); 9589 return (EFAULT); 9590 } 9591 9592 /* Prepare the port management structure */ 9593 bzero((caddr_t)&pm, sizeof (pm)); 9594 9595 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9596 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9597 pm.pm_data_len = sizeof (*rnid); 9598 pm.pm_data_buf = (caddr_t)rnid; 9599 9600 /* Get the adapter's node data */ 9601 rval = port->fp_fca_tran->fca_port_manage( 9602 port->fp_fca_handle, &pm); 9603 9604 if (rval != FC_SUCCESS) { 9605 fcio->fcio_errno = rval; 9606 rval = EIO; 9607 if (fp_fcio_copyout(fcio, data, mode)) { 9608 rval = EFAULT; 9609 } 9610 } else { 9611 mutex_enter(&port->fp_mutex); 9612 /* copy to the port structure */ 9613 bcopy(rnid, &port->fp_rnid_params, 9614 sizeof (port->fp_rnid_params)); 9615 mutex_exit(&port->fp_mutex); 9616 } 9617 9618 kmem_free(rnid, sizeof (fc_rnid_t)); 9619 9620 if (rval != FC_SUCCESS) { 9621 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9622 } 9623 9624 return (rval); 9625 } 9626 9627 /* 9628 * This function does the local pwwn get rnid 9629 */ 9630 static int 9631 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9632 { 9633 fc_rnid_t *rnid; 9634 fc_fca_pm_t pm; 9635 int rval = 0; 9636 uint32_t ret; 9637 9638 /* Allocate memory for rnid data block */ 9639 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9640 9641 mutex_enter(&port->fp_mutex); 9642 if (port->fp_rnid_init == 1) { 9643 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9644 mutex_exit(&port->fp_mutex); 9645 /* xfer node info to userland */ 9646 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9647 sizeof (*rnid), mode) == 0) { 9648 if (fp_fcio_copyout(fcio, data, mode)) { 9649 rval = EFAULT; 9650 } 9651 } else { 9652 rval = EFAULT; 9653 } 9654 9655 kmem_free(rnid, sizeof (fc_rnid_t)); 9656 9657 if (rval != FC_SUCCESS) { 9658 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9659 rval); 9660 } 9661 9662 return (rval); 9663 } 9664 mutex_exit(&port->fp_mutex); 9665 9666 /* Prepare the port management structure */ 9667 bzero((caddr_t)&pm, sizeof (pm)); 9668 9669 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9670 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9671 pm.pm_data_len = sizeof (fc_rnid_t); 9672 pm.pm_data_buf = (caddr_t)rnid; 9673 9674 /* Get the adapter's node data */ 9675 ret = port->fp_fca_tran->fca_port_manage( 9676 port->fp_fca_handle, 9677 &pm); 9678 9679 if (ret == FC_SUCCESS) { 9680 /* initialize in the port_info */ 9681 mutex_enter(&port->fp_mutex); 9682 port->fp_rnid_init = 1; 9683 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9684 mutex_exit(&port->fp_mutex); 9685 9686 /* xfer node info to userland */ 9687 if (ddi_copyout((void *)rnid, 9688 (void *)fcio->fcio_obuf, 9689 sizeof (*rnid), mode) == 0) { 9690 if (fp_fcio_copyout(fcio, data, 9691 mode)) { 9692 rval = EFAULT; 9693 } 9694 } else { 9695 rval = EFAULT; 9696 } 9697 } else { 9698 rval = EIO; 9699 fcio->fcio_errno = ret; 9700 if (fp_fcio_copyout(fcio, data, mode)) { 9701 rval = EFAULT; 9702 } 9703 } 9704 9705 kmem_free(rnid, sizeof (fc_rnid_t)); 9706 9707 if (rval != FC_SUCCESS) { 9708 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9709 } 9710 9711 return (rval); 9712 } 9713 9714 static int 9715 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9716 la_wwn_t *pwwn) 9717 { 9718 int rval = 0; 9719 fc_remote_port_t *pd; 9720 fp_cmd_t *cmd; 9721 job_request_t *job; 9722 la_els_rnid_acc_t *rnid_acc; 9723 9724 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9725 if (pd == NULL) { 9726 /* 9727 * We can safely assume that the destination port 9728 * is logged in. Either the user land will explicitly 9729 * login before issuing RNID ioctl or the device would 9730 * have been configured, meaning already logged in. 9731 */ 9732 9733 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9734 9735 return (ENXIO); 9736 } 9737 /* 9738 * Allocate job structure and set job_code as DUMMY, 9739 * because we will not go thorugh the job thread. 9740 * Instead fp_sendcmd() is called directly here. 9741 */ 9742 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9743 NULL, NULL, KM_SLEEP); 9744 9745 ASSERT(job != NULL); 9746 9747 job->job_counter = 1; 9748 9749 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9750 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9751 if (cmd == NULL) { 9752 fcio->fcio_errno = FC_NOMEM; 9753 rval = ENOMEM; 9754 9755 fctl_dealloc_job(job); 9756 if (fp_fcio_copyout(fcio, data, mode)) { 9757 rval = EFAULT; 9758 } 9759 9760 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9761 9762 return (rval); 9763 } 9764 9765 /* Allocate memory for node id accept block */ 9766 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9767 9768 mutex_enter(&port->fp_mutex); 9769 mutex_enter(&pd->pd_mutex); 9770 9771 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9772 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9773 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9774 cmd->cmd_retry_count = 1; 9775 cmd->cmd_ulp_pkt = NULL; 9776 9777 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9778 9779 job->job_private = (void *)rnid_acc; 9780 9781 pd->pd_flags = PD_ELS_IN_PROGRESS; 9782 9783 mutex_exit(&pd->pd_mutex); 9784 mutex_exit(&port->fp_mutex); 9785 9786 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9787 fctl_jobwait(job); 9788 fcio->fcio_errno = job->job_result; 9789 if (job->job_result == FC_SUCCESS) { 9790 int rnid_cnt; 9791 ASSERT(pd != NULL); 9792 /* 9793 * node id block is now available. 
9794 * Copy it to userland 9795 */ 9796 ASSERT(job->job_private == (void *)rnid_acc); 9797 9798 /* get the response length */ 9799 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9800 rnid_acc->hdr.cmn_len + 9801 rnid_acc->hdr.specific_len; 9802 9803 if (fcio->fcio_olen < rnid_cnt) { 9804 rval = EINVAL; 9805 } else if (ddi_copyout((void *)rnid_acc, 9806 (void *)fcio->fcio_obuf, 9807 rnid_cnt, mode) == 0) { 9808 if (fp_fcio_copyout(fcio, data, 9809 mode)) { 9810 rval = EFAULT; 9811 } 9812 } else { 9813 rval = EFAULT; 9814 } 9815 } else { 9816 rval = EIO; 9817 } 9818 } else { 9819 rval = EIO; 9820 if (pd) { 9821 mutex_enter(&pd->pd_mutex); 9822 pd->pd_flags = PD_IDLE; 9823 mutex_exit(&pd->pd_mutex); 9824 } 9825 fp_free_pkt(cmd); 9826 } 9827 9828 fctl_dealloc_job(job); 9829 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9830 9831 if (fp_fcio_copyout(fcio, data, mode)) { 9832 rval = EFAULT; 9833 } 9834 9835 if (rval != FC_SUCCESS) { 9836 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9837 } 9838 9839 return (rval); 9840 } 9841 9842 /* 9843 * Copy out to userland 9844 */ 9845 static int 9846 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9847 { 9848 int rval; 9849 9850 #ifdef _MULTI_DATAMODEL 9851 switch (ddi_model_convert_from(mode & FMODELS)) { 9852 case DDI_MODEL_ILP32: { 9853 struct fcio32 fcio32; 9854 9855 fcio32.fcio_xfer = fcio->fcio_xfer; 9856 fcio32.fcio_cmd = fcio->fcio_cmd; 9857 fcio32.fcio_flags = fcio->fcio_flags; 9858 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9859 fcio32.fcio_ilen = fcio->fcio_ilen; 9860 fcio32.fcio_ibuf = 9861 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9862 fcio32.fcio_olen = fcio->fcio_olen; 9863 fcio32.fcio_obuf = 9864 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9865 fcio32.fcio_alen = fcio->fcio_alen; 9866 fcio32.fcio_abuf = 9867 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9868 fcio32.fcio_errno = fcio->fcio_errno; 9869 9870 rval = ddi_copyout((void *)&fcio32, (void *)data, 9871 sizeof (struct fcio32), mode); 9872 break; 9873 } 9874 case DDI_MODEL_NONE: 9875 rval = ddi_copyout((void *)fcio, (void *)data, 9876 sizeof (fcio_t), mode); 9877 break; 9878 } 9879 #else 9880 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9881 #endif 9882 9883 return (rval); 9884 } 9885 9886 9887 static void 9888 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9889 { 9890 uint32_t listlen; 9891 fc_portmap_t *changelist; 9892 9893 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9894 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9895 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9896 9897 listlen = 0; 9898 changelist = NULL; 9899 9900 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9901 if (port->fp_statec_busy > 1) { 9902 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9903 } 9904 } 9905 mutex_exit(&port->fp_mutex); 9906 9907 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9908 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9909 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9910 listlen, listlen, KM_SLEEP); 9911 9912 mutex_enter(&port->fp_mutex); 9913 } else { 9914 ASSERT(changelist == NULL && listlen == 0); 9915 mutex_enter(&port->fp_mutex); 9916 if (--port->fp_statec_busy == 0) { 9917 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9918 } 9919 } 9920 } 9921 9922 static int 9923 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9924 { 9925 int rval; 9926 int count; 9927 int index; 9928 int num_devices; 9929 fc_remote_node_t *node; 9930 fc_port_dev_t *devlist; 9931 struct pwwn_hash 
*head; 9932 fc_remote_port_t *pd; 9933 9934 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9935 9936 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9937 9938 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9939 9940 for (count = index = 0; index < pwwn_table_size; index++) { 9941 head = &port->fp_pwwn_table[index]; 9942 pd = head->pwwn_head; 9943 while (pd != NULL) { 9944 mutex_enter(&pd->pd_mutex); 9945 if (pd->pd_state == PORT_DEVICE_INVALID) { 9946 mutex_exit(&pd->pd_mutex); 9947 pd = pd->pd_wwn_hnext; 9948 continue; 9949 } 9950 9951 devlist[count].dev_state = pd->pd_state; 9952 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9953 devlist[count].dev_did = pd->pd_port_id; 9954 devlist[count].dev_did.priv_lilp_posit = 9955 (uint8_t)(index & 0xff); 9956 bcopy((caddr_t)pd->pd_fc4types, 9957 (caddr_t)devlist[count].dev_type, 9958 sizeof (pd->pd_fc4types)); 9959 9960 bcopy((caddr_t)&pd->pd_port_name, 9961 (caddr_t)&devlist[count].dev_pwwn, 9962 sizeof (la_wwn_t)); 9963 9964 node = pd->pd_remote_nodep; 9965 mutex_exit(&pd->pd_mutex); 9966 9967 if (node) { 9968 mutex_enter(&node->fd_mutex); 9969 bcopy((caddr_t)&node->fd_node_name, 9970 (caddr_t)&devlist[count].dev_nwwn, 9971 sizeof (la_wwn_t)); 9972 mutex_exit(&node->fd_mutex); 9973 } 9974 count++; 9975 if (count >= num_devices) { 9976 goto found; 9977 } 9978 } 9979 } 9980 found: 9981 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9982 sizeof (count), mode)) { 9983 rval = FC_FAILURE; 9984 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9985 sizeof (fc_port_dev_t) * num_devices, mode)) { 9986 rval = FC_FAILURE; 9987 } else { 9988 rval = FC_SUCCESS; 9989 } 9990 9991 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9992 9993 return (rval); 9994 } 9995 9996 9997 /* 9998 * Handle Fabric ONLINE 9999 */ 10000 static void 10001 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10002 { 10003 int index; 10004 int rval; 10005 int dbg_count; 10006 int count = 0; 10007 char ww_name[17]; 10008 uint32_t d_id; 10009 uint32_t listlen; 10010 fctl_ns_req_t *ns_cmd; 10011 struct pwwn_hash *head; 10012 fc_remote_port_t *pd; 10013 fc_remote_port_t *npd; 10014 fc_portmap_t *changelist; 10015 10016 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10017 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10018 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10019 10020 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10021 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10022 0, KM_SLEEP); 10023 10024 ASSERT(ns_cmd != NULL); 10025 10026 ns_cmd->ns_cmd_code = NS_GID_PN; 10027 10028 /* 10029 * Check if orphans are showing up now 10030 */ 10031 if (port->fp_orphan_count) { 10032 fc_orphan_t *orp; 10033 fc_orphan_t *norp = NULL; 10034 fc_orphan_t *prev = NULL; 10035 10036 for (orp = port->fp_orphan_list; orp; orp = norp) { 10037 norp = orp->orp_next; 10038 mutex_exit(&port->fp_mutex); 10039 orp->orp_nscan++; 10040 10041 job->job_counter = 1; 10042 job->job_result = FC_SUCCESS; 10043 10044 ((ns_req_gid_pn_t *) 10045 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10046 ((ns_resp_gid_pn_t *) 10047 ns_cmd->ns_data_buf)->pid.port_id = 0; 10048 ((ns_resp_gid_pn_t *) 10049 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10050 10051 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10052 if (rval == FC_SUCCESS) { 10053 d_id = 10054 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10055 pd = fp_create_remote_port_by_ns(port, 10056 d_id, KM_SLEEP); 10057 10058 if (pd != NULL) { 10059 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10060 
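				/*
				 * The name server answered the GID_PN query
				 * for this orphan: log its reappearance, take
				 * it off the orphan list and mark the newly
				 * created remote port (PD_ELS_MARK) so the
				 * scan below accounts for it.
				 */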
10061 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10062 0, NULL, "N_x Port with D_ID=%x," 10063 " PWWN=%s reappeared in fabric", 10064 d_id, ww_name); 10065 10066 mutex_enter(&port->fp_mutex); 10067 if (prev) { 10068 prev->orp_next = orp->orp_next; 10069 } else { 10070 ASSERT(orp == 10071 port->fp_orphan_list); 10072 port->fp_orphan_list = 10073 orp->orp_next; 10074 } 10075 port->fp_orphan_count--; 10076 mutex_exit(&port->fp_mutex); 10077 kmem_free(orp, sizeof (*orp)); 10078 count++; 10079 10080 mutex_enter(&pd->pd_mutex); 10081 pd->pd_flags = PD_ELS_MARK; 10082 10083 mutex_exit(&pd->pd_mutex); 10084 } else { 10085 prev = orp; 10086 } 10087 } else { 10088 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10089 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10090 10091 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10092 NULL, 10093 " Port WWN %s removed from orphan" 10094 " list after %d scans", ww_name, 10095 orp->orp_nscan); 10096 10097 mutex_enter(&port->fp_mutex); 10098 if (prev) { 10099 prev->orp_next = orp->orp_next; 10100 } else { 10101 ASSERT(orp == 10102 port->fp_orphan_list); 10103 port->fp_orphan_list = 10104 orp->orp_next; 10105 } 10106 port->fp_orphan_count--; 10107 mutex_exit(&port->fp_mutex); 10108 10109 kmem_free(orp, sizeof (*orp)); 10110 } else { 10111 prev = orp; 10112 } 10113 } 10114 mutex_enter(&port->fp_mutex); 10115 } 10116 } 10117 10118 /* 10119 * Walk the Port WWN hash table, reestablish LOGIN 10120 * if a LOGIN is already performed on a particular 10121 * device; Any failure to LOGIN should mark the 10122 * port device OLD. 10123 */ 10124 for (index = 0; index < pwwn_table_size; index++) { 10125 head = &port->fp_pwwn_table[index]; 10126 npd = head->pwwn_head; 10127 10128 while ((pd = npd) != NULL) { 10129 la_wwn_t *pwwn; 10130 10131 npd = pd->pd_wwn_hnext; 10132 10133 /* 10134 * Don't count in the port devices that are new 10135 * unless the total number of devices visible 10136 * through this port is less than FP_MAX_DEVICES 10137 */ 10138 mutex_enter(&pd->pd_mutex); 10139 if (port->fp_dev_count >= FP_MAX_DEVICES || 10140 (port->fp_options & FP_TARGET_MODE)) { 10141 if (pd->pd_type == PORT_DEVICE_NEW || 10142 pd->pd_flags == PD_ELS_MARK || 10143 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10144 mutex_exit(&pd->pd_mutex); 10145 continue; 10146 } 10147 } else { 10148 if (pd->pd_flags == PD_ELS_MARK || 10149 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10150 mutex_exit(&pd->pd_mutex); 10151 continue; 10152 } 10153 pd->pd_type = PORT_DEVICE_OLD; 10154 } 10155 count++; 10156 10157 /* 10158 * Consult with the name server about D_ID changes 10159 */ 10160 job->job_counter = 1; 10161 job->job_result = FC_SUCCESS; 10162 10163 ((ns_req_gid_pn_t *) 10164 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10165 ((ns_resp_gid_pn_t *) 10166 ns_cmd->ns_data_buf)->pid.port_id = 0; 10167 10168 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10169 pid.priv_lilp_posit = 0; 10170 10171 pwwn = &pd->pd_port_name; 10172 pd->pd_flags = PD_ELS_MARK; 10173 10174 mutex_exit(&pd->pd_mutex); 10175 mutex_exit(&port->fp_mutex); 10176 10177 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10178 if (rval != FC_SUCCESS) { 10179 fc_wwn_to_str(pwwn, ww_name); 10180 10181 mutex_enter(&pd->pd_mutex); 10182 d_id = pd->pd_port_id.port_id; 10183 pd->pd_type = PORT_DEVICE_DELETE; 10184 mutex_exit(&pd->pd_mutex); 10185 10186 FP_TRACE(FP_NHEAD1(3, 0), 10187 "fp_fabric_online: PD " 10188 "disappeared; d_id=%x, PWWN=%s", 10189 d_id, ww_name); 10190 10191 FP_TRACE(FP_NHEAD2(9, 0), 10192 "N_x Port with D_ID=%x, PWWN=%s" 10193 " disappeared 
from fabric", d_id, 10194 ww_name); 10195 10196 mutex_enter(&port->fp_mutex); 10197 continue; 10198 } 10199 10200 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10201 10202 mutex_enter(&port->fp_mutex); 10203 mutex_enter(&pd->pd_mutex); 10204 if (d_id != pd->pd_port_id.port_id) { 10205 fctl_delist_did_table(port, pd); 10206 fc_wwn_to_str(pwwn, ww_name); 10207 10208 FP_TRACE(FP_NHEAD2(9, 0), 10209 "D_ID of a device with PWWN %s changed." 10210 " New D_ID = %x, OLD D_ID = %x", ww_name, 10211 d_id, pd->pd_port_id.port_id); 10212 10213 pd->pd_port_id.port_id = BE_32(d_id); 10214 pd->pd_type = PORT_DEVICE_CHANGED; 10215 fctl_enlist_did_table(port, pd); 10216 } 10217 mutex_exit(&pd->pd_mutex); 10218 10219 } 10220 } 10221 10222 if (ns_cmd) { 10223 fctl_free_ns_cmd(ns_cmd); 10224 } 10225 10226 listlen = 0; 10227 changelist = NULL; 10228 if (count) { 10229 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10230 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10231 mutex_exit(&port->fp_mutex); 10232 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10233 mutex_enter(&port->fp_mutex); 10234 } 10235 10236 dbg_count = 0; 10237 10238 job->job_counter = count; 10239 10240 for (index = 0; index < pwwn_table_size; index++) { 10241 head = &port->fp_pwwn_table[index]; 10242 npd = head->pwwn_head; 10243 10244 while ((pd = npd) != NULL) { 10245 npd = pd->pd_wwn_hnext; 10246 10247 mutex_enter(&pd->pd_mutex); 10248 if (pd->pd_flags != PD_ELS_MARK) { 10249 mutex_exit(&pd->pd_mutex); 10250 continue; 10251 } 10252 10253 dbg_count++; 10254 10255 /* 10256 * If it is already marked deletion, nothing 10257 * else to do. 10258 */ 10259 if (pd->pd_type == PORT_DEVICE_DELETE) { 10260 pd->pd_type = PORT_DEVICE_OLD; 10261 10262 mutex_exit(&pd->pd_mutex); 10263 mutex_exit(&port->fp_mutex); 10264 fp_jobdone(job); 10265 mutex_enter(&port->fp_mutex); 10266 10267 continue; 10268 } 10269 10270 /* 10271 * If it is freshly discovered out of 10272 * the orphan list, nothing else to do 10273 */ 10274 if (pd->pd_type == PORT_DEVICE_NEW) { 10275 pd->pd_flags = PD_IDLE; 10276 10277 mutex_exit(&pd->pd_mutex); 10278 mutex_exit(&port->fp_mutex); 10279 fp_jobdone(job); 10280 mutex_enter(&port->fp_mutex); 10281 10282 continue; 10283 } 10284 10285 pd->pd_flags = PD_IDLE; 10286 d_id = pd->pd_port_id.port_id; 10287 10288 /* 10289 * Explicitly mark all devices OLD; successful 10290 * PLOGI should reset this to either NO_CHANGE 10291 * or CHANGED. 
10292 */ 10293 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10294 pd->pd_type = PORT_DEVICE_OLD; 10295 } 10296 10297 mutex_exit(&pd->pd_mutex); 10298 mutex_exit(&port->fp_mutex); 10299 10300 rval = fp_port_login(port, d_id, job, 10301 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10302 10303 if (rval != FC_SUCCESS) { 10304 fp_jobdone(job); 10305 } 10306 mutex_enter(&port->fp_mutex); 10307 } 10308 } 10309 mutex_exit(&port->fp_mutex); 10310 10311 ASSERT(dbg_count == count); 10312 fp_jobwait(job); 10313 10314 mutex_enter(&port->fp_mutex); 10315 10316 ASSERT(port->fp_statec_busy > 0); 10317 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10318 if (port->fp_statec_busy > 1) { 10319 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10320 } 10321 } 10322 mutex_exit(&port->fp_mutex); 10323 } else { 10324 ASSERT(port->fp_statec_busy > 0); 10325 if (port->fp_statec_busy > 1) { 10326 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10327 } 10328 mutex_exit(&port->fp_mutex); 10329 } 10330 10331 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10332 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10333 10334 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10335 listlen, listlen, KM_SLEEP); 10336 10337 mutex_enter(&port->fp_mutex); 10338 } else { 10339 ASSERT(changelist == NULL && listlen == 0); 10340 mutex_enter(&port->fp_mutex); 10341 if (--port->fp_statec_busy == 0) { 10342 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10343 } 10344 } 10345 } 10346 10347 10348 /* 10349 * Fill out device list for userland ioctl in private loop 10350 */ 10351 static int 10352 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10353 { 10354 int rval; 10355 int count; 10356 int index; 10357 int num_devices; 10358 fc_remote_node_t *node; 10359 fc_port_dev_t *devlist; 10360 int lilp_device_count; 10361 fc_lilpmap_t *lilp_map; 10362 uchar_t *alpa_list; 10363 10364 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10365 10366 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10367 if (port->fp_total_devices > port->fp_dev_count && 10368 num_devices >= port->fp_total_devices) { 10369 job_request_t *job; 10370 10371 mutex_exit(&port->fp_mutex); 10372 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10373 job->job_counter = 1; 10374 10375 mutex_enter(&port->fp_mutex); 10376 fp_get_loopmap(port, job); 10377 mutex_exit(&port->fp_mutex); 10378 10379 fp_jobwait(job); 10380 fctl_dealloc_job(job); 10381 } else { 10382 mutex_exit(&port->fp_mutex); 10383 } 10384 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10385 10386 mutex_enter(&port->fp_mutex); 10387 10388 /* 10389 * Applications are accustomed to getting the device list in 10390 * LILP map order. The HBA firmware usually returns the device 10391 * map in the LILP map order and diagnostic applications would 10392 * prefer to receive in the device list in that order too 10393 */ 10394 lilp_map = &port->fp_lilp_map; 10395 alpa_list = &lilp_map->lilp_alpalist[0]; 10396 10397 /* 10398 * the length field corresponds to the offset in the LILP frame 10399 * which begins with 1. The thing to note here is that the 10400 * lilp_device_count is 1 more than fp->fp_total_devices since 10401 * the host adapter's alpa also shows up in the lilp map. We 10402 * don't however return details of the host adapter since 10403 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10404 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10405 * ioctl to obtain details about the host adapter port. 
10406 */ 10407 lilp_device_count = lilp_map->lilp_length; 10408 10409 for (count = index = 0; index < lilp_device_count && 10410 count < num_devices; index++) { 10411 uint32_t d_id; 10412 fc_remote_port_t *pd; 10413 10414 d_id = alpa_list[index]; 10415 10416 mutex_exit(&port->fp_mutex); 10417 pd = fctl_get_remote_port_by_did(port, d_id); 10418 mutex_enter(&port->fp_mutex); 10419 10420 if (pd != NULL) { 10421 mutex_enter(&pd->pd_mutex); 10422 10423 if (pd->pd_state == PORT_DEVICE_INVALID) { 10424 mutex_exit(&pd->pd_mutex); 10425 continue; 10426 } 10427 10428 devlist[count].dev_state = pd->pd_state; 10429 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10430 devlist[count].dev_did = pd->pd_port_id; 10431 devlist[count].dev_did.priv_lilp_posit = 10432 (uint8_t)(index & 0xff); 10433 bcopy((caddr_t)pd->pd_fc4types, 10434 (caddr_t)devlist[count].dev_type, 10435 sizeof (pd->pd_fc4types)); 10436 10437 bcopy((caddr_t)&pd->pd_port_name, 10438 (caddr_t)&devlist[count].dev_pwwn, 10439 sizeof (la_wwn_t)); 10440 10441 node = pd->pd_remote_nodep; 10442 mutex_exit(&pd->pd_mutex); 10443 10444 if (node) { 10445 mutex_enter(&node->fd_mutex); 10446 bcopy((caddr_t)&node->fd_node_name, 10447 (caddr_t)&devlist[count].dev_nwwn, 10448 sizeof (la_wwn_t)); 10449 mutex_exit(&node->fd_mutex); 10450 } 10451 count++; 10452 } 10453 } 10454 10455 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10456 sizeof (count), mode)) { 10457 rval = FC_FAILURE; 10458 } 10459 10460 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10461 sizeof (fc_port_dev_t) * num_devices, mode)) { 10462 rval = FC_FAILURE; 10463 } else { 10464 rval = FC_SUCCESS; 10465 } 10466 10467 kmem_free(devlist, sizeof (*devlist) * num_devices); 10468 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10469 10470 return (rval); 10471 } 10472 10473 10474 /* 10475 * Completion function for responses to unsolicited commands 10476 */ 10477 static void 10478 fp_unsol_intr(fc_packet_t *pkt) 10479 { 10480 fp_cmd_t *cmd; 10481 fc_local_port_t *port; 10482 10483 cmd = pkt->pkt_ulp_private; 10484 port = cmd->cmd_port; 10485 10486 mutex_enter(&port->fp_mutex); 10487 port->fp_out_fpcmds--; 10488 mutex_exit(&port->fp_mutex); 10489 10490 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10491 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10492 "couldn't post response to unsolicited request;" 10493 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10494 pkt->pkt_resp_fhdr.rx_id); 10495 } 10496 10497 if (cmd == port->fp_els_resp_pkt) { 10498 mutex_enter(&port->fp_mutex); 10499 port->fp_els_resp_pkt_busy = 0; 10500 mutex_exit(&port->fp_mutex); 10501 return; 10502 } 10503 10504 fp_free_pkt(cmd); 10505 } 10506 10507 10508 /* 10509 * solicited LINIT ELS completion function 10510 */ 10511 static void 10512 fp_linit_intr(fc_packet_t *pkt) 10513 { 10514 fp_cmd_t *cmd; 10515 job_request_t *job; 10516 fc_linit_resp_t acc; 10517 10518 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10519 10520 mutex_enter(&cmd->cmd_port->fp_mutex); 10521 cmd->cmd_port->fp_out_fpcmds--; 10522 mutex_exit(&cmd->cmd_port->fp_mutex); 10523 10524 if (FP_IS_PKT_ERROR(pkt)) { 10525 (void) fp_common_intr(pkt, 1); 10526 return; 10527 } 10528 10529 job = cmd->cmd_job; 10530 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10531 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10532 if (acc.status != FC_LINIT_SUCCESS) { 10533 job->job_result = FC_FAILURE; 10534 } else { 10535 job->job_result = FC_SUCCESS; 10536 } 10537 10538 fp_iodone(cmd); 10539 } 10540 10541 10542 /* 10543 * Decode the unsolicited request; For FC-4 Device 
and Link data frames 10544 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10545 * ELS requests, submit a request to the job_handler thread to work on it. 10546 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10547 * and save much of the interrupt time processing of unsolicited ELS requests 10548 * and hand it off to the job_handler thread. 10549 */ 10550 static void 10551 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10552 { 10553 uchar_t r_ctl; 10554 uchar_t ls_code; 10555 uint32_t s_id; 10556 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10557 uint32_t cb_arg; 10558 fp_cmd_t *cmd; 10559 fc_local_port_t *port; 10560 job_request_t *job; 10561 fc_remote_port_t *pd; 10562 10563 port = port_handle; 10564 10565 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10566 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10567 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10568 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10569 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10570 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10571 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10572 buf->ub_buffer[0]); 10573 10574 if (type & 0x80000000) { 10575 /* 10576 * Huh? Nothing much can be done without 10577 * a valid buffer. So just exit. 10578 */ 10579 return; 10580 } 10581 /* 10582 * If unsolicited interrupts arrive while it isn't 10583 * safe to handle unsolicited callbacks, drop them, yes, 10584 * drop them on the floor. 10585 */ 10586 mutex_enter(&port->fp_mutex); 10587 port->fp_active_ubs++; 10588 if ((port->fp_soft_state & 10589 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10590 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10591 10592 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10593 "not ONLINE.
s_id=%x, d_id=%x, type=%x, " 10594 "seq_id=%x, ox_id=%x, rx_id=%x" 10595 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10596 buf->ub_frame.type, buf->ub_frame.seq_id, 10597 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10598 10599 ASSERT(port->fp_active_ubs > 0); 10600 if (--(port->fp_active_ubs) == 0) { 10601 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10602 } 10603 10604 mutex_exit(&port->fp_mutex); 10605 10606 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10607 1, &buf->ub_token); 10608 10609 return; 10610 } 10611 10612 r_ctl = buf->ub_frame.r_ctl; 10613 s_id = buf->ub_frame.s_id; 10614 if (port->fp_active_ubs == 1) { 10615 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10616 } 10617 10618 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10619 port->fp_statec_busy) { 10620 mutex_exit(&port->fp_mutex); 10621 pd = fctl_get_remote_port_by_did(port, s_id); 10622 if (pd) { 10623 mutex_enter(&pd->pd_mutex); 10624 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10625 FP_TRACE(FP_NHEAD1(3, 0), 10626 "LOGO for LOGGED IN D_ID %x", 10627 buf->ub_frame.s_id); 10628 pd->pd_state = PORT_DEVICE_VALID; 10629 } 10630 mutex_exit(&pd->pd_mutex); 10631 } 10632 10633 mutex_enter(&port->fp_mutex); 10634 ASSERT(port->fp_active_ubs > 0); 10635 if (--(port->fp_active_ubs) == 0) { 10636 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10637 } 10638 mutex_exit(&port->fp_mutex); 10639 10640 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10641 1, &buf->ub_token); 10642 10643 FP_TRACE(FP_NHEAD1(3, 0), 10644 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10645 buf->ub_frame.s_id); 10646 return; 10647 } 10648 10649 if (port->fp_els_resp_pkt_busy == 0) { 10650 if (r_ctl == R_CTL_ELS_REQ) { 10651 ls_code = buf->ub_buffer[0]; 10652 10653 switch (ls_code) { 10654 case LA_ELS_PLOGI: 10655 case LA_ELS_FLOGI: 10656 port->fp_els_resp_pkt_busy = 1; 10657 mutex_exit(&port->fp_mutex); 10658 fp_i_handle_unsol_els(port, buf); 10659 10660 mutex_enter(&port->fp_mutex); 10661 ASSERT(port->fp_active_ubs > 0); 10662 if (--(port->fp_active_ubs) == 0) { 10663 port->fp_soft_state &= 10664 ~FP_SOFT_IN_UNSOL_CB; 10665 } 10666 mutex_exit(&port->fp_mutex); 10667 port->fp_fca_tran->fca_ub_release( 10668 port->fp_fca_handle, 1, &buf->ub_token); 10669 10670 return; 10671 case LA_ELS_RSCN: 10672 if (++(port)->fp_rscn_count == 10673 FC_INVALID_RSCN_COUNT) { 10674 ++(port)->fp_rscn_count; 10675 } 10676 rscn_count = port->fp_rscn_count; 10677 break; 10678 10679 default: 10680 break; 10681 } 10682 } 10683 } else if ((r_ctl == R_CTL_ELS_REQ) && 10684 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10685 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10686 ++port->fp_rscn_count; 10687 } 10688 rscn_count = port->fp_rscn_count; 10689 } 10690 10691 mutex_exit(&port->fp_mutex); 10692 10693 switch (r_ctl & R_CTL_ROUTING) { 10694 case R_CTL_DEVICE_DATA: 10695 /* 10696 * If the unsolicited buffer is a CT IU, 10697 * have the job_handler thread work on it. 10698 */ 10699 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10700 break; 10701 } 10702 /* FALLTHROUGH */ 10703 10704 case R_CTL_FC4_SVC: { 10705 int sendup = 0; 10706 10707 /* 10708 * If a LOGIN isn't performed before this request 10709 * shut the door on this port with a reply that a 10710 * LOGIN is required. We make an exception however 10711 * for IP broadcast packets and pass them through 10712 * to the IP ULP(s) to handle broadcast requests. 
10713 * This is not a problem for private loop devices 10714 * but for fabric topologies we don't log into the 10715 * remote ports during port initialization and 10716 * the ULPs need to log into requesting ports on 10717 * demand. 10718 */ 10719 pd = fctl_get_remote_port_by_did(port, s_id); 10720 if (pd) { 10721 mutex_enter(&pd->pd_mutex); 10722 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10723 sendup++; 10724 } 10725 mutex_exit(&pd->pd_mutex); 10726 } else if ((pd == NULL) && 10727 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10728 (buf->ub_frame.d_id == 0xffffff || 10729 buf->ub_frame.d_id == 0x00)) { 10730 /* brodacst IP frame - so sendup via job thread */ 10731 break; 10732 } 10733 10734 /* 10735 * Send all FC4 services via job thread too 10736 */ 10737 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10738 break; 10739 } 10740 10741 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10742 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10743 return; 10744 } 10745 10746 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10747 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10748 0, KM_NOSLEEP, pd); 10749 if (cmd != NULL) { 10750 fp_els_rjt_init(port, cmd, buf, 10751 FC_ACTION_NON_RETRYABLE, 10752 FC_REASON_LOGIN_REQUIRED, NULL); 10753 10754 if (fp_sendcmd(port, cmd, 10755 port->fp_fca_handle) != FC_SUCCESS) { 10756 fp_free_pkt(cmd); 10757 } 10758 } 10759 } 10760 10761 mutex_enter(&port->fp_mutex); 10762 ASSERT(port->fp_active_ubs > 0); 10763 if (--(port->fp_active_ubs) == 0) { 10764 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10765 } 10766 mutex_exit(&port->fp_mutex); 10767 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10768 1, &buf->ub_token); 10769 10770 return; 10771 } 10772 10773 default: 10774 break; 10775 } 10776 10777 /* 10778 * Submit a Request to the job_handler thread to work 10779 * on the unsolicited request. The potential side effect 10780 * of this is that the unsolicited buffer takes a little 10781 * longer to get released but we save interrupt time in 10782 * the bargain. 10783 */ 10784 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10785 10786 /* 10787 * One way that the rscn_count will get used is described below : 10788 * 10789 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10790 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10791 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10792 * by overloading the job_cb_arg to pass the rscn_count 10793 * 4. When one of the routines processing the RSCN picks it up (ex: 10794 * fp_validate_rscn_page()), it passes this count in the map 10795 * structure (as part of the map_rscn_info structure member) to the 10796 * ULPs. 10797 * 5. When ULPs make calls back to the transport (example interfaces for 10798 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10799 * can now pass back this count as part of the fc_packet's 10800 * pkt_ulp_rscn_count member. fcp does this currently. 10801 * 6. When transport gets a call to transport a command on the wire, it 10802 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10803 * fc_packet. If there is, it will match that info with the current 10804 * rscn_count on that instance of the port. If they don't match up 10805 * then there was a newer RSCN. The ULP gets back an error code which 10806 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN. 10807 * 7. At this point the ULP is free to make up its own mind as to how to 10808 * handle this. 
Currently, fcp will reset its retry counters and keep 10809 * retrying the operation it was doing in anticipation of getting a 10810 * new state change call back for the new RSCN. 10811 */ 10812 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10813 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10814 if (job == NULL) { 10815 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10816 "couldn't submit a job to the thread, failing.."); 10817 10818 mutex_enter(&port->fp_mutex); 10819 10820 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10821 --port->fp_rscn_count; 10822 } 10823 10824 ASSERT(port->fp_active_ubs > 0); 10825 if (--(port->fp_active_ubs) == 0) { 10826 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10827 } 10828 10829 mutex_exit(&port->fp_mutex); 10830 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10831 1, &buf->ub_token); 10832 10833 return; 10834 } 10835 job->job_private = (void *)buf; 10836 fctl_enque_job(port, job); 10837 } 10838 10839 10840 /* 10841 * Handle unsolicited requests 10842 */ 10843 static void 10844 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10845 job_request_t *job) 10846 { 10847 uchar_t r_ctl; 10848 uchar_t ls_code; 10849 uint32_t s_id; 10850 fp_cmd_t *cmd; 10851 fc_remote_port_t *pd; 10852 fp_unsol_spec_t *ub_spec; 10853 10854 r_ctl = buf->ub_frame.r_ctl; 10855 s_id = buf->ub_frame.s_id; 10856 10857 switch (r_ctl & R_CTL_ROUTING) { 10858 case R_CTL_EXTENDED_SVC: 10859 if (r_ctl != R_CTL_ELS_REQ) { 10860 break; 10861 } 10862 10863 ls_code = buf->ub_buffer[0]; 10864 switch (ls_code) { 10865 case LA_ELS_LOGO: 10866 case LA_ELS_ADISC: 10867 case LA_ELS_PRLO: 10868 pd = fctl_get_remote_port_by_did(port, s_id); 10869 if (pd == NULL) { 10870 if (!FC_IS_REAL_DEVICE(s_id)) { 10871 break; 10872 } 10873 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10874 break; 10875 } 10876 if ((cmd = fp_alloc_pkt(port, 10877 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10878 NULL)) == NULL) { 10879 /* 10880 * Can this actually fail when 10881 * given KM_SLEEP? (Could be used 10882 * this way in a number of places.) 10883 */ 10884 break; 10885 } 10886 10887 fp_els_rjt_init(port, cmd, buf, 10888 FC_ACTION_NON_RETRYABLE, 10889 FC_REASON_INVALID_LINK_CTRL, job); 10890 10891 if (fp_sendcmd(port, cmd, 10892 port->fp_fca_handle) != FC_SUCCESS) { 10893 fp_free_pkt(cmd); 10894 } 10895 10896 break; 10897 } 10898 if (ls_code == LA_ELS_LOGO) { 10899 fp_handle_unsol_logo(port, buf, pd, job); 10900 } else if (ls_code == LA_ELS_ADISC) { 10901 fp_handle_unsol_adisc(port, buf, pd, job); 10902 } else { 10903 fp_handle_unsol_prlo(port, buf, pd, job); 10904 } 10905 break; 10906 10907 case LA_ELS_PLOGI: 10908 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10909 break; 10910 10911 case LA_ELS_FLOGI: 10912 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10913 break; 10914 10915 case LA_ELS_RSCN: 10916 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10917 break; 10918 10919 default: 10920 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10921 ub_spec->port = port; 10922 ub_spec->buf = buf; 10923 10924 (void) taskq_dispatch(port->fp_taskq, 10925 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10926 return; 10927 } 10928 break; 10929 10930 case R_CTL_BASIC_SVC: 10931 /* 10932 * The unsolicited basic link services could be ABTS 10933 * and RMC (Or even a NOP). Just BA_RJT them until 10934 * such time there arises a need to handle them more 10935 * carefully. 
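 * (The BA_RJT built below carries reason code FC_REASON_CMD_UNSUPPORTED
 * and is sent only for Class 1 and Class 2 frames; Class 3 frames get
 * no link response and the unsolicited buffer is simply released.)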
10936 */ 10937 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10938 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10939 0, KM_SLEEP, NULL); 10940 if (cmd != NULL) { 10941 fp_ba_rjt_init(port, cmd, buf, job); 10942 if (fp_sendcmd(port, cmd, 10943 port->fp_fca_handle) != FC_SUCCESS) { 10944 fp_free_pkt(cmd); 10945 } 10946 } 10947 } 10948 break; 10949 10950 case R_CTL_DEVICE_DATA: 10951 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10952 /* 10953 * Mostly this is of type FC_TYPE_FC_SERVICES. 10954 * As we don't like any Unsolicited FC services 10955 * requests, we would do well to RJT them as 10956 * well. 10957 */ 10958 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10959 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10960 0, KM_SLEEP, NULL); 10961 if (cmd != NULL) { 10962 fp_els_rjt_init(port, cmd, buf, 10963 FC_ACTION_NON_RETRYABLE, 10964 FC_REASON_INVALID_LINK_CTRL, job); 10965 10966 if (fp_sendcmd(port, cmd, 10967 port->fp_fca_handle) != 10968 FC_SUCCESS) { 10969 fp_free_pkt(cmd); 10970 } 10971 } 10972 } 10973 break; 10974 } 10975 /* FALLTHROUGH */ 10976 10977 case R_CTL_FC4_SVC: 10978 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10979 ub_spec->port = port; 10980 ub_spec->buf = buf; 10981 10982 (void) taskq_dispatch(port->fp_taskq, 10983 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10984 return; 10985 10986 case R_CTL_LINK_CTL: 10987 /* 10988 * Turn deaf ear on unsolicited link control frames. 10989 * Typical unsolicited link control Frame is an LCR 10990 * (to reset End to End credit to the default login 10991 * value and abort current sequences for all classes) 10992 * An intelligent microcode/firmware should handle 10993 * this transparently at its level and not pass all 10994 * the way up here. 10995 * 10996 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 10997 * or F_BSY. P_RJT is chosen to be the most appropriate 10998 * at this time. 10999 */ 11000 /* FALLTHROUGH */ 11001 11002 default: 11003 /* 11004 * Just reject everything else as an invalid request. 11005 */ 11006 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11007 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11008 0, KM_SLEEP, NULL); 11009 if (cmd != NULL) { 11010 fp_els_rjt_init(port, cmd, buf, 11011 FC_ACTION_NON_RETRYABLE, 11012 FC_REASON_INVALID_LINK_CTRL, job); 11013 11014 if (fp_sendcmd(port, cmd, 11015 port->fp_fca_handle) != FC_SUCCESS) { 11016 fp_free_pkt(cmd); 11017 } 11018 } 11019 } 11020 break; 11021 } 11022 11023 mutex_enter(&port->fp_mutex); 11024 ASSERT(port->fp_active_ubs > 0); 11025 if (--(port->fp_active_ubs) == 0) { 11026 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11027 } 11028 mutex_exit(&port->fp_mutex); 11029 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11030 1, &buf->ub_token); 11031 } 11032 11033 11034 /* 11035 * Prepare a BA_RJT and send it over. 
11036 */ 11037 static void 11038 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11039 job_request_t *job) 11040 { 11041 fc_packet_t *pkt; 11042 la_ba_rjt_t payload; 11043 11044 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11045 11046 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11047 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11048 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11049 cmd->cmd_retry_count = 1; 11050 cmd->cmd_ulp_pkt = NULL; 11051 11052 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11053 cmd->cmd_job = job; 11054 11055 pkt = &cmd->cmd_pkt; 11056 11057 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11058 11059 payload.reserved = 0; 11060 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11061 payload.explanation = FC_EXPLN_NONE; 11062 payload.vendor = 0; 11063 11064 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11065 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11066 } 11067 11068 11069 /* 11070 * Prepare an LS_RJT and send it over 11071 */ 11072 static void 11073 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11074 uchar_t action, uchar_t reason, job_request_t *job) 11075 { 11076 fc_packet_t *pkt; 11077 la_els_rjt_t payload; 11078 11079 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11080 11081 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11082 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11083 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11084 cmd->cmd_retry_count = 1; 11085 cmd->cmd_ulp_pkt = NULL; 11086 11087 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11088 cmd->cmd_job = job; 11089 11090 pkt = &cmd->cmd_pkt; 11091 11092 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11093 11094 payload.ls_code.ls_code = LA_ELS_RJT; 11095 payload.ls_code.mbz = 0; 11096 payload.action = action; 11097 payload.reason = reason; 11098 payload.reserved = 0; 11099 payload.vu = 0; 11100 11101 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11102 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11103 } 11104 11105 /* 11106 * Function: fp_prlo_acc_init 11107 * 11108 * Description: Initializes a Link Service Accept for a PRLO. 11109 * 11110 * Arguments: *port Local port through which the PRLO was 11111 * received. 11112 * *pd Remote port that sent the PRLO. 11113 * *buf Unsolicited buffer containing the PRLO 11114 * request. 11115 * job Job request. 11116 * sleep Allocation mode. 11117 * 11118 * Return Value: *cmd Command containing the response. 11119 * 11120 * Context: Depends on the parameter sleep. 11121 */ 11122 fp_cmd_t * 11123 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11124 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11125 { 11126 fp_cmd_t *cmd; 11127 fc_packet_t *pkt; 11128 la_els_prlo_t *req; 11129 size_t len; 11130 uint16_t flags; 11131 11132 req = (la_els_prlo_t *)buf->ub_buffer; 11133 len = (size_t)ntohs(req->payload_length); 11134 11135 /* 11136 * The payload of the accept to a PRLO has to exactly match the 11137 * payload of the request (with the exception of the LS code). 11138 */ 11139 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11140 11141 if (cmd) { 11142 /* 11143 * The fp command was successfully allocated. 
11144 */ 11145 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11146 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11147 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11148 cmd->cmd_retry_count = 1; 11149 cmd->cmd_ulp_pkt = NULL; 11150 11151 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11152 cmd->cmd_job = job; 11153 11154 pkt = &cmd->cmd_pkt; 11155 11156 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11157 FC_TYPE_EXTENDED_LS); 11158 11159 /* The code is overwritten for the copy. */ 11160 req->ls_code = LA_ELS_ACC; 11161 /* Response code is set. */ 11162 flags = ntohs(req->flags); 11163 flags &= ~SP_RESP_CODE_MASK; 11164 flags |= SP_RESP_CODE_REQ_EXECUTED; 11165 req->flags = htons(flags); 11166 11167 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11168 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11169 } 11170 return (cmd); 11171 } 11172 11173 /* 11174 * Prepare an ACC response to an ELS request 11175 */ 11176 static void 11177 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11178 job_request_t *job) 11179 { 11180 fc_packet_t *pkt; 11181 ls_code_t payload; 11182 11183 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11184 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11185 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11186 cmd->cmd_retry_count = 1; 11187 cmd->cmd_ulp_pkt = NULL; 11188 11189 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11190 cmd->cmd_job = job; 11191 11192 pkt = &cmd->cmd_pkt; 11193 11194 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11195 11196 payload.ls_code = LA_ELS_ACC; 11197 payload.mbz = 0; 11198 11199 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11200 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11201 } 11202 11203 /* 11204 * Unsolicited PRLO handler 11205 * 11206 * A Process Logout should be handled by the ULP that established it. However, 11207 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11208 * when a device implicitly logs out an initiator (for whatever reason) and 11209 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11210 * The logical thing to do for the device would be to send a LOGO in response 11211 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11212 * a PRLO instead. 11213 * 11214 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11215 * think that the Port Login has been lost. If we follow the Fibre Channel 11216 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11217 * the Port Login has also been lost, the remote port will reject the PRLI 11218 * indicating that we must PLOGI first. The initiator will then turn around and 11219 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11220 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11221 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11222 * needed would be received by FCP. FCP would have, then, to tell the transport 11223 * (fp) to PLOGI. The problem is, the transport would still think the Port 11224 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11225 * if you think it's not necessary". To work around that difficulty, the PRLO 11226 * is treated by the transport as a LOGO. The downside to it is a Port Login 11227 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11228 * has nothing to do with the PRLO) may be impacted. 
However, this is a 11229 * scenario very unlikely to happen. As of today the only ULP in Leadville 11230 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11231 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11232 * unlikely). 11233 */ 11234 static void 11235 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11236 fc_remote_port_t *pd, job_request_t *job) 11237 { 11238 int busy; 11239 int rval; 11240 int retain; 11241 fp_cmd_t *cmd; 11242 fc_portmap_t *listptr; 11243 boolean_t tolerance; 11244 la_els_prlo_t *req; 11245 11246 req = (la_els_prlo_t *)buf->ub_buffer; 11247 11248 if ((ntohs(req->payload_length) != 11249 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11250 (req->page_length != sizeof (service_parameter_page_t))) { 11251 /* 11252 * We are being very restrictive. Only one page per 11253 * payload. If that is not the case we reject the ELS, although 11254 * we should reply indicating we handle only a single page 11255 * per PRLO. 11256 */ 11257 goto fp_reject_prlo; 11258 } 11259 11260 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11261 /* 11262 * This is in case the payload advertises a size bigger than 11263 * what it really is. 11264 */ 11265 goto fp_reject_prlo; 11266 } 11267 11268 mutex_enter(&port->fp_mutex); 11269 busy = port->fp_statec_busy; 11270 mutex_exit(&port->fp_mutex); 11271 11272 mutex_enter(&pd->pd_mutex); 11273 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11274 if (!busy) { 11275 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11276 pd->pd_state == PORT_DEVICE_INVALID || 11277 pd->pd_flags == PD_ELS_IN_PROGRESS || 11278 pd->pd_type == PORT_DEVICE_OLD) { 11279 busy++; 11280 } 11281 } 11282 11283 if (busy) { 11284 mutex_exit(&pd->pd_mutex); 11285 11286 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11287 "pd=%p - busy", 11288 pd->pd_port_id.port_id, pd); 11289 11290 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11291 goto fp_reject_prlo; 11292 } 11293 } else { 11294 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11295 11296 if (tolerance) { 11297 fctl_tc_reset(&pd->pd_logo_tc); 11298 retain = 0; 11299 pd->pd_state = PORT_DEVICE_INVALID; 11300 } 11301 11302 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11303 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11304 tolerance, retain); 11305 11306 pd->pd_aux_flags |= PD_LOGGED_OUT; 11307 mutex_exit(&pd->pd_mutex); 11308 11309 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11310 if (cmd == NULL) { 11311 return; 11312 } 11313 11314 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11315 if (rval != FC_SUCCESS) { 11316 fp_free_pkt(cmd); 11317 return; 11318 } 11319 11320 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11321 11322 if (retain) { 11323 fp_unregister_login(pd); 11324 fctl_copy_portmap(listptr, pd); 11325 } else { 11326 uint32_t d_id; 11327 char ww_name[17]; 11328 11329 mutex_enter(&pd->pd_mutex); 11330 d_id = pd->pd_port_id.port_id; 11331 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11332 mutex_exit(&pd->pd_mutex); 11333 11334 FP_TRACE(FP_NHEAD2(9, 0), 11335 "N_x Port with D_ID=%x, PWWN=%s logged out" 11336 " %d times in %d us; Giving up", d_id, ww_name, 11337 FC_LOGO_TOLERANCE_LIMIT, 11338 FC_LOGO_TOLERANCE_TIME_LIMIT); 11339 11340 fp_fillout_old_map(listptr, pd, 0); 11341 listptr->map_type = PORT_DEVICE_OLD; 11342 } 11343 11344 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11345 return; 11346 } 11347 11348 fp_reject_prlo: 11349 11350 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11351 if (cmd != NULL) { 11352 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11353 FC_REASON_INVALID_LINK_CTRL, job); 11354 11355 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11356 fp_free_pkt(cmd); 11357 } 11358 } 11359 } 11360 11361 /* 11362 * Unsolicited LOGO handler 11363 */ 11364 static void 11365 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11366 fc_remote_port_t *pd, job_request_t *job) 11367 { 11368 int busy; 11369 int rval; 11370 int retain; 11371 fp_cmd_t *cmd; 11372 fc_portmap_t *listptr; 11373 boolean_t tolerance; 11374 11375 mutex_enter(&port->fp_mutex); 11376 busy = port->fp_statec_busy; 11377 mutex_exit(&port->fp_mutex); 11378 11379 mutex_enter(&pd->pd_mutex); 11380 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11381 if (!busy) { 11382 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11383 pd->pd_state == PORT_DEVICE_INVALID || 11384 pd->pd_flags == PD_ELS_IN_PROGRESS || 11385 pd->pd_type == PORT_DEVICE_OLD) { 11386 busy++; 11387 } 11388 } 11389 11390 if (busy) { 11391 mutex_exit(&pd->pd_mutex); 11392 11393 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11394 "pd=%p - busy", 11395 pd->pd_port_id.port_id, pd); 11396 11397 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11398 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11399 0, KM_SLEEP, pd); 11400 if (cmd != NULL) { 11401 fp_els_rjt_init(port, cmd, buf, 11402 FC_ACTION_NON_RETRYABLE, 11403 FC_REASON_INVALID_LINK_CTRL, job); 11404 11405 if (fp_sendcmd(port, cmd, 11406 port->fp_fca_handle) != FC_SUCCESS) { 11407 fp_free_pkt(cmd); 11408 } 11409 } 11410 } 11411 } else { 11412 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11413 11414 if (tolerance) { 11415 fctl_tc_reset(&pd->pd_logo_tc); 11416 retain = 0; 11417 pd->pd_state = PORT_DEVICE_INVALID; 11418 } 11419 11420 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11421 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11422 tolerance, retain); 11423 11424 pd->pd_aux_flags |= PD_LOGGED_OUT; 11425 mutex_exit(&pd->pd_mutex); 11426 11427 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11428 KM_SLEEP, pd); 11429 if (cmd == NULL) { 11430 return; 11431 } 11432 11433 fp_els_acc_init(port, cmd, buf, job); 11434 11435 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11436 if (rval != FC_SUCCESS) { 11437 fp_free_pkt(cmd); 11438 return; 11439 } 11440 11441 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11442 11443 if (retain) { 11444 job_request_t *job; 11445 fctl_ns_req_t *ns_cmd; 11446 11447 /* 11448 * when get LOGO, first try to get PID from nameserver 11449 * if failed, then we do not need 11450 * send PLOGI to that remote port 11451 */ 11452 job = fctl_alloc_job( 11453 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11454 11455 if (job != NULL) { 11456 ns_cmd = fctl_alloc_ns_cmd( 11457 sizeof (ns_req_gid_pn_t), 11458 sizeof (ns_resp_gid_pn_t), 11459 sizeof (ns_resp_gid_pn_t), 11460 0, KM_SLEEP); 11461 if (ns_cmd != NULL) { 11462 int ret; 11463 job->job_result = FC_SUCCESS; 11464 ns_cmd->ns_cmd_code = NS_GID_PN; 11465 ((ns_req_gid_pn_t *) 11466 (ns_cmd->ns_cmd_buf))->pwwn = 11467 pd->pd_port_name; 11468 ret = fp_ns_query( 11469 port, ns_cmd, job, 1, KM_SLEEP); 11470 if ((ret != FC_SUCCESS) || 11471 (job->job_result != FC_SUCCESS)) { 11472 fctl_free_ns_cmd(ns_cmd); 11473 fctl_dealloc_job(job); 11474 FP_TRACE(FP_NHEAD2(9, 0), 11475 "NS query failed,", 11476 " delete pd"); 11477 goto delete_pd; 11478 } 11479 fctl_free_ns_cmd(ns_cmd); 11480 } 11481 fctl_dealloc_job(job); 11482 } 11483 fp_unregister_login(pd); 11484 fctl_copy_portmap(listptr, pd); 11485 } else { 11486 uint32_t d_id; 11487 char ww_name[17]; 11488 11489 delete_pd: 11490 mutex_enter(&pd->pd_mutex); 11491 d_id = pd->pd_port_id.port_id; 11492 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11493 mutex_exit(&pd->pd_mutex); 11494 11495 FP_TRACE(FP_NHEAD2(9, 0), 11496 "N_x Port with D_ID=%x, PWWN=%s logged out" 11497 " %d times in %d us; Giving up", d_id, ww_name, 11498 FC_LOGO_TOLERANCE_LIMIT, 11499 FC_LOGO_TOLERANCE_TIME_LIMIT); 11500 11501 fp_fillout_old_map(listptr, pd, 0); 11502 listptr->map_type = PORT_DEVICE_OLD; 11503 } 11504 11505 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11506 } 11507 } 11508 11509 11510 /* 11511 * Perform general purpose preparation of a response to an unsolicited request 11512 */ 11513 static void 11514 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11515 uchar_t r_ctl, uchar_t type) 11516 { 11517 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11518 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11519 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11520 pkt->pkt_cmd_fhdr.type = type; 11521 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11522 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11523 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11524 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11525 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11526 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11527 pkt->pkt_cmd_fhdr.ro = 0; 11528 pkt->pkt_cmd_fhdr.rsvd = 0; 11529 pkt->pkt_comp = fp_unsol_intr; 11530 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11531 } 11532 11533 /* 11534 * Immediate handling of unsolicited FLOGI and PLOGI 
requests. In the 11535 * early development days of public loop soc+ firmware, numerous problems 11536 * were encountered (the details are undocumented and history now) which 11537 * led to the birth of this function. 11538 * 11539 * If a pre-allocated unsolicited response packet is free, send out an 11540 * immediate response, otherwise submit the request to the port thread 11541 * to do the deferred processing. 11542 */ 11543 static void 11544 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11545 { 11546 int sent; 11547 int f_port; 11548 int do_acc; 11549 fp_cmd_t *cmd; 11550 la_els_logi_t *payload; 11551 fc_remote_port_t *pd; 11552 char dww_name[17]; 11553 11554 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11555 11556 cmd = port->fp_els_resp_pkt; 11557 11558 mutex_enter(&port->fp_mutex); 11559 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11560 mutex_exit(&port->fp_mutex); 11561 11562 switch (buf->ub_buffer[0]) { 11563 case LA_ELS_PLOGI: { 11564 int small; 11565 11566 payload = (la_els_logi_t *)buf->ub_buffer; 11567 11568 f_port = FP_IS_F_PORT(payload-> 11569 common_service.cmn_features) ? 1 : 0; 11570 11571 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11572 &payload->nport_ww_name); 11573 pd = fctl_get_remote_port_by_pwwn(port, 11574 &payload->nport_ww_name); 11575 if (pd) { 11576 mutex_enter(&pd->pd_mutex); 11577 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11578 /* 11579 * Most likely this means a cross login is in 11580 * progress or a device about to be yanked out. 11581 * Only accept the plogi if my wwn is smaller. 11582 */ 11583 if (pd->pd_type == PORT_DEVICE_OLD) { 11584 sent = 1; 11585 } 11586 /* 11587 * Stop plogi request (if any) 11588 * attempt from local side to speedup 11589 * the discovery progress. 11590 * Mark the pd as PD_PLOGI_RECEPIENT. 11591 */ 11592 if (f_port == 0 && small < 0) { 11593 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11594 } 11595 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11596 11597 mutex_exit(&pd->pd_mutex); 11598 11599 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11600 "Unsol PLOGI received. PD still exists in the " 11601 "PWWN list. pd=%p PWWN=%s, sent=%x", 11602 pd, dww_name, sent); 11603 11604 if (f_port == 0 && small < 0) { 11605 FP_TRACE(FP_NHEAD1(3, 0), 11606 "fp_i_handle_unsol_els: Mark the pd" 11607 " as plogi recipient, pd=%p, PWWN=%s" 11608 ", sent=%x", 11609 pd, dww_name, sent); 11610 } 11611 } else { 11612 sent = 0; 11613 } 11614 11615 /* 11616 * To avoid Login collisions, accept only if my WWN 11617 * is smaller than the requester (A curious side note 11618 * would be that this rule may not satisfy the PLOGIs 11619 * initiated by the switch from not-so-well known 11620 * ports such as 0xFFFC41) 11621 */ 11622 if ((f_port == 0 && small < 0) || 11623 (((small > 0 && do_acc) || 11624 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11625 if (fp_is_class_supported(port->fp_cos, 11626 buf->ub_class) == FC_FAILURE) { 11627 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11628 cmd->cmd_pkt.pkt_cmdlen = 11629 sizeof (la_els_rjt_t); 11630 cmd->cmd_pkt.pkt_rsplen = 0; 11631 fp_els_rjt_init(port, cmd, buf, 11632 FC_ACTION_NON_RETRYABLE, 11633 FC_REASON_CLASS_NOT_SUPP, NULL); 11634 FP_TRACE(FP_NHEAD1(3, 0), 11635 "fp_i_handle_unsol_els: " 11636 "Unsupported class. 
" 11637 "Rejecting PLOGI"); 11638 11639 } else { 11640 mutex_enter(&port->fp_mutex); 11641 port->fp_els_resp_pkt_busy = 0; 11642 mutex_exit(&port->fp_mutex); 11643 return; 11644 } 11645 } else { 11646 cmd->cmd_pkt.pkt_cmdlen = 11647 sizeof (la_els_logi_t); 11648 cmd->cmd_pkt.pkt_rsplen = 0; 11649 11650 /* 11651 * Sometime later, we should validate 11652 * the service parameters instead of 11653 * just accepting it. 11654 */ 11655 fp_login_acc_init(port, cmd, buf, NULL, 11656 KM_NOSLEEP); 11657 FP_TRACE(FP_NHEAD1(3, 0), 11658 "fp_i_handle_unsol_els: Accepting PLOGI," 11659 " f_port=%d, small=%d, do_acc=%d," 11660 " sent=%d.", f_port, small, do_acc, 11661 sent); 11662 /* 11663 * If fp_port_id is zero and topology is 11664 * Point-to-Point, get the local port id from 11665 * the d_id in the PLOGI request. 11666 * If the outgoing FLOGI hasn't been accepted, 11667 * the topology will be unknown here. But it's 11668 * still safe to save the d_id to fp_port_id, 11669 * just because it will be overwritten later 11670 * if the topology is not Point-to-Point. 11671 */ 11672 mutex_enter(&port->fp_mutex); 11673 if ((port->fp_port_id.port_id == 0) && 11674 (port->fp_topology == FC_TOP_PT_PT || 11675 port->fp_topology == FC_TOP_UNKNOWN)) { 11676 port->fp_port_id.port_id = 11677 buf->ub_frame.d_id; 11678 } 11679 mutex_exit(&port->fp_mutex); 11680 } 11681 } else { 11682 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11683 port->fp_options & FP_SEND_RJT) { 11684 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11685 cmd->cmd_pkt.pkt_rsplen = 0; 11686 fp_els_rjt_init(port, cmd, buf, 11687 FC_ACTION_NON_RETRYABLE, 11688 FC_REASON_LOGICAL_BSY, NULL); 11689 FP_TRACE(FP_NHEAD1(3, 0), 11690 "fp_i_handle_unsol_els: " 11691 "Rejecting PLOGI with Logical Busy." 11692 "Possible Login collision."); 11693 } else { 11694 mutex_enter(&port->fp_mutex); 11695 port->fp_els_resp_pkt_busy = 0; 11696 mutex_exit(&port->fp_mutex); 11697 return; 11698 } 11699 } 11700 break; 11701 } 11702 11703 case LA_ELS_FLOGI: 11704 if (fp_is_class_supported(port->fp_cos, 11705 buf->ub_class) == FC_FAILURE) { 11706 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11707 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11708 cmd->cmd_pkt.pkt_rsplen = 0; 11709 fp_els_rjt_init(port, cmd, buf, 11710 FC_ACTION_NON_RETRYABLE, 11711 FC_REASON_CLASS_NOT_SUPP, NULL); 11712 FP_TRACE(FP_NHEAD1(3, 0), 11713 "fp_i_handle_unsol_els: " 11714 "Unsupported Class. Rejecting FLOGI."); 11715 } else { 11716 mutex_enter(&port->fp_mutex); 11717 port->fp_els_resp_pkt_busy = 0; 11718 mutex_exit(&port->fp_mutex); 11719 return; 11720 } 11721 } else { 11722 mutex_enter(&port->fp_mutex); 11723 if (FC_PORT_STATE_MASK(port->fp_state) != 11724 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11725 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11726 mutex_exit(&port->fp_mutex); 11727 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11728 cmd->cmd_pkt.pkt_cmdlen = 11729 sizeof (la_els_rjt_t); 11730 cmd->cmd_pkt.pkt_rsplen = 0; 11731 fp_els_rjt_init(port, cmd, buf, 11732 FC_ACTION_NON_RETRYABLE, 11733 FC_REASON_INVALID_LINK_CTRL, 11734 NULL); 11735 FP_TRACE(FP_NHEAD1(3, 0), 11736 "fp_i_handle_unsol_els: " 11737 "Invalid Link Ctrl. 
" 11738 "Rejecting FLOGI."); 11739 } else { 11740 mutex_enter(&port->fp_mutex); 11741 port->fp_els_resp_pkt_busy = 0; 11742 mutex_exit(&port->fp_mutex); 11743 return; 11744 } 11745 } else { 11746 mutex_exit(&port->fp_mutex); 11747 cmd->cmd_pkt.pkt_cmdlen = 11748 sizeof (la_els_logi_t); 11749 cmd->cmd_pkt.pkt_rsplen = 0; 11750 /* 11751 * Let's not aggressively validate the N_Port's 11752 * service parameters until PLOGI. Suffice it 11753 * to give a hint that we are an N_Port and we 11754 * are game to some serious stuff here. 11755 */ 11756 fp_login_acc_init(port, cmd, buf, 11757 NULL, KM_NOSLEEP); 11758 FP_TRACE(FP_NHEAD1(3, 0), 11759 "fp_i_handle_unsol_els: " 11760 "Accepting FLOGI."); 11761 } 11762 } 11763 break; 11764 11765 default: 11766 return; 11767 } 11768 11769 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11770 mutex_enter(&port->fp_mutex); 11771 port->fp_els_resp_pkt_busy = 0; 11772 mutex_exit(&port->fp_mutex); 11773 } 11774 } 11775 11776 11777 /* 11778 * Handle unsolicited PLOGI request 11779 */ 11780 static void 11781 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11782 job_request_t *job, int sleep) 11783 { 11784 int sent; 11785 int small; 11786 int f_port; 11787 int do_acc; 11788 fp_cmd_t *cmd; 11789 la_wwn_t *swwn; 11790 la_wwn_t *dwwn; 11791 la_els_logi_t *payload; 11792 fc_remote_port_t *pd; 11793 char dww_name[17]; 11794 11795 payload = (la_els_logi_t *)buf->ub_buffer; 11796 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11797 11798 mutex_enter(&port->fp_mutex); 11799 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11800 mutex_exit(&port->fp_mutex); 11801 11802 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11803 "type=%x, f_ctl=%x" 11804 " seq_id=%x, ox_id=%x, rx_id=%x" 11805 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11806 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11807 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11808 11809 swwn = &port->fp_service_params.nport_ww_name; 11810 dwwn = &payload->nport_ww_name; 11811 small = fctl_wwn_cmp(swwn, dwwn); 11812 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11813 if (pd) { 11814 mutex_enter(&pd->pd_mutex); 11815 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11816 /* 11817 * Most likely this means a cross login is in 11818 * progress or a device about to be yanked out. 11819 * Only accept the plogi if my wwn is smaller. 11820 */ 11821 11822 if (pd->pd_type == PORT_DEVICE_OLD) { 11823 sent = 1; 11824 } 11825 /* 11826 * Stop plogi request (if any) 11827 * attempt from local side to speedup 11828 * the discovery progress. 11829 * Mark the pd as PD_PLOGI_RECEPIENT. 11830 */ 11831 if (f_port == 0 && small < 0) { 11832 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11833 } 11834 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11835 11836 mutex_exit(&pd->pd_mutex); 11837 11838 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11839 " received. PD still exists in the PWWN list. pd=%p " 11840 "PWWN=%s, sent=%x", pd, dww_name, sent); 11841 11842 if (f_port == 0 && small < 0) { 11843 FP_TRACE(FP_NHEAD1(3, 0), 11844 "fp_handle_unsol_plogi: Mark the pd" 11845 " as plogi recipient, pd=%p, PWWN=%s" 11846 ", sent=%x", 11847 pd, dww_name, sent); 11848 } 11849 } else { 11850 sent = 0; 11851 } 11852 11853 /* 11854 * Avoid Login collisions by accepting only if my WWN is smaller. 
11855 * 11856 * A side note: There is no need to start a PLOGI from this end in 11857 * this context if login isn't going to be accepted for the 11858 * above reason as either a LIP (in private loop), RSCN (in 11859 * fabric topology), or an FLOGI (in point to point - Huh ? 11860 * check FC-PH) would normally drive the PLOGI from this end. 11861 * At this point of time there is no need for an inbound PLOGI 11862 * to kick an outbound PLOGI when it is going to be rejected 11863 * for the reason of WWN being smaller. However it isn't hard 11864 * to do that either (when such a need arises, start a timer 11865 * for a duration that extends beyond a normal device discovery 11866 * time and check if an outbound PLOGI did go before that, if 11867 * none fire one) 11868 * 11869 * Unfortunately, as it turned out, during booting, it is possible 11870 * to miss another initiator in the same loop as port driver 11871 * instances are serially attached. While preserving the above 11872 * comments for belly laughs, please kick an outbound PLOGI in 11873 * a non-switch environment (which is a pt pt between N_Ports or 11874 * a private loop) 11875 * 11876 * While preserving the above comments for amusement, send an 11877 * ACC if the PLOGI is going to be rejected for WWN being smaller 11878 * when no discovery is in progress at this end. Turn around 11879 * and make the port device as the PLOGI initiator, so that 11880 * during subsequent link/loop initialization, this end drives 11881 * the PLOGI (In fact both ends do in this particular case, but 11882 * only one wins) 11883 * 11884 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11885 * ports (such as 0xFFFC41) are accepted too. 11886 */ 11887 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11888 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11889 if (fp_is_class_supported(port->fp_cos, 11890 buf->ub_class) == FC_FAILURE) { 11891 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11892 cmd = fp_alloc_pkt(port, 11893 sizeof (la_els_logi_t), 0, sleep, pd); 11894 if (cmd == NULL) { 11895 return; 11896 } 11897 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11898 cmd->cmd_pkt.pkt_rsplen = 0; 11899 fp_els_rjt_init(port, cmd, buf, 11900 FC_ACTION_NON_RETRYABLE, 11901 FC_REASON_CLASS_NOT_SUPP, job); 11902 FP_TRACE(FP_NHEAD1(3, 0), 11903 "fp_handle_unsol_plogi: " 11904 "Unsupported class. rejecting PLOGI"); 11905 } 11906 } else { 11907 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11908 0, sleep, pd); 11909 if (cmd == NULL) { 11910 return; 11911 } 11912 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11913 cmd->cmd_pkt.pkt_rsplen = 0; 11914 11915 /* 11916 * Sometime later, we should validate the service 11917 * parameters instead of just accepting it. 11918 */ 11919 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11920 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11921 "Accepting PLOGI, f_port=%d, small=%d, " 11922 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11923 sent); 11924 11925 /* 11926 * If fp_port_id is zero and topology is 11927 * Point-to-Point, get the local port id from 11928 * the d_id in the PLOGI request. 11929 * If the outgoing FLOGI hasn't been accepted, 11930 * the topology will be unknown here. But it's 11931 * still safe to save the d_id to fp_port_id, 11932 * just because it will be overwritten later 11933 * if the topology is not Point-to-Point. 
11934 */ 11935 mutex_enter(&port->fp_mutex); 11936 if ((port->fp_port_id.port_id == 0) && 11937 (port->fp_topology == FC_TOP_PT_PT || 11938 port->fp_topology == FC_TOP_UNKNOWN)) { 11939 port->fp_port_id.port_id = 11940 buf->ub_frame.d_id; 11941 } 11942 mutex_exit(&port->fp_mutex); 11943 } 11944 } else { 11945 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11946 port->fp_options & FP_SEND_RJT) { 11947 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11948 0, sleep, pd); 11949 if (cmd == NULL) { 11950 return; 11951 } 11952 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11953 cmd->cmd_pkt.pkt_rsplen = 0; 11954 /* 11955 * Send out Logical busy to indicate 11956 * the detection of PLOGI collision 11957 */ 11958 fp_els_rjt_init(port, cmd, buf, 11959 FC_ACTION_NON_RETRYABLE, 11960 FC_REASON_LOGICAL_BSY, job); 11961 11962 fc_wwn_to_str(dwwn, dww_name); 11963 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11964 "Rejecting Unsol PLOGI with Logical Busy." 11965 "possible PLOGI collision. PWWN=%s, sent=%x", 11966 dww_name, sent); 11967 } else { 11968 return; 11969 } 11970 } 11971 11972 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11973 fp_free_pkt(cmd); 11974 } 11975 } 11976 11977 11978 /* 11979 * Handle mischievous turning over of our own FLOGI requests back to 11980 * us by the SOC+ microcode. In other words, look at the class of such 11981 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11982 * on the floor 11983 */ 11984 static void 11985 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11986 job_request_t *job, int sleep) 11987 { 11988 uint32_t state; 11989 uint32_t s_id; 11990 fp_cmd_t *cmd; 11991 11992 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 11993 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11994 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11995 0, sleep, NULL); 11996 if (cmd == NULL) { 11997 return; 11998 } 11999 fp_els_rjt_init(port, cmd, buf, 12000 FC_ACTION_NON_RETRYABLE, 12001 FC_REASON_CLASS_NOT_SUPP, job); 12002 } else { 12003 return; 12004 } 12005 } else { 12006 12007 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12008 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12009 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12010 buf->ub_frame.s_id, buf->ub_frame.d_id, 12011 buf->ub_frame.type, buf->ub_frame.f_ctl, 12012 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12013 buf->ub_frame.rx_id, buf->ub_frame.ro); 12014 12015 mutex_enter(&port->fp_mutex); 12016 state = FC_PORT_STATE_MASK(port->fp_state); 12017 s_id = port->fp_port_id.port_id; 12018 mutex_exit(&port->fp_mutex); 12019 12020 if (state != FC_STATE_ONLINE || 12021 (s_id && buf->ub_frame.s_id == s_id)) { 12022 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12023 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12024 0, sleep, NULL); 12025 if (cmd == NULL) { 12026 return; 12027 } 12028 fp_els_rjt_init(port, cmd, buf, 12029 FC_ACTION_NON_RETRYABLE, 12030 FC_REASON_INVALID_LINK_CTRL, job); 12031 FP_TRACE(FP_NHEAD1(3, 0), 12032 "fp_handle_unsol_flogi: " 12033 "Rejecting PLOGI. Invalid Link CTRL"); 12034 } else { 12035 return; 12036 } 12037 } else { 12038 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12039 0, sleep, NULL); 12040 if (cmd == NULL) { 12041 return; 12042 } 12043 /* 12044 * Let's not aggressively validate the N_Port's 12045 * service parameters until PLOGI. Suffice it 12046 * to give a hint that we are an N_Port and we 12047 * are game to some serious stuff here. 
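 * (The accept built by fp_login_acc_init() below simply copies this
 * port's fp_service_params with the LS code set to LA_ELS_ACC.)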
12048 */ 12049 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12050 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12051 "Accepting PLOGI"); 12052 } 12053 } 12054 12055 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12056 fp_free_pkt(cmd); 12057 } 12058 } 12059 12060 12061 /* 12062 * Perform PLOGI accept 12063 */ 12064 static void 12065 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12066 job_request_t *job, int sleep) 12067 { 12068 fc_packet_t *pkt; 12069 fc_portmap_t *listptr; 12070 la_els_logi_t payload; 12071 12072 ASSERT(buf != NULL); 12073 12074 /* 12075 * If we are sending ACC to PLOGI and we haven't already 12076 * create port and node device handles, let's create them 12077 * here. 12078 */ 12079 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12080 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12081 int small; 12082 int do_acc; 12083 fc_remote_port_t *pd; 12084 la_els_logi_t *req; 12085 12086 req = (la_els_logi_t *)buf->ub_buffer; 12087 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12088 &req->nport_ww_name); 12089 12090 mutex_enter(&port->fp_mutex); 12091 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12092 mutex_exit(&port->fp_mutex); 12093 12094 pd = fctl_create_remote_port(port, &req->node_ww_name, 12095 &req->nport_ww_name, buf->ub_frame.s_id, 12096 PD_PLOGI_RECEPIENT, sleep); 12097 if (pd == NULL) { 12098 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12099 "Couldn't create port device for d_id:0x%x", 12100 buf->ub_frame.s_id); 12101 12102 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12103 "couldn't create port device d_id=%x", 12104 buf->ub_frame.s_id); 12105 } else { 12106 /* 12107 * usoc currently returns PLOGIs inline and 12108 * the maximum buffer size is 60 bytes or so. 12109 * So attempt not to look beyond what is in 12110 * the unsolicited buffer 12111 * 12112 * JNI also traverses this path sometimes 12113 */ 12114 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12115 fp_register_login(NULL, pd, req, buf->ub_class); 12116 } else { 12117 mutex_enter(&pd->pd_mutex); 12118 if (pd->pd_login_count == 0) { 12119 pd->pd_login_count++; 12120 } 12121 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12122 pd->pd_login_class = buf->ub_class; 12123 mutex_exit(&pd->pd_mutex); 12124 } 12125 12126 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12127 if (listptr != NULL) { 12128 fctl_copy_portmap(listptr, pd); 12129 (void) fp_ulp_devc_cb(port, listptr, 12130 1, 1, sleep, 0); 12131 } 12132 12133 if (small > 0 && do_acc) { 12134 mutex_enter(&pd->pd_mutex); 12135 pd->pd_recepient = PD_PLOGI_INITIATOR; 12136 mutex_exit(&pd->pd_mutex); 12137 } 12138 } 12139 } 12140 12141 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12142 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12143 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12144 cmd->cmd_retry_count = 1; 12145 cmd->cmd_ulp_pkt = NULL; 12146 12147 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12148 cmd->cmd_job = job; 12149 12150 pkt = &cmd->cmd_pkt; 12151 12152 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12153 12154 payload = port->fp_service_params; 12155 payload.ls_code.ls_code = LA_ELS_ACC; 12156 12157 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12158 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12159 12160 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12161 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12162 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12163 "statec_busy:0x%x", buf->ub_buffer[0], 
buf->ub_frame.s_id, 12164 buf->ub_bufsize, sizeof (la_els_logi_t), 12165 port->fp_service_params.nport_ww_name.w.naa_id, 12166 port->fp_service_params.nport_ww_name.w.nport_id, 12167 port->fp_service_params.nport_ww_name.w.wwn_hi, 12168 port->fp_service_params.nport_ww_name.w.wwn_lo, 12169 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12170 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12171 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12172 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12173 port->fp_statec_busy); 12174 } 12175 12176 12177 #define RSCN_EVENT_NAME_LEN 256 12178 12179 /* 12180 * Handle RSCNs 12181 */ 12182 static void 12183 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12184 job_request_t *job, int sleep) 12185 { 12186 uint32_t mask; 12187 fp_cmd_t *cmd; 12188 uint32_t count; 12189 int listindex; 12190 int16_t len; 12191 fc_rscn_t *payload; 12192 fc_portmap_t *listptr; 12193 fctl_ns_req_t *ns_cmd; 12194 fc_affected_id_t *page; 12195 caddr_t nvname; 12196 nvlist_t *attr_list = NULL; 12197 12198 mutex_enter(&port->fp_mutex); 12199 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12200 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12201 --port->fp_rscn_count; 12202 } 12203 mutex_exit(&port->fp_mutex); 12204 return; 12205 } 12206 mutex_exit(&port->fp_mutex); 12207 12208 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12209 if (cmd != NULL) { 12210 fp_els_acc_init(port, cmd, buf, job); 12211 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12212 fp_free_pkt(cmd); 12213 } 12214 } 12215 12216 payload = (fc_rscn_t *)buf->ub_buffer; 12217 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12218 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN); 12219 12220 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12221 12222 if (len <= 0) { 12223 mutex_enter(&port->fp_mutex); 12224 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12225 --port->fp_rscn_count; 12226 } 12227 mutex_exit(&port->fp_mutex); 12228 12229 return; 12230 } 12231 12232 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12233 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12234 12235 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12236 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12237 12238 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12239 12240 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12241 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12242 0, sleep); 12243 if (ns_cmd == NULL) { 12244 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12245 12246 mutex_enter(&port->fp_mutex); 12247 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12248 --port->fp_rscn_count; 12249 } 12250 mutex_exit(&port->fp_mutex); 12251 12252 return; 12253 } 12254 12255 ns_cmd->ns_cmd_code = NS_GPN_ID; 12256 12257 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12258 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12259 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12260 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12261 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12262 12263 /* Only proceed if we can allocate nvname and the nvlist */ 12264 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12265 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12266 KM_NOSLEEP) == DDI_SUCCESS) { 12267 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12268 port->fp_instance) == 
DDI_SUCCESS && 12269 nvlist_add_byte_array(attr_list, "port-wwn", 12270 port->fp_service_params.nport_ww_name.raw_wwn, 12271 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12272 nvlist_free(attr_list); 12273 attr_list = NULL; 12274 } 12275 } 12276 12277 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12278 /* Add affected page to the event payload */ 12279 if (attr_list != NULL) { 12280 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12281 "affected_page_%d", listindex); 12282 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12283 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12284 /* We don't send a partial event, so dump it */ 12285 nvlist_free(attr_list); 12286 attr_list = NULL; 12287 } 12288 } 12289 /* 12290 * Query the NS to get the Port WWN for this 12291 * affected D_ID. 12292 */ 12293 mask = 0; 12294 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12295 case FC_RSCN_PORT_ADDRESS: 12296 fp_validate_rscn_page(port, page, job, ns_cmd, 12297 listptr, &listindex, sleep); 12298 12299 if (listindex == 0) { 12300 /* 12301 * We essentially did not process this RSCN. So, 12302 * ULPs are not going to be called and so we 12303 * decrement the rscn_count 12304 */ 12305 mutex_enter(&port->fp_mutex); 12306 if (--port->fp_rscn_count == 12307 FC_INVALID_RSCN_COUNT) { 12308 --port->fp_rscn_count; 12309 } 12310 mutex_exit(&port->fp_mutex); 12311 } 12312 break; 12313 12314 case FC_RSCN_AREA_ADDRESS: 12315 mask = 0xFFFF00; 12316 /* FALLTHROUGH */ 12317 12318 case FC_RSCN_DOMAIN_ADDRESS: 12319 if (!mask) { 12320 mask = 0xFF0000; 12321 } 12322 fp_validate_area_domain(port, page->aff_d_id, mask, 12323 job, sleep); 12324 break; 12325 12326 case FC_RSCN_FABRIC_ADDRESS: 12327 /* 12328 * We need to discover all the devices on this 12329 * port. 12330 */ 12331 fp_validate_area_domain(port, 0, 0, job, sleep); 12332 break; 12333 12334 default: 12335 break; 12336 } 12337 } 12338 if (attr_list != NULL) { 12339 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12340 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12341 NULL, DDI_SLEEP); 12342 nvlist_free(attr_list); 12343 } else { 12344 FP_TRACE(FP_NHEAD1(9, 0), 12345 "RSCN handled, but event not sent to userland"); 12346 } 12347 if (nvname != NULL) { 12348 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12349 } 12350 12351 if (ns_cmd) { 12352 fctl_free_ns_cmd(ns_cmd); 12353 } 12354 12355 if (listindex) { 12356 #ifdef DEBUG 12357 page = (fc_affected_id_t *)(buf->ub_buffer + 12358 sizeof (fc_rscn_t)); 12359 12360 if (listptr->map_did.port_id != page->aff_d_id) { 12361 FP_TRACE(FP_NHEAD1(9, 0), 12362 "PORT RSCN: processed=%x, reporting=%x", 12363 listptr->map_did.port_id, page->aff_d_id); 12364 } 12365 #endif 12366 12367 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12368 sleep, 0); 12369 } else { 12370 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12371 } 12372 } 12373 12374 12375 /* 12376 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12377 */ 12378 static void 12379 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12380 { 12381 int is_switch; 12382 int initiator; 12383 fc_local_port_t *port; 12384 12385 port = pd->pd_port; 12386 12387 /* This function has the following bunch of assumptions */ 12388 ASSERT(port != NULL); 12389 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12390 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12391 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12392 12393 pd->pd_state = PORT_DEVICE_INVALID; 12394 pd->pd_type = PORT_DEVICE_OLD; 12395 initiator = (pd->pd_recepient == 
PD_PLOGI_INITIATOR) ? 1 : 0; 12396 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12397 12398 fctl_delist_did_table(port, pd); 12399 fctl_delist_pwwn_table(port, pd); 12400 12401 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12402 " removed the PD=%p from DID and PWWN tables", 12403 port, pd->pd_port_id.port_id, pd); 12404 12405 if ((!flag) && port && initiator && is_switch) { 12406 (void) fctl_add_orphan_held(port, pd); 12407 } 12408 fctl_copy_portmap_held(map, pd); 12409 map->map_pd = pd; 12410 } 12411 12412 /* 12413 * Fill out old map for ULPs 12414 */ 12415 static void 12416 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12417 { 12418 int is_switch; 12419 int initiator; 12420 fc_local_port_t *port; 12421 12422 mutex_enter(&pd->pd_mutex); 12423 port = pd->pd_port; 12424 mutex_exit(&pd->pd_mutex); 12425 12426 mutex_enter(&port->fp_mutex); 12427 mutex_enter(&pd->pd_mutex); 12428 12429 pd->pd_state = PORT_DEVICE_INVALID; 12430 pd->pd_type = PORT_DEVICE_OLD; 12431 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12432 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12433 12434 fctl_delist_did_table(port, pd); 12435 fctl_delist_pwwn_table(port, pd); 12436 12437 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12438 " removed the PD=%p from DID and PWWN tables", 12439 port, pd->pd_port_id.port_id, pd); 12440 12441 mutex_exit(&pd->pd_mutex); 12442 mutex_exit(&port->fp_mutex); 12443 12444 ASSERT(port != NULL); 12445 if ((!flag) && port && initiator && is_switch) { 12446 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12447 } 12448 fctl_copy_portmap(map, pd); 12449 map->map_pd = pd; 12450 } 12451 12452 12453 /* 12454 * Fillout Changed Map for ULPs 12455 */ 12456 static void 12457 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12458 uint32_t *new_did, la_wwn_t *new_pwwn) 12459 { 12460 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12461 12462 pd->pd_type = PORT_DEVICE_CHANGED; 12463 if (new_did) { 12464 pd->pd_port_id.port_id = *new_did; 12465 } 12466 if (new_pwwn) { 12467 pd->pd_port_name = *new_pwwn; 12468 } 12469 mutex_exit(&pd->pd_mutex); 12470 12471 fctl_copy_portmap(map, pd); 12472 12473 mutex_enter(&pd->pd_mutex); 12474 pd->pd_type = PORT_DEVICE_NOCHANGE; 12475 } 12476 12477 12478 /* 12479 * Fillout New Name Server map 12480 */ 12481 static void 12482 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12483 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12484 { 12485 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12486 12487 if (handle) { 12488 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12489 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12490 DDI_DEV_AUTOINCR); 12491 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12492 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12493 DDI_DEV_AUTOINCR); 12494 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12495 (uint8_t *)gan_resp->gan_fc4types, 12496 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12497 } else { 12498 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12499 sizeof (gan_resp->gan_pwwn)); 12500 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12501 sizeof (gan_resp->gan_nwwn)); 12502 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12503 sizeof (gan_resp->gan_fc4types)); 12504 } 12505 port_map->map_did.port_id = d_id; 12506 port_map->map_did.priv_lilp_posit = 0; 12507 port_map->map_hard_addr.hard_addr = 0; 12508 port_map->map_hard_addr.rsvd = 0; 12509 
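	/*
	 * No login has been performed with this newly discovered port yet,
	 * so the map entry is reported to the ULPs as INVALID/NEW with no
	 * attached remote port structure (map_pd stays NULL).
	 */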
port_map->map_state = PORT_DEVICE_INVALID; 12510 port_map->map_type = PORT_DEVICE_NEW; 12511 port_map->map_flags = 0; 12512 port_map->map_pd = NULL; 12513 12514 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12515 12516 ASSERT(port != NULL); 12517 } 12518 12519 12520 /* 12521 * Perform LINIT ELS 12522 */ 12523 static int 12524 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12525 job_request_t *job) 12526 { 12527 int rval; 12528 uint32_t d_id; 12529 uint32_t s_id; 12530 uint32_t lfa; 12531 uchar_t class; 12532 uint32_t ret; 12533 fp_cmd_t *cmd; 12534 fc_porttype_t ptype; 12535 fc_packet_t *pkt; 12536 fc_linit_req_t payload; 12537 fc_remote_port_t *pd; 12538 12539 rval = 0; 12540 12541 ASSERT(job != NULL); 12542 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12543 12544 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12545 if (pd == NULL) { 12546 fctl_ns_req_t *ns_cmd; 12547 12548 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12549 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12550 0, sleep); 12551 12552 if (ns_cmd == NULL) { 12553 return (FC_NOMEM); 12554 } 12555 job->job_result = FC_SUCCESS; 12556 ns_cmd->ns_cmd_code = NS_GID_PN; 12557 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12558 12559 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12560 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12561 fctl_free_ns_cmd(ns_cmd); 12562 return (FC_FAILURE); 12563 } 12564 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12565 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12566 12567 fctl_free_ns_cmd(ns_cmd); 12568 lfa = d_id & 0xFFFF00; 12569 12570 /* 12571 * Given this D_ID, get the port type to see if 12572 * we can do LINIT on the LFA 12573 */ 12574 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12575 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12576 0, sleep); 12577 12578 if (ns_cmd == NULL) { 12579 return (FC_NOMEM); 12580 } 12581 12582 job->job_result = FC_SUCCESS; 12583 ns_cmd->ns_cmd_code = NS_GPT_ID; 12584 12585 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12586 ((ns_req_gpt_id_t *) 12587 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12588 12589 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12590 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12591 fctl_free_ns_cmd(ns_cmd); 12592 return (FC_FAILURE); 12593 } 12594 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12595 12596 fctl_free_ns_cmd(ns_cmd); 12597 12598 switch (ptype.port_type) { 12599 case FC_NS_PORT_NL: 12600 case FC_NS_PORT_F_NL: 12601 case FC_NS_PORT_FL: 12602 break; 12603 12604 default: 12605 return (FC_FAILURE); 12606 } 12607 } else { 12608 mutex_enter(&pd->pd_mutex); 12609 ptype = pd->pd_porttype; 12610 12611 switch (pd->pd_porttype.port_type) { 12612 case FC_NS_PORT_NL: 12613 case FC_NS_PORT_F_NL: 12614 case FC_NS_PORT_FL: 12615 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12616 break; 12617 12618 default: 12619 mutex_exit(&pd->pd_mutex); 12620 return (FC_FAILURE); 12621 } 12622 mutex_exit(&pd->pd_mutex); 12623 } 12624 12625 mutex_enter(&port->fp_mutex); 12626 s_id = port->fp_port_id.port_id; 12627 class = port->fp_ns_login_class; 12628 mutex_exit(&port->fp_mutex); 12629 12630 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12631 sizeof (fc_linit_resp_t), sleep, pd); 12632 if (cmd == NULL) { 12633 return (FC_NOMEM); 12634 } 12635 12636 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12637 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12638 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 
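	/*
	 * The LINIT is issued as a regular ELS exchange to the loop fabric
	 * address (lfa) derived above, using the driver's standard retry
	 * count (fp_retry_count).
	 */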
12639 cmd->cmd_retry_count = fp_retry_count; 12640 cmd->cmd_ulp_pkt = NULL; 12641 12642 pkt = &cmd->cmd_pkt; 12643 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12644 12645 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12646 12647 /* 12648 * How does LIP work, by the way? 12649 * If the L_Port receives three consecutive identical ordered 12650 * sets whose first two characters (fully decoded) are equal to 12651 * the values shown in Table 3 of FC-AL-2, then the L_Port shall 12652 * recognize a Loop Initialization Primitive sequence. The 12653 * third character determines the type of LIP: 12654 * LIP(F7) Normal LIP 12655 * LIP(F8) Loop Failure LIP 12656 * 12657 * The possible combinations for the 3rd and 4th bytes are: 12658 * F7, F7 Normal LIP - No valid AL_PA 12659 * F8, F8 Loop Failure - No valid AL_PA 12660 * F7, AL_PS Normal LIP - Valid source AL_PA 12661 * F8, AL_PS Loop Failure - Valid source AL_PA 12662 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS, 12663 * and Normal LIP for all other loop members 12664 * 0xFF AL_PS Vendor specific reset of all loop members 12665 * 12666 * Now, we may not always have an AL_PS (the AL_PA of the source) 12667 * for the 4th character slot, so we choose (Normal LIP, No valid 12668 * AL_PA); that means in the LINIT 12669 * payload we are going to set: 12670 * lip_b3 = 0xF7; Normal LIP 12671 * lip_b4 = 0xF7; No valid source AL_PA 12672 */ 12673 payload.ls_code.ls_code = LA_ELS_LINIT; 12674 payload.ls_code.mbz = 0; 12675 payload.rsvd = 0; 12676 payload.func = 0; /* Let Fabric determine the best way */ 12677 payload.lip_b3 = 0xF7; /* Normal LIP */ 12678 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12679 12680 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12681 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12682 12683 job->job_counter = 1; 12684 12685 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12686 if (ret == FC_SUCCESS) { 12687 fp_jobwait(job); 12688 rval = job->job_result; 12689 } else { 12690 rval = FC_FAILURE; 12691 fp_free_pkt(cmd); 12692 } 12693 12694 return (rval); 12695 } 12696 12697 12698 /* 12699 * Fill out the device handles with the GAN response 12700 */ 12701 static void 12702 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12703 ns_resp_gan_t *gan_resp) 12704 { 12705 fc_remote_node_t *node; 12706 fc_porttype_t type; 12707 fc_local_port_t *port; 12708 12709 ASSERT(pd != NULL); 12710 ASSERT(handle != NULL); 12711 12712 port = pd->pd_port; 12713 12714 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12715 " port_id=%x, sym_len=%d fc4-type=%x", 12716 pd, gan_resp->gan_type_id.rsvd, 12717 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12718 12719 mutex_enter(&pd->pd_mutex); 12720 12721 ddi_rep_get8(*handle, (uint8_t *)&type, 12722 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12723 12724 pd->pd_porttype.port_type = type.port_type; 12725 pd->pd_porttype.rsvd = 0; 12726 12727 pd->pd_spn_len = gan_resp->gan_spnlen; 12728 if (pd->pd_spn_len) { 12729 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12730 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12731 DDI_DEV_AUTOINCR); 12732 } 12733 12734 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12735 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12736 DDI_DEV_AUTOINCR); 12737 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12738 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12739 DDI_DEV_AUTOINCR); 12740 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12741 (uint8_t
*)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12742 DDI_DEV_AUTOINCR); 12743 12744 node = pd->pd_remote_nodep; 12745 mutex_exit(&pd->pd_mutex); 12746 12747 mutex_enter(&node->fd_mutex); 12748 12749 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12750 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12751 DDI_DEV_AUTOINCR); 12752 12753 node->fd_snn_len = gan_resp->gan_snnlen; 12754 if (node->fd_snn_len) { 12755 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12756 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12757 DDI_DEV_AUTOINCR); 12758 } 12759 12760 mutex_exit(&node->fd_mutex); 12761 } 12762 12763 12764 /* 12765 * Handles all NS Queries (also means that this function 12766 * doesn't handle NS object registration) 12767 */ 12768 static int 12769 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12770 int polled, int sleep) 12771 { 12772 int rval; 12773 fp_cmd_t *cmd; 12774 12775 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12776 12777 if (ns_cmd->ns_cmd_size == 0) { 12778 return (FC_FAILURE); 12779 } 12780 12781 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12782 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12783 ns_cmd->ns_resp_size, sleep, NULL); 12784 if (cmd == NULL) { 12785 return (FC_NOMEM); 12786 } 12787 12788 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12789 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12790 12791 if (polled) { 12792 job->job_counter = 1; 12793 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12794 } 12795 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12796 if (rval != FC_SUCCESS) { 12797 job->job_result = rval; 12798 fp_iodone(cmd); 12799 if (polled == 0) { 12800 /* 12801 * Return FC_SUCCESS to indicate that 12802 * fp_iodone is performed already. 
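			 * The caller must therefore not complete the
			 * command a second time.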
12803 */ 12804 rval = FC_SUCCESS; 12805 } 12806 } 12807 12808 if (polled) { 12809 fp_jobwait(job); 12810 rval = job->job_result; 12811 } 12812 12813 return (rval); 12814 } 12815 12816 12817 /* 12818 * Initialize Common Transport request 12819 */ 12820 static void 12821 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12822 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12823 uint16_t resp_len, job_request_t *job) 12824 { 12825 uint32_t s_id; 12826 uchar_t class; 12827 fc_packet_t *pkt; 12828 fc_ct_header_t ct; 12829 12830 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12831 12832 mutex_enter(&port->fp_mutex); 12833 s_id = port->fp_port_id.port_id; 12834 class = port->fp_ns_login_class; 12835 mutex_exit(&port->fp_mutex); 12836 12837 cmd->cmd_job = job; 12838 cmd->cmd_private = ns_cmd; 12839 pkt = &cmd->cmd_pkt; 12840 12841 ct.ct_rev = CT_REV; 12842 ct.ct_inid = 0; 12843 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12844 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12845 ct.ct_options = 0; 12846 ct.ct_reserved1 = 0; 12847 ct.ct_cmdrsp = cmd_code; 12848 ct.ct_aiusize = resp_len >> 2; 12849 ct.ct_reserved2 = 0; 12850 ct.ct_reason = 0; 12851 ct.ct_expln = 0; 12852 ct.ct_vendor = 0; 12853 12854 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12855 sizeof (ct), DDI_DEV_AUTOINCR); 12856 12857 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12858 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12859 pkt->pkt_cmd_fhdr.s_id = s_id; 12860 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12861 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12862 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12863 pkt->pkt_cmd_fhdr.seq_id = 0; 12864 pkt->pkt_cmd_fhdr.df_ctl = 0; 12865 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12866 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12867 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12868 pkt->pkt_cmd_fhdr.ro = 0; 12869 pkt->pkt_cmd_fhdr.rsvd = 0; 12870 12871 pkt->pkt_comp = fp_ns_intr; 12872 pkt->pkt_ulp_private = (opaque_t)cmd; 12873 pkt->pkt_timeout = FP_NS_TIMEOUT; 12874 12875 if (cmd_buf) { 12876 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12877 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12878 cmd_len, DDI_DEV_AUTOINCR); 12879 } 12880 12881 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12882 12883 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12884 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12885 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12886 cmd->cmd_retry_count = fp_retry_count; 12887 cmd->cmd_ulp_pkt = NULL; 12888 } 12889 12890 12891 /* 12892 * Name Server request interrupt routine 12893 */ 12894 static void 12895 fp_ns_intr(fc_packet_t *pkt) 12896 { 12897 fp_cmd_t *cmd; 12898 fc_local_port_t *port; 12899 fc_ct_header_t resp_hdr; 12900 fc_ct_header_t cmd_hdr; 12901 fctl_ns_req_t *ns_cmd; 12902 12903 cmd = pkt->pkt_ulp_private; 12904 port = cmd->cmd_port; 12905 12906 mutex_enter(&port->fp_mutex); 12907 port->fp_out_fpcmds--; 12908 mutex_exit(&port->fp_mutex); 12909 12910 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12911 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12912 ns_cmd = (fctl_ns_req_t *) 12913 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12914 if (!FP_IS_PKT_ERROR(pkt)) { 12915 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12916 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12917 DDI_DEV_AUTOINCR); 12918 12919 /* 12920 * On x86 architectures, make sure the resp_hdr is big endian. 
* This macro is a NOP on sparc architectures mainly because 12922 * we don't want to end up wasting time since the end result 12923 * is going to be the same. 12924 */ 12925 MAKE_BE_32(&resp_hdr); 12926 12927 if (ns_cmd) { 12928 /* 12929 * Always copy out the response CT_HDR 12930 */ 12931 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12932 sizeof (resp_hdr)); 12933 } 12934 12935 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12936 pkt->pkt_state = FC_PKT_FS_RJT; 12937 pkt->pkt_reason = resp_hdr.ct_reason; 12938 pkt->pkt_expln = resp_hdr.ct_expln; 12939 } 12940 } 12941 12942 if (FP_IS_PKT_ERROR(pkt)) { 12943 if (ns_cmd) { 12944 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12945 ASSERT(ns_cmd->ns_pd != NULL); 12946 12947 /* Mark it OLD if not already done */ 12948 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12949 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12950 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12951 } 12952 12953 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12954 fctl_free_ns_cmd(ns_cmd); 12955 ((fp_cmd_t *) 12956 (pkt->pkt_ulp_private))->cmd_private = NULL; 12957 } 12958 12959 } 12960 12961 FP_TRACE(FP_NHEAD1(4, 0), "NS failure; pkt state=%x reason=%x", 12962 pkt->pkt_state, pkt->pkt_reason); 12963 12964 (void) fp_common_intr(pkt, 1); 12965 12966 return; 12967 } 12968 12969 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12970 uint32_t d_id; 12971 fc_local_port_t *port; 12972 fp_cmd_t *cmd; 12973 12974 d_id = pkt->pkt_cmd_fhdr.d_id; 12975 cmd = pkt->pkt_ulp_private; 12976 port = cmd->cmd_port; 12977 FP_TRACE(FP_NHEAD2(9, 0), 12978 "Bogus NS response received for D_ID=%x", d_id); 12979 } 12980 12981 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12982 fp_gan_handler(pkt, ns_cmd); 12983 return; 12984 } 12985 12986 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 12987 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 12988 if (ns_cmd) { 12989 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 12990 fp_ns_query_handler(pkt, ns_cmd); 12991 return; 12992 } 12993 } 12994 } 12995 12996 fp_iodone(pkt->pkt_ulp_private); 12997 } 12998 12999 13000 /* 13001 * Process NS_GAN response 13002 */ 13003 static void 13004 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13005 { 13006 int my_did; 13007 fc_portid_t d_id; 13008 fp_cmd_t *cmd; 13009 fc_local_port_t *port; 13010 fc_remote_port_t *pd; 13011 ns_req_gan_t gan_req; 13012 ns_resp_gan_t *gan_resp; 13013 13014 ASSERT(ns_cmd != NULL); 13015 13016 cmd = pkt->pkt_ulp_private; 13017 port = cmd->cmd_port; 13018 13019 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13020 13021 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 13022 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13023 13024 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13025 13026 /* 13027 * Here the priv_lilp_posit field actually represents the 13028 * relative position on a private loop, so zero it while 13029 * dealing with Port Identifiers. 13030 */ 13031 d_id.priv_lilp_posit = 0; 13032 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13033 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13034 /* 13035 * We've come full circle; time to get out. 13036 */ 13037 fp_iodone(cmd); 13038 return; 13039 } 13040 13041 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13042 ns_cmd->ns_gan_sid = d_id.port_id; 13043 } 13044 13045 mutex_enter(&port->fp_mutex); 13046 my_did = (d_id.port_id == port->fp_port_id.port_id) ?
1 : 0; 13047 mutex_exit(&port->fp_mutex); 13048 13049 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, d_id=%x", port, 13050 d_id.port_id); 13051 13052 if (my_did == 0) { 13053 la_wwn_t pwwn; 13054 la_wwn_t nwwn; 13055 13056 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13057 "port=%p, d_id=%x, type_id=%x, " 13058 "pwwn=%x %x %x %x %x %x %x %x, " 13059 "nwwn=%x %x %x %x %x %x %x %x", 13060 port, d_id.port_id, gan_resp->gan_type_id, 13061 13062 gan_resp->gan_pwwn.raw_wwn[0], 13063 gan_resp->gan_pwwn.raw_wwn[1], 13064 gan_resp->gan_pwwn.raw_wwn[2], 13065 gan_resp->gan_pwwn.raw_wwn[3], 13066 gan_resp->gan_pwwn.raw_wwn[4], 13067 gan_resp->gan_pwwn.raw_wwn[5], 13068 gan_resp->gan_pwwn.raw_wwn[6], 13069 gan_resp->gan_pwwn.raw_wwn[7], 13070 13071 gan_resp->gan_nwwn.raw_wwn[0], 13072 gan_resp->gan_nwwn.raw_wwn[1], 13073 gan_resp->gan_nwwn.raw_wwn[2], 13074 gan_resp->gan_nwwn.raw_wwn[3], 13075 gan_resp->gan_nwwn.raw_wwn[4], 13076 gan_resp->gan_nwwn.raw_wwn[5], 13077 gan_resp->gan_nwwn.raw_wwn[6], 13078 gan_resp->gan_nwwn.raw_wwn[7]); 13079 13080 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13081 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13082 DDI_DEV_AUTOINCR); 13083 13084 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13085 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13086 DDI_DEV_AUTOINCR); 13087 13088 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13089 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13090 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13091 } 13092 if (pd != NULL) { 13093 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13094 pd, gan_resp); 13095 } 13096 13097 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13098 *((int *)ns_cmd->ns_data_buf) += 1; 13099 } 13100 13101 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13102 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13103 13104 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13105 fc_port_dev_t *userbuf; 13106 13107 userbuf = ((fc_port_dev_t *) 13108 ns_cmd->ns_data_buf) + 13109 ns_cmd->ns_gan_index++; 13110 13111 userbuf->dev_did = d_id; 13112 13113 ddi_rep_get8(pkt->pkt_resp_acc, 13114 (uint8_t *)userbuf->dev_type, 13115 (uint8_t *)gan_resp->gan_fc4types, 13116 sizeof (userbuf->dev_type), 13117 DDI_DEV_AUTOINCR); 13118 13119 userbuf->dev_nwwn = nwwn; 13120 userbuf->dev_pwwn = pwwn; 13121 13122 if (pd != NULL) { 13123 mutex_enter(&pd->pd_mutex); 13124 userbuf->dev_state = pd->pd_state; 13125 userbuf->dev_hard_addr = 13126 pd->pd_hard_addr; 13127 mutex_exit(&pd->pd_mutex); 13128 } else { 13129 userbuf->dev_state = 13130 PORT_DEVICE_INVALID; 13131 } 13132 } else if (ns_cmd->ns_flags & 13133 FCTL_NS_BUF_IS_FC_PORTMAP) { 13134 fc_portmap_t *map; 13135 13136 map = ((fc_portmap_t *) 13137 ns_cmd->ns_data_buf) + 13138 ns_cmd->ns_gan_index++; 13139 13140 /* 13141 * First fill it like any new map 13142 * and update the port device info 13143 * below. 
13144 */ 13145 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13146 map, gan_resp, d_id.port_id); 13147 if (pd != NULL) { 13148 fctl_copy_portmap(map, pd); 13149 } else { 13150 map->map_state = PORT_DEVICE_INVALID; 13151 map->map_type = PORT_DEVICE_NOCHANGE; 13152 } 13153 } else { 13154 caddr_t dst_ptr; 13155 13156 dst_ptr = ns_cmd->ns_data_buf + 13157 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13158 13159 ddi_rep_get8(pkt->pkt_resp_acc, 13160 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13161 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13162 } 13163 } else { 13164 ns_cmd->ns_gan_index++; 13165 } 13166 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13167 fp_iodone(cmd); 13168 return; 13169 } 13170 } 13171 13172 gan_req.pid = d_id; 13173 13174 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13175 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13176 sizeof (gan_req), DDI_DEV_AUTOINCR); 13177 13178 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13179 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13180 fp_iodone(cmd); 13181 } else { 13182 mutex_enter(&port->fp_mutex); 13183 port->fp_out_fpcmds++; 13184 mutex_exit(&port->fp_mutex); 13185 } 13186 } 13187 13188 13189 /* 13190 * Handle NS Query interrupt 13191 */ 13192 static void 13193 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13194 { 13195 fp_cmd_t *cmd; 13196 fc_local_port_t *port; 13197 caddr_t src_ptr; 13198 uint32_t xfer_len; 13199 13200 cmd = pkt->pkt_ulp_private; 13201 port = cmd->cmd_port; 13202 13203 xfer_len = ns_cmd->ns_resp_size; 13204 13205 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13206 ns_cmd->ns_cmd_code, xfer_len); 13207 13208 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13209 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13210 13211 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13212 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13213 } 13214 13215 if (xfer_len <= ns_cmd->ns_data_len) { 13216 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13217 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13218 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13219 } 13220 13221 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13222 ASSERT(ns_cmd->ns_pd != NULL); 13223 13224 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13225 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13226 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13227 } 13228 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13229 } 13230 13231 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13232 fctl_free_ns_cmd(ns_cmd); 13233 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13234 } 13235 fp_iodone(cmd); 13236 } 13237 13238 13239 /* 13240 * Handle unsolicited ADISC ELS request 13241 */ 13242 static void 13243 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13244 fc_remote_port_t *pd, job_request_t *job) 13245 { 13246 int rval; 13247 fp_cmd_t *cmd; 13248 13249 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13250 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13251 mutex_enter(&pd->pd_mutex); 13252 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13253 mutex_exit(&pd->pd_mutex); 13254 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13255 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13256 0, KM_SLEEP, pd); 13257 if (cmd != NULL) { 13258 fp_els_rjt_init(port, cmd, buf, 13259 FC_ACTION_NON_RETRYABLE, 13260 FC_REASON_INVALID_LINK_CTRL, job); 13261 13262 if (fp_sendcmd(port, cmd, 13263 port->fp_fca_handle) != FC_SUCCESS) { 13264 
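				/*
				 * The reject could not be handed to the
				 * FCA; release the packet ourselves.
				 */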
fp_free_pkt(cmd); 13265 } 13266 } 13267 } 13268 } else { 13269 mutex_exit(&pd->pd_mutex); 13270 /* 13271 * Yes, yes, we don't have a hard address. But we 13272 * should still respond; see section 21.19.2 13273 * of FC-PH-2, which essentially says that if an 13274 * NL_Port doesn't have a hard address, or if a port 13275 * does not have FC-AL capability, it shall report 13276 * zeroes in this field. 13277 */ 13278 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13279 0, KM_SLEEP, pd); 13280 if (cmd == NULL) { 13281 return; 13282 } 13283 fp_adisc_acc_init(port, cmd, buf, job); 13284 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13285 if (rval != FC_SUCCESS) { 13286 fp_free_pkt(cmd); 13287 } 13288 } 13289 } 13290 13291 13292 /* 13293 * Initialize ADISC response. 13294 */ 13295 static void 13296 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13297 job_request_t *job) 13298 { 13299 fc_packet_t *pkt; 13300 la_els_adisc_t payload; 13301 13302 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13303 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13304 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13305 cmd->cmd_retry_count = 1; 13306 cmd->cmd_ulp_pkt = NULL; 13307 13308 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13309 cmd->cmd_job = job; 13310 13311 pkt = &cmd->cmd_pkt; 13312 13313 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13314 13315 payload.ls_code.ls_code = LA_ELS_ACC; 13316 payload.ls_code.mbz = 0; 13317 13318 mutex_enter(&port->fp_mutex); 13319 payload.nport_id = port->fp_port_id; 13320 payload.hard_addr = port->fp_hard_addr; 13321 mutex_exit(&port->fp_mutex); 13322 13323 payload.port_wwn = port->fp_service_params.nport_ww_name; 13324 payload.node_wwn = port->fp_service_params.node_ww_name; 13325 13326 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 13327 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13328 } 13329 13330 13331 /* 13332 * Hold and install the requested ULP drivers 13333 */ 13334 static void 13335 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13336 { 13337 int len; 13338 int count; 13339 int data_len; 13340 major_t ulp_major; 13341 caddr_t ulp_name; 13342 caddr_t data_ptr; 13343 caddr_t data_buf; 13344 13345 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13346 13347 data_buf = NULL; 13348 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13349 DDI_PROP_DONTPASS, "load-ulp-list", 13350 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13351 return; 13352 } 13353 13354 len = strlen(data_buf); 13355 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13356 13357 data_ptr = data_buf + len + 1; 13358 for (count = 0; count < port->fp_ulp_nload; count++) { 13359 len = strlen(data_ptr) + 1; 13360 ulp_name = kmem_zalloc(len, KM_SLEEP); 13361 bcopy(data_ptr, ulp_name, len); 13362 13363 ulp_major = ddi_name_to_major(ulp_name); 13364 13365 if (ulp_major != (major_t)-1) { 13366 if (modload("drv", ulp_name) < 0) { 13367 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13368 0, NULL, "failed to load %s", 13369 ulp_name); 13370 } 13371 } else { 13372 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13373 "%s isn't a valid driver", ulp_name); 13374 } 13375 13376 kmem_free(ulp_name, len); 13377 data_ptr += len; /* Skip to next field */ 13378 } 13379 13380 /* 13381 * Free the memory allocated by DDI 13382 */ 13383 if (data_buf != NULL) { 13384 kmem_free(data_buf, data_len); 13385 } 13386 } 13387 13388 13389 /* 13390 * Perform LOGO operation 13391 */ 13392 static int 13393 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd,
job_request_t *job) 13394 { 13395 int rval; 13396 fp_cmd_t *cmd; 13397 13398 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13399 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13400 13401 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13402 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13403 13404 mutex_enter(&port->fp_mutex); 13405 mutex_enter(&pd->pd_mutex); 13406 13407 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13408 ASSERT(pd->pd_login_count == 1); 13409 13410 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13411 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13412 cmd->cmd_flags = 0; 13413 cmd->cmd_retry_count = 1; 13414 cmd->cmd_ulp_pkt = NULL; 13415 13416 fp_logo_init(pd, cmd, job); 13417 13418 mutex_exit(&pd->pd_mutex); 13419 mutex_exit(&port->fp_mutex); 13420 13421 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13422 if (rval != FC_SUCCESS) { 13423 fp_iodone(cmd); 13424 } 13425 13426 return (rval); 13427 } 13428 13429 13430 /* 13431 * Perform Port attach callbacks to registered ULPs 13432 */ 13433 static void 13434 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13435 { 13436 fp_soft_attach_t *att; 13437 13438 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13439 att->att_cmd = cmd; 13440 att->att_port = port; 13441 13442 /* 13443 * We need to remember whether or not fctl_busy_port 13444 * succeeded so we know whether or not to call 13445 * fctl_idle_port when the task is complete. 13446 */ 13447 13448 if (fctl_busy_port(port) == 0) { 13449 att->att_need_pm_idle = B_TRUE; 13450 } else { 13451 att->att_need_pm_idle = B_FALSE; 13452 } 13453 13454 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13455 att, KM_SLEEP); 13456 } 13457 13458 13459 /* 13460 * Forward state change notifications on to interested ULPs. 13461 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13462 * real work. 13463 */ 13464 static int 13465 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13466 { 13467 fc_port_clist_t *clist; 13468 13469 clist = kmem_zalloc(sizeof (*clist), sleep); 13470 if (clist == NULL) { 13471 return (FC_NOMEM); 13472 } 13473 13474 clist->clist_state = statec; 13475 13476 mutex_enter(&port->fp_mutex); 13477 clist->clist_flags = port->fp_topology; 13478 mutex_exit(&port->fp_mutex); 13479 13480 clist->clist_port = (opaque_t)port; 13481 clist->clist_len = 0; 13482 clist->clist_size = 0; 13483 clist->clist_map = NULL; 13484 13485 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13486 clist, KM_SLEEP); 13487 13488 return (FC_SUCCESS); 13489 } 13490 13491 13492 /* 13493 * Get name server map 13494 */ 13495 static int 13496 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13497 uint32_t *len, uint32_t sid) 13498 { 13499 int ret; 13500 fctl_ns_req_t *ns_cmd; 13501 13502 /* 13503 * Don't let the allocator do anything for the response; 13504 * we have a buffer ready to fill out.
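	 *
	 * A rough sketch of the intended usage (the count and the
	 * FCTL_GAN_START_ID starting point are illustrative only):
	 *
	 *	uint32_t len = count;
	 *	fc_portmap_t *map = kmem_zalloc(sizeof (*map) * len, KM_SLEEP);
	 *
	 *	(void) fp_ns_getmap(port, job, &map, &len, FCTL_GAN_START_ID);
	 *	(on return, "len" holds the number of entries actually filled)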
13505 */ 13506 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13507 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13508 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13509 13510 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13511 ns_cmd->ns_data_buf = (caddr_t)*map; 13512 13513 ASSERT(ns_cmd != NULL); 13514 13515 ns_cmd->ns_gan_index = 0; 13516 ns_cmd->ns_gan_sid = sid; 13517 ns_cmd->ns_cmd_code = NS_GA_NXT; 13518 ns_cmd->ns_gan_max = *len; 13519 13520 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13521 13522 if (ns_cmd->ns_gan_index != *len) { 13523 *len = ns_cmd->ns_gan_index; 13524 } 13525 ns_cmd->ns_data_len = 0; 13526 ns_cmd->ns_data_buf = NULL; 13527 fctl_free_ns_cmd(ns_cmd); 13528 13529 return (ret); 13530 } 13531 13532 13533 /* 13534 * Create a remote port in Fabric topology by using NS services 13535 */ 13536 static fc_remote_port_t * 13537 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13538 { 13539 int rval; 13540 job_request_t *job; 13541 fctl_ns_req_t *ns_cmd; 13542 fc_remote_port_t *pd; 13543 13544 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13545 13546 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13547 port, d_id); 13548 13549 #ifdef DEBUG 13550 mutex_enter(&port->fp_mutex); 13551 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13552 mutex_exit(&port->fp_mutex); 13553 #endif 13554 13555 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13556 if (job == NULL) { 13557 return (NULL); 13558 } 13559 13560 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13561 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13562 FCTL_NS_NO_DATA_BUF), sleep); 13563 if (ns_cmd == NULL) { 13564 return (NULL); 13565 } 13566 13567 job->job_result = FC_SUCCESS; 13568 ns_cmd->ns_gan_max = 1; 13569 ns_cmd->ns_cmd_code = NS_GA_NXT; 13570 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13571 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13572 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13573 13574 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13575 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13576 fctl_free_ns_cmd(ns_cmd); 13577 13578 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13579 fctl_dealloc_job(job); 13580 return (NULL); 13581 } 13582 fctl_dealloc_job(job); 13583 13584 pd = fctl_get_remote_port_by_did(port, d_id); 13585 13586 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13587 port, d_id, pd); 13588 13589 return (pd); 13590 } 13591 13592 13593 /* 13594 * Check the permissions on an ioctl command. If it requires an EXCLUSIVE 13595 * open to have been performed, return FAILURE to just shut the door on it. If 13596 * the ioctl command isn't in the fp_perm_list table, shut the door on that too. 13597 * 13598 * Certain ioctls perform hardware accesses in FCA drivers, and we need 13599 * to make sure that users open the port for exclusive access while 13600 * performing those operations. 13601 * 13602 * This can prevent a casual user from inflicting damage on the port by 13603 * sending these ioctls from multiple processes/threads (there is no good 13604 * reason why one would need to do that) without actually realizing how 13605 * expensive such commands could turn out to be. 13606 * 13607 * It is also important to note that, even with exclusive access, 13608 * multiple threads can share the same file descriptor and fire down 13609 * commands in parallel.
To prevent that, the driver needs to make sure 13610 * that such commands aren't already in progress. This is taken care of 13611 * by the FP_EXCL_BUSY bit of fp_flag. 13612 */ 13613 static int 13614 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13615 { 13616 int ret = FC_FAILURE; 13617 int count; 13618 13619 for (count = 0; 13620 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13621 count++) { 13622 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13623 if (fp_perm_list[count].fp_open_flag & open_flag) { 13624 ret = FC_SUCCESS; 13625 } 13626 break; 13627 } 13628 } 13629 13630 return (ret); 13631 } 13632 13633 13634 /* 13635 * Bind the port driver's unsolicited and state change callbacks 13636 */ 13637 static int 13638 fp_bind_callbacks(fc_local_port_t *port) 13639 { 13640 fc_fca_bind_info_t bind_info = {0}; 13641 fc_fca_port_info_t *port_info; 13642 int rval = DDI_SUCCESS; 13643 uint16_t class; 13644 int node_namelen, port_namelen; 13645 char *nname = NULL, *pname = NULL; 13646 13647 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13648 13649 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13650 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13651 "node-name", &nname) != DDI_PROP_SUCCESS) { 13652 FP_TRACE(FP_NHEAD1(1, 0), 13653 "fp_bind_callback failed to get node-name"); 13654 } 13655 if (nname) { 13656 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13657 } 13658 13659 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13660 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13661 "port-name", &pname) != DDI_PROP_SUCCESS) { 13662 FP_TRACE(FP_NHEAD1(1, 0), 13663 "fp_bind_callback failed to get port-name"); 13664 } 13665 if (pname) { 13666 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13667 } 13668 13669 if (port->fp_npiv_type == FC_NPIV_PORT) { 13670 bind_info.port_npiv = 1; 13671 } 13672 13673 /* 13674 * fca_bind_port returns the FCA driver's handle for the local 13675 * port instance. If the port number isn't supported, it returns NULL. 13676 * It also sets up callbacks in the FCA for various 13677 * things like state changes, unsolicited ELS, and so on.
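	 * The returned handle is stashed in fp_fca_handle and is passed
	 * back to the FCA on every subsequent call (fca_transport(),
	 * fca_els_send(), fca_get_cap() and friends).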
13678 */ 13679 bind_info.port_statec_cb = fp_statec_cb; 13680 bind_info.port_unsol_cb = fp_unsol_cb; 13681 bind_info.port_num = port->fp_port_num; 13682 bind_info.port_handle = (opaque_t)port; 13683 13684 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13685 13686 /* 13687 * Hold the port driver mutex as the callbacks are bound until the 13688 * service parameters are properly filled in (in order to be able to 13689 * properly respond to unsolicited ELS requests) 13690 */ 13691 mutex_enter(&port->fp_mutex); 13692 13693 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13694 port->fp_fca_dip, port_info, &bind_info); 13695 13696 if (port->fp_fca_handle == NULL) { 13697 rval = DDI_FAILURE; 13698 goto exit; 13699 } 13700 13701 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13702 port->fp_service_params = port_info->pi_login_params; 13703 port->fp_hard_addr = port_info->pi_hard_addr; 13704 13705 /* Copy from the FCA structure to the FP structure */ 13706 port->fp_hba_port_attrs = port_info->pi_attrs; 13707 13708 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13709 port->fp_rnid_init = 1; 13710 bcopy(&port_info->pi_rnid_params.params, 13711 &port->fp_rnid_params, 13712 sizeof (port->fp_rnid_params)); 13713 } else { 13714 port->fp_rnid_init = 0; 13715 } 13716 13717 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13718 if (node_namelen) { 13719 bcopy(&port_info->pi_attrs.sym_node_name, 13720 &port->fp_sym_node_name, 13721 node_namelen); 13722 port->fp_sym_node_namelen = node_namelen; 13723 } 13724 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13725 if (port_namelen) { 13726 bcopy(&port_info->pi_attrs.sym_port_name, 13727 &port->fp_sym_port_name, 13728 port_namelen); 13729 port->fp_sym_port_namelen = port_namelen; 13730 } 13731 13732 /* zero out the normally unused fields right away */ 13733 port->fp_service_params.ls_code.mbz = 0; 13734 port->fp_service_params.ls_code.ls_code = 0; 13735 bzero(&port->fp_service_params.reserved, 13736 sizeof (port->fp_service_params.reserved)); 13737 13738 class = port_info->pi_login_params.class_1.class_opt; 13739 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13740 13741 class = port_info->pi_login_params.class_2.class_opt; 13742 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13743 13744 class = port_info->pi_login_params.class_3.class_opt; 13745 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13746 13747 exit: 13748 if (nname) { 13749 ddi_prop_free(nname); 13750 } 13751 if (pname) { 13752 ddi_prop_free(pname); 13753 } 13754 mutex_exit(&port->fp_mutex); 13755 kmem_free(port_info, sizeof (*port_info)); 13756 13757 return (rval); 13758 } 13759 13760 13761 /* 13762 * Retrieve FCA capabilities 13763 */ 13764 static void 13765 fp_retrieve_caps(fc_local_port_t *port) 13766 { 13767 int rval; 13768 int ub_count; 13769 fc_fcp_dma_t fcp_dma; 13770 fc_reset_action_t action; 13771 fc_dma_behavior_t dma_behavior; 13772 13773 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13774 13775 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13776 FC_CAP_UNSOL_BUF, &ub_count); 13777 13778 switch (rval) { 13779 case FC_CAP_FOUND: 13780 case FC_CAP_SETTABLE: 13781 switch (ub_count) { 13782 case 0: 13783 break; 13784 13785 case -1: 13786 ub_count = fp_unsol_buf_count; 13787 break; 13788 13789 default: 13790 /* 1/4th of total buffers is my share */ 13791 ub_count = 13792 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13793 break; 13794 } 13795 break; 13796 13797 default: 13798 ub_count = 0; 13799 break; 13800 } 13801 13802 mutex_enter(&port->fp_mutex); 13803 port->fp_ub_count = ub_count; 13804 mutex_exit(&port->fp_mutex); 13805 13806 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13807 FC_CAP_POST_RESET_BEHAVIOR, &action); 13808 13809 switch (rval) { 13810 case FC_CAP_FOUND: 13811 case FC_CAP_SETTABLE: 13812 switch (action) { 13813 case FC_RESET_RETURN_NONE: 13814 case FC_RESET_RETURN_ALL: 13815 case FC_RESET_RETURN_OUTSTANDING: 13816 break; 13817 13818 default: 13819 action = FC_RESET_RETURN_NONE; 13820 break; 13821 } 13822 break; 13823 13824 default: 13825 action = FC_RESET_RETURN_NONE; 13826 break; 13827 } 13828 mutex_enter(&port->fp_mutex); 13829 port->fp_reset_action = action; 13830 mutex_exit(&port->fp_mutex); 13831 13832 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13833 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13834 13835 switch (rval) { 13836 case FC_CAP_FOUND: 13837 switch (dma_behavior) { 13838 case FC_ALLOW_STREAMING: 13839 /* FALLTHROUGH */ 13840 case FC_NO_STREAMING: 13841 break; 13842 13843 default: 13844 /* 13845 * If capability was found and the value 13846 * was incorrect assume the worst 13847 */ 13848 dma_behavior = FC_NO_STREAMING; 13849 break; 13850 } 13851 break; 13852 13853 default: 13854 /* 13855 * If capability was not defined - allow streaming; existing 13856 * FCAs should not be affected. 13857 */ 13858 dma_behavior = FC_ALLOW_STREAMING; 13859 break; 13860 } 13861 mutex_enter(&port->fp_mutex); 13862 port->fp_dma_behavior = dma_behavior; 13863 mutex_exit(&port->fp_mutex); 13864 13865 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13866 FC_CAP_FCP_DMA, &fcp_dma); 13867 13868 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13869 fcp_dma != FC_DVMA_SPACE)) { 13870 fcp_dma = FC_DVMA_SPACE; 13871 } 13872 13873 mutex_enter(&port->fp_mutex); 13874 port->fp_fcp_dma = fcp_dma; 13875 mutex_exit(&port->fp_mutex); 13876 } 13877 13878 13879 /* 13880 * Handle Domain, Area changes in the Fabric. 
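 * An RSCN page in area or domain format affects a whole range of
 * D_IDs at once: revalidate every known remote port whose D_ID falls
 * in the affected range (i.e. (pd_port_id & mask) == id), give the
 * orphan list another try, and hand the resulting change list to the
 * ULPs. For an area-format page the caller would pass something like
 * id = 0x290F00 and mask = 0xFFFF00 (the exact values here are
 * illustrative).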
13881 */ 13882 static void 13883 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13884 job_request_t *job, int sleep) 13885 { 13886 #ifdef DEBUG 13887 uint32_t dcnt; 13888 #endif 13889 int rval; 13890 int send; 13891 int index; 13892 int listindex; 13893 int login; 13894 int job_flags; 13895 char ww_name[17]; 13896 uint32_t d_id; 13897 uint32_t count; 13898 fctl_ns_req_t *ns_cmd; 13899 fc_portmap_t *list; 13900 fc_orphan_t *orp; 13901 fc_orphan_t *norp; 13902 fc_orphan_t *prev; 13903 fc_remote_port_t *pd; 13904 fc_remote_port_t *npd; 13905 struct pwwn_hash *head; 13906 13907 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13908 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13909 0, sleep); 13910 if (ns_cmd == NULL) { 13911 mutex_enter(&port->fp_mutex); 13912 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13913 --port->fp_rscn_count; 13914 } 13915 mutex_exit(&port->fp_mutex); 13916 13917 return; 13918 } 13919 ns_cmd->ns_cmd_code = NS_GID_PN; 13920 13921 /* 13922 * We need to get a new count of devices from the 13923 * name server, which will also create any new devices 13924 * as needed. 13925 */ 13926 13927 (void) fp_ns_get_devcount(port, job, 1, sleep); 13928 13929 FP_TRACE(FP_NHEAD1(3, 0), 13930 "fp_validate_area_domain: get_devcount found %d devices", 13931 port->fp_total_devices); 13932 13933 mutex_enter(&port->fp_mutex); 13934 13935 for (count = index = 0; index < pwwn_table_size; index++) { 13936 head = &port->fp_pwwn_table[index]; 13937 pd = head->pwwn_head; 13938 while (pd != NULL) { 13939 mutex_enter(&pd->pd_mutex); 13940 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13941 if ((pd->pd_port_id.port_id & mask) == id && 13942 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13943 count++; 13944 pd->pd_type = PORT_DEVICE_OLD; 13945 pd->pd_flags = PD_ELS_MARK; 13946 } 13947 } 13948 mutex_exit(&pd->pd_mutex); 13949 pd = pd->pd_wwn_hnext; 13950 } 13951 } 13952 13953 #ifdef DEBUG 13954 dcnt = count; 13955 #endif /* DEBUG */ 13956 13957 /* 13958 * Since port->fp_orphan_count is declared an 'int' it is 13959 * theoretically possible that the count could go negative. 13960 * 13961 * This would be bad and if that happens we really do want 13962 * to know. 13963 */ 13964 13965 ASSERT(port->fp_orphan_count >= 0); 13966 13967 count += port->fp_orphan_count; 13968 13969 /* 13970 * We add the port->fp_total_devices value to the count 13971 * in the case where our port is newly attached. This is 13972 * because we haven't done any discovery and we don't have 13973 * any orphans in the port's orphan list. If we do not do 13974 * this addition to count then we won't alloc enough kmem 13975 * to do discovery with. 
13976 */ 13977 13978 if (count == 0) { 13979 count += port->fp_total_devices; 13980 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13981 "0x%x orphans found, using 0x%x", 13982 port->fp_orphan_count, count); 13983 } 13984 13985 mutex_exit(&port->fp_mutex); 13986 13987 /* 13988 * Allocate the change list 13989 */ 13990 13991 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 13992 if (list == NULL) { 13993 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13994 " Not enough memory to service RSCNs" 13995 " for %d ports, continuing...", count); 13996 13997 fctl_free_ns_cmd(ns_cmd); 13998 13999 mutex_enter(&port->fp_mutex); 14000 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14001 --port->fp_rscn_count; 14002 } 14003 mutex_exit(&port->fp_mutex); 14004 14005 return; 14006 } 14007 14008 /* 14009 * Attempt to validate or invalidate the devices that were 14010 * already in the pwwn hash table. 14011 */ 14012 14013 mutex_enter(&port->fp_mutex); 14014 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14015 head = &port->fp_pwwn_table[index]; 14016 npd = head->pwwn_head; 14017 14018 while ((pd = npd) != NULL) { 14019 npd = pd->pd_wwn_hnext; 14020 14021 mutex_enter(&pd->pd_mutex); 14022 if ((pd->pd_port_id.port_id & mask) == id && 14023 pd->pd_flags == PD_ELS_MARK) { 14024 la_wwn_t *pwwn; 14025 14026 job->job_result = FC_SUCCESS; 14027 14028 ((ns_req_gid_pn_t *) 14029 (ns_cmd->ns_cmd_buf))->pwwn = 14030 pd->pd_port_name; 14031 14032 pwwn = &pd->pd_port_name; 14033 d_id = pd->pd_port_id.port_id; 14034 14035 mutex_exit(&pd->pd_mutex); 14036 mutex_exit(&port->fp_mutex); 14037 14038 rval = fp_ns_query(port, ns_cmd, job, 1, 14039 sleep); 14040 if (rval != FC_SUCCESS) { 14041 fc_wwn_to_str(pwwn, ww_name); 14042 14043 FP_TRACE(FP_NHEAD1(3, 0), 14044 "AREA RSCN: PD disappeared; " 14045 "d_id=%x, PWWN=%s", d_id, ww_name); 14046 14047 FP_TRACE(FP_NHEAD2(9, 0), 14048 "N_x Port with D_ID=%x," 14049 " PWWN=%s disappeared from fabric", 14050 d_id, ww_name); 14051 14052 fp_fillout_old_map(list + listindex++, 14053 pd, 1); 14054 } else { 14055 fctl_copy_portmap(list + listindex++, 14056 pd); 14057 14058 mutex_enter(&pd->pd_mutex); 14059 pd->pd_flags = PD_ELS_IN_PROGRESS; 14060 mutex_exit(&pd->pd_mutex); 14061 } 14062 14063 mutex_enter(&port->fp_mutex); 14064 } else { 14065 mutex_exit(&pd->pd_mutex); 14066 } 14067 } 14068 } 14069 14070 mutex_exit(&port->fp_mutex); 14071 14072 ASSERT(listindex == dcnt); 14073 14074 job->job_counter = listindex; 14075 job_flags = job->job_flags; 14076 job->job_flags |= JOB_TYPE_FP_ASYNC; 14077 14078 /* 14079 * Login (if we were the initiator) or validate devices in the 14080 * port map. 14081 */ 14082 14083 for (index = 0; index < listindex; index++) { 14084 pd = list[index].map_pd; 14085 14086 mutex_enter(&pd->pd_mutex); 14087 ASSERT((pd->pd_port_id.port_id & mask) == id); 14088 14089 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14090 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14091 mutex_exit(&pd->pd_mutex); 14092 fp_jobdone(job); 14093 continue; 14094 } 14095 14096 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14097 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14098 d_id = pd->pd_port_id.port_id; 14099 mutex_exit(&pd->pd_mutex); 14100 14101 if ((d_id & mask) == id && send) { 14102 if (login) { 14103 FP_TRACE(FP_NHEAD1(6, 0), 14104 "RSCN and PLOGI request;" 14105 " pd=%p, job=%p d_id=%x, index=%d", pd, 14106 job, d_id, index); 14107 14108 rval = fp_port_login(port, d_id, job, 14109 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14110 if (rval != FC_SUCCESS) { 14111 mutex_enter(&pd->pd_mutex); 14112 pd->pd_flags = PD_IDLE; 14113 mutex_exit(&pd->pd_mutex); 14114 14115 job->job_result = rval; 14116 fp_jobdone(job); 14117 } 14118 14119 FP_TRACE(FP_NHEAD2(4, 0), 14120 "PLOGI succeeded:no skip(1) for " 14121 "D_ID %x", d_id); 14122 list[index].map_flags |= 14123 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14124 } else { 14125 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14126 " pd=%p, job=%p d_id=%x, index=%d", pd, 14127 job, d_id, index); 14128 14129 rval = fp_ns_validate_device(port, pd, job, 14130 0, sleep); 14131 if (rval != FC_SUCCESS) { 14132 fp_jobdone(job); 14133 } 14134 mutex_enter(&pd->pd_mutex); 14135 pd->pd_flags = PD_IDLE; 14136 mutex_exit(&pd->pd_mutex); 14137 } 14138 } else { 14139 FP_TRACE(FP_NHEAD1(6, 0), 14140 "RSCN and NO request sent; pd=%p," 14141 " d_id=%x, index=%d", pd, d_id, index); 14142 14143 mutex_enter(&pd->pd_mutex); 14144 pd->pd_flags = PD_IDLE; 14145 mutex_exit(&pd->pd_mutex); 14146 14147 fp_jobdone(job); 14148 } 14149 } 14150 14151 if (listindex) { 14152 fctl_jobwait(job); 14153 } 14154 job->job_flags = job_flags; 14155 14156 /* 14157 * Orphan list validation. 14158 */ 14159 mutex_enter(&port->fp_mutex); 14160 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14161 orp != NULL; orp = norp) { 14162 norp = orp->orp_next; 14163 mutex_exit(&port->fp_mutex); 14164 14165 job->job_counter = 1; 14166 job->job_result = FC_SUCCESS; 14167 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14168 14169 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14170 14171 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14172 ((ns_resp_gid_pn_t *) 14173 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14174 14175 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14176 if (rval == FC_SUCCESS) { 14177 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14178 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14179 if (pd != NULL) { 14180 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14181 14182 FP_TRACE(FP_NHEAD1(6, 0), 14183 "RSCN and ORPHAN list " 14184 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14185 14186 FP_TRACE(FP_NHEAD2(6, 0), 14187 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14188 " in fabric", d_id, ww_name); 14189 14190 mutex_enter(&port->fp_mutex); 14191 if (prev) { 14192 prev->orp_next = orp->orp_next; 14193 } else { 14194 ASSERT(orp == port->fp_orphan_list); 14195 port->fp_orphan_list = orp->orp_next; 14196 } 14197 port->fp_orphan_count--; 14198 mutex_exit(&port->fp_mutex); 14199 14200 kmem_free(orp, sizeof (*orp)); 14201 fctl_copy_portmap(list + listindex++, pd); 14202 } else { 14203 prev = orp; 14204 } 14205 } else { 14206 prev = orp; 14207 } 14208 mutex_enter(&port->fp_mutex); 14209 } 14210 mutex_exit(&port->fp_mutex); 14211 14212 /* 14213 * One more pass through the list to delist old devices from 14214 * the d_id and pwwn tables and possibly add to the orphan list. 
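	 * Devices still marked PORT_DEVICE_OLD at this point could not be
	 * revalidated: they are invalidated, pulled out of the D_ID and
	 * PWWN tables and, if we were the PLOGI initiator, parked on the
	 * orphan list. Everything else just has its portmap entry
	 * refreshed for the ULP callback.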
14215 */ 14216 14217 for (index = 0; index < listindex; index++) { 14218 pd = list[index].map_pd; 14219 ASSERT(pd != NULL); 14220 14221 /* 14222 * Update PLOGI results; For NS validation 14223 * of orphan list, it is redundant 14224 * 14225 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14226 * appropriate as fctl_copy_portmap() will clear map_flags. 14227 */ 14228 if (list[index].map_flags & 14229 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14230 fctl_copy_portmap(list + index, pd); 14231 list[index].map_flags |= 14232 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14233 } else { 14234 fctl_copy_portmap(list + index, pd); 14235 } 14236 14237 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14238 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14239 pd, pd->pd_port_id.port_id, 14240 pd->pd_port_name.raw_wwn[0], 14241 pd->pd_port_name.raw_wwn[1], 14242 pd->pd_port_name.raw_wwn[2], 14243 pd->pd_port_name.raw_wwn[3], 14244 pd->pd_port_name.raw_wwn[4], 14245 pd->pd_port_name.raw_wwn[5], 14246 pd->pd_port_name.raw_wwn[6], 14247 pd->pd_port_name.raw_wwn[7]); 14248 14249 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14250 "results continued, pd=%p type=%x, flags=%x, state=%x", 14251 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14252 14253 mutex_enter(&pd->pd_mutex); 14254 if (pd->pd_type == PORT_DEVICE_OLD) { 14255 int initiator; 14256 14257 pd->pd_flags = PD_IDLE; 14258 initiator = (pd->pd_recepient == 14259 PD_PLOGI_INITIATOR) ? 1 : 0; 14260 14261 mutex_exit(&pd->pd_mutex); 14262 14263 mutex_enter(&port->fp_mutex); 14264 mutex_enter(&pd->pd_mutex); 14265 14266 pd->pd_state = PORT_DEVICE_INVALID; 14267 fctl_delist_did_table(port, pd); 14268 fctl_delist_pwwn_table(port, pd); 14269 14270 mutex_exit(&pd->pd_mutex); 14271 mutex_exit(&port->fp_mutex); 14272 14273 if (initiator) { 14274 (void) fctl_add_orphan(port, pd, sleep); 14275 } 14276 list[index].map_pd = pd; 14277 } else { 14278 ASSERT(pd->pd_flags == PD_IDLE); 14279 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14280 /* 14281 * Reset LOGO tolerance to zero 14282 */ 14283 fctl_tc_reset(&pd->pd_logo_tc); 14284 } 14285 mutex_exit(&pd->pd_mutex); 14286 } 14287 } 14288 14289 if (ns_cmd) { 14290 fctl_free_ns_cmd(ns_cmd); 14291 } 14292 if (listindex) { 14293 (void) fp_ulp_devc_cb(port, list, listindex, count, 14294 sleep, 0); 14295 } else { 14296 kmem_free(list, sizeof (*list) * count); 14297 14298 mutex_enter(&port->fp_mutex); 14299 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14300 --port->fp_rscn_count; 14301 } 14302 mutex_exit(&port->fp_mutex); 14303 } 14304 } 14305 14306 14307 /* 14308 * Work hard to make sense out of an RSCN page. 
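 * The cases below are keyed on whether the D_ID from the page and the
 * PWWN returned by GPN_ID resolve to already known remote ports:
 * both resolve to the same pd (revalidate in place), neither is known
 * (possibly an orphan coming back), only the PWWN is known (the device
 * moved to a new D_ID), only the D_ID is known (a new PWWN took over
 * that D_ID), or both are known but don't match up (the "weird" case).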
14309 */ 14310 static void 14311 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14312 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14313 int *listindex, int sleep) 14314 { 14315 int rval; 14316 char ww_name[17]; 14317 la_wwn_t *pwwn; 14318 fc_remote_port_t *pwwn_pd; 14319 fc_remote_port_t *did_pd; 14320 14321 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14322 14323 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14324 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14325 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14326 14327 if (did_pd != NULL) { 14328 mutex_enter(&did_pd->pd_mutex); 14329 if (did_pd->pd_flags != PD_IDLE) { 14330 mutex_exit(&did_pd->pd_mutex); 14331 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14332 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14333 port, page->aff_d_id, did_pd); 14334 return; 14335 } 14336 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14337 mutex_exit(&did_pd->pd_mutex); 14338 } 14339 14340 job->job_counter = 1; 14341 14342 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14343 14344 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14345 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14346 14347 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14348 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14349 14350 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14351 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14352 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14353 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14354 ns_cmd->ns_resp_hdr.ct_expln); 14355 14356 job->job_counter = 1; 14357 14358 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14359 /* 14360 * What this means is that the D_ID 14361 * disappeared from the Fabric. 14362 */ 14363 if (did_pd == NULL) { 14364 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14365 " NULL PD disappeared, rval=%x", rval); 14366 return; 14367 } 14368 14369 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14370 14371 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14372 (uint32_t)(uintptr_t)job->job_cb_arg; 14373 14374 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14375 14376 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14377 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14378 14379 FP_TRACE(FP_NHEAD2(9, 0), 14380 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14381 14382 FP_TRACE(FP_NHEAD2(9, 0), 14383 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14384 " fabric", page->aff_d_id, ww_name); 14385 14386 mutex_enter(&did_pd->pd_mutex); 14387 did_pd->pd_flags = PD_IDLE; 14388 mutex_exit(&did_pd->pd_mutex); 14389 14390 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14391 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14392 14393 return; 14394 } 14395 14396 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14397 14398 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14399 /* 14400 * There is no change. Do PLOGI again and add it to 14401 * ULP portmap baggage and return. Note: When RSCNs 14402 * arrive with per page states, the need for PLOGI 14403 * can be determined correctly. 
14404 */ 14405 mutex_enter(&pwwn_pd->pd_mutex); 14406 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14407 mutex_exit(&pwwn_pd->pd_mutex); 14408 14409 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14410 (uint32_t)(uintptr_t)job->job_cb_arg; 14411 14412 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14413 14414 mutex_enter(&pwwn_pd->pd_mutex); 14415 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14416 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14417 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14418 mutex_exit(&pwwn_pd->pd_mutex); 14419 14420 rval = fp_port_login(port, page->aff_d_id, job, 14421 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14422 if (rval == FC_SUCCESS) { 14423 fp_jobwait(job); 14424 rval = job->job_result; 14425 14426 /* 14427 * Reset LOGO tolerance to zero 14428 * Also we are the PLOGI initiator now. 14429 */ 14430 mutex_enter(&pwwn_pd->pd_mutex); 14431 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14432 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14433 mutex_exit(&pwwn_pd->pd_mutex); 14434 } 14435 14436 if (rval == FC_SUCCESS) { 14437 struct fc_portmap *map = 14438 listptr + *listindex - 1; 14439 14440 FP_TRACE(FP_NHEAD2(4, 0), 14441 "PLOGI succeeded: no skip(2)" 14442 " for D_ID %x", page->aff_d_id); 14443 map->map_flags |= 14444 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14445 } else { 14446 FP_TRACE(FP_NHEAD2(9, rval), 14447 "PLOGI to D_ID=%x failed", page->aff_d_id); 14448 14449 FP_TRACE(FP_NHEAD2(9, 0), 14450 "N_x Port with D_ID=%x, PWWN=%s" 14451 " disappeared from fabric", 14452 page->aff_d_id, ww_name); 14453 14454 fp_fillout_old_map(listptr + 14455 *listindex - 1, pwwn_pd, 0); 14456 } 14457 } else { 14458 mutex_exit(&pwwn_pd->pd_mutex); 14459 } 14460 14461 mutex_enter(&did_pd->pd_mutex); 14462 did_pd->pd_flags = PD_IDLE; 14463 mutex_exit(&did_pd->pd_mutex); 14464 14465 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14466 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14467 job->job_result, pwwn_pd); 14468 14469 return; 14470 } 14471 14472 if (did_pd == NULL && pwwn_pd == NULL) { 14473 14474 fc_orphan_t *orp = NULL; 14475 fc_orphan_t *norp = NULL; 14476 fc_orphan_t *prev = NULL; 14477 14478 /* 14479 * Hunt down the orphan list before giving up. 
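		 * If the PWWN is sitting on the orphan list, unlink it
		 * first; fp_create_remote_port_by_ns() below then tries
		 * to recreate the remote port from the name server data.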
14480 */ 14481 14482 mutex_enter(&port->fp_mutex); 14483 if (port->fp_orphan_count) { 14484 14485 for (orp = port->fp_orphan_list; orp; orp = norp) { 14486 norp = orp->orp_next; 14487 14488 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14489 prev = orp; 14490 continue; 14491 } 14492 14493 if (prev) { 14494 prev->orp_next = orp->orp_next; 14495 } else { 14496 ASSERT(orp == 14497 port->fp_orphan_list); 14498 port->fp_orphan_list = 14499 orp->orp_next; 14500 } 14501 port->fp_orphan_count--; 14502 break; 14503 } 14504 } 14505 14506 mutex_exit(&port->fp_mutex); 14507 pwwn_pd = fp_create_remote_port_by_ns(port, 14508 page->aff_d_id, sleep); 14509 14510 if (pwwn_pd != NULL) { 14511 14512 if (orp) { 14513 fc_wwn_to_str(&orp->orp_pwwn, 14514 ww_name); 14515 14516 FP_TRACE(FP_NHEAD2(9, 0), 14517 "N_x Port with D_ID=%x," 14518 " PWWN=%s reappeared in fabric", 14519 page->aff_d_id, ww_name); 14520 14521 kmem_free(orp, sizeof (*orp)); 14522 } 14523 14524 (listptr + *listindex)-> 14525 map_rscn_info.ulp_rscn_count = 14526 (uint32_t)(uintptr_t)job->job_cb_arg; 14527 14528 fctl_copy_portmap(listptr + 14529 (*listindex)++, pwwn_pd); 14530 } 14531 14532 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14533 "Case TWO", page->aff_d_id); 14534 14535 return; 14536 } 14537 14538 if (pwwn_pd != NULL && did_pd == NULL) { 14539 uint32_t old_d_id; 14540 uint32_t d_id = page->aff_d_id; 14541 14542 /* 14543 * What this means is that there is a new D_ID for this 14544 * Port WWN. Take the port device off the D_ID 14545 * list and put it back with the new D_ID. Perform 14546 * PLOGI if it was already logged in. 14547 */ 14548 mutex_enter(&port->fp_mutex); 14549 mutex_enter(&pwwn_pd->pd_mutex); 14550 14551 old_d_id = pwwn_pd->pd_port_id.port_id; 14552 14553 fctl_delist_did_table(port, pwwn_pd); 14554 14555 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14556 (uint32_t)(uintptr_t)job->job_cb_arg; 14557 14558 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14559 &d_id, NULL); 14560 fctl_enlist_did_table(port, pwwn_pd); 14561 14562 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14563 " Case THREE, pd=%p," 14564 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14565 14566 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14567 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14568 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14569 14570 mutex_exit(&pwwn_pd->pd_mutex); 14571 mutex_exit(&port->fp_mutex); 14572 14573 FP_TRACE(FP_NHEAD2(9, 0), 14574 "N_x Port with D_ID=%x, PWWN=%s has a new" 14575 " D_ID=%x now", old_d_id, ww_name, d_id); 14576 14577 rval = fp_port_login(port, page->aff_d_id, job, 14578 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14579 if (rval == FC_SUCCESS) { 14580 fp_jobwait(job); 14581 rval = job->job_result; 14582 } 14583 14584 if (rval != FC_SUCCESS) { 14585 fp_fillout_old_map(listptr + 14586 *listindex - 1, pwwn_pd, 0); 14587 } 14588 } else { 14589 mutex_exit(&pwwn_pd->pd_mutex); 14590 mutex_exit(&port->fp_mutex); 14591 } 14592 14593 return; 14594 } 14595 14596 if (pwwn_pd == NULL && did_pd != NULL) { 14597 fc_portmap_t *ptr; 14598 uint32_t len = 1; 14599 char old_ww_name[17]; 14600 14601 mutex_enter(&did_pd->pd_mutex); 14602 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14603 mutex_exit(&did_pd->pd_mutex); 14604 14605 fc_wwn_to_str(pwwn, ww_name); 14606 14607 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14608 (uint32_t)(uintptr_t)job->job_cb_arg; 14609 14610 /* 14611 * What this means is that there is a new Port WWN for 14612 * this D_ID; mark the port device as old and provide
         * the new PWWN and D_ID combination as new.
         */
        fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);

        FP_TRACE(FP_NHEAD2(9, 0),
            "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
            page->aff_d_id, old_ww_name, ww_name);

        (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
            (uint32_t)(uintptr_t)job->job_cb_arg;

        ptr = listptr + (*listindex)++;

        job->job_counter = 1;

        if (fp_ns_getmap(port, job, &ptr, &len,
            page->aff_d_id - 1) != FC_SUCCESS) {
            (*listindex)--;
        }

        mutex_enter(&did_pd->pd_mutex);
        did_pd->pd_flags = PD_IDLE;
        mutex_exit(&did_pd->pd_mutex);

        return;
    }

    /*
     * A weird case of Port WWN and D_ID existence but not matching up
     * between them. Trust your instincts - take the port device handle
     * off the Port WWN list, fix it with the new Port WWN and put it
     * back. In the meantime, mark the port device corresponding to the
     * old Port WWN as OLD.
     */
    FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
        " did_pd=%p", pwwn_pd, did_pd);

    mutex_enter(&port->fp_mutex);
    mutex_enter(&pwwn_pd->pd_mutex);

    pwwn_pd->pd_type = PORT_DEVICE_OLD;
    pwwn_pd->pd_state = PORT_DEVICE_INVALID;
    fctl_delist_did_table(port, pwwn_pd);
    fctl_delist_pwwn_table(port, pwwn_pd);

    FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
        " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
        pwwn_pd->pd_port_id.port_id,
        pwwn_pd->pd_port_name.raw_wwn[0],
        pwwn_pd->pd_port_name.raw_wwn[1],
        pwwn_pd->pd_port_name.raw_wwn[2],
        pwwn_pd->pd_port_name.raw_wwn[3],
        pwwn_pd->pd_port_name.raw_wwn[4],
        pwwn_pd->pd_port_name.raw_wwn[5],
        pwwn_pd->pd_port_name.raw_wwn[6],
        pwwn_pd->pd_port_name.raw_wwn[7]);

    mutex_exit(&pwwn_pd->pd_mutex);
    mutex_exit(&port->fp_mutex);

    (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
        (uint32_t)(uintptr_t)job->job_cb_arg;

    fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);

    mutex_enter(&port->fp_mutex);
    mutex_enter(&did_pd->pd_mutex);

    fctl_delist_pwwn_table(port, did_pd);

    (listptr + *listindex)->map_rscn_info.ulp_rscn_count =
        (uint32_t)(uintptr_t)job->job_cb_arg;

    fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
    fctl_enlist_pwwn_table(port, did_pd);

    FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
        " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
        did_pd->pd_port_id.port_id, did_pd->pd_state,
        did_pd->pd_port_name.raw_wwn[0],
        did_pd->pd_port_name.raw_wwn[1],
        did_pd->pd_port_name.raw_wwn[2],
        did_pd->pd_port_name.raw_wwn[3],
        did_pd->pd_port_name.raw_wwn[4],
        did_pd->pd_port_name.raw_wwn[5],
        did_pd->pd_port_name.raw_wwn[6],
        did_pd->pd_port_name.raw_wwn[7]);

    if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
        (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
        mutex_exit(&did_pd->pd_mutex);
        mutex_exit(&port->fp_mutex);

        rval = fp_port_login(port, page->aff_d_id, job,
            FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
        if (rval == FC_SUCCESS) {
            fp_jobwait(job);
            if (job->job_result != FC_SUCCESS) {
                fp_fillout_old_map(listptr +
                    *listindex - 1, did_pd, 0);
            }
        } else {
            fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
        }
    } else {
        mutex_exit(&did_pd->pd_mutex);
        mutex_exit(&port->fp_mutex);
    }

    mutex_enter(&did_pd->pd_mutex);
    did_pd->pd_flags = PD_IDLE;
    mutex_exit(&did_pd->pd_mutex);
}


/*
 * Check with NS for the presence of this port WWN
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
    la_wwn_t pwwn;
    uint32_t flags;
    fctl_ns_req_t *ns_cmd;

    flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
    ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
        sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
        flags, sleep);
    if (ns_cmd == NULL) {
        return (FC_NOMEM);
    }

    mutex_enter(&pd->pd_mutex);
    pwwn = pd->pd_port_name;
    mutex_exit(&pd->pd_mutex);

    ns_cmd->ns_cmd_code = NS_GID_PN;
    ns_cmd->ns_pd = pd;
    ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
    ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
    ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

    return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
    int count;

    if (lilp_map->lilp_length == 0) {
        return (FC_FAILURE);
    }

    for (count = 0; count < lilp_map->lilp_length; count++) {
        if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
            FC_SUCCESS) {
            return (FC_FAILURE);
        }
    }

    return (FC_SUCCESS);
}


/*
 * Sanity check if the AL_PA is a valid address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
    int count;

    for (count = 0; count < sizeof (fp_valid_alpas); count++) {
        if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
            return (FC_SUCCESS);
        }
    }

    return (FC_FAILURE);
}
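
/*
 * Illustrative note (added commentary, not from the original driver):
 * a LILP map whose AL_PA list is { 0x01, 0x02, 0xE8 } passes the two
 * checks above, while a map containing 0x03 fails fp_is_valid_alpa()
 * because 0x03 is not one of the loop addresses defined by FC-AL; the
 * value 0x00 (the FL_Port address) is accepted explicitly.
 */
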
/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
    fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg;

    fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
        ub_spec->buf->ub_frame.type);
    kmem_free(ub_spec, sizeof (*ub_spec));
}


/*
 * Perform message reporting in a consistent manner. Unless there is
 * a strong reason NOT to use this function (which is very very rare)
 * all message reporting should go through this.
 */
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
    caddr_t buf;
    va_list ap;

    switch (level) {
    case CE_NOTE:
        if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
            return;
        }
        break;

    case CE_WARN:
        if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
            return;
        }
        break;
    }

    buf = kmem_zalloc(256, KM_NOSLEEP);
    if (buf == NULL) {
        return;
    }

    (void) sprintf(buf, "fp(%d): ", port->fp_instance);

    va_start(ap, fmt);
    (void) vsprintf(buf + strlen(buf), fmt, ap);
    va_end(ap);

    if (fc_errno) {
        char *errmsg;

        (void) fc_ulp_error(fc_errno, &errmsg);
        (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
    } else {
        if (pkt) {
            caddr_t state, reason, action, expln;

            (void) fc_ulp_pkt_error(pkt, &state, &reason,
                &action, &expln);

            (void) sprintf(buf + strlen(buf),
                " state=%s, reason=%s", state, reason);

            if (pkt->pkt_resp_resid) {
                (void) sprintf(buf + strlen(buf),
                    " resp resid=%x\n", pkt->pkt_resp_resid);
            }
        }
    }

    switch (dest) {
    case FP_CONSOLE_ONLY:
        cmn_err(level, "^%s", buf);
        break;

    case FP_LOG_ONLY:
        cmn_err(level, "!%s", buf);
        break;

    default:
        cmn_err(level, "%s", buf);
        break;
    }

    kmem_free(buf, 256);
}
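
/*
 * Illustrative call (added commentary, not from the original driver):
 *
 *	fp_printf(port, CE_WARN, FP_LOG_ONLY, rval, NULL,
 *	    "PLOGI to D_ID=%x failed", d_id);
 *
 * For a nonzero rval, and with FP_FATAL_MESSAGES set in fp_verbose,
 * this logs "fp(<instance>): PLOGI to D_ID=... failed FC Error=..."
 * to the system log only.
 */


/*
 * Log in to the remote port identified by the Port WWN supplied by the
 * caller (FCIO_DEV_LOGIN ioctl): resolve the PWWN to a D_ID, through the
 * name server on fabric topologies, perform a PLOGI on the user's behalf
 * and notify the ULPs of the new login.
 */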
static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
    int ret;
    uint32_t d_id;
    la_wwn_t pwwn;
    fc_remote_port_t *pd = NULL;
    fc_remote_port_t *held_pd = NULL;
    fctl_ns_req_t *ns_cmd;
    fc_portmap_t *changelist;

    bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

    mutex_enter(&port->fp_mutex);
    if (FC_IS_TOP_SWITCH(port->fp_topology)) {
        mutex_exit(&port->fp_mutex);
        job->job_counter = 1;

        job->job_result = FC_SUCCESS;

        ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
            sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
            FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

        ASSERT(ns_cmd != NULL);

        ns_cmd->ns_cmd_code = NS_GID_PN;
        ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

        ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

        if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
            if (ret != FC_SUCCESS) {
                fcio->fcio_errno = ret;
            } else {
                fcio->fcio_errno = job->job_result;
            }
            fctl_free_ns_cmd(ns_cmd);
            return (EIO);
        }
        d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
        fctl_free_ns_cmd(ns_cmd);
    } else {
        mutex_exit(&port->fp_mutex);

        held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
        if (held_pd == NULL) {
            fcio->fcio_errno = FC_BADWWN;
            return (EIO);
        }
        pd = held_pd;

        mutex_enter(&pd->pd_mutex);
        d_id = pd->pd_port_id.port_id;
        mutex_exit(&pd->pd_mutex);
    }

    job->job_counter = 1;

    pd = fctl_get_remote_port_by_did(port, d_id);

    if (pd) {
        mutex_enter(&pd->pd_mutex);
        if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
            pd->pd_login_count++;
            mutex_exit(&pd->pd_mutex);

            fcio->fcio_errno = FC_SUCCESS;
            if (held_pd) {
                fctl_release_remote_port(held_pd);
            }

            return (0);
        }
        mutex_exit(&pd->pd_mutex);
    } else {
        mutex_enter(&port->fp_mutex);
        if (FC_IS_TOP_SWITCH(port->fp_topology)) {
            mutex_exit(&port->fp_mutex);
            pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
            if (pd == NULL) {
                fcio->fcio_errno = FC_FAILURE;
                if (held_pd) {
                    fctl_release_remote_port(held_pd);
                }
                return (EIO);
            }
        } else {
            mutex_exit(&port->fp_mutex);
        }
    }

    job->job_flags &= ~JOB_TYPE_FP_ASYNC;
    job->job_counter = 1;

    ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
        KM_SLEEP, pd, NULL);

    if (ret != FC_SUCCESS) {
        fcio->fcio_errno = ret;
        if (held_pd) {
            fctl_release_remote_port(held_pd);
        }
        return (EIO);
    }
    fp_jobwait(job);

    fcio->fcio_errno = job->job_result;

    if (held_pd) {
        fctl_release_remote_port(held_pd);
    }

    if (job->job_result != FC_SUCCESS) {
        return (EIO);
    }

    pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
    if (pd == NULL) {
        fcio->fcio_errno = FC_BADDEV;
        return (ENODEV);
    }

    changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

    fctl_copy_portmap(changelist, pd);
    changelist->map_type = PORT_DEVICE_USER_LOGIN;

    (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

    mutex_enter(&pd->pd_mutex);
    pd->pd_type = PORT_DEVICE_NOCHANGE;
    mutex_exit(&pd->pd_mutex);

    fctl_release_remote_port(pd);

    return (0);
}
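
/*
 * Log out from the remote port identified by the Port WWN supplied by
 * the caller (FCIO_DEV_LOGOUT ioctl). If more than one login is
 * outstanding, only the login count is dropped; the last logout sends
 * a LOGO ELS and notifies the ULPs.
 */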
static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
    la_wwn_t pwwn;
    fp_cmd_t *cmd;
    fc_portmap_t *changelist;
    fc_remote_port_t *pd;

    bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

    pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
    if (pd == NULL) {
        fcio->fcio_errno = FC_BADWWN;
        return (ENXIO);
    }

    mutex_enter(&pd->pd_mutex);
    if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
        fcio->fcio_errno = FC_LOGINREQ;
        mutex_exit(&pd->pd_mutex);

        fctl_release_remote_port(pd);

        return (EINVAL);
    }

    ASSERT(pd->pd_login_count >= 1);

    if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
        fcio->fcio_errno = FC_FAILURE;
        mutex_exit(&pd->pd_mutex);

        fctl_release_remote_port(pd);

        return (EBUSY);
    }

    if (pd->pd_login_count > 1) {
        pd->pd_login_count--;
        fcio->fcio_errno = FC_SUCCESS;
        mutex_exit(&pd->pd_mutex);

        changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

        fctl_copy_portmap(changelist, pd);
        changelist->map_type = PORT_DEVICE_USER_LOGOUT;

        fctl_release_remote_port(pd);

        (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

        return (0);
    }

    pd->pd_flags = PD_ELS_IN_PROGRESS;
    mutex_exit(&pd->pd_mutex);

    job->job_counter = 1;

    cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
        FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
    if (cmd == NULL) {
        fcio->fcio_errno = FC_NOMEM;
        fctl_release_remote_port(pd);

        mutex_enter(&pd->pd_mutex);
        pd->pd_flags = PD_IDLE;
        mutex_exit(&pd->pd_mutex);

        return (ENOMEM);
    }

    mutex_enter(&port->fp_mutex);
    mutex_enter(&pd->pd_mutex);

    cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
    cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
    cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
    cmd->cmd_retry_count = 1;
    cmd->cmd_ulp_pkt = NULL;

    fp_logo_init(pd, cmd, job);

    mutex_exit(&pd->pd_mutex);
    mutex_exit(&port->fp_mutex);

    if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
        mutex_enter(&pd->pd_mutex);
        pd->pd_flags = PD_IDLE;
        mutex_exit(&pd->pd_mutex);

        fp_free_pkt(cmd);
        fctl_release_remote_port(pd);

        return (EIO);
    }

    fp_jobwait(job);

    fcio->fcio_errno = job->job_result;
    if (job->job_result != FC_SUCCESS) {
        mutex_enter(&pd->pd_mutex);
        pd->pd_flags = PD_IDLE;
        mutex_exit(&pd->pd_mutex);

        fctl_release_remote_port(pd);

        return (EIO);
    }

    ASSERT(pd != NULL);

    changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

    fctl_copy_portmap(changelist, pd);
    changelist->map_type = PORT_DEVICE_USER_LOGOUT;
    changelist->map_state = PORT_DEVICE_INVALID;

    mutex_enter(&port->fp_mutex);
    mutex_enter(&pd->pd_mutex);

    fctl_delist_did_table(port, pd);
    fctl_delist_pwwn_table(port, pd);
    pd->pd_flags = PD_IDLE;

    mutex_exit(&pd->pd_mutex);
    mutex_exit(&port->fp_mutex);

    (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

    fctl_release_remote_port(pd);

    return (0);
}


/*
 * Send a syslog event for adapter port level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
    nvlist_t *attr_list;

    if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
        KM_SLEEP) != DDI_SUCCESS) {
        goto alloc_failed;
    }

    if (nvlist_add_uint32(attr_list, "instance",
        port->fp_instance) != DDI_SUCCESS) {
        goto error;
    }

    if (nvlist_add_byte_array(attr_list, "port-wwn",
        port->fp_service_params.nport_ww_name.raw_wwn,
        sizeof (la_wwn_t)) != DDI_SUCCESS) {
        goto error;
    }

    (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
        subclass, attr_list, NULL, DDI_SLEEP);

    nvlist_free(attr_list);
    return;

error:
    nvlist_free(attr_list);
alloc_failed:
    FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}
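
/*
 * Send a syslog event for target (remote port) level events; the event
 * carries the local port WWN plus the target's Port WWN and D_ID.
 */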
static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
    nvlist_t *attr_list;

    if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
        KM_SLEEP) != DDI_SUCCESS) {
        goto alloc_failed;
    }

    if (nvlist_add_uint32(attr_list, "instance",
        port->fp_instance) != DDI_SUCCESS) {
        goto error;
    }

    if (nvlist_add_byte_array(attr_list, "port-wwn",
        port->fp_service_params.nport_ww_name.raw_wwn,
        sizeof (la_wwn_t)) != DDI_SUCCESS) {
        goto error;
    }

    if (nvlist_add_byte_array(attr_list, "target-port-wwn",
        tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
        goto error;
    }

    if (nvlist_add_uint32(attr_list, "target-port-id",
        port_id) != DDI_SUCCESS) {
        goto error;
    }

    (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
        subclass, attr_list, NULL, DDI_SLEEP);

    nvlist_free(attr_list);
    return;

error:
    nvlist_free(attr_list);
alloc_failed:
    FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
    switch (rm_state) {
    case PORT_DEVICE_LOGGED_IN:
        return (FC_HBA_PORTSTATE_ONLINE);
    case PORT_DEVICE_VALID:
    case PORT_DEVICE_INVALID:
    default:
        return (FC_HBA_PORTSTATE_UNKNOWN);
    }
}