1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * NOT a DDI compliant Sun Fibre Channel port driver(fp) 26 * 27 */ 28 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/param.h> 32 #include <sys/errno.h> 33 #include <sys/uio.h> 34 #include <sys/buf.h> 35 #include <sys/modctl.h> 36 #include <sys/open.h> 37 #include <sys/file.h> 38 #include <sys/kmem.h> 39 #include <sys/poll.h> 40 #include <sys/conf.h> 41 #include <sys/thread.h> 42 #include <sys/var.h> 43 #include <sys/cmn_err.h> 44 #include <sys/stat.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/promif.h> 48 #include <sys/nvpair.h> 49 #include <sys/byteorder.h> 50 #include <sys/scsi/scsi.h> 51 #include <sys/fibre-channel/fc.h> 52 #include <sys/fibre-channel/impl/fc_ulpif.h> 53 #include <sys/fibre-channel/impl/fc_fcaif.h> 54 #include <sys/fibre-channel/impl/fctl_private.h> 55 #include <sys/fibre-channel/impl/fc_portif.h> 56 #include <sys/fibre-channel/impl/fp.h> 57 58 /* These are defined in fctl.c! 
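 * did_table_size and pwwn_table_size give the number of buckets in the
 * per-port D_ID and PWWN hash tables that fp_attach_handler() allocates
 * for remote port lookups.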
*/ 59 extern int did_table_size; 60 extern int pwwn_table_size; 61 62 static struct cb_ops fp_cb_ops = { 63 fp_open, /* open */ 64 fp_close, /* close */ 65 nodev, /* strategy */ 66 nodev, /* print */ 67 nodev, /* dump */ 68 nodev, /* read */ 69 nodev, /* write */ 70 fp_ioctl, /* ioctl */ 71 nodev, /* devmap */ 72 nodev, /* mmap */ 73 nodev, /* segmap */ 74 nochpoll, /* chpoll */ 75 ddi_prop_op, /* cb_prop_op */ 76 0, /* streamtab */ 77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 78 CB_REV, /* rev */ 79 nodev, /* aread */ 80 nodev /* awrite */ 81 }; 82 83 static struct dev_ops fp_ops = { 84 DEVO_REV, /* build revision */ 85 0, /* reference count */ 86 fp_getinfo, /* getinfo */ 87 nulldev, /* identify - Obsoleted */ 88 nulldev, /* probe */ 89 fp_attach, /* attach */ 90 fp_detach, /* detach */ 91 nodev, /* reset */ 92 &fp_cb_ops, /* cb_ops */ 93 NULL, /* bus_ops */ 94 fp_power /* power */ 95 }; 96 97 #define FP_VERSION "1.98" 98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION 99 100 char *fp_version = FP_NAME_VERSION; 101 102 static struct modldrv modldrv = { 103 &mod_driverops, /* Type of Module */ 104 FP_NAME_VERSION, /* Name/Version of fp */ 105 &fp_ops /* driver ops */ 106 }; 107 108 static struct modlinkage modlinkage = { 109 MODREV_1, /* Rev of the loadable modules system */ 110 &modldrv, /* NULL terminated list of */ 111 NULL /* Linkage structures */ 112 }; 113 114 115 116 static uint16_t ns_reg_cmds[] = { 117 NS_RPN_ID, 118 NS_RNN_ID, 119 NS_RCS_ID, 120 NS_RFT_ID, 121 NS_RPT_ID, 122 NS_RSPN_ID, 123 NS_RSNN_NN 124 }; 125 126 struct fp_xlat { 127 uchar_t xlat_state; 128 int xlat_rval; 129 } fp_xlat [] = { 130 { FC_PKT_SUCCESS, FC_SUCCESS }, 131 { FC_PKT_REMOTE_STOP, FC_FAILURE }, 132 { FC_PKT_LOCAL_RJT, FC_FAILURE }, 133 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT }, 134 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT }, 135 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY }, 136 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY }, 137 { FC_PKT_NPORT_BSY, FC_PBUSY }, 138 { FC_PKT_FABRIC_BSY, FC_FBUSY }, 139 { FC_PKT_LS_RJT, FC_FAILURE }, 140 { FC_PKT_BA_RJT, FC_FAILURE }, 141 { FC_PKT_TIMEOUT, FC_FAILURE }, 142 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR }, 143 { FC_PKT_FAILURE, FC_FAILURE }, 144 { FC_PKT_PORT_OFFLINE, FC_OFFLINE } 145 }; 146 147 static uchar_t fp_valid_alpas[] = { 148 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 149 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 150 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 151 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 152 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 153 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, 154 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73, 155 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82, 156 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 157 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, 158 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 159 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 160 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 161 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF 162 }; 163 164 static struct fp_perms { 165 uint16_t fp_ioctl_cmd; 166 uchar_t fp_open_flag; 167 } fp_perm_list [] = { 168 { FCIO_GET_NUM_DEVS, FP_OPEN }, 169 { FCIO_GET_DEV_LIST, FP_OPEN }, 170 { FCIO_GET_SYM_PNAME, FP_OPEN }, 171 { FCIO_GET_SYM_NNAME, FP_OPEN }, 172 { FCIO_SET_SYM_PNAME, FP_EXCL }, 173 { FCIO_SET_SYM_NNAME, FP_EXCL }, 174 { FCIO_GET_LOGI_PARAMS, FP_OPEN }, 175 { FCIO_DEV_LOGIN, FP_EXCL }, 176 { FCIO_DEV_LOGOUT, FP_EXCL }, 177 { FCIO_GET_STATE, FP_OPEN }, 
178 { FCIO_DEV_REMOVE, FP_EXCL }, 179 { FCIO_GET_FCODE_REV, FP_OPEN }, 180 { FCIO_GET_FW_REV, FP_OPEN }, 181 { FCIO_GET_DUMP_SIZE, FP_OPEN }, 182 { FCIO_FORCE_DUMP, FP_EXCL }, 183 { FCIO_GET_DUMP, FP_OPEN }, 184 { FCIO_GET_TOPOLOGY, FP_OPEN }, 185 { FCIO_RESET_LINK, FP_EXCL }, 186 { FCIO_RESET_HARD, FP_EXCL }, 187 { FCIO_RESET_HARD_CORE, FP_EXCL }, 188 { FCIO_DIAG, FP_OPEN }, 189 { FCIO_NS, FP_EXCL }, 190 { FCIO_DOWNLOAD_FW, FP_EXCL }, 191 { FCIO_DOWNLOAD_FCODE, FP_EXCL }, 192 { FCIO_LINK_STATUS, FP_OPEN }, 193 { FCIO_GET_HOST_PARAMS, FP_OPEN }, 194 { FCIO_GET_NODE_ID, FP_OPEN }, 195 { FCIO_SET_NODE_ID, FP_EXCL }, 196 { FCIO_SEND_NODE_ID, FP_OPEN }, 197 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN }, 198 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN }, 199 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN }, 200 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN }, 201 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN }, 202 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN }, 203 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN }, 204 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN }, 205 { FCIO_DELETE_NPIV_PORT, FP_OPEN }, 206 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN }, 207 { FCIO_CREATE_NPIV_PORT, FP_OPEN }, 208 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN } 209 }; 210 211 static char *fp_pm_comps[] = { 212 "NAME=FC Port", 213 "0=Port Down", 214 "1=Port Up" 215 }; 216 217 218 #ifdef _LITTLE_ENDIAN 219 #define MAKE_BE_32(x) { \ 220 uint32_t *ptr1, i; \ 221 ptr1 = (uint32_t *)(x); \ 222 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \ 223 *ptr1 = BE_32(*ptr1); \ 224 ptr1++; \ 225 } \ 226 } 227 #else 228 #define MAKE_BE_32(x) 229 #endif 230 231 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES); 232 static uint32_t fp_options = 0; 233 234 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY; 235 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */ 236 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */ 237 unsigned int fp_offline_ticker; /* seconds */ 238 239 /* 240 * Driver global variable to anchor the list of soft state structs for 241 * all fp driver instances. Used with the Solaris DDI soft state functions. 
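 * (Each per-instance struct is retrieved with
 * ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)),
 * as done in fp_detach(), fp_getinfo() and fp_power() below.)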
242 */ 243 static void *fp_driver_softstate; 244 245 static clock_t fp_retry_ticks; 246 static clock_t fp_offline_ticks; 247 248 static int fp_retry_ticker; 249 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 250 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 251 252 static int fp_log_size = FP_LOG_SIZE; 253 static int fp_trace = FP_TRACE_DEFAULT; 254 static fc_trace_logq_t *fp_logq = NULL; 255 256 int fp_get_adapter_paths(char *pathList, int count); 257 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 258 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 259 la_wwn_t tgt_pwwn, uint32_t port_id); 260 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 261 static void fp_init_symbolic_names(fc_local_port_t *port); 262 263 264 /* 265 * Perform global initialization 266 */ 267 int 268 _init(void) 269 { 270 int ret; 271 272 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 273 sizeof (struct fc_local_port), 8)) != 0) { 274 return (ret); 275 } 276 277 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 278 ddi_soft_state_fini(&fp_driver_softstate); 279 return (ret); 280 } 281 282 fp_logq = fc_trace_alloc_logq(fp_log_size); 283 284 if ((ret = mod_install(&modlinkage)) != 0) { 285 fc_trace_free_logq(fp_logq); 286 ddi_soft_state_fini(&fp_driver_softstate); 287 scsi_hba_fini(&modlinkage); 288 } 289 290 return (ret); 291 } 292 293 294 /* 295 * Prepare for driver unload 296 */ 297 int 298 _fini(void) 299 { 300 int ret; 301 302 if ((ret = mod_remove(&modlinkage)) == 0) { 303 fc_trace_free_logq(fp_logq); 304 ddi_soft_state_fini(&fp_driver_softstate); 305 scsi_hba_fini(&modlinkage); 306 } 307 308 return (ret); 309 } 310 311 312 /* 313 * Request mod_info() to handle all cases 314 */ 315 int 316 _info(struct modinfo *modinfo) 317 { 318 return (mod_info(&modlinkage, modinfo)); 319 } 320 321 322 /* 323 * fp_attach: 324 * 325 * The respective cmd handlers take care of performing 326 * ULP related invocations 327 */ 328 static int 329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 int rval; 332 333 /* 334 * We check the value of fp_offline_ticker at this 335 * point. The variable is global for the driver and 336 * not specific to an instance. 337 * 338 * If there is no user-defined value found in /etc/system 339 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 340 * The minimum setting for this offline timeout according 341 * to the FC-FS2 standard (Fibre Channel Framing and 342 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 343 * 344 * We do not recommend setting the value to less than 10 345 * seconds (RA_TOV) or more than 90 seconds. If this 346 * variable is greater than 90 seconds then drivers above 347 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 348 */ 349 350 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 351 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 352 FP_OFFLINE_TICKER); 353 354 if ((fp_offline_ticker < 10) || 355 (fp_offline_ticker > 90)) { 356 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 357 "%d second(s). This is outside the " 358 "recommended range of 10..90 seconds", 359 fp_offline_ticker); 360 } 361 362 /* 363 * Tick every second when there are commands to retry. 
364 * It should tick at the least granular value of pkt_timeout 365 * (which is one second) 366 */ 367 fp_retry_ticker = 1; 368 369 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000); 370 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000); 371 372 switch (cmd) { 373 case DDI_ATTACH: 374 rval = fp_attach_handler(dip); 375 break; 376 377 case DDI_RESUME: 378 rval = fp_resume_handler(dip); 379 break; 380 381 default: 382 rval = DDI_FAILURE; 383 break; 384 } 385 return (rval); 386 } 387 388 389 /* 390 * fp_detach: 391 * 392 * If a ULP fails to handle cmd request converse of 393 * cmd is invoked for ULPs that previously succeeded 394 * cmd request. 395 */ 396 static int 397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 398 { 399 int rval = DDI_FAILURE; 400 fc_local_port_t *port; 401 fc_attach_cmd_t converse; 402 uint8_t cnt; 403 404 if ((port = ddi_get_soft_state(fp_driver_softstate, 405 ddi_get_instance(dip))) == NULL) { 406 return (DDI_FAILURE); 407 } 408 409 mutex_enter(&port->fp_mutex); 410 411 if (port->fp_ulp_attach) { 412 mutex_exit(&port->fp_mutex); 413 return (DDI_FAILURE); 414 } 415 416 switch (cmd) { 417 case DDI_DETACH: 418 if (port->fp_task != FP_TASK_IDLE) { 419 mutex_exit(&port->fp_mutex); 420 return (DDI_FAILURE); 421 } 422 423 /* Let's attempt to quit the job handler gracefully */ 424 port->fp_soft_state |= FP_DETACH_INPROGRESS; 425 426 mutex_exit(&port->fp_mutex); 427 converse = FC_CMD_ATTACH; 428 if (fctl_detach_ulps(port, FC_CMD_DETACH, 429 &modlinkage) != FC_SUCCESS) { 430 mutex_enter(&port->fp_mutex); 431 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 432 mutex_exit(&port->fp_mutex); 433 rval = DDI_FAILURE; 434 break; 435 } 436 437 mutex_enter(&port->fp_mutex); 438 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt); 439 cnt++) { 440 mutex_exit(&port->fp_mutex); 441 delay(drv_usectohz(1000000)); 442 mutex_enter(&port->fp_mutex); 443 } 444 445 if (port->fp_job_head) { 446 mutex_exit(&port->fp_mutex); 447 rval = DDI_FAILURE; 448 break; 449 } 450 mutex_exit(&port->fp_mutex); 451 452 rval = fp_detach_handler(port); 453 break; 454 455 case DDI_SUSPEND: 456 mutex_exit(&port->fp_mutex); 457 converse = FC_CMD_RESUME; 458 if (fctl_detach_ulps(port, FC_CMD_SUSPEND, 459 &modlinkage) != FC_SUCCESS) { 460 rval = DDI_FAILURE; 461 break; 462 } 463 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) { 464 (void) callb_generic_cpr(&port->fp_cpr_info, 465 CB_CODE_CPR_RESUME); 466 } 467 break; 468 469 default: 470 mutex_exit(&port->fp_mutex); 471 break; 472 } 473 474 /* 475 * Use softint to perform reattach. Mark fp_ulp_attach so we 476 * don't attempt to do this repeatedly on behalf of some persistent 477 * caller. 478 */ 479 if (rval != DDI_SUCCESS) { 480 mutex_enter(&port->fp_mutex); 481 port->fp_ulp_attach = 1; 482 483 /* 484 * If the port is in the low power mode then there is 485 * possibility that fca too could be in low power mode. 486 * Try to raise the power before calling attach ulps. 
 */

		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
			mutex_exit(&port->fp_mutex);
			(void) pm_raise_power(port->fp_port_dip,
			    FP_PM_COMPONENT, FP_PM_PORT_UP);
		} else {
			mutex_exit(&port->fp_mutex);
		}

		fp_attach_ulps(port, converse);

		mutex_enter(&port->fp_mutex);
		while (port->fp_ulp_attach) {
			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
		}

		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

		/*
		 * Mark state as detach failed so asynchronous ULP attach
		 * events (downstream, not the ones we're initiating with
		 * the call to fp_attach_ulps) are not honored. We're
		 * really still in pending detach.
		 */
		port->fp_soft_state |= FP_DETACH_FAILED;

		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}


/*
 * fp_getinfo:
 *	Given the device number, return either the
 *	dev_info_t pointer or the instance number.
 */

/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int		rval;
	minor_t		instance;
	fc_local_port_t	*port;

	rval = DDI_SUCCESS;
	instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((port = ddi_get_soft_state(fp_driver_softstate,
		    instance)) == NULL) {
			rval = DDI_FAILURE;
			break;
		}
		*result = (void *)port->fp_port_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}


/*
 * Entry point for power up and power down request from kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int		rval = DDI_FAILURE;
	fc_local_port_t	*port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * PM framework goofed up. We don't
			 * have any PM components. Let's never go down.
616 */ 617 mutex_exit(&port->fp_mutex); 618 break; 619 620 } 621 622 if (port->fp_ulp_attach) { 623 /* We shouldn't let the power go down */ 624 mutex_exit(&port->fp_mutex); 625 break; 626 } 627 628 /* 629 * Not a whole lot to do if we are detaching 630 */ 631 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 632 port->fp_pm_level = FP_PM_PORT_DOWN; 633 mutex_exit(&port->fp_mutex); 634 rval = DDI_SUCCESS; 635 break; 636 } 637 638 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 639 port->fp_pm_level = FP_PM_PORT_DOWN; 640 641 rval = fp_power_down(port); 642 if (rval != DDI_SUCCESS) { 643 port->fp_pm_level = FP_PM_PORT_UP; 644 ASSERT(!(port->fp_soft_state & 645 FP_SOFT_POWER_DOWN)); 646 } else { 647 ASSERT(port->fp_soft_state & 648 FP_SOFT_POWER_DOWN); 649 } 650 } 651 mutex_exit(&port->fp_mutex); 652 break; 653 654 default: 655 break; 656 } 657 658 return (rval); 659 } 660 661 662 /* 663 * Open FC port devctl node 664 */ 665 static int 666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 667 { 668 int instance; 669 fc_local_port_t *port; 670 671 if (otype != OTYP_CHR) { 672 return (EINVAL); 673 } 674 675 /* 676 * This is not a toy to play with. Allow only powerful 677 * users (hopefully knowledgeable) to access the port 678 * (A hacker potentially could download a sick binary 679 * file into FCA) 680 */ 681 if (drv_priv(credp)) { 682 return (EPERM); 683 } 684 685 instance = (int)getminor(*devp); 686 687 port = ddi_get_soft_state(fp_driver_softstate, instance); 688 if (port == NULL) { 689 return (ENXIO); 690 } 691 692 mutex_enter(&port->fp_mutex); 693 if (port->fp_flag & FP_EXCL) { 694 /* 695 * It is already open for exclusive access. 696 * So shut the door on this caller. 697 */ 698 mutex_exit(&port->fp_mutex); 699 return (EBUSY); 700 } 701 702 if (flag & FEXCL) { 703 if (port->fp_flag & FP_OPEN) { 704 /* 705 * Exclusive operation not possible 706 * as it is already opened 707 */ 708 mutex_exit(&port->fp_mutex); 709 return (EBUSY); 710 } 711 port->fp_flag |= FP_EXCL; 712 } 713 port->fp_flag |= FP_OPEN; 714 mutex_exit(&port->fp_mutex); 715 716 return (0); 717 } 718 719 720 /* 721 * The driver close entry point is called on the last close() 722 * of a device. So it is perfectly alright to just clobber the 723 * open flag and reset it to idle (instead of having to reset 724 * each flag bits). For any confusion, check out close(9E). 
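 * (Resetting fp_flag to FP_IDLE in fp_close() below clears both the
 * FP_OPEN and FP_EXCL bits in a single assignment.)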
725 */ 726 727 /* ARGSUSED */ 728 static int 729 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 730 { 731 int instance; 732 fc_local_port_t *port; 733 734 if (otype != OTYP_CHR) { 735 return (EINVAL); 736 } 737 738 instance = (int)getminor(dev); 739 740 port = ddi_get_soft_state(fp_driver_softstate, instance); 741 if (port == NULL) { 742 return (ENXIO); 743 } 744 745 mutex_enter(&port->fp_mutex); 746 if ((port->fp_flag & FP_OPEN) == 0) { 747 mutex_exit(&port->fp_mutex); 748 return (ENODEV); 749 } 750 port->fp_flag = FP_IDLE; 751 mutex_exit(&port->fp_mutex); 752 753 return (0); 754 } 755 756 /* 757 * Handle IOCTL requests 758 */ 759 760 /* ARGSUSED */ 761 static int 762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 763 { 764 int instance; 765 int ret = 0; 766 fcio_t fcio; 767 fc_local_port_t *port; 768 769 instance = (int)getminor(dev); 770 771 port = ddi_get_soft_state(fp_driver_softstate, instance); 772 if (port == NULL) { 773 return (ENXIO); 774 } 775 776 mutex_enter(&port->fp_mutex); 777 if ((port->fp_flag & FP_OPEN) == 0) { 778 mutex_exit(&port->fp_mutex); 779 return (ENXIO); 780 } 781 782 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 783 mutex_exit(&port->fp_mutex); 784 return (ENXIO); 785 } 786 787 mutex_exit(&port->fp_mutex); 788 789 /* this will raise power if necessary */ 790 ret = fctl_busy_port(port); 791 if (ret != 0) { 792 return (ret); 793 } 794 795 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 796 797 798 switch (cmd) { 799 case FCIO_CMD: { 800 #ifdef _MULTI_DATAMODEL 801 switch (ddi_model_convert_from(mode & FMODELS)) { 802 case DDI_MODEL_ILP32: { 803 struct fcio32 fcio32; 804 805 if (ddi_copyin((void *)data, (void *)&fcio32, 806 sizeof (struct fcio32), mode)) { 807 ret = EFAULT; 808 break; 809 } 810 fcio.fcio_xfer = fcio32.fcio_xfer; 811 fcio.fcio_cmd = fcio32.fcio_cmd; 812 fcio.fcio_flags = fcio32.fcio_flags; 813 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 814 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 815 fcio.fcio_ibuf = 816 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 817 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 818 fcio.fcio_obuf = 819 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 820 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 821 fcio.fcio_abuf = 822 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 823 fcio.fcio_errno = fcio32.fcio_errno; 824 break; 825 } 826 827 case DDI_MODEL_NONE: 828 if (ddi_copyin((void *)data, (void *)&fcio, 829 sizeof (fcio_t), mode)) { 830 ret = EFAULT; 831 } 832 break; 833 } 834 #else /* _MULTI_DATAMODEL */ 835 if (ddi_copyin((void *)data, (void *)&fcio, 836 sizeof (fcio_t), mode)) { 837 ret = EFAULT; 838 break; 839 } 840 #endif /* _MULTI_DATAMODEL */ 841 if (!ret) { 842 ret = fp_fciocmd(port, data, mode, &fcio); 843 } 844 break; 845 } 846 847 default: 848 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 849 mode, credp, rval); 850 } 851 852 fctl_idle_port(port); 853 854 return (ret); 855 } 856 857 858 /* 859 * Init Symbolic Port Name and Node Name 860 * LV will try to get symbolic names from FCA driver 861 * and register these to name server, 862 * if LV fails to get these, 863 * LV will register its default symbolic names to name server. 
864 * The Default symbolic node name format is : 865 * <hostname>:<hba driver name>(instance) 866 * The Default symbolic port name format is : 867 * <fp path name> 868 */ 869 static void 870 fp_init_symbolic_names(fc_local_port_t *port) 871 { 872 const char *vendorname = ddi_driver_name(port->fp_fca_dip); 873 char *sym_name; 874 char fcaname[50] = {0}; 875 int hostnlen, fcanlen; 876 877 if (port->fp_sym_node_namelen == 0) { 878 hostnlen = strlen(utsname.nodename); 879 (void) snprintf(fcaname, sizeof (fcaname), 880 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip)); 881 fcanlen = strlen(fcaname); 882 883 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP); 884 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname); 885 port->fp_sym_node_namelen = strlen(sym_name); 886 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) { 887 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN; 888 } 889 (void) strncpy(port->fp_sym_node_name, sym_name, 890 port->fp_sym_node_namelen); 891 kmem_free(sym_name, hostnlen + fcanlen + 2); 892 } 893 894 if (port->fp_sym_port_namelen == 0) { 895 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 896 897 (void) ddi_pathname(port->fp_port_dip, pathname); 898 port->fp_sym_port_namelen = strlen(pathname); 899 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) { 900 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN; 901 } 902 (void) strncpy(port->fp_sym_port_name, pathname, 903 port->fp_sym_port_namelen); 904 kmem_free(pathname, MAXPATHLEN); 905 } 906 } 907 908 909 /* 910 * Perform port attach 911 */ 912 static int 913 fp_attach_handler(dev_info_t *dip) 914 { 915 int rval; 916 int instance; 917 int port_num; 918 int port_len; 919 char name[30]; 920 char i_pwwn[17]; 921 fp_cmd_t *pkt; 922 uint32_t ub_count; 923 fc_local_port_t *port; 924 job_request_t *job; 925 fc_local_port_t *phyport = NULL; 926 int portpro1; 927 char pwwn[17], nwwn[17]; 928 929 instance = ddi_get_instance(dip); 930 931 port_len = sizeof (port_num); 932 933 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 934 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 935 (caddr_t)&port_num, &port_len); 936 937 if (rval != DDI_SUCCESS) { 938 cmn_err(CE_WARN, "fp(%d): No port property in devinfo", 939 instance); 940 return (DDI_FAILURE); 941 } 942 943 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance, 944 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 945 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node", 946 instance); 947 return (DDI_FAILURE); 948 } 949 950 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance, 951 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 952 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment" 953 " point minor node", instance); 954 ddi_remove_minor_node(dip, NULL); 955 return (DDI_FAILURE); 956 } 957 958 if (ddi_soft_state_zalloc(fp_driver_softstate, instance) 959 != DDI_SUCCESS) { 960 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state", 961 instance); 962 ddi_remove_minor_node(dip, NULL); 963 return (DDI_FAILURE); 964 } 965 port = ddi_get_soft_state(fp_driver_softstate, instance); 966 967 (void) sprintf(port->fp_ibuf, "fp(%d)", instance); 968 969 port->fp_instance = instance; 970 port->fp_ulp_attach = 1; 971 port->fp_port_num = port_num; 972 port->fp_verbose = fp_verbosity; 973 port->fp_options = fp_options; 974 975 port->fp_fca_dip = ddi_get_parent(dip); 976 port->fp_port_dip = dip; 977 port->fp_fca_tran = (fc_fca_tran_t *) 978 ddi_get_driver_private(port->fp_fca_dip); 979 980 port->fp_task = port->fp_last_task = FP_TASK_IDLE; 981 982 /* 983 * 
Init the starting value of fp_rscn_count. Note that if 984 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the 985 * actual # of RSCNs will be (fp_rscn_count - 1) 986 */ 987 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1; 988 989 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL); 990 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL); 991 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL); 992 993 (void) sprintf(name, "fp%d_cache", instance); 994 995 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY, 996 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 997 "phyport-instance", -1)) != -1) { 998 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1); 999 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn); 1000 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn); 1001 port->fp_npiv_type = FC_NPIV_PORT; 1002 } 1003 1004 /* 1005 * Allocate the pool of fc_packet_t structs to be used with 1006 * this fp instance. 1007 */ 1008 port->fp_pkt_cache = kmem_cache_create(name, 1009 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8, 1010 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port, 1011 NULL, 0); 1012 port->fp_out_fpcmds = 0; 1013 if (port->fp_pkt_cache == NULL) { 1014 goto cache_alloc_failed; 1015 } 1016 1017 1018 /* 1019 * Allocate the d_id and pwwn hash tables for all remote ports 1020 * connected to this local port. 1021 */ 1022 port->fp_did_table = kmem_zalloc(did_table_size * 1023 sizeof (struct d_id_hash), KM_SLEEP); 1024 1025 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size * 1026 sizeof (struct pwwn_hash), KM_SLEEP); 1027 1028 port->fp_taskq = taskq_create("fp_ulp_callback", 1, 1029 MINCLSYSPRI, 1, 16, 0); 1030 1031 /* Indicate that don't have the pm components yet */ 1032 port->fp_soft_state |= FP_SOFT_NO_PMCOMP; 1033 1034 /* 1035 * Bind the callbacks with the FCA driver. This will open the gate 1036 * for asynchronous callbacks, so after this call the fp_mutex 1037 * must be held when updating the fc_local_port_t struct. 1038 * 1039 * This is done _before_ setting up the job thread so we can avoid 1040 * cleaning up after the thread_create() in the error path. This 1041 * also means fp will be operating with fp_els_resp_pkt set to NULL. 
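 * (fp_els_resp_pkt is allocated with fp_alloc_pkt() further below and
 * is only published, under fp_mutex, after the callbacks are bound.)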
1042 */ 1043 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1044 goto bind_callbacks_failed; 1045 } 1046 1047 if (phyport) { 1048 mutex_enter(&phyport->fp_mutex); 1049 if (phyport->fp_port_next) { 1050 phyport->fp_port_next->fp_port_prev = port; 1051 port->fp_port_next = phyport->fp_port_next; 1052 phyport->fp_port_next = port; 1053 port->fp_port_prev = phyport; 1054 } else { 1055 phyport->fp_port_next = port; 1056 phyport->fp_port_prev = port; 1057 port->fp_port_next = phyport; 1058 port->fp_port_prev = phyport; 1059 } 1060 mutex_exit(&phyport->fp_mutex); 1061 } 1062 1063 /* 1064 * Init Symbolic Names 1065 */ 1066 fp_init_symbolic_names(port); 1067 1068 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t), 1069 KM_SLEEP, NULL); 1070 1071 if (pkt == NULL) { 1072 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet", 1073 instance); 1074 goto alloc_els_packet_failed; 1075 } 1076 1077 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN, 1078 v.v_maxsyspri - 2); 1079 1080 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn); 1081 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port", 1082 i_pwwn) != DDI_PROP_SUCCESS) { 1083 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1084 "fp(%d): Updating 'initiator-port' property" 1085 " on fp dev_info node failed", instance); 1086 } 1087 1088 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn); 1089 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node", 1090 i_pwwn) != DDI_PROP_SUCCESS) { 1091 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1092 "fp(%d): Updating 'initiator-node' property" 1093 " on fp dev_info node failed", instance); 1094 } 1095 1096 mutex_enter(&port->fp_mutex); 1097 port->fp_els_resp_pkt = pkt; 1098 mutex_exit(&port->fp_mutex); 1099 1100 /* 1101 * Determine the count of unsolicited buffers this FCA can support 1102 */ 1103 fp_retrieve_caps(port); 1104 1105 /* 1106 * Allocate unsolicited buffer tokens 1107 */ 1108 if (port->fp_ub_count) { 1109 ub_count = port->fp_ub_count; 1110 port->fp_ub_tokens = kmem_zalloc(ub_count * 1111 sizeof (*port->fp_ub_tokens), KM_SLEEP); 1112 /* 1113 * Do not fail the attach if unsolicited buffer allocation 1114 * fails; Just try to get along with whatever the FCA can do. 1115 */ 1116 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size, 1117 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) != 1118 FC_SUCCESS || ub_count != port->fp_ub_count) { 1119 cmn_err(CE_WARN, "fp(%d): failed to allocate " 1120 " Unsolicited buffers. proceeding with attach...", 1121 instance); 1122 kmem_free(port->fp_ub_tokens, 1123 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1124 port->fp_ub_tokens = NULL; 1125 } 1126 } 1127 1128 fp_load_ulp_modules(dip, port); 1129 1130 /* 1131 * Enable DDI_SUSPEND and DDI_RESUME for this instance. 1132 */ 1133 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1134 "pm-hardware-state", "needs-suspend-resume", 1135 strlen("needs-suspend-resume") + 1); 1136 1137 /* 1138 * fctl maintains a list of all port handles, so 1139 * help fctl add this one to its list now. 
 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */

		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
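	 * The error labels below undo the setup in reverse order:
	 * alloc_els_packet_failed releases the FCA binding, the unsolicited
	 * buffer tokens and the ELS response packet; bind_callbacks_failed
	 * releases the taskq, the PWWN/D_ID hash tables and the packet
	 * cache; cache_alloc_failed destroys the CVs and mutex, removes the
	 * minor nodes and frees the soft state.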
1220 */ 1221 1222 alloc_els_packet_failed: 1223 1224 if (port->fp_fca_handle != NULL) { 1225 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1226 port->fp_fca_handle = NULL; 1227 } 1228 1229 if (port->fp_ub_tokens != NULL) { 1230 (void) fc_ulp_ubfree(port, port->fp_ub_count, 1231 port->fp_ub_tokens); 1232 kmem_free(port->fp_ub_tokens, 1233 port->fp_ub_count * sizeof (*port->fp_ub_tokens)); 1234 port->fp_ub_tokens = NULL; 1235 } 1236 1237 if (port->fp_els_resp_pkt != NULL) { 1238 fp_free_pkt(port->fp_els_resp_pkt); 1239 port->fp_els_resp_pkt = NULL; 1240 } 1241 1242 bind_callbacks_failed: 1243 1244 if (port->fp_taskq != NULL) { 1245 taskq_destroy(port->fp_taskq); 1246 } 1247 1248 if (port->fp_pwwn_table != NULL) { 1249 kmem_free(port->fp_pwwn_table, 1250 pwwn_table_size * sizeof (struct pwwn_hash)); 1251 port->fp_pwwn_table = NULL; 1252 } 1253 1254 if (port->fp_did_table != NULL) { 1255 kmem_free(port->fp_did_table, 1256 did_table_size * sizeof (struct d_id_hash)); 1257 port->fp_did_table = NULL; 1258 } 1259 1260 if (port->fp_pkt_cache != NULL) { 1261 kmem_cache_destroy(port->fp_pkt_cache); 1262 port->fp_pkt_cache = NULL; 1263 } 1264 1265 cache_alloc_failed: 1266 1267 cv_destroy(&port->fp_attach_cv); 1268 cv_destroy(&port->fp_cv); 1269 mutex_destroy(&port->fp_mutex); 1270 ddi_remove_minor_node(port->fp_port_dip, NULL); 1271 ddi_soft_state_free(fp_driver_softstate, instance); 1272 ddi_prop_remove_all(dip); 1273 1274 return (DDI_FAILURE); 1275 } 1276 1277 1278 /* 1279 * Handle DDI_RESUME request 1280 */ 1281 static int 1282 fp_resume_handler(dev_info_t *dip) 1283 { 1284 int rval; 1285 fc_local_port_t *port; 1286 1287 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 1288 1289 ASSERT(port != NULL); 1290 1291 #ifdef DEBUG 1292 mutex_enter(&port->fp_mutex); 1293 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND); 1294 mutex_exit(&port->fp_mutex); 1295 #endif 1296 1297 /* 1298 * If the port was power suspended, raise the power level 1299 */ 1300 mutex_enter(&port->fp_mutex); 1301 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 1302 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 1303 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 1304 1305 mutex_exit(&port->fp_mutex); 1306 if (pm_raise_power(dip, FP_PM_COMPONENT, 1307 FP_PM_PORT_UP) != DDI_SUCCESS) { 1308 FP_TRACE(FP_NHEAD2(9, 0), 1309 "Failed to raise the power level"); 1310 return (DDI_FAILURE); 1311 } 1312 mutex_enter(&port->fp_mutex); 1313 } 1314 port->fp_soft_state &= ~FP_SOFT_SUSPEND; 1315 mutex_exit(&port->fp_mutex); 1316 1317 /* 1318 * All the discovery is initiated and handled by per-port thread. 1319 * Further all the discovery is done in handled in callback mode 1320 * (not polled mode); In a specific case such as this, the discovery 1321 * is required to happen in polled mode. The easiest way out is 1322 * to bail out port thread and get started. Come back and fix this 1323 * to do on demand discovery initiated by ULPs. ULPs such as FCP 1324 * will do on-demand discovery during pre-power-up busctl handling 1325 * which will only be possible when SCSA provides a new HBA vector 1326 * for sending down the PM busctl requests. 
1327 */ 1328 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME); 1329 1330 rval = fp_resume_all(port, FC_CMD_RESUME); 1331 if (rval != DDI_SUCCESS) { 1332 mutex_enter(&port->fp_mutex); 1333 port->fp_soft_state |= FP_SOFT_SUSPEND; 1334 mutex_exit(&port->fp_mutex); 1335 (void) callb_generic_cpr(&port->fp_cpr_info, 1336 CB_CODE_CPR_CHKPT); 1337 } 1338 1339 return (rval); 1340 } 1341 1342 /* 1343 * Perform FC Port power on initialization 1344 */ 1345 static int 1346 fp_power_up(fc_local_port_t *port) 1347 { 1348 int rval; 1349 1350 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1351 1352 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0); 1353 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN); 1354 1355 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1356 1357 mutex_exit(&port->fp_mutex); 1358 1359 rval = fp_resume_all(port, FC_CMD_POWER_UP); 1360 if (rval != DDI_SUCCESS) { 1361 mutex_enter(&port->fp_mutex); 1362 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1363 } else { 1364 mutex_enter(&port->fp_mutex); 1365 } 1366 1367 return (rval); 1368 } 1369 1370 1371 /* 1372 * It is important to note that the power may possibly be removed between 1373 * SUSPEND and the ensuing RESUME operation. In such a context the underlying 1374 * FC port hardware would have gone through an OFFLINE to ONLINE transition 1375 * (hardware state). In this case, the port driver may need to rediscover the 1376 * topology, perform LOGINs, register with the name server again and perform 1377 * any such port initialization procedures. To perform LOGINs, the driver could 1378 * use the port device handle to see if a LOGIN needs to be performed and use 1379 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured 1380 * or removed) which will be reflected in the map the ULPs will see. 1381 */ 1382 static int 1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd) 1384 { 1385 1386 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1387 1388 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1389 return (DDI_FAILURE); 1390 } 1391 1392 mutex_enter(&port->fp_mutex); 1393 1394 /* 1395 * If there are commands queued for delayed retry, instead of 1396 * working the hard way to figure out which ones are good for 1397 * restart and which ones not (ELSs are definitely not good 1398 * as the port will have to go through a new spin of rediscovery 1399 * now), so just flush them out. 1400 */ 1401 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) { 1402 fp_cmd_t *cmd; 1403 1404 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT; 1405 1406 mutex_exit(&port->fp_mutex); 1407 while ((cmd = fp_deque_cmd(port)) != NULL) { 1408 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 1409 fp_iodone(cmd); 1410 } 1411 mutex_enter(&port->fp_mutex); 1412 } 1413 1414 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) { 1415 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) || 1416 port->fp_dev_count) { 1417 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1418 port->fp_offline_tid = timeout(fp_offline_timeout, 1419 (caddr_t)port, fp_offline_ticks); 1420 } 1421 if (port->fp_job_head) { 1422 cv_signal(&port->fp_cv); 1423 } 1424 mutex_exit(&port->fp_mutex); 1425 fctl_attach_ulps(port, cmd, &modlinkage); 1426 } else { 1427 struct job_request *job; 1428 1429 /* 1430 * If an OFFLINE timer was running at the time of 1431 * suspending, there is no need to restart it as 1432 * the port is ONLINE now. 
 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+ Grab the port driver mutex, check if the unsolicited
 *	  callback is currently under processing. If true, fail
 *	  the DDI_DETACH request by printing a message; If false
 *	  mark the DDI_DETACH as under progress, so that any
 *	  further unsolicited callbacks get bounced.
 *	+ Perform PRLO/LOGO if necessary, cleanup all the data
 *	  structures.
 *	+ Get the job_handler thread to gracefully exit.
 *	+ Unregister callbacks with the FCA port.
 *	+ Now that some peace is found, notify all the ULPs of
 *	  DDI_DETACH request (using ulp_port_detach entry point)
 *	+ Free all mutexes, semaphores, condition variables.
 *	+ Free the soft state, return success.
 *
 * Important considerations:
 *	Port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested. It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t	*job;
	uint32_t	delay_count;
	fc_orphan_t	*orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands,
		 * because sd/ssd/scsi_vhci should have finished all their
		 * commands, and fcp/fcip/fcsm should have finished all
		 * their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us. In theory the fp driver could even be
	 * modunloaded even though the job thread isn't done executing.
	 * without creating the race condition.
1567 */ 1568 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL, 1569 (opaque_t)port, KM_SLEEP); 1570 fctl_enque_job(port, job); 1571 fctl_jobwait(job); 1572 fctl_dealloc_job(job); 1573 1574 1575 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT, 1576 FP_PM_PORT_DOWN); 1577 1578 if (port->fp_taskq) { 1579 taskq_destroy(port->fp_taskq); 1580 } 1581 1582 ddi_prop_remove_all(port->fp_port_dip); 1583 1584 ddi_remove_minor_node(port->fp_port_dip, NULL); 1585 1586 fctl_remove_port(port); 1587 1588 fp_free_pkt(port->fp_els_resp_pkt); 1589 1590 if (port->fp_ub_tokens) { 1591 if (fc_ulp_ubfree(port, port->fp_ub_count, 1592 port->fp_ub_tokens) != FC_SUCCESS) { 1593 cmn_err(CE_WARN, "fp(%d): couldn't free " 1594 " unsolicited buffers", port->fp_instance); 1595 } 1596 kmem_free(port->fp_ub_tokens, 1597 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1598 port->fp_ub_tokens = NULL; 1599 } 1600 1601 if (port->fp_pkt_cache != NULL) { 1602 kmem_cache_destroy(port->fp_pkt_cache); 1603 } 1604 1605 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1606 1607 mutex_enter(&port->fp_mutex); 1608 if (port->fp_did_table) { 1609 kmem_free(port->fp_did_table, did_table_size * 1610 sizeof (struct d_id_hash)); 1611 } 1612 1613 if (port->fp_pwwn_table) { 1614 kmem_free(port->fp_pwwn_table, pwwn_table_size * 1615 sizeof (struct pwwn_hash)); 1616 } 1617 orp = port->fp_orphan_list; 1618 while (orp) { 1619 tmporp = orp; 1620 orp = orp->orp_next; 1621 kmem_free(tmporp, sizeof (*orp)); 1622 } 1623 1624 mutex_exit(&port->fp_mutex); 1625 1626 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH); 1627 1628 mutex_destroy(&port->fp_mutex); 1629 cv_destroy(&port->fp_attach_cv); 1630 cv_destroy(&port->fp_cv); 1631 ddi_soft_state_free(fp_driver_softstate, port->fp_instance); 1632 1633 return (DDI_SUCCESS); 1634 } 1635 1636 1637 /* 1638 * Steps to perform DDI_SUSPEND operation on a FC port 1639 * 1640 * - If already suspended return DDI_FAILURE 1641 * - If already power-suspended return DDI_SUCCESS 1642 * - If an unsolicited callback or state change handling is in 1643 * in progress, throw a warning message, return DDI_FAILURE 1644 * - Cancel timeouts 1645 * - SUSPEND the job_handler thread (means do nothing as it is 1646 * taken care of by the CPR frame work) 1647 */ 1648 static int 1649 fp_suspend_handler(fc_local_port_t *port) 1650 { 1651 uint32_t delay_count; 1652 1653 mutex_enter(&port->fp_mutex); 1654 1655 /* 1656 * The following should never happen, but 1657 * let the driver be more defensive here 1658 */ 1659 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1660 mutex_exit(&port->fp_mutex); 1661 return (DDI_FAILURE); 1662 } 1663 1664 /* 1665 * If the port is already power suspended, there 1666 * is nothing else to do, So return DDI_SUCCESS, 1667 * but mark the SUSPEND bit in the soft state 1668 * before leaving. 1669 */ 1670 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1671 port->fp_soft_state |= FP_SOFT_SUSPEND; 1672 mutex_exit(&port->fp_mutex); 1673 return (DDI_SUCCESS); 1674 } 1675 1676 /* 1677 * Check if an unsolicited callback or state change handling is 1678 * in progress. If true, fail the suspend operation; also throw 1679 * a warning message notifying the failure. Note that Sun PCI 1680 * hotplug spec recommends messages in cases of failure (but 1681 * not flooding the console) 1682 * 1683 * Busy waiting for a short interval (500 millisecond ?) to see 1684 * if the callback processing completes may be another idea. 
Since 1685 * most of the callback processing involves a lot of work, it 1686 * is safe to just fail the SUSPEND operation. It is definitely 1687 * not bad to fail the SUSPEND operation if the driver is busy. 1688 */ 1689 delay_count = 0; 1690 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1691 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) { 1692 mutex_exit(&port->fp_mutex); 1693 delay_count++; 1694 delay(drv_usectohz(1000000)); 1695 mutex_enter(&port->fp_mutex); 1696 } 1697 1698 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1699 FP_SOFT_IN_UNSOL_CB)) { 1700 mutex_exit(&port->fp_mutex); 1701 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1702 " Failing suspend", port->fp_instance); 1703 return (DDI_FAILURE); 1704 } 1705 1706 /* 1707 * Check of FC port thread is busy 1708 */ 1709 if (port->fp_job_head) { 1710 mutex_exit(&port->fp_mutex); 1711 FP_TRACE(FP_NHEAD2(9, 0), 1712 "FC port thread is busy: Failing suspend"); 1713 return (DDI_FAILURE); 1714 } 1715 port->fp_soft_state |= FP_SOFT_SUSPEND; 1716 1717 fp_suspend_all(port); 1718 mutex_exit(&port->fp_mutex); 1719 1720 return (DDI_SUCCESS); 1721 } 1722 1723 1724 /* 1725 * Prepare for graceful power down of a FC port 1726 */ 1727 static int 1728 fp_power_down(fc_local_port_t *port) 1729 { 1730 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1731 1732 /* 1733 * Power down request followed by a DDI_SUSPEND should 1734 * never happen; If it does return DDI_SUCCESS 1735 */ 1736 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1737 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1738 return (DDI_SUCCESS); 1739 } 1740 1741 /* 1742 * If the port is already power suspended, there 1743 * is nothing else to do, So return DDI_SUCCESS, 1744 */ 1745 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1746 return (DDI_SUCCESS); 1747 } 1748 1749 /* 1750 * Check if an unsolicited callback or state change handling 1751 * is in progress. If true, fail the PM suspend operation. 1752 * But don't print a message unless the verbosity of the 1753 * driver desires otherwise. 1754 */ 1755 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) || 1756 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) { 1757 FP_TRACE(FP_NHEAD2(9, 0), 1758 "Unsolicited callback in progress: Failing power down"); 1759 return (DDI_FAILURE); 1760 } 1761 1762 /* 1763 * Check of FC port thread is busy 1764 */ 1765 if (port->fp_job_head) { 1766 FP_TRACE(FP_NHEAD2(9, 0), 1767 "FC port thread is busy: Failing power down"); 1768 return (DDI_FAILURE); 1769 } 1770 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1771 1772 /* 1773 * check if the ULPs are ready for power down 1774 */ 1775 mutex_exit(&port->fp_mutex); 1776 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN, 1777 &modlinkage) != FC_SUCCESS) { 1778 mutex_enter(&port->fp_mutex); 1779 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1780 mutex_exit(&port->fp_mutex); 1781 1782 /* 1783 * Power back up the obedient ULPs that went down 1784 */ 1785 fp_attach_ulps(port, FC_CMD_POWER_UP); 1786 1787 FP_TRACE(FP_NHEAD2(9, 0), 1788 "ULP(s) busy, detach_ulps failed. 
Failing power down"); 1789 mutex_enter(&port->fp_mutex); 1790 return (DDI_FAILURE); 1791 } 1792 mutex_enter(&port->fp_mutex); 1793 1794 fp_suspend_all(port); 1795 1796 return (DDI_SUCCESS); 1797 } 1798 1799 1800 /* 1801 * Suspend the entire FC port 1802 */ 1803 static void 1804 fp_suspend_all(fc_local_port_t *port) 1805 { 1806 int index; 1807 struct pwwn_hash *head; 1808 fc_remote_port_t *pd; 1809 1810 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1811 1812 if (port->fp_wait_tid != 0) { 1813 timeout_id_t tid; 1814 1815 tid = port->fp_wait_tid; 1816 port->fp_wait_tid = (timeout_id_t)NULL; 1817 mutex_exit(&port->fp_mutex); 1818 (void) untimeout(tid); 1819 mutex_enter(&port->fp_mutex); 1820 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT; 1821 } 1822 1823 if (port->fp_offline_tid) { 1824 timeout_id_t tid; 1825 1826 tid = port->fp_offline_tid; 1827 port->fp_offline_tid = (timeout_id_t)NULL; 1828 mutex_exit(&port->fp_mutex); 1829 (void) untimeout(tid); 1830 mutex_enter(&port->fp_mutex); 1831 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT; 1832 } 1833 mutex_exit(&port->fp_mutex); 1834 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1835 mutex_enter(&port->fp_mutex); 1836 1837 /* 1838 * Mark all devices as OLD, and reset the LOGIN state as well 1839 * (this will force the ULPs to perform a LOGIN after calling 1840 * fc_portgetmap() during RESUME/PM_RESUME) 1841 */ 1842 for (index = 0; index < pwwn_table_size; index++) { 1843 head = &port->fp_pwwn_table[index]; 1844 pd = head->pwwn_head; 1845 while (pd != NULL) { 1846 mutex_enter(&pd->pd_mutex); 1847 fp_remote_port_offline(pd); 1848 fctl_delist_did_table(port, pd); 1849 pd->pd_state = PORT_DEVICE_VALID; 1850 pd->pd_login_count = 0; 1851 mutex_exit(&pd->pd_mutex); 1852 pd = pd->pd_wwn_hnext; 1853 } 1854 } 1855 } 1856 1857 1858 /* 1859 * fp_cache_constructor: Constructor function for kmem_cache_create(9F). 1860 * Performs intializations for fc_packet_t structs. 1861 * Returns 0 for success or -1 for failure. 1862 * 1863 * This function allocates DMA handles for both command and responses. 1864 * Most of the ELSs used have both command and responses so it is strongly 1865 * desired to move them to cache constructor routine. 1866 * 1867 * Context: Can sleep iff called with KM_SLEEP flag. 1868 */ 1869 static int 1870 fp_cache_constructor(void *buf, void *cdarg, int kmflags) 1871 { 1872 int (*cb) (caddr_t); 1873 fc_packet_t *pkt; 1874 fp_cmd_t *cmd = (fp_cmd_t *)buf; 1875 fc_local_port_t *port = (fc_local_port_t *)cdarg; 1876 1877 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1878 1879 cmd->cmd_next = NULL; 1880 cmd->cmd_flags = 0; 1881 cmd->cmd_dflags = 0; 1882 cmd->cmd_job = NULL; 1883 cmd->cmd_port = port; 1884 pkt = &cmd->cmd_pkt; 1885 1886 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1887 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1888 &pkt->pkt_cmd_dma) != DDI_SUCCESS) { 1889 return (-1); 1890 } 1891 1892 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1893 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1894 &pkt->pkt_resp_dma) != DDI_SUCCESS) { 1895 ddi_dma_free_handle(&pkt->pkt_cmd_dma); 1896 return (-1); 1897 } 1898 1899 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL; 1900 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt = 1901 pkt->pkt_data_cookie_cnt = 0; 1902 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie = 1903 pkt->pkt_data_cookie = NULL; 1904 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t); 1905 1906 return (0); 1907 } 1908 1909 1910 /* 1911 * fp_cache_destructor: Destructor function for kmem_cache_create(). 
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t	*cmd = (fp_cmd_t *)buf;
	fc_packet_t	*pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery, and a system's inability to allocate memory
 * or DVMA resources while performing some of these critical ELSs
 * causes a lot of problems. While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers. So it
 * is desirable to have framework support to reserve a fragment
 * of DVMA. Until this is fixed the correct way, the suffering
 * is huge whenever a LIP happens at a time DVMA resources are
 * drained out completely - so an attempt needs to be made to
 * KM_SLEEP while requesting these resources, hoping that
 * the requests won't hang forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 */
static fp_cmd_t *
fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
    fc_remote_port_t *pd)
{
	int			rval;
	ulong_t			real_len;
	fp_cmd_t		*cmd;
	fc_packet_t		*pkt;
	int			(*cb) (caddr_t);
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
	if (cmd == NULL) {
		return (cmd);
	}

	cmd->cmd_ulp_pkt = NULL;
	cmd->cmd_flags = 0;
	pkt = &cmd->cmd_pkt;
	ASSERT(cmd->cmd_dflags == 0);

	pkt->pkt_datalen = 0;
	pkt->pkt_data = NULL;
	pkt->pkt_state = 0;
	pkt->pkt_action = 0;
	pkt->pkt_reason = 0;
	pkt->pkt_expln = 0;

	/*
	 * Init pkt_pd with the given pointer; this must be done _before_
	 * the call to fc_ulp_init_packet().
1992 */ 1993 pkt->pkt_pd = pd; 1994 1995 /* Now call the FCA driver to init its private, per-packet fields */ 1996 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1997 goto alloc_pkt_failed; 1998 } 1999 2000 if (cmd_len) { 2001 ASSERT(pkt->pkt_cmd_dma != NULL); 2002 2003 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2004 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2005 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2006 &pkt->pkt_cmd_acc); 2007 2008 if (rval != DDI_SUCCESS) { 2009 goto alloc_pkt_failed; 2010 } 2011 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2012 2013 if (real_len < cmd_len) { 2014 goto alloc_pkt_failed; 2015 } 2016 2017 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2018 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2019 DDI_DMA_CONSISTENT, cb, NULL, 2020 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2021 2022 if (rval != DDI_DMA_MAPPED) { 2023 goto alloc_pkt_failed; 2024 } 2025 2026 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2027 2028 if (pkt->pkt_cmd_cookie_cnt > 2029 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2030 goto alloc_pkt_failed; 2031 } 2032 2033 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2034 2035 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2036 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2037 KM_NOSLEEP); 2038 2039 if (cp == NULL) { 2040 goto alloc_pkt_failed; 2041 } 2042 2043 *cp = pkt_cookie; 2044 cp++; 2045 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2046 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2047 *cp = pkt_cookie; 2048 } 2049 } 2050 2051 if (resp_len) { 2052 ASSERT(pkt->pkt_resp_dma != NULL); 2053 2054 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2055 port->fp_fca_tran->fca_acc_attr, 2056 DDI_DMA_CONSISTENT, cb, NULL, 2057 (caddr_t *)&pkt->pkt_resp, &real_len, 2058 &pkt->pkt_resp_acc); 2059 2060 if (rval != DDI_SUCCESS) { 2061 goto alloc_pkt_failed; 2062 } 2063 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2064 2065 if (real_len < resp_len) { 2066 goto alloc_pkt_failed; 2067 } 2068 2069 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2070 pkt->pkt_resp, real_len, DDI_DMA_READ | 2071 DDI_DMA_CONSISTENT, cb, NULL, 2072 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2073 2074 if (rval != DDI_DMA_MAPPED) { 2075 goto alloc_pkt_failed; 2076 } 2077 2078 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2079 2080 if (pkt->pkt_resp_cookie_cnt > 2081 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2082 goto alloc_pkt_failed; 2083 } 2084 2085 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2086 2087 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2088 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2089 KM_NOSLEEP); 2090 2091 if (cp == NULL) { 2092 goto alloc_pkt_failed; 2093 } 2094 2095 *cp = pkt_cookie; 2096 cp++; 2097 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2098 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2099 *cp = pkt_cookie; 2100 } 2101 } 2102 2103 pkt->pkt_cmdlen = cmd_len; 2104 pkt->pkt_rsplen = resp_len; 2105 pkt->pkt_ulp_private = cmd; 2106 2107 return (cmd); 2108 2109 alloc_pkt_failed: 2110 2111 fp_free_dma(cmd); 2112 2113 if (pkt->pkt_cmd_cookie != NULL) { 2114 kmem_free(pkt->pkt_cmd_cookie, 2115 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2116 pkt->pkt_cmd_cookie = NULL; 2117 } 2118 2119 if (pkt->pkt_resp_cookie != NULL) { 2120 kmem_free(pkt->pkt_resp_cookie, 2121 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2122 pkt->pkt_resp_cookie = NULL; 2123 } 2124 2125 kmem_cache_free(port->fp_pkt_cache, cmd); 2126 2127 return 
(NULL); 2128 } 2129 2130 2131 /* 2132 * Free FC packet 2133 */ 2134 static void 2135 fp_free_pkt(fp_cmd_t *cmd) 2136 { 2137 fc_local_port_t *port; 2138 fc_packet_t *pkt; 2139 2140 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2141 2142 cmd->cmd_next = NULL; 2143 cmd->cmd_job = NULL; 2144 pkt = &cmd->cmd_pkt; 2145 pkt->pkt_ulp_private = 0; 2146 pkt->pkt_tran_flags = 0; 2147 pkt->pkt_tran_type = 0; 2148 port = cmd->cmd_port; 2149 2150 if (pkt->pkt_cmd_cookie != NULL) { 2151 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2152 sizeof (ddi_dma_cookie_t)); 2153 pkt->pkt_cmd_cookie = NULL; 2154 } 2155 2156 if (pkt->pkt_resp_cookie != NULL) { 2157 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2158 sizeof (ddi_dma_cookie_t)); 2159 pkt->pkt_resp_cookie = NULL; 2160 } 2161 2162 fp_free_dma(cmd); 2163 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2164 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2165 } 2166 2167 2168 /* 2169 * Release DVMA resources 2170 */ 2171 static void 2172 fp_free_dma(fp_cmd_t *cmd) 2173 { 2174 fc_packet_t *pkt = &cmd->cmd_pkt; 2175 2176 pkt->pkt_cmdlen = 0; 2177 pkt->pkt_rsplen = 0; 2178 pkt->pkt_tran_type = 0; 2179 pkt->pkt_tran_flags = 0; 2180 2181 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2182 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2183 } 2184 2185 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2186 if (pkt->pkt_cmd_acc) { 2187 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2188 } 2189 } 2190 2191 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2192 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2193 } 2194 2195 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2196 if (pkt->pkt_resp_acc) { 2197 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2198 } 2199 } 2200 cmd->cmd_dflags = 0; 2201 } 2202 2203 2204 /* 2205 * Dedicated thread to perform various activities. One thread for 2206 * each fc_local_port_t (driver soft state) instance. 2207 * Note, this effectively works out to one thread for each local 2208 * port, but there are also some Solaris taskq threads in use on a per-local 2209 * port basis; these also need to be taken into consideration. 2210 */ 2211 static void 2212 fp_job_handler(fc_local_port_t *port) 2213 { 2214 int rval; 2215 uint32_t *d_id; 2216 fc_remote_port_t *pd; 2217 job_request_t *job; 2218 2219 #ifndef __lock_lint 2220 /* 2221 * Solaris-internal stuff for proper operation of kernel threads 2222 * with Solaris CPR. 2223 */ 2224 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2225 callb_generic_cpr, "fp_job_handler"); 2226 #endif 2227 2228 2229 /* Loop forever waiting for work to do */ 2230 for (;;) { 2231 2232 mutex_enter(&port->fp_mutex); 2233 2234 /* 2235 * Sleep if no work to do right now, or if we want 2236 * to suspend or power-down. 2237 */ 2238 while (port->fp_job_head == NULL || 2239 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2240 FP_SOFT_SUSPEND))) { 2241 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2242 cv_wait(&port->fp_cv, &port->fp_mutex); 2243 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2244 } 2245 2246 /* 2247 * OK, we've just been woken up, so retrieve the next entry 2248 * from the head of the job queue for this local port. 2249 */ 2250 job = fctl_deque_job(port); 2251 2252 /* 2253 * Handle all the fp driver's supported job codes here 2254 * in this big honkin' switch. 2255 */ 2256 switch (job->job_code) { 2257 case JOB_PORT_SHUTDOWN: 2258 /* 2259 * fp_port_shutdown() is only called from here. This 2260 * will prepare the local port instance (softstate) 2261 * for detaching. 
This cancels timeout callbacks, 2262 * executes LOGOs with remote ports, cleans up tables, 2263 * and deallocates data structs. 2264 */ 2265 fp_port_shutdown(port, job); 2266 2267 /* 2268 * This will exit the job thread. 2269 */ 2270 #ifndef __lock_lint 2271 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2272 #else 2273 mutex_exit(&port->fp_mutex); 2274 #endif 2275 fctl_jobdone(job); 2276 thread_exit(); 2277 2278 /* NOTREACHED */ 2279 2280 case JOB_ATTACH_ULP: { 2281 /* 2282 * This job is spawned in response to a ULP calling 2283 * fc_ulp_add(). 2284 */ 2285 2286 boolean_t do_attach_ulps = B_TRUE; 2287 2288 /* 2289 * If fp is detaching, we don't want to call 2290 * fp_startup_done as this asynchronous 2291 * notification may interfere with the re-attach. 2292 */ 2293 2294 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2295 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2296 do_attach_ulps = B_FALSE; 2297 } else { 2298 /* 2299 * We are going to force the transport 2300 * to attach to the ULPs, so set 2301 * fp_ulp_attach. This will keep any 2302 * potential detach from occurring until 2303 * we are done. 2304 */ 2305 port->fp_ulp_attach = 1; 2306 } 2307 2308 mutex_exit(&port->fp_mutex); 2309 2310 /* 2311 * NOTE: Since we just dropped the mutex, there is now 2312 * a race window where the fp_soft_state check above 2313 * could change here. This race is covered because an 2314 * additional check was added in the functions hidden 2315 * under fp_startup_done(). 2316 */ 2317 if (do_attach_ulps == B_TRUE) { 2318 /* 2319 * This goes thru a bit of a convoluted call 2320 * chain before spawning off a DDI taskq 2321 * request to perform the actual attach 2322 * operations. Blocking can occur at a number 2323 * of points. 2324 */ 2325 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2326 } 2327 job->job_result = FC_SUCCESS; 2328 fctl_jobdone(job); 2329 break; 2330 } 2331 2332 case JOB_ULP_NOTIFY: { 2333 /* 2334 * Pass state change notifications up to any/all 2335 * registered ULPs. 2336 */ 2337 uint32_t statec; 2338 2339 statec = job->job_ulp_listlen; 2340 if (statec == FC_STATE_RESET_REQUESTED) { 2341 port->fp_last_task = port->fp_task; 2342 port->fp_task = FP_TASK_OFFLINE; 2343 fp_port_offline(port, 0); 2344 port->fp_task = port->fp_last_task; 2345 port->fp_last_task = FP_TASK_IDLE; 2346 } 2347 2348 if (--port->fp_statec_busy == 0) { 2349 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2350 } 2351 2352 mutex_exit(&port->fp_mutex); 2353 2354 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2355 fctl_jobdone(job); 2356 break; 2357 } 2358 2359 case JOB_PLOGI_ONE: 2360 /* 2361 * Issue a PLOGI to a single remote port. Multiple 2362 * PLOGIs to different remote ports may occur in 2363 * parallel. 2364 * This can create the fc_remote_port_t if it does not 2365 * already exist. 
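 * (On switch topologies the remote port is created via the name
 * server; see fp_create_remote_port_by_ns() below.)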
2366 */ 2367 2368 mutex_exit(&port->fp_mutex); 2369 d_id = (uint32_t *)job->job_private; 2370 pd = fctl_get_remote_port_by_did(port, *d_id); 2371 2372 if (pd) { 2373 mutex_enter(&pd->pd_mutex); 2374 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2375 pd->pd_login_count++; 2376 mutex_exit(&pd->pd_mutex); 2377 job->job_result = FC_SUCCESS; 2378 fctl_jobdone(job); 2379 break; 2380 } 2381 mutex_exit(&pd->pd_mutex); 2382 } else { 2383 mutex_enter(&port->fp_mutex); 2384 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2385 mutex_exit(&port->fp_mutex); 2386 pd = fp_create_remote_port_by_ns(port, 2387 *d_id, KM_SLEEP); 2388 if (pd == NULL) { 2389 job->job_result = FC_FAILURE; 2390 fctl_jobdone(job); 2391 break; 2392 } 2393 } else { 2394 mutex_exit(&port->fp_mutex); 2395 } 2396 } 2397 2398 job->job_flags |= JOB_TYPE_FP_ASYNC; 2399 job->job_counter = 1; 2400 2401 rval = fp_port_login(port, *d_id, job, 2402 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2403 2404 if (rval != FC_SUCCESS) { 2405 job->job_result = rval; 2406 fctl_jobdone(job); 2407 } 2408 break; 2409 2410 case JOB_LOGO_ONE: { 2411 /* 2412 * Issue a PLOGO to a single remote port. Multiple 2413 * PLOGOs to different remote ports may occur in 2414 * parallel. 2415 */ 2416 fc_remote_port_t *pd; 2417 2418 #ifndef __lock_lint 2419 ASSERT(job->job_counter > 0); 2420 #endif 2421 2422 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2423 2424 mutex_enter(&pd->pd_mutex); 2425 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2426 mutex_exit(&pd->pd_mutex); 2427 job->job_result = FC_LOGINREQ; 2428 mutex_exit(&port->fp_mutex); 2429 fctl_jobdone(job); 2430 break; 2431 } 2432 if (pd->pd_login_count > 1) { 2433 pd->pd_login_count--; 2434 mutex_exit(&pd->pd_mutex); 2435 job->job_result = FC_SUCCESS; 2436 mutex_exit(&port->fp_mutex); 2437 fctl_jobdone(job); 2438 break; 2439 } 2440 mutex_exit(&pd->pd_mutex); 2441 mutex_exit(&port->fp_mutex); 2442 job->job_flags |= JOB_TYPE_FP_ASYNC; 2443 (void) fp_logout(port, pd, job); 2444 break; 2445 } 2446 2447 case JOB_FCIO_LOGIN: 2448 /* 2449 * PLOGI initiated at ioctl request. 2450 */ 2451 mutex_exit(&port->fp_mutex); 2452 job->job_result = 2453 fp_fcio_login(port, job->job_private, job); 2454 fctl_jobdone(job); 2455 break; 2456 2457 case JOB_FCIO_LOGOUT: 2458 /* 2459 * PLOGO initiated at ioctl request. 
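 * (Handled by fp_fcio_logout().)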
2460 */ 2461 mutex_exit(&port->fp_mutex); 2462 job->job_result = 2463 fp_fcio_logout(port, job->job_private, job); 2464 fctl_jobdone(job); 2465 break; 2466 2467 case JOB_PORT_GETMAP: 2468 case JOB_PORT_GETMAP_PLOGI_ALL: { 2469 port->fp_last_task = port->fp_task; 2470 port->fp_task = FP_TASK_GETMAP; 2471 2472 switch (port->fp_topology) { 2473 case FC_TOP_PRIVATE_LOOP: 2474 job->job_counter = 1; 2475 2476 fp_get_loopmap(port, job); 2477 mutex_exit(&port->fp_mutex); 2478 fp_jobwait(job); 2479 fctl_fillout_map(port, 2480 (fc_portmap_t **)job->job_private, 2481 (uint32_t *)job->job_arg, 1, 0, 0); 2482 fctl_jobdone(job); 2483 mutex_enter(&port->fp_mutex); 2484 break; 2485 2486 case FC_TOP_PUBLIC_LOOP: 2487 case FC_TOP_FABRIC: 2488 mutex_exit(&port->fp_mutex); 2489 job->job_counter = 1; 2490 2491 job->job_result = fp_ns_getmap(port, 2492 job, (fc_portmap_t **)job->job_private, 2493 (uint32_t *)job->job_arg, 2494 FCTL_GAN_START_ID); 2495 fctl_jobdone(job); 2496 mutex_enter(&port->fp_mutex); 2497 break; 2498 2499 case FC_TOP_PT_PT: 2500 mutex_exit(&port->fp_mutex); 2501 fctl_fillout_map(port, 2502 (fc_portmap_t **)job->job_private, 2503 (uint32_t *)job->job_arg, 1, 0, 0); 2504 fctl_jobdone(job); 2505 mutex_enter(&port->fp_mutex); 2506 break; 2507 2508 default: 2509 mutex_exit(&port->fp_mutex); 2510 fctl_jobdone(job); 2511 mutex_enter(&port->fp_mutex); 2512 break; 2513 } 2514 port->fp_task = port->fp_last_task; 2515 port->fp_last_task = FP_TASK_IDLE; 2516 mutex_exit(&port->fp_mutex); 2517 break; 2518 } 2519 2520 case JOB_PORT_OFFLINE: { 2521 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2522 2523 port->fp_last_task = port->fp_task; 2524 port->fp_task = FP_TASK_OFFLINE; 2525 2526 if (port->fp_statec_busy > 2) { 2527 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2528 fp_port_offline(port, 0); 2529 if (--port->fp_statec_busy == 0) { 2530 port->fp_soft_state &= 2531 ~FP_SOFT_IN_STATEC_CB; 2532 } 2533 } else { 2534 fp_port_offline(port, 1); 2535 } 2536 2537 port->fp_task = port->fp_last_task; 2538 port->fp_last_task = FP_TASK_IDLE; 2539 2540 mutex_exit(&port->fp_mutex); 2541 2542 fctl_jobdone(job); 2543 break; 2544 } 2545 2546 case JOB_PORT_STARTUP: { 2547 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2548 if (port->fp_statec_busy > 1) { 2549 mutex_exit(&port->fp_mutex); 2550 break; 2551 } 2552 mutex_exit(&port->fp_mutex); 2553 2554 FP_TRACE(FP_NHEAD2(9, rval), 2555 "Topology discovery failed"); 2556 break; 2557 } 2558 2559 /* 2560 * Attempt building device handles in case 2561 * of private Loop. 
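 * (fp_get_loopmap() walks the LILP map for this; switch topologies
 * go through fp_fabric_online() instead.)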
2562 */ 2563 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2564 job->job_counter = 1; 2565 2566 fp_get_loopmap(port, job); 2567 mutex_exit(&port->fp_mutex); 2568 fp_jobwait(job); 2569 mutex_enter(&port->fp_mutex); 2570 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2571 ASSERT(port->fp_total_devices == 0); 2572 port->fp_total_devices = 2573 port->fp_dev_count; 2574 } 2575 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2576 /* 2577 * Hack to avoid state changes going up early 2578 */ 2579 port->fp_statec_busy++; 2580 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2581 2582 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2583 fp_fabric_online(port, job); 2584 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2585 } 2586 mutex_exit(&port->fp_mutex); 2587 fctl_jobdone(job); 2588 break; 2589 } 2590 2591 case JOB_PORT_ONLINE: { 2592 char *newtop; 2593 char *oldtop; 2594 uint32_t old_top; 2595 2596 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2597 2598 /* 2599 * Bail out early if there are a lot of 2600 * state changes in the pipeline 2601 */ 2602 if (port->fp_statec_busy > 1) { 2603 --port->fp_statec_busy; 2604 mutex_exit(&port->fp_mutex); 2605 fctl_jobdone(job); 2606 break; 2607 } 2608 2609 switch (old_top = port->fp_topology) { 2610 case FC_TOP_PRIVATE_LOOP: 2611 oldtop = "Private Loop"; 2612 break; 2613 2614 case FC_TOP_PUBLIC_LOOP: 2615 oldtop = "Public Loop"; 2616 break; 2617 2618 case FC_TOP_PT_PT: 2619 oldtop = "Point to Point"; 2620 break; 2621 2622 case FC_TOP_FABRIC: 2623 oldtop = "Fabric"; 2624 break; 2625 2626 default: 2627 oldtop = NULL; 2628 break; 2629 } 2630 2631 port->fp_last_task = port->fp_task; 2632 port->fp_task = FP_TASK_ONLINE; 2633 2634 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2635 2636 port->fp_task = port->fp_last_task; 2637 port->fp_last_task = FP_TASK_IDLE; 2638 2639 if (port->fp_statec_busy > 1) { 2640 --port->fp_statec_busy; 2641 mutex_exit(&port->fp_mutex); 2642 break; 2643 } 2644 2645 port->fp_state = FC_STATE_OFFLINE; 2646 2647 FP_TRACE(FP_NHEAD2(9, rval), 2648 "Topology discovery failed"); 2649 2650 if (--port->fp_statec_busy == 0) { 2651 port->fp_soft_state &= 2652 ~FP_SOFT_IN_STATEC_CB; 2653 } 2654 2655 if (port->fp_offline_tid == NULL) { 2656 port->fp_offline_tid = 2657 timeout(fp_offline_timeout, 2658 (caddr_t)port, fp_offline_ticks); 2659 } 2660 2661 mutex_exit(&port->fp_mutex); 2662 break; 2663 } 2664 2665 switch (port->fp_topology) { 2666 case FC_TOP_PRIVATE_LOOP: 2667 newtop = "Private Loop"; 2668 break; 2669 2670 case FC_TOP_PUBLIC_LOOP: 2671 newtop = "Public Loop"; 2672 break; 2673 2674 case FC_TOP_PT_PT: 2675 newtop = "Point to Point"; 2676 break; 2677 2678 case FC_TOP_FABRIC: 2679 newtop = "Fabric"; 2680 break; 2681 2682 default: 2683 newtop = NULL; 2684 break; 2685 } 2686 2687 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2688 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2689 "Change in FC Topology old = %s new = %s", 2690 oldtop, newtop); 2691 } 2692 2693 switch (port->fp_topology) { 2694 case FC_TOP_PRIVATE_LOOP: { 2695 int orphan = (old_top == FC_TOP_FABRIC || 2696 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2697 2698 mutex_exit(&port->fp_mutex); 2699 fp_loop_online(port, job, orphan); 2700 break; 2701 } 2702 2703 case FC_TOP_PUBLIC_LOOP: 2704 /* FALLTHROUGH */ 2705 case FC_TOP_FABRIC: 2706 fp_fabric_online(port, job); 2707 mutex_exit(&port->fp_mutex); 2708 break; 2709 2710 case FC_TOP_PT_PT: 2711 fp_p2p_online(port, job); 2712 mutex_exit(&port->fp_mutex); 2713 break; 2714 2715 default: 2716 if (--port->fp_statec_busy != 0) { 2717 /* 2718 * Watch curiously at what the next 2719 * state transition can do. 2720 */ 2721 mutex_exit(&port->fp_mutex); 2722 break; 2723 } 2724 2725 FP_TRACE(FP_NHEAD2(9, 0), 2726 "Topology Unknown, Offlining the port.."); 2727 2728 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2729 port->fp_state = FC_STATE_OFFLINE; 2730 2731 if (port->fp_offline_tid == NULL) { 2732 port->fp_offline_tid = 2733 timeout(fp_offline_timeout, 2734 (caddr_t)port, fp_offline_ticks); 2735 } 2736 mutex_exit(&port->fp_mutex); 2737 break; 2738 } 2739 2740 mutex_enter(&port->fp_mutex); 2741 2742 port->fp_task = port->fp_last_task; 2743 port->fp_last_task = FP_TASK_IDLE; 2744 2745 mutex_exit(&port->fp_mutex); 2746 2747 fctl_jobdone(job); 2748 break; 2749 } 2750 2751 case JOB_PLOGI_GROUP: { 2752 mutex_exit(&port->fp_mutex); 2753 fp_plogi_group(port, job); 2754 break; 2755 } 2756 2757 case JOB_UNSOL_REQUEST: { 2758 mutex_exit(&port->fp_mutex); 2759 fp_handle_unsol_buf(port, 2760 (fc_unsol_buf_t *)job->job_private, job); 2761 fctl_dealloc_job(job); 2762 break; 2763 } 2764 2765 case JOB_NS_CMD: { 2766 fctl_ns_req_t *ns_cmd; 2767 2768 mutex_exit(&port->fp_mutex); 2769 2770 job->job_flags |= JOB_TYPE_FP_ASYNC; 2771 ns_cmd = (fctl_ns_req_t *)job->job_private; 2772 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2773 ns_cmd->ns_cmd_code > NS_DA_ID) { 2774 job->job_result = FC_BADCMD; 2775 fctl_jobdone(job); 2776 break; 2777 } 2778 2779 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2780 if (ns_cmd->ns_pd != NULL) { 2781 job->job_result = FC_BADOBJECT; 2782 fctl_jobdone(job); 2783 break; 2784 } 2785 2786 job->job_counter = 1; 2787 2788 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2789 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2790 2791 if (rval != FC_SUCCESS) { 2792 job->job_result = rval; 2793 fctl_jobdone(job); 2794 } 2795 break; 2796 } 2797 job->job_result = FC_SUCCESS; 2798 job->job_counter = 1; 2799 2800 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2801 if (rval != FC_SUCCESS) { 2802 fctl_jobdone(job); 2803 } 2804 break; 2805 } 2806 2807 case JOB_LINK_RESET: { 2808 la_wwn_t *pwwn; 2809 uint32_t topology; 2810 2811 pwwn = (la_wwn_t *)job->job_private; 2812 ASSERT(pwwn != NULL); 2813 2814 topology = port->fp_topology; 2815 mutex_exit(&port->fp_mutex); 2816 2817 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2818 topology == FC_TOP_PRIVATE_LOOP) { 2819 job->job_flags |= JOB_TYPE_FP_ASYNC; 2820 rval = port->fp_fca_tran->fca_reset( 2821 port->fp_fca_handle, FC_FCA_LINK_RESET); 2822 job->job_result = rval; 2823 fp_jobdone(job); 2824 } else { 2825 ASSERT((job->job_flags & 2826 JOB_TYPE_FP_ASYNC) == 0); 2827 2828 if (FC_IS_TOP_SWITCH(topology)) { 2829 rval = fp_remote_lip(port, pwwn, 2830 KM_SLEEP, job); 2831 } else { 2832 rval = FC_FAILURE; 2833 } 2834 if (rval != FC_SUCCESS) { 2835 job->job_result = rval; 2836 } 2837 fctl_jobdone(job); 2838 } 2839 break; 2840 } 2841 2842 default: 2843 mutex_exit(&port->fp_mutex); 2844 job->job_result = FC_BADCMD; 2845 fctl_jobdone(job); 2846 break; 2847 } 2848 } 2849 /* NOTREACHED */ 2850 } 2851 2852 2853 /* 2854 * Perform FC port bring up initialization 2855 */ 2856 static int 2857 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2858 { 2859 int rval; 2860 uint32_t state; 2861 uint32_t src_id; 2862 fc_lilpmap_t *lilp_map; 2863 2864 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2865 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2866 2867 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2868 " port=%p, job=%p", port, job); 2869 2870 port->fp_topology = FC_TOP_UNKNOWN; 2871 port->fp_port_id.port_id = 0; 2872 state = FC_PORT_STATE_MASK(port->fp_state); 2873 2874 if (state == FC_STATE_OFFLINE) { 2875 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2876 job->job_result = FC_OFFLINE; 2877 mutex_exit(&port->fp_mutex); 2878 fctl_jobdone(job); 2879 mutex_enter(&port->fp_mutex); 2880 return (FC_OFFLINE); 2881 } 2882 2883 if (state == FC_STATE_LOOP) { 2884 port->fp_port_type.port_type = FC_NS_PORT_NL; 2885 mutex_exit(&port->fp_mutex); 2886 2887 lilp_map = &port->fp_lilp_map; 2888 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2889 job->job_result = FC_FAILURE; 2890 fctl_jobdone(job); 2891 2892 FP_TRACE(FP_NHEAD1(9, rval), 2893 "LILP map Invalid or not present"); 2894 mutex_enter(&port->fp_mutex); 2895 return (FC_FAILURE); 2896 } 2897 2898 if (lilp_map->lilp_length == 0) { 2899 job->job_result = FC_NO_MAP; 2900 fctl_jobdone(job); 2901 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2902 "LILP map length zero"); 2903 mutex_enter(&port->fp_mutex); 2904 return (FC_NO_MAP); 2905 } 2906 src_id = lilp_map->lilp_myalpa & 0xFF; 2907 } else { 2908 fc_remote_port_t *pd; 2909 fc_fca_pm_t pm; 2910 fc_fca_p2p_info_t p2p_info; 2911 int pd_recepient; 2912 2913 /* 2914 * Get P2P remote port info if possible 2915 */ 2916 bzero((caddr_t)&pm, sizeof (pm)); 2917 2918 pm.pm_cmd_flags = FC_FCA_PM_READ; 2919 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2920 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2921 pm.pm_data_buf = (caddr_t)&p2p_info; 2922 2923 rval = port->fp_fca_tran->fca_port_manage( 2924 port->fp_fca_handle, &pm); 2925 2926 if (rval == FC_SUCCESS) { 2927 port->fp_port_id.port_id = p2p_info.fca_d_id; 2928 port->fp_port_type.port_type = FC_NS_PORT_N; 2929 port->fp_topology = FC_TOP_PT_PT; 2930 port->fp_total_devices = 1; 2931 pd_recepient = fctl_wwn_cmp( 2932 &port->fp_service_params.nport_ww_name, 2933 &p2p_info.pwwn) < 0 ? 
2934 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2935 mutex_exit(&port->fp_mutex);
2936 pd = fctl_create_remote_port(port,
2937 &p2p_info.nwwn,
2938 &p2p_info.pwwn,
2939 p2p_info.d_id,
2940 pd_recepient, KM_NOSLEEP);
2941 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2942 " P2P port=%p pd=%p", port, pd);
2943 mutex_enter(&port->fp_mutex);
2944 return (FC_SUCCESS);
2945 }
2946 port->fp_port_type.port_type = FC_NS_PORT_N;
2947 mutex_exit(&port->fp_mutex);
2948 src_id = 0;
2949 }
2950
2951 job->job_counter = 1;
2952 job->job_result = FC_SUCCESS;
2953
2954 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2955 KM_SLEEP)) != FC_SUCCESS) {
2956 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2957 job->job_result = FC_FAILURE;
2958 fctl_jobdone(job);
2959
2960 mutex_enter(&port->fp_mutex);
2961 if (port->fp_statec_busy <= 1) {
2962 mutex_exit(&port->fp_mutex);
2963 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2964 "Couldn't transport FLOGI");
2965 mutex_enter(&port->fp_mutex);
2966 }
2967 return (FC_FAILURE);
2968 }
2969
2970 fp_jobwait(job);
2971
2972 mutex_enter(&port->fp_mutex);
2973 if (job->job_result == FC_SUCCESS) {
2974 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2975 mutex_exit(&port->fp_mutex);
2976 fp_ns_init(port, job, KM_SLEEP);
2977 mutex_enter(&port->fp_mutex);
2978 }
2979 } else {
2980 if (state == FC_STATE_LOOP) {
2981 port->fp_topology = FC_TOP_PRIVATE_LOOP;
2982 port->fp_port_id.port_id =
2983 port->fp_lilp_map.lilp_myalpa & 0xFF;
2984 }
2985 }
2986
2987 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2988 port, job);
2989
2990 return (FC_SUCCESS);
2991 }
2992
2993
2994 /*
2995 * Perform ULP invocations following FC port startup
2996 */
2997 /* ARGSUSED */
2998 static void
2999 fp_startup_done(opaque_t arg, uchar_t result)
3000 {
3001 fc_local_port_t *port = arg;
3002
3003 fp_attach_ulps(port, FC_CMD_ATTACH);
3004
3005 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
3006 }
3007
3008
3009 /*
3010 * Perform ULP port attach
3011 */
3012 static void
3013 fp_ulp_port_attach(void *arg)
3014 {
3015 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
3016 fc_local_port_t *port = att->att_port;
3017
3018 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3019 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3020
3021 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3022
3023 if (att->att_need_pm_idle == B_TRUE) {
3024 fctl_idle_port(port);
3025 }
3026
3027 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3028 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3029
3030 mutex_enter(&att->att_port->fp_mutex);
3031 att->att_port->fp_ulp_attach = 0;
3032
3033 port->fp_task = port->fp_last_task;
3034 port->fp_last_task = FP_TASK_IDLE;
3035
3036 cv_signal(&att->att_port->fp_attach_cv);
3037
3038 mutex_exit(&att->att_port->fp_mutex);
3039
3040 kmem_free(att, sizeof (fp_soft_attach_t));
3041 }
3042
3043 /*
3044 * Entry point to funnel all requests down to FCAs
3045 */
3046 static int
3047 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3048 {
3049 int rval;
3050
3051 mutex_enter(&port->fp_mutex);
3052 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3053 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3054 FC_STATE_OFFLINE))) {
3055 /*
3056 * More than one state change is pending at this point.
3057 * Since state changes are processed
3058 * serially, processing of the current one should
3059 * be failed so that we can move on to the next one.
3060 */
3061 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3062 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3063 if (cmd->cmd_job) {
3064 /*
3065 * A state change that is going to be invalidated
3066 * by another one already in the port driver's queue
3067 * need not go up to all ULPs. This will minimize
3068 * needless processing and ripples in ULP modules
3069 */
3070 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3071 }
3072 mutex_exit(&port->fp_mutex);
3073 return (FC_STATEC_BUSY);
3074 }
3075
3076 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3077 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3078 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3079 mutex_exit(&port->fp_mutex);
3080
3081 return (FC_OFFLINE);
3082 }
3083 mutex_exit(&port->fp_mutex);
3084
3085 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3086 if (rval != FC_SUCCESS) {
3087 if (rval == FC_TRAN_BUSY) {
3088 cmd->cmd_retry_interval = fp_retry_delay;
3089 rval = fp_retry_cmd(&cmd->cmd_pkt);
3090 if (rval == FC_FAILURE) {
3091 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3092 }
3093 }
3094 } else {
3095 mutex_enter(&port->fp_mutex);
3096 port->fp_out_fpcmds++;
3097 mutex_exit(&port->fp_mutex);
3098 }
3099
3100 return (rval);
3101 }
3102
3103
3104 /*
3105 * Each time the timeout fires, walk the wait queue and decrement
3106 * the retry_interval; when the retry_interval becomes less than
3107 * or equal to zero, re-transport the command. If the re-transport
3108 * fails with BUSY, enqueue the command back on the wait queue.
3109 *
3110 * In order to prevent looping forever because of commands enqueued
3111 * from within this function itself, save the current tail pointer
3112 * (in cur_tail) and exit the loop after serving this command.
3113 */
3114 static void
3115 fp_resendcmd(void *port_handle)
3116 {
3117 int rval;
3118 fc_local_port_t *port;
3119 fp_cmd_t *cmd;
3120 fp_cmd_t *cur_tail;
3121
3122 port = port_handle;
3123 mutex_enter(&port->fp_mutex);
3124 cur_tail = port->fp_wait_tail;
3125 mutex_exit(&port->fp_mutex);
3126
3127 while ((cmd = fp_deque_cmd(port)) != NULL) {
3128 cmd->cmd_retry_interval -= fp_retry_ticker;
3129 /* Check if we are detaching */
3130 if (port->fp_soft_state &
3131 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3132 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3133 cmd->cmd_pkt.pkt_reason = 0;
3134 fp_iodone(cmd);
3135 } else if (cmd->cmd_retry_interval <= 0) {
3136 rval = cmd->cmd_transport(port->fp_fca_handle,
3137 &cmd->cmd_pkt);
3138
3139 if (rval != FC_SUCCESS) {
3140 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3141 if (--cmd->cmd_retry_count) {
3142 fp_enque_cmd(port, cmd);
3143 if (cmd == cur_tail) {
3144 break;
3145 }
3146 continue;
3147 }
3148 cmd->cmd_pkt.pkt_state =
3149 FC_PKT_TRAN_BSY;
3150 } else {
3151 cmd->cmd_pkt.pkt_state =
3152 FC_PKT_TRAN_ERROR;
3153 }
3154 cmd->cmd_pkt.pkt_reason = 0;
3155 fp_iodone(cmd);
3156 } else {
3157 mutex_enter(&port->fp_mutex);
3158 port->fp_out_fpcmds++;
3159 mutex_exit(&port->fp_mutex);
3160 }
3161 } else {
3162 fp_enque_cmd(port, cmd);
3163 }
3164
3165 if (cmd == cur_tail) {
3166 break;
3167 }
3168 }
3169
3170 mutex_enter(&port->fp_mutex);
3171 if (port->fp_wait_head) {
3172 timeout_id_t tid;
3173
3174 mutex_exit(&port->fp_mutex);
3175 tid = timeout(fp_resendcmd, (caddr_t)port,
3176 fp_retry_ticks);
3177 mutex_enter(&port->fp_mutex);
3178 port->fp_wait_tid = tid;
3179 } else {
3180 port->fp_wait_tid = NULL;
3181 }
3182 mutex_exit(&port->fp_mutex);
3183 }
3184
3185
3186 /*
3187 * Handle Local, Fabric, N_Port, Transport
(whatever that means) BUSY here. 3188 * 3189 * Yes, as you can see below, cmd_retry_count is used here too. That means 3190 * the retries for BUSY are less if there were transport failures (transport 3191 * failure means fca_transport failure). The goal is not to exceed overall 3192 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3193 * 3194 * Return Values: 3195 * FC_SUCCESS 3196 * FC_FAILURE 3197 */ 3198 static int 3199 fp_retry_cmd(fc_packet_t *pkt) 3200 { 3201 fp_cmd_t *cmd; 3202 3203 cmd = pkt->pkt_ulp_private; 3204 3205 if (--cmd->cmd_retry_count) { 3206 fp_enque_cmd(cmd->cmd_port, cmd); 3207 return (FC_SUCCESS); 3208 } else { 3209 return (FC_FAILURE); 3210 } 3211 } 3212 3213 3214 /* 3215 * Queue up FC packet for deferred retry 3216 */ 3217 static void 3218 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3219 { 3220 timeout_id_t tid; 3221 3222 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3223 3224 #ifdef DEBUG 3225 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3226 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3227 #endif 3228 3229 mutex_enter(&port->fp_mutex); 3230 if (port->fp_wait_tail) { 3231 port->fp_wait_tail->cmd_next = cmd; 3232 port->fp_wait_tail = cmd; 3233 } else { 3234 ASSERT(port->fp_wait_head == NULL); 3235 port->fp_wait_head = port->fp_wait_tail = cmd; 3236 if (port->fp_wait_tid == NULL) { 3237 mutex_exit(&port->fp_mutex); 3238 tid = timeout(fp_resendcmd, (caddr_t)port, 3239 fp_retry_ticks); 3240 mutex_enter(&port->fp_mutex); 3241 port->fp_wait_tid = tid; 3242 } 3243 } 3244 mutex_exit(&port->fp_mutex); 3245 } 3246 3247 3248 /* 3249 * Handle all RJT codes 3250 */ 3251 static int 3252 fp_handle_reject(fc_packet_t *pkt) 3253 { 3254 int rval = FC_FAILURE; 3255 uchar_t next_class; 3256 fp_cmd_t *cmd; 3257 fc_local_port_t *port; 3258 3259 cmd = pkt->pkt_ulp_private; 3260 port = cmd->cmd_port; 3261 3262 switch (pkt->pkt_state) { 3263 case FC_PKT_FABRIC_RJT: 3264 case FC_PKT_NPORT_RJT: 3265 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3266 next_class = fp_get_nextclass(cmd->cmd_port, 3267 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3268 3269 if (next_class == FC_TRAN_CLASS_INVALID) { 3270 return (rval); 3271 } 3272 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3273 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3274 3275 rval = fp_sendcmd(cmd->cmd_port, cmd, 3276 cmd->cmd_port->fp_fca_handle); 3277 3278 if (rval != FC_SUCCESS) { 3279 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3280 } 3281 } 3282 break; 3283 3284 case FC_PKT_LS_RJT: 3285 case FC_PKT_BA_RJT: 3286 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3287 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3288 cmd->cmd_retry_interval = fp_retry_delay; 3289 rval = fp_retry_cmd(pkt); 3290 } 3291 break; 3292 3293 case FC_PKT_FS_RJT: 3294 if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) { 3295 cmd->cmd_retry_interval = fp_retry_delay; 3296 rval = fp_retry_cmd(pkt); 3297 } 3298 break; 3299 3300 case FC_PKT_LOCAL_RJT: 3301 if (pkt->pkt_reason == FC_REASON_QFULL) { 3302 cmd->cmd_retry_interval = fp_retry_delay; 3303 rval = fp_retry_cmd(pkt); 3304 } 3305 break; 3306 3307 default: 3308 FP_TRACE(FP_NHEAD1(1, 0), 3309 "fp_handle_reject(): Invalid pkt_state"); 3310 break; 3311 } 3312 3313 return (rval); 3314 } 3315 3316 3317 /* 3318 * Return the next class of service supported by the FCA 3319 */ 3320 static uchar_t 3321 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3322 { 3323 uchar_t next_class; 3324 3325 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3326 3327 switch (cur_class) { 3328 case 
FC_TRAN_CLASS_INVALID: 3329 if (port->fp_cos & FC_NS_CLASS1) { 3330 next_class = FC_TRAN_CLASS1; 3331 break; 3332 } 3333 /* FALLTHROUGH */ 3334 3335 case FC_TRAN_CLASS1: 3336 if (port->fp_cos & FC_NS_CLASS2) { 3337 next_class = FC_TRAN_CLASS2; 3338 break; 3339 } 3340 /* FALLTHROUGH */ 3341 3342 case FC_TRAN_CLASS2: 3343 if (port->fp_cos & FC_NS_CLASS3) { 3344 next_class = FC_TRAN_CLASS3; 3345 break; 3346 } 3347 /* FALLTHROUGH */ 3348 3349 case FC_TRAN_CLASS3: 3350 default: 3351 next_class = FC_TRAN_CLASS_INVALID; 3352 break; 3353 } 3354 3355 return (next_class); 3356 } 3357 3358 3359 /* 3360 * Determine if a class of service is supported by the FCA 3361 */ 3362 static int 3363 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3364 { 3365 int rval; 3366 3367 switch (tran_class) { 3368 case FC_TRAN_CLASS1: 3369 if (cos & FC_NS_CLASS1) { 3370 rval = FC_SUCCESS; 3371 } else { 3372 rval = FC_FAILURE; 3373 } 3374 break; 3375 3376 case FC_TRAN_CLASS2: 3377 if (cos & FC_NS_CLASS2) { 3378 rval = FC_SUCCESS; 3379 } else { 3380 rval = FC_FAILURE; 3381 } 3382 break; 3383 3384 case FC_TRAN_CLASS3: 3385 if (cos & FC_NS_CLASS3) { 3386 rval = FC_SUCCESS; 3387 } else { 3388 rval = FC_FAILURE; 3389 } 3390 break; 3391 3392 default: 3393 rval = FC_FAILURE; 3394 break; 3395 } 3396 3397 return (rval); 3398 } 3399 3400 3401 /* 3402 * Dequeue FC packet for retry 3403 */ 3404 static fp_cmd_t * 3405 fp_deque_cmd(fc_local_port_t *port) 3406 { 3407 fp_cmd_t *cmd; 3408 3409 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3410 3411 mutex_enter(&port->fp_mutex); 3412 3413 if (port->fp_wait_head == NULL) { 3414 /* 3415 * To avoid races, NULL the fp_wait_tid as 3416 * we are about to exit the timeout thread. 3417 */ 3418 port->fp_wait_tid = NULL; 3419 mutex_exit(&port->fp_mutex); 3420 return (NULL); 3421 } 3422 3423 cmd = port->fp_wait_head; 3424 port->fp_wait_head = cmd->cmd_next; 3425 cmd->cmd_next = NULL; 3426 3427 if (port->fp_wait_head == NULL) { 3428 port->fp_wait_tail = NULL; 3429 } 3430 mutex_exit(&port->fp_mutex); 3431 3432 return (cmd); 3433 } 3434 3435 3436 /* 3437 * Wait for job completion 3438 */ 3439 static void 3440 fp_jobwait(job_request_t *job) 3441 { 3442 sema_p(&job->job_port_sema); 3443 } 3444 3445 3446 /* 3447 * Convert FC packet state to FC errno 3448 */ 3449 int 3450 fp_state_to_rval(uchar_t state) 3451 { 3452 int count; 3453 3454 for (count = 0; count < sizeof (fp_xlat) / 3455 sizeof (fp_xlat[0]); count++) { 3456 if (fp_xlat[count].xlat_state == state) { 3457 return (fp_xlat[count].xlat_rval); 3458 } 3459 } 3460 3461 return (FC_FAILURE); 3462 } 3463 3464 3465 /* 3466 * For Synchronous I/O requests, the caller is 3467 * expected to do fctl_jobdone(if necessary) 3468 * 3469 * We want to preserve at least one failure in the 3470 * job_result if it happens. 
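 * (fp_jobdone() decrements the job counter; when it reaches zero it
 * wakes the synchronous waiter or calls fctl_jobdone() for async jobs.)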
3471 *
3472 */
3473 static void
3474 fp_iodone(fp_cmd_t *cmd)
3475 {
3476 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3477 job_request_t *job = cmd->cmd_job;
3478 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3479
3480 ASSERT(job != NULL);
3481 ASSERT(cmd->cmd_port != NULL);
3482 ASSERT(&cmd->cmd_pkt != NULL);
3483
3484 mutex_enter(&job->job_mutex);
3485 if (job->job_result == FC_SUCCESS) {
3486 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3487 }
3488 mutex_exit(&job->job_mutex);
3489
3490 if (pd) {
3491 mutex_enter(&pd->pd_mutex);
3492 pd->pd_flags = PD_IDLE;
3493 mutex_exit(&pd->pd_mutex);
3494 }
3495
3496 if (ulp_pkt) {
3497 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3498 FP_IS_PKT_ERROR(ulp_pkt)) {
3499 fc_local_port_t *port;
3500 fc_remote_node_t *node;
3501
3502 port = cmd->cmd_port;
3503
3504 mutex_enter(&pd->pd_mutex);
3505 pd->pd_state = PORT_DEVICE_INVALID;
3506 pd->pd_ref_count--;
3507 node = pd->pd_remote_nodep;
3508 mutex_exit(&pd->pd_mutex);
3509
3510 ASSERT(node != NULL);
3511 ASSERT(port != NULL);
3512
3513 if (fctl_destroy_remote_port(port, pd) == 0) {
3514 fctl_destroy_remote_node(node);
3515 }
3516
3517 ulp_pkt->pkt_pd = NULL;
3518 }
3519
3520 ulp_pkt->pkt_comp(ulp_pkt);
3521 }
3522
3523 fp_free_pkt(cmd);
3524 fp_jobdone(job);
3525 }
3526
3527
3528 /*
3529 * Job completion handler
3530 */
3531 static void
3532 fp_jobdone(job_request_t *job)
3533 {
3534 mutex_enter(&job->job_mutex);
3535 ASSERT(job->job_counter > 0);
3536
3537 if (--job->job_counter != 0) {
3538 mutex_exit(&job->job_mutex);
3539 return;
3540 }
3541
3542 if (job->job_ulp_pkts) {
3543 ASSERT(job->job_ulp_listlen > 0);
3544 kmem_free(job->job_ulp_pkts,
3545 sizeof (fc_packet_t *) * job->job_ulp_listlen);
3546 }
3547
3548 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3549 mutex_exit(&job->job_mutex);
3550 fctl_jobdone(job);
3551 } else {
3552 mutex_exit(&job->job_mutex);
3553 sema_v(&job->job_port_sema);
3554 }
3555 }
3556
3557
3558 /*
3559 * Try to perform shutdown of a port during a detach. No return
3560 * value since the detach should not fail because the port shutdown
3561 * failed.
3562 */
3563 static void
3564 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3565 {
3566 int index;
3567 int count;
3568 int flags;
3569 fp_cmd_t *cmd;
3570 struct pwwn_hash *head;
3571 fc_remote_port_t *pd;
3572
3573 ASSERT(MUTEX_HELD(&port->fp_mutex));
3574
3575 job->job_result = FC_SUCCESS;
3576
3577 if (port->fp_taskq) {
3578 /*
3579 * We must release the mutex here to ensure that other
3580 * potential jobs can complete their processing. Many
3581 * of them also need this mutex.
3582 */
3583 mutex_exit(&port->fp_mutex);
3584 taskq_wait(port->fp_taskq);
3585 mutex_enter(&port->fp_mutex);
3586 }
3587
3588 if (port->fp_offline_tid) {
3589 timeout_id_t tid;
3590
3591 tid = port->fp_offline_tid;
3592 port->fp_offline_tid = NULL;
3593 mutex_exit(&port->fp_mutex);
3594 (void) untimeout(tid);
3595 mutex_enter(&port->fp_mutex);
3596 }
3597
3598 if (port->fp_wait_tid) {
3599 timeout_id_t tid;
3600
3601 tid = port->fp_wait_tid;
3602 port->fp_wait_tid = NULL;
3603 mutex_exit(&port->fp_mutex);
3604 (void) untimeout(tid);
3605 } else {
3606 mutex_exit(&port->fp_mutex);
3607 }
3608
3609 /*
3610 * While we cancel the timeouts, let's also return the
3611 * outstanding requests back to the callers.
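 * (Each command still on the wait queue is completed with FC_OFFLINE.)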
3612 */ 3613 while ((cmd = fp_deque_cmd(port)) != NULL) { 3614 ASSERT(cmd->cmd_job != NULL); 3615 cmd->cmd_job->job_result = FC_OFFLINE; 3616 fp_iodone(cmd); 3617 } 3618 3619 /* 3620 * Gracefully LOGO with all the devices logged in. 3621 */ 3622 mutex_enter(&port->fp_mutex); 3623 3624 for (count = index = 0; index < pwwn_table_size; index++) { 3625 head = &port->fp_pwwn_table[index]; 3626 pd = head->pwwn_head; 3627 while (pd != NULL) { 3628 mutex_enter(&pd->pd_mutex); 3629 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3630 count++; 3631 } 3632 mutex_exit(&pd->pd_mutex); 3633 pd = pd->pd_wwn_hnext; 3634 } 3635 } 3636 3637 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3638 flags = job->job_flags; 3639 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3640 } else { 3641 flags = 0; 3642 } 3643 if (count) { 3644 job->job_counter = count; 3645 3646 for (index = 0; index < pwwn_table_size; index++) { 3647 head = &port->fp_pwwn_table[index]; 3648 pd = head->pwwn_head; 3649 while (pd != NULL) { 3650 mutex_enter(&pd->pd_mutex); 3651 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3652 ASSERT(pd->pd_login_count > 0); 3653 /* 3654 * Force the counter to ONE in order 3655 * for us to really send LOGO els. 3656 */ 3657 pd->pd_login_count = 1; 3658 mutex_exit(&pd->pd_mutex); 3659 mutex_exit(&port->fp_mutex); 3660 (void) fp_logout(port, pd, job); 3661 mutex_enter(&port->fp_mutex); 3662 } else { 3663 mutex_exit(&pd->pd_mutex); 3664 } 3665 pd = pd->pd_wwn_hnext; 3666 } 3667 } 3668 mutex_exit(&port->fp_mutex); 3669 fp_jobwait(job); 3670 } else { 3671 mutex_exit(&port->fp_mutex); 3672 } 3673 3674 if (job->job_result != FC_SUCCESS) { 3675 FP_TRACE(FP_NHEAD1(9, 0), 3676 "Can't logout all devices. Proceeding with" 3677 " port shutdown"); 3678 job->job_result = FC_SUCCESS; 3679 } 3680 3681 fctl_destroy_all_remote_ports(port); 3682 3683 mutex_enter(&port->fp_mutex); 3684 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3685 mutex_exit(&port->fp_mutex); 3686 fp_ns_fini(port, job); 3687 } else { 3688 mutex_exit(&port->fp_mutex); 3689 } 3690 3691 if (flags) { 3692 job->job_flags = flags; 3693 } 3694 3695 mutex_enter(&port->fp_mutex); 3696 3697 } 3698 3699 3700 /* 3701 * Build the port driver's data structures based on the AL_PA list 3702 */ 3703 static void 3704 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3705 { 3706 int rval; 3707 int flag; 3708 int count; 3709 uint32_t d_id; 3710 fc_remote_port_t *pd; 3711 fc_lilpmap_t *lilp_map; 3712 3713 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3714 3715 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3716 job->job_result = FC_OFFLINE; 3717 mutex_exit(&port->fp_mutex); 3718 fp_jobdone(job); 3719 mutex_enter(&port->fp_mutex); 3720 return; 3721 } 3722 3723 if (port->fp_lilp_map.lilp_length == 0) { 3724 mutex_exit(&port->fp_mutex); 3725 job->job_result = FC_NO_MAP; 3726 fp_jobdone(job); 3727 mutex_enter(&port->fp_mutex); 3728 return; 3729 } 3730 mutex_exit(&port->fp_mutex); 3731 3732 lilp_map = &port->fp_lilp_map; 3733 job->job_counter = lilp_map->lilp_length; 3734 3735 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3736 flag = FP_CMD_PLOGI_RETAIN; 3737 } else { 3738 flag = FP_CMD_PLOGI_DONT_CARE; 3739 } 3740 3741 for (count = 0; count < lilp_map->lilp_length; count++) { 3742 d_id = lilp_map->lilp_alpalist[count]; 3743 3744 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3745 fp_jobdone(job); 3746 continue; 3747 } 3748 3749 pd = fctl_get_remote_port_by_did(port, d_id); 3750 if (pd) { 3751 mutex_enter(&pd->pd_mutex); 3752 if (flag == FP_CMD_PLOGI_DONT_CARE || 3753 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3754 mutex_exit(&pd->pd_mutex); 3755 fp_jobdone(job); 3756 continue; 3757 } 3758 mutex_exit(&pd->pd_mutex); 3759 } 3760 3761 rval = fp_port_login(port, d_id, job, flag, 3762 KM_SLEEP, pd, NULL); 3763 if (rval != FC_SUCCESS) { 3764 fp_jobdone(job); 3765 } 3766 } 3767 3768 mutex_enter(&port->fp_mutex); 3769 } 3770 3771 3772 /* 3773 * Perform loop ONLINE processing 3774 */ 3775 static void 3776 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3777 { 3778 int count; 3779 int rval; 3780 uint32_t d_id; 3781 uint32_t listlen; 3782 fc_lilpmap_t *lilp_map; 3783 fc_remote_port_t *pd; 3784 fc_portmap_t *changelist; 3785 3786 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3787 3788 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3789 port, job); 3790 3791 lilp_map = &port->fp_lilp_map; 3792 3793 if (lilp_map->lilp_length) { 3794 mutex_enter(&port->fp_mutex); 3795 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3796 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3797 mutex_exit(&port->fp_mutex); 3798 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3799 } else { 3800 mutex_exit(&port->fp_mutex); 3801 } 3802 3803 job->job_counter = lilp_map->lilp_length; 3804 3805 for (count = 0; count < lilp_map->lilp_length; count++) { 3806 d_id = lilp_map->lilp_alpalist[count]; 3807 3808 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3809 fp_jobdone(job); 3810 continue; 3811 } 3812 3813 pd = fctl_get_remote_port_by_did(port, d_id); 3814 if (pd != NULL) { 3815 #ifdef DEBUG 3816 mutex_enter(&pd->pd_mutex); 3817 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3818 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3819 } 3820 mutex_exit(&pd->pd_mutex); 3821 #endif 3822 fp_jobdone(job); 3823 continue; 3824 } 3825 3826 rval = fp_port_login(port, d_id, job, 3827 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3828 3829 if (rval != FC_SUCCESS) { 3830 fp_jobdone(job); 3831 } 3832 } 3833 fp_jobwait(job); 3834 } 3835 listlen = 0; 3836 changelist = NULL; 3837 3838 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3839 mutex_enter(&port->fp_mutex); 3840 ASSERT(port->fp_statec_busy > 0); 3841 if (port->fp_statec_busy == 1) { 3842 mutex_exit(&port->fp_mutex); 3843 fctl_fillout_map(port, &changelist, &listlen, 3844 1, 0, orphan); 3845 3846 mutex_enter(&port->fp_mutex); 3847 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3848 ASSERT(port->fp_total_devices == 0); 3849 port->fp_total_devices = port->fp_dev_count; 3850 } 3851 } else { 3852 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3853 } 3854 mutex_exit(&port->fp_mutex); 3855 } 3856 3857 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3858 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3859 listlen, listlen, KM_SLEEP); 3860 } else { 3861 mutex_enter(&port->fp_mutex); 3862 if (--port->fp_statec_busy == 0) { 3863 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3864 } 3865 ASSERT(changelist == NULL && listlen == 0); 3866 mutex_exit(&port->fp_mutex); 3867 } 3868 3869 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3870 port, job); 3871 } 3872 3873 3874 /* 3875 * Get an Arbitrated Loop map from the underlying FCA 3876 */ 3877 static int 3878 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3879 { 3880 int rval; 3881 3882 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3883 port, lilp_map); 3884 3885 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3886 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3887 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3888 3889 if (rval != FC_SUCCESS) { 3890 rval = FC_NO_MAP; 3891 } else if (lilp_map->lilp_length == 0 && 3892 (lilp_map->lilp_magic >= MAGIC_LISM && 3893 lilp_map->lilp_magic < MAGIC_LIRP)) { 3894 uchar_t lilp_length; 3895 3896 /* 3897 * Since the map length is zero, provide all 3898 * the valid AL_PAs for NL_ports discovery. 3899 */ 3900 lilp_length = sizeof (fp_valid_alpas) / 3901 sizeof (fp_valid_alpas[0]); 3902 lilp_map->lilp_length = lilp_length; 3903 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3904 lilp_length); 3905 } else { 3906 rval = fp_validate_lilp_map(lilp_map); 3907 3908 if (rval == FC_SUCCESS) { 3909 mutex_enter(&port->fp_mutex); 3910 port->fp_total_devices = lilp_map->lilp_length - 1; 3911 mutex_exit(&port->fp_mutex); 3912 } 3913 } 3914 3915 mutex_enter(&port->fp_mutex); 3916 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3917 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3918 mutex_exit(&port->fp_mutex); 3919 3920 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3921 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3922 FP_TRACE(FP_NHEAD1(9, 0), 3923 "FCA reset failed after LILP map was found" 3924 " to be invalid"); 3925 } 3926 } else if (rval == FC_SUCCESS) { 3927 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3928 mutex_exit(&port->fp_mutex); 3929 } else { 3930 mutex_exit(&port->fp_mutex); 3931 } 3932 3933 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3934 lilp_map); 3935 3936 return (rval); 3937 } 3938 3939 3940 /* 3941 * Perform Fabric Login: 3942 * 3943 * Return Values: 3944 * FC_SUCCESS 3945 * FC_FAILURE 3946 * FC_NOMEM 3947 * FC_TRANSPORT_ERROR 3948 * and a lot others defined in fc_error.h 3949 */ 3950 static int 3951 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3952 int flag, int sleep) 3953 { 3954 int rval; 3955 fp_cmd_t *cmd; 3956 uchar_t class; 3957 3958 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3959 3960 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3961 port, job); 3962 3963 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3964 if (class == FC_TRAN_CLASS_INVALID) { 3965 return (FC_ELS_BAD); 3966 } 3967 3968 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3969 sizeof (la_els_logi_t), sleep, NULL); 3970 if (cmd == NULL) { 3971 return (FC_NOMEM); 3972 } 3973 3974 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3975 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3976 cmd->cmd_flags = flag; 3977 cmd->cmd_retry_count = fp_retry_count; 3978 cmd->cmd_ulp_pkt = NULL; 3979 3980 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3981 job, LA_ELS_FLOGI); 3982 3983 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3984 if (rval != FC_SUCCESS) { 3985 fp_free_pkt(cmd); 3986 } 3987 3988 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3989 port, job); 3990 3991 return (rval); 3992 } 3993 3994 3995 /* 3996 * In some scenarios such as private loop device discovery period 3997 * the fc_remote_port_t data structure isn't allocated. The allocation 3998 * is done when the PLOGI is successful. 
In some other scenarios 3999 * such as Fabric topology, the fc_remote_port_t is already created 4000 * and initialized with appropriate values (as the NS provides 4001 * them) 4002 */ 4003 static int 4004 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4005 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4006 { 4007 uchar_t class; 4008 fp_cmd_t *cmd; 4009 uint32_t src_id; 4010 fc_remote_port_t *tmp_pd; 4011 int relogin; 4012 int found = 0; 4013 4014 #ifdef DEBUG 4015 if (pd == NULL) { 4016 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4017 } 4018 #endif 4019 ASSERT(job->job_counter > 0); 4020 4021 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4022 if (class == FC_TRAN_CLASS_INVALID) { 4023 return (FC_ELS_BAD); 4024 } 4025 4026 mutex_enter(&port->fp_mutex); 4027 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4028 mutex_exit(&port->fp_mutex); 4029 4030 relogin = 1; 4031 if (tmp_pd) { 4032 mutex_enter(&tmp_pd->pd_mutex); 4033 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4034 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4035 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4036 relogin = 0; 4037 } 4038 mutex_exit(&tmp_pd->pd_mutex); 4039 } 4040 4041 if (!relogin) { 4042 mutex_enter(&tmp_pd->pd_mutex); 4043 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4044 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4045 } 4046 mutex_exit(&tmp_pd->pd_mutex); 4047 4048 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4049 sizeof (la_els_adisc_t), sleep, tmp_pd); 4050 if (cmd == NULL) { 4051 return (FC_NOMEM); 4052 } 4053 4054 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4055 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4056 cmd->cmd_flags = cmd_flag; 4057 cmd->cmd_retry_count = fp_retry_count; 4058 cmd->cmd_ulp_pkt = ulp_pkt; 4059 4060 mutex_enter(&port->fp_mutex); 4061 mutex_enter(&tmp_pd->pd_mutex); 4062 fp_adisc_init(cmd, job); 4063 mutex_exit(&tmp_pd->pd_mutex); 4064 mutex_exit(&port->fp_mutex); 4065 4066 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4067 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4068 4069 } else { 4070 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4071 sizeof (la_els_logi_t), sleep, pd); 4072 if (cmd == NULL) { 4073 return (FC_NOMEM); 4074 } 4075 4076 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4077 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4078 cmd->cmd_flags = cmd_flag; 4079 cmd->cmd_retry_count = fp_retry_count; 4080 cmd->cmd_ulp_pkt = ulp_pkt; 4081 4082 mutex_enter(&port->fp_mutex); 4083 src_id = port->fp_port_id.port_id; 4084 mutex_exit(&port->fp_mutex); 4085 4086 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4087 job, LA_ELS_PLOGI); 4088 } 4089 4090 if (pd) { 4091 mutex_enter(&pd->pd_mutex); 4092 pd->pd_flags = PD_ELS_IN_PROGRESS; 4093 mutex_exit(&pd->pd_mutex); 4094 } 4095 4096 /* npiv check to make sure we don't log into ourself */ 4097 if (relogin && 4098 ((port->fp_npiv_type == FC_NPIV_PORT) || 4099 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4100 if ((d_id & 0xffff00) == 4101 (port->fp_port_id.port_id & 0xffff00)) { 4102 found = 1; 4103 } 4104 } 4105 4106 if (found || 4107 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4108 if (found) { 4109 fc_packet_t *pkt = &cmd->cmd_pkt; 4110 pkt->pkt_state = FC_PKT_NPORT_RJT; 4111 } 4112 if (pd) { 4113 mutex_enter(&pd->pd_mutex); 4114 pd->pd_flags = PD_IDLE; 4115 mutex_exit(&pd->pd_mutex); 4116 } 4117 4118 if (ulp_pkt) { 4119 fc_packet_t *pkt = &cmd->cmd_pkt; 4120 4121 ulp_pkt->pkt_state = pkt->pkt_state; 4122 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4123 ulp_pkt->pkt_action = pkt->pkt_action; 4124 ulp_pkt->pkt_expln = pkt->pkt_expln; 4125 } 4126 4127 fp_iodone(cmd); 4128 } 4129 4130 return (FC_SUCCESS); 4131 } 4132 4133 4134 /* 4135 * Register the LOGIN parameters with a port device 4136 */ 4137 static void 4138 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4139 la_els_logi_t *acc, uchar_t class) 4140 { 4141 fc_remote_node_t *node; 4142 4143 ASSERT(pd != NULL); 4144 4145 mutex_enter(&pd->pd_mutex); 4146 node = pd->pd_remote_nodep; 4147 if (pd->pd_login_count == 0) { 4148 pd->pd_login_count++; 4149 } 4150 4151 if (handle) { 4152 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4153 (uint8_t *)&acc->common_service, 4154 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4155 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4156 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4157 DDI_DEV_AUTOINCR); 4158 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4159 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4160 DDI_DEV_AUTOINCR); 4161 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4162 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4163 DDI_DEV_AUTOINCR); 4164 } else { 4165 pd->pd_csp = acc->common_service; 4166 pd->pd_clsp1 = acc->class_1; 4167 pd->pd_clsp2 = acc->class_2; 4168 pd->pd_clsp3 = acc->class_3; 4169 } 4170 4171 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4172 pd->pd_login_class = class; 4173 mutex_exit(&pd->pd_mutex); 4174 4175 #ifndef __lock_lint 4176 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4177 pd->pd_port_id.port_id) == pd); 4178 #endif 4179 4180 mutex_enter(&node->fd_mutex); 4181 if (handle) { 4182 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4183 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4184 DDI_DEV_AUTOINCR); 4185 } else { 4186 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4187 } 4188 mutex_exit(&node->fd_mutex); 4189 } 4190 4191 4192 /* 4193 * Mark the remote port as OFFLINE 4194 */ 4195 static void 4196 fp_remote_port_offline(fc_remote_port_t *pd) 4197 { 4198 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4199 if (pd->pd_login_count && 4200 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4201 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4202 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4203 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4204 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4205 pd->pd_login_class = 0; 4206 } 4207 pd->pd_type = PORT_DEVICE_OLD; 4208 pd->pd_flags = PD_IDLE; 4209 fctl_tc_reset(&pd->pd_logo_tc); 4210 } 4211 4212 4213 /* 4214 * Deregistration of a port device 4215 */ 4216 static void 4217 fp_unregister_login(fc_remote_port_t *pd) 4218 { 4219 fc_remote_node_t *node; 4220 4221 ASSERT(pd != NULL); 4222 4223 mutex_enter(&pd->pd_mutex); 4224 pd->pd_login_count = 0; 4225 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4226 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4227 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4228 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4229 4230 pd->pd_state = PORT_DEVICE_VALID; 4231 pd->pd_login_class = 0; 4232 node = pd->pd_remote_nodep; 4233 mutex_exit(&pd->pd_mutex); 4234 4235 mutex_enter(&node->fd_mutex); 4236 bzero(node->fd_vv, sizeof (node->fd_vv)); 4237 mutex_exit(&node->fd_mutex); 4238 } 4239 4240 4241 /* 4242 * Handle OFFLINE state of an FCA port 4243 */ 4244 static void 4245 fp_port_offline(fc_local_port_t *port, int notify) 4246 { 4247 int index; 4248 int statec; 4249 
timeout_id_t tid; 4250 struct pwwn_hash *head; 4251 fc_remote_port_t *pd; 4252 4253 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4254 4255 for (index = 0; index < pwwn_table_size; index++) { 4256 head = &port->fp_pwwn_table[index]; 4257 pd = head->pwwn_head; 4258 while (pd != NULL) { 4259 mutex_enter(&pd->pd_mutex); 4260 fp_remote_port_offline(pd); 4261 fctl_delist_did_table(port, pd); 4262 mutex_exit(&pd->pd_mutex); 4263 pd = pd->pd_wwn_hnext; 4264 } 4265 } 4266 port->fp_total_devices = 0; 4267 4268 statec = 0; 4269 if (notify) { 4270 /* 4271 * Decrement the statec busy counter as we 4272 * are almost done with handling the state 4273 * change 4274 */ 4275 ASSERT(port->fp_statec_busy > 0); 4276 if (--port->fp_statec_busy == 0) { 4277 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4278 } 4279 mutex_exit(&port->fp_mutex); 4280 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4281 0, 0, KM_SLEEP); 4282 mutex_enter(&port->fp_mutex); 4283 4284 if (port->fp_statec_busy) { 4285 statec++; 4286 } 4287 } else if (port->fp_statec_busy > 1) { 4288 statec++; 4289 } 4290 4291 if ((tid = port->fp_offline_tid) != NULL) { 4292 mutex_exit(&port->fp_mutex); 4293 (void) untimeout(tid); 4294 mutex_enter(&port->fp_mutex); 4295 } 4296 4297 if (!statec) { 4298 port->fp_offline_tid = timeout(fp_offline_timeout, 4299 (caddr_t)port, fp_offline_ticks); 4300 } 4301 } 4302 4303 4304 /* 4305 * Offline devices and send up a state change notification to ULPs 4306 */ 4307 static void 4308 fp_offline_timeout(void *port_handle) 4309 { 4310 int ret; 4311 fc_local_port_t *port = port_handle; 4312 uint32_t listlen = 0; 4313 fc_portmap_t *changelist = NULL; 4314 4315 mutex_enter(&port->fp_mutex); 4316 4317 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4318 (port->fp_soft_state & 4319 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4320 port->fp_dev_count == 0 || port->fp_statec_busy) { 4321 port->fp_offline_tid = NULL; 4322 mutex_exit(&port->fp_mutex); 4323 return; 4324 } 4325 4326 mutex_exit(&port->fp_mutex); 4327 4328 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4329 4330 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4331 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4332 FC_FCA_CORE)) != FC_SUCCESS) { 4333 FP_TRACE(FP_NHEAD1(9, ret), 4334 "Failed to force adapter dump"); 4335 } else { 4336 FP_TRACE(FP_NHEAD1(9, 0), 4337 "Forced adapter dump successfully"); 4338 } 4339 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4340 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4341 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4342 FP_TRACE(FP_NHEAD1(9, ret), 4343 "Failed to force adapter dump and reset"); 4344 } else { 4345 FP_TRACE(FP_NHEAD1(9, 0), 4346 "Forced adapter dump and reset successfully"); 4347 } 4348 } 4349 4350 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4351 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4352 listlen, listlen, KM_SLEEP); 4353 4354 mutex_enter(&port->fp_mutex); 4355 port->fp_offline_tid = NULL; 4356 mutex_exit(&port->fp_mutex); 4357 } 4358 4359 4360 /* 4361 * Perform general purpose ELS request initialization 4362 */ 4363 static void 4364 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4365 void (*comp) (), job_request_t *job) 4366 { 4367 fc_packet_t *pkt; 4368 4369 pkt = &cmd->cmd_pkt; 4370 cmd->cmd_job = job; 4371 4372 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4373 pkt->pkt_cmd_fhdr.d_id = d_id; 4374 pkt->pkt_cmd_fhdr.s_id = s_id; 4375 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4376 
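/* The remaining header fields describe a brand-new exchange; ox_id and rx_id are left at the unassigned value 0xffff. */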
pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4377 pkt->pkt_cmd_fhdr.seq_id = 0; 4378 pkt->pkt_cmd_fhdr.df_ctl = 0; 4379 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4380 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4381 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4382 pkt->pkt_cmd_fhdr.ro = 0; 4383 pkt->pkt_cmd_fhdr.rsvd = 0; 4384 pkt->pkt_comp = comp; 4385 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4386 } 4387 4388 4389 /* 4390 * Initialize PLOGI/FLOGI ELS request 4391 */ 4392 static void 4393 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4394 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4395 { 4396 ls_code_t payload; 4397 4398 fp_els_init(cmd, s_id, d_id, intr, job); 4399 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4400 4401 payload.ls_code = ls_code; 4402 payload.mbz = 0; 4403 4404 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4405 (uint8_t *)&port->fp_service_params, 4406 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4407 DDI_DEV_AUTOINCR); 4408 4409 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4410 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4411 DDI_DEV_AUTOINCR); 4412 } 4413 4414 4415 /* 4416 * Initialize LOGO ELS request 4417 */ 4418 static void 4419 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4420 { 4421 fc_local_port_t *port; 4422 fc_packet_t *pkt; 4423 la_els_logo_t payload; 4424 4425 port = pd->pd_port; 4426 pkt = &cmd->cmd_pkt; 4427 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4428 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4429 4430 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4431 fp_logo_intr, job); 4432 4433 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4434 4435 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4436 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4437 4438 payload.ls_code.ls_code = LA_ELS_LOGO; 4439 payload.ls_code.mbz = 0; 4440 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4441 payload.nport_id = port->fp_port_id; 4442 4443 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4444 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4445 } 4446 4447 /* 4448 * Initialize RNID ELS request 4449 */ 4450 static void 4451 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4452 { 4453 fc_local_port_t *port; 4454 fc_packet_t *pkt; 4455 la_els_rnid_t payload; 4456 fc_remote_port_t *pd; 4457 4458 pkt = &cmd->cmd_pkt; 4459 pd = pkt->pkt_pd; 4460 port = pd->pd_port; 4461 4462 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4463 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4464 4465 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4466 fp_rnid_intr, job); 4467 4468 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4469 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4470 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4471 4472 payload.ls_code.ls_code = LA_ELS_RNID; 4473 payload.ls_code.mbz = 0; 4474 payload.data_format = flag; 4475 4476 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4477 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4478 } 4479 4480 /* 4481 * Initialize RLS ELS request 4482 */ 4483 static void 4484 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4485 { 4486 fc_local_port_t *port; 4487 fc_packet_t *pkt; 4488 la_els_rls_t payload; 4489 fc_remote_port_t *pd; 4490 4491 pkt = &cmd->cmd_pkt; 4492 pd = pkt->pkt_pd; 4493 port = pd->pd_port; 4494 4495 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4496 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4497 4498 fp_els_init(cmd, port->fp_port_id.port_id, 
pd->pd_port_id.port_id, 4499 fp_rls_intr, job); 4500 4501 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4502 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4503 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4504 4505 payload.ls_code.ls_code = LA_ELS_RLS; 4506 payload.ls_code.mbz = 0; 4507 payload.rls_portid = port->fp_port_id; 4508 4509 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4510 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4511 } 4512 4513 4514 /* 4515 * Initialize an ADISC ELS request 4516 */ 4517 static void 4518 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4519 { 4520 fc_local_port_t *port; 4521 fc_packet_t *pkt; 4522 la_els_adisc_t payload; 4523 fc_remote_port_t *pd; 4524 4525 pkt = &cmd->cmd_pkt; 4526 pd = pkt->pkt_pd; 4527 port = pd->pd_port; 4528 4529 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4530 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4531 4532 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4533 fp_adisc_intr, job); 4534 4535 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4536 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4537 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4538 4539 payload.ls_code.ls_code = LA_ELS_ADISC; 4540 payload.ls_code.mbz = 0; 4541 payload.nport_id = port->fp_port_id; 4542 payload.port_wwn = port->fp_service_params.nport_ww_name; 4543 payload.node_wwn = port->fp_service_params.node_ww_name; 4544 payload.hard_addr = port->fp_hard_addr; 4545 4546 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4547 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4548 } 4549 4550 4551 /* 4552 * Send up a state change notification to ULPs. 4553 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4554 */ 4555 static int 4556 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4557 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4558 { 4559 fc_port_clist_t *clist; 4560 fc_remote_port_t *pd; 4561 int count; 4562 4563 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4564 4565 clist = kmem_zalloc(sizeof (*clist), sleep); 4566 if (clist == NULL) { 4567 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4568 return (FC_NOMEM); 4569 } 4570 4571 clist->clist_state = state; 4572 4573 mutex_enter(&port->fp_mutex); 4574 clist->clist_flags = port->fp_topology; 4575 mutex_exit(&port->fp_mutex); 4576 4577 clist->clist_port = (opaque_t)port; 4578 clist->clist_len = listlen; 4579 clist->clist_size = alloc_len; 4580 clist->clist_map = changelist; 4581 4582 /* 4583 * Bump the reference count of each fc_remote_port_t in this changelist. 4584 * This is necessary since these devices will be sitting in a taskq 4585 * and referenced later. When the state change notification is 4586 * complete, the reference counts will be decremented. 
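* Entries whose map_state is PORT_DEVICE_INVALID are deliberately not marked PD_GIVEN_TO_ULPS below, since those entries describe devices that are going away.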
4587 */ 4588 for (count = 0; count < clist->clist_len; count++) { 4589 pd = clist->clist_map[count].map_pd; 4590 4591 if (pd != NULL) { 4592 mutex_enter(&pd->pd_mutex); 4593 ASSERT((pd->pd_ref_count >= 0) || 4594 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4595 pd->pd_ref_count++; 4596 4597 if (clist->clist_map[count].map_state != 4598 PORT_DEVICE_INVALID) { 4599 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4600 } 4601 4602 mutex_exit(&pd->pd_mutex); 4603 } 4604 } 4605 4606 #ifdef DEBUG 4607 /* 4608 * Sanity check for presence of OLD devices in the hash lists 4609 */ 4610 if (clist->clist_size) { 4611 ASSERT(clist->clist_map != NULL); 4612 for (count = 0; count < clist->clist_len; count++) { 4613 if (clist->clist_map[count].map_state == 4614 PORT_DEVICE_INVALID) { 4615 la_wwn_t pwwn; 4616 fc_portid_t d_id; 4617 4618 pd = clist->clist_map[count].map_pd; 4619 ASSERT(pd != NULL); 4620 4621 mutex_enter(&pd->pd_mutex); 4622 pwwn = pd->pd_port_name; 4623 d_id = pd->pd_port_id; 4624 mutex_exit(&pd->pd_mutex); 4625 4626 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4627 ASSERT(pd != clist->clist_map[count].map_pd); 4628 4629 pd = fctl_get_remote_port_by_did(port, 4630 d_id.port_id); 4631 ASSERT(pd != clist->clist_map[count].map_pd); 4632 } 4633 } 4634 } 4635 #endif 4636 4637 mutex_enter(&port->fp_mutex); 4638 4639 if (state == FC_STATE_ONLINE) { 4640 if (--port->fp_statec_busy == 0) { 4641 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4642 } 4643 } 4644 mutex_exit(&port->fp_mutex); 4645 4646 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4647 clist, KM_SLEEP); 4648 4649 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4650 "state=%x, len=%d", port, state, listlen); 4651 4652 return (FC_SUCCESS); 4653 } 4654 4655 4656 /* 4657 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4658 */ 4659 static int 4660 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4661 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4662 { 4663 int ret; 4664 fc_port_clist_t *clist; 4665 4666 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4667 4668 clist = kmem_zalloc(sizeof (*clist), sleep); 4669 if (clist == NULL) { 4670 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4671 return (FC_NOMEM); 4672 } 4673 4674 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4675 4676 mutex_enter(&port->fp_mutex); 4677 clist->clist_flags = port->fp_topology; 4678 mutex_exit(&port->fp_mutex); 4679 4680 clist->clist_port = (opaque_t)port; 4681 clist->clist_len = listlen; 4682 clist->clist_size = alloc_len; 4683 clist->clist_map = changelist; 4684 4685 /* Send sysevents for target state changes */ 4686 4687 if (clist->clist_size) { 4688 int count; 4689 fc_remote_port_t *pd; 4690 4691 ASSERT(clist->clist_map != NULL); 4692 for (count = 0; count < clist->clist_len; count++) { 4693 pd = clist->clist_map[count].map_pd; 4694 4695 /* 4696 * Bump reference counts on all fc_remote_port_t 4697 * structs in this list. We don't know when the task 4698 * will fire, and we don't need these fc_remote_port_t 4699 * structs going away behind our back. 
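* (For removals the reference is not dropped again until the ULP's state change callback has completed, as noted below.)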
4700 */ 4701 if (pd) { 4702 mutex_enter(&pd->pd_mutex); 4703 ASSERT((pd->pd_ref_count >= 0) || 4704 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4705 pd->pd_ref_count++; 4706 mutex_exit(&pd->pd_mutex); 4707 } 4708 4709 if (clist->clist_map[count].map_state == 4710 PORT_DEVICE_VALID) { 4711 if (clist->clist_map[count].map_type == 4712 PORT_DEVICE_NEW) { 4713 /* Update our state change counter */ 4714 mutex_enter(&port->fp_mutex); 4715 port->fp_last_change++; 4716 mutex_exit(&port->fp_mutex); 4717 4718 /* Additions */ 4719 fp_log_target_event(port, 4720 ESC_SUNFC_TARGET_ADD, 4721 clist->clist_map[count].map_pwwn, 4722 clist->clist_map[count].map_did. 4723 port_id); 4724 } 4725 4726 } else if ((clist->clist_map[count].map_type == 4727 PORT_DEVICE_OLD) && 4728 (clist->clist_map[count].map_state == 4729 PORT_DEVICE_INVALID)) { 4730 /* Update our state change counter */ 4731 mutex_enter(&port->fp_mutex); 4732 port->fp_last_change++; 4733 mutex_exit(&port->fp_mutex); 4734 4735 /* 4736 * For removals, we don't decrement 4737 * pd_ref_count until after the ULP's 4738 * state change callback function has 4739 * completed. 4740 */ 4741 4742 /* Removals */ 4743 fp_log_target_event(port, 4744 ESC_SUNFC_TARGET_REMOVE, 4745 clist->clist_map[count].map_pwwn, 4746 clist->clist_map[count].map_did.port_id); 4747 } 4748 4749 if (clist->clist_map[count].map_state != 4750 PORT_DEVICE_INVALID) { 4751 /* 4752 * Indicate that the ULPs are now aware of 4753 * this device. 4754 */ 4755 4756 mutex_enter(&pd->pd_mutex); 4757 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4758 mutex_exit(&pd->pd_mutex); 4759 } 4760 4761 #ifdef DEBUG 4762 /* 4763 * Sanity check for OLD devices in the hash lists 4764 */ 4765 if (pd && clist->clist_map[count].map_state == 4766 PORT_DEVICE_INVALID) { 4767 la_wwn_t pwwn; 4768 fc_portid_t d_id; 4769 4770 mutex_enter(&pd->pd_mutex); 4771 pwwn = pd->pd_port_name; 4772 d_id = pd->pd_port_id; 4773 mutex_exit(&pd->pd_mutex); 4774 4775 /* 4776 * This overwrites the 'pd' local variable. 4777 * Beware of this if 'pd' ever gets 4778 * referenced below this block. 
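* The two lookups that follow check that an INVALID entry has already been removed from both the pwwn and d_id hash tables.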
4779 */ 4780 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4781 ASSERT(pd != clist->clist_map[count].map_pd); 4782 4783 pd = fctl_get_remote_port_by_did(port, 4784 d_id.port_id); 4785 ASSERT(pd != clist->clist_map[count].map_pd); 4786 } 4787 #endif 4788 } 4789 } 4790 4791 if (sync) { 4792 clist->clist_wait = 1; 4793 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4794 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4795 } 4796 4797 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4798 if (sync && ret) { 4799 mutex_enter(&clist->clist_mutex); 4800 while (clist->clist_wait) { 4801 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4802 } 4803 mutex_exit(&clist->clist_mutex); 4804 4805 mutex_destroy(&clist->clist_mutex); 4806 cv_destroy(&clist->clist_cv); 4807 kmem_free(clist, sizeof (*clist)); 4808 } 4809 4810 if (!ret) { 4811 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4812 "port=%p", port); 4813 kmem_free(clist->clist_map, 4814 sizeof (*(clist->clist_map)) * clist->clist_size); 4815 kmem_free(clist, sizeof (*clist)); 4816 } else { 4817 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4818 port, listlen); 4819 } 4820 4821 return (FC_SUCCESS); 4822 } 4823 4824 4825 /* 4826 * Perform PLOGI to the group of devices for ULPs 4827 */ 4828 static void 4829 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4830 { 4831 int offline; 4832 int count; 4833 int rval; 4834 uint32_t listlen; 4835 uint32_t done; 4836 uint32_t d_id; 4837 fc_remote_node_t *node; 4838 fc_remote_port_t *pd; 4839 fc_remote_port_t *tmp_pd; 4840 fc_packet_t *ulp_pkt; 4841 la_els_logi_t *els_data; 4842 ls_code_t ls_code; 4843 4844 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4845 port, job); 4846 4847 done = 0; 4848 listlen = job->job_ulp_listlen; 4849 job->job_counter = job->job_ulp_listlen; 4850 4851 mutex_enter(&port->fp_mutex); 4852 offline = (port->fp_statec_busy || 4853 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4854 mutex_exit(&port->fp_mutex); 4855 4856 for (count = 0; count < listlen; count++) { 4857 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4858 sizeof (la_els_logi_t)); 4859 4860 ulp_pkt = job->job_ulp_pkts[count]; 4861 pd = ulp_pkt->pkt_pd; 4862 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4863 4864 if (offline) { 4865 done++; 4866 4867 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4868 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4869 ulp_pkt->pkt_pd = NULL; 4870 ulp_pkt->pkt_comp(ulp_pkt); 4871 4872 job->job_ulp_pkts[count] = NULL; 4873 4874 fp_jobdone(job); 4875 continue; 4876 } 4877 4878 if (pd == NULL) { 4879 pd = fctl_get_remote_port_by_did(port, d_id); 4880 if (pd == NULL) { 4881 /* reset later */ 4882 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4883 continue; 4884 } 4885 mutex_enter(&pd->pd_mutex); 4886 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4887 mutex_exit(&pd->pd_mutex); 4888 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4889 done++; 4890 ulp_pkt->pkt_comp(ulp_pkt); 4891 job->job_ulp_pkts[count] = NULL; 4892 fp_jobdone(job); 4893 } else { 4894 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4895 mutex_exit(&pd->pd_mutex); 4896 } 4897 continue; 4898 } 4899 4900 switch (ulp_pkt->pkt_state) { 4901 case FC_PKT_ELS_IN_PROGRESS: 4902 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4903 /* FALLTHRU */ 4904 case FC_PKT_LOCAL_RJT: 4905 done++; 4906 ulp_pkt->pkt_comp(ulp_pkt); 4907 job->job_ulp_pkts[count] = NULL; 4908 fp_jobdone(job); 4909 continue; 4910 default: 4911 break; 4912 } 4913 4914 /* 4915 * Validate the pd corresponding to the d_id passed 4916 * by the ULPs 4917 */ 4918 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4919 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4920 done++; 4921 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4922 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4923 ulp_pkt->pkt_pd = NULL; 4924 ulp_pkt->pkt_comp(ulp_pkt); 4925 job->job_ulp_pkts[count] = NULL; 4926 fp_jobdone(job); 4927 continue; 4928 } 4929 4930 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4931 "port=%p, pd=%p", port, pd); 4932 4933 mutex_enter(&pd->pd_mutex); 4934 4935 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4936 done++; 4937 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4938 4939 ls_code.ls_code = LA_ELS_ACC; 4940 ls_code.mbz = 0; 4941 4942 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4943 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4944 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4945 4946 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4947 (uint8_t *)&pd->pd_csp, 4948 (uint8_t *)&els_data->common_service, 4949 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4950 4951 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4952 (uint8_t *)&pd->pd_port_name, 4953 (uint8_t *)&els_data->nport_ww_name, 4954 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4955 4956 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4957 (uint8_t *)&pd->pd_clsp1, 4958 (uint8_t *)&els_data->class_1, 4959 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4960 4961 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4962 (uint8_t *)&pd->pd_clsp2, 4963 (uint8_t *)&els_data->class_2, 4964 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4965 4966 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4967 (uint8_t *)&pd->pd_clsp3, 4968 (uint8_t *)&els_data->class_3, 4969 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4970 4971 node = pd->pd_remote_nodep; 4972 pd->pd_login_count++; 4973 pd->pd_flags = PD_IDLE; 4974 ulp_pkt->pkt_pd = pd; 4975 mutex_exit(&pd->pd_mutex); 4976 4977 mutex_enter(&node->fd_mutex); 4978 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4979 (uint8_t *)&node->fd_node_name, 4980 (uint8_t *)(&els_data->node_ww_name), 4981 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4982 4983 4984 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4985 (uint8_t *)&node->fd_vv, 4986 (uint8_t *)(&els_data->vendor_version), 4987 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4988 4989 mutex_exit(&node->fd_mutex); 4990 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4991 } else { 4992 4993 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4994 mutex_exit(&pd->pd_mutex); 4995 } 4996 4997 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 4998 ulp_pkt->pkt_comp(ulp_pkt); 4999 job->job_ulp_pkts[count] = NULL; 5000 fp_jobdone(job); 5001 } 5002 } 5003 5004 if (done == listlen) { 5005 fp_jobwait(job); 5006 fctl_jobdone(job); 5007 return; 5008 } 5009 5010 job->job_counter = listlen - done; 5011 5012 for (count = 0; count < listlen; count++) { 5013 int cmd_flags; 5014 5015 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5016 continue; 5017 } 5018 5019 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5020 5021 cmd_flags = FP_CMD_PLOGI_RETAIN; 5022 5023 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5024 ASSERT(d_id != 0); 5025 5026 pd = fctl_get_remote_port_by_did(port, d_id); 5027 5028 /* 5029 * We need to properly adjust the port device 5030 * reference counter before we assign the pd 5031 * to the ULP packets port device pointer. 5032 */ 5033 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5034 mutex_enter(&pd->pd_mutex); 5035 pd->pd_ref_count++; 5036 mutex_exit(&pd->pd_mutex); 5037 FP_TRACE(FP_NHEAD1(3, 0), 5038 "fp_plogi_group: DID = 0x%x using new pd %p \ 5039 old pd NULL\n", d_id, pd); 5040 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5041 ulp_pkt->pkt_pd != pd) { 5042 mutex_enter(&pd->pd_mutex); 5043 pd->pd_ref_count++; 5044 mutex_exit(&pd->pd_mutex); 5045 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5046 ulp_pkt->pkt_pd->pd_ref_count--; 5047 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5048 FP_TRACE(FP_NHEAD1(3, 0), 5049 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5050 d_id, ulp_pkt->pkt_pd, pd); 5051 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5052 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5053 ulp_pkt->pkt_pd->pd_ref_count--; 5054 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5055 FP_TRACE(FP_NHEAD1(3, 0), 5056 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5057 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5058 } 5059 5060 ulp_pkt->pkt_pd = pd; 5061 5062 if (pd != NULL) { 5063 mutex_enter(&pd->pd_mutex); 5064 d_id = pd->pd_port_id.port_id; 5065 pd->pd_flags = PD_ELS_IN_PROGRESS; 5066 mutex_exit(&pd->pd_mutex); 5067 } else { 5068 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5069 #ifdef DEBUG 5070 pd = fctl_get_remote_port_by_did(port, d_id); 5071 ASSERT(pd == NULL); 5072 #endif 5073 /* 5074 * In the Fabric topology, use NS to create 5075 * port device, and if that fails still try 5076 * with PLOGI - which will make yet another 5077 * attempt to create after successful PLOGI 5078 */ 5079 mutex_enter(&port->fp_mutex); 5080 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5081 mutex_exit(&port->fp_mutex); 5082 pd = fp_create_remote_port_by_ns(port, 5083 d_id, KM_SLEEP); 5084 if (pd) { 5085 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5086 5087 mutex_enter(&pd->pd_mutex); 5088 pd->pd_flags = PD_ELS_IN_PROGRESS; 5089 mutex_exit(&pd->pd_mutex); 5090 5091 FP_TRACE(FP_NHEAD1(3, 0), 5092 "fp_plogi_group;" 5093 " NS created PD port=%p, job=%p," 5094 " pd=%p", port, job, pd); 5095 } 5096 } else { 5097 mutex_exit(&port->fp_mutex); 5098 } 5099 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5100 FP_TRACE(FP_NHEAD1(3, 0), 5101 "fp_plogi_group;" 5102 "ulp_pkt's pd is NULL, get a pd %p", 5103 pd); 5104 mutex_enter(&pd->pd_mutex); 5105 
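/* Take the reference on behalf of the ULP packet that is about to point at this pd. */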
pd->pd_ref_count++; 5106 mutex_exit(&pd->pd_mutex); 5107 } 5108 ulp_pkt->pkt_pd = pd; 5109 } 5110 5111 rval = fp_port_login(port, d_id, job, cmd_flags, 5112 KM_SLEEP, pd, ulp_pkt); 5113 5114 if (rval == FC_SUCCESS) { 5115 continue; 5116 } 5117 5118 if (rval == FC_STATEC_BUSY) { 5119 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5120 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5121 } else { 5122 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5123 } 5124 5125 if (pd) { 5126 mutex_enter(&pd->pd_mutex); 5127 pd->pd_flags = PD_IDLE; 5128 mutex_exit(&pd->pd_mutex); 5129 } 5130 5131 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5132 ASSERT(pd != NULL); 5133 5134 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5135 " PD removed; port=%p, job=%p", port, job); 5136 5137 mutex_enter(&pd->pd_mutex); 5138 pd->pd_ref_count--; 5139 node = pd->pd_remote_nodep; 5140 mutex_exit(&pd->pd_mutex); 5141 5142 ASSERT(node != NULL); 5143 5144 if (fctl_destroy_remote_port(port, pd) == 0) { 5145 fctl_destroy_remote_node(node); 5146 } 5147 ulp_pkt->pkt_pd = NULL; 5148 } 5149 ulp_pkt->pkt_comp(ulp_pkt); 5150 fp_jobdone(job); 5151 } 5152 5153 fp_jobwait(job); 5154 fctl_jobdone(job); 5155 5156 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5157 port, job); 5158 } 5159 5160 5161 /* 5162 * Name server request initialization 5163 */ 5164 static void 5165 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5166 { 5167 int rval; 5168 int count; 5169 int size; 5170 5171 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5172 5173 job->job_counter = 1; 5174 job->job_result = FC_SUCCESS; 5175 5176 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5177 KM_SLEEP, NULL, NULL); 5178 5179 if (rval != FC_SUCCESS) { 5180 mutex_enter(&port->fp_mutex); 5181 port->fp_topology = FC_TOP_NO_NS; 5182 mutex_exit(&port->fp_mutex); 5183 return; 5184 } 5185 5186 fp_jobwait(job); 5187 5188 if (job->job_result != FC_SUCCESS) { 5189 mutex_enter(&port->fp_mutex); 5190 port->fp_topology = FC_TOP_NO_NS; 5191 mutex_exit(&port->fp_mutex); 5192 return; 5193 } 5194 5195 /* 5196 * At this time, we'll do NS registration for objects in the 5197 * ns_reg_cmds (see top of this file) array. 5198 * 5199 * Each time a ULP module registers with the transport, the 5200 * appropriate fc4 bit is set fc4 types and registered with 5201 * the NS for this support. Also, ULPs and FC admin utilities 5202 * may do registration for objects like IP address, symbolic 5203 * port/node name, Initial process associator at run time. 
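* The loop below issues one fp_ns_reg() request per entry in ns_reg_cmds[] and uses the shared job counter to wait for all of them to complete.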
5204 */ 5205 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5206 job->job_counter = size; 5207 job->job_result = FC_SUCCESS; 5208 5209 for (count = 0; count < size; count++) { 5210 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5211 job, 0, sleep) != FC_SUCCESS) { 5212 fp_jobdone(job); 5213 } 5214 } 5215 if (size) { 5216 fp_jobwait(job); 5217 } 5218 5219 job->job_result = FC_SUCCESS; 5220 5221 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5222 5223 if (port->fp_dev_count < FP_MAX_DEVICES) { 5224 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5225 } 5226 5227 job->job_counter = 1; 5228 5229 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5230 sleep) == FC_SUCCESS) { 5231 fp_jobwait(job); 5232 } 5233 } 5234 5235 5236 /* 5237 * Name server finish: 5238 * Unregister for RSCNs 5239 * Unregister all the host port objects in the Name Server 5240 * Perform LOGO with the NS; 5241 */ 5242 static void 5243 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5244 { 5245 fp_cmd_t *cmd; 5246 uchar_t class; 5247 uint32_t s_id; 5248 fc_packet_t *pkt; 5249 la_els_logo_t payload; 5250 5251 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5252 5253 job->job_counter = 1; 5254 5255 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5256 FC_SUCCESS) { 5257 fp_jobdone(job); 5258 } 5259 fp_jobwait(job); 5260 5261 job->job_counter = 1; 5262 5263 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5264 fp_jobdone(job); 5265 } 5266 fp_jobwait(job); 5267 5268 job->job_counter = 1; 5269 5270 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5271 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5272 pkt = &cmd->cmd_pkt; 5273 5274 mutex_enter(&port->fp_mutex); 5275 class = port->fp_ns_login_class; 5276 s_id = port->fp_port_id.port_id; 5277 payload.nport_id = port->fp_port_id; 5278 mutex_exit(&port->fp_mutex); 5279 5280 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5281 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5282 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5283 cmd->cmd_retry_count = 1; 5284 cmd->cmd_ulp_pkt = NULL; 5285 5286 if (port->fp_npiv_type == FC_NPIV_PORT) { 5287 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5288 } else { 5289 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5290 } 5291 5292 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5293 5294 payload.ls_code.ls_code = LA_ELS_LOGO; 5295 payload.ls_code.mbz = 0; 5296 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5297 5298 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5299 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5300 5301 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5302 fp_iodone(cmd); 5303 } 5304 fp_jobwait(job); 5305 } 5306 5307 5308 /* 5309 * NS Registration function. 5310 * 5311 * It should be seriously noted that FC-GS-2 currently doesn't support 5312 * an Object Registration by a D_ID other than the owner of the object. 5313 * What we are aiming at currently is to at least allow Symbolic Node/Port 5314 * Name registration for any N_Port Identifier by the host software. 5315 * 5316 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5317 * function treats the request as Host NS Object. 
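* Each case below builds the CT payload for the object being registered (CT header plus type-specific fields), sends it via fp_sendcmd() and, when 'polled' is set, waits on the caller's job before returning.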
5318 */ 5319 static int 5320 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5321 job_request_t *job, int polled, int sleep) 5322 { 5323 int rval; 5324 fc_portid_t s_id; 5325 fc_packet_t *pkt; 5326 fp_cmd_t *cmd; 5327 5328 if (pd == NULL) { 5329 mutex_enter(&port->fp_mutex); 5330 s_id = port->fp_port_id; 5331 mutex_exit(&port->fp_mutex); 5332 } else { 5333 mutex_enter(&pd->pd_mutex); 5334 s_id = pd->pd_port_id; 5335 mutex_exit(&pd->pd_mutex); 5336 } 5337 5338 if (polled) { 5339 job->job_counter = 1; 5340 } 5341 5342 switch (cmd_code) { 5343 case NS_RPN_ID: 5344 case NS_RNN_ID: { 5345 ns_rxn_req_t rxn; 5346 5347 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5348 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5349 if (cmd == NULL) { 5350 return (FC_NOMEM); 5351 } 5352 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5353 pkt = &cmd->cmd_pkt; 5354 5355 if (pd == NULL) { 5356 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5357 (port->fp_service_params.nport_ww_name) : 5358 (port->fp_service_params.node_ww_name)); 5359 } else { 5360 if (cmd_code == NS_RPN_ID) { 5361 mutex_enter(&pd->pd_mutex); 5362 rxn.rxn_xname = pd->pd_port_name; 5363 mutex_exit(&pd->pd_mutex); 5364 } else { 5365 fc_remote_node_t *node; 5366 5367 mutex_enter(&pd->pd_mutex); 5368 node = pd->pd_remote_nodep; 5369 mutex_exit(&pd->pd_mutex); 5370 5371 mutex_enter(&node->fd_mutex); 5372 rxn.rxn_xname = node->fd_node_name; 5373 mutex_exit(&node->fd_mutex); 5374 } 5375 } 5376 rxn.rxn_port_id = s_id; 5377 5378 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5379 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5380 sizeof (rxn), DDI_DEV_AUTOINCR); 5381 5382 break; 5383 } 5384 5385 case NS_RCS_ID: { 5386 ns_rcos_t rcos; 5387 5388 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5389 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5390 if (cmd == NULL) { 5391 return (FC_NOMEM); 5392 } 5393 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5394 pkt = &cmd->cmd_pkt; 5395 5396 if (pd == NULL) { 5397 rcos.rcos_cos = port->fp_cos; 5398 } else { 5399 mutex_enter(&pd->pd_mutex); 5400 rcos.rcos_cos = pd->pd_cos; 5401 mutex_exit(&pd->pd_mutex); 5402 } 5403 rcos.rcos_port_id = s_id; 5404 5405 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5406 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5407 sizeof (rcos), DDI_DEV_AUTOINCR); 5408 5409 break; 5410 } 5411 5412 case NS_RFT_ID: { 5413 ns_rfc_type_t rfc; 5414 5415 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5416 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5417 NULL); 5418 if (cmd == NULL) { 5419 return (FC_NOMEM); 5420 } 5421 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5422 pkt = &cmd->cmd_pkt; 5423 5424 if (pd == NULL) { 5425 mutex_enter(&port->fp_mutex); 5426 bcopy(port->fp_fc4_types, rfc.rfc_types, 5427 sizeof (port->fp_fc4_types)); 5428 mutex_exit(&port->fp_mutex); 5429 } else { 5430 mutex_enter(&pd->pd_mutex); 5431 bcopy(pd->pd_fc4types, rfc.rfc_types, 5432 sizeof (pd->pd_fc4types)); 5433 mutex_exit(&pd->pd_mutex); 5434 } 5435 rfc.rfc_port_id = s_id; 5436 5437 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5438 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5439 sizeof (rfc), DDI_DEV_AUTOINCR); 5440 5441 break; 5442 } 5443 5444 case NS_RSPN_ID: { 5445 uchar_t name_len; 5446 int pl_size; 5447 fc_portid_t spn; 5448 5449 if (pd == NULL) { 5450 mutex_enter(&port->fp_mutex); 5451 name_len = port->fp_sym_port_namelen; 5452 mutex_exit(&port->fp_mutex); 5453 } else { 5454 
mutex_enter(&pd->pd_mutex); 5455 name_len = pd->pd_spn_len; 5456 mutex_exit(&pd->pd_mutex); 5457 } 5458 5459 pl_size = sizeof (fc_portid_t) + name_len + 1; 5460 5461 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5462 sizeof (fc_reg_resp_t), sleep, NULL); 5463 if (cmd == NULL) { 5464 return (FC_NOMEM); 5465 } 5466 5467 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5468 5469 pkt = &cmd->cmd_pkt; 5470 5471 spn = s_id; 5472 5473 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5474 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5475 DDI_DEV_AUTOINCR); 5476 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5477 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5478 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5479 5480 if (pd == NULL) { 5481 mutex_enter(&port->fp_mutex); 5482 ddi_rep_put8(pkt->pkt_cmd_acc, 5483 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5484 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5485 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5486 mutex_exit(&port->fp_mutex); 5487 } else { 5488 mutex_enter(&pd->pd_mutex); 5489 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5490 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5491 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5492 mutex_exit(&pd->pd_mutex); 5493 } 5494 break; 5495 } 5496 5497 case NS_RPT_ID: { 5498 ns_rpt_t rpt; 5499 5500 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5501 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5502 if (cmd == NULL) { 5503 return (FC_NOMEM); 5504 } 5505 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5506 pkt = &cmd->cmd_pkt; 5507 5508 if (pd == NULL) { 5509 rpt.rpt_type = port->fp_port_type; 5510 } else { 5511 mutex_enter(&pd->pd_mutex); 5512 rpt.rpt_type = pd->pd_porttype; 5513 mutex_exit(&pd->pd_mutex); 5514 } 5515 rpt.rpt_port_id = s_id; 5516 5517 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5518 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5519 sizeof (rpt), DDI_DEV_AUTOINCR); 5520 5521 break; 5522 } 5523 5524 case NS_RIP_NN: { 5525 ns_rip_t rip; 5526 5527 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5528 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5529 if (cmd == NULL) { 5530 return (FC_NOMEM); 5531 } 5532 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5533 pkt = &cmd->cmd_pkt; 5534 5535 if (pd == NULL) { 5536 rip.rip_node_name = 5537 port->fp_service_params.node_ww_name; 5538 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5539 sizeof (port->fp_ip_addr)); 5540 } else { 5541 fc_remote_node_t *node; 5542 5543 /* 5544 * The most correct implementation should have the IP 5545 * address in the fc_remote_node_t structure; I believe 5546 * Node WWN and IP address should have one to one 5547 * correlation (but guess what this is changing in 5548 * FC-GS-2 latest draft) 5549 */ 5550 mutex_enter(&pd->pd_mutex); 5551 node = pd->pd_remote_nodep; 5552 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5553 sizeof (pd->pd_ip_addr)); 5554 mutex_exit(&pd->pd_mutex); 5555 5556 mutex_enter(&node->fd_mutex); 5557 rip.rip_node_name = node->fd_node_name; 5558 mutex_exit(&node->fd_mutex); 5559 } 5560 5561 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5562 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5563 sizeof (rip), DDI_DEV_AUTOINCR); 5564 5565 break; 5566 } 5567 5568 case NS_RIPA_NN: { 5569 ns_ipa_t ipa; 5570 5571 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5572 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5573 if (cmd == NULL) { 5574 return (FC_NOMEM); 5575 } 5576 
fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5577 pkt = &cmd->cmd_pkt; 5578 5579 if (pd == NULL) { 5580 ipa.ipa_node_name = 5581 port->fp_service_params.node_ww_name; 5582 bcopy(port->fp_ipa, ipa.ipa_value, 5583 sizeof (port->fp_ipa)); 5584 } else { 5585 fc_remote_node_t *node; 5586 5587 mutex_enter(&pd->pd_mutex); 5588 node = pd->pd_remote_nodep; 5589 mutex_exit(&pd->pd_mutex); 5590 5591 mutex_enter(&node->fd_mutex); 5592 ipa.ipa_node_name = node->fd_node_name; 5593 bcopy(node->fd_ipa, ipa.ipa_value, 5594 sizeof (node->fd_ipa)); 5595 mutex_exit(&node->fd_mutex); 5596 } 5597 5598 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5599 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5600 sizeof (ipa), DDI_DEV_AUTOINCR); 5601 5602 break; 5603 } 5604 5605 case NS_RSNN_NN: { 5606 uchar_t name_len; 5607 int pl_size; 5608 la_wwn_t snn; 5609 fc_remote_node_t *node = NULL; 5610 5611 if (pd == NULL) { 5612 mutex_enter(&port->fp_mutex); 5613 name_len = port->fp_sym_node_namelen; 5614 mutex_exit(&port->fp_mutex); 5615 } else { 5616 mutex_enter(&pd->pd_mutex); 5617 node = pd->pd_remote_nodep; 5618 mutex_exit(&pd->pd_mutex); 5619 5620 mutex_enter(&node->fd_mutex); 5621 name_len = node->fd_snn_len; 5622 mutex_exit(&node->fd_mutex); 5623 } 5624 5625 pl_size = sizeof (la_wwn_t) + name_len + 1; 5626 5627 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5628 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5629 if (cmd == NULL) { 5630 return (FC_NOMEM); 5631 } 5632 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5633 5634 pkt = &cmd->cmd_pkt; 5635 5636 bcopy(&port->fp_service_params.node_ww_name, 5637 &snn, sizeof (la_wwn_t)); 5638 5639 if (pd == NULL) { 5640 mutex_enter(&port->fp_mutex); 5641 ddi_rep_put8(pkt->pkt_cmd_acc, 5642 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5643 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5644 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5645 mutex_exit(&port->fp_mutex); 5646 } else { 5647 ASSERT(node != NULL); 5648 mutex_enter(&node->fd_mutex); 5649 ddi_rep_put8(pkt->pkt_cmd_acc, 5650 (uint8_t *)node->fd_snn, 5651 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5652 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5653 mutex_exit(&node->fd_mutex); 5654 } 5655 5656 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5657 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5658 sizeof (snn), DDI_DEV_AUTOINCR); 5659 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5660 (uint8_t *)(pkt->pkt_cmd 5661 + sizeof (fc_ct_header_t) + sizeof (snn)), 5662 1, DDI_DEV_AUTOINCR); 5663 5664 break; 5665 } 5666 5667 case NS_DA_ID: { 5668 ns_remall_t rall; 5669 char tmp[4] = {0}; 5670 char *ptr; 5671 5672 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5673 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5674 5675 if (cmd == NULL) { 5676 return (FC_NOMEM); 5677 } 5678 5679 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5680 pkt = &cmd->cmd_pkt; 5681 5682 ptr = (char *)(&s_id); 5683 tmp[3] = *ptr++; 5684 tmp[2] = *ptr++; 5685 tmp[1] = *ptr++; 5686 tmp[0] = *ptr; 5687 #if defined(_BIT_FIELDS_LTOH) 5688 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5689 #else 5690 rall.rem_port_id = s_id; 5691 #endif 5692 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5693 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5694 sizeof (rall), DDI_DEV_AUTOINCR); 5695 5696 break; 5697 } 5698 5699 default: 5700 return (FC_FAILURE); 5701 } 5702 5703 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5704 5705 if (rval != FC_SUCCESS) { 5706 
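/* The send failed; record the error in the job and retire the command right here. */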
job->job_result = rval; 5707 fp_iodone(cmd); 5708 } 5709 5710 if (polled) { 5711 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5712 fp_jobwait(job); 5713 } else { 5714 rval = FC_SUCCESS; 5715 } 5716 5717 return (rval); 5718 } 5719 5720 5721 /* 5722 * Common interrupt handler 5723 */ 5724 static int 5725 fp_common_intr(fc_packet_t *pkt, int iodone) 5726 { 5727 int rval = FC_FAILURE; 5728 fp_cmd_t *cmd; 5729 fc_local_port_t *port; 5730 5731 cmd = pkt->pkt_ulp_private; 5732 port = cmd->cmd_port; 5733 5734 /* 5735 * Fail fast the upper layer requests if 5736 * a state change has occurred amidst. 5737 */ 5738 mutex_enter(&port->fp_mutex); 5739 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5740 mutex_exit(&port->fp_mutex); 5741 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5742 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5743 } else if (!(port->fp_soft_state & 5744 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5745 mutex_exit(&port->fp_mutex); 5746 5747 switch (pkt->pkt_state) { 5748 case FC_PKT_LOCAL_BSY: 5749 case FC_PKT_FABRIC_BSY: 5750 case FC_PKT_NPORT_BSY: 5751 case FC_PKT_TIMEOUT: 5752 cmd->cmd_retry_interval = (pkt->pkt_state == 5753 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5754 rval = fp_retry_cmd(pkt); 5755 break; 5756 5757 case FC_PKT_FABRIC_RJT: 5758 case FC_PKT_NPORT_RJT: 5759 case FC_PKT_LOCAL_RJT: 5760 case FC_PKT_LS_RJT: 5761 case FC_PKT_FS_RJT: 5762 case FC_PKT_BA_RJT: 5763 rval = fp_handle_reject(pkt); 5764 break; 5765 5766 default: 5767 if (pkt->pkt_resp_resid) { 5768 cmd->cmd_retry_interval = 0; 5769 rval = fp_retry_cmd(pkt); 5770 } 5771 break; 5772 } 5773 } else { 5774 mutex_exit(&port->fp_mutex); 5775 } 5776 5777 if (rval != FC_SUCCESS && iodone) { 5778 fp_iodone(cmd); 5779 rval = FC_SUCCESS; 5780 } 5781 5782 return (rval); 5783 } 5784 5785 5786 /* 5787 * Some not so long winding theory on point to point topology: 5788 * 5789 * In the ACC payload, if the D_ID is ZERO and the common service 5790 * parameters indicate N_Port, then the topology is POINT TO POINT. 5791 * 5792 * In a point to point topology with an N_Port, during Fabric Login, 5793 * the destination N_Port will check with our WWN and decide if it 5794 * needs to issue PLOGI or not. That means, FLOGI could potentially 5795 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5796 * PLOGI creates the device handles. 5797 * 5798 * Assuming that the host port WWN is greater than the other N_Port 5799 * WWN, then we become the master (be aware that this isn't the word 5800 * used in the FC standards) and initiate the PLOGI. 5801 * 5802 */ 5803 static void 5804 fp_flogi_intr(fc_packet_t *pkt) 5805 { 5806 int state; 5807 int f_port; 5808 uint32_t s_id; 5809 uint32_t d_id; 5810 fp_cmd_t *cmd; 5811 fc_local_port_t *port; 5812 la_wwn_t *swwn; 5813 la_wwn_t dwwn; 5814 la_wwn_t nwwn; 5815 fc_remote_port_t *pd; 5816 la_els_logi_t *acc; 5817 com_svc_t csp; 5818 ls_code_t resp; 5819 5820 cmd = pkt->pkt_ulp_private; 5821 port = cmd->cmd_port; 5822 5823 mutex_enter(&port->fp_mutex); 5824 port->fp_out_fpcmds--; 5825 mutex_exit(&port->fp_mutex); 5826 5827 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5828 port, pkt, pkt->pkt_state); 5829 5830 if (FP_IS_PKT_ERROR(pkt)) { 5831 (void) fp_common_intr(pkt, 1); 5832 return; 5833 } 5834 5835 /* 5836 * Currently, we don't need to swap bytes here because qlc is faking the 5837 * response for us and so endianness is getting taken care of. 
But we 5838 * have to fix this and generalize this at some point 5839 */ 5840 acc = (la_els_logi_t *)pkt->pkt_resp; 5841 5842 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5843 sizeof (resp), DDI_DEV_AUTOINCR); 5844 5845 ASSERT(resp.ls_code == LA_ELS_ACC); 5846 if (resp.ls_code != LA_ELS_ACC) { 5847 (void) fp_common_intr(pkt, 1); 5848 return; 5849 } 5850 5851 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5852 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5853 5854 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5855 5856 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5857 5858 mutex_enter(&port->fp_mutex); 5859 state = FC_PORT_STATE_MASK(port->fp_state); 5860 mutex_exit(&port->fp_mutex); 5861 5862 if (pkt->pkt_resp_fhdr.d_id == 0) { 5863 if (f_port == 0 && state != FC_STATE_LOOP) { 5864 swwn = &port->fp_service_params.nport_ww_name; 5865 5866 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5867 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5868 DDI_DEV_AUTOINCR); 5869 5870 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5871 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5872 DDI_DEV_AUTOINCR); 5873 5874 mutex_enter(&port->fp_mutex); 5875 5876 port->fp_topology = FC_TOP_PT_PT; 5877 port->fp_total_devices = 1; 5878 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5879 port->fp_ptpt_master = 1; 5880 /* 5881 * Let us choose 'X' as S_ID and 'Y' 5882 * as D_ID and that'll work; hopefully 5883 * If not, it will get changed. 5884 */ 5885 s_id = port->fp_instance + FP_DEFAULT_SID; 5886 d_id = port->fp_instance + FP_DEFAULT_DID; 5887 port->fp_port_id.port_id = s_id; 5888 mutex_exit(&port->fp_mutex); 5889 5890 pd = fctl_create_remote_port(port, 5891 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5892 KM_NOSLEEP); 5893 if (pd == NULL) { 5894 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5895 0, NULL, "couldn't create device" 5896 " d_id=%X", d_id); 5897 fp_iodone(cmd); 5898 return; 5899 } 5900 5901 cmd->cmd_pkt.pkt_tran_flags = 5902 pkt->pkt_tran_flags; 5903 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5904 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5905 cmd->cmd_retry_count = fp_retry_count; 5906 5907 fp_xlogi_init(port, cmd, s_id, d_id, 5908 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5909 5910 (&cmd->cmd_pkt)->pkt_pd = pd; 5911 5912 /* 5913 * We've just created this fc_remote_port_t, and 5914 * we're about to use it to send a PLOGI, so 5915 * bump the reference count right now. When 5916 * the packet is freed, the reference count will 5917 * be decremented. The ULP may also start using 5918 * it, so mark it as given away as well. 
5919 */ 5920 pd->pd_ref_count++; 5921 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5922 5923 if (fp_sendcmd(port, cmd, 5924 port->fp_fca_handle) == FC_SUCCESS) { 5925 return; 5926 } 5927 } else { 5928 /* 5929 * The device handles will be created when the 5930 * unsolicited PLOGI is completed successfully 5931 */ 5932 port->fp_ptpt_master = 0; 5933 mutex_exit(&port->fp_mutex); 5934 } 5935 } 5936 pkt->pkt_state = FC_PKT_FAILURE; 5937 } else { 5938 if (f_port) { 5939 mutex_enter(&port->fp_mutex); 5940 if (state == FC_STATE_LOOP) { 5941 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5942 } else { 5943 port->fp_topology = FC_TOP_FABRIC; 5944 5945 ddi_rep_get8(pkt->pkt_resp_acc, 5946 (uint8_t *)&port->fp_fabric_name, 5947 (uint8_t *)&acc->node_ww_name, 5948 sizeof (la_wwn_t), 5949 DDI_DEV_AUTOINCR); 5950 } 5951 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5952 mutex_exit(&port->fp_mutex); 5953 } else { 5954 pkt->pkt_state = FC_PKT_FAILURE; 5955 } 5956 } 5957 fp_iodone(cmd); 5958 } 5959 5960 5961 /* 5962 * Handle solicited PLOGI response 5963 */ 5964 static void 5965 fp_plogi_intr(fc_packet_t *pkt) 5966 { 5967 int nl_port; 5968 int bailout; 5969 uint32_t d_id; 5970 fp_cmd_t *cmd; 5971 la_els_logi_t *acc; 5972 fc_local_port_t *port; 5973 fc_remote_port_t *pd; 5974 la_wwn_t nwwn; 5975 la_wwn_t pwwn; 5976 ls_code_t resp; 5977 5978 nl_port = 0; 5979 cmd = pkt->pkt_ulp_private; 5980 port = cmd->cmd_port; 5981 d_id = pkt->pkt_cmd_fhdr.d_id; 5982 5983 #ifndef __lock_lint 5984 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5985 #endif 5986 5987 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5988 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5989 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5990 5991 /* 5992 * Bail out early on ULP initiated requests if the 5993 * state change has occurred 5994 */ 5995 mutex_enter(&port->fp_mutex); 5996 port->fp_out_fpcmds--; 5997 bailout = ((port->fp_statec_busy || 5998 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 5999 cmd->cmd_ulp_pkt) ? 1 : 0; 6000 mutex_exit(&port->fp_mutex); 6001 6002 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6003 int skip_msg = 0; 6004 int giveup = 0; 6005 6006 if (cmd->cmd_ulp_pkt) { 6007 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6008 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6009 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6010 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6011 } 6012 6013 /* 6014 * If an unsolicited cross login already created 6015 * a device speed up the discovery by not retrying 6016 * the command mindlessly. 6017 */ 6018 if (pkt->pkt_pd == NULL && 6019 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6020 fp_iodone(cmd); 6021 return; 6022 } 6023 6024 if (pkt->pkt_pd != NULL) { 6025 giveup = (pkt->pkt_pd->pd_recepient == 6026 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6027 if (giveup) { 6028 /* 6029 * This pd is marked as plogi 6030 * recipient, stop retrying 6031 */ 6032 FP_TRACE(FP_NHEAD1(3, 0), 6033 "fp_plogi_intr: stop retry as" 6034 " a cross login was accepted" 6035 " from d_id=%x, port=%p.", 6036 d_id, port); 6037 fp_iodone(cmd); 6038 return; 6039 } 6040 } 6041 6042 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6043 return; 6044 } 6045 6046 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6047 mutex_enter(&pd->pd_mutex); 6048 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6049 skip_msg++; 6050 } 6051 mutex_exit(&pd->pd_mutex); 6052 } 6053 6054 mutex_enter(&port->fp_mutex); 6055 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6056 port->fp_statec_busy <= 1 && 6057 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6058 mutex_exit(&port->fp_mutex); 6059 /* 6060 * In case of Login Collisions, JNI HBAs returns the 6061 * FC pkt back to the Initiator with the state set to 6062 * FC_PKT_LS_RJT and reason to FC_REASON_LOGICAL_ERROR. 6063 * QLC HBAs handles such cases in the FW and doesnot 6064 * return the LS_RJT with Logical error when 6065 * login collision happens. 6066 */ 6067 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6068 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6069 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6070 "PLOGI to %x failed", d_id); 6071 } 6072 FP_TRACE(FP_NHEAD2(9, 0), 6073 "PLOGI to %x failed. state=%x reason=%x.", 6074 d_id, pkt->pkt_state, pkt->pkt_reason); 6075 } else { 6076 mutex_exit(&port->fp_mutex); 6077 } 6078 6079 fp_iodone(cmd); 6080 return; 6081 } 6082 6083 acc = (la_els_logi_t *)pkt->pkt_resp; 6084 6085 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6086 sizeof (resp), DDI_DEV_AUTOINCR); 6087 6088 ASSERT(resp.ls_code == LA_ELS_ACC); 6089 if (resp.ls_code != LA_ELS_ACC) { 6090 (void) fp_common_intr(pkt, 1); 6091 return; 6092 } 6093 6094 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6095 mutex_enter(&port->fp_mutex); 6096 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6097 mutex_exit(&port->fp_mutex); 6098 fp_iodone(cmd); 6099 return; 6100 } 6101 6102 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6103 6104 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6105 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6106 DDI_DEV_AUTOINCR); 6107 6108 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6109 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6110 DDI_DEV_AUTOINCR); 6111 6112 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6113 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6114 6115 if ((pd = pkt->pkt_pd) == NULL) { 6116 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6117 if (pd == NULL) { 6118 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6119 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6120 if (pd == NULL) { 6121 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6122 "couldn't create port device handles" 6123 " d_id=%x", d_id); 6124 fp_iodone(cmd); 6125 return; 6126 } 6127 } else { 6128 fc_remote_port_t *tmp_pd; 6129 6130 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6131 if (tmp_pd != NULL) { 6132 fp_iodone(cmd); 6133 return; 6134 } 6135 6136 mutex_enter(&port->fp_mutex); 6137 mutex_enter(&pd->pd_mutex); 6138 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6139 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6140 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6141 } 6142 6143 if (pd->pd_type == PORT_DEVICE_OLD) { 6144 if (pd->pd_port_id.port_id != d_id) { 6145 fctl_delist_did_table(port, pd); 6146 pd->pd_type = PORT_DEVICE_CHANGED; 6147 pd->pd_port_id.port_id = 
d_id; 6148 } else { 6149 pd->pd_type = PORT_DEVICE_NOCHANGE; 6150 } 6151 } 6152 6153 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6154 char ww_name[17]; 6155 6156 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6157 6158 mutex_exit(&pd->pd_mutex); 6159 mutex_exit(&port->fp_mutex); 6160 FP_TRACE(FP_NHEAD2(9, 0), 6161 "Possible Duplicate name or address" 6162 " identifiers in the PLOGI response" 6163 " D_ID=%x, PWWN=%s: Please check the" 6164 " configuration", d_id, ww_name); 6165 fp_iodone(cmd); 6166 return; 6167 } 6168 fctl_enlist_did_table(port, pd); 6169 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6170 mutex_exit(&pd->pd_mutex); 6171 mutex_exit(&port->fp_mutex); 6172 } 6173 } else { 6174 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6175 6176 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6177 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6178 6179 mutex_enter(&port->fp_mutex); 6180 mutex_enter(&pd->pd_mutex); 6181 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6182 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6183 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6184 pd->pd_type); 6185 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6186 pd->pd_type == PORT_DEVICE_OLD) || 6187 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6188 pd->pd_type = PORT_DEVICE_NOCHANGE; 6189 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6190 pd->pd_type = PORT_DEVICE_NEW; 6191 } 6192 } else { 6193 char old_name[17]; 6194 char new_name[17]; 6195 6196 fc_wwn_to_str(&pd->pd_port_name, old_name); 6197 fc_wwn_to_str(&pwwn, new_name); 6198 6199 FP_TRACE(FP_NHEAD1(9, 0), 6200 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6201 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6202 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6203 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6204 cmd->cmd_ulp_pkt, bailout); 6205 6206 FP_TRACE(FP_NHEAD2(9, 0), 6207 "PWWN of a device with D_ID=%x changed." 6208 " New PWWN = %s, OLD PWWN = %s", d_id, 6209 new_name, old_name); 6210 6211 if (cmd->cmd_ulp_pkt && !bailout) { 6212 fc_remote_node_t *rnodep; 6213 fc_portmap_t *changelist; 6214 fc_portmap_t *listptr; 6215 int len = 1; 6216 /* # entries in changelist */ 6217 6218 fctl_delist_pwwn_table(port, pd); 6219 6220 /* 6221 * Lets now check if there already is a pd with 6222 * this new WWN in the table. If so, we'll mark 6223 * it as invalid 6224 */ 6225 6226 if (new_wwn_pd) { 6227 /* 6228 * There is another pd with in the pwwn 6229 * table with the same WWN that we got 6230 * in the PLOGI payload. We have to get 6231 * it out of the pwwn table, update the 6232 * pd's state (fp_fillout_old_map does 6233 * this for us) and add it to the 6234 * changelist that goes up to ULPs. 6235 * 6236 * len is length of changelist and so 6237 * increment it. 6238 */ 6239 len++; 6240 6241 if (tmp_pd != pd) { 6242 /* 6243 * Odd case where pwwn and did 6244 * tables are out of sync but 6245 * we will handle that too. See 6246 * more comments below. 6247 * 6248 * One more device that ULPs 6249 * should know about and so len 6250 * gets incremented again. 6251 */ 6252 len++; 6253 } 6254 6255 listptr = changelist = kmem_zalloc(len * 6256 sizeof (*changelist), KM_SLEEP); 6257 6258 mutex_enter(&new_wwn_pd->pd_mutex); 6259 rnodep = new_wwn_pd->pd_remote_nodep; 6260 mutex_exit(&new_wwn_pd->pd_mutex); 6261 6262 /* 6263 * Hold the fd_mutex since 6264 * fctl_copy_portmap_held expects it. 
6265 * Preserve lock hierarchy by grabbing 6266 * fd_mutex before pd_mutex 6267 */ 6268 if (rnodep) { 6269 mutex_enter(&rnodep->fd_mutex); 6270 } 6271 mutex_enter(&new_wwn_pd->pd_mutex); 6272 fp_fillout_old_map_held(listptr++, 6273 new_wwn_pd, 0); 6274 mutex_exit(&new_wwn_pd->pd_mutex); 6275 if (rnodep) { 6276 mutex_exit(&rnodep->fd_mutex); 6277 } 6278 6279 /* 6280 * Safety check : 6281 * Lets ensure that the pwwn and did 6282 * tables are in sync. Ideally, we 6283 * should not find that these two pd's 6284 * are different. 6285 */ 6286 if (tmp_pd != pd) { 6287 mutex_enter(&tmp_pd->pd_mutex); 6288 rnodep = 6289 tmp_pd->pd_remote_nodep; 6290 mutex_exit(&tmp_pd->pd_mutex); 6291 6292 /* As above grab fd_mutex */ 6293 if (rnodep) { 6294 mutex_enter(&rnodep-> 6295 fd_mutex); 6296 } 6297 mutex_enter(&tmp_pd->pd_mutex); 6298 6299 fp_fillout_old_map_held( 6300 listptr++, tmp_pd, 0); 6301 6302 mutex_exit(&tmp_pd->pd_mutex); 6303 if (rnodep) { 6304 mutex_exit(&rnodep-> 6305 fd_mutex); 6306 } 6307 6308 /* 6309 * Now add "pd" (not tmp_pd) 6310 * to fp_did_table to sync it up 6311 * with fp_pwwn_table 6312 * 6313 * pd->pd_mutex is already held 6314 * at this point 6315 */ 6316 fctl_enlist_did_table(port, pd); 6317 } 6318 } else { 6319 listptr = changelist = kmem_zalloc( 6320 sizeof (*changelist), KM_SLEEP); 6321 } 6322 6323 ASSERT(changelist != NULL); 6324 6325 fp_fillout_changed_map(listptr, pd, &d_id, 6326 &pwwn); 6327 fctl_enlist_pwwn_table(port, pd); 6328 6329 mutex_exit(&pd->pd_mutex); 6330 mutex_exit(&port->fp_mutex); 6331 6332 fp_iodone(cmd); 6333 6334 (void) fp_ulp_devc_cb(port, changelist, len, 6335 len, KM_NOSLEEP, 0); 6336 6337 return; 6338 } 6339 } 6340 6341 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6342 nl_port = 1; 6343 } 6344 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6345 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6346 } 6347 6348 mutex_exit(&pd->pd_mutex); 6349 mutex_exit(&port->fp_mutex); 6350 6351 if (tmp_pd == NULL) { 6352 mutex_enter(&port->fp_mutex); 6353 mutex_enter(&pd->pd_mutex); 6354 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6355 char ww_name[17]; 6356 6357 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6358 mutex_exit(&pd->pd_mutex); 6359 mutex_exit(&port->fp_mutex); 6360 FP_TRACE(FP_NHEAD2(9, 0), 6361 "Possible Duplicate name or address" 6362 " identifiers in the PLOGI response" 6363 " D_ID=%x, PWWN=%s: Please check the" 6364 " configuration", d_id, ww_name); 6365 fp_iodone(cmd); 6366 return; 6367 } 6368 fctl_enlist_did_table(port, pd); 6369 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6370 mutex_exit(&pd->pd_mutex); 6371 mutex_exit(&port->fp_mutex); 6372 } 6373 } 6374 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6375 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6376 6377 if (cmd->cmd_ulp_pkt) { 6378 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6379 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6380 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6381 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6382 if (pd != NULL) { 6383 FP_TRACE(FP_NHEAD1(9, 0), 6384 "fp_plogi_intr;" 6385 "ulp_pkt's pd is NULL, get a pd %p", 6386 pd); 6387 mutex_enter(&pd->pd_mutex); 6388 pd->pd_ref_count++; 6389 mutex_exit(&pd->pd_mutex); 6390 } 6391 cmd->cmd_ulp_pkt->pkt_pd = pd; 6392 } 6393 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6394 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6395 sizeof (fc_frame_hdr_t)); 6396 bcopy((caddr_t)pkt->pkt_resp, 6397 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6398 sizeof (la_els_logi_t)); 6399 } 6400 6401 mutex_enter(&port->fp_mutex); 6402 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6403 
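/* On a private loop (or for an NL_Port device), reuse this command to follow the PLOGI with an ADISC. */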
mutex_enter(&pd->pd_mutex); 6404 6405 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6406 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6407 cmd->cmd_retry_count = fp_retry_count; 6408 6409 /* 6410 * If the fc_remote_port_t pointer is not set in the given 6411 * fc_packet_t, then this fc_remote_port_t must have just 6412 * been created. Save the pointer and also increment the 6413 * fc_remote_port_t reference count. 6414 */ 6415 if (pkt->pkt_pd == NULL) { 6416 pkt->pkt_pd = pd; 6417 pd->pd_ref_count++; /* It's in use! */ 6418 } 6419 6420 fp_adisc_init(cmd, cmd->cmd_job); 6421 6422 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6423 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6424 6425 mutex_exit(&pd->pd_mutex); 6426 mutex_exit(&port->fp_mutex); 6427 6428 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6429 return; 6430 } 6431 } else { 6432 mutex_exit(&port->fp_mutex); 6433 } 6434 6435 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6436 mutex_enter(&port->fp_mutex); 6437 mutex_enter(&pd->pd_mutex); 6438 6439 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6440 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6441 cmd->cmd_retry_count = fp_retry_count; 6442 6443 fp_logo_init(pd, cmd, cmd->cmd_job); 6444 6445 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6446 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6447 6448 mutex_exit(&pd->pd_mutex); 6449 mutex_exit(&port->fp_mutex); 6450 6451 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6452 return; 6453 } 6454 6455 } 6456 fp_iodone(cmd); 6457 } 6458 6459 6460 /* 6461 * Handle solicited ADISC response 6462 */ 6463 static void 6464 fp_adisc_intr(fc_packet_t *pkt) 6465 { 6466 int rval; 6467 int bailout; 6468 fp_cmd_t *cmd; 6469 fc_local_port_t *port; 6470 fc_remote_port_t *pd; 6471 la_els_adisc_t *acc; 6472 ls_code_t resp; 6473 fc_hardaddr_t ha; 6474 fc_portmap_t *changelist; 6475 int initiator, adiscfail = 0; 6476 6477 pd = pkt->pkt_pd; 6478 cmd = pkt->pkt_ulp_private; 6479 port = cmd->cmd_port; 6480 6481 #ifndef __lock_lint 6482 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6483 #endif 6484 6485 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6486 6487 mutex_enter(&port->fp_mutex); 6488 port->fp_out_fpcmds--; 6489 bailout = ((port->fp_statec_busy || 6490 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6491 cmd->cmd_ulp_pkt) ? 1 : 0; 6492 mutex_exit(&port->fp_mutex); 6493 6494 if (bailout) { 6495 fp_iodone(cmd); 6496 return; 6497 } 6498 6499 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6500 acc = (la_els_adisc_t *)pkt->pkt_resp; 6501 6502 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6503 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6504 6505 if (resp.ls_code == LA_ELS_ACC) { 6506 int is_private; 6507 6508 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6509 (uint8_t *)&acc->hard_addr, sizeof (ha), 6510 DDI_DEV_AUTOINCR); 6511 6512 mutex_enter(&port->fp_mutex); 6513 6514 is_private = 6515 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6516 6517 mutex_enter(&pd->pd_mutex); 6518 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6519 fctl_enlist_did_table(port, pd); 6520 } 6521 mutex_exit(&pd->pd_mutex); 6522 6523 mutex_exit(&port->fp_mutex); 6524 6525 mutex_enter(&pd->pd_mutex); 6526 if (pd->pd_type != PORT_DEVICE_NEW) { 6527 if (is_private && (pd->pd_hard_addr.hard_addr != 6528 ha.hard_addr)) { 6529 pd->pd_type = PORT_DEVICE_CHANGED; 6530 } else { 6531 pd->pd_type = PORT_DEVICE_NOCHANGE; 6532 } 6533 } 6534 6535 if (is_private && (ha.hard_addr && 6536 pd->pd_port_id.port_id != ha.hard_addr)) { 6537 char ww_name[17]; 6538 6539 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6540 6541 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6542 "NL_Port Identifier %x doesn't match" 6543 " with Hard Address %x, Will use Port" 6544 " WWN %s", pd->pd_port_id.port_id, 6545 ha.hard_addr, ww_name); 6546 6547 pd->pd_hard_addr.hard_addr = 0; 6548 } else { 6549 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6550 } 6551 mutex_exit(&pd->pd_mutex); 6552 } else { 6553 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6554 return; 6555 } 6556 } 6557 } else { 6558 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6559 return; 6560 } 6561 6562 mutex_enter(&port->fp_mutex); 6563 if (port->fp_statec_busy <= 1) { 6564 mutex_exit(&port->fp_mutex); 6565 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6566 "ADISC to %x failed, cmd_flags=%x", 6567 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6568 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6569 adiscfail = 1; 6570 } else { 6571 mutex_exit(&port->fp_mutex); 6572 } 6573 } 6574 6575 if (cmd->cmd_ulp_pkt) { 6576 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6577 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6578 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6579 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6580 cmd->cmd_ulp_pkt->pkt_pd = pd; 6581 FP_TRACE(FP_NHEAD1(9, 0), 6582 "fp_adisc__intr;" 6583 "ulp_pkt's pd is NULL, get a pd %p", 6584 pd); 6585 6586 } 6587 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6588 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6589 sizeof (fc_frame_hdr_t)); 6590 bcopy((caddr_t)pkt->pkt_resp, 6591 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6592 sizeof (la_els_logi_t)); 6593 } 6594 6595 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6596 FP_TRACE(FP_NHEAD1(9, 0), 6597 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6598 "fp_retry_count=%x, ulp_pkt=%p", 6599 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6600 6601 mutex_enter(&port->fp_mutex); 6602 mutex_enter(&pd->pd_mutex); 6603 6604 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6605 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6606 cmd->cmd_retry_count = fp_retry_count; 6607 6608 fp_logo_init(pd, cmd, cmd->cmd_job); 6609 6610 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6611 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6612 6613 mutex_exit(&pd->pd_mutex); 6614 mutex_exit(&port->fp_mutex); 6615 6616 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6617 if (adiscfail) { 6618 mutex_enter(&pd->pd_mutex); 6619 initiator = 6620 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 6621 pd->pd_state = PORT_DEVICE_VALID; 6622 pd->pd_aux_flags |= PD_LOGGED_OUT; 6623 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6624 pd->pd_type = PORT_DEVICE_NEW; 6625 } else { 6626 pd->pd_type = PORT_DEVICE_NOCHANGE; 6627 } 6628 mutex_exit(&pd->pd_mutex); 6629 6630 changelist = 6631 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6632 6633 if (initiator) { 6634 fp_unregister_login(pd); 6635 fctl_copy_portmap(changelist, pd); 6636 } else { 6637 fp_fillout_old_map(changelist, pd, 0); 6638 } 6639 6640 FP_TRACE(FP_NHEAD1(9, 0), 6641 "fp_adisc_intr: Dev change notification " 6642 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6643 "map_flags=%x initiator=%d", port, pd, 6644 changelist->map_type, changelist->map_state, 6645 changelist->map_flags, initiator); 6646 6647 (void) fp_ulp_devc_cb(port, changelist, 6648 1, 1, KM_SLEEP, 0); 6649 } 6650 if (rval == FC_SUCCESS) { 6651 return; 6652 } 6653 } 6654 fp_iodone(cmd); 6655 } 6656 6657 6658 /* 6659 * Handle solicited LOGO response 6660 */ 6661 static void 6662 fp_logo_intr(fc_packet_t *pkt) 6663 { 6664 ls_code_t resp; 6665 6666 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6667 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6668 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6669 6670 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6671 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6672 6673 if (FP_IS_PKT_ERROR(pkt)) { 6674 (void) fp_common_intr(pkt, 1); 6675 return; 6676 } 6677 6678 ASSERT(resp.ls_code == LA_ELS_ACC); 6679 if (resp.ls_code != LA_ELS_ACC) { 6680 (void) fp_common_intr(pkt, 1); 6681 return; 6682 } 6683 6684 if (pkt->pkt_pd != NULL) { 6685 fp_unregister_login(pkt->pkt_pd); 6686 } 6687 6688 fp_iodone(pkt->pkt_ulp_private); 6689 } 6690 6691 6692 /* 6693 * Handle solicited RNID response 6694 */ 6695 static void 6696 fp_rnid_intr(fc_packet_t *pkt) 6697 { 6698 ls_code_t resp; 6699 job_request_t *job; 6700 fp_cmd_t *cmd; 6701 la_els_rnid_acc_t *acc; 6702 6703 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6704 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6705 cmd = pkt->pkt_ulp_private; 6706 6707 mutex_enter(&cmd->cmd_port->fp_mutex); 6708 cmd->cmd_port->fp_out_fpcmds--; 6709 mutex_exit(&cmd->cmd_port->fp_mutex); 6710 6711 job = cmd->cmd_job; 6712 ASSERT(job->job_private != NULL); 6713 6714 /* If failure or LS_RJT then retry the packet, if needed */ 6715 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6716 (void) fp_common_intr(pkt, 1); 6717 return; 6718 } 6719 6720 /* Save node_id memory allocated in ioctl code */ 6721 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6722 6723 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6724 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6725 6726 /* wakeup the ioctl thread and free the pkt */ 6727 fp_iodone(cmd); 6728 } 6729 6730 6731 /* 6732 * Handle solicited RLS response 6733 */ 6734 static void 6735 fp_rls_intr(fc_packet_t *pkt) 6736 { 6737 ls_code_t resp; 6738 job_request_t *job; 6739 fp_cmd_t *cmd; 6740 la_els_rls_acc_t *acc; 6741 6742 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6743 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6744 cmd = pkt->pkt_ulp_private; 6745 6746 mutex_enter(&cmd->cmd_port->fp_mutex); 6747 cmd->cmd_port->fp_out_fpcmds--; 6748 mutex_exit(&cmd->cmd_port->fp_mutex); 6749 6750 job = cmd->cmd_job; 6751 ASSERT(job->job_private != NULL); 6752 6753 /* If failure or LS_RJT then retry the packet, if needed */ 
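	/*
	 * fp_common_intr() decides whether the packet gets retried or the
	 * command is completed with the translated error, as it does for
	 * the other solicited ELS completions above.
	 */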
6754 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6755 (void) fp_common_intr(pkt, 1); 6756 return; 6757 } 6758 6759 /* Save link error status block in memory allocated in ioctl code */ 6760 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6761 6762 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6763 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6764 DDI_DEV_AUTOINCR); 6765 6766 /* wakeup the ioctl thread and free the pkt */ 6767 fp_iodone(cmd); 6768 } 6769 6770 6771 /* 6772 * A solicited command completion interrupt (mostly for commands 6773 * that require almost no post processing such as SCR ELS) 6774 */ 6775 static void 6776 fp_intr(fc_packet_t *pkt) 6777 { 6778 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6779 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6780 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6781 6782 if (FP_IS_PKT_ERROR(pkt)) { 6783 (void) fp_common_intr(pkt, 1); 6784 return; 6785 } 6786 fp_iodone(pkt->pkt_ulp_private); 6787 } 6788 6789 6790 /* 6791 * Handle the underlying port's state change 6792 */ 6793 static void 6794 fp_statec_cb(opaque_t port_handle, uint32_t state) 6795 { 6796 fc_local_port_t *port = port_handle; 6797 job_request_t *job; 6798 6799 /* 6800 * If it is not possible to process the callbacks 6801 * just drop the callback on the floor; Don't bother 6802 * to do something that isn't safe at this time 6803 */ 6804 mutex_enter(&port->fp_mutex); 6805 if ((port->fp_soft_state & 6806 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6807 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6808 mutex_exit(&port->fp_mutex); 6809 return; 6810 } 6811 6812 if (port->fp_statec_busy == 0) { 6813 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6814 #ifdef DEBUG 6815 } else { 6816 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6817 #endif 6818 } 6819 6820 port->fp_statec_busy++; 6821 6822 /* 6823 * For now, force the trusted method of device authentication (by 6824 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
6825 */ 6826 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6827 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6828 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6829 fp_port_offline(port, 0); 6830 } 6831 mutex_exit(&port->fp_mutex); 6832 6833 switch (FC_PORT_STATE_MASK(state)) { 6834 case FC_STATE_OFFLINE: 6835 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6836 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6837 if (job == NULL) { 6838 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6839 " fp_statec_cb() couldn't submit a job " 6840 " to the thread: failing.."); 6841 mutex_enter(&port->fp_mutex); 6842 if (--port->fp_statec_busy == 0) { 6843 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6844 } 6845 mutex_exit(&port->fp_mutex); 6846 return; 6847 } 6848 mutex_enter(&port->fp_mutex); 6849 /* 6850 * Zero out this field so that we do not retain 6851 * the fabric name as its no longer valid 6852 */ 6853 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6854 port->fp_state = state; 6855 mutex_exit(&port->fp_mutex); 6856 6857 fctl_enque_job(port, job); 6858 break; 6859 6860 case FC_STATE_ONLINE: 6861 case FC_STATE_LOOP: 6862 mutex_enter(&port->fp_mutex); 6863 port->fp_state = state; 6864 6865 if (port->fp_offline_tid) { 6866 timeout_id_t tid; 6867 6868 tid = port->fp_offline_tid; 6869 port->fp_offline_tid = NULL; 6870 mutex_exit(&port->fp_mutex); 6871 (void) untimeout(tid); 6872 } else { 6873 mutex_exit(&port->fp_mutex); 6874 } 6875 6876 job = fctl_alloc_job(JOB_PORT_ONLINE, 6877 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6878 if (job == NULL) { 6879 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6880 "fp_statec_cb() couldn't submit a job " 6881 "to the thread: failing.."); 6882 6883 mutex_enter(&port->fp_mutex); 6884 if (--port->fp_statec_busy == 0) { 6885 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6886 } 6887 mutex_exit(&port->fp_mutex); 6888 return; 6889 } 6890 fctl_enque_job(port, job); 6891 break; 6892 6893 case FC_STATE_RESET_REQUESTED: 6894 mutex_enter(&port->fp_mutex); 6895 port->fp_state = FC_STATE_OFFLINE; 6896 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6897 mutex_exit(&port->fp_mutex); 6898 /* FALLTHROUGH */ 6899 6900 case FC_STATE_RESET: 6901 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6902 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6903 if (job == NULL) { 6904 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6905 "fp_statec_cb() couldn't submit a job" 6906 " to the thread: failing.."); 6907 6908 mutex_enter(&port->fp_mutex); 6909 if (--port->fp_statec_busy == 0) { 6910 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6911 } 6912 mutex_exit(&port->fp_mutex); 6913 return; 6914 } 6915 6916 /* squeeze into some field in the job structure */ 6917 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6918 fctl_enque_job(port, job); 6919 break; 6920 6921 case FC_STATE_TARGET_PORT_RESET: 6922 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6923 /* FALLTHROUGH */ 6924 6925 case FC_STATE_NAMESERVICE: 6926 /* FALLTHROUGH */ 6927 6928 default: 6929 mutex_enter(&port->fp_mutex); 6930 if (--port->fp_statec_busy == 0) { 6931 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6932 } 6933 mutex_exit(&port->fp_mutex); 6934 break; 6935 } 6936 } 6937 6938 6939 /* 6940 * Register with the Name Server for RSCNs 6941 */ 6942 static int 6943 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6944 int sleep) 6945 { 6946 uint32_t s_id; 6947 uchar_t class; 6948 fc_scr_req_t payload; 6949 fp_cmd_t *cmd; 6950 fc_packet_t *pkt; 6951 6952 mutex_enter(&port->fp_mutex); 6953 s_id = 
port->fp_port_id.port_id; 6954 class = port->fp_ns_login_class; 6955 mutex_exit(&port->fp_mutex); 6956 6957 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6958 sizeof (fc_scr_resp_t), sleep, NULL); 6959 if (cmd == NULL) { 6960 return (FC_NOMEM); 6961 } 6962 6963 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6964 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6965 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6966 cmd->cmd_retry_count = fp_retry_count; 6967 cmd->cmd_ulp_pkt = NULL; 6968 6969 pkt = &cmd->cmd_pkt; 6970 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6971 6972 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6973 6974 payload.ls_code.ls_code = LA_ELS_SCR; 6975 payload.ls_code.mbz = 0; 6976 payload.scr_rsvd = 0; 6977 payload.scr_func = scr_func; 6978 6979 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6980 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6981 6982 job->job_counter = 1; 6983 6984 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6985 fp_iodone(cmd); 6986 } 6987 6988 return (FC_SUCCESS); 6989 } 6990 6991 6992 /* 6993 * There are basically two methods to determine the total number of 6994 * devices out in the NS database; Reading the details of the two 6995 * methods described below, it shouldn't be hard to identify which 6996 * of the two methods is better. 6997 * 6998 * Method 1. 6999 * Iteratively issue GANs until all ports identifiers are walked 7000 * 7001 * Method 2. 7002 * Issue GID_PT (get port Identifiers) with Maximum residual 7003 * field in the request CT HEADER set to accommodate only the 7004 * CT HEADER in the response frame. And if FC-GS2 has been 7005 * carefully read, the NS here has a chance to FS_ACC the 7006 * request and indicate the residual size in the FS_ACC. 7007 * 7008 * Method 2 is wonderful, although it's not mandatory for the NS 7009 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7010 * (note with particular care the use of the auxiliary verb 'may') 7011 * 7012 */ 7013 static int 7014 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7015 int sleep) 7016 { 7017 int flags; 7018 int rval; 7019 uint32_t src_id; 7020 fctl_ns_req_t *ns_cmd; 7021 7022 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7023 7024 mutex_enter(&port->fp_mutex); 7025 src_id = port->fp_port_id.port_id; 7026 mutex_exit(&port->fp_mutex); 7027 7028 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7029 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7030 sizeof (ns_resp_gid_pt_t), 0, 7031 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7032 7033 if (ns_cmd == NULL) { 7034 return (FC_NOMEM); 7035 } 7036 7037 ns_cmd->ns_cmd_code = NS_GID_PT; 7038 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7039 = FC_NS_PORT_NX; /* All port types */ 7040 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7041 7042 } else { 7043 uint32_t ns_flags; 7044 7045 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7046 if (create) { 7047 ns_flags |= FCTL_NS_CREATE_DEVICE; 7048 } 7049 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7050 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7051 7052 if (ns_cmd == NULL) { 7053 return (FC_NOMEM); 7054 } 7055 ns_cmd->ns_gan_index = 0; 7056 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7057 ns_cmd->ns_cmd_code = NS_GA_NXT; 7058 ns_cmd->ns_gan_max = 0xFFFF; 7059 7060 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7061 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7062 } 7063 7064 flags = 
job->job_flags; 7065 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7066 job->job_counter = 1; 7067 7068 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7069 job->job_flags = flags; 7070 7071 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7072 uint16_t max_resid; 7073 7074 /* 7075 * Revert to scanning the NS if NS_GID_PT isn't 7076 * helping us figure out total number of devices. 7077 */ 7078 if (job->job_result != FC_SUCCESS || 7079 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7080 mutex_enter(&port->fp_mutex); 7081 port->fp_options &= ~FP_NS_SMART_COUNT; 7082 mutex_exit(&port->fp_mutex); 7083 7084 fctl_free_ns_cmd(ns_cmd); 7085 return (fp_ns_get_devcount(port, job, create, sleep)); 7086 } 7087 7088 mutex_enter(&port->fp_mutex); 7089 port->fp_total_devices = 1; 7090 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7091 if (max_resid) { 7092 /* 7093 * Since port identifier is 4 bytes and max_resid 7094 * is also in WORDS, max_resid simply indicates 7095 * the total number of port identifiers not 7096 * transferred 7097 */ 7098 port->fp_total_devices += max_resid; 7099 } 7100 mutex_exit(&port->fp_mutex); 7101 } 7102 mutex_enter(&port->fp_mutex); 7103 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7104 mutex_exit(&port->fp_mutex); 7105 fctl_free_ns_cmd(ns_cmd); 7106 7107 return (rval); 7108 } 7109 7110 /* 7111 * One heck of a function to serve userland. 7112 */ 7113 static int 7114 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7115 { 7116 int rval = 0; 7117 int jcode; 7118 uint32_t ret; 7119 uchar_t open_flag; 7120 fcio_t *kfcio; 7121 job_request_t *job; 7122 boolean_t use32 = B_FALSE; 7123 7124 #ifdef _MULTI_DATAMODEL 7125 switch (ddi_model_convert_from(mode & FMODELS)) { 7126 case DDI_MODEL_ILP32: 7127 use32 = B_TRUE; 7128 break; 7129 7130 case DDI_MODEL_NONE: 7131 default: 7132 break; 7133 } 7134 #endif 7135 7136 mutex_enter(&port->fp_mutex); 7137 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7138 FP_SOFT_IN_UNSOL_CB)) { 7139 fcio->fcio_errno = FC_STATEC_BUSY; 7140 mutex_exit(&port->fp_mutex); 7141 rval = EAGAIN; 7142 if (fp_fcio_copyout(fcio, data, mode)) { 7143 rval = EFAULT; 7144 } 7145 return (rval); 7146 } 7147 open_flag = port->fp_flag; 7148 mutex_exit(&port->fp_mutex); 7149 7150 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7151 fcio->fcio_errno = FC_FAILURE; 7152 rval = EACCES; 7153 if (fp_fcio_copyout(fcio, data, mode)) { 7154 rval = EFAULT; 7155 } 7156 return (rval); 7157 } 7158 7159 /* 7160 * If an exclusive open was demanded during open, don't let 7161 * either innocuous or devil threads to share the file 7162 * descriptor and fire down exclusive access commands 7163 */ 7164 mutex_enter(&port->fp_mutex); 7165 if (port->fp_flag & FP_EXCL) { 7166 if (port->fp_flag & FP_EXCL_BUSY) { 7167 mutex_exit(&port->fp_mutex); 7168 fcio->fcio_errno = FC_FAILURE; 7169 return (EBUSY); 7170 } 7171 port->fp_flag |= FP_EXCL_BUSY; 7172 } 7173 mutex_exit(&port->fp_mutex); 7174 7175 switch (fcio->fcio_cmd) { 7176 case FCIO_GET_HOST_PARAMS: { 7177 fc_port_dev_t *val; 7178 fc_port_dev32_t *val32; 7179 int index; 7180 int lilp_device_count; 7181 fc_lilpmap_t *lilp_map; 7182 uchar_t *alpa_list; 7183 7184 if (use32 == B_TRUE) { 7185 if (fcio->fcio_olen != sizeof (*val32) || 7186 fcio->fcio_xfer != FCIO_XFER_READ) { 7187 rval = EINVAL; 7188 break; 7189 } 7190 } else { 7191 if (fcio->fcio_olen != sizeof (*val) || 7192 fcio->fcio_xfer != FCIO_XFER_READ) { 7193 rval = EINVAL; 7194 break; 7195 } 7196 } 7197 7198 val = kmem_zalloc(sizeof (*val), 
KM_SLEEP); 7199 7200 mutex_enter(&port->fp_mutex); 7201 val->dev_did = port->fp_port_id; 7202 val->dev_hard_addr = port->fp_hard_addr; 7203 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7204 val->dev_nwwn = port->fp_service_params.node_ww_name; 7205 val->dev_state = port->fp_state; 7206 7207 lilp_map = &port->fp_lilp_map; 7208 alpa_list = &lilp_map->lilp_alpalist[0]; 7209 lilp_device_count = lilp_map->lilp_length; 7210 for (index = 0; index < lilp_device_count; index++) { 7211 uint32_t d_id; 7212 7213 d_id = alpa_list[index]; 7214 if (d_id == port->fp_port_id.port_id) { 7215 break; 7216 } 7217 } 7218 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7219 7220 bcopy(port->fp_fc4_types, val->dev_type, 7221 sizeof (port->fp_fc4_types)); 7222 mutex_exit(&port->fp_mutex); 7223 7224 if (use32 == B_TRUE) { 7225 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7226 7227 val32->dev_did = val->dev_did; 7228 val32->dev_hard_addr = val->dev_hard_addr; 7229 val32->dev_pwwn = val->dev_pwwn; 7230 val32->dev_nwwn = val->dev_nwwn; 7231 val32->dev_state = val->dev_state; 7232 val32->dev_did.priv_lilp_posit = 7233 val->dev_did.priv_lilp_posit; 7234 7235 bcopy(val->dev_type, val32->dev_type, 7236 sizeof (port->fp_fc4_types)); 7237 7238 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7239 fcio->fcio_olen, mode) == 0) { 7240 if (fp_fcio_copyout(fcio, data, mode)) { 7241 rval = EFAULT; 7242 } 7243 } else { 7244 rval = EFAULT; 7245 } 7246 7247 kmem_free(val32, sizeof (*val32)); 7248 } else { 7249 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7250 fcio->fcio_olen, mode) == 0) { 7251 if (fp_fcio_copyout(fcio, data, mode)) { 7252 rval = EFAULT; 7253 } 7254 } else { 7255 rval = EFAULT; 7256 } 7257 } 7258 7259 /* need to free "val" here */ 7260 kmem_free(val, sizeof (*val)); 7261 break; 7262 } 7263 7264 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7265 uint32_t index; 7266 char *tmpPath; 7267 fc_local_port_t *tmpPort; 7268 7269 if (fcio->fcio_olen < MAXPATHLEN || 7270 fcio->fcio_ilen != sizeof (uint32_t)) { 7271 rval = EINVAL; 7272 break; 7273 } 7274 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7275 rval = EFAULT; 7276 break; 7277 } 7278 7279 tmpPort = fctl_get_adapter_port_by_index(port, index); 7280 if (tmpPort == NULL) { 7281 FP_TRACE(FP_NHEAD1(9, 0), 7282 "User supplied index out of range"); 7283 fcio->fcio_errno = FC_BADPORT; 7284 rval = EFAULT; 7285 if (fp_fcio_copyout(fcio, data, mode)) { 7286 rval = EFAULT; 7287 } 7288 break; 7289 } 7290 7291 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7292 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7293 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7294 MAXPATHLEN, mode) == 0) { 7295 if (fp_fcio_copyout(fcio, data, mode)) { 7296 rval = EFAULT; 7297 } 7298 } else { 7299 rval = EFAULT; 7300 } 7301 kmem_free(tmpPath, MAXPATHLEN); 7302 break; 7303 } 7304 7305 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7306 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7307 fc_hba_adapter_attributes_t *val; 7308 fc_hba_adapter_attributes32_t *val32; 7309 7310 if (use32 == B_TRUE) { 7311 if (fcio->fcio_olen < sizeof (*val32) || 7312 fcio->fcio_xfer != FCIO_XFER_READ) { 7313 rval = EINVAL; 7314 break; 7315 } 7316 } else { 7317 if (fcio->fcio_olen < sizeof (*val) || 7318 fcio->fcio_xfer != FCIO_XFER_READ) { 7319 rval = EINVAL; 7320 break; 7321 } 7322 } 7323 7324 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7325 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7326 mutex_enter(&port->fp_mutex); 7327 bcopy(port->fp_hba_port_attrs.manufacturer, 7328 
val->Manufacturer, 7329 sizeof (val->Manufacturer)); 7330 bcopy(port->fp_hba_port_attrs.serial_number, 7331 val->SerialNumber, 7332 sizeof (val->SerialNumber)); 7333 bcopy(port->fp_hba_port_attrs.model, 7334 val->Model, 7335 sizeof (val->Model)); 7336 bcopy(port->fp_hba_port_attrs.model_description, 7337 val->ModelDescription, 7338 sizeof (val->ModelDescription)); 7339 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7340 sizeof (val->NodeSymbolicName)); 7341 bcopy(port->fp_hba_port_attrs.hardware_version, 7342 val->HardwareVersion, 7343 sizeof (val->HardwareVersion)); 7344 bcopy(port->fp_hba_port_attrs.option_rom_version, 7345 val->OptionROMVersion, 7346 sizeof (val->OptionROMVersion)); 7347 bcopy(port->fp_hba_port_attrs.firmware_version, 7348 val->FirmwareVersion, 7349 sizeof (val->FirmwareVersion)); 7350 val->VendorSpecificID = 7351 port->fp_hba_port_attrs.vendor_specific_id; 7352 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7353 &val->NodeWWN.raw_wwn, 7354 sizeof (val->NodeWWN.raw_wwn)); 7355 7356 7357 bcopy(port->fp_hba_port_attrs.driver_name, 7358 val->DriverName, 7359 sizeof (val->DriverName)); 7360 bcopy(port->fp_hba_port_attrs.driver_version, 7361 val->DriverVersion, 7362 sizeof (val->DriverVersion)); 7363 mutex_exit(&port->fp_mutex); 7364 7365 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7366 val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7367 } else { 7368 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7369 } 7370 7371 if (use32 == B_TRUE) { 7372 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7373 val32->version = val->version; 7374 bcopy(val->Manufacturer, val32->Manufacturer, 7375 sizeof (val->Manufacturer)); 7376 bcopy(val->SerialNumber, val32->SerialNumber, 7377 sizeof (val->SerialNumber)); 7378 bcopy(val->Model, val32->Model, 7379 sizeof (val->Model)); 7380 bcopy(val->ModelDescription, val32->ModelDescription, 7381 sizeof (val->ModelDescription)); 7382 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7383 sizeof (val->NodeSymbolicName)); 7384 bcopy(val->HardwareVersion, val32->HardwareVersion, 7385 sizeof (val->HardwareVersion)); 7386 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7387 sizeof (val->OptionROMVersion)); 7388 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7389 sizeof (val->FirmwareVersion)); 7390 val32->VendorSpecificID = val->VendorSpecificID; 7391 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7392 sizeof (val->NodeWWN.raw_wwn)); 7393 bcopy(val->DriverName, val32->DriverName, 7394 sizeof (val->DriverName)); 7395 bcopy(val->DriverVersion, val32->DriverVersion, 7396 sizeof (val->DriverVersion)); 7397 7398 val32->NumberOfPorts = val->NumberOfPorts; 7399 7400 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7401 fcio->fcio_olen, mode) == 0) { 7402 if (fp_fcio_copyout(fcio, data, mode)) { 7403 rval = EFAULT; 7404 } 7405 } else { 7406 rval = EFAULT; 7407 } 7408 7409 kmem_free(val32, sizeof (*val32)); 7410 } else { 7411 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7412 fcio->fcio_olen, mode) == 0) { 7413 if (fp_fcio_copyout(fcio, data, mode)) { 7414 rval = EFAULT; 7415 } 7416 } else { 7417 rval = EFAULT; 7418 } 7419 } 7420 7421 kmem_free(val, sizeof (*val)); 7422 break; 7423 } 7424 7425 case FCIO_GET_NPIV_ATTRIBUTES: { 7426 fc_hba_npiv_attributes_t *attrs; 7427 7428 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7429 mutex_enter(&port->fp_mutex); 7430 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7431 &attrs->NodeWWN.raw_wwn, 7432 sizeof (attrs->NodeWWN.raw_wwn)); 7433 
bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7434 &attrs->PortWWN.raw_wwn, 7435 sizeof (attrs->PortWWN.raw_wwn)); 7436 mutex_exit(&port->fp_mutex); 7437 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7438 fcio->fcio_olen, mode) == 0) { 7439 if (fp_fcio_copyout(fcio, data, mode)) { 7440 rval = EFAULT; 7441 } 7442 } else { 7443 rval = EFAULT; 7444 } 7445 kmem_free(attrs, sizeof (*attrs)); 7446 break; 7447 } 7448 7449 case FCIO_DELETE_NPIV_PORT: { 7450 fc_local_port_t *tmpport; 7451 char ww_pname[17]; 7452 la_wwn_t vwwn[1]; 7453 7454 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7455 if (ddi_copyin(fcio->fcio_ibuf, 7456 &vwwn, sizeof (la_wwn_t), mode)) { 7457 rval = EFAULT; 7458 break; 7459 } 7460 7461 fc_wwn_to_str(&vwwn[0], ww_pname); 7462 FP_TRACE(FP_NHEAD1(3, 0), 7463 "Delete NPIV Port %s", ww_pname); 7464 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7465 if (tmpport == NULL) { 7466 FP_TRACE(FP_NHEAD1(3, 0), 7467 "Delete NPIV Port : no found"); 7468 rval = EFAULT; 7469 } else { 7470 fc_local_port_t *nextport = tmpport->fp_port_next; 7471 fc_local_port_t *prevport = tmpport->fp_port_prev; 7472 int portlen, portindex, ret; 7473 7474 portlen = sizeof (portindex); 7475 ret = ddi_prop_op(DDI_DEV_T_ANY, 7476 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7477 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7478 (caddr_t)&portindex, &portlen); 7479 if (ret != DDI_SUCCESS) { 7480 rval = EFAULT; 7481 break; 7482 } 7483 if (ndi_devi_offline(tmpport->fp_port_dip, 7484 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7485 FP_TRACE(FP_NHEAD1(1, 0), 7486 "Delete NPIV Port failed"); 7487 mutex_enter(&port->fp_mutex); 7488 tmpport->fp_npiv_state = 0; 7489 mutex_exit(&port->fp_mutex); 7490 rval = EFAULT; 7491 } else { 7492 mutex_enter(&port->fp_mutex); 7493 nextport->fp_port_prev = prevport; 7494 prevport->fp_port_next = nextport; 7495 if (port == port->fp_port_next) { 7496 port->fp_port_next = 7497 port->fp_port_prev = NULL; 7498 } 7499 port->fp_npiv_portnum--; 7500 FP_TRACE(FP_NHEAD1(3, 0), 7501 "Delete NPIV Port %d", portindex); 7502 port->fp_npiv_portindex[portindex-1] = 0; 7503 mutex_exit(&port->fp_mutex); 7504 } 7505 } 7506 break; 7507 } 7508 7509 case FCIO_CREATE_NPIV_PORT: { 7510 char ww_nname[17], ww_pname[17]; 7511 la_npiv_create_entry_t entrybuf; 7512 uint32_t vportindex = 0; 7513 int npiv_ret = 0; 7514 char *portname, *fcaname; 7515 7516 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7517 (void) ddi_pathname(port->fp_port_dip, portname); 7518 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7519 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7520 FP_TRACE(FP_NHEAD1(1, 0), 7521 "Create NPIV port %s %s %s", portname, fcaname, 7522 ddi_driver_name(port->fp_fca_dip)); 7523 kmem_free(portname, MAXPATHLEN); 7524 kmem_free(fcaname, MAXPATHLEN); 7525 if (ddi_copyin(fcio->fcio_ibuf, 7526 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7527 rval = EFAULT; 7528 break; 7529 } 7530 7531 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7532 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7533 vportindex = entrybuf.vindex; 7534 FP_TRACE(FP_NHEAD1(3, 0), 7535 "Create NPIV Port %s %s %d", 7536 ww_nname, ww_pname, vportindex); 7537 7538 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7539 rval = EFAULT; 7540 break; 7541 } 7542 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7543 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7544 if (npiv_ret == NDI_SUCCESS) { 7545 mutex_enter(&port->fp_mutex); 7546 port->fp_npiv_portnum++; 7547 mutex_exit(&port->fp_mutex); 7548 if (fp_copyout((void *)&vportindex, 
7549 (void *)fcio->fcio_obuf, 7550 fcio->fcio_olen, mode) == 0) { 7551 if (fp_fcio_copyout(fcio, data, mode)) { 7552 rval = EFAULT; 7553 } 7554 } else { 7555 rval = EFAULT; 7556 } 7557 } else { 7558 rval = EFAULT; 7559 } 7560 FP_TRACE(FP_NHEAD1(3, 0), 7561 "Create NPIV Port %d %d", npiv_ret, vportindex); 7562 break; 7563 } 7564 7565 case FCIO_GET_NPIV_PORT_LIST: { 7566 fc_hba_npiv_port_list_t *list; 7567 int count; 7568 7569 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7570 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7571 rval = EINVAL; 7572 break; 7573 } 7574 7575 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7576 list->version = FC_HBA_LIST_VERSION; 7577 /* build npiv port list */ 7578 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7579 if (count < 0) { 7580 rval = ENXIO; 7581 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7582 kmem_free(list, fcio->fcio_olen); 7583 break; 7584 } 7585 list->numAdapters = count; 7586 7587 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7588 fcio->fcio_olen, mode) == 0) { 7589 if (fp_fcio_copyout(fcio, data, mode)) { 7590 FP_TRACE(FP_NHEAD1(1, 0), 7591 "Copy NPIV Port data error"); 7592 rval = EFAULT; 7593 } 7594 } else { 7595 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7596 rval = EFAULT; 7597 } 7598 kmem_free(list, fcio->fcio_olen); 7599 break; 7600 } 7601 7602 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7603 fc_hba_port_npiv_attributes_t *val; 7604 7605 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7606 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7607 7608 mutex_enter(&port->fp_mutex); 7609 val->npivflag = port->fp_npiv_flag; 7610 val->lastChange = port->fp_last_change; 7611 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7612 &val->PortWWN.raw_wwn, 7613 sizeof (val->PortWWN.raw_wwn)); 7614 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7615 &val->NodeWWN.raw_wwn, 7616 sizeof (val->NodeWWN.raw_wwn)); 7617 mutex_exit(&port->fp_mutex); 7618 7619 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7620 if (port->fp_npiv_type != FC_NPIV_PORT) { 7621 val->MaxNumberOfNPIVPorts = 7622 port->fp_fca_tran->fca_num_npivports; 7623 } else { 7624 val->MaxNumberOfNPIVPorts = 0; 7625 } 7626 7627 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7628 fcio->fcio_olen, mode) == 0) { 7629 if (fp_fcio_copyout(fcio, data, mode)) { 7630 rval = EFAULT; 7631 } 7632 } else { 7633 rval = EFAULT; 7634 } 7635 kmem_free(val, sizeof (*val)); 7636 break; 7637 } 7638 7639 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7640 fc_hba_port_attributes_t *val; 7641 fc_hba_port_attributes32_t *val32; 7642 7643 if (use32 == B_TRUE) { 7644 if (fcio->fcio_olen < sizeof (*val32) || 7645 fcio->fcio_xfer != FCIO_XFER_READ) { 7646 rval = EINVAL; 7647 break; 7648 } 7649 } else { 7650 if (fcio->fcio_olen < sizeof (*val) || 7651 fcio->fcio_xfer != FCIO_XFER_READ) { 7652 rval = EINVAL; 7653 break; 7654 } 7655 } 7656 7657 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7658 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7659 mutex_enter(&port->fp_mutex); 7660 val->lastChange = port->fp_last_change; 7661 val->fp_minor = port->fp_instance; 7662 7663 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7664 &val->PortWWN.raw_wwn, 7665 sizeof (val->PortWWN.raw_wwn)); 7666 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7667 &val->NodeWWN.raw_wwn, 7668 sizeof (val->NodeWWN.raw_wwn)); 7669 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7670 sizeof (val->FabricName.raw_wwn)); 7671 7672 val->PortFcId = port->fp_port_id.port_id; 
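	/*
	 * Translate the transport's port state into the FC-HBA API
	 * FC_HBA_PORTSTATE_* codes: OFFLINE maps to OFFLINE, the link-up
	 * states (ONLINE, LOOP, NAMESERVICE) map to ONLINE, and anything
	 * else is reported as UNKNOWN.
	 */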
7673 7674 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7675 case FC_STATE_OFFLINE: 7676 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7677 break; 7678 case FC_STATE_ONLINE: 7679 case FC_STATE_LOOP: 7680 case FC_STATE_NAMESERVICE: 7681 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7682 break; 7683 default: 7684 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7685 break; 7686 } 7687 7688 /* Translate from LV to FC-HBA port type codes */ 7689 switch (port->fp_port_type.port_type) { 7690 case FC_NS_PORT_N: 7691 val->PortType = FC_HBA_PORTTYPE_NPORT; 7692 break; 7693 case FC_NS_PORT_NL: 7694 /* Actually means loop for us */ 7695 val->PortType = FC_HBA_PORTTYPE_LPORT; 7696 break; 7697 case FC_NS_PORT_F: 7698 val->PortType = FC_HBA_PORTTYPE_FPORT; 7699 break; 7700 case FC_NS_PORT_FL: 7701 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7702 break; 7703 case FC_NS_PORT_E: 7704 val->PortType = FC_HBA_PORTTYPE_EPORT; 7705 break; 7706 default: 7707 val->PortType = FC_HBA_PORTTYPE_OTHER; 7708 break; 7709 } 7710 7711 7712 /* 7713 * If fp has decided that the topology is public loop, 7714 * we will indicate that using the appropriate 7715 * FC HBA API constant. 7716 */ 7717 switch (port->fp_topology) { 7718 case FC_TOP_PUBLIC_LOOP: 7719 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7720 break; 7721 7722 case FC_TOP_PT_PT: 7723 val->PortType = FC_HBA_PORTTYPE_PTP; 7724 break; 7725 7726 case FC_TOP_UNKNOWN: 7727 /* 7728 * This should cover the case where nothing is connected 7729 * to the port. Crystal+ is p'bly an exception here. 7730 * For Crystal+, port 0 will come up as private loop 7731 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7732 * nothing is connected to it. 7733 * Current plan is to let userland handle this. 7734 */ 7735 if (port->fp_bind_state == FC_STATE_OFFLINE) { 7736 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7737 } 7738 break; 7739 7740 default: 7741 /* 7742 * Do Nothing. 
7743 * Unused: 7744 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7745 */ 7746 break; 7747 } 7748 7749 val->PortSupportedClassofService = 7750 port->fp_hba_port_attrs.supported_cos; 7751 val->PortSupportedFc4Types[0] = 0; 7752 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7753 sizeof (val->PortActiveFc4Types)); 7754 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7755 sizeof (val->PortSymbolicName)); 7756 val->PortSupportedSpeed = 7757 port->fp_hba_port_attrs.supported_speed; 7758 7759 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7760 case FC_STATE_1GBIT_SPEED: 7761 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7762 break; 7763 case FC_STATE_2GBIT_SPEED: 7764 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7765 break; 7766 case FC_STATE_4GBIT_SPEED: 7767 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7768 break; 7769 case FC_STATE_8GBIT_SPEED: 7770 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7771 break; 7772 case FC_STATE_10GBIT_SPEED: 7773 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7774 break; 7775 case FC_STATE_16GBIT_SPEED: 7776 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7777 break; 7778 default: 7779 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7780 break; 7781 } 7782 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7783 val->NumberofDiscoveredPorts = port->fp_dev_count; 7784 mutex_exit(&port->fp_mutex); 7785 7786 if (use32 == B_TRUE) { 7787 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7788 val32->version = val->version; 7789 val32->lastChange = val->lastChange; 7790 val32->fp_minor = val->fp_minor; 7791 7792 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7793 sizeof (val->PortWWN.raw_wwn)); 7794 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7795 sizeof (val->NodeWWN.raw_wwn)); 7796 val32->PortFcId = val->PortFcId; 7797 val32->PortState = val->PortState; 7798 val32->PortType = val->PortType; 7799 7800 val32->PortSupportedClassofService = 7801 val->PortSupportedClassofService; 7802 bcopy(val->PortActiveFc4Types, 7803 val32->PortActiveFc4Types, 7804 sizeof (val->PortActiveFc4Types)); 7805 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7806 sizeof (val->PortSymbolicName)); 7807 bcopy(&val->FabricName, &val32->FabricName, 7808 sizeof (val->FabricName.raw_wwn)); 7809 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7810 val32->PortSpeed = val->PortSpeed; 7811 7812 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7813 val32->NumberofDiscoveredPorts = 7814 val->NumberofDiscoveredPorts; 7815 7816 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7817 fcio->fcio_olen, mode) == 0) { 7818 if (fp_fcio_copyout(fcio, data, mode)) { 7819 rval = EFAULT; 7820 } 7821 } else { 7822 rval = EFAULT; 7823 } 7824 7825 kmem_free(val32, sizeof (*val32)); 7826 } else { 7827 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7828 fcio->fcio_olen, mode) == 0) { 7829 if (fp_fcio_copyout(fcio, data, mode)) { 7830 rval = EFAULT; 7831 } 7832 } else { 7833 rval = EFAULT; 7834 } 7835 } 7836 7837 kmem_free(val, sizeof (*val)); 7838 break; 7839 } 7840 7841 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7842 fc_hba_port_attributes_t *val; 7843 fc_hba_port_attributes32_t *val32; 7844 uint32_t index = 0; 7845 fc_remote_port_t *tmp_pd; 7846 7847 if (use32 == B_TRUE) { 7848 if (fcio->fcio_olen < sizeof (*val32) || 7849 fcio->fcio_xfer != FCIO_XFER_READ) { 7850 rval = EINVAL; 7851 break; 7852 } 7853 } else { 7854 if (fcio->fcio_olen < sizeof (*val) || 7855 fcio->fcio_xfer != FCIO_XFER_READ) { 7856 rval = EINVAL; 7857 break; 7858 } 7859 } 7860 7861 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof 
(index), mode)) { 7862 rval = EFAULT; 7863 break; 7864 } 7865 7866 if (index >= port->fp_dev_count) { 7867 FP_TRACE(FP_NHEAD1(9, 0), 7868 "User supplied index out of range"); 7869 fcio->fcio_errno = FC_OUTOFBOUNDS; 7870 rval = EINVAL; 7871 if (fp_fcio_copyout(fcio, data, mode)) { 7872 rval = EFAULT; 7873 } 7874 break; 7875 } 7876 7877 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7878 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7879 7880 mutex_enter(&port->fp_mutex); 7881 tmp_pd = fctl_lookup_pd_by_index(port, index); 7882 7883 if (tmp_pd == NULL) { 7884 fcio->fcio_errno = FC_BADPORT; 7885 rval = EINVAL; 7886 } else { 7887 val->lastChange = port->fp_last_change; 7888 val->fp_minor = port->fp_instance; 7889 7890 mutex_enter(&tmp_pd->pd_mutex); 7891 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7892 &val->PortWWN.raw_wwn, 7893 sizeof (val->PortWWN.raw_wwn)); 7894 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7895 &val->NodeWWN.raw_wwn, 7896 sizeof (val->NodeWWN.raw_wwn)); 7897 val->PortFcId = tmp_pd->pd_port_id.port_id; 7898 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7899 sizeof (val->PortSymbolicName)); 7900 val->PortSupportedClassofService = tmp_pd->pd_cos; 7901 /* 7902 * we will assume the sizeof these pd_fc4types and 7903 * portActiveFc4Types will remain the same. we could 7904 * add in a check for it, but we decided it was unneeded 7905 */ 7906 bcopy((caddr_t)tmp_pd->pd_fc4types, 7907 val->PortActiveFc4Types, 7908 sizeof (tmp_pd->pd_fc4types)); 7909 val->PortState = 7910 fp_map_remote_port_state(tmp_pd->pd_state); 7911 mutex_exit(&tmp_pd->pd_mutex); 7912 7913 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7914 val->PortSupportedFc4Types[0] = 0; 7915 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7916 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7917 val->PortMaxFrameSize = 0; 7918 val->NumberofDiscoveredPorts = 0; 7919 7920 if (use32 == B_TRUE) { 7921 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7922 val32->version = val->version; 7923 val32->lastChange = val->lastChange; 7924 val32->fp_minor = val->fp_minor; 7925 7926 bcopy(&val->PortWWN.raw_wwn, 7927 &val32->PortWWN.raw_wwn, 7928 sizeof (val->PortWWN.raw_wwn)); 7929 bcopy(&val->NodeWWN.raw_wwn, 7930 &val32->NodeWWN.raw_wwn, 7931 sizeof (val->NodeWWN.raw_wwn)); 7932 val32->PortFcId = val->PortFcId; 7933 bcopy(val->PortSymbolicName, 7934 val32->PortSymbolicName, 7935 sizeof (val->PortSymbolicName)); 7936 val32->PortSupportedClassofService = 7937 val->PortSupportedClassofService; 7938 bcopy(val->PortActiveFc4Types, 7939 val32->PortActiveFc4Types, 7940 sizeof (tmp_pd->pd_fc4types)); 7941 7942 val32->PortType = val->PortType; 7943 val32->PortState = val->PortState; 7944 val32->PortSupportedFc4Types[0] = 7945 val->PortSupportedFc4Types[0]; 7946 val32->PortSupportedSpeed = 7947 val->PortSupportedSpeed; 7948 val32->PortSpeed = val->PortSpeed; 7949 val32->PortMaxFrameSize = 7950 val->PortMaxFrameSize; 7951 val32->NumberofDiscoveredPorts = 7952 val->NumberofDiscoveredPorts; 7953 7954 if (fp_copyout((void *)val32, 7955 (void *)fcio->fcio_obuf, 7956 fcio->fcio_olen, mode) == 0) { 7957 if (fp_fcio_copyout(fcio, 7958 data, mode)) { 7959 rval = EFAULT; 7960 } 7961 } else { 7962 rval = EFAULT; 7963 } 7964 7965 kmem_free(val32, sizeof (*val32)); 7966 } else { 7967 if (fp_copyout((void *)val, 7968 (void *)fcio->fcio_obuf, 7969 fcio->fcio_olen, mode) == 0) { 7970 if (fp_fcio_copyout(fcio, data, mode)) { 7971 rval = EFAULT; 7972 } 7973 } else { 7974 rval = EFAULT; 7975 } 7976 } 7977 } 7978 7979 mutex_exit(&port->fp_mutex); 7980 kmem_free(val, sizeof 
(*val)); 7981 break; 7982 } 7983 7984 case FCIO_GET_PORT_ATTRIBUTES: { 7985 fc_hba_port_attributes_t *val; 7986 fc_hba_port_attributes32_t *val32; 7987 la_wwn_t wwn; 7988 fc_remote_port_t *tmp_pd; 7989 7990 if (use32 == B_TRUE) { 7991 if (fcio->fcio_olen < sizeof (*val32) || 7992 fcio->fcio_xfer != FCIO_XFER_READ) { 7993 rval = EINVAL; 7994 break; 7995 } 7996 } else { 7997 if (fcio->fcio_olen < sizeof (*val) || 7998 fcio->fcio_xfer != FCIO_XFER_READ) { 7999 rval = EINVAL; 8000 break; 8001 } 8002 } 8003 8004 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8005 rval = EFAULT; 8006 break; 8007 } 8008 8009 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8010 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8011 8012 mutex_enter(&port->fp_mutex); 8013 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8014 val->lastChange = port->fp_last_change; 8015 val->fp_minor = port->fp_instance; 8016 mutex_exit(&port->fp_mutex); 8017 8018 if (tmp_pd == NULL) { 8019 fcio->fcio_errno = FC_BADWWN; 8020 rval = EINVAL; 8021 } else { 8022 mutex_enter(&tmp_pd->pd_mutex); 8023 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8024 &val->PortWWN.raw_wwn, 8025 sizeof (val->PortWWN.raw_wwn)); 8026 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8027 &val->NodeWWN.raw_wwn, 8028 sizeof (val->NodeWWN.raw_wwn)); 8029 val->PortFcId = tmp_pd->pd_port_id.port_id; 8030 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8031 sizeof (val->PortSymbolicName)); 8032 val->PortSupportedClassofService = tmp_pd->pd_cos; 8033 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8034 val->PortState = 8035 fp_map_remote_port_state(tmp_pd->pd_state); 8036 val->PortSupportedFc4Types[0] = 0; 8037 /* 8038 * we will assume the sizeof these pd_fc4types and 8039 * portActiveFc4Types will remain the same. we could 8040 * add in a check for it, but we decided it was unneeded 8041 */ 8042 bcopy((caddr_t)tmp_pd->pd_fc4types, 8043 val->PortActiveFc4Types, 8044 sizeof (tmp_pd->pd_fc4types)); 8045 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8046 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8047 val->PortMaxFrameSize = 0; 8048 val->NumberofDiscoveredPorts = 0; 8049 mutex_exit(&tmp_pd->pd_mutex); 8050 8051 if (use32 == B_TRUE) { 8052 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8053 val32->version = val->version; 8054 val32->lastChange = val->lastChange; 8055 val32->fp_minor = val->fp_minor; 8056 bcopy(&val->PortWWN.raw_wwn, 8057 &val32->PortWWN.raw_wwn, 8058 sizeof (val->PortWWN.raw_wwn)); 8059 bcopy(&val->NodeWWN.raw_wwn, 8060 &val32->NodeWWN.raw_wwn, 8061 sizeof (val->NodeWWN.raw_wwn)); 8062 val32->PortFcId = val->PortFcId; 8063 bcopy(val->PortSymbolicName, 8064 val32->PortSymbolicName, 8065 sizeof (val->PortSymbolicName)); 8066 val32->PortSupportedClassofService = 8067 val->PortSupportedClassofService; 8068 val32->PortType = val->PortType; 8069 val32->PortState = val->PortState; 8070 val32->PortSupportedFc4Types[0] = 8071 val->PortSupportedFc4Types[0]; 8072 bcopy(val->PortActiveFc4Types, 8073 val32->PortActiveFc4Types, 8074 sizeof (tmp_pd->pd_fc4types)); 8075 val32->PortSupportedSpeed = 8076 val->PortSupportedSpeed; 8077 val32->PortSpeed = val->PortSpeed; 8078 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8079 val32->NumberofDiscoveredPorts = 8080 val->NumberofDiscoveredPorts; 8081 8082 if (fp_copyout((void *)val32, 8083 (void *)fcio->fcio_obuf, 8084 fcio->fcio_olen, mode) == 0) { 8085 if (fp_fcio_copyout(fcio, data, mode)) { 8086 rval = EFAULT; 8087 } 8088 } else { 8089 rval = EFAULT; 8090 } 8091 8092 kmem_free(val32, sizeof (*val32)); 8093 } else { 8094 if 
(fp_copyout((void *)val, 8095 (void *)fcio->fcio_obuf, 8096 fcio->fcio_olen, mode) == 0) { 8097 if (fp_fcio_copyout(fcio, data, mode)) { 8098 rval = EFAULT; 8099 } 8100 } else { 8101 rval = EFAULT; 8102 } 8103 } 8104 } 8105 kmem_free(val, sizeof (*val)); 8106 break; 8107 } 8108 8109 case FCIO_GET_NUM_DEVS: { 8110 int num_devices; 8111 8112 if (fcio->fcio_olen != sizeof (num_devices) || 8113 fcio->fcio_xfer != FCIO_XFER_READ) { 8114 rval = EINVAL; 8115 break; 8116 } 8117 8118 mutex_enter(&port->fp_mutex); 8119 switch (port->fp_topology) { 8120 case FC_TOP_PRIVATE_LOOP: 8121 case FC_TOP_PT_PT: 8122 num_devices = port->fp_total_devices; 8123 fcio->fcio_errno = FC_SUCCESS; 8124 break; 8125 8126 case FC_TOP_PUBLIC_LOOP: 8127 case FC_TOP_FABRIC: 8128 mutex_exit(&port->fp_mutex); 8129 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8130 NULL, KM_SLEEP); 8131 ASSERT(job != NULL); 8132 8133 /* 8134 * In FC-GS-2 the Name Server doesn't send out 8135 * RSCNs for any Name Server Database updates 8136 * When it is finally fixed there is no need 8137 * to probe as below and should be removed. 8138 */ 8139 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8140 fctl_dealloc_job(job); 8141 8142 mutex_enter(&port->fp_mutex); 8143 num_devices = port->fp_total_devices; 8144 fcio->fcio_errno = FC_SUCCESS; 8145 break; 8146 8147 case FC_TOP_NO_NS: 8148 /* FALLTHROUGH */ 8149 case FC_TOP_UNKNOWN: 8150 /* FALLTHROUGH */ 8151 default: 8152 num_devices = 0; 8153 fcio->fcio_errno = FC_SUCCESS; 8154 break; 8155 } 8156 mutex_exit(&port->fp_mutex); 8157 8158 if (fp_copyout((void *)&num_devices, 8159 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8160 mode) == 0) { 8161 if (fp_fcio_copyout(fcio, data, mode)) { 8162 rval = EFAULT; 8163 } 8164 } else { 8165 rval = EFAULT; 8166 } 8167 break; 8168 } 8169 8170 case FCIO_GET_DEV_LIST: { 8171 int num_devices; 8172 int new_count; 8173 int map_size; 8174 8175 if (fcio->fcio_xfer != FCIO_XFER_READ || 8176 fcio->fcio_alen != sizeof (new_count)) { 8177 rval = EINVAL; 8178 break; 8179 } 8180 8181 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8182 8183 mutex_enter(&port->fp_mutex); 8184 if (num_devices < port->fp_total_devices) { 8185 fcio->fcio_errno = FC_TOOMANY; 8186 new_count = port->fp_total_devices; 8187 mutex_exit(&port->fp_mutex); 8188 8189 if (fp_copyout((void *)&new_count, 8190 (void *)fcio->fcio_abuf, 8191 sizeof (new_count), mode)) { 8192 rval = EFAULT; 8193 break; 8194 } 8195 8196 if (fp_fcio_copyout(fcio, data, mode)) { 8197 rval = EFAULT; 8198 break; 8199 } 8200 rval = EINVAL; 8201 break; 8202 } 8203 8204 if (port->fp_total_devices <= 0) { 8205 fcio->fcio_errno = FC_NO_MAP; 8206 new_count = port->fp_total_devices; 8207 mutex_exit(&port->fp_mutex); 8208 8209 if (fp_copyout((void *)&new_count, 8210 (void *)fcio->fcio_abuf, 8211 sizeof (new_count), mode)) { 8212 rval = EFAULT; 8213 break; 8214 } 8215 8216 if (fp_fcio_copyout(fcio, data, mode)) { 8217 rval = EFAULT; 8218 break; 8219 } 8220 rval = EINVAL; 8221 break; 8222 } 8223 8224 switch (port->fp_topology) { 8225 case FC_TOP_PRIVATE_LOOP: 8226 if (fp_fillout_loopmap(port, fcio, 8227 mode) != FC_SUCCESS) { 8228 rval = EFAULT; 8229 break; 8230 } 8231 if (fp_fcio_copyout(fcio, data, mode)) { 8232 rval = EFAULT; 8233 } 8234 break; 8235 8236 case FC_TOP_PT_PT: 8237 if (fp_fillout_p2pmap(port, fcio, 8238 mode) != FC_SUCCESS) { 8239 rval = EFAULT; 8240 break; 8241 } 8242 if (fp_fcio_copyout(fcio, data, mode)) { 8243 rval = EFAULT; 8244 } 8245 break; 8246 8247 case FC_TOP_PUBLIC_LOOP: 8248 case FC_TOP_FABRIC: { 8249 
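	/*
	 * For public loop and fabric topologies the device list is built
	 * by walking the Name Server with iterative GA_NXT queries;
	 * ns_gan_max below bounds the walk to the number of fc_port_dev_t
	 * entries that fit in the caller's buffer.
	 */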
fctl_ns_req_t *ns_cmd; 8250 8251 map_size = 8252 sizeof (fc_port_dev_t) * port->fp_total_devices; 8253 8254 mutex_exit(&port->fp_mutex); 8255 8256 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8257 sizeof (ns_resp_gan_t), map_size, 8258 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8259 KM_SLEEP); 8260 ASSERT(ns_cmd != NULL); 8261 8262 ns_cmd->ns_gan_index = 0; 8263 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8264 ns_cmd->ns_cmd_code = NS_GA_NXT; 8265 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8266 8267 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8268 NULL, KM_SLEEP); 8269 ASSERT(job != NULL); 8270 8271 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8272 8273 if (ret != FC_SUCCESS || 8274 job->job_result != FC_SUCCESS) { 8275 fctl_free_ns_cmd(ns_cmd); 8276 8277 fcio->fcio_errno = job->job_result; 8278 new_count = 0; 8279 if (fp_copyout((void *)&new_count, 8280 (void *)fcio->fcio_abuf, 8281 sizeof (new_count), mode)) { 8282 fctl_dealloc_job(job); 8283 mutex_enter(&port->fp_mutex); 8284 rval = EFAULT; 8285 break; 8286 } 8287 8288 if (fp_fcio_copyout(fcio, data, mode)) { 8289 fctl_dealloc_job(job); 8290 mutex_enter(&port->fp_mutex); 8291 rval = EFAULT; 8292 break; 8293 } 8294 rval = EIO; 8295 mutex_enter(&port->fp_mutex); 8296 break; 8297 } 8298 fctl_dealloc_job(job); 8299 8300 new_count = ns_cmd->ns_gan_index; 8301 if (fp_copyout((void *)&new_count, 8302 (void *)fcio->fcio_abuf, sizeof (new_count), 8303 mode)) { 8304 rval = EFAULT; 8305 fctl_free_ns_cmd(ns_cmd); 8306 mutex_enter(&port->fp_mutex); 8307 break; 8308 } 8309 8310 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8311 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8312 ns_cmd->ns_gan_index, mode)) { 8313 rval = EFAULT; 8314 fctl_free_ns_cmd(ns_cmd); 8315 mutex_enter(&port->fp_mutex); 8316 break; 8317 } 8318 fctl_free_ns_cmd(ns_cmd); 8319 8320 if (fp_fcio_copyout(fcio, data, mode)) { 8321 rval = EFAULT; 8322 } 8323 mutex_enter(&port->fp_mutex); 8324 break; 8325 } 8326 8327 case FC_TOP_NO_NS: 8328 /* FALLTHROUGH */ 8329 case FC_TOP_UNKNOWN: 8330 /* FALLTHROUGH */ 8331 default: 8332 fcio->fcio_errno = FC_NO_MAP; 8333 num_devices = port->fp_total_devices; 8334 8335 if (fp_copyout((void *)&new_count, 8336 (void *)fcio->fcio_abuf, 8337 sizeof (new_count), mode)) { 8338 rval = EFAULT; 8339 break; 8340 } 8341 8342 if (fp_fcio_copyout(fcio, data, mode)) { 8343 rval = EFAULT; 8344 break; 8345 } 8346 rval = EINVAL; 8347 break; 8348 } 8349 mutex_exit(&port->fp_mutex); 8350 break; 8351 } 8352 8353 case FCIO_GET_SYM_PNAME: { 8354 rval = ENOTSUP; 8355 break; 8356 } 8357 8358 case FCIO_GET_SYM_NNAME: { 8359 rval = ENOTSUP; 8360 break; 8361 } 8362 8363 case FCIO_SET_SYM_PNAME: { 8364 rval = ENOTSUP; 8365 break; 8366 } 8367 8368 case FCIO_SET_SYM_NNAME: { 8369 rval = ENOTSUP; 8370 break; 8371 } 8372 8373 case FCIO_GET_LOGI_PARAMS: { 8374 la_wwn_t pwwn; 8375 la_wwn_t *my_pwwn; 8376 la_els_logi_t *params; 8377 la_els_logi32_t *params32; 8378 fc_remote_node_t *node; 8379 fc_remote_port_t *pd; 8380 8381 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8382 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8383 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8384 rval = EINVAL; 8385 break; 8386 } 8387 8388 if (use32 == B_TRUE) { 8389 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8390 rval = EINVAL; 8391 break; 8392 } 8393 } else { 8394 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8395 rval = EINVAL; 8396 break; 8397 } 8398 } 8399 8400 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8401 rval = EFAULT; 8402 break; 8403 } 8404 
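	/*
	 * Look up the remote port by the caller-supplied PWWN.  If no
	 * remote port is known, the WWN may still name this local port
	 * itself, in which case the port's own service parameters are
	 * returned; otherwise the login parameters cached in the
	 * fc_remote_port_t and fc_remote_node_t are copied out.
	 */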
8405 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8406 if (pd == NULL) { 8407 mutex_enter(&port->fp_mutex); 8408 my_pwwn = &port->fp_service_params.nport_ww_name; 8409 mutex_exit(&port->fp_mutex); 8410 8411 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8412 rval = ENXIO; 8413 break; 8414 } 8415 8416 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8417 mutex_enter(&port->fp_mutex); 8418 *params = port->fp_service_params; 8419 mutex_exit(&port->fp_mutex); 8420 } else { 8421 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8422 8423 mutex_enter(&pd->pd_mutex); 8424 params->ls_code.mbz = params->ls_code.ls_code = 0; 8425 params->common_service = pd->pd_csp; 8426 params->nport_ww_name = pd->pd_port_name; 8427 params->class_1 = pd->pd_clsp1; 8428 params->class_2 = pd->pd_clsp2; 8429 params->class_3 = pd->pd_clsp3; 8430 node = pd->pd_remote_nodep; 8431 mutex_exit(&pd->pd_mutex); 8432 8433 bzero(params->reserved, sizeof (params->reserved)); 8434 8435 mutex_enter(&node->fd_mutex); 8436 bcopy(node->fd_vv, params->vendor_version, 8437 sizeof (node->fd_vv)); 8438 params->node_ww_name = node->fd_node_name; 8439 mutex_exit(&node->fd_mutex); 8440 8441 fctl_release_remote_port(pd); 8442 } 8443 8444 if (use32 == B_TRUE) { 8445 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8446 8447 params32->ls_code.mbz = params->ls_code.mbz; 8448 params32->common_service = params->common_service; 8449 params32->nport_ww_name = params->nport_ww_name; 8450 params32->class_1 = params->class_1; 8451 params32->class_2 = params->class_2; 8452 params32->class_3 = params->class_3; 8453 bzero(params32->reserved, sizeof (params32->reserved)); 8454 bcopy(params->vendor_version, params32->vendor_version, 8455 sizeof (node->fd_vv)); 8456 params32->node_ww_name = params->node_ww_name; 8457 8458 if (ddi_copyout((void *)params32, 8459 (void *)fcio->fcio_obuf, 8460 sizeof (*params32), mode)) { 8461 rval = EFAULT; 8462 } 8463 8464 kmem_free(params32, sizeof (*params32)); 8465 } else { 8466 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8467 sizeof (*params), mode)) { 8468 rval = EFAULT; 8469 } 8470 } 8471 8472 kmem_free(params, sizeof (*params)); 8473 if (fp_fcio_copyout(fcio, data, mode)) { 8474 rval = EFAULT; 8475 } 8476 break; 8477 } 8478 8479 case FCIO_DEV_LOGOUT: 8480 case FCIO_DEV_LOGIN: 8481 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8482 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8483 rval = EINVAL; 8484 8485 if (fp_fcio_copyout(fcio, data, mode)) { 8486 rval = EFAULT; 8487 } 8488 break; 8489 } 8490 8491 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8492 jcode = JOB_FCIO_LOGIN; 8493 } else { 8494 jcode = JOB_FCIO_LOGOUT; 8495 } 8496 8497 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8498 bcopy(fcio, kfcio, sizeof (*fcio)); 8499 8500 if (kfcio->fcio_ilen) { 8501 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8502 KM_SLEEP); 8503 8504 if (ddi_copyin((void *)fcio->fcio_ibuf, 8505 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8506 mode)) { 8507 rval = EFAULT; 8508 8509 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8510 kmem_free(kfcio, sizeof (*kfcio)); 8511 fcio->fcio_errno = job->job_result; 8512 if (fp_fcio_copyout(fcio, data, mode)) { 8513 rval = EFAULT; 8514 } 8515 break; 8516 } 8517 } 8518 8519 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8520 job->job_private = kfcio; 8521 8522 fctl_enque_job(port, job); 8523 fctl_jobwait(job); 8524 8525 rval = job->job_result; 8526 8527 fcio->fcio_errno = kfcio->fcio_errno; 8528 if (fp_fcio_copyout(fcio, data, mode)) { 8529 rval = EFAULT; 8530 } 8531 8532 
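	/*
	 * The job has completed and the results have been copied back to
	 * the caller; release the kernel-resident copy of the request and
	 * the job structure.
	 */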
kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8533 kmem_free(kfcio, sizeof (*kfcio)); 8534 fctl_dealloc_job(job); 8535 break; 8536 8537 case FCIO_GET_STATE: { 8538 la_wwn_t pwwn; 8539 uint32_t state; 8540 fc_remote_port_t *pd; 8541 fctl_ns_req_t *ns_cmd; 8542 8543 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8544 fcio->fcio_olen != sizeof (state) || 8545 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8546 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8547 rval = EINVAL; 8548 break; 8549 } 8550 8551 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8552 rval = EFAULT; 8553 break; 8554 } 8555 fcio->fcio_errno = 0; 8556 8557 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8558 if (pd == NULL) { 8559 mutex_enter(&port->fp_mutex); 8560 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8561 mutex_exit(&port->fp_mutex); 8562 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8563 NULL, NULL, KM_SLEEP); 8564 8565 job->job_counter = 1; 8566 job->job_result = FC_SUCCESS; 8567 8568 ns_cmd = fctl_alloc_ns_cmd( 8569 sizeof (ns_req_gid_pn_t), 8570 sizeof (ns_resp_gid_pn_t), 8571 sizeof (ns_resp_gid_pn_t), 8572 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8573 ASSERT(ns_cmd != NULL); 8574 8575 ns_cmd->ns_cmd_code = NS_GID_PN; 8576 ((ns_req_gid_pn_t *) 8577 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8578 8579 ret = fp_ns_query(port, ns_cmd, job, 8580 1, KM_SLEEP); 8581 8582 if (ret != FC_SUCCESS || job->job_result != 8583 FC_SUCCESS) { 8584 if (ret != FC_SUCCESS) { 8585 fcio->fcio_errno = ret; 8586 } else { 8587 fcio->fcio_errno = 8588 job->job_result; 8589 } 8590 rval = EIO; 8591 } else { 8592 state = PORT_DEVICE_INVALID; 8593 } 8594 fctl_free_ns_cmd(ns_cmd); 8595 fctl_dealloc_job(job); 8596 } else { 8597 mutex_exit(&port->fp_mutex); 8598 fcio->fcio_errno = FC_BADWWN; 8599 rval = ENXIO; 8600 } 8601 } else { 8602 mutex_enter(&pd->pd_mutex); 8603 state = pd->pd_state; 8604 mutex_exit(&pd->pd_mutex); 8605 8606 fctl_release_remote_port(pd); 8607 } 8608 8609 if (!rval) { 8610 if (ddi_copyout((void *)&state, 8611 (void *)fcio->fcio_obuf, sizeof (state), 8612 mode)) { 8613 rval = EFAULT; 8614 } 8615 } 8616 if (fp_fcio_copyout(fcio, data, mode)) { 8617 rval = EFAULT; 8618 } 8619 break; 8620 } 8621 8622 case FCIO_DEV_REMOVE: { 8623 la_wwn_t pwwn; 8624 fc_portmap_t *changelist; 8625 fc_remote_port_t *pd; 8626 8627 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8628 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8629 rval = EINVAL; 8630 break; 8631 } 8632 8633 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8634 rval = EFAULT; 8635 break; 8636 } 8637 8638 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8639 if (pd == NULL) { 8640 rval = ENXIO; 8641 fcio->fcio_errno = FC_BADWWN; 8642 if (fp_fcio_copyout(fcio, data, mode)) { 8643 rval = EFAULT; 8644 } 8645 break; 8646 } 8647 8648 mutex_enter(&pd->pd_mutex); 8649 if (pd->pd_ref_count > 1) { 8650 mutex_exit(&pd->pd_mutex); 8651 8652 rval = EBUSY; 8653 fcio->fcio_errno = FC_FAILURE; 8654 fctl_release_remote_port(pd); 8655 8656 if (fp_fcio_copyout(fcio, data, mode)) { 8657 rval = EFAULT; 8658 } 8659 break; 8660 } 8661 mutex_exit(&pd->pd_mutex); 8662 8663 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8664 8665 fctl_copy_portmap(changelist, pd); 8666 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8667 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8668 8669 fctl_release_remote_port(pd); 8670 break; 8671 } 8672 8673 case FCIO_GET_FCODE_REV: { 8674 caddr_t fcode_rev; 8675 fc_fca_pm_t pm; 8676 8677 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8678 fcio->fcio_xfer != 
FCIO_XFER_READ) { 8679 rval = EINVAL; 8680 break; 8681 } 8682 bzero((caddr_t)&pm, sizeof (pm)); 8683 8684 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8685 8686 pm.pm_cmd_flags = FC_FCA_PM_READ; 8687 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8688 pm.pm_data_len = fcio->fcio_olen; 8689 pm.pm_data_buf = fcode_rev; 8690 8691 ret = port->fp_fca_tran->fca_port_manage( 8692 port->fp_fca_handle, &pm); 8693 8694 if (ret == FC_SUCCESS) { 8695 if (ddi_copyout((void *)fcode_rev, 8696 (void *)fcio->fcio_obuf, 8697 fcio->fcio_olen, mode) == 0) { 8698 if (fp_fcio_copyout(fcio, data, mode)) { 8699 rval = EFAULT; 8700 } 8701 } else { 8702 rval = EFAULT; 8703 } 8704 } else { 8705 /* 8706 * check if buffer was not large enough to obtain 8707 * FCODE version. 8708 */ 8709 if (pm.pm_data_len > fcio->fcio_olen) { 8710 rval = ENOMEM; 8711 } else { 8712 rval = EIO; 8713 } 8714 fcio->fcio_errno = ret; 8715 if (fp_fcio_copyout(fcio, data, mode)) { 8716 rval = EFAULT; 8717 } 8718 } 8719 kmem_free(fcode_rev, fcio->fcio_olen); 8720 break; 8721 } 8722 8723 case FCIO_GET_FW_REV: { 8724 caddr_t fw_rev; 8725 fc_fca_pm_t pm; 8726 8727 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8728 fcio->fcio_xfer != FCIO_XFER_READ) { 8729 rval = EINVAL; 8730 break; 8731 } 8732 bzero((caddr_t)&pm, sizeof (pm)); 8733 8734 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8735 8736 pm.pm_cmd_flags = FC_FCA_PM_READ; 8737 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8738 pm.pm_data_len = fcio->fcio_olen; 8739 pm.pm_data_buf = fw_rev; 8740 8741 ret = port->fp_fca_tran->fca_port_manage( 8742 port->fp_fca_handle, &pm); 8743 8744 if (ret == FC_SUCCESS) { 8745 if (ddi_copyout((void *)fw_rev, 8746 (void *)fcio->fcio_obuf, 8747 fcio->fcio_olen, mode) == 0) { 8748 if (fp_fcio_copyout(fcio, data, mode)) { 8749 rval = EFAULT; 8750 } 8751 } else { 8752 rval = EFAULT; 8753 } 8754 } else { 8755 if (fp_fcio_copyout(fcio, data, mode)) { 8756 rval = EFAULT; 8757 } 8758 rval = EIO; 8759 } 8760 kmem_free(fw_rev, fcio->fcio_olen); 8761 break; 8762 } 8763 8764 case FCIO_GET_DUMP_SIZE: { 8765 uint32_t dump_size; 8766 fc_fca_pm_t pm; 8767 8768 if (fcio->fcio_olen != sizeof (dump_size) || 8769 fcio->fcio_xfer != FCIO_XFER_READ) { 8770 rval = EINVAL; 8771 break; 8772 } 8773 bzero((caddr_t)&pm, sizeof (pm)); 8774 pm.pm_cmd_flags = FC_FCA_PM_READ; 8775 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8776 pm.pm_data_len = sizeof (dump_size); 8777 pm.pm_data_buf = (caddr_t)&dump_size; 8778 8779 ret = port->fp_fca_tran->fca_port_manage( 8780 port->fp_fca_handle, &pm); 8781 8782 if (ret == FC_SUCCESS) { 8783 if (ddi_copyout((void *)&dump_size, 8784 (void *)fcio->fcio_obuf, sizeof (dump_size), 8785 mode) == 0) { 8786 if (fp_fcio_copyout(fcio, data, mode)) { 8787 rval = EFAULT; 8788 } 8789 } else { 8790 rval = EFAULT; 8791 } 8792 } else { 8793 fcio->fcio_errno = ret; 8794 rval = EIO; 8795 if (fp_fcio_copyout(fcio, data, mode)) { 8796 rval = EFAULT; 8797 } 8798 } 8799 break; 8800 } 8801 8802 case FCIO_DOWNLOAD_FW: { 8803 caddr_t firmware; 8804 fc_fca_pm_t pm; 8805 8806 if (fcio->fcio_ilen <= 0 || 8807 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8808 rval = EINVAL; 8809 break; 8810 } 8811 8812 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8813 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8814 fcio->fcio_ilen, mode)) { 8815 rval = EFAULT; 8816 kmem_free(firmware, fcio->fcio_ilen); 8817 break; 8818 } 8819 8820 bzero((caddr_t)&pm, sizeof (pm)); 8821 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8822 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8823 pm.pm_data_len = fcio->fcio_ilen; 8824 pm.pm_data_buf = firmware; 
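		/*
		 * The firmware image staged in the kernel buffer above is
		 * handed to the FCA through its fca_port_manage() entry
		 * point; the staging buffer is freed as soon as that call
		 * returns, so the FCA must consume the image during the call.
		 */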
8825 8826 ret = port->fp_fca_tran->fca_port_manage( 8827 port->fp_fca_handle, &pm); 8828 8829 kmem_free(firmware, fcio->fcio_ilen); 8830 8831 if (ret != FC_SUCCESS) { 8832 fcio->fcio_errno = ret; 8833 rval = EIO; 8834 if (fp_fcio_copyout(fcio, data, mode)) { 8835 rval = EFAULT; 8836 } 8837 } 8838 break; 8839 } 8840 8841 case FCIO_DOWNLOAD_FCODE: { 8842 caddr_t fcode; 8843 fc_fca_pm_t pm; 8844 8845 if (fcio->fcio_ilen <= 0 || 8846 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8847 rval = EINVAL; 8848 break; 8849 } 8850 8851 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8852 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8853 fcio->fcio_ilen, mode)) { 8854 rval = EFAULT; 8855 kmem_free(fcode, fcio->fcio_ilen); 8856 break; 8857 } 8858 8859 bzero((caddr_t)&pm, sizeof (pm)); 8860 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8861 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8862 pm.pm_data_len = fcio->fcio_ilen; 8863 pm.pm_data_buf = fcode; 8864 8865 ret = port->fp_fca_tran->fca_port_manage( 8866 port->fp_fca_handle, &pm); 8867 8868 kmem_free(fcode, fcio->fcio_ilen); 8869 8870 if (ret != FC_SUCCESS) { 8871 fcio->fcio_errno = ret; 8872 rval = EIO; 8873 if (fp_fcio_copyout(fcio, data, mode)) { 8874 rval = EFAULT; 8875 } 8876 } 8877 break; 8878 } 8879 8880 case FCIO_FORCE_DUMP: 8881 ret = port->fp_fca_tran->fca_reset( 8882 port->fp_fca_handle, FC_FCA_CORE); 8883 8884 if (ret != FC_SUCCESS) { 8885 fcio->fcio_errno = ret; 8886 rval = EIO; 8887 if (fp_fcio_copyout(fcio, data, mode)) { 8888 rval = EFAULT; 8889 } 8890 } 8891 break; 8892 8893 case FCIO_GET_DUMP: { 8894 caddr_t dump; 8895 uint32_t dump_size; 8896 fc_fca_pm_t pm; 8897 8898 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8899 rval = EINVAL; 8900 break; 8901 } 8902 bzero((caddr_t)&pm, sizeof (pm)); 8903 8904 pm.pm_cmd_flags = FC_FCA_PM_READ; 8905 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8906 pm.pm_data_len = sizeof (dump_size); 8907 pm.pm_data_buf = (caddr_t)&dump_size; 8908 8909 ret = port->fp_fca_tran->fca_port_manage( 8910 port->fp_fca_handle, &pm); 8911 8912 if (ret != FC_SUCCESS) { 8913 fcio->fcio_errno = ret; 8914 rval = EIO; 8915 if (fp_fcio_copyout(fcio, data, mode)) { 8916 rval = EFAULT; 8917 } 8918 break; 8919 } 8920 if (fcio->fcio_olen != dump_size) { 8921 fcio->fcio_errno = FC_NOMEM; 8922 rval = EINVAL; 8923 if (fp_fcio_copyout(fcio, data, mode)) { 8924 rval = EFAULT; 8925 } 8926 break; 8927 } 8928 8929 dump = kmem_zalloc(dump_size, KM_SLEEP); 8930 8931 bzero((caddr_t)&pm, sizeof (pm)); 8932 pm.pm_cmd_flags = FC_FCA_PM_READ; 8933 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8934 pm.pm_data_len = dump_size; 8935 pm.pm_data_buf = dump; 8936 8937 ret = port->fp_fca_tran->fca_port_manage( 8938 port->fp_fca_handle, &pm); 8939 8940 if (ret == FC_SUCCESS) { 8941 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8942 dump_size, mode) == 0) { 8943 if (fp_fcio_copyout(fcio, data, mode)) { 8944 rval = EFAULT; 8945 } 8946 } else { 8947 rval = EFAULT; 8948 } 8949 } else { 8950 fcio->fcio_errno = ret; 8951 rval = EIO; 8952 if (fp_fcio_copyout(fcio, data, mode)) { 8953 rval = EFAULT; 8954 } 8955 } 8956 kmem_free(dump, dump_size); 8957 break; 8958 } 8959 8960 case FCIO_GET_TOPOLOGY: { 8961 uint32_t user_topology; 8962 8963 if (fcio->fcio_xfer != FCIO_XFER_READ || 8964 fcio->fcio_olen != sizeof (user_topology)) { 8965 rval = EINVAL; 8966 break; 8967 } 8968 8969 mutex_enter(&port->fp_mutex); 8970 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8971 user_topology = FC_TOP_UNKNOWN; 8972 } else { 8973 user_topology = port->fp_topology; 8974 } 8975 mutex_exit(&port->fp_mutex); 
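		/*
		 * The topology value snapshotted under fp_mutex above (an
		 * offline port reports FC_TOP_UNKNOWN rather than its cached
		 * topology) is copied out to the caller below.
		 *
		 * Illustrative sketch of how a userland consumer might drive
		 * this path; the outer ioctl number (FCIO_CMD) is an
		 * assumption and is not taken from this file:
		 *
		 *	fcio_t		fcio = { 0 };
		 *	uint32_t	topo;
		 *
		 *	fcio.fcio_cmd  = FCIO_GET_TOPOLOGY;
		 *	fcio.fcio_xfer = FCIO_XFER_READ;
		 *	fcio.fcio_olen = sizeof (topo);
		 *	fcio.fcio_obuf = (caddr_t)&topo;
		 *	(void) ioctl(fd, FCIO_CMD, &fcio);
		 */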
8976 8977 if (ddi_copyout((void *)&user_topology, 8978 (void *)fcio->fcio_obuf, sizeof (user_topology), 8979 mode)) { 8980 rval = EFAULT; 8981 } 8982 break; 8983 } 8984 8985 case FCIO_RESET_LINK: { 8986 la_wwn_t pwwn; 8987 8988 /* 8989 * Look at the output buffer field; if this field has zero 8990 * bytes then attempt to reset the local link/loop. If the 8991 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 8992 * and if yes, determine the LFA and reset the remote LIP 8993 * by LINIT ELS. 8994 */ 8995 8996 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 8997 fcio->fcio_ilen != sizeof (pwwn)) { 8998 rval = EINVAL; 8999 break; 9000 } 9001 9002 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9003 sizeof (pwwn), mode)) { 9004 rval = EFAULT; 9005 break; 9006 } 9007 9008 mutex_enter(&port->fp_mutex); 9009 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9010 mutex_exit(&port->fp_mutex); 9011 break; 9012 } 9013 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9014 mutex_exit(&port->fp_mutex); 9015 9016 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9017 if (job == NULL) { 9018 rval = ENOMEM; 9019 break; 9020 } 9021 job->job_counter = 1; 9022 job->job_private = (void *)&pwwn; 9023 9024 fctl_enque_job(port, job); 9025 fctl_jobwait(job); 9026 9027 mutex_enter(&port->fp_mutex); 9028 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9029 mutex_exit(&port->fp_mutex); 9030 9031 if (job->job_result != FC_SUCCESS) { 9032 fcio->fcio_errno = job->job_result; 9033 rval = EIO; 9034 if (fp_fcio_copyout(fcio, data, mode)) { 9035 rval = EFAULT; 9036 } 9037 } 9038 fctl_dealloc_job(job); 9039 break; 9040 } 9041 9042 case FCIO_RESET_HARD: 9043 ret = port->fp_fca_tran->fca_reset( 9044 port->fp_fca_handle, FC_FCA_RESET); 9045 if (ret != FC_SUCCESS) { 9046 fcio->fcio_errno = ret; 9047 rval = EIO; 9048 if (fp_fcio_copyout(fcio, data, mode)) { 9049 rval = EFAULT; 9050 } 9051 } 9052 break; 9053 9054 case FCIO_RESET_HARD_CORE: 9055 ret = port->fp_fca_tran->fca_reset( 9056 port->fp_fca_handle, FC_FCA_RESET_CORE); 9057 if (ret != FC_SUCCESS) { 9058 rval = EIO; 9059 fcio->fcio_errno = ret; 9060 if (fp_fcio_copyout(fcio, data, mode)) { 9061 rval = EFAULT; 9062 } 9063 } 9064 break; 9065 9066 case FCIO_DIAG: { 9067 fc_fca_pm_t pm; 9068 9069 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9070 9071 /* Validate user buffer from ioctl call. 
*/
9072		if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) ||
9073		    ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) ||
9074		    ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) ||
9075		    ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) ||
9076		    ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) ||
9077		    ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) {
9078			rval = EFAULT;
9079			break;
9080		}
9081
9082		if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) {
9083			pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
9084			if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf,
9085			    fcio->fcio_ilen, mode)) {
9086				rval = EFAULT;
9087				goto fp_fcio_diag_cleanup;
9088			}
9089		}
9090
9091		if ((pm.pm_data_len = fcio->fcio_alen) > 0) {
9092			pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP);
9093			if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf,
9094			    fcio->fcio_alen, mode)) {
9095				rval = EFAULT;
9096				goto fp_fcio_diag_cleanup;
9097			}
9098		}
9099
9100		if ((pm.pm_stat_len = fcio->fcio_olen) > 0) {
9101			pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
9102		}
9103
9104		pm.pm_cmd_code = FC_PORT_DIAG;
9105		pm.pm_cmd_flags = fcio->fcio_cmd_flags;
9106
9107		ret = port->fp_fca_tran->fca_port_manage(
9108		    port->fp_fca_handle, &pm);
9109
9110		if (ret != FC_SUCCESS) {
9111			if (ret == FC_INVALID_REQUEST) {
9112				rval = ENOTTY;
9113			} else {
9114				rval = EIO;
9115			}
9116
9117			fcio->fcio_errno = ret;
9118			if (fp_fcio_copyout(fcio, data, mode)) {
9119				rval = EFAULT;
9120			}
9121			goto fp_fcio_diag_cleanup;
9122		}
9123
9124		/*
9125		 * pm_stat_len will contain the number of status bytes
9126		 * an FCA driver requires to return the complete status
9127		 * of the requested diag operation. If the user buffer
9128		 * is not large enough to hold the entire status, we
9129		 * copy only the portion of data that fits in the buffer and
9130		 * return ENOMEM to the user application.
9131 */ 9132 if (pm.pm_stat_len > fcio->fcio_olen) { 9133 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9134 "fp:FCIO_DIAG:status buffer too small\n"); 9135 9136 rval = ENOMEM; 9137 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9138 fcio->fcio_olen, mode)) { 9139 rval = EFAULT; 9140 goto fp_fcio_diag_cleanup; 9141 } 9142 } else { 9143 /* 9144 * Copy only data pm_stat_len bytes of data 9145 */ 9146 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9147 pm.pm_stat_len, mode)) { 9148 rval = EFAULT; 9149 goto fp_fcio_diag_cleanup; 9150 } 9151 } 9152 9153 if (fp_fcio_copyout(fcio, data, mode)) { 9154 rval = EFAULT; 9155 } 9156 9157 fp_fcio_diag_cleanup: 9158 if (pm.pm_cmd_buf != NULL) { 9159 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9160 } 9161 if (pm.pm_data_buf != NULL) { 9162 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9163 } 9164 if (pm.pm_stat_buf != NULL) { 9165 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9166 } 9167 9168 break; 9169 } 9170 9171 case FCIO_GET_NODE_ID: { 9172 /* validate parameters */ 9173 if (fcio->fcio_xfer != FCIO_XFER_READ || 9174 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9175 rval = EINVAL; 9176 break; 9177 } 9178 9179 rval = fp_get_rnid(port, data, mode, fcio); 9180 9181 /* ioctl handling is over */ 9182 break; 9183 } 9184 9185 case FCIO_SEND_NODE_ID: { 9186 la_wwn_t pwwn; 9187 9188 /* validate parameters */ 9189 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9190 fcio->fcio_xfer != FCIO_XFER_READ) { 9191 rval = EINVAL; 9192 break; 9193 } 9194 9195 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9196 sizeof (la_wwn_t), mode)) { 9197 rval = EFAULT; 9198 break; 9199 } 9200 9201 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9202 9203 /* ioctl handling is over */ 9204 break; 9205 } 9206 9207 case FCIO_SET_NODE_ID: { 9208 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9209 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9210 rval = EINVAL; 9211 break; 9212 } 9213 9214 rval = fp_set_rnid(port, data, mode, fcio); 9215 break; 9216 } 9217 9218 case FCIO_LINK_STATUS: { 9219 fc_portid_t rls_req; 9220 fc_rls_acc_t *rls_acc; 9221 fc_fca_pm_t pm; 9222 uint32_t dest, src_id; 9223 fp_cmd_t *cmd; 9224 fc_remote_port_t *pd; 9225 uchar_t pd_flags; 9226 9227 /* validate parameters */ 9228 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9229 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9230 fcio->fcio_xfer != FCIO_XFER_RW) { 9231 rval = EINVAL; 9232 break; 9233 } 9234 9235 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9236 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9237 rval = EINVAL; 9238 break; 9239 } 9240 9241 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9242 sizeof (fc_portid_t), mode)) { 9243 rval = EFAULT; 9244 break; 9245 } 9246 9247 9248 /* Determine the destination of the RLS frame */ 9249 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9250 dest = FS_FABRIC_F_PORT; 9251 } else { 9252 dest = rls_req.port_id; 9253 } 9254 9255 mutex_enter(&port->fp_mutex); 9256 src_id = port->fp_port_id.port_id; 9257 mutex_exit(&port->fp_mutex); 9258 9259 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9260 if (dest == 0 || dest == src_id) { 9261 9262 /* Allocate memory for link error status block */ 9263 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9264 ASSERT(rls_acc != NULL); 9265 9266 /* Prepare the port management structure */ 9267 bzero((caddr_t)&pm, sizeof (pm)); 9268 9269 pm.pm_cmd_flags = FC_FCA_PM_READ; 9270 pm.pm_cmd_code = FC_PORT_RLS; 9271 pm.pm_data_len = sizeof (*rls_acc); 9272 pm.pm_data_buf = (caddr_t)rls_acc; 9273 9274 /* 
Get the adapter's link error status block */ 9275 ret = port->fp_fca_tran->fca_port_manage( 9276 port->fp_fca_handle, &pm); 9277 9278 if (ret == FC_SUCCESS) { 9279 /* xfer link status block to userland */ 9280 if (ddi_copyout((void *)rls_acc, 9281 (void *)fcio->fcio_obuf, 9282 sizeof (*rls_acc), mode) == 0) { 9283 if (fp_fcio_copyout(fcio, data, 9284 mode)) { 9285 rval = EFAULT; 9286 } 9287 } else { 9288 rval = EFAULT; 9289 } 9290 } else { 9291 rval = EIO; 9292 fcio->fcio_errno = ret; 9293 if (fp_fcio_copyout(fcio, data, mode)) { 9294 rval = EFAULT; 9295 } 9296 } 9297 9298 kmem_free(rls_acc, sizeof (*rls_acc)); 9299 9300 /* ioctl handling is over */ 9301 break; 9302 } 9303 9304 /* 9305 * Send RLS to the destination port. 9306 * Having RLS frame destination is as FPORT is not yet 9307 * supported and will be implemented in future, if needed. 9308 * Following call to get "pd" will fail if dest is FPORT 9309 */ 9310 pd = fctl_hold_remote_port_by_did(port, dest); 9311 if (pd == NULL) { 9312 fcio->fcio_errno = FC_BADOBJECT; 9313 rval = ENXIO; 9314 if (fp_fcio_copyout(fcio, data, mode)) { 9315 rval = EFAULT; 9316 } 9317 break; 9318 } 9319 9320 mutex_enter(&pd->pd_mutex); 9321 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9322 mutex_exit(&pd->pd_mutex); 9323 fctl_release_remote_port(pd); 9324 9325 fcio->fcio_errno = FC_LOGINREQ; 9326 rval = EINVAL; 9327 if (fp_fcio_copyout(fcio, data, mode)) { 9328 rval = EFAULT; 9329 } 9330 break; 9331 } 9332 ASSERT(pd->pd_login_count >= 1); 9333 mutex_exit(&pd->pd_mutex); 9334 9335 /* 9336 * Allocate job structure and set job_code as DUMMY, 9337 * because we will not go through the job thread. 9338 * Instead fp_sendcmd() is called directly here. 9339 */ 9340 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9341 NULL, NULL, KM_SLEEP); 9342 ASSERT(job != NULL); 9343 9344 job->job_counter = 1; 9345 9346 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9347 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9348 if (cmd == NULL) { 9349 fcio->fcio_errno = FC_NOMEM; 9350 rval = ENOMEM; 9351 9352 fctl_release_remote_port(pd); 9353 9354 fctl_dealloc_job(job); 9355 if (fp_fcio_copyout(fcio, data, mode)) { 9356 rval = EFAULT; 9357 } 9358 break; 9359 } 9360 9361 /* Allocate memory for link error status block */ 9362 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9363 9364 mutex_enter(&port->fp_mutex); 9365 mutex_enter(&pd->pd_mutex); 9366 9367 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9368 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9369 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9370 cmd->cmd_retry_count = 1; 9371 cmd->cmd_ulp_pkt = NULL; 9372 9373 fp_rls_init(cmd, job); 9374 9375 job->job_private = (void *)rls_acc; 9376 9377 pd_flags = pd->pd_flags; 9378 pd->pd_flags = PD_ELS_IN_PROGRESS; 9379 9380 mutex_exit(&pd->pd_mutex); 9381 mutex_exit(&port->fp_mutex); 9382 9383 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9384 fctl_jobwait(job); 9385 9386 fcio->fcio_errno = job->job_result; 9387 if (job->job_result == FC_SUCCESS) { 9388 ASSERT(pd != NULL); 9389 /* 9390 * link error status block is now available. 
9391 * Copy it to userland 9392 */ 9393 ASSERT(job->job_private == (void *)rls_acc); 9394 if (ddi_copyout((void *)rls_acc, 9395 (void *)fcio->fcio_obuf, 9396 sizeof (*rls_acc), mode) == 0) { 9397 if (fp_fcio_copyout(fcio, data, 9398 mode)) { 9399 rval = EFAULT; 9400 } 9401 } else { 9402 rval = EFAULT; 9403 } 9404 } else { 9405 rval = EIO; 9406 } 9407 } else { 9408 rval = EIO; 9409 fp_free_pkt(cmd); 9410 } 9411 9412 if (rval) { 9413 mutex_enter(&port->fp_mutex); 9414 mutex_enter(&pd->pd_mutex); 9415 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9416 pd->pd_flags = pd_flags; 9417 } 9418 mutex_exit(&pd->pd_mutex); 9419 mutex_exit(&port->fp_mutex); 9420 } 9421 9422 fctl_release_remote_port(pd); 9423 fctl_dealloc_job(job); 9424 kmem_free(rls_acc, sizeof (*rls_acc)); 9425 9426 if (fp_fcio_copyout(fcio, data, mode)) { 9427 rval = EFAULT; 9428 } 9429 break; 9430 } 9431 9432 case FCIO_NS: { 9433 fc_ns_cmd_t *ns_req; 9434 fc_ns_cmd32_t *ns_req32; 9435 fctl_ns_req_t *ns_cmd; 9436 9437 if (use32 == B_TRUE) { 9438 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9439 rval = EINVAL; 9440 break; 9441 } 9442 9443 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9444 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9445 9446 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9447 sizeof (*ns_req32), mode)) { 9448 rval = EFAULT; 9449 kmem_free(ns_req, sizeof (*ns_req)); 9450 kmem_free(ns_req32, sizeof (*ns_req32)); 9451 break; 9452 } 9453 9454 ns_req->ns_flags = ns_req32->ns_flags; 9455 ns_req->ns_cmd = ns_req32->ns_cmd; 9456 ns_req->ns_req_len = ns_req32->ns_req_len; 9457 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9458 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9459 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9460 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9461 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9462 9463 kmem_free(ns_req32, sizeof (*ns_req32)); 9464 } else { 9465 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9466 rval = EINVAL; 9467 break; 9468 } 9469 9470 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9471 9472 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9473 sizeof (fc_ns_cmd_t), mode)) { 9474 rval = EFAULT; 9475 kmem_free(ns_req, sizeof (*ns_req)); 9476 break; 9477 } 9478 } 9479 9480 if (ns_req->ns_req_len <= 0) { 9481 rval = EINVAL; 9482 kmem_free(ns_req, sizeof (*ns_req)); 9483 break; 9484 } 9485 9486 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9487 ASSERT(job != NULL); 9488 9489 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9490 ns_req->ns_resp_len, ns_req->ns_resp_len, 9491 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9492 ASSERT(ns_cmd != NULL); 9493 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9494 9495 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9496 ns_cmd->ns_gan_max = 1; 9497 ns_cmd->ns_gan_index = 0; 9498 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9499 } 9500 9501 if (ddi_copyin(ns_req->ns_req_payload, 9502 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9503 rval = EFAULT; 9504 fctl_free_ns_cmd(ns_cmd); 9505 fctl_dealloc_job(job); 9506 kmem_free(ns_req, sizeof (*ns_req)); 9507 break; 9508 } 9509 9510 job->job_private = (void *)ns_cmd; 9511 fctl_enque_job(port, job); 9512 fctl_jobwait(job); 9513 rval = job->job_result; 9514 9515 if (rval == FC_SUCCESS) { 9516 if (ns_req->ns_resp_len) { 9517 if (ddi_copyout(ns_cmd->ns_data_buf, 9518 ns_req->ns_resp_payload, 9519 ns_cmd->ns_data_len, mode)) { 9520 rval = EFAULT; 9521 fctl_free_ns_cmd(ns_cmd); 9522 fctl_dealloc_job(job); 9523 kmem_free(ns_req, sizeof (*ns_req)); 9524 break; 9525 } 9526 } 9527 } else { 9528 rval = EIO; 9529 } 9530 
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9531 fctl_free_ns_cmd(ns_cmd); 9532 fctl_dealloc_job(job); 9533 kmem_free(ns_req, sizeof (*ns_req)); 9534 9535 if (fp_fcio_copyout(fcio, data, mode)) { 9536 rval = EFAULT; 9537 } 9538 break; 9539 } 9540 9541 default: 9542 rval = ENOTTY; 9543 break; 9544 } 9545 9546 /* 9547 * If set, reset the EXCL busy bit to 9548 * receive other exclusive access commands 9549 */ 9550 mutex_enter(&port->fp_mutex); 9551 if (port->fp_flag & FP_EXCL_BUSY) { 9552 port->fp_flag &= ~FP_EXCL_BUSY; 9553 } 9554 mutex_exit(&port->fp_mutex); 9555 9556 return (rval); 9557 } 9558 9559 9560 /* 9561 * This function assumes that the response length 9562 * is same regardless of data model (LP32 or LP64) 9563 * which is true for all the ioctls currently 9564 * supported. 9565 */ 9566 static int 9567 fp_copyout(void *from, void *to, size_t len, int mode) 9568 { 9569 return (ddi_copyout(from, to, len, mode)); 9570 } 9571 9572 /* 9573 * This function does the set rnid 9574 */ 9575 static int 9576 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9577 { 9578 int rval = 0; 9579 fc_rnid_t *rnid; 9580 fc_fca_pm_t pm; 9581 9582 /* Allocate memory for node id block */ 9583 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9584 9585 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9586 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9587 kmem_free(rnid, sizeof (fc_rnid_t)); 9588 return (EFAULT); 9589 } 9590 9591 /* Prepare the port management structure */ 9592 bzero((caddr_t)&pm, sizeof (pm)); 9593 9594 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9595 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9596 pm.pm_data_len = sizeof (*rnid); 9597 pm.pm_data_buf = (caddr_t)rnid; 9598 9599 /* Get the adapter's node data */ 9600 rval = port->fp_fca_tran->fca_port_manage( 9601 port->fp_fca_handle, &pm); 9602 9603 if (rval != FC_SUCCESS) { 9604 fcio->fcio_errno = rval; 9605 rval = EIO; 9606 if (fp_fcio_copyout(fcio, data, mode)) { 9607 rval = EFAULT; 9608 } 9609 } else { 9610 mutex_enter(&port->fp_mutex); 9611 /* copy to the port structure */ 9612 bcopy(rnid, &port->fp_rnid_params, 9613 sizeof (port->fp_rnid_params)); 9614 mutex_exit(&port->fp_mutex); 9615 } 9616 9617 kmem_free(rnid, sizeof (fc_rnid_t)); 9618 9619 if (rval != FC_SUCCESS) { 9620 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9621 } 9622 9623 return (rval); 9624 } 9625 9626 /* 9627 * This function does the local pwwn get rnid 9628 */ 9629 static int 9630 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9631 { 9632 fc_rnid_t *rnid; 9633 fc_fca_pm_t pm; 9634 int rval = 0; 9635 uint32_t ret; 9636 9637 /* Allocate memory for rnid data block */ 9638 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9639 9640 mutex_enter(&port->fp_mutex); 9641 if (port->fp_rnid_init == 1) { 9642 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9643 mutex_exit(&port->fp_mutex); 9644 /* xfer node info to userland */ 9645 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9646 sizeof (*rnid), mode) == 0) { 9647 if (fp_fcio_copyout(fcio, data, mode)) { 9648 rval = EFAULT; 9649 } 9650 } else { 9651 rval = EFAULT; 9652 } 9653 9654 kmem_free(rnid, sizeof (fc_rnid_t)); 9655 9656 if (rval != FC_SUCCESS) { 9657 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9658 rval); 9659 } 9660 9661 return (rval); 9662 } 9663 mutex_exit(&port->fp_mutex); 9664 9665 /* Prepare the port management structure */ 9666 bzero((caddr_t)&pm, sizeof (pm)); 9667 9668 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9669 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9670 pm.pm_data_len = sizeof (fc_rnid_t); 9671 pm.pm_data_buf = (caddr_t)rnid; 9672 9673 /* Get the adapter's node data */ 9674 ret = port->fp_fca_tran->fca_port_manage( 9675 port->fp_fca_handle, 9676 &pm); 9677 9678 if (ret == FC_SUCCESS) { 9679 /* initialize in the port_info */ 9680 mutex_enter(&port->fp_mutex); 9681 port->fp_rnid_init = 1; 9682 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9683 mutex_exit(&port->fp_mutex); 9684 9685 /* xfer node info to userland */ 9686 if (ddi_copyout((void *)rnid, 9687 (void *)fcio->fcio_obuf, 9688 sizeof (*rnid), mode) == 0) { 9689 if (fp_fcio_copyout(fcio, data, 9690 mode)) { 9691 rval = EFAULT; 9692 } 9693 } else { 9694 rval = EFAULT; 9695 } 9696 } else { 9697 rval = EIO; 9698 fcio->fcio_errno = ret; 9699 if (fp_fcio_copyout(fcio, data, mode)) { 9700 rval = EFAULT; 9701 } 9702 } 9703 9704 kmem_free(rnid, sizeof (fc_rnid_t)); 9705 9706 if (rval != FC_SUCCESS) { 9707 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9708 } 9709 9710 return (rval); 9711 } 9712 9713 static int 9714 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9715 la_wwn_t *pwwn) 9716 { 9717 int rval = 0; 9718 fc_remote_port_t *pd; 9719 fp_cmd_t *cmd; 9720 job_request_t *job; 9721 la_els_rnid_acc_t *rnid_acc; 9722 9723 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9724 if (pd == NULL) { 9725 /* 9726 * We can safely assume that the destination port 9727 * is logged in. Either the user land will explicitly 9728 * login before issuing RNID ioctl or the device would 9729 * have been configured, meaning already logged in. 9730 */ 9731 9732 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9733 9734 return (ENXIO); 9735 } 9736 /* 9737 * Allocate job structure and set job_code as DUMMY, 9738 * because we will not go thorugh the job thread. 9739 * Instead fp_sendcmd() is called directly here. 9740 */ 9741 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9742 NULL, NULL, KM_SLEEP); 9743 9744 ASSERT(job != NULL); 9745 9746 job->job_counter = 1; 9747 9748 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9749 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9750 if (cmd == NULL) { 9751 fcio->fcio_errno = FC_NOMEM; 9752 rval = ENOMEM; 9753 9754 fctl_dealloc_job(job); 9755 if (fp_fcio_copyout(fcio, data, mode)) { 9756 rval = EFAULT; 9757 } 9758 9759 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9760 9761 return (rval); 9762 } 9763 9764 /* Allocate memory for node id accept block */ 9765 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9766 9767 mutex_enter(&port->fp_mutex); 9768 mutex_enter(&pd->pd_mutex); 9769 9770 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9771 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9772 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9773 cmd->cmd_retry_count = 1; 9774 cmd->cmd_ulp_pkt = NULL; 9775 9776 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9777 9778 job->job_private = (void *)rnid_acc; 9779 9780 pd->pd_flags = PD_ELS_IN_PROGRESS; 9781 9782 mutex_exit(&pd->pd_mutex); 9783 mutex_exit(&port->fp_mutex); 9784 9785 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9786 fctl_jobwait(job); 9787 fcio->fcio_errno = job->job_result; 9788 if (job->job_result == FC_SUCCESS) { 9789 int rnid_cnt; 9790 ASSERT(pd != NULL); 9791 /* 9792 * node id block is now available. 
9793 * Copy it to userland 9794 */ 9795 ASSERT(job->job_private == (void *)rnid_acc); 9796 9797 /* get the response length */ 9798 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9799 rnid_acc->hdr.cmn_len + 9800 rnid_acc->hdr.specific_len; 9801 9802 if (fcio->fcio_olen < rnid_cnt) { 9803 rval = EINVAL; 9804 } else if (ddi_copyout((void *)rnid_acc, 9805 (void *)fcio->fcio_obuf, 9806 rnid_cnt, mode) == 0) { 9807 if (fp_fcio_copyout(fcio, data, 9808 mode)) { 9809 rval = EFAULT; 9810 } 9811 } else { 9812 rval = EFAULT; 9813 } 9814 } else { 9815 rval = EIO; 9816 } 9817 } else { 9818 rval = EIO; 9819 if (pd) { 9820 mutex_enter(&pd->pd_mutex); 9821 pd->pd_flags = PD_IDLE; 9822 mutex_exit(&pd->pd_mutex); 9823 } 9824 fp_free_pkt(cmd); 9825 } 9826 9827 fctl_dealloc_job(job); 9828 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9829 9830 if (fp_fcio_copyout(fcio, data, mode)) { 9831 rval = EFAULT; 9832 } 9833 9834 if (rval != FC_SUCCESS) { 9835 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9836 } 9837 9838 return (rval); 9839 } 9840 9841 /* 9842 * Copy out to userland 9843 */ 9844 static int 9845 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9846 { 9847 int rval; 9848 9849 #ifdef _MULTI_DATAMODEL 9850 switch (ddi_model_convert_from(mode & FMODELS)) { 9851 case DDI_MODEL_ILP32: { 9852 struct fcio32 fcio32; 9853 9854 fcio32.fcio_xfer = fcio->fcio_xfer; 9855 fcio32.fcio_cmd = fcio->fcio_cmd; 9856 fcio32.fcio_flags = fcio->fcio_flags; 9857 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9858 fcio32.fcio_ilen = fcio->fcio_ilen; 9859 fcio32.fcio_ibuf = 9860 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9861 fcio32.fcio_olen = fcio->fcio_olen; 9862 fcio32.fcio_obuf = 9863 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9864 fcio32.fcio_alen = fcio->fcio_alen; 9865 fcio32.fcio_abuf = 9866 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9867 fcio32.fcio_errno = fcio->fcio_errno; 9868 9869 rval = ddi_copyout((void *)&fcio32, (void *)data, 9870 sizeof (struct fcio32), mode); 9871 break; 9872 } 9873 case DDI_MODEL_NONE: 9874 rval = ddi_copyout((void *)fcio, (void *)data, 9875 sizeof (fcio_t), mode); 9876 break; 9877 } 9878 #else 9879 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9880 #endif 9881 9882 return (rval); 9883 } 9884 9885 9886 static void 9887 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9888 { 9889 uint32_t listlen; 9890 fc_portmap_t *changelist; 9891 9892 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9893 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9894 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9895 9896 listlen = 0; 9897 changelist = NULL; 9898 9899 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9900 if (port->fp_statec_busy > 1) { 9901 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9902 } 9903 } 9904 mutex_exit(&port->fp_mutex); 9905 9906 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9907 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9908 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9909 listlen, listlen, KM_SLEEP); 9910 9911 mutex_enter(&port->fp_mutex); 9912 } else { 9913 ASSERT(changelist == NULL && listlen == 0); 9914 mutex_enter(&port->fp_mutex); 9915 if (--port->fp_statec_busy == 0) { 9916 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9917 } 9918 } 9919 } 9920 9921 static int 9922 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9923 { 9924 int rval; 9925 int count; 9926 int index; 9927 int num_devices; 9928 fc_remote_node_t *node; 9929 fc_port_dev_t *devlist; 9930 struct pwwn_hash 
*head; 9931 fc_remote_port_t *pd; 9932 9933 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9934 9935 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9936 9937 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9938 9939 for (count = index = 0; index < pwwn_table_size; index++) { 9940 head = &port->fp_pwwn_table[index]; 9941 pd = head->pwwn_head; 9942 while (pd != NULL) { 9943 mutex_enter(&pd->pd_mutex); 9944 if (pd->pd_state == PORT_DEVICE_INVALID) { 9945 mutex_exit(&pd->pd_mutex); 9946 pd = pd->pd_wwn_hnext; 9947 continue; 9948 } 9949 9950 devlist[count].dev_state = pd->pd_state; 9951 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9952 devlist[count].dev_did = pd->pd_port_id; 9953 devlist[count].dev_did.priv_lilp_posit = 9954 (uint8_t)(index & 0xff); 9955 bcopy((caddr_t)pd->pd_fc4types, 9956 (caddr_t)devlist[count].dev_type, 9957 sizeof (pd->pd_fc4types)); 9958 9959 bcopy((caddr_t)&pd->pd_port_name, 9960 (caddr_t)&devlist[count].dev_pwwn, 9961 sizeof (la_wwn_t)); 9962 9963 node = pd->pd_remote_nodep; 9964 mutex_exit(&pd->pd_mutex); 9965 9966 if (node) { 9967 mutex_enter(&node->fd_mutex); 9968 bcopy((caddr_t)&node->fd_node_name, 9969 (caddr_t)&devlist[count].dev_nwwn, 9970 sizeof (la_wwn_t)); 9971 mutex_exit(&node->fd_mutex); 9972 } 9973 count++; 9974 if (count >= num_devices) { 9975 goto found; 9976 } 9977 } 9978 } 9979 found: 9980 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9981 sizeof (count), mode)) { 9982 rval = FC_FAILURE; 9983 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9984 sizeof (fc_port_dev_t) * num_devices, mode)) { 9985 rval = FC_FAILURE; 9986 } else { 9987 rval = FC_SUCCESS; 9988 } 9989 9990 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9991 9992 return (rval); 9993 } 9994 9995 9996 /* 9997 * Handle Fabric ONLINE 9998 */ 9999 static void 10000 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10001 { 10002 int index; 10003 int rval; 10004 int dbg_count; 10005 int count = 0; 10006 char ww_name[17]; 10007 uint32_t d_id; 10008 uint32_t listlen; 10009 fctl_ns_req_t *ns_cmd; 10010 struct pwwn_hash *head; 10011 fc_remote_port_t *pd; 10012 fc_remote_port_t *npd; 10013 fc_portmap_t *changelist; 10014 10015 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10016 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10017 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10018 10019 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10020 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10021 0, KM_SLEEP); 10022 10023 ASSERT(ns_cmd != NULL); 10024 10025 ns_cmd->ns_cmd_code = NS_GID_PN; 10026 10027 /* 10028 * Check if orphans are showing up now 10029 */ 10030 if (port->fp_orphan_count) { 10031 fc_orphan_t *orp; 10032 fc_orphan_t *norp = NULL; 10033 fc_orphan_t *prev = NULL; 10034 10035 for (orp = port->fp_orphan_list; orp; orp = norp) { 10036 norp = orp->orp_next; 10037 mutex_exit(&port->fp_mutex); 10038 orp->orp_nscan++; 10039 10040 job->job_counter = 1; 10041 job->job_result = FC_SUCCESS; 10042 10043 ((ns_req_gid_pn_t *) 10044 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10045 ((ns_resp_gid_pn_t *) 10046 ns_cmd->ns_data_buf)->pid.port_id = 0; 10047 ((ns_resp_gid_pn_t *) 10048 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10049 10050 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10051 if (rval == FC_SUCCESS) { 10052 d_id = 10053 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10054 pd = fp_create_remote_port_by_ns(port, 10055 d_id, KM_SLEEP); 10056 10057 if (pd != NULL) { 10058 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10059 
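				/*
				 * The GID_PN query for this orphan succeeded,
				 * so the device is visible on the fabric
				 * again: note the event, unlink the orphan
				 * entry, and leave the new remote port marked
				 * PD_ELS_MARK so the PLOGI pass later in this
				 * routine logs back into it.
				 */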
10060 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10061 0, NULL, "N_x Port with D_ID=%x," 10062 " PWWN=%s reappeared in fabric", 10063 d_id, ww_name); 10064 10065 mutex_enter(&port->fp_mutex); 10066 if (prev) { 10067 prev->orp_next = orp->orp_next; 10068 } else { 10069 ASSERT(orp == 10070 port->fp_orphan_list); 10071 port->fp_orphan_list = 10072 orp->orp_next; 10073 } 10074 port->fp_orphan_count--; 10075 mutex_exit(&port->fp_mutex); 10076 kmem_free(orp, sizeof (*orp)); 10077 count++; 10078 10079 mutex_enter(&pd->pd_mutex); 10080 pd->pd_flags = PD_ELS_MARK; 10081 10082 mutex_exit(&pd->pd_mutex); 10083 } else { 10084 prev = orp; 10085 } 10086 } else { 10087 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10088 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10089 10090 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10091 NULL, 10092 " Port WWN %s removed from orphan" 10093 " list after %d scans", ww_name, 10094 orp->orp_nscan); 10095 10096 mutex_enter(&port->fp_mutex); 10097 if (prev) { 10098 prev->orp_next = orp->orp_next; 10099 } else { 10100 ASSERT(orp == 10101 port->fp_orphan_list); 10102 port->fp_orphan_list = 10103 orp->orp_next; 10104 } 10105 port->fp_orphan_count--; 10106 mutex_exit(&port->fp_mutex); 10107 10108 kmem_free(orp, sizeof (*orp)); 10109 } else { 10110 prev = orp; 10111 } 10112 } 10113 mutex_enter(&port->fp_mutex); 10114 } 10115 } 10116 10117 /* 10118 * Walk the Port WWN hash table, reestablish LOGIN 10119 * if a LOGIN is already performed on a particular 10120 * device; Any failure to LOGIN should mark the 10121 * port device OLD. 10122 */ 10123 for (index = 0; index < pwwn_table_size; index++) { 10124 head = &port->fp_pwwn_table[index]; 10125 npd = head->pwwn_head; 10126 10127 while ((pd = npd) != NULL) { 10128 la_wwn_t *pwwn; 10129 10130 npd = pd->pd_wwn_hnext; 10131 10132 /* 10133 * Don't count in the port devices that are new 10134 * unless the total number of devices visible 10135 * through this port is less than FP_MAX_DEVICES 10136 */ 10137 mutex_enter(&pd->pd_mutex); 10138 if (port->fp_dev_count >= FP_MAX_DEVICES || 10139 (port->fp_options & FP_TARGET_MODE)) { 10140 if (pd->pd_type == PORT_DEVICE_NEW || 10141 pd->pd_flags == PD_ELS_MARK || 10142 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10143 mutex_exit(&pd->pd_mutex); 10144 continue; 10145 } 10146 } else { 10147 if (pd->pd_flags == PD_ELS_MARK || 10148 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10149 mutex_exit(&pd->pd_mutex); 10150 continue; 10151 } 10152 pd->pd_type = PORT_DEVICE_OLD; 10153 } 10154 count++; 10155 10156 /* 10157 * Consult with the name server about D_ID changes 10158 */ 10159 job->job_counter = 1; 10160 job->job_result = FC_SUCCESS; 10161 10162 ((ns_req_gid_pn_t *) 10163 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10164 ((ns_resp_gid_pn_t *) 10165 ns_cmd->ns_data_buf)->pid.port_id = 0; 10166 10167 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10168 pid.priv_lilp_posit = 0; 10169 10170 pwwn = &pd->pd_port_name; 10171 pd->pd_flags = PD_ELS_MARK; 10172 10173 mutex_exit(&pd->pd_mutex); 10174 mutex_exit(&port->fp_mutex); 10175 10176 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10177 if (rval != FC_SUCCESS) { 10178 fc_wwn_to_str(pwwn, ww_name); 10179 10180 mutex_enter(&pd->pd_mutex); 10181 d_id = pd->pd_port_id.port_id; 10182 pd->pd_type = PORT_DEVICE_DELETE; 10183 mutex_exit(&pd->pd_mutex); 10184 10185 FP_TRACE(FP_NHEAD1(3, 0), 10186 "fp_fabric_online: PD " 10187 "disappeared; d_id=%x, PWWN=%s", 10188 d_id, ww_name); 10189 10190 FP_TRACE(FP_NHEAD2(9, 0), 10191 "N_x Port with D_ID=%x, PWWN=%s" 10192 " disappeared 
from fabric", d_id, 10193 ww_name); 10194 10195 mutex_enter(&port->fp_mutex); 10196 continue; 10197 } 10198 10199 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10200 10201 mutex_enter(&port->fp_mutex); 10202 mutex_enter(&pd->pd_mutex); 10203 if (d_id != pd->pd_port_id.port_id) { 10204 fctl_delist_did_table(port, pd); 10205 fc_wwn_to_str(pwwn, ww_name); 10206 10207 FP_TRACE(FP_NHEAD2(9, 0), 10208 "D_ID of a device with PWWN %s changed." 10209 " New D_ID = %x, OLD D_ID = %x", ww_name, 10210 d_id, pd->pd_port_id.port_id); 10211 10212 pd->pd_port_id.port_id = BE_32(d_id); 10213 pd->pd_type = PORT_DEVICE_CHANGED; 10214 fctl_enlist_did_table(port, pd); 10215 } 10216 mutex_exit(&pd->pd_mutex); 10217 10218 } 10219 } 10220 10221 if (ns_cmd) { 10222 fctl_free_ns_cmd(ns_cmd); 10223 } 10224 10225 listlen = 0; 10226 changelist = NULL; 10227 if (count) { 10228 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10229 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10230 mutex_exit(&port->fp_mutex); 10231 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10232 mutex_enter(&port->fp_mutex); 10233 } 10234 10235 dbg_count = 0; 10236 10237 job->job_counter = count; 10238 10239 for (index = 0; index < pwwn_table_size; index++) { 10240 head = &port->fp_pwwn_table[index]; 10241 npd = head->pwwn_head; 10242 10243 while ((pd = npd) != NULL) { 10244 npd = pd->pd_wwn_hnext; 10245 10246 mutex_enter(&pd->pd_mutex); 10247 if (pd->pd_flags != PD_ELS_MARK) { 10248 mutex_exit(&pd->pd_mutex); 10249 continue; 10250 } 10251 10252 dbg_count++; 10253 10254 /* 10255 * If it is already marked deletion, nothing 10256 * else to do. 10257 */ 10258 if (pd->pd_type == PORT_DEVICE_DELETE) { 10259 pd->pd_type = PORT_DEVICE_OLD; 10260 10261 mutex_exit(&pd->pd_mutex); 10262 mutex_exit(&port->fp_mutex); 10263 fp_jobdone(job); 10264 mutex_enter(&port->fp_mutex); 10265 10266 continue; 10267 } 10268 10269 /* 10270 * If it is freshly discovered out of 10271 * the orphan list, nothing else to do 10272 */ 10273 if (pd->pd_type == PORT_DEVICE_NEW) { 10274 pd->pd_flags = PD_IDLE; 10275 10276 mutex_exit(&pd->pd_mutex); 10277 mutex_exit(&port->fp_mutex); 10278 fp_jobdone(job); 10279 mutex_enter(&port->fp_mutex); 10280 10281 continue; 10282 } 10283 10284 pd->pd_flags = PD_IDLE; 10285 d_id = pd->pd_port_id.port_id; 10286 10287 /* 10288 * Explicitly mark all devices OLD; successful 10289 * PLOGI should reset this to either NO_CHANGE 10290 * or CHANGED. 
10291 */ 10292 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10293 pd->pd_type = PORT_DEVICE_OLD; 10294 } 10295 10296 mutex_exit(&pd->pd_mutex); 10297 mutex_exit(&port->fp_mutex); 10298 10299 rval = fp_port_login(port, d_id, job, 10300 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10301 10302 if (rval != FC_SUCCESS) { 10303 fp_jobdone(job); 10304 } 10305 mutex_enter(&port->fp_mutex); 10306 } 10307 } 10308 mutex_exit(&port->fp_mutex); 10309 10310 ASSERT(dbg_count == count); 10311 fp_jobwait(job); 10312 10313 mutex_enter(&port->fp_mutex); 10314 10315 ASSERT(port->fp_statec_busy > 0); 10316 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10317 if (port->fp_statec_busy > 1) { 10318 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10319 } 10320 } 10321 mutex_exit(&port->fp_mutex); 10322 } else { 10323 ASSERT(port->fp_statec_busy > 0); 10324 if (port->fp_statec_busy > 1) { 10325 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10326 } 10327 mutex_exit(&port->fp_mutex); 10328 } 10329 10330 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10331 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10332 10333 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10334 listlen, listlen, KM_SLEEP); 10335 10336 mutex_enter(&port->fp_mutex); 10337 } else { 10338 ASSERT(changelist == NULL && listlen == 0); 10339 mutex_enter(&port->fp_mutex); 10340 if (--port->fp_statec_busy == 0) { 10341 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10342 } 10343 } 10344 } 10345 10346 10347 /* 10348 * Fill out device list for userland ioctl in private loop 10349 */ 10350 static int 10351 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10352 { 10353 int rval; 10354 int count; 10355 int index; 10356 int num_devices; 10357 fc_remote_node_t *node; 10358 fc_port_dev_t *devlist; 10359 int lilp_device_count; 10360 fc_lilpmap_t *lilp_map; 10361 uchar_t *alpa_list; 10362 10363 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10364 10365 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10366 if (port->fp_total_devices > port->fp_dev_count && 10367 num_devices >= port->fp_total_devices) { 10368 job_request_t *job; 10369 10370 mutex_exit(&port->fp_mutex); 10371 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10372 job->job_counter = 1; 10373 10374 mutex_enter(&port->fp_mutex); 10375 fp_get_loopmap(port, job); 10376 mutex_exit(&port->fp_mutex); 10377 10378 fp_jobwait(job); 10379 fctl_dealloc_job(job); 10380 } else { 10381 mutex_exit(&port->fp_mutex); 10382 } 10383 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10384 10385 mutex_enter(&port->fp_mutex); 10386 10387 /* 10388 * Applications are accustomed to getting the device list in 10389 * LILP map order. The HBA firmware usually returns the device 10390 * map in the LILP map order and diagnostic applications would 10391 * prefer to receive in the device list in that order too 10392 */ 10393 lilp_map = &port->fp_lilp_map; 10394 alpa_list = &lilp_map->lilp_alpalist[0]; 10395 10396 /* 10397 * the length field corresponds to the offset in the LILP frame 10398 * which begins with 1. The thing to note here is that the 10399 * lilp_device_count is 1 more than fp->fp_total_devices since 10400 * the host adapter's alpa also shows up in the lilp map. We 10401 * don't however return details of the host adapter since 10402 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10403 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10404 * ioctl to obtain details about the host adapter port. 
10405 */ 10406 lilp_device_count = lilp_map->lilp_length; 10407 10408 for (count = index = 0; index < lilp_device_count && 10409 count < num_devices; index++) { 10410 uint32_t d_id; 10411 fc_remote_port_t *pd; 10412 10413 d_id = alpa_list[index]; 10414 10415 mutex_exit(&port->fp_mutex); 10416 pd = fctl_get_remote_port_by_did(port, d_id); 10417 mutex_enter(&port->fp_mutex); 10418 10419 if (pd != NULL) { 10420 mutex_enter(&pd->pd_mutex); 10421 10422 if (pd->pd_state == PORT_DEVICE_INVALID) { 10423 mutex_exit(&pd->pd_mutex); 10424 continue; 10425 } 10426 10427 devlist[count].dev_state = pd->pd_state; 10428 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10429 devlist[count].dev_did = pd->pd_port_id; 10430 devlist[count].dev_did.priv_lilp_posit = 10431 (uint8_t)(index & 0xff); 10432 bcopy((caddr_t)pd->pd_fc4types, 10433 (caddr_t)devlist[count].dev_type, 10434 sizeof (pd->pd_fc4types)); 10435 10436 bcopy((caddr_t)&pd->pd_port_name, 10437 (caddr_t)&devlist[count].dev_pwwn, 10438 sizeof (la_wwn_t)); 10439 10440 node = pd->pd_remote_nodep; 10441 mutex_exit(&pd->pd_mutex); 10442 10443 if (node) { 10444 mutex_enter(&node->fd_mutex); 10445 bcopy((caddr_t)&node->fd_node_name, 10446 (caddr_t)&devlist[count].dev_nwwn, 10447 sizeof (la_wwn_t)); 10448 mutex_exit(&node->fd_mutex); 10449 } 10450 count++; 10451 } 10452 } 10453 10454 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10455 sizeof (count), mode)) { 10456 rval = FC_FAILURE; 10457 } 10458 10459 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10460 sizeof (fc_port_dev_t) * num_devices, mode)) { 10461 rval = FC_FAILURE; 10462 } else { 10463 rval = FC_SUCCESS; 10464 } 10465 10466 kmem_free(devlist, sizeof (*devlist) * num_devices); 10467 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10468 10469 return (rval); 10470 } 10471 10472 10473 /* 10474 * Completion function for responses to unsolicited commands 10475 */ 10476 static void 10477 fp_unsol_intr(fc_packet_t *pkt) 10478 { 10479 fp_cmd_t *cmd; 10480 fc_local_port_t *port; 10481 10482 cmd = pkt->pkt_ulp_private; 10483 port = cmd->cmd_port; 10484 10485 mutex_enter(&port->fp_mutex); 10486 port->fp_out_fpcmds--; 10487 mutex_exit(&port->fp_mutex); 10488 10489 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10490 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10491 "couldn't post response to unsolicited request;" 10492 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10493 pkt->pkt_resp_fhdr.rx_id); 10494 } 10495 10496 if (cmd == port->fp_els_resp_pkt) { 10497 mutex_enter(&port->fp_mutex); 10498 port->fp_els_resp_pkt_busy = 0; 10499 mutex_exit(&port->fp_mutex); 10500 return; 10501 } 10502 10503 fp_free_pkt(cmd); 10504 } 10505 10506 10507 /* 10508 * solicited LINIT ELS completion function 10509 */ 10510 static void 10511 fp_linit_intr(fc_packet_t *pkt) 10512 { 10513 fp_cmd_t *cmd; 10514 job_request_t *job; 10515 fc_linit_resp_t acc; 10516 10517 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10518 10519 mutex_enter(&cmd->cmd_port->fp_mutex); 10520 cmd->cmd_port->fp_out_fpcmds--; 10521 mutex_exit(&cmd->cmd_port->fp_mutex); 10522 10523 if (FP_IS_PKT_ERROR(pkt)) { 10524 (void) fp_common_intr(pkt, 1); 10525 return; 10526 } 10527 10528 job = cmd->cmd_job; 10529 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10530 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10531 if (acc.status != FC_LINIT_SUCCESS) { 10532 job->job_result = FC_FAILURE; 10533 } else { 10534 job->job_result = FC_SUCCESS; 10535 } 10536 10537 fp_iodone(cmd); 10538 } 10539 10540 10541 /* 10542 * Decode the unsolicited request; For FC-4 Device 
and Link data frames 10543 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10544 * ELS requests, submit a request to the job_handler thread to work on it. 10545 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10546 * and save much of the interrupt time processing of unsolicited ELS requests 10547 * and hand it off to the job_handler thread. 10548 */ 10549 static void 10550 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10551 { 10552 uchar_t r_ctl; 10553 uchar_t ls_code; 10554 uint32_t s_id; 10555 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10556 uint32_t cb_arg; 10557 fp_cmd_t *cmd; 10558 fc_local_port_t *port; 10559 job_request_t *job; 10560 fc_remote_port_t *pd; 10561 10562 port = port_handle; 10563 10564 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10565 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10566 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10567 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10568 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10569 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10570 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10571 buf->ub_buffer[0]); 10572 10573 if (type & 0x80000000) { 10574 /* 10575 * Huh ? Nothing much can be done without 10576 * a valid buffer. So just exit. 10577 */ 10578 return; 10579 } 10580 /* 10581 * If the unsolicited interrupts arrive while it isn't 10582 * safe to handle unsolicited callbacks; Drop them, yes, 10583 * drop them on the floor 10584 */ 10585 mutex_enter(&port->fp_mutex); 10586 port->fp_active_ubs++; 10587 if ((port->fp_soft_state & 10588 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10589 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10590 10591 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10592 "not ONLINE. 
s_id=%x, d_id=%x, type=%x, " 10593 "seq_id=%x, ox_id=%x, rx_id=%x" 10594 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10595 buf->ub_frame.type, buf->ub_frame.seq_id, 10596 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10597 10598 ASSERT(port->fp_active_ubs > 0); 10599 if (--(port->fp_active_ubs) == 0) { 10600 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10601 } 10602 10603 mutex_exit(&port->fp_mutex); 10604 10605 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10606 1, &buf->ub_token); 10607 10608 return; 10609 } 10610 10611 r_ctl = buf->ub_frame.r_ctl; 10612 s_id = buf->ub_frame.s_id; 10613 if (port->fp_active_ubs == 1) { 10614 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10615 } 10616 10617 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10618 port->fp_statec_busy) { 10619 mutex_exit(&port->fp_mutex); 10620 pd = fctl_get_remote_port_by_did(port, s_id); 10621 if (pd) { 10622 mutex_enter(&pd->pd_mutex); 10623 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10624 FP_TRACE(FP_NHEAD1(3, 0), 10625 "LOGO for LOGGED IN D_ID %x", 10626 buf->ub_frame.s_id); 10627 pd->pd_state = PORT_DEVICE_VALID; 10628 } 10629 mutex_exit(&pd->pd_mutex); 10630 } 10631 10632 mutex_enter(&port->fp_mutex); 10633 ASSERT(port->fp_active_ubs > 0); 10634 if (--(port->fp_active_ubs) == 0) { 10635 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10636 } 10637 mutex_exit(&port->fp_mutex); 10638 10639 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10640 1, &buf->ub_token); 10641 10642 FP_TRACE(FP_NHEAD1(3, 0), 10643 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10644 buf->ub_frame.s_id); 10645 return; 10646 } 10647 10648 if (port->fp_els_resp_pkt_busy == 0) { 10649 if (r_ctl == R_CTL_ELS_REQ) { 10650 ls_code = buf->ub_buffer[0]; 10651 10652 switch (ls_code) { 10653 case LA_ELS_PLOGI: 10654 case LA_ELS_FLOGI: 10655 port->fp_els_resp_pkt_busy = 1; 10656 mutex_exit(&port->fp_mutex); 10657 fp_i_handle_unsol_els(port, buf); 10658 10659 mutex_enter(&port->fp_mutex); 10660 ASSERT(port->fp_active_ubs > 0); 10661 if (--(port->fp_active_ubs) == 0) { 10662 port->fp_soft_state &= 10663 ~FP_SOFT_IN_UNSOL_CB; 10664 } 10665 mutex_exit(&port->fp_mutex); 10666 port->fp_fca_tran->fca_ub_release( 10667 port->fp_fca_handle, 1, &buf->ub_token); 10668 10669 return; 10670 case LA_ELS_RSCN: 10671 if (++(port)->fp_rscn_count == 10672 FC_INVALID_RSCN_COUNT) { 10673 ++(port)->fp_rscn_count; 10674 } 10675 rscn_count = port->fp_rscn_count; 10676 break; 10677 10678 default: 10679 break; 10680 } 10681 } 10682 } else if ((r_ctl == R_CTL_ELS_REQ) && 10683 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10684 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10685 ++port->fp_rscn_count; 10686 } 10687 rscn_count = port->fp_rscn_count; 10688 } 10689 10690 mutex_exit(&port->fp_mutex); 10691 10692 switch (r_ctl & R_CTL_ROUTING) { 10693 case R_CTL_DEVICE_DATA: 10694 /* 10695 * If the unsolicited buffer is a CT IU, 10696 * have the job_handler thread work on it. 10697 */ 10698 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10699 break; 10700 } 10701 /* FALLTHROUGH */ 10702 10703 case R_CTL_FC4_SVC: { 10704 int sendup = 0; 10705 10706 /* 10707 * If a LOGIN isn't performed before this request 10708 * shut the door on this port with a reply that a 10709 * LOGIN is required. We make an exception however 10710 * for IP broadcast packets and pass them through 10711 * to the IP ULP(s) to handle broadcast requests. 
10712 * This is not a problem for private loop devices 10713 * but for fabric topologies we don't log into the 10714 * remote ports during port initialization and 10715 * the ULPs need to log into requesting ports on 10716 * demand. 10717 */ 10718 pd = fctl_get_remote_port_by_did(port, s_id); 10719 if (pd) { 10720 mutex_enter(&pd->pd_mutex); 10721 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10722 sendup++; 10723 } 10724 mutex_exit(&pd->pd_mutex); 10725 } else if ((pd == NULL) && 10726 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10727 (buf->ub_frame.d_id == 0xffffff || 10728 buf->ub_frame.d_id == 0x00)) { 10729 /* broadcast IP frame - so send it up via the job thread */ 10730 break; 10731 } 10732 10733 /* 10734 * Send all FC4 services via job thread too 10735 */ 10736 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10737 break; 10738 } 10739 10740 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10741 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10742 return; 10743 } 10744 10745 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10746 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10747 0, KM_NOSLEEP, pd); 10748 if (cmd != NULL) { 10749 fp_els_rjt_init(port, cmd, buf, 10750 FC_ACTION_NON_RETRYABLE, 10751 FC_REASON_LOGIN_REQUIRED, NULL); 10752 10753 if (fp_sendcmd(port, cmd, 10754 port->fp_fca_handle) != FC_SUCCESS) { 10755 fp_free_pkt(cmd); 10756 } 10757 } 10758 } 10759 10760 mutex_enter(&port->fp_mutex); 10761 ASSERT(port->fp_active_ubs > 0); 10762 if (--(port->fp_active_ubs) == 0) { 10763 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10764 } 10765 mutex_exit(&port->fp_mutex); 10766 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10767 1, &buf->ub_token); 10768 10769 return; 10770 } 10771 10772 default: 10773 break; 10774 } 10775 10776 /* 10777 * Submit a Request to the job_handler thread to work 10778 * on the unsolicited request. The potential side effect 10779 * of this is that the unsolicited buffer takes a little 10780 * longer to get released but we save interrupt time in 10781 * the bargain. 10782 */ 10783 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10784 10785 /* 10786 * One way that the rscn_count will get used is described below: 10787 * 10788 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10789 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10790 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10791 * by overloading the job_cb_arg to pass the rscn_count. 10792 * 4. When one of the routines processing the RSCN picks it up (ex: 10793 * fp_validate_rscn_page()), it passes this count in the map 10794 * structure (as part of the map_rscn_info structure member) to the 10795 * ULPs. 10796 * 5. When ULPs make calls back to the transport (example interfaces for 10797 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10798 * can now pass back this count as part of the fc_packet's 10799 * pkt_ulp_rscn_count member. fcp does this currently. 10800 * 6. When transport gets a call to transport a command on the wire, it 10801 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10802 * fc_packet. If there is, it will match that info with the current 10803 * rscn_count on that instance of the port. If they don't match up 10804 * then there was a newer RSCN. The ULP gets back an error code which 10805 * informs it of this - FC_DEVICE_BUSY_NEW_RSCN. 10806 * 7. At this point the ULP is free to make up its own mind as to how to 10807 * handle this. 
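 *
 *    (Illustrative sketch only, not part of this driver: a ULP call
 *    propagating the count might look roughly like the fragment below.
 *    The map_rscn_info member name and the fc_ulp_transport() argument
 *    shown here are assumptions based on the description above.
 *
 *        pkt->pkt_ulp_rscn_count = map->map_rscn_info.ulp_rscn_count;
 *        if (fc_ulp_transport(port_handle, pkt) ==
 *            FC_DEVICE_BUSY_NEW_RSCN) {
 *            a newer RSCN superseded this count; retry and wait
 *            for the next state change callback
 *        }
 *    )
 *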
Currently, fcp will reset its retry counters and keep 10808 * retrying the operation it was doing in anticipation of getting a 10809 * new state change call back for the new RSCN. 10810 */ 10811 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10812 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10813 if (job == NULL) { 10814 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10815 "couldn't submit a job to the thread, failing.."); 10816 10817 mutex_enter(&port->fp_mutex); 10818 10819 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10820 --port->fp_rscn_count; 10821 } 10822 10823 ASSERT(port->fp_active_ubs > 0); 10824 if (--(port->fp_active_ubs) == 0) { 10825 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10826 } 10827 10828 mutex_exit(&port->fp_mutex); 10829 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10830 1, &buf->ub_token); 10831 10832 return; 10833 } 10834 job->job_private = (void *)buf; 10835 fctl_enque_job(port, job); 10836 } 10837 10838 10839 /* 10840 * Handle unsolicited requests 10841 */ 10842 static void 10843 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10844 job_request_t *job) 10845 { 10846 uchar_t r_ctl; 10847 uchar_t ls_code; 10848 uint32_t s_id; 10849 fp_cmd_t *cmd; 10850 fc_remote_port_t *pd; 10851 fp_unsol_spec_t *ub_spec; 10852 10853 r_ctl = buf->ub_frame.r_ctl; 10854 s_id = buf->ub_frame.s_id; 10855 10856 switch (r_ctl & R_CTL_ROUTING) { 10857 case R_CTL_EXTENDED_SVC: 10858 if (r_ctl != R_CTL_ELS_REQ) { 10859 break; 10860 } 10861 10862 ls_code = buf->ub_buffer[0]; 10863 switch (ls_code) { 10864 case LA_ELS_LOGO: 10865 case LA_ELS_ADISC: 10866 case LA_ELS_PRLO: 10867 pd = fctl_get_remote_port_by_did(port, s_id); 10868 if (pd == NULL) { 10869 if (!FC_IS_REAL_DEVICE(s_id)) { 10870 break; 10871 } 10872 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10873 break; 10874 } 10875 if ((cmd = fp_alloc_pkt(port, 10876 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10877 NULL)) == NULL) { 10878 /* 10879 * Can this actually fail when 10880 * given KM_SLEEP? (Could be used 10881 * this way in a number of places.) 10882 */ 10883 break; 10884 } 10885 10886 fp_els_rjt_init(port, cmd, buf, 10887 FC_ACTION_NON_RETRYABLE, 10888 FC_REASON_INVALID_LINK_CTRL, job); 10889 10890 if (fp_sendcmd(port, cmd, 10891 port->fp_fca_handle) != FC_SUCCESS) { 10892 fp_free_pkt(cmd); 10893 } 10894 10895 break; 10896 } 10897 if (ls_code == LA_ELS_LOGO) { 10898 fp_handle_unsol_logo(port, buf, pd, job); 10899 } else if (ls_code == LA_ELS_ADISC) { 10900 fp_handle_unsol_adisc(port, buf, pd, job); 10901 } else { 10902 fp_handle_unsol_prlo(port, buf, pd, job); 10903 } 10904 break; 10905 10906 case LA_ELS_PLOGI: 10907 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10908 break; 10909 10910 case LA_ELS_FLOGI: 10911 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10912 break; 10913 10914 case LA_ELS_RSCN: 10915 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10916 break; 10917 10918 default: 10919 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10920 ub_spec->port = port; 10921 ub_spec->buf = buf; 10922 10923 (void) taskq_dispatch(port->fp_taskq, 10924 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10925 return; 10926 } 10927 break; 10928 10929 case R_CTL_BASIC_SVC: 10930 /* 10931 * The unsolicited basic link services could be ABTS 10932 * and RMC (Or even a NOP). Just BA_RJT them until 10933 * such time there arises a need to handle them more 10934 * carefully. 
10935 */ 10936 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10937 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10938 0, KM_SLEEP, NULL); 10939 if (cmd != NULL) { 10940 fp_ba_rjt_init(port, cmd, buf, job); 10941 if (fp_sendcmd(port, cmd, 10942 port->fp_fca_handle) != FC_SUCCESS) { 10943 fp_free_pkt(cmd); 10944 } 10945 } 10946 } 10947 break; 10948 10949 case R_CTL_DEVICE_DATA: 10950 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10951 /* 10952 * Mostly this is of type FC_TYPE_FC_SERVICES. 10953 * As we don't like any Unsolicited FC services 10954 * requests, we would do well to RJT them as 10955 * well. 10956 */ 10957 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10958 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10959 0, KM_SLEEP, NULL); 10960 if (cmd != NULL) { 10961 fp_els_rjt_init(port, cmd, buf, 10962 FC_ACTION_NON_RETRYABLE, 10963 FC_REASON_INVALID_LINK_CTRL, job); 10964 10965 if (fp_sendcmd(port, cmd, 10966 port->fp_fca_handle) != 10967 FC_SUCCESS) { 10968 fp_free_pkt(cmd); 10969 } 10970 } 10971 } 10972 break; 10973 } 10974 /* FALLTHROUGH */ 10975 10976 case R_CTL_FC4_SVC: 10977 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10978 ub_spec->port = port; 10979 ub_spec->buf = buf; 10980 10981 (void) taskq_dispatch(port->fp_taskq, 10982 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10983 return; 10984 10985 case R_CTL_LINK_CTL: 10986 /* 10987 * Turn deaf ear on unsolicited link control frames. 10988 * Typical unsolicited link control Frame is an LCR 10989 * (to reset End to End credit to the default login 10990 * value and abort current sequences for all classes) 10991 * An intelligent microcode/firmware should handle 10992 * this transparently at its level and not pass all 10993 * the way up here. 10994 * 10995 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 10996 * or F_BSY. P_RJT is chosen to be the most appropriate 10997 * at this time. 10998 */ 10999 /* FALLTHROUGH */ 11000 11001 default: 11002 /* 11003 * Just reject everything else as an invalid request. 11004 */ 11005 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11006 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11007 0, KM_SLEEP, NULL); 11008 if (cmd != NULL) { 11009 fp_els_rjt_init(port, cmd, buf, 11010 FC_ACTION_NON_RETRYABLE, 11011 FC_REASON_INVALID_LINK_CTRL, job); 11012 11013 if (fp_sendcmd(port, cmd, 11014 port->fp_fca_handle) != FC_SUCCESS) { 11015 fp_free_pkt(cmd); 11016 } 11017 } 11018 } 11019 break; 11020 } 11021 11022 mutex_enter(&port->fp_mutex); 11023 ASSERT(port->fp_active_ubs > 0); 11024 if (--(port->fp_active_ubs) == 0) { 11025 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11026 } 11027 mutex_exit(&port->fp_mutex); 11028 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11029 1, &buf->ub_token); 11030 } 11031 11032 11033 /* 11034 * Prepare a BA_RJT and send it over. 
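 * A BA_RJT is the Basic Link Service reject used here to answer
 * unsolicited basic link service frames (such as ABTS) that fp does not
 * process; the payload built below simply carries
 * FC_REASON_CMD_UNSUPPORTED with no additional explanation
 * (FC_EXPLN_NONE).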
11035 */ 11036 static void 11037 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11038 job_request_t *job) 11039 { 11040 fc_packet_t *pkt; 11041 la_ba_rjt_t payload; 11042 11043 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11044 11045 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11046 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11047 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11048 cmd->cmd_retry_count = 1; 11049 cmd->cmd_ulp_pkt = NULL; 11050 11051 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11052 cmd->cmd_job = job; 11053 11054 pkt = &cmd->cmd_pkt; 11055 11056 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11057 11058 payload.reserved = 0; 11059 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11060 payload.explanation = FC_EXPLN_NONE; 11061 payload.vendor = 0; 11062 11063 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11064 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11065 } 11066 11067 11068 /* 11069 * Prepare an LS_RJT and send it over 11070 */ 11071 static void 11072 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11073 uchar_t action, uchar_t reason, job_request_t *job) 11074 { 11075 fc_packet_t *pkt; 11076 la_els_rjt_t payload; 11077 11078 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11079 11080 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11081 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11082 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11083 cmd->cmd_retry_count = 1; 11084 cmd->cmd_ulp_pkt = NULL; 11085 11086 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11087 cmd->cmd_job = job; 11088 11089 pkt = &cmd->cmd_pkt; 11090 11091 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11092 11093 payload.ls_code.ls_code = LA_ELS_RJT; 11094 payload.ls_code.mbz = 0; 11095 payload.action = action; 11096 payload.reason = reason; 11097 payload.reserved = 0; 11098 payload.vu = 0; 11099 11100 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11101 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11102 } 11103 11104 /* 11105 * Function: fp_prlo_acc_init 11106 * 11107 * Description: Initializes an Link Service Accept for a PRLO. 11108 * 11109 * Arguments: *port Local port through which the PRLO was 11110 * received. 11111 * cmd Command that will carry the accept. 11112 * *buf Unsolicited buffer containing the PRLO 11113 * request. 11114 * job Job request. 11115 * sleep Allocation mode. 11116 * 11117 * Return Value: *cmd Command containing the response. 11118 * 11119 * Context: Depends on the parameter sleep. 11120 */ 11121 fp_cmd_t * 11122 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11123 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11124 { 11125 fp_cmd_t *cmd; 11126 fc_packet_t *pkt; 11127 la_els_prlo_t *req; 11128 size_t len; 11129 uint16_t flags; 11130 11131 req = (la_els_prlo_t *)buf->ub_buffer; 11132 len = (size_t)ntohs(req->payload_length); 11133 11134 /* 11135 * The payload of the accept to a PRLO has to be the exact match of 11136 * the payload of the request (at the exception of the code). 11137 */ 11138 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11139 11140 if (cmd) { 11141 /* 11142 * The fp command was successfully allocated. 
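 * The accept is built below by echoing the PRLO payload back and
 * overwriting only the ls_code (set to LA_ELS_ACC) and the response
 * code field of the flags (set to SP_RESP_CODE_REQ_EXECUTED).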
11143 */ 11144 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11145 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11146 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11147 cmd->cmd_retry_count = 1; 11148 cmd->cmd_ulp_pkt = NULL; 11149 11150 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11151 cmd->cmd_job = job; 11152 11153 pkt = &cmd->cmd_pkt; 11154 11155 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11156 FC_TYPE_EXTENDED_LS); 11157 11158 /* The code is overwritten for the copy. */ 11159 req->ls_code = LA_ELS_ACC; 11160 /* Response code is set. */ 11161 flags = ntohs(req->flags); 11162 flags &= ~SP_RESP_CODE_MASK; 11163 flags |= SP_RESP_CODE_REQ_EXECUTED; 11164 req->flags = htons(flags); 11165 11166 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11167 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11168 } 11169 return (cmd); 11170 } 11171 11172 /* 11173 * Prepare an ACC response to an ELS request 11174 */ 11175 static void 11176 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11177 job_request_t *job) 11178 { 11179 fc_packet_t *pkt; 11180 ls_code_t payload; 11181 11182 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11183 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11184 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11185 cmd->cmd_retry_count = 1; 11186 cmd->cmd_ulp_pkt = NULL; 11187 11188 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11189 cmd->cmd_job = job; 11190 11191 pkt = &cmd->cmd_pkt; 11192 11193 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11194 11195 payload.ls_code = LA_ELS_ACC; 11196 payload.mbz = 0; 11197 11198 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11199 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11200 } 11201 11202 /* 11203 * Unsolicited PRLO handler 11204 * 11205 * A Process Logout should be handled by the ULP that established it. However, 11206 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11207 * when a device implicitly logs out an initiator (for whatever reason) and 11208 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11209 * The logical thing to do for the device would be to send a LOGO in response 11210 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11211 * a PRLO instead. 11212 * 11213 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11214 * think that the Port Login has been lost. If we follow the Fibre Channel 11215 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11216 * the Port Login has also been lost, the remote port will reject the PRLI 11217 * indicating that we must PLOGI first. The initiator will then turn around and 11218 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11219 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11220 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11221 * needed would be received by FCP. FCP would have, then, to tell the transport 11222 * (fp) to PLOGI. The problem is, the transport would still think the Port 11223 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11224 * if you think it's not necessary". To work around that difficulty, the PRLO 11225 * is treated by the transport as a LOGO. The downside to it is a Port Login 11226 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11227 * has nothing to do with the PRLO) may be impacted. 
However, this is a 11228 * scenario very unlikely to happen. As of today the only ULP in Leadville 11229 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11230 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11231 * unlikely). 11232 */ 11233 static void 11234 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11235 fc_remote_port_t *pd, job_request_t *job) 11236 { 11237 int busy; 11238 int rval; 11239 int retain; 11240 fp_cmd_t *cmd; 11241 fc_portmap_t *listptr; 11242 boolean_t tolerance; 11243 la_els_prlo_t *req; 11244 11245 req = (la_els_prlo_t *)buf->ub_buffer; 11246 11247 if ((ntohs(req->payload_length) != 11248 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11249 (req->page_length != sizeof (service_parameter_page_t))) { 11250 /* 11251 * We are being very restrictive: only one page per 11252 * payload. If that is not the case we reject the ELS, although 11253 * we should instead reply indicating that we handle only a 11254 * single page per PRLO. 11255 */ 11256 goto fp_reject_prlo; 11257 } 11258 11259 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11260 /* 11261 * This is in case the payload advertises a size bigger than 11262 * what it really is. 11263 */ 11264 goto fp_reject_prlo; 11265 } 11266 11267 mutex_enter(&port->fp_mutex); 11268 busy = port->fp_statec_busy; 11269 mutex_exit(&port->fp_mutex); 11270 11271 mutex_enter(&pd->pd_mutex); 11272 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11273 if (!busy) { 11274 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11275 pd->pd_state == PORT_DEVICE_INVALID || 11276 pd->pd_flags == PD_ELS_IN_PROGRESS || 11277 pd->pd_type == PORT_DEVICE_OLD) { 11278 busy++; 11279 } 11280 } 11281 11282 if (busy) { 11283 mutex_exit(&pd->pd_mutex); 11284 11285 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11286 "pd=%p - busy", 11287 pd->pd_port_id.port_id, pd); 11288 11289 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11290 goto fp_reject_prlo; 11291 } 11292 } else { 11293 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11294 11295 if (tolerance) { 11296 fctl_tc_reset(&pd->pd_logo_tc); 11297 retain = 0; 11298 pd->pd_state = PORT_DEVICE_INVALID; 11299 } 11300 11301 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11302 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11303 tolerance, retain); 11304 11305 pd->pd_aux_flags |= PD_LOGGED_OUT; 11306 mutex_exit(&pd->pd_mutex); 11307 11308 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11309 if (cmd == NULL) { 11310 return; 11311 } 11312 11313 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11314 if (rval != FC_SUCCESS) { 11315 fp_free_pkt(cmd); 11316 return; 11317 } 11318 11319 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11320 11321 if (retain) { 11322 fp_unregister_login(pd); 11323 fctl_copy_portmap(listptr, pd); 11324 } else { 11325 uint32_t d_id; 11326 char ww_name[17]; 11327 11328 mutex_enter(&pd->pd_mutex); 11329 d_id = pd->pd_port_id.port_id; 11330 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11331 mutex_exit(&pd->pd_mutex); 11332 11333 FP_TRACE(FP_NHEAD2(9, 0), 11334 "N_x Port with D_ID=%x, PWWN=%s logged out" 11335 " %d times in %d us; Giving up", d_id, ww_name, 11336 FC_LOGO_TOLERANCE_LIMIT, 11337 FC_LOGO_TOLERANCE_TIME_LIMIT); 11338 11339 fp_fillout_old_map(listptr, pd, 0); 11340 listptr->map_type = PORT_DEVICE_OLD; 11341 } 11342 11343 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11344 return; 11345 } 11346 11347 fp_reject_prlo: 11348 11349 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11350 if (cmd != NULL) { 11351 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11352 FC_REASON_INVALID_LINK_CTRL, job); 11353 11354 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11355 fp_free_pkt(cmd); 11356 } 11357 } 11358 } 11359 11360 /* 11361 * Unsolicited LOGO handler 11362 */ 11363 static void 11364 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11365 fc_remote_port_t *pd, job_request_t *job) 11366 { 11367 int busy; 11368 int rval; 11369 int retain; 11370 fp_cmd_t *cmd; 11371 fc_portmap_t *listptr; 11372 boolean_t tolerance; 11373 11374 mutex_enter(&port->fp_mutex); 11375 busy = port->fp_statec_busy; 11376 mutex_exit(&port->fp_mutex); 11377 11378 mutex_enter(&pd->pd_mutex); 11379 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11380 if (!busy) { 11381 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11382 pd->pd_state == PORT_DEVICE_INVALID || 11383 pd->pd_flags == PD_ELS_IN_PROGRESS || 11384 pd->pd_type == PORT_DEVICE_OLD) { 11385 busy++; 11386 } 11387 } 11388 11389 if (busy) { 11390 mutex_exit(&pd->pd_mutex); 11391 11392 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11393 "pd=%p - busy", 11394 pd->pd_port_id.port_id, pd); 11395 11396 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11397 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11398 0, KM_SLEEP, pd); 11399 if (cmd != NULL) { 11400 fp_els_rjt_init(port, cmd, buf, 11401 FC_ACTION_NON_RETRYABLE, 11402 FC_REASON_INVALID_LINK_CTRL, job); 11403 11404 if (fp_sendcmd(port, cmd, 11405 port->fp_fca_handle) != FC_SUCCESS) { 11406 fp_free_pkt(cmd); 11407 } 11408 } 11409 } 11410 } else { 11411 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11412 11413 if (tolerance) { 11414 fctl_tc_reset(&pd->pd_logo_tc); 11415 retain = 0; 11416 pd->pd_state = PORT_DEVICE_INVALID; 11417 } 11418 11419 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11420 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11421 tolerance, retain); 11422 11423 pd->pd_aux_flags |= PD_LOGGED_OUT; 11424 mutex_exit(&pd->pd_mutex); 11425 11426 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11427 KM_SLEEP, pd); 11428 if (cmd == NULL) { 11429 return; 11430 } 11431 11432 fp_els_acc_init(port, cmd, buf, job); 11433 11434 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11435 if (rval != FC_SUCCESS) { 11436 fp_free_pkt(cmd); 11437 return; 11438 } 11439 11440 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11441 11442 if (retain) { 11443 job_request_t *job; 11444 fctl_ns_req_t *ns_cmd; 11445 11446 /* 11447 * when get LOGO, first try to get PID from nameserver 11448 * if failed, then we do not need 11449 * send PLOGI to that remote port 11450 */ 11451 job = fctl_alloc_job( 11452 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11453 11454 if (job != NULL) { 11455 ns_cmd = fctl_alloc_ns_cmd( 11456 sizeof (ns_req_gid_pn_t), 11457 sizeof (ns_resp_gid_pn_t), 11458 sizeof (ns_resp_gid_pn_t), 11459 0, KM_SLEEP); 11460 if (ns_cmd != NULL) { 11461 int ret; 11462 job->job_result = FC_SUCCESS; 11463 ns_cmd->ns_cmd_code = NS_GID_PN; 11464 ((ns_req_gid_pn_t *) 11465 (ns_cmd->ns_cmd_buf))->pwwn = 11466 pd->pd_port_name; 11467 ret = fp_ns_query( 11468 port, ns_cmd, job, 1, KM_SLEEP); 11469 if ((ret != FC_SUCCESS) || 11470 (job->job_result != FC_SUCCESS)) { 11471 fctl_free_ns_cmd(ns_cmd); 11472 fctl_dealloc_job(job); 11473 FP_TRACE(FP_NHEAD2(9, 0), 11474 "NS query failed,", 11475 " delete pd"); 11476 goto delete_pd; 11477 } 11478 fctl_free_ns_cmd(ns_cmd); 11479 } 11480 fctl_dealloc_job(job); 11481 } 11482 fp_unregister_login(pd); 11483 fctl_copy_portmap(listptr, pd); 11484 } else { 11485 uint32_t d_id; 11486 char ww_name[17]; 11487 11488 delete_pd: 11489 mutex_enter(&pd->pd_mutex); 11490 d_id = pd->pd_port_id.port_id; 11491 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11492 mutex_exit(&pd->pd_mutex); 11493 11494 FP_TRACE(FP_NHEAD2(9, 0), 11495 "N_x Port with D_ID=%x, PWWN=%s logged out" 11496 " %d times in %d us; Giving up", d_id, ww_name, 11497 FC_LOGO_TOLERANCE_LIMIT, 11498 FC_LOGO_TOLERANCE_TIME_LIMIT); 11499 11500 fp_fillout_old_map(listptr, pd, 0); 11501 listptr->map_type = PORT_DEVICE_OLD; 11502 } 11503 11504 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11505 } 11506 } 11507 11508 11509 /* 11510 * Perform general purpose preparation of a response to an unsolicited request 11511 */ 11512 static void 11513 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11514 uchar_t r_ctl, uchar_t type) 11515 { 11516 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11517 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11518 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11519 pkt->pkt_cmd_fhdr.type = type; 11520 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11521 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11522 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11523 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11524 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11525 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11526 pkt->pkt_cmd_fhdr.ro = 0; 11527 pkt->pkt_cmd_fhdr.rsvd = 0; 11528 pkt->pkt_comp = fp_unsol_intr; 11529 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11530 } 11531 11532 /* 11533 * Immediate handling of unsolicited FLOGI and PLOGI 
requests. In the 11534 * early development days of public loop soc+ firmware, numerous problems 11535 * were encountered (the details are undocumented and history now) which 11536 * led to the birth of this function. 11537 * 11538 * If a pre-allocated unsolicited response packet is free, send out an 11539 * immediate response, otherwise submit the request to the port thread 11540 * to do the deferred processing. 11541 */ 11542 static void 11543 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11544 { 11545 int sent; 11546 int f_port; 11547 int do_acc; 11548 fp_cmd_t *cmd; 11549 la_els_logi_t *payload; 11550 fc_remote_port_t *pd; 11551 char dww_name[17]; 11552 11553 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11554 11555 cmd = port->fp_els_resp_pkt; 11556 11557 mutex_enter(&port->fp_mutex); 11558 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11559 mutex_exit(&port->fp_mutex); 11560 11561 switch (buf->ub_buffer[0]) { 11562 case LA_ELS_PLOGI: { 11563 int small; 11564 11565 payload = (la_els_logi_t *)buf->ub_buffer; 11566 11567 f_port = FP_IS_F_PORT(payload-> 11568 common_service.cmn_features) ? 1 : 0; 11569 11570 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11571 &payload->nport_ww_name); 11572 pd = fctl_get_remote_port_by_pwwn(port, 11573 &payload->nport_ww_name); 11574 if (pd) { 11575 mutex_enter(&pd->pd_mutex); 11576 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11577 /* 11578 * Most likely this means a cross login is in 11579 * progress or a device about to be yanked out. 11580 * Only accept the plogi if my wwn is smaller. 11581 */ 11582 if (pd->pd_type == PORT_DEVICE_OLD) { 11583 sent = 1; 11584 } 11585 /* 11586 * Stop plogi request (if any) 11587 * attempt from local side to speedup 11588 * the discovery progress. 11589 * Mark the pd as PD_PLOGI_RECEPIENT. 11590 */ 11591 if (f_port == 0 && small < 0) { 11592 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11593 } 11594 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11595 11596 mutex_exit(&pd->pd_mutex); 11597 11598 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11599 "Unsol PLOGI received. PD still exists in the " 11600 "PWWN list. pd=%p PWWN=%s, sent=%x", 11601 pd, dww_name, sent); 11602 11603 if (f_port == 0 && small < 0) { 11604 FP_TRACE(FP_NHEAD1(3, 0), 11605 "fp_i_handle_unsol_els: Mark the pd" 11606 " as plogi recipient, pd=%p, PWWN=%s" 11607 ", sent=%x", 11608 pd, dww_name, sent); 11609 } 11610 } else { 11611 sent = 0; 11612 } 11613 11614 /* 11615 * To avoid Login collisions, accept only if my WWN 11616 * is smaller than the requester (A curious side note 11617 * would be that this rule may not satisfy the PLOGIs 11618 * initiated by the switch from not-so-well known 11619 * ports such as 0xFFFC41) 11620 */ 11621 if ((f_port == 0 && small < 0) || 11622 (((small > 0 && do_acc) || 11623 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11624 if (fp_is_class_supported(port->fp_cos, 11625 buf->ub_class) == FC_FAILURE) { 11626 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11627 cmd->cmd_pkt.pkt_cmdlen = 11628 sizeof (la_els_rjt_t); 11629 cmd->cmd_pkt.pkt_rsplen = 0; 11630 fp_els_rjt_init(port, cmd, buf, 11631 FC_ACTION_NON_RETRYABLE, 11632 FC_REASON_CLASS_NOT_SUPP, NULL); 11633 FP_TRACE(FP_NHEAD1(3, 0), 11634 "fp_i_handle_unsol_els: " 11635 "Unsupported class. 
" 11636 "Rejecting PLOGI"); 11637 11638 } else { 11639 mutex_enter(&port->fp_mutex); 11640 port->fp_els_resp_pkt_busy = 0; 11641 mutex_exit(&port->fp_mutex); 11642 return; 11643 } 11644 } else { 11645 cmd->cmd_pkt.pkt_cmdlen = 11646 sizeof (la_els_logi_t); 11647 cmd->cmd_pkt.pkt_rsplen = 0; 11648 11649 /* 11650 * Sometime later, we should validate 11651 * the service parameters instead of 11652 * just accepting it. 11653 */ 11654 fp_login_acc_init(port, cmd, buf, NULL, 11655 KM_NOSLEEP); 11656 FP_TRACE(FP_NHEAD1(3, 0), 11657 "fp_i_handle_unsol_els: Accepting PLOGI," 11658 " f_port=%d, small=%d, do_acc=%d," 11659 " sent=%d.", f_port, small, do_acc, 11660 sent); 11661 /* 11662 * If fp_port_id is zero and topology is 11663 * Point-to-Point, get the local port id from 11664 * the d_id in the PLOGI request. 11665 * If the outgoing FLOGI hasn't been accepted, 11666 * the topology will be unknown here. But it's 11667 * still safe to save the d_id to fp_port_id, 11668 * just because it will be overwritten later 11669 * if the topology is not Point-to-Point. 11670 */ 11671 mutex_enter(&port->fp_mutex); 11672 if ((port->fp_port_id.port_id == 0) && 11673 (port->fp_topology == FC_TOP_PT_PT || 11674 port->fp_topology == FC_TOP_UNKNOWN)) { 11675 port->fp_port_id.port_id = 11676 buf->ub_frame.d_id; 11677 } 11678 mutex_exit(&port->fp_mutex); 11679 } 11680 } else { 11681 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11682 port->fp_options & FP_SEND_RJT) { 11683 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11684 cmd->cmd_pkt.pkt_rsplen = 0; 11685 fp_els_rjt_init(port, cmd, buf, 11686 FC_ACTION_NON_RETRYABLE, 11687 FC_REASON_LOGICAL_BSY, NULL); 11688 FP_TRACE(FP_NHEAD1(3, 0), 11689 "fp_i_handle_unsol_els: " 11690 "Rejecting PLOGI with Logical Busy." 11691 "Possible Login collision."); 11692 } else { 11693 mutex_enter(&port->fp_mutex); 11694 port->fp_els_resp_pkt_busy = 0; 11695 mutex_exit(&port->fp_mutex); 11696 return; 11697 } 11698 } 11699 break; 11700 } 11701 11702 case LA_ELS_FLOGI: 11703 if (fp_is_class_supported(port->fp_cos, 11704 buf->ub_class) == FC_FAILURE) { 11705 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11706 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11707 cmd->cmd_pkt.pkt_rsplen = 0; 11708 fp_els_rjt_init(port, cmd, buf, 11709 FC_ACTION_NON_RETRYABLE, 11710 FC_REASON_CLASS_NOT_SUPP, NULL); 11711 FP_TRACE(FP_NHEAD1(3, 0), 11712 "fp_i_handle_unsol_els: " 11713 "Unsupported Class. Rejecting FLOGI."); 11714 } else { 11715 mutex_enter(&port->fp_mutex); 11716 port->fp_els_resp_pkt_busy = 0; 11717 mutex_exit(&port->fp_mutex); 11718 return; 11719 } 11720 } else { 11721 mutex_enter(&port->fp_mutex); 11722 if (FC_PORT_STATE_MASK(port->fp_state) != 11723 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11724 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11725 mutex_exit(&port->fp_mutex); 11726 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11727 cmd->cmd_pkt.pkt_cmdlen = 11728 sizeof (la_els_rjt_t); 11729 cmd->cmd_pkt.pkt_rsplen = 0; 11730 fp_els_rjt_init(port, cmd, buf, 11731 FC_ACTION_NON_RETRYABLE, 11732 FC_REASON_INVALID_LINK_CTRL, 11733 NULL); 11734 FP_TRACE(FP_NHEAD1(3, 0), 11735 "fp_i_handle_unsol_els: " 11736 "Invalid Link Ctrl. 
" 11737 "Rejecting FLOGI."); 11738 } else { 11739 mutex_enter(&port->fp_mutex); 11740 port->fp_els_resp_pkt_busy = 0; 11741 mutex_exit(&port->fp_mutex); 11742 return; 11743 } 11744 } else { 11745 mutex_exit(&port->fp_mutex); 11746 cmd->cmd_pkt.pkt_cmdlen = 11747 sizeof (la_els_logi_t); 11748 cmd->cmd_pkt.pkt_rsplen = 0; 11749 /* 11750 * Let's not aggressively validate the N_Port's 11751 * service parameters until PLOGI. Suffice it 11752 * to give a hint that we are an N_Port and we 11753 * are game to some serious stuff here. 11754 */ 11755 fp_login_acc_init(port, cmd, buf, 11756 NULL, KM_NOSLEEP); 11757 FP_TRACE(FP_NHEAD1(3, 0), 11758 "fp_i_handle_unsol_els: " 11759 "Accepting FLOGI."); 11760 } 11761 } 11762 break; 11763 11764 default: 11765 return; 11766 } 11767 11768 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11769 mutex_enter(&port->fp_mutex); 11770 port->fp_els_resp_pkt_busy = 0; 11771 mutex_exit(&port->fp_mutex); 11772 } 11773 } 11774 11775 11776 /* 11777 * Handle unsolicited PLOGI request 11778 */ 11779 static void 11780 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11781 job_request_t *job, int sleep) 11782 { 11783 int sent; 11784 int small; 11785 int f_port; 11786 int do_acc; 11787 fp_cmd_t *cmd; 11788 la_wwn_t *swwn; 11789 la_wwn_t *dwwn; 11790 la_els_logi_t *payload; 11791 fc_remote_port_t *pd; 11792 char dww_name[17]; 11793 11794 payload = (la_els_logi_t *)buf->ub_buffer; 11795 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11796 11797 mutex_enter(&port->fp_mutex); 11798 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11799 mutex_exit(&port->fp_mutex); 11800 11801 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11802 "type=%x, f_ctl=%x" 11803 " seq_id=%x, ox_id=%x, rx_id=%x" 11804 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11805 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11806 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11807 11808 swwn = &port->fp_service_params.nport_ww_name; 11809 dwwn = &payload->nport_ww_name; 11810 small = fctl_wwn_cmp(swwn, dwwn); 11811 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11812 if (pd) { 11813 mutex_enter(&pd->pd_mutex); 11814 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11815 /* 11816 * Most likely this means a cross login is in 11817 * progress or a device about to be yanked out. 11818 * Only accept the plogi if my wwn is smaller. 11819 */ 11820 11821 if (pd->pd_type == PORT_DEVICE_OLD) { 11822 sent = 1; 11823 } 11824 /* 11825 * Stop plogi request (if any) 11826 * attempt from local side to speedup 11827 * the discovery progress. 11828 * Mark the pd as PD_PLOGI_RECEPIENT. 11829 */ 11830 if (f_port == 0 && small < 0) { 11831 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11832 } 11833 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11834 11835 mutex_exit(&pd->pd_mutex); 11836 11837 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11838 " received. PD still exists in the PWWN list. pd=%p " 11839 "PWWN=%s, sent=%x", pd, dww_name, sent); 11840 11841 if (f_port == 0 && small < 0) { 11842 FP_TRACE(FP_NHEAD1(3, 0), 11843 "fp_handle_unsol_plogi: Mark the pd" 11844 " as plogi recipient, pd=%p, PWWN=%s" 11845 ", sent=%x", 11846 pd, dww_name, sent); 11847 } 11848 } else { 11849 sent = 0; 11850 } 11851 11852 /* 11853 * Avoid Login collisions by accepting only if my WWN is smaller. 
11854 * 11855 * A side note: There is no need to start a PLOGI from this end in 11856 * this context if login isn't going to be accepted for the 11857 * above reason as either a LIP (in private loop), RSCN (in 11858 * fabric topology), or an FLOGI (in point to point - Huh ? 11859 * check FC-PH) would normally drive the PLOGI from this end. 11860 * At this point of time there is no need for an inbound PLOGI 11861 * to kick an outbound PLOGI when it is going to be rejected 11862 * for the reason of WWN being smaller. However it isn't hard 11863 * to do that either (when such a need arises, start a timer 11864 * for a duration that extends beyond a normal device discovery 11865 * time and check if an outbound PLOGI did go before that, if 11866 * none fire one) 11867 * 11868 * Unfortunately, as it turned out, during booting, it is possible 11869 * to miss another initiator in the same loop as port driver 11870 * instances are serially attached. While preserving the above 11871 * comments for belly laughs, please kick an outbound PLOGI in 11872 * a non-switch environment (which is a pt pt between N_Ports or 11873 * a private loop) 11874 * 11875 * While preserving the above comments for amusement, send an 11876 * ACC if the PLOGI is going to be rejected for WWN being smaller 11877 * when no discovery is in progress at this end. Turn around 11878 * and make the port device as the PLOGI initiator, so that 11879 * during subsequent link/loop initialization, this end drives 11880 * the PLOGI (In fact both ends do in this particular case, but 11881 * only one wins) 11882 * 11883 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11884 * ports (such as 0xFFFC41) are accepted too. 11885 */ 11886 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11887 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11888 if (fp_is_class_supported(port->fp_cos, 11889 buf->ub_class) == FC_FAILURE) { 11890 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11891 cmd = fp_alloc_pkt(port, 11892 sizeof (la_els_logi_t), 0, sleep, pd); 11893 if (cmd == NULL) { 11894 return; 11895 } 11896 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11897 cmd->cmd_pkt.pkt_rsplen = 0; 11898 fp_els_rjt_init(port, cmd, buf, 11899 FC_ACTION_NON_RETRYABLE, 11900 FC_REASON_CLASS_NOT_SUPP, job); 11901 FP_TRACE(FP_NHEAD1(3, 0), 11902 "fp_handle_unsol_plogi: " 11903 "Unsupported class. rejecting PLOGI"); 11904 } 11905 } else { 11906 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11907 0, sleep, pd); 11908 if (cmd == NULL) { 11909 return; 11910 } 11911 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11912 cmd->cmd_pkt.pkt_rsplen = 0; 11913 11914 /* 11915 * Sometime later, we should validate the service 11916 * parameters instead of just accepting it. 11917 */ 11918 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11919 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11920 "Accepting PLOGI, f_port=%d, small=%d, " 11921 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11922 sent); 11923 11924 /* 11925 * If fp_port_id is zero and topology is 11926 * Point-to-Point, get the local port id from 11927 * the d_id in the PLOGI request. 11928 * If the outgoing FLOGI hasn't been accepted, 11929 * the topology will be unknown here. But it's 11930 * still safe to save the d_id to fp_port_id, 11931 * just because it will be overwritten later 11932 * if the topology is not Point-to-Point. 
11933 */ 11934 mutex_enter(&port->fp_mutex); 11935 if ((port->fp_port_id.port_id == 0) && 11936 (port->fp_topology == FC_TOP_PT_PT || 11937 port->fp_topology == FC_TOP_UNKNOWN)) { 11938 port->fp_port_id.port_id = 11939 buf->ub_frame.d_id; 11940 } 11941 mutex_exit(&port->fp_mutex); 11942 } 11943 } else { 11944 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11945 port->fp_options & FP_SEND_RJT) { 11946 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11947 0, sleep, pd); 11948 if (cmd == NULL) { 11949 return; 11950 } 11951 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11952 cmd->cmd_pkt.pkt_rsplen = 0; 11953 /* 11954 * Send out Logical busy to indicate 11955 * the detection of PLOGI collision 11956 */ 11957 fp_els_rjt_init(port, cmd, buf, 11958 FC_ACTION_NON_RETRYABLE, 11959 FC_REASON_LOGICAL_BSY, job); 11960 11961 fc_wwn_to_str(dwwn, dww_name); 11962 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11963 "Rejecting Unsol PLOGI with Logical Busy." 11964 "possible PLOGI collision. PWWN=%s, sent=%x", 11965 dww_name, sent); 11966 } else { 11967 return; 11968 } 11969 } 11970 11971 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11972 fp_free_pkt(cmd); 11973 } 11974 } 11975 11976 11977 /* 11978 * Handle mischievous turning over of our own FLOGI requests back to 11979 * us by the SOC+ microcode. In other words, look at the class of such 11980 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11981 * on the floor 11982 */ 11983 static void 11984 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11985 job_request_t *job, int sleep) 11986 { 11987 uint32_t state; 11988 uint32_t s_id; 11989 fp_cmd_t *cmd; 11990 11991 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 11992 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11993 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11994 0, sleep, NULL); 11995 if (cmd == NULL) { 11996 return; 11997 } 11998 fp_els_rjt_init(port, cmd, buf, 11999 FC_ACTION_NON_RETRYABLE, 12000 FC_REASON_CLASS_NOT_SUPP, job); 12001 } else { 12002 return; 12003 } 12004 } else { 12005 12006 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12007 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12008 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12009 buf->ub_frame.s_id, buf->ub_frame.d_id, 12010 buf->ub_frame.type, buf->ub_frame.f_ctl, 12011 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12012 buf->ub_frame.rx_id, buf->ub_frame.ro); 12013 12014 mutex_enter(&port->fp_mutex); 12015 state = FC_PORT_STATE_MASK(port->fp_state); 12016 s_id = port->fp_port_id.port_id; 12017 mutex_exit(&port->fp_mutex); 12018 12019 if (state != FC_STATE_ONLINE || 12020 (s_id && buf->ub_frame.s_id == s_id)) { 12021 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12022 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12023 0, sleep, NULL); 12024 if (cmd == NULL) { 12025 return; 12026 } 12027 fp_els_rjt_init(port, cmd, buf, 12028 FC_ACTION_NON_RETRYABLE, 12029 FC_REASON_INVALID_LINK_CTRL, job); 12030 FP_TRACE(FP_NHEAD1(3, 0), 12031 "fp_handle_unsol_flogi: " 12032 "Rejecting PLOGI. Invalid Link CTRL"); 12033 } else { 12034 return; 12035 } 12036 } else { 12037 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12038 0, sleep, NULL); 12039 if (cmd == NULL) { 12040 return; 12041 } 12042 /* 12043 * Let's not aggressively validate the N_Port's 12044 * service parameters until PLOGI. Suffice it 12045 * to give a hint that we are an N_Port and we 12046 * are game to some serious stuff here. 
12047 */ 12048 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12049 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12050 "Accepting PLOGI"); 12051 } 12052 } 12053 12054 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12055 fp_free_pkt(cmd); 12056 } 12057 } 12058 12059 12060 /* 12061 * Perform PLOGI accept 12062 */ 12063 static void 12064 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12065 job_request_t *job, int sleep) 12066 { 12067 fc_packet_t *pkt; 12068 fc_portmap_t *listptr; 12069 la_els_logi_t payload; 12070 12071 ASSERT(buf != NULL); 12072 12073 /* 12074 * If we are sending ACC to PLOGI and we haven't already 12075 * create port and node device handles, let's create them 12076 * here. 12077 */ 12078 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12079 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12080 int small; 12081 int do_acc; 12082 fc_remote_port_t *pd; 12083 la_els_logi_t *req; 12084 12085 req = (la_els_logi_t *)buf->ub_buffer; 12086 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12087 &req->nport_ww_name); 12088 12089 mutex_enter(&port->fp_mutex); 12090 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12091 mutex_exit(&port->fp_mutex); 12092 12093 pd = fctl_create_remote_port(port, &req->node_ww_name, 12094 &req->nport_ww_name, buf->ub_frame.s_id, 12095 PD_PLOGI_RECEPIENT, sleep); 12096 if (pd == NULL) { 12097 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12098 "Couldn't create port device for d_id:0x%x", 12099 buf->ub_frame.s_id); 12100 12101 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12102 "couldn't create port device d_id=%x", 12103 buf->ub_frame.s_id); 12104 } else { 12105 /* 12106 * usoc currently returns PLOGIs inline and 12107 * the maximum buffer size is 60 bytes or so. 12108 * So attempt not to look beyond what is in 12109 * the unsolicited buffer 12110 * 12111 * JNI also traverses this path sometimes 12112 */ 12113 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12114 fp_register_login(NULL, pd, req, buf->ub_class); 12115 } else { 12116 mutex_enter(&pd->pd_mutex); 12117 if (pd->pd_login_count == 0) { 12118 pd->pd_login_count++; 12119 } 12120 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12121 pd->pd_login_class = buf->ub_class; 12122 mutex_exit(&pd->pd_mutex); 12123 } 12124 12125 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12126 if (listptr != NULL) { 12127 fctl_copy_portmap(listptr, pd); 12128 (void) fp_ulp_devc_cb(port, listptr, 12129 1, 1, sleep, 0); 12130 } 12131 12132 if (small > 0 && do_acc) { 12133 mutex_enter(&pd->pd_mutex); 12134 pd->pd_recepient = PD_PLOGI_INITIATOR; 12135 mutex_exit(&pd->pd_mutex); 12136 } 12137 } 12138 } 12139 12140 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12141 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12142 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12143 cmd->cmd_retry_count = 1; 12144 cmd->cmd_ulp_pkt = NULL; 12145 12146 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12147 cmd->cmd_job = job; 12148 12149 pkt = &cmd->cmd_pkt; 12150 12151 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12152 12153 payload = port->fp_service_params; 12154 payload.ls_code.ls_code = LA_ELS_ACC; 12155 12156 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12157 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12158 12159 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12160 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12161 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12162 "statec_busy:0x%x", buf->ub_buffer[0], 
buf->ub_frame.s_id, 12163 buf->ub_bufsize, sizeof (la_els_logi_t), 12164 port->fp_service_params.nport_ww_name.w.naa_id, 12165 port->fp_service_params.nport_ww_name.w.nport_id, 12166 port->fp_service_params.nport_ww_name.w.wwn_hi, 12167 port->fp_service_params.nport_ww_name.w.wwn_lo, 12168 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12169 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12170 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12171 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12172 port->fp_statec_busy); 12173 } 12174 12175 12176 #define RSCN_EVENT_NAME_LEN 256 12177 12178 /* 12179 * Handle RSCNs 12180 */ 12181 static void 12182 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12183 job_request_t *job, int sleep) 12184 { 12185 uint32_t mask; 12186 fp_cmd_t *cmd; 12187 uint32_t count; 12188 int listindex; 12189 int16_t len; 12190 fc_rscn_t *payload; 12191 fc_portmap_t *listptr; 12192 fctl_ns_req_t *ns_cmd; 12193 fc_affected_id_t *page; 12194 caddr_t nvname; 12195 nvlist_t *attr_list = NULL; 12196 12197 mutex_enter(&port->fp_mutex); 12198 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12199 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12200 --port->fp_rscn_count; 12201 } 12202 mutex_exit(&port->fp_mutex); 12203 return; 12204 } 12205 mutex_exit(&port->fp_mutex); 12206 12207 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12208 if (cmd != NULL) { 12209 fp_els_acc_init(port, cmd, buf, job); 12210 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12211 fp_free_pkt(cmd); 12212 } 12213 } 12214 12215 payload = (fc_rscn_t *)buf->ub_buffer; 12216 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12217 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN); 12218 12219 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12220 12221 if (len <= 0) { 12222 mutex_enter(&port->fp_mutex); 12223 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12224 --port->fp_rscn_count; 12225 } 12226 mutex_exit(&port->fp_mutex); 12227 12228 return; 12229 } 12230 12231 ASSERT((len & 0x3) == 0); /* Must be a multiple of 4 */ 12232 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12233 12234 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12235 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12236 12237 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12238 12239 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12240 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12241 0, sleep); 12242 if (ns_cmd == NULL) { 12243 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12244 12245 mutex_enter(&port->fp_mutex); 12246 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12247 --port->fp_rscn_count; 12248 } 12249 mutex_exit(&port->fp_mutex); 12250 12251 return; 12252 } 12253 12254 ns_cmd->ns_cmd_code = NS_GPN_ID; 12255 12256 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12257 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12258 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12259 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12260 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12261 12262 /* Only proceed if we can allocate nvname and the nvlist */ 12263 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12264 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12265 KM_NOSLEEP) == DDI_SUCCESS) { 12266 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12267 port->fp_instance) == 
DDI_SUCCESS && 12268 nvlist_add_byte_array(attr_list, "port-wwn", 12269 port->fp_service_params.nport_ww_name.raw_wwn, 12270 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12271 nvlist_free(attr_list); 12272 attr_list = NULL; 12273 } 12274 } 12275 12276 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12277 /* Add affected page to the event payload */ 12278 if (attr_list != NULL) { 12279 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12280 "affected_page_%d", listindex); 12281 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12282 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12283 /* We don't send a partial event, so dump it */ 12284 nvlist_free(attr_list); 12285 attr_list = NULL; 12286 } 12287 } 12288 /* 12289 * Query the NS to get the Port WWN for this 12290 * affected D_ID. 12291 */ 12292 mask = 0; 12293 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12294 case FC_RSCN_PORT_ADDRESS: 12295 fp_validate_rscn_page(port, page, job, ns_cmd, 12296 listptr, &listindex, sleep); 12297 12298 if (listindex == 0) { 12299 /* 12300 * We essentially did not process this RSCN. So, 12301 * ULPs are not going to be called and so we 12302 * decrement the rscn_count 12303 */ 12304 mutex_enter(&port->fp_mutex); 12305 if (--port->fp_rscn_count == 12306 FC_INVALID_RSCN_COUNT) { 12307 --port->fp_rscn_count; 12308 } 12309 mutex_exit(&port->fp_mutex); 12310 } 12311 break; 12312 12313 case FC_RSCN_AREA_ADDRESS: 12314 mask = 0xFFFF00; 12315 /* FALLTHROUGH */ 12316 12317 case FC_RSCN_DOMAIN_ADDRESS: 12318 if (!mask) { 12319 mask = 0xFF0000; 12320 } 12321 fp_validate_area_domain(port, page->aff_d_id, mask, 12322 job, sleep); 12323 break; 12324 12325 case FC_RSCN_FABRIC_ADDRESS: 12326 /* 12327 * We need to discover all the devices on this 12328 * port. 12329 */ 12330 fp_validate_area_domain(port, 0, 0, job, sleep); 12331 break; 12332 12333 default: 12334 break; 12335 } 12336 } 12337 if (attr_list != NULL) { 12338 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12339 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12340 NULL, DDI_SLEEP); 12341 nvlist_free(attr_list); 12342 } else { 12343 FP_TRACE(FP_NHEAD1(9, 0), 12344 "RSCN handled, but event not sent to userland"); 12345 } 12346 if (nvname != NULL) { 12347 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12348 } 12349 12350 if (ns_cmd) { 12351 fctl_free_ns_cmd(ns_cmd); 12352 } 12353 12354 if (listindex) { 12355 #ifdef DEBUG 12356 page = (fc_affected_id_t *)(buf->ub_buffer + 12357 sizeof (fc_rscn_t)); 12358 12359 if (listptr->map_did.port_id != page->aff_d_id) { 12360 FP_TRACE(FP_NHEAD1(9, 0), 12361 "PORT RSCN: processed=%x, reporting=%x", 12362 listptr->map_did.port_id, page->aff_d_id); 12363 } 12364 #endif 12365 12366 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12367 sleep, 0); 12368 } else { 12369 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12370 } 12371 } 12372 12373 12374 /* 12375 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12376 */ 12377 static void 12378 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12379 { 12380 int is_switch; 12381 int initiator; 12382 fc_local_port_t *port; 12383 12384 port = pd->pd_port; 12385 12386 /* This function has the following bunch of assumptions */ 12387 ASSERT(port != NULL); 12388 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12389 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12390 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12391 12392 pd->pd_state = PORT_DEVICE_INVALID; 12393 pd->pd_type = PORT_DEVICE_OLD; 12394 initiator = (pd->pd_recepient == 
PD_PLOGI_INITIATOR) ? 1 : 0; 12395 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12396 12397 fctl_delist_did_table(port, pd); 12398 fctl_delist_pwwn_table(port, pd); 12399 12400 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12401 " removed the PD=%p from DID and PWWN tables", 12402 port, pd->pd_port_id.port_id, pd); 12403 12404 if ((!flag) && port && initiator && is_switch) { 12405 (void) fctl_add_orphan_held(port, pd); 12406 } 12407 fctl_copy_portmap_held(map, pd); 12408 map->map_pd = pd; 12409 } 12410 12411 /* 12412 * Fill out old map for ULPs 12413 */ 12414 static void 12415 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12416 { 12417 int is_switch; 12418 int initiator; 12419 fc_local_port_t *port; 12420 12421 mutex_enter(&pd->pd_mutex); 12422 port = pd->pd_port; 12423 mutex_exit(&pd->pd_mutex); 12424 12425 mutex_enter(&port->fp_mutex); 12426 mutex_enter(&pd->pd_mutex); 12427 12428 pd->pd_state = PORT_DEVICE_INVALID; 12429 pd->pd_type = PORT_DEVICE_OLD; 12430 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12431 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12432 12433 fctl_delist_did_table(port, pd); 12434 fctl_delist_pwwn_table(port, pd); 12435 12436 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12437 " removed the PD=%p from DID and PWWN tables", 12438 port, pd->pd_port_id.port_id, pd); 12439 12440 mutex_exit(&pd->pd_mutex); 12441 mutex_exit(&port->fp_mutex); 12442 12443 ASSERT(port != NULL); 12444 if ((!flag) && port && initiator && is_switch) { 12445 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12446 } 12447 fctl_copy_portmap(map, pd); 12448 map->map_pd = pd; 12449 } 12450 12451 12452 /* 12453 * Fillout Changed Map for ULPs 12454 */ 12455 static void 12456 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12457 uint32_t *new_did, la_wwn_t *new_pwwn) 12458 { 12459 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12460 12461 pd->pd_type = PORT_DEVICE_CHANGED; 12462 if (new_did) { 12463 pd->pd_port_id.port_id = *new_did; 12464 } 12465 if (new_pwwn) { 12466 pd->pd_port_name = *new_pwwn; 12467 } 12468 mutex_exit(&pd->pd_mutex); 12469 12470 fctl_copy_portmap(map, pd); 12471 12472 mutex_enter(&pd->pd_mutex); 12473 pd->pd_type = PORT_DEVICE_NOCHANGE; 12474 } 12475 12476 12477 /* 12478 * Fillout New Name Server map 12479 */ 12480 static void 12481 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12482 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12483 { 12484 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12485 12486 if (handle) { 12487 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12488 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12489 DDI_DEV_AUTOINCR); 12490 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12491 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12492 DDI_DEV_AUTOINCR); 12493 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12494 (uint8_t *)gan_resp->gan_fc4types, 12495 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12496 } else { 12497 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12498 sizeof (gan_resp->gan_pwwn)); 12499 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12500 sizeof (gan_resp->gan_nwwn)); 12501 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12502 sizeof (gan_resp->gan_fc4types)); 12503 } 12504 port_map->map_did.port_id = d_id; 12505 port_map->map_did.priv_lilp_posit = 0; 12506 port_map->map_hard_addr.hard_addr = 0; 12507 port_map->map_hard_addr.rsvd = 0; 12508 
port_map->map_state = PORT_DEVICE_INVALID; 12509 port_map->map_type = PORT_DEVICE_NEW; 12510 port_map->map_flags = 0; 12511 port_map->map_pd = NULL; 12512 12513 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12514 12515 ASSERT(port != NULL); 12516 } 12517 12518 12519 /* 12520 * Perform LINIT ELS 12521 */ 12522 static int 12523 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12524 job_request_t *job) 12525 { 12526 int rval; 12527 uint32_t d_id; 12528 uint32_t s_id; 12529 uint32_t lfa; 12530 uchar_t class; 12531 uint32_t ret; 12532 fp_cmd_t *cmd; 12533 fc_porttype_t ptype; 12534 fc_packet_t *pkt; 12535 fc_linit_req_t payload; 12536 fc_remote_port_t *pd; 12537 12538 rval = 0; 12539 12540 ASSERT(job != NULL); 12541 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12542 12543 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12544 if (pd == NULL) { 12545 fctl_ns_req_t *ns_cmd; 12546 12547 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12548 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12549 0, sleep); 12550 12551 if (ns_cmd == NULL) { 12552 return (FC_NOMEM); 12553 } 12554 job->job_result = FC_SUCCESS; 12555 ns_cmd->ns_cmd_code = NS_GID_PN; 12556 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12557 12558 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12559 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12560 fctl_free_ns_cmd(ns_cmd); 12561 return (FC_FAILURE); 12562 } 12563 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12564 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12565 12566 fctl_free_ns_cmd(ns_cmd); 12567 lfa = d_id & 0xFFFF00; 12568 12569 /* 12570 * Given this D_ID, get the port type to see if 12571 * we can do LINIT on the LFA 12572 */ 12573 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12574 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12575 0, sleep); 12576 12577 if (ns_cmd == NULL) { 12578 return (FC_NOMEM); 12579 } 12580 12581 job->job_result = FC_SUCCESS; 12582 ns_cmd->ns_cmd_code = NS_GPT_ID; 12583 12584 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12585 ((ns_req_gpt_id_t *) 12586 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12587 12588 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12589 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12590 fctl_free_ns_cmd(ns_cmd); 12591 return (FC_FAILURE); 12592 } 12593 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12594 12595 fctl_free_ns_cmd(ns_cmd); 12596 12597 switch (ptype.port_type) { 12598 case FC_NS_PORT_NL: 12599 case FC_NS_PORT_F_NL: 12600 case FC_NS_PORT_FL: 12601 break; 12602 12603 default: 12604 return (FC_FAILURE); 12605 } 12606 } else { 12607 mutex_enter(&pd->pd_mutex); 12608 ptype = pd->pd_porttype; 12609 12610 switch (pd->pd_porttype.port_type) { 12611 case FC_NS_PORT_NL: 12612 case FC_NS_PORT_F_NL: 12613 case FC_NS_PORT_FL: 12614 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12615 break; 12616 12617 default: 12618 mutex_exit(&pd->pd_mutex); 12619 return (FC_FAILURE); 12620 } 12621 mutex_exit(&pd->pd_mutex); 12622 } 12623 12624 mutex_enter(&port->fp_mutex); 12625 s_id = port->fp_port_id.port_id; 12626 class = port->fp_ns_login_class; 12627 mutex_exit(&port->fp_mutex); 12628 12629 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12630 sizeof (fc_linit_resp_t), sleep, pd); 12631 if (cmd == NULL) { 12632 return (FC_NOMEM); 12633 } 12634 12635 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12636 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12637 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 
12638 cmd->cmd_retry_count = fp_retry_count; 12639 cmd->cmd_ulp_pkt = NULL; 12640 12641 pkt = &cmd->cmd_pkt; 12642 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12643 12644 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12645 12646 /* 12647 * How does LIP work by the way ? 12648 * If the L_Port receives three consecutive identical ordered 12649 * sets whose first two characters (fully decoded) are equal to 12650 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12651 * recognize a Loop Initialization Primitive sequence. The 12652 * character 3 determines the type of lip: 12653 * LIP(F7) Normal LIP 12654 * LIP(F8) Loop Failure LIP 12655 * 12656 * The possible combination for the 3rd and 4th bytes are: 12657 * F7, F7 Normal Lip - No valid AL_PA 12658 * F8, F8 Loop Failure - No valid AL_PA 12659 * F7, AL_PS Normal Lip - Valid source AL_PA 12660 * F8, AL_PS Loop Failure - Valid source AL_PA 12661 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12662 * And Normal Lip for all other loop members 12663 * 0xFF AL_PS Vendor specific reset of all loop members 12664 * 12665 * Now, it may not always be that we, at the source, may have an 12666 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12667 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12668 * payload we are going to set: 12669 * lip_b3 = 0xF7; Normal LIP 12670 * lip_b4 = 0xF7; No valid source AL_PA 12671 */ 12672 payload.ls_code.ls_code = LA_ELS_LINIT; 12673 payload.ls_code.mbz = 0; 12674 payload.rsvd = 0; 12675 payload.func = 0; /* Let Fabric determine the best way */ 12676 payload.lip_b3 = 0xF7; /* Normal LIP */ 12677 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12678 12679 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12680 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12681 12682 job->job_counter = 1; 12683 12684 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12685 if (ret == FC_SUCCESS) { 12686 fp_jobwait(job); 12687 rval = job->job_result; 12688 } else { 12689 rval = FC_FAILURE; 12690 fp_free_pkt(cmd); 12691 } 12692 12693 return (rval); 12694 } 12695 12696 12697 /* 12698 * Fill out the device handles with GAN response 12699 */ 12700 static void 12701 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12702 ns_resp_gan_t *gan_resp) 12703 { 12704 fc_remote_node_t *node; 12705 fc_porttype_t type; 12706 fc_local_port_t *port; 12707 12708 ASSERT(pd != NULL); 12709 ASSERT(handle != NULL); 12710 12711 port = pd->pd_port; 12712 12713 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12714 " port_id=%x, sym_len=%d fc4-type=%x", 12715 pd, gan_resp->gan_type_id.rsvd, 12716 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12717 12718 mutex_enter(&pd->pd_mutex); 12719 12720 ddi_rep_get8(*handle, (uint8_t *)&type, 12721 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12722 12723 pd->pd_porttype.port_type = type.port_type; 12724 pd->pd_porttype.rsvd = 0; 12725 12726 pd->pd_spn_len = gan_resp->gan_spnlen; 12727 if (pd->pd_spn_len) { 12728 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12729 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12730 DDI_DEV_AUTOINCR); 12731 } 12732 12733 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12734 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12735 DDI_DEV_AUTOINCR); 12736 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12737 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12738 DDI_DEV_AUTOINCR); 12739 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12740 (uint8_t 
*)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12741 DDI_DEV_AUTOINCR); 12742 12743 node = pd->pd_remote_nodep; 12744 mutex_exit(&pd->pd_mutex); 12745 12746 mutex_enter(&node->fd_mutex); 12747 12748 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12749 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12750 DDI_DEV_AUTOINCR); 12751 12752 node->fd_snn_len = gan_resp->gan_snnlen; 12753 if (node->fd_snn_len) { 12754 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12755 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12756 DDI_DEV_AUTOINCR); 12757 } 12758 12759 mutex_exit(&node->fd_mutex); 12760 } 12761 12762 12763 /* 12764 * Handles all NS Queries (also means that this function 12765 * doesn't handle NS object registration) 12766 */ 12767 static int 12768 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12769 int polled, int sleep) 12770 { 12771 int rval; 12772 fp_cmd_t *cmd; 12773 12774 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12775 12776 if (ns_cmd->ns_cmd_size == 0) { 12777 return (FC_FAILURE); 12778 } 12779 12780 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12781 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12782 ns_cmd->ns_resp_size, sleep, NULL); 12783 if (cmd == NULL) { 12784 return (FC_NOMEM); 12785 } 12786 12787 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12788 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12789 12790 if (polled) { 12791 job->job_counter = 1; 12792 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12793 } 12794 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12795 if (rval != FC_SUCCESS) { 12796 job->job_result = rval; 12797 fp_iodone(cmd); 12798 if (polled == 0) { 12799 /* 12800 * Return FC_SUCCESS to indicate that 12801 * fp_iodone is performed already. 
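 * If a failure were returned here for an async request, the caller
 * could end up completing the same job a second time; the error is
 * therefore reported only through job_result, which was set above.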
12802 */ 12803 rval = FC_SUCCESS; 12804 } 12805 } 12806 12807 if (polled) { 12808 fp_jobwait(job); 12809 rval = job->job_result; 12810 } 12811 12812 return (rval); 12813 } 12814 12815 12816 /* 12817 * Initialize Common Transport request 12818 */ 12819 static void 12820 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12821 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12822 uint16_t resp_len, job_request_t *job) 12823 { 12824 uint32_t s_id; 12825 uchar_t class; 12826 fc_packet_t *pkt; 12827 fc_ct_header_t ct; 12828 12829 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12830 12831 mutex_enter(&port->fp_mutex); 12832 s_id = port->fp_port_id.port_id; 12833 class = port->fp_ns_login_class; 12834 mutex_exit(&port->fp_mutex); 12835 12836 cmd->cmd_job = job; 12837 cmd->cmd_private = ns_cmd; 12838 pkt = &cmd->cmd_pkt; 12839 12840 ct.ct_rev = CT_REV; 12841 ct.ct_inid = 0; 12842 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12843 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12844 ct.ct_options = 0; 12845 ct.ct_reserved1 = 0; 12846 ct.ct_cmdrsp = cmd_code; 12847 ct.ct_aiusize = resp_len >> 2; 12848 ct.ct_reserved2 = 0; 12849 ct.ct_reason = 0; 12850 ct.ct_expln = 0; 12851 ct.ct_vendor = 0; 12852 12853 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12854 sizeof (ct), DDI_DEV_AUTOINCR); 12855 12856 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12857 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12858 pkt->pkt_cmd_fhdr.s_id = s_id; 12859 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12860 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12861 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12862 pkt->pkt_cmd_fhdr.seq_id = 0; 12863 pkt->pkt_cmd_fhdr.df_ctl = 0; 12864 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12865 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12866 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12867 pkt->pkt_cmd_fhdr.ro = 0; 12868 pkt->pkt_cmd_fhdr.rsvd = 0; 12869 12870 pkt->pkt_comp = fp_ns_intr; 12871 pkt->pkt_ulp_private = (opaque_t)cmd; 12872 pkt->pkt_timeout = FP_NS_TIMEOUT; 12873 12874 if (cmd_buf) { 12875 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12876 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12877 cmd_len, DDI_DEV_AUTOINCR); 12878 } 12879 12880 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12881 12882 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12883 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12884 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12885 cmd->cmd_retry_count = fp_retry_count; 12886 cmd->cmd_ulp_pkt = NULL; 12887 } 12888 12889 12890 /* 12891 * Name Server request interrupt routine 12892 */ 12893 static void 12894 fp_ns_intr(fc_packet_t *pkt) 12895 { 12896 fp_cmd_t *cmd; 12897 fc_local_port_t *port; 12898 fc_ct_header_t resp_hdr; 12899 fc_ct_header_t cmd_hdr; 12900 fctl_ns_req_t *ns_cmd; 12901 12902 cmd = pkt->pkt_ulp_private; 12903 port = cmd->cmd_port; 12904 12905 mutex_enter(&port->fp_mutex); 12906 port->fp_out_fpcmds--; 12907 mutex_exit(&port->fp_mutex); 12908 12909 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12910 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12911 ns_cmd = (fctl_ns_req_t *) 12912 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12913 if (!FP_IS_PKT_ERROR(pkt)) { 12914 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12915 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12916 DDI_DEV_AUTOINCR); 12917 12918 /* 12919 * On x86 architectures, make sure the resp_hdr is big endian. 
12920 * This macro is a NOP on sparc architectures mainly because 12921 * we don't want to end up wasting time since the end result 12922 * is going to be the same. 12923 */ 12924 MAKE_BE_32(&resp_hdr); 12925 12926 if (ns_cmd) { 12927 /* 12928 * Always copy out the response CT_HDR 12929 */ 12930 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12931 sizeof (resp_hdr)); 12932 } 12933 12934 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12935 pkt->pkt_state = FC_PKT_FS_RJT; 12936 pkt->pkt_reason = resp_hdr.ct_reason; 12937 pkt->pkt_expln = resp_hdr.ct_expln; 12938 } 12939 } 12940 12941 if (FP_IS_PKT_ERROR(pkt)) { 12942 if (ns_cmd) { 12943 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12944 ASSERT(ns_cmd->ns_pd != NULL); 12945 12946 /* Mark it OLD if not already done */ 12947 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12948 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12949 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12950 } 12951 12952 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12953 fctl_free_ns_cmd(ns_cmd); 12954 ((fp_cmd_t *) 12955 (pkt->pkt_ulp_private))->cmd_private = NULL; 12956 } 12957 12958 } 12959 12960 FP_TRACE(FP_NHEAD1(4, 0), "NS failure; pkt state=%x reason=%x", 12961 pkt->pkt_state, pkt->pkt_reason); 12962 12963 (void) fp_common_intr(pkt, 1); 12964 12965 return; 12966 } 12967 12968 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12969 uint32_t d_id; 12970 fc_local_port_t *port; 12971 fp_cmd_t *cmd; 12972 12973 d_id = pkt->pkt_cmd_fhdr.d_id; 12974 cmd = pkt->pkt_ulp_private; 12975 port = cmd->cmd_port; 12976 FP_TRACE(FP_NHEAD2(9, 0), 12977 "Bogus NS response received for D_ID=%x", d_id); 12978 } 12979 12980 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12981 fp_gan_handler(pkt, ns_cmd); 12982 return; 12983 } 12984 12985 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 12986 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 12987 if (ns_cmd) { 12988 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 12989 fp_ns_query_handler(pkt, ns_cmd); 12990 return; 12991 } 12992 } 12993 } 12994 12995 fp_iodone(pkt->pkt_ulp_private); 12996 } 12997 12998 12999 /* 13000 * Process NS_GAN response 13001 */ 13002 static void 13003 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13004 { 13005 int my_did; 13006 fc_portid_t d_id; 13007 fp_cmd_t *cmd; 13008 fc_local_port_t *port; 13009 fc_remote_port_t *pd; 13010 ns_req_gan_t gan_req; 13011 ns_resp_gan_t *gan_resp; 13012 13013 ASSERT(ns_cmd != NULL); 13014 13015 cmd = pkt->pkt_ulp_private; 13016 port = cmd->cmd_port; 13017 13018 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13019 13020 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 13021 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13022 13023 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13024 13025 /* 13026 * In this case the priv_lilp_posit field in reality 13027 * is actually represents the relative position on a private loop. 13028 * So zero it while dealing with Port Identifiers. 13029 */ 13030 d_id.priv_lilp_posit = 0; 13031 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13032 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13033 /* 13034 * We've come a full circle; time to get out. 13035 */ 13036 fp_iodone(cmd); 13037 return; 13038 } 13039 13040 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13041 ns_cmd->ns_gan_sid = d_id.port_id; 13042 } 13043 13044 mutex_enter(&port->fp_mutex); 13045 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 
1 : 0; 13046 mutex_exit(&port->fp_mutex); 13047 13048 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, d_id=%x", port, 13049 d_id.port_id); 13050 13051 if (my_did == 0) { 13052 la_wwn_t pwwn; 13053 la_wwn_t nwwn; 13054 13055 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13056 "port=%p, d_id=%x, type_id=%x, " 13057 "pwwn=%x %x %x %x %x %x %x %x, " 13058 "nwwn=%x %x %x %x %x %x %x %x", 13059 port, d_id.port_id, gan_resp->gan_type_id, 13060 13061 gan_resp->gan_pwwn.raw_wwn[0], 13062 gan_resp->gan_pwwn.raw_wwn[1], 13063 gan_resp->gan_pwwn.raw_wwn[2], 13064 gan_resp->gan_pwwn.raw_wwn[3], 13065 gan_resp->gan_pwwn.raw_wwn[4], 13066 gan_resp->gan_pwwn.raw_wwn[5], 13067 gan_resp->gan_pwwn.raw_wwn[6], 13068 gan_resp->gan_pwwn.raw_wwn[7], 13069 13070 gan_resp->gan_nwwn.raw_wwn[0], 13071 gan_resp->gan_nwwn.raw_wwn[1], 13072 gan_resp->gan_nwwn.raw_wwn[2], 13073 gan_resp->gan_nwwn.raw_wwn[3], 13074 gan_resp->gan_nwwn.raw_wwn[4], 13075 gan_resp->gan_nwwn.raw_wwn[5], 13076 gan_resp->gan_nwwn.raw_wwn[6], 13077 gan_resp->gan_nwwn.raw_wwn[7]); 13078 13079 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13080 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13081 DDI_DEV_AUTOINCR); 13082 13083 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13084 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13085 DDI_DEV_AUTOINCR); 13086 13087 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13088 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13089 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13090 } 13091 if (pd != NULL) { 13092 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13093 pd, gan_resp); 13094 } 13095 13096 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13097 *((int *)ns_cmd->ns_data_buf) += 1; 13098 } 13099 13100 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13101 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13102 13103 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13104 fc_port_dev_t *userbuf; 13105 13106 userbuf = ((fc_port_dev_t *) 13107 ns_cmd->ns_data_buf) + 13108 ns_cmd->ns_gan_index++; 13109 13110 userbuf->dev_did = d_id; 13111 13112 ddi_rep_get8(pkt->pkt_resp_acc, 13113 (uint8_t *)userbuf->dev_type, 13114 (uint8_t *)gan_resp->gan_fc4types, 13115 sizeof (userbuf->dev_type), 13116 DDI_DEV_AUTOINCR); 13117 13118 userbuf->dev_nwwn = nwwn; 13119 userbuf->dev_pwwn = pwwn; 13120 13121 if (pd != NULL) { 13122 mutex_enter(&pd->pd_mutex); 13123 userbuf->dev_state = pd->pd_state; 13124 userbuf->dev_hard_addr = 13125 pd->pd_hard_addr; 13126 mutex_exit(&pd->pd_mutex); 13127 } else { 13128 userbuf->dev_state = 13129 PORT_DEVICE_INVALID; 13130 } 13131 } else if (ns_cmd->ns_flags & 13132 FCTL_NS_BUF_IS_FC_PORTMAP) { 13133 fc_portmap_t *map; 13134 13135 map = ((fc_portmap_t *) 13136 ns_cmd->ns_data_buf) + 13137 ns_cmd->ns_gan_index++; 13138 13139 /* 13140 * First fill it like any new map 13141 * and update the port device info 13142 * below. 
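 * fp_fillout_new_nsmap() marks the entry PORT_DEVICE_NEW with a
 * NULL map_pd; when a matching pd already exists, the
 * fctl_copy_portmap() call below replaces that with the pd's
 * current state, otherwise the entry stays INVALID and is
 * downgraded to PORT_DEVICE_NOCHANGE.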
13143 */ 13144 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13145 map, gan_resp, d_id.port_id); 13146 if (pd != NULL) { 13147 fctl_copy_portmap(map, pd); 13148 } else { 13149 map->map_state = PORT_DEVICE_INVALID; 13150 map->map_type = PORT_DEVICE_NOCHANGE; 13151 } 13152 } else { 13153 caddr_t dst_ptr; 13154 13155 dst_ptr = ns_cmd->ns_data_buf + 13156 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13157 13158 ddi_rep_get8(pkt->pkt_resp_acc, 13159 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13160 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13161 } 13162 } else { 13163 ns_cmd->ns_gan_index++; 13164 } 13165 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13166 fp_iodone(cmd); 13167 return; 13168 } 13169 } 13170 13171 gan_req.pid = d_id; 13172 13173 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13174 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13175 sizeof (gan_req), DDI_DEV_AUTOINCR); 13176 13177 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13178 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13179 fp_iodone(cmd); 13180 } else { 13181 mutex_enter(&port->fp_mutex); 13182 port->fp_out_fpcmds++; 13183 mutex_exit(&port->fp_mutex); 13184 } 13185 } 13186 13187 13188 /* 13189 * Handle NS Query interrupt 13190 */ 13191 static void 13192 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13193 { 13194 fp_cmd_t *cmd; 13195 fc_local_port_t *port; 13196 caddr_t src_ptr; 13197 uint32_t xfer_len; 13198 13199 cmd = pkt->pkt_ulp_private; 13200 port = cmd->cmd_port; 13201 13202 xfer_len = ns_cmd->ns_resp_size; 13203 13204 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13205 ns_cmd->ns_cmd_code, xfer_len); 13206 13207 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13208 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13209 13210 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13211 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13212 } 13213 13214 if (xfer_len <= ns_cmd->ns_data_len) { 13215 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13216 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13217 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13218 } 13219 13220 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13221 ASSERT(ns_cmd->ns_pd != NULL); 13222 13223 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13224 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13225 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13226 } 13227 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13228 } 13229 13230 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13231 fctl_free_ns_cmd(ns_cmd); 13232 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13233 } 13234 fp_iodone(cmd); 13235 } 13236 13237 13238 /* 13239 * Handle unsolicited ADISC ELS request 13240 */ 13241 static void 13242 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13243 fc_remote_port_t *pd, job_request_t *job) 13244 { 13245 int rval; 13246 fp_cmd_t *cmd; 13247 13248 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13249 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13250 mutex_enter(&pd->pd_mutex); 13251 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13252 mutex_exit(&pd->pd_mutex); 13253 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13254 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13255 0, KM_SLEEP, pd); 13256 if (cmd != NULL) { 13257 fp_els_rjt_init(port, cmd, buf, 13258 FC_ACTION_NON_RETRYABLE, 13259 FC_REASON_INVALID_LINK_CTRL, job); 13260 13261 if (fp_sendcmd(port, cmd, 13262 port->fp_fca_handle) != FC_SUCCESS) { 13263 
fp_free_pkt(cmd); 13264 } 13265 } 13266 } 13267 } else { 13268 mutex_exit(&pd->pd_mutex); 13269 /* 13270 * Yes, yes, we don't have a hard address. But we 13271 * we should still respond. Huh ? Visit 21.19.2 13272 * of FC-PH-2 which essentially says that if an 13273 * NL_Port doesn't have a hard address, or if a port 13274 * does not have FC-AL capability, it shall report 13275 * zeroes in this field. 13276 */ 13277 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13278 0, KM_SLEEP, pd); 13279 if (cmd == NULL) { 13280 return; 13281 } 13282 fp_adisc_acc_init(port, cmd, buf, job); 13283 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13284 if (rval != FC_SUCCESS) { 13285 fp_free_pkt(cmd); 13286 } 13287 } 13288 } 13289 13290 13291 /* 13292 * Initialize ADISC response. 13293 */ 13294 static void 13295 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13296 job_request_t *job) 13297 { 13298 fc_packet_t *pkt; 13299 la_els_adisc_t payload; 13300 13301 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13302 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13303 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13304 cmd->cmd_retry_count = 1; 13305 cmd->cmd_ulp_pkt = NULL; 13306 13307 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13308 cmd->cmd_job = job; 13309 13310 pkt = &cmd->cmd_pkt; 13311 13312 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13313 13314 payload.ls_code.ls_code = LA_ELS_ACC; 13315 payload.ls_code.mbz = 0; 13316 13317 mutex_enter(&port->fp_mutex); 13318 payload.nport_id = port->fp_port_id; 13319 payload.hard_addr = port->fp_hard_addr; 13320 mutex_exit(&port->fp_mutex); 13321 13322 payload.port_wwn = port->fp_service_params.nport_ww_name; 13323 payload.node_wwn = port->fp_service_params.node_ww_name; 13324 13325 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 13326 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13327 } 13328 13329 13330 /* 13331 * Hold and Install the requested ULP drivers 13332 */ 13333 static void 13334 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13335 { 13336 int len; 13337 int count; 13338 int data_len; 13339 major_t ulp_major; 13340 caddr_t ulp_name; 13341 caddr_t data_ptr; 13342 caddr_t data_buf; 13343 13344 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13345 13346 data_buf = NULL; 13347 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13348 DDI_PROP_DONTPASS, "load-ulp-list", 13349 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13350 return; 13351 } 13352 13353 len = strlen(data_buf); 13354 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13355 13356 data_ptr = data_buf + len + 1; 13357 for (count = 0; count < port->fp_ulp_nload; count++) { 13358 len = strlen(data_ptr) + 1; 13359 ulp_name = kmem_zalloc(len, KM_SLEEP); 13360 bcopy(data_ptr, ulp_name, len); 13361 13362 ulp_major = ddi_name_to_major(ulp_name); 13363 13364 if (ulp_major != (major_t)-1) { 13365 if (modload("drv", ulp_name) < 0) { 13366 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13367 0, NULL, "failed to load %s", 13368 ulp_name); 13369 } 13370 } else { 13371 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13372 "%s isn't a valid driver", ulp_name); 13373 } 13374 13375 kmem_free(ulp_name, len); 13376 data_ptr += len; /* Skip to next field */ 13377 } 13378 13379 /* 13380 * Free the memory allocated by DDI 13381 */ 13382 if (data_buf != NULL) { 13383 kmem_free(data_buf, data_len); 13384 } 13385 } 13386 13387 13388 /* 13389 * Perform LOGO operation 13390 */ 13391 static int 13392 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, 
job_request_t *job) 13393 { 13394 int rval; 13395 fp_cmd_t *cmd; 13396 13397 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13398 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13399 13400 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13401 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13402 13403 mutex_enter(&port->fp_mutex); 13404 mutex_enter(&pd->pd_mutex); 13405 13406 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13407 ASSERT(pd->pd_login_count == 1); 13408 13409 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13410 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13411 cmd->cmd_flags = 0; 13412 cmd->cmd_retry_count = 1; 13413 cmd->cmd_ulp_pkt = NULL; 13414 13415 fp_logo_init(pd, cmd, job); 13416 13417 mutex_exit(&pd->pd_mutex); 13418 mutex_exit(&port->fp_mutex); 13419 13420 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13421 if (rval != FC_SUCCESS) { 13422 fp_iodone(cmd); 13423 } 13424 13425 return (rval); 13426 } 13427 13428 13429 /* 13430 * Perform Port attach callbacks to registered ULPs 13431 */ 13432 static void 13433 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13434 { 13435 fp_soft_attach_t *att; 13436 13437 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13438 att->att_cmd = cmd; 13439 att->att_port = port; 13440 13441 /* 13442 * We need to remember whether or not fctl_busy_port 13443 * succeeded so we know whether or not to call 13444 * fctl_idle_port when the task is complete. 13445 */ 13446 13447 if (fctl_busy_port(port) == 0) { 13448 att->att_need_pm_idle = B_TRUE; 13449 } else { 13450 att->att_need_pm_idle = B_FALSE; 13451 } 13452 13453 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13454 att, KM_SLEEP); 13455 } 13456 13457 13458 /* 13459 * Forward state change notifications on to interested ULPs. 13460 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13461 * real work. 13462 */ 13463 static int 13464 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13465 { 13466 fc_port_clist_t *clist; 13467 13468 clist = kmem_zalloc(sizeof (*clist), sleep); 13469 if (clist == NULL) { 13470 return (FC_NOMEM); 13471 } 13472 13473 clist->clist_state = statec; 13474 13475 mutex_enter(&port->fp_mutex); 13476 clist->clist_flags = port->fp_topology; 13477 mutex_exit(&port->fp_mutex); 13478 13479 clist->clist_port = (opaque_t)port; 13480 clist->clist_len = 0; 13481 clist->clist_size = 0; 13482 clist->clist_map = NULL; 13483 13484 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13485 clist, KM_SLEEP); 13486 13487 return (FC_SUCCESS); 13488 } 13489 13490 13491 /* 13492 * Get name server map 13493 */ 13494 static int 13495 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13496 uint32_t *len, uint32_t sid) 13497 { 13498 int ret; 13499 fctl_ns_req_t *ns_cmd; 13500 13501 /* 13502 * Don't let the allocator do anything for response; 13503 * we have have buffer ready to fillout. 
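 * Passing a zero data size to fctl_alloc_ns_cmd() below keeps it
 * from allocating ns_data_buf; instead ns_data_buf/ns_data_len are
 * pointed at the caller's portmap array, so the GA_NXT walk in
 * fp_gan_handler() fills the map entries in place.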
13504 */ 13505 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13506 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13507 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13508 13509 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13510 ns_cmd->ns_data_buf = (caddr_t)*map; 13511 13512 ASSERT(ns_cmd != NULL); 13513 13514 ns_cmd->ns_gan_index = 0; 13515 ns_cmd->ns_gan_sid = sid; 13516 ns_cmd->ns_cmd_code = NS_GA_NXT; 13517 ns_cmd->ns_gan_max = *len; 13518 13519 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13520 13521 if (ns_cmd->ns_gan_index != *len) { 13522 *len = ns_cmd->ns_gan_index; 13523 } 13524 ns_cmd->ns_data_len = 0; 13525 ns_cmd->ns_data_buf = NULL; 13526 fctl_free_ns_cmd(ns_cmd); 13527 13528 return (ret); 13529 } 13530 13531 13532 /* 13533 * Create a remote port in Fabric topology by using NS services 13534 */ 13535 static fc_remote_port_t * 13536 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13537 { 13538 int rval; 13539 job_request_t *job; 13540 fctl_ns_req_t *ns_cmd; 13541 fc_remote_port_t *pd; 13542 13543 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13544 13545 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13546 port, d_id); 13547 13548 #ifdef DEBUG 13549 mutex_enter(&port->fp_mutex); 13550 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13551 mutex_exit(&port->fp_mutex); 13552 #endif 13553 13554 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13555 if (job == NULL) { 13556 return (NULL); 13557 } 13558 13559 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13560 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13561 FCTL_NS_NO_DATA_BUF), sleep); 13562 if (ns_cmd == NULL) { 13563 return (NULL); 13564 } 13565 13566 job->job_result = FC_SUCCESS; 13567 ns_cmd->ns_gan_max = 1; 13568 ns_cmd->ns_cmd_code = NS_GA_NXT; 13569 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13570 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13571 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13572 13573 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13574 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13575 fctl_free_ns_cmd(ns_cmd); 13576 13577 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13578 fctl_dealloc_job(job); 13579 return (NULL); 13580 } 13581 fctl_dealloc_job(job); 13582 13583 pd = fctl_get_remote_port_by_did(port, d_id); 13584 13585 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13586 port, d_id, pd); 13587 13588 return (pd); 13589 } 13590 13591 13592 /* 13593 * Check for the permissions on an ioctl command. If it is required to have an 13594 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13595 * the ioctl command isn't in one of the list built, shut the door on that too. 13596 * 13597 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13598 * to be made sure that users open the port for an exclusive access while 13599 * performing those operations. 13600 * 13601 * This can prevent a casual user from inflicting damage on the port by 13602 * sending these ioctls from multiple processes/threads (there is no good 13603 * reason why one would need to do that) without actually realizing how 13604 * expensive such commands could turn out to be. 13605 * 13606 * It is also important to note that, even with an exclusive access, 13607 * multiple threads can share the same file descriptor and fire down 13608 * commands in parallel. 
To prevent that, the driver needs to make sure 13609 * that such commands aren't in progress already. This is taken care of 13610 * in the FP_EXCL_BUSY bit of fp_flag. 13611 */ 13612 static int 13613 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13614 { 13615 int ret = FC_FAILURE; 13616 int count; 13617 13618 for (count = 0; 13619 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13620 count++) { 13621 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13622 if (fp_perm_list[count].fp_open_flag & open_flag) { 13623 ret = FC_SUCCESS; 13624 } 13625 break; 13626 } 13627 } 13628 13629 return (ret); 13630 } 13631 13632 13633 /* 13634 * Bind Port driver's unsolicited and state change callbacks 13635 */ 13636 static int 13637 fp_bind_callbacks(fc_local_port_t *port) 13638 { 13639 fc_fca_bind_info_t bind_info = {0}; 13640 fc_fca_port_info_t *port_info; 13641 int rval = DDI_SUCCESS; 13642 uint16_t class; 13643 int node_namelen, port_namelen; 13644 char *nname = NULL, *pname = NULL; 13645 13646 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13647 13648 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13649 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13650 "node-name", &nname) != DDI_PROP_SUCCESS) { 13651 FP_TRACE(FP_NHEAD1(1, 0), 13652 "fp_bind_callback failed to get node-name"); 13653 } 13654 if (nname) { 13655 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13656 } 13657 13658 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13659 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13660 "port-name", &pname) != DDI_PROP_SUCCESS) { 13661 FP_TRACE(FP_NHEAD1(1, 0), 13662 "fp_bind_callback failed to get port-name"); 13663 } 13664 if (pname) { 13665 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13666 } 13667 13668 if (port->fp_npiv_type == FC_NPIV_PORT) { 13669 bind_info.port_npiv = 1; 13670 } 13671 13672 /* 13673 * fca_bind_port returns the FCA driver's handle for the local 13674 * port instance. If the port number isn't supported it returns NULL. 13675 * It also sets up callbacks in the FCA for various 13676 * things like state changes, ELS, etc.
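 * The callbacks handed down in bind_info are fp_statec_cb() for
 * link state changes and fp_unsol_cb() for unsolicited requests;
 * in return the FCA fills port_info with its login parameters,
 * port state and attributes, which are copied into the
 * fc_local_port_t below.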
13677 */ 13678 bind_info.port_statec_cb = fp_statec_cb; 13679 bind_info.port_unsol_cb = fp_unsol_cb; 13680 bind_info.port_num = port->fp_port_num; 13681 bind_info.port_handle = (opaque_t)port; 13682 13683 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13684 13685 /* 13686 * Hold the port driver mutex as the callbacks are bound until the 13687 * service parameters are properly filled in (in order to be able to 13688 * properly respond to unsolicited ELS requests) 13689 */ 13690 mutex_enter(&port->fp_mutex); 13691 13692 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13693 port->fp_fca_dip, port_info, &bind_info); 13694 13695 if (port->fp_fca_handle == NULL) { 13696 rval = DDI_FAILURE; 13697 goto exit; 13698 } 13699 13700 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13701 port->fp_service_params = port_info->pi_login_params; 13702 port->fp_hard_addr = port_info->pi_hard_addr; 13703 13704 /* Copy from the FCA structure to the FP structure */ 13705 port->fp_hba_port_attrs = port_info->pi_attrs; 13706 13707 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13708 port->fp_rnid_init = 1; 13709 bcopy(&port_info->pi_rnid_params.params, 13710 &port->fp_rnid_params, 13711 sizeof (port->fp_rnid_params)); 13712 } else { 13713 port->fp_rnid_init = 0; 13714 } 13715 13716 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13717 if (node_namelen) { 13718 bcopy(&port_info->pi_attrs.sym_node_name, 13719 &port->fp_sym_node_name, 13720 node_namelen); 13721 port->fp_sym_node_namelen = node_namelen; 13722 } 13723 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13724 if (port_namelen) { 13725 bcopy(&port_info->pi_attrs.sym_port_name, 13726 &port->fp_sym_port_name, 13727 port_namelen); 13728 port->fp_sym_port_namelen = port_namelen; 13729 } 13730 13731 /* zero out the normally unused fields right away */ 13732 port->fp_service_params.ls_code.mbz = 0; 13733 port->fp_service_params.ls_code.ls_code = 0; 13734 bzero(&port->fp_service_params.reserved, 13735 sizeof (port->fp_service_params.reserved)); 13736 13737 class = port_info->pi_login_params.class_1.class_opt; 13738 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13739 13740 class = port_info->pi_login_params.class_2.class_opt; 13741 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13742 13743 class = port_info->pi_login_params.class_3.class_opt; 13744 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13745 13746 exit: 13747 if (nname) { 13748 ddi_prop_free(nname); 13749 } 13750 if (pname) { 13751 ddi_prop_free(pname); 13752 } 13753 mutex_exit(&port->fp_mutex); 13754 kmem_free(port_info, sizeof (*port_info)); 13755 13756 return (rval); 13757 } 13758 13759 13760 /* 13761 * Retrieve FCA capabilities 13762 */ 13763 static void 13764 fp_retrieve_caps(fc_local_port_t *port) 13765 { 13766 int rval; 13767 int ub_count; 13768 fc_fcp_dma_t fcp_dma; 13769 fc_reset_action_t action; 13770 fc_dma_behavior_t dma_behavior; 13771 13772 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13773 13774 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13775 FC_CAP_UNSOL_BUF, &ub_count); 13776 13777 switch (rval) { 13778 case FC_CAP_FOUND: 13779 case FC_CAP_SETTABLE: 13780 switch (ub_count) { 13781 case 0: 13782 break; 13783 13784 case -1: 13785 ub_count = fp_unsol_buf_count; 13786 break; 13787 13788 default: 13789 /* 1/4th of total buffers is my share */ 13790 ub_count = 13791 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13792 break; 13793 } 13794 break; 13795 13796 default: 13797 ub_count = 0; 13798 break; 13799 } 13800 13801 mutex_enter(&port->fp_mutex); 13802 port->fp_ub_count = ub_count; 13803 mutex_exit(&port->fp_mutex); 13804 13805 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13806 FC_CAP_POST_RESET_BEHAVIOR, &action); 13807 13808 switch (rval) { 13809 case FC_CAP_FOUND: 13810 case FC_CAP_SETTABLE: 13811 switch (action) { 13812 case FC_RESET_RETURN_NONE: 13813 case FC_RESET_RETURN_ALL: 13814 case FC_RESET_RETURN_OUTSTANDING: 13815 break; 13816 13817 default: 13818 action = FC_RESET_RETURN_NONE; 13819 break; 13820 } 13821 break; 13822 13823 default: 13824 action = FC_RESET_RETURN_NONE; 13825 break; 13826 } 13827 mutex_enter(&port->fp_mutex); 13828 port->fp_reset_action = action; 13829 mutex_exit(&port->fp_mutex); 13830 13831 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13832 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13833 13834 switch (rval) { 13835 case FC_CAP_FOUND: 13836 switch (dma_behavior) { 13837 case FC_ALLOW_STREAMING: 13838 /* FALLTHROUGH */ 13839 case FC_NO_STREAMING: 13840 break; 13841 13842 default: 13843 /* 13844 * If capability was found and the value 13845 * was incorrect assume the worst 13846 */ 13847 dma_behavior = FC_NO_STREAMING; 13848 break; 13849 } 13850 break; 13851 13852 default: 13853 /* 13854 * If capability was not defined - allow streaming; existing 13855 * FCAs should not be affected. 13856 */ 13857 dma_behavior = FC_ALLOW_STREAMING; 13858 break; 13859 } 13860 mutex_enter(&port->fp_mutex); 13861 port->fp_dma_behavior = dma_behavior; 13862 mutex_exit(&port->fp_mutex); 13863 13864 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13865 FC_CAP_FCP_DMA, &fcp_dma); 13866 13867 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13868 fcp_dma != FC_DVMA_SPACE)) { 13869 fcp_dma = FC_DVMA_SPACE; 13870 } 13871 13872 mutex_enter(&port->fp_mutex); 13873 port->fp_fcp_dma = fcp_dma; 13874 mutex_exit(&port->fp_mutex); 13875 } 13876 13877 13878 /* 13879 * Handle Domain, Area changes in the Fabric. 
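 * The caller passes the affected address 'id' together with a
 * 'mask' covering the domain or area portion of the D_ID; every
 * remote port we initiated login to whose (d_id & mask) == id is
 * marked OLD, revalidated against the name server and then either
 * logged in again or reported to the ULPs as gone.  For example
 * (illustrative values only), an area RSCN for 0xAB1200 would
 * arrive here as id=0xAB1200 with mask=0xFFFF00, so every device
 * in domain 0xAB, area 0x12 gets revalidated.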
13880 */ 13881 static void 13882 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13883 job_request_t *job, int sleep) 13884 { 13885 #ifdef DEBUG 13886 uint32_t dcnt; 13887 #endif 13888 int rval; 13889 int send; 13890 int index; 13891 int listindex; 13892 int login; 13893 int job_flags; 13894 char ww_name[17]; 13895 uint32_t d_id; 13896 uint32_t count; 13897 fctl_ns_req_t *ns_cmd; 13898 fc_portmap_t *list; 13899 fc_orphan_t *orp; 13900 fc_orphan_t *norp; 13901 fc_orphan_t *prev; 13902 fc_remote_port_t *pd; 13903 fc_remote_port_t *npd; 13904 struct pwwn_hash *head; 13905 13906 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13907 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13908 0, sleep); 13909 if (ns_cmd == NULL) { 13910 mutex_enter(&port->fp_mutex); 13911 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13912 --port->fp_rscn_count; 13913 } 13914 mutex_exit(&port->fp_mutex); 13915 13916 return; 13917 } 13918 ns_cmd->ns_cmd_code = NS_GID_PN; 13919 13920 /* 13921 * We need to get a new count of devices from the 13922 * name server, which will also create any new devices 13923 * as needed. 13924 */ 13925 13926 (void) fp_ns_get_devcount(port, job, 1, sleep); 13927 13928 FP_TRACE(FP_NHEAD1(3, 0), 13929 "fp_validate_area_domain: get_devcount found %d devices", 13930 port->fp_total_devices); 13931 13932 mutex_enter(&port->fp_mutex); 13933 13934 for (count = index = 0; index < pwwn_table_size; index++) { 13935 head = &port->fp_pwwn_table[index]; 13936 pd = head->pwwn_head; 13937 while (pd != NULL) { 13938 mutex_enter(&pd->pd_mutex); 13939 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13940 if ((pd->pd_port_id.port_id & mask) == id && 13941 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13942 count++; 13943 pd->pd_type = PORT_DEVICE_OLD; 13944 pd->pd_flags = PD_ELS_MARK; 13945 } 13946 } 13947 mutex_exit(&pd->pd_mutex); 13948 pd = pd->pd_wwn_hnext; 13949 } 13950 } 13951 13952 #ifdef DEBUG 13953 dcnt = count; 13954 #endif /* DEBUG */ 13955 13956 /* 13957 * Since port->fp_orphan_count is declared an 'int' it is 13958 * theoretically possible that the count could go negative. 13959 * 13960 * This would be bad and if that happens we really do want 13961 * to know. 13962 */ 13963 13964 ASSERT(port->fp_orphan_count >= 0); 13965 13966 count += port->fp_orphan_count; 13967 13968 /* 13969 * We add the port->fp_total_devices value to the count 13970 * in the case where our port is newly attached. This is 13971 * because we haven't done any discovery and we don't have 13972 * any orphans in the port's orphan list. If we do not do 13973 * this addition to count then we won't alloc enough kmem 13974 * to do discovery with. 
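 * The count computed here only sizes the change list allocated
 * below, so erring on the high side is harmless; too small a count
 * would leave no room for the portmap entries filled in during
 * revalidation.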
13975 */ 13976 13977 if (count == 0) { 13978 count += port->fp_total_devices; 13979 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13980 "0x%x orphans found, using 0x%x", 13981 port->fp_orphan_count, count); 13982 } 13983 13984 mutex_exit(&port->fp_mutex); 13985 13986 /* 13987 * Allocate the change list 13988 */ 13989 13990 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 13991 if (list == NULL) { 13992 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13993 " Not enough memory to service RSCNs" 13994 " for %d ports, continuing...", count); 13995 13996 fctl_free_ns_cmd(ns_cmd); 13997 13998 mutex_enter(&port->fp_mutex); 13999 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14000 --port->fp_rscn_count; 14001 } 14002 mutex_exit(&port->fp_mutex); 14003 14004 return; 14005 } 14006 14007 /* 14008 * Attempt to validate or invalidate the devices that were 14009 * already in the pwwn hash table. 14010 */ 14011 14012 mutex_enter(&port->fp_mutex); 14013 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14014 head = &port->fp_pwwn_table[index]; 14015 npd = head->pwwn_head; 14016 14017 while ((pd = npd) != NULL) { 14018 npd = pd->pd_wwn_hnext; 14019 14020 mutex_enter(&pd->pd_mutex); 14021 if ((pd->pd_port_id.port_id & mask) == id && 14022 pd->pd_flags == PD_ELS_MARK) { 14023 la_wwn_t *pwwn; 14024 14025 job->job_result = FC_SUCCESS; 14026 14027 ((ns_req_gid_pn_t *) 14028 (ns_cmd->ns_cmd_buf))->pwwn = 14029 pd->pd_port_name; 14030 14031 pwwn = &pd->pd_port_name; 14032 d_id = pd->pd_port_id.port_id; 14033 14034 mutex_exit(&pd->pd_mutex); 14035 mutex_exit(&port->fp_mutex); 14036 14037 rval = fp_ns_query(port, ns_cmd, job, 1, 14038 sleep); 14039 if (rval != FC_SUCCESS) { 14040 fc_wwn_to_str(pwwn, ww_name); 14041 14042 FP_TRACE(FP_NHEAD1(3, 0), 14043 "AREA RSCN: PD disappeared; " 14044 "d_id=%x, PWWN=%s", d_id, ww_name); 14045 14046 FP_TRACE(FP_NHEAD2(9, 0), 14047 "N_x Port with D_ID=%x," 14048 " PWWN=%s disappeared from fabric", 14049 d_id, ww_name); 14050 14051 fp_fillout_old_map(list + listindex++, 14052 pd, 1); 14053 } else { 14054 fctl_copy_portmap(list + listindex++, 14055 pd); 14056 14057 mutex_enter(&pd->pd_mutex); 14058 pd->pd_flags = PD_ELS_IN_PROGRESS; 14059 mutex_exit(&pd->pd_mutex); 14060 } 14061 14062 mutex_enter(&port->fp_mutex); 14063 } else { 14064 mutex_exit(&pd->pd_mutex); 14065 } 14066 } 14067 } 14068 14069 mutex_exit(&port->fp_mutex); 14070 14071 ASSERT(listindex == dcnt); 14072 14073 job->job_counter = listindex; 14074 job_flags = job->job_flags; 14075 job->job_flags |= JOB_TYPE_FP_ASYNC; 14076 14077 /* 14078 * Login (if we were the initiator) or validate devices in the 14079 * port map. 14080 */ 14081 14082 for (index = 0; index < listindex; index++) { 14083 pd = list[index].map_pd; 14084 14085 mutex_enter(&pd->pd_mutex); 14086 ASSERT((pd->pd_port_id.port_id & mask) == id); 14087 14088 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14089 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14090 mutex_exit(&pd->pd_mutex); 14091 fp_jobdone(job); 14092 continue; 14093 } 14094 14095 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14096 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14097 d_id = pd->pd_port_id.port_id; 14098 mutex_exit(&pd->pd_mutex); 14099 14100 if ((d_id & mask) == id && send) { 14101 if (login) { 14102 FP_TRACE(FP_NHEAD1(6, 0), 14103 "RSCN and PLOGI request;" 14104 " pd=%p, job=%p d_id=%x, index=%d", pd, 14105 job, d_id, index); 14106 14107 rval = fp_port_login(port, d_id, job, 14108 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14109 if (rval != FC_SUCCESS) { 14110 mutex_enter(&pd->pd_mutex); 14111 pd->pd_flags = PD_IDLE; 14112 mutex_exit(&pd->pd_mutex); 14113 14114 job->job_result = rval; 14115 fp_jobdone(job); 14116 } 14117 14118 FP_TRACE(FP_NHEAD2(4, 0), 14119 "PLOGI succeeded:no skip(1) for " 14120 "D_ID %x", d_id); 14121 list[index].map_flags |= 14122 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14123 } else { 14124 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14125 " pd=%p, job=%p d_id=%x, index=%d", pd, 14126 job, d_id, index); 14127 14128 rval = fp_ns_validate_device(port, pd, job, 14129 0, sleep); 14130 if (rval != FC_SUCCESS) { 14131 fp_jobdone(job); 14132 } 14133 mutex_enter(&pd->pd_mutex); 14134 pd->pd_flags = PD_IDLE; 14135 mutex_exit(&pd->pd_mutex); 14136 } 14137 } else { 14138 FP_TRACE(FP_NHEAD1(6, 0), 14139 "RSCN and NO request sent; pd=%p," 14140 " d_id=%x, index=%d", pd, d_id, index); 14141 14142 mutex_enter(&pd->pd_mutex); 14143 pd->pd_flags = PD_IDLE; 14144 mutex_exit(&pd->pd_mutex); 14145 14146 fp_jobdone(job); 14147 } 14148 } 14149 14150 if (listindex) { 14151 fctl_jobwait(job); 14152 } 14153 job->job_flags = job_flags; 14154 14155 /* 14156 * Orphan list validation. 14157 */ 14158 mutex_enter(&port->fp_mutex); 14159 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14160 orp != NULL; orp = norp) { 14161 norp = orp->orp_next; 14162 mutex_exit(&port->fp_mutex); 14163 14164 job->job_counter = 1; 14165 job->job_result = FC_SUCCESS; 14166 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14167 14168 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14169 14170 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14171 ((ns_resp_gid_pn_t *) 14172 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14173 14174 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14175 if (rval == FC_SUCCESS) { 14176 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14177 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14178 if (pd != NULL) { 14179 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14180 14181 FP_TRACE(FP_NHEAD1(6, 0), 14182 "RSCN and ORPHAN list " 14183 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14184 14185 FP_TRACE(FP_NHEAD2(6, 0), 14186 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14187 " in fabric", d_id, ww_name); 14188 14189 mutex_enter(&port->fp_mutex); 14190 if (prev) { 14191 prev->orp_next = orp->orp_next; 14192 } else { 14193 ASSERT(orp == port->fp_orphan_list); 14194 port->fp_orphan_list = orp->orp_next; 14195 } 14196 port->fp_orphan_count--; 14197 mutex_exit(&port->fp_mutex); 14198 14199 kmem_free(orp, sizeof (*orp)); 14200 fctl_copy_portmap(list + listindex++, pd); 14201 } else { 14202 prev = orp; 14203 } 14204 } else { 14205 prev = orp; 14206 } 14207 mutex_enter(&port->fp_mutex); 14208 } 14209 mutex_exit(&port->fp_mutex); 14210 14211 /* 14212 * One more pass through the list to delist old devices from 14213 * the d_id and pwwn tables and possibly add to the orphan list. 
14214 */ 14215 14216 for (index = 0; index < listindex; index++) { 14217 pd = list[index].map_pd; 14218 ASSERT(pd != NULL); 14219 14220 /* 14221 * Update PLOGI results; For NS validation 14222 * of orphan list, it is redundant 14223 * 14224 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14225 * appropriate as fctl_copy_portmap() will clear map_flags. 14226 */ 14227 if (list[index].map_flags & 14228 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14229 fctl_copy_portmap(list + index, pd); 14230 list[index].map_flags |= 14231 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14232 } else { 14233 fctl_copy_portmap(list + index, pd); 14234 } 14235 14236 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14237 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14238 pd, pd->pd_port_id.port_id, 14239 pd->pd_port_name.raw_wwn[0], 14240 pd->pd_port_name.raw_wwn[1], 14241 pd->pd_port_name.raw_wwn[2], 14242 pd->pd_port_name.raw_wwn[3], 14243 pd->pd_port_name.raw_wwn[4], 14244 pd->pd_port_name.raw_wwn[5], 14245 pd->pd_port_name.raw_wwn[6], 14246 pd->pd_port_name.raw_wwn[7]); 14247 14248 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14249 "results continued, pd=%p type=%x, flags=%x, state=%x", 14250 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14251 14252 mutex_enter(&pd->pd_mutex); 14253 if (pd->pd_type == PORT_DEVICE_OLD) { 14254 int initiator; 14255 14256 pd->pd_flags = PD_IDLE; 14257 initiator = (pd->pd_recepient == 14258 PD_PLOGI_INITIATOR) ? 1 : 0; 14259 14260 mutex_exit(&pd->pd_mutex); 14261 14262 mutex_enter(&port->fp_mutex); 14263 mutex_enter(&pd->pd_mutex); 14264 14265 pd->pd_state = PORT_DEVICE_INVALID; 14266 fctl_delist_did_table(port, pd); 14267 fctl_delist_pwwn_table(port, pd); 14268 14269 mutex_exit(&pd->pd_mutex); 14270 mutex_exit(&port->fp_mutex); 14271 14272 if (initiator) { 14273 (void) fctl_add_orphan(port, pd, sleep); 14274 } 14275 list[index].map_pd = pd; 14276 } else { 14277 ASSERT(pd->pd_flags == PD_IDLE); 14278 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14279 /* 14280 * Reset LOGO tolerance to zero 14281 */ 14282 fctl_tc_reset(&pd->pd_logo_tc); 14283 } 14284 mutex_exit(&pd->pd_mutex); 14285 } 14286 } 14287 14288 if (ns_cmd) { 14289 fctl_free_ns_cmd(ns_cmd); 14290 } 14291 if (listindex) { 14292 (void) fp_ulp_devc_cb(port, list, listindex, count, 14293 sleep, 0); 14294 } else { 14295 kmem_free(list, sizeof (*list) * count); 14296 14297 mutex_enter(&port->fp_mutex); 14298 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14299 --port->fp_rscn_count; 14300 } 14301 mutex_exit(&port->fp_mutex); 14302 } 14303 } 14304 14305 14306 /* 14307 * Work hard to make sense out of an RSCN page. 
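 * The affected D_ID from the page is looked up both in the D_ID
 * table and, through a GPN_ID query, in the PWWN table, and the
 * five possible outcomes are handled in turn: the same device is
 * found both ways (no change, PLOGI again if it was logged in),
 * neither lookup succeeds (check the orphan list, then create the
 * device through the name server), only the PWWN is known (the
 * device moved to a new D_ID), only the D_ID is known (a new PWWN
 * sits behind that D_ID), and the "weird" case where both lookups
 * succeed but disagree with each other.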
14308 */ 14309 static void 14310 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14311 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14312 int *listindex, int sleep) 14313 { 14314 int rval; 14315 char ww_name[17]; 14316 la_wwn_t *pwwn; 14317 fc_remote_port_t *pwwn_pd; 14318 fc_remote_port_t *did_pd; 14319 14320 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14321 14322 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14323 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14324 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14325 14326 if (did_pd != NULL) { 14327 mutex_enter(&did_pd->pd_mutex); 14328 if (did_pd->pd_flags != PD_IDLE) { 14329 mutex_exit(&did_pd->pd_mutex); 14330 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14331 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14332 port, page->aff_d_id, did_pd); 14333 return; 14334 } 14335 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14336 mutex_exit(&did_pd->pd_mutex); 14337 } 14338 14339 job->job_counter = 1; 14340 14341 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14342 14343 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14344 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14345 14346 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14347 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14348 14349 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14350 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14351 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14352 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14353 ns_cmd->ns_resp_hdr.ct_expln); 14354 14355 job->job_counter = 1; 14356 14357 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14358 /* 14359 * What this means is that the D_ID 14360 * disappeared from the Fabric. 14361 */ 14362 if (did_pd == NULL) { 14363 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14364 " NULL PD disappeared, rval=%x", rval); 14365 return; 14366 } 14367 14368 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14369 14370 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14371 (uint32_t)(uintptr_t)job->job_cb_arg; 14372 14373 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14374 14375 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14376 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14377 14378 FP_TRACE(FP_NHEAD2(9, 0), 14379 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14380 14381 FP_TRACE(FP_NHEAD2(9, 0), 14382 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14383 " fabric", page->aff_d_id, ww_name); 14384 14385 mutex_enter(&did_pd->pd_mutex); 14386 did_pd->pd_flags = PD_IDLE; 14387 mutex_exit(&did_pd->pd_mutex); 14388 14389 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14390 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14391 14392 return; 14393 } 14394 14395 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14396 14397 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14398 /* 14399 * There is no change. Do PLOGI again and add it to 14400 * ULP portmap baggage and return. Note: When RSCNs 14401 * arrive with per page states, the need for PLOGI 14402 * can be determined correctly. 
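 * Until then the device is simply logged in again below whenever
 * it was logged in (or marked logged out), and this port takes
 * over the PLOGI initiator role.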
14403 */ 14404 mutex_enter(&pwwn_pd->pd_mutex); 14405 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14406 mutex_exit(&pwwn_pd->pd_mutex); 14407 14408 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14409 (uint32_t)(uintptr_t)job->job_cb_arg; 14410 14411 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14412 14413 mutex_enter(&pwwn_pd->pd_mutex); 14414 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14415 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14416 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14417 mutex_exit(&pwwn_pd->pd_mutex); 14418 14419 rval = fp_port_login(port, page->aff_d_id, job, 14420 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14421 if (rval == FC_SUCCESS) { 14422 fp_jobwait(job); 14423 rval = job->job_result; 14424 14425 /* 14426 * Reset LOGO tolerance to zero 14427 * Also we are the PLOGI initiator now. 14428 */ 14429 mutex_enter(&pwwn_pd->pd_mutex); 14430 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14431 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14432 mutex_exit(&pwwn_pd->pd_mutex); 14433 } 14434 14435 if (rval == FC_SUCCESS) { 14436 struct fc_portmap *map = 14437 listptr + *listindex - 1; 14438 14439 FP_TRACE(FP_NHEAD2(4, 0), 14440 "PLOGI succeeded: no skip(2)" 14441 " for D_ID %x", page->aff_d_id); 14442 map->map_flags |= 14443 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14444 } else { 14445 FP_TRACE(FP_NHEAD2(9, rval), 14446 "PLOGI to D_ID=%x failed", page->aff_d_id); 14447 14448 FP_TRACE(FP_NHEAD2(9, 0), 14449 "N_x Port with D_ID=%x, PWWN=%s" 14450 " disappeared from fabric", 14451 page->aff_d_id, ww_name); 14452 14453 fp_fillout_old_map(listptr + 14454 *listindex - 1, pwwn_pd, 0); 14455 } 14456 } else { 14457 mutex_exit(&pwwn_pd->pd_mutex); 14458 } 14459 14460 mutex_enter(&did_pd->pd_mutex); 14461 did_pd->pd_flags = PD_IDLE; 14462 mutex_exit(&did_pd->pd_mutex); 14463 14464 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14465 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14466 job->job_result, pwwn_pd); 14467 14468 return; 14469 } 14470 14471 if (did_pd == NULL && pwwn_pd == NULL) { 14472 14473 fc_orphan_t *orp = NULL; 14474 fc_orphan_t *norp = NULL; 14475 fc_orphan_t *prev = NULL; 14476 14477 /* 14478 * Hunt down the orphan list before giving up. 
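 * The PWWN may belong to a device we once knew but could not
 * reach (an orphan).  If it is found on the orphan list it is
 * unlinked here and the remote port is recreated through the name
 * server; otherwise we fall through and query the name server
 * anyway.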
14479 */ 14480 14481 mutex_enter(&port->fp_mutex); 14482 if (port->fp_orphan_count) { 14483 14484 for (orp = port->fp_orphan_list; orp; orp = norp) { 14485 norp = orp->orp_next; 14486 14487 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14488 prev = orp; 14489 continue; 14490 } 14491 14492 if (prev) { 14493 prev->orp_next = orp->orp_next; 14494 } else { 14495 ASSERT(orp == 14496 port->fp_orphan_list); 14497 port->fp_orphan_list = 14498 orp->orp_next; 14499 } 14500 port->fp_orphan_count--; 14501 break; 14502 } 14503 } 14504 14505 mutex_exit(&port->fp_mutex); 14506 pwwn_pd = fp_create_remote_port_by_ns(port, 14507 page->aff_d_id, sleep); 14508 14509 if (pwwn_pd != NULL) { 14510 14511 if (orp) { 14512 fc_wwn_to_str(&orp->orp_pwwn, 14513 ww_name); 14514 14515 FP_TRACE(FP_NHEAD2(9, 0), 14516 "N_x Port with D_ID=%x," 14517 " PWWN=%s reappeared in fabric", 14518 page->aff_d_id, ww_name); 14519 14520 kmem_free(orp, sizeof (*orp)); 14521 } 14522 14523 (listptr + *listindex)-> 14524 map_rscn_info.ulp_rscn_count = 14525 (uint32_t)(uintptr_t)job->job_cb_arg; 14526 14527 fctl_copy_portmap(listptr + 14528 (*listindex)++, pwwn_pd); 14529 } 14530 14531 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14532 "Case TWO", page->aff_d_id); 14533 14534 return; 14535 } 14536 14537 if (pwwn_pd != NULL && did_pd == NULL) { 14538 uint32_t old_d_id; 14539 uint32_t d_id = page->aff_d_id; 14540 14541 /* 14542 * What this means is there is a new D_ID for this 14543 * Port WWN. Take out the port device off D_ID 14544 * list and put it back with a new D_ID. Perform 14545 * PLOGI if already logged in. 14546 */ 14547 mutex_enter(&port->fp_mutex); 14548 mutex_enter(&pwwn_pd->pd_mutex); 14549 14550 old_d_id = pwwn_pd->pd_port_id.port_id; 14551 14552 fctl_delist_did_table(port, pwwn_pd); 14553 14554 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14555 (uint32_t)(uintptr_t)job->job_cb_arg; 14556 14557 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14558 &d_id, NULL); 14559 fctl_enlist_did_table(port, pwwn_pd); 14560 14561 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14562 " Case THREE, pd=%p," 14563 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14564 14565 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14566 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14567 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14568 14569 mutex_exit(&pwwn_pd->pd_mutex); 14570 mutex_exit(&port->fp_mutex); 14571 14572 FP_TRACE(FP_NHEAD2(9, 0), 14573 "N_x Port with D_ID=%x, PWWN=%s has a new" 14574 " D_ID=%x now", old_d_id, ww_name, d_id); 14575 14576 rval = fp_port_login(port, page->aff_d_id, job, 14577 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14578 if (rval == FC_SUCCESS) { 14579 fp_jobwait(job); 14580 rval = job->job_result; 14581 } 14582 14583 if (rval != FC_SUCCESS) { 14584 fp_fillout_old_map(listptr + 14585 *listindex - 1, pwwn_pd, 0); 14586 } 14587 } else { 14588 mutex_exit(&pwwn_pd->pd_mutex); 14589 mutex_exit(&port->fp_mutex); 14590 } 14591 14592 return; 14593 } 14594 14595 if (pwwn_pd == NULL && did_pd != NULL) { 14596 fc_portmap_t *ptr; 14597 uint32_t len = 1; 14598 char old_ww_name[17]; 14599 14600 mutex_enter(&did_pd->pd_mutex); 14601 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14602 mutex_exit(&did_pd->pd_mutex); 14603 14604 fc_wwn_to_str(pwwn, ww_name); 14605 14606 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14607 (uint32_t)(uintptr_t)job->job_cb_arg; 14608 14609 /* 14610 * What this means is that there is a new Port WWN for 14611 * this D_ID; Mark the Port device as old and provide 
14612 * the new PWWN and D_ID combination as new. 14613 */ 14614 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14615 14616 FP_TRACE(FP_NHEAD2(9, 0), 14617 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now", 14618 page->aff_d_id, old_ww_name, ww_name); 14619 14620 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14621 (uint32_t)(uintptr_t)job->job_cb_arg; 14622 14623 ptr = listptr + (*listindex)++; 14624 14625 job->job_counter = 1; 14626 14627 if (fp_ns_getmap(port, job, &ptr, &len, 14628 page->aff_d_id - 1) != FC_SUCCESS) { 14629 (*listindex)--; 14630 } 14631 14632 mutex_enter(&did_pd->pd_mutex); 14633 did_pd->pd_flags = PD_IDLE; 14634 mutex_exit(&did_pd->pd_mutex); 14635 14636 return; 14637 } 14638 14639 /* 14640 * A weird case of Port WWN and D_ID existence but not matching up 14641 * between them. Trust your instincts - Take the port device handle 14642 * off Port WWN list, fix it with new Port WWN and put it back, In 14643 * the mean time mark the port device corresponding to the old port 14644 * WWN as OLD. 14645 */ 14646 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p," 14647 " did_pd=%p", pwwn_pd, did_pd); 14648 14649 mutex_enter(&port->fp_mutex); 14650 mutex_enter(&pwwn_pd->pd_mutex); 14651 14652 pwwn_pd->pd_type = PORT_DEVICE_OLD; 14653 pwwn_pd->pd_state = PORT_DEVICE_INVALID; 14654 fctl_delist_did_table(port, pwwn_pd); 14655 fctl_delist_pwwn_table(port, pwwn_pd); 14656 14657 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14658 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x", 14659 pwwn_pd->pd_port_id.port_id, 14660 14661 pwwn_pd->pd_port_name.raw_wwn[0], 14662 pwwn_pd->pd_port_name.raw_wwn[1], 14663 pwwn_pd->pd_port_name.raw_wwn[2], 14664 pwwn_pd->pd_port_name.raw_wwn[3], 14665 pwwn_pd->pd_port_name.raw_wwn[4], 14666 pwwn_pd->pd_port_name.raw_wwn[5], 14667 pwwn_pd->pd_port_name.raw_wwn[6], 14668 pwwn_pd->pd_port_name.raw_wwn[7]); 14669 14670 mutex_exit(&pwwn_pd->pd_mutex); 14671 mutex_exit(&port->fp_mutex); 14672 14673 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14674 (uint32_t)(uintptr_t)job->job_cb_arg; 14675 14676 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14677 14678 mutex_enter(&port->fp_mutex); 14679 mutex_enter(&did_pd->pd_mutex); 14680 14681 fctl_delist_pwwn_table(port, did_pd); 14682 14683 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14684 (uint32_t)(uintptr_t)job->job_cb_arg; 14685 14686 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn); 14687 fctl_enlist_pwwn_table(port, did_pd); 14688 14689 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14690 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x", 14691 did_pd->pd_port_id.port_id, did_pd->pd_state, 14692 14693 did_pd->pd_port_name.raw_wwn[0], 14694 did_pd->pd_port_name.raw_wwn[1], 14695 did_pd->pd_port_name.raw_wwn[2], 14696 did_pd->pd_port_name.raw_wwn[3], 14697 did_pd->pd_port_name.raw_wwn[4], 14698 did_pd->pd_port_name.raw_wwn[5], 14699 did_pd->pd_port_name.raw_wwn[6], 14700 did_pd->pd_port_name.raw_wwn[7]); 14701 14702 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14703 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14704 mutex_exit(&did_pd->pd_mutex); 14705 mutex_exit(&port->fp_mutex); 14706 14707 rval = fp_port_login(port, page->aff_d_id, job, 14708 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL); 14709 if (rval == FC_SUCCESS) { 14710 fp_jobwait(job); 14711 if (job->job_result != FC_SUCCESS) { 14712 fp_fillout_old_map(listptr + 14713 *listindex - 1, did_pd, 0); 14714 } 14715 } 
else { 14716 fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0); 14717 } 14718 } else { 14719 mutex_exit(&did_pd->pd_mutex); 14720 mutex_exit(&port->fp_mutex); 14721 } 14722 14723 mutex_enter(&did_pd->pd_mutex); 14724 did_pd->pd_flags = PD_IDLE; 14725 mutex_exit(&did_pd->pd_mutex); 14726 } 14727 14728 14729 /* 14730 * Check with NS for the presence of this port WWN 14731 */ 14732 static int 14733 fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd, 14734 job_request_t *job, int polled, int sleep) 14735 { 14736 la_wwn_t pwwn; 14737 uint32_t flags; 14738 fctl_ns_req_t *ns_cmd; 14739 14740 flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0: FCTL_NS_ASYNC_REQUEST); 14741 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14742 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14743 flags, sleep); 14744 if (ns_cmd == NULL) { 14745 return (FC_NOMEM); 14746 } 14747 14748 mutex_enter(&pd->pd_mutex); 14749 pwwn = pd->pd_port_name; 14750 mutex_exit(&pd->pd_mutex); 14751 14752 ns_cmd->ns_cmd_code = NS_GID_PN; 14753 ns_cmd->ns_pd = pd; 14754 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn; 14755 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14756 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14757 14758 return (fp_ns_query(port, ns_cmd, job, polled, sleep)); 14759 } 14760 14761 14762 /* 14763 * Sanity check the LILP map returned by FCA 14764 */ 14765 static int 14766 fp_validate_lilp_map(fc_lilpmap_t *lilp_map) 14767 { 14768 int count; 14769 14770 if (lilp_map->lilp_length == 0) { 14771 return (FC_FAILURE); 14772 } 14773 14774 for (count = 0; count < lilp_map->lilp_length; count++) { 14775 if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) != 14776 FC_SUCCESS) { 14777 return (FC_FAILURE); 14778 } 14779 } 14780 14781 return (FC_SUCCESS); 14782 } 14783 14784 14785 /* 14786 * Sanity check if the AL_PA is a valid address 14787 */ 14788 static int 14789 fp_is_valid_alpa(uchar_t al_pa) 14790 { 14791 int count; 14792 14793 for (count = 0; count < sizeof (fp_valid_alpas); count++) { 14794 if (al_pa == fp_valid_alpas[count] || al_pa == 0) { 14795 return (FC_SUCCESS); 14796 } 14797 } 14798 14799 return (FC_FAILURE); 14800 } 14801 14802 14803 /* 14804 * Post unsolicited callbacks to ULPs 14805 */ 14806 static void 14807 fp_ulp_unsol_cb(void *arg) 14808 { 14809 fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg; 14810 14811 fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf, 14812 ub_spec->buf->ub_frame.type); 14813 kmem_free(ub_spec, sizeof (*ub_spec)); 14814 } 14815 14816 14817 /* 14818 * Perform message reporting in a consistent manner. Unless there is 14819 * a strong reason NOT to use this function (which is very very rare) 14820 * all message reporting should go through this. 14821 */ 14822 static void 14823 fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno, 14824 fc_packet_t *pkt, const char *fmt, ...) 
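/*
 * (Descriptive note) CE_NOTE messages are emitted only when FP_WARNING_MESSAGES
 * is set in fp_verbose, and CE_WARN messages only when FP_FATAL_MESSAGES is
 * set; 'dest' selects console-only, log-only, or both destinations.
 */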
14825 { 14826 caddr_t buf; 14827 va_list ap; 14828 14829 switch (level) { 14830 case CE_NOTE: 14831 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) { 14832 return; 14833 } 14834 break; 14835 14836 case CE_WARN: 14837 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) { 14838 return; 14839 } 14840 break; 14841 } 14842 14843 buf = kmem_zalloc(256, KM_NOSLEEP); 14844 if (buf == NULL) { 14845 return; 14846 } 14847 14848 (void) sprintf(buf, "fp(%d): ", port->fp_instance); 14849 14850 va_start(ap, fmt); 14851 (void) vsprintf(buf + strlen(buf), fmt, ap); 14852 va_end(ap); 14853 14854 if (fc_errno) { 14855 char *errmsg; 14856 14857 (void) fc_ulp_error(fc_errno, &errmsg); 14858 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg); 14859 } else { 14860 if (pkt) { 14861 caddr_t state, reason, action, expln; 14862 14863 (void) fc_ulp_pkt_error(pkt, &state, &reason, 14864 &action, &expln); 14865 14866 (void) sprintf(buf + strlen(buf), 14867 " state=%s, reason=%s", state, reason); 14868 14869 if (pkt->pkt_resp_resid) { 14870 (void) sprintf(buf + strlen(buf), 14871 " resp resid=%x\n", pkt->pkt_resp_resid); 14872 } 14873 } 14874 } 14875 14876 switch (dest) { 14877 case FP_CONSOLE_ONLY: 14878 cmn_err(level, "^%s", buf); 14879 break; 14880 14881 case FP_LOG_ONLY: 14882 cmn_err(level, "!%s", buf); 14883 break; 14884 14885 default: 14886 cmn_err(level, "%s", buf); 14887 break; 14888 } 14889 14890 kmem_free(buf, 256); 14891 } 14892 14893 static int 14894 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job) 14895 { 14896 int ret; 14897 uint32_t d_id; 14898 la_wwn_t pwwn; 14899 fc_remote_port_t *pd = NULL; 14900 fc_remote_port_t *held_pd = NULL; 14901 fctl_ns_req_t *ns_cmd; 14902 fc_portmap_t *changelist; 14903 14904 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn)); 14905 14906 mutex_enter(&port->fp_mutex); 14907 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 14908 mutex_exit(&port->fp_mutex); 14909 job->job_counter = 1; 14910 14911 job->job_result = FC_SUCCESS; 14912 14913 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14914 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14915 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 14916 14917 ASSERT(ns_cmd != NULL); 14918 14919 ns_cmd->ns_cmd_code = NS_GID_PN; 14920 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn; 14921 14922 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14923 14924 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 14925 if (ret != FC_SUCCESS) { 14926 fcio->fcio_errno = ret; 14927 } else { 14928 fcio->fcio_errno = job->job_result; 14929 } 14930 fctl_free_ns_cmd(ns_cmd); 14931 return (EIO); 14932 } 14933 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14934 fctl_free_ns_cmd(ns_cmd); 14935 } else { 14936 mutex_exit(&port->fp_mutex); 14937 14938 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 14939 if (held_pd == NULL) { 14940 fcio->fcio_errno = FC_BADWWN; 14941 return (EIO); 14942 } 14943 pd = held_pd; 14944 14945 mutex_enter(&pd->pd_mutex); 14946 d_id = pd->pd_port_id.port_id; 14947 mutex_exit(&pd->pd_mutex); 14948 } 14949 14950 job->job_counter = 1; 14951 14952 pd = fctl_get_remote_port_by_did(port, d_id); 14953 14954 if (pd) { 14955 mutex_enter(&pd->pd_mutex); 14956 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14957 pd->pd_login_count++; 14958 mutex_exit(&pd->pd_mutex); 14959 14960 fcio->fcio_errno = FC_SUCCESS; 14961 if (held_pd) { 14962 fctl_release_remote_port(held_pd); 14963 } 14964 14965 return (0); 14966 } 14967 mutex_exit(&pd->pd_mutex); 14968 } else { 14969 mutex_enter(&port->fp_mutex); 14970 if 
(FC_IS_TOP_SWITCH(port->fp_topology)) { 14971 mutex_exit(&port->fp_mutex); 14972 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14973 if (pd == NULL) { 14974 fcio->fcio_errno = FC_FAILURE; 14975 if (held_pd) { 14976 fctl_release_remote_port(held_pd); 14977 } 14978 return (EIO); 14979 } 14980 } else { 14981 mutex_exit(&port->fp_mutex); 14982 } 14983 } 14984 14985 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 14986 job->job_counter = 1; 14987 14988 ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN, 14989 KM_SLEEP, pd, NULL); 14990 14991 if (ret != FC_SUCCESS) { 14992 fcio->fcio_errno = ret; 14993 if (held_pd) { 14994 fctl_release_remote_port(held_pd); 14995 } 14996 return (EIO); 14997 } 14998 fp_jobwait(job); 14999 15000 fcio->fcio_errno = job->job_result; 15001 15002 if (held_pd) { 15003 fctl_release_remote_port(held_pd); 15004 } 15005 15006 if (job->job_result != FC_SUCCESS) { 15007 return (EIO); 15008 } 15009 15010 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 15011 if (pd == NULL) { 15012 fcio->fcio_errno = FC_BADDEV; 15013 return (ENODEV); 15014 } 15015 15016 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15017 15018 fctl_copy_portmap(changelist, pd); 15019 changelist->map_type = PORT_DEVICE_USER_LOGIN; 15020 15021 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15022 15023 mutex_enter(&pd->pd_mutex); 15024 pd->pd_type = PORT_DEVICE_NOCHANGE; 15025 mutex_exit(&pd->pd_mutex); 15026 15027 fctl_release_remote_port(pd); 15028 15029 return (0); 15030 } 15031 15032 15033 static int 15034 fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job) 15035 { 15036 la_wwn_t pwwn; 15037 fp_cmd_t *cmd; 15038 fc_portmap_t *changelist; 15039 fc_remote_port_t *pd; 15040 15041 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn)); 15042 15043 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 15044 if (pd == NULL) { 15045 fcio->fcio_errno = FC_BADWWN; 15046 return (ENXIO); 15047 } 15048 15049 mutex_enter(&pd->pd_mutex); 15050 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 15051 fcio->fcio_errno = FC_LOGINREQ; 15052 mutex_exit(&pd->pd_mutex); 15053 15054 fctl_release_remote_port(pd); 15055 15056 return (EINVAL); 15057 } 15058 15059 ASSERT(pd->pd_login_count >= 1); 15060 15061 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 15062 fcio->fcio_errno = FC_FAILURE; 15063 mutex_exit(&pd->pd_mutex); 15064 15065 fctl_release_remote_port(pd); 15066 15067 return (EBUSY); 15068 } 15069 15070 if (pd->pd_login_count > 1) { 15071 pd->pd_login_count--; 15072 fcio->fcio_errno = FC_SUCCESS; 15073 mutex_exit(&pd->pd_mutex); 15074 15075 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15076 15077 fctl_copy_portmap(changelist, pd); 15078 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 15079 15080 fctl_release_remote_port(pd); 15081 15082 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15083 15084 return (0); 15085 } 15086 15087 pd->pd_flags = PD_ELS_IN_PROGRESS; 15088 mutex_exit(&pd->pd_mutex); 15089 15090 job->job_counter = 1; 15091 15092 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 15093 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 15094 if (cmd == NULL) { 15095 fcio->fcio_errno = FC_NOMEM; 15096 fctl_release_remote_port(pd); 15097 15098 mutex_enter(&pd->pd_mutex); 15099 pd->pd_flags = PD_IDLE; 15100 mutex_exit(&pd->pd_mutex); 15101 15102 return (ENOMEM); 15103 } 15104 15105 mutex_enter(&port->fp_mutex); 15106 mutex_enter(&pd->pd_mutex); 15107 15108 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 15109 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 15110 
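	/*
	 * (Descriptive note) The packet allocated above holds a LOGO ELS;
	 * fp_logo_init() below fills in the request payload before the
	 * command is handed to the FCA via fp_sendcmd().
	 */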
cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 15111 cmd->cmd_retry_count = 1; 15112 cmd->cmd_ulp_pkt = NULL; 15113 15114 fp_logo_init(pd, cmd, job); 15115 15116 mutex_exit(&pd->pd_mutex); 15117 mutex_exit(&port->fp_mutex); 15118 15119 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 15120 mutex_enter(&pd->pd_mutex); 15121 pd->pd_flags = PD_IDLE; 15122 mutex_exit(&pd->pd_mutex); 15123 15124 fp_free_pkt(cmd); 15125 fctl_release_remote_port(pd); 15126 15127 return (EIO); 15128 } 15129 15130 fp_jobwait(job); 15131 15132 fcio->fcio_errno = job->job_result; 15133 if (job->job_result != FC_SUCCESS) { 15134 mutex_enter(&pd->pd_mutex); 15135 pd->pd_flags = PD_IDLE; 15136 mutex_exit(&pd->pd_mutex); 15137 15138 fctl_release_remote_port(pd); 15139 15140 return (EIO); 15141 } 15142 15143 ASSERT(pd != NULL); 15144 15145 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 15146 15147 fctl_copy_portmap(changelist, pd); 15148 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 15149 changelist->map_state = PORT_DEVICE_INVALID; 15150 15151 mutex_enter(&port->fp_mutex); 15152 mutex_enter(&pd->pd_mutex); 15153 15154 fctl_delist_did_table(port, pd); 15155 fctl_delist_pwwn_table(port, pd); 15156 pd->pd_flags = PD_IDLE; 15157 15158 mutex_exit(&pd->pd_mutex); 15159 mutex_exit(&port->fp_mutex); 15160 15161 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 15162 15163 fctl_release_remote_port(pd); 15164 15165 return (0); 15166 } 15167 15168 15169 15170 /* 15171 * Send a syslog event for adapter port level events. 15172 */ 15173 static void 15174 fp_log_port_event(fc_local_port_t *port, char *subclass) 15175 { 15176 nvlist_t *attr_list; 15177 15178 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 15179 KM_SLEEP) != DDI_SUCCESS) { 15180 goto alloc_failed; 15181 } 15182 15183 if (nvlist_add_uint32(attr_list, "instance", 15184 port->fp_instance) != DDI_SUCCESS) { 15185 goto error; 15186 } 15187 15188 if (nvlist_add_byte_array(attr_list, "port-wwn", 15189 port->fp_service_params.nport_ww_name.raw_wwn, 15190 sizeof (la_wwn_t)) != DDI_SUCCESS) { 15191 goto error; 15192 } 15193 15194 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC, 15195 subclass, attr_list, NULL, DDI_SLEEP); 15196 15197 nvlist_free(attr_list); 15198 return; 15199 15200 error: 15201 nvlist_free(attr_list); 15202 alloc_failed: 15203 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass); 15204 } 15205 15206 15207 static void 15208 fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn, 15209 uint32_t port_id) 15210 { 15211 nvlist_t *attr_list; 15212 15213 if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 15214 KM_SLEEP) != DDI_SUCCESS) { 15215 goto alloc_failed; 15216 } 15217 15218 if (nvlist_add_uint32(attr_list, "instance", 15219 port->fp_instance) != DDI_SUCCESS) { 15220 goto error; 15221 } 15222 15223 if (nvlist_add_byte_array(attr_list, "port-wwn", 15224 port->fp_service_params.nport_ww_name.raw_wwn, 15225 sizeof (la_wwn_t)) != DDI_SUCCESS) { 15226 goto error; 15227 } 15228 15229 if (nvlist_add_byte_array(attr_list, "target-port-wwn", 15230 tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) { 15231 goto error; 15232 } 15233 15234 if (nvlist_add_uint32(attr_list, "target-port-id", 15235 port_id) != DDI_SUCCESS) { 15236 goto error; 15237 } 15238 15239 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC, 15240 subclass, attr_list, NULL, DDI_SLEEP); 15241 15242 nvlist_free(attr_list); 15243 return; 15244 15245 error: 15246 nvlist_free(attr_list); 15247 
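	/* FALLTHROUGH to the common trace message below */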
alloc_failed: 15248 FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass); 15249 } 15250
/*
 * Map a remote port device state to the corresponding FC-HBA port state;
 * only a logged-in device reports as online, everything else is unknown.
 */
15251 static uint32_t 15252 fp_map_remote_port_state(uint32_t rm_state) 15253 { 15254 switch (rm_state) { 15255 case PORT_DEVICE_LOGGED_IN: 15256 return (FC_HBA_PORTSTATE_ONLINE); 15257 case PORT_DEVICE_VALID: 15258 case PORT_DEVICE_INVALID: 15259 default: 15260 return (FC_HBA_PORTSTATE_UNKNOWN); 15261 } 15262 } 15263