1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * NOT a DDI compliant Sun Fibre Channel port driver(fp) 26 * 27 */ 28 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/param.h> 32 #include <sys/errno.h> 33 #include <sys/uio.h> 34 #include <sys/buf.h> 35 #include <sys/modctl.h> 36 #include <sys/open.h> 37 #include <sys/file.h> 38 #include <sys/kmem.h> 39 #include <sys/poll.h> 40 #include <sys/conf.h> 41 #include <sys/thread.h> 42 #include <sys/var.h> 43 #include <sys/cmn_err.h> 44 #include <sys/stat.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/promif.h> 48 #include <sys/nvpair.h> 49 #include <sys/byteorder.h> 50 #include <sys/scsi/scsi.h> 51 #include <sys/fibre-channel/fc.h> 52 #include <sys/fibre-channel/impl/fc_ulpif.h> 53 #include <sys/fibre-channel/impl/fc_fcaif.h> 54 #include <sys/fibre-channel/impl/fctl_private.h> 55 #include <sys/fibre-channel/impl/fc_portif.h> 56 #include <sys/fibre-channel/impl/fp.h> 57 58 /* These are defined in fctl.c! 
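 * They size the per-port d_id and pwwn hash tables that
 * fp_attach_handler() allocates below for remote port lookups.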
*/ 59 extern int did_table_size; 60 extern int pwwn_table_size; 61 62 static struct cb_ops fp_cb_ops = { 63 fp_open, /* open */ 64 fp_close, /* close */ 65 nodev, /* strategy */ 66 nodev, /* print */ 67 nodev, /* dump */ 68 nodev, /* read */ 69 nodev, /* write */ 70 fp_ioctl, /* ioctl */ 71 nodev, /* devmap */ 72 nodev, /* mmap */ 73 nodev, /* segmap */ 74 nochpoll, /* chpoll */ 75 ddi_prop_op, /* cb_prop_op */ 76 0, /* streamtab */ 77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 78 CB_REV, /* rev */ 79 nodev, /* aread */ 80 nodev /* awrite */ 81 }; 82 83 static struct dev_ops fp_ops = { 84 DEVO_REV, /* build revision */ 85 0, /* reference count */ 86 fp_getinfo, /* getinfo */ 87 nulldev, /* identify - Obsoleted */ 88 nulldev, /* probe */ 89 fp_attach, /* attach */ 90 fp_detach, /* detach */ 91 nodev, /* reset */ 92 &fp_cb_ops, /* cb_ops */ 93 NULL, /* bus_ops */ 94 fp_power /* power */ 95 }; 96 97 #define FP_VERSION "1.97" 98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION 99 100 char *fp_version = FP_NAME_VERSION; 101 102 static struct modldrv modldrv = { 103 &mod_driverops, /* Type of Module */ 104 FP_NAME_VERSION, /* Name/Version of fp */ 105 &fp_ops /* driver ops */ 106 }; 107 108 static struct modlinkage modlinkage = { 109 MODREV_1, /* Rev of the loadable modules system */ 110 &modldrv, /* NULL terminated list of */ 111 NULL /* Linkage structures */ 112 }; 113 114 115 116 static uint16_t ns_reg_cmds[] = { 117 NS_RPN_ID, 118 NS_RNN_ID, 119 NS_RCS_ID, 120 NS_RFT_ID, 121 NS_RPT_ID, 122 NS_RSPN_ID, 123 NS_RSNN_NN 124 }; 125 126 struct fp_xlat { 127 uchar_t xlat_state; 128 int xlat_rval; 129 } fp_xlat [] = { 130 { FC_PKT_SUCCESS, FC_SUCCESS }, 131 { FC_PKT_REMOTE_STOP, FC_FAILURE }, 132 { FC_PKT_LOCAL_RJT, FC_FAILURE }, 133 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT }, 134 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT }, 135 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY }, 136 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY }, 137 { FC_PKT_NPORT_BSY, FC_PBUSY }, 138 { FC_PKT_FABRIC_BSY, FC_FBUSY }, 139 { FC_PKT_LS_RJT, FC_FAILURE }, 140 { FC_PKT_BA_RJT, FC_FAILURE }, 141 { FC_PKT_TIMEOUT, FC_FAILURE }, 142 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR }, 143 { FC_PKT_FAILURE, FC_FAILURE }, 144 { FC_PKT_PORT_OFFLINE, FC_OFFLINE } 145 }; 146 147 static uchar_t fp_valid_alpas[] = { 148 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 149 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 150 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 151 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 152 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 153 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, 154 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73, 155 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82, 156 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 157 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, 158 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 159 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 160 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 161 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF 162 }; 163 164 static struct fp_perms { 165 uint16_t fp_ioctl_cmd; 166 uchar_t fp_open_flag; 167 } fp_perm_list [] = { 168 { FCIO_GET_NUM_DEVS, FP_OPEN }, 169 { FCIO_GET_DEV_LIST, FP_OPEN }, 170 { FCIO_GET_SYM_PNAME, FP_OPEN }, 171 { FCIO_GET_SYM_NNAME, FP_OPEN }, 172 { FCIO_SET_SYM_PNAME, FP_EXCL }, 173 { FCIO_SET_SYM_NNAME, FP_EXCL }, 174 { FCIO_GET_LOGI_PARAMS, FP_OPEN }, 175 { FCIO_DEV_LOGIN, FP_EXCL }, 176 { FCIO_DEV_LOGOUT, FP_EXCL }, 177 { FCIO_GET_STATE, FP_OPEN }, 
178 { FCIO_DEV_REMOVE, FP_EXCL }, 179 { FCIO_GET_FCODE_REV, FP_OPEN }, 180 { FCIO_GET_FW_REV, FP_OPEN }, 181 { FCIO_GET_DUMP_SIZE, FP_OPEN }, 182 { FCIO_FORCE_DUMP, FP_EXCL }, 183 { FCIO_GET_DUMP, FP_OPEN }, 184 { FCIO_GET_TOPOLOGY, FP_OPEN }, 185 { FCIO_RESET_LINK, FP_EXCL }, 186 { FCIO_RESET_HARD, FP_EXCL }, 187 { FCIO_RESET_HARD_CORE, FP_EXCL }, 188 { FCIO_DIAG, FP_OPEN }, 189 { FCIO_NS, FP_EXCL }, 190 { FCIO_DOWNLOAD_FW, FP_EXCL }, 191 { FCIO_DOWNLOAD_FCODE, FP_EXCL }, 192 { FCIO_LINK_STATUS, FP_OPEN }, 193 { FCIO_GET_HOST_PARAMS, FP_OPEN }, 194 { FCIO_GET_NODE_ID, FP_OPEN }, 195 { FCIO_SET_NODE_ID, FP_EXCL }, 196 { FCIO_SEND_NODE_ID, FP_OPEN }, 197 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN }, 198 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN }, 199 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN }, 200 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN }, 201 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN }, 202 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN }, 203 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN }, 204 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN }, 205 { FCIO_DELETE_NPIV_PORT, FP_OPEN }, 206 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN }, 207 { FCIO_CREATE_NPIV_PORT, FP_OPEN }, 208 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN } 209 }; 210 211 static char *fp_pm_comps[] = { 212 "NAME=FC Port", 213 "0=Port Down", 214 "1=Port Up" 215 }; 216 217 218 #ifdef _LITTLE_ENDIAN 219 #define MAKE_BE_32(x) { \ 220 uint32_t *ptr1, i; \ 221 ptr1 = (uint32_t *)(x); \ 222 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \ 223 *ptr1 = BE_32(*ptr1); \ 224 ptr1++; \ 225 } \ 226 } 227 #else 228 #define MAKE_BE_32(x) 229 #endif 230 231 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES); 232 static uint32_t fp_options = 0; 233 234 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY; 235 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */ 236 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */ 237 unsigned int fp_offline_ticker; /* seconds */ 238 239 /* 240 * Driver global variable to anchor the list of soft state structs for 241 * all fp driver instances. Used with the Solaris DDI soft state functions. 
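 *
 * Instances are allocated in fp_attach_handler() and are looked up by
 * instance (minor) number; a typical lookup, as done in the entry
 * points below, is:
 *
 *	fc_local_port_t *port =
 *	    ddi_get_soft_state(fp_driver_softstate, getminor(dev));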
242 */ 243 static void *fp_driver_softstate; 244 245 static clock_t fp_retry_ticks; 246 static clock_t fp_offline_ticks; 247 248 static int fp_retry_ticker; 249 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 250 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 251 252 static int fp_log_size = FP_LOG_SIZE; 253 static int fp_trace = FP_TRACE_DEFAULT; 254 static fc_trace_logq_t *fp_logq = NULL; 255 256 int fp_get_adapter_paths(char *pathList, int count); 257 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 258 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 259 la_wwn_t tgt_pwwn, uint32_t port_id); 260 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 261 static void fp_init_symbolic_names(fc_local_port_t *port); 262 263 264 /* 265 * Perform global initialization 266 */ 267 int 268 _init(void) 269 { 270 int ret; 271 272 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 273 sizeof (struct fc_local_port), 8)) != 0) { 274 return (ret); 275 } 276 277 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 278 ddi_soft_state_fini(&fp_driver_softstate); 279 return (ret); 280 } 281 282 fp_logq = fc_trace_alloc_logq(fp_log_size); 283 284 if ((ret = mod_install(&modlinkage)) != 0) { 285 fc_trace_free_logq(fp_logq); 286 ddi_soft_state_fini(&fp_driver_softstate); 287 scsi_hba_fini(&modlinkage); 288 } 289 290 return (ret); 291 } 292 293 294 /* 295 * Prepare for driver unload 296 */ 297 int 298 _fini(void) 299 { 300 int ret; 301 302 if ((ret = mod_remove(&modlinkage)) == 0) { 303 fc_trace_free_logq(fp_logq); 304 ddi_soft_state_fini(&fp_driver_softstate); 305 scsi_hba_fini(&modlinkage); 306 } 307 308 return (ret); 309 } 310 311 312 /* 313 * Request mod_info() to handle all cases 314 */ 315 int 316 _info(struct modinfo *modinfo) 317 { 318 return (mod_info(&modlinkage, modinfo)); 319 } 320 321 322 /* 323 * fp_attach: 324 * 325 * The respective cmd handlers take care of performing 326 * ULP related invocations 327 */ 328 static int 329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 int rval; 332 333 /* 334 * We check the value of fp_offline_ticker at this 335 * point. The variable is global for the driver and 336 * not specific to an instance. 337 * 338 * If there is no user-defined value found in /etc/system 339 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 340 * The minimum setting for this offline timeout according 341 * to the FC-FS2 standard (Fibre Channel Framing and 342 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 343 * 344 * We do not recommend setting the value to less than 10 345 * seconds (RA_TOV) or more than 90 seconds. If this 346 * variable is greater than 90 seconds then drivers above 347 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 348 */ 349 350 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 351 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 352 FP_OFFLINE_TICKER); 353 354 if ((fp_offline_ticker < 10) || 355 (fp_offline_ticker > 90)) { 356 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 357 "%d second(s). This is outside the " 358 "recommended range of 10..90 seconds", 359 fp_offline_ticker); 360 } 361 362 /* 363 * Tick every second when there are commands to retry. 
364 * It should tick at the least granular value of pkt_timeout 365 * (which is one second) 366 */ 367 fp_retry_ticker = 1; 368 369 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000); 370 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000); 371 372 switch (cmd) { 373 case DDI_ATTACH: 374 rval = fp_attach_handler(dip); 375 break; 376 377 case DDI_RESUME: 378 rval = fp_resume_handler(dip); 379 break; 380 381 default: 382 rval = DDI_FAILURE; 383 break; 384 } 385 return (rval); 386 } 387 388 389 /* 390 * fp_detach: 391 * 392 * If a ULP fails to handle cmd request converse of 393 * cmd is invoked for ULPs that previously succeeded 394 * cmd request. 395 */ 396 static int 397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 398 { 399 int rval = DDI_FAILURE; 400 fc_local_port_t *port; 401 fc_attach_cmd_t converse; 402 uint8_t cnt; 403 404 if ((port = ddi_get_soft_state(fp_driver_softstate, 405 ddi_get_instance(dip))) == NULL) { 406 return (DDI_FAILURE); 407 } 408 409 mutex_enter(&port->fp_mutex); 410 411 if (port->fp_ulp_attach) { 412 mutex_exit(&port->fp_mutex); 413 return (DDI_FAILURE); 414 } 415 416 switch (cmd) { 417 case DDI_DETACH: 418 if (port->fp_task != FP_TASK_IDLE) { 419 mutex_exit(&port->fp_mutex); 420 return (DDI_FAILURE); 421 } 422 423 /* Let's attempt to quit the job handler gracefully */ 424 port->fp_soft_state |= FP_DETACH_INPROGRESS; 425 426 mutex_exit(&port->fp_mutex); 427 converse = FC_CMD_ATTACH; 428 if (fctl_detach_ulps(port, FC_CMD_DETACH, 429 &modlinkage) != FC_SUCCESS) { 430 mutex_enter(&port->fp_mutex); 431 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 432 mutex_exit(&port->fp_mutex); 433 rval = DDI_FAILURE; 434 break; 435 } 436 437 mutex_enter(&port->fp_mutex); 438 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt); 439 cnt++) { 440 mutex_exit(&port->fp_mutex); 441 delay(drv_usectohz(1000000)); 442 mutex_enter(&port->fp_mutex); 443 } 444 445 if (port->fp_job_head) { 446 mutex_exit(&port->fp_mutex); 447 rval = DDI_FAILURE; 448 break; 449 } 450 mutex_exit(&port->fp_mutex); 451 452 rval = fp_detach_handler(port); 453 break; 454 455 case DDI_SUSPEND: 456 mutex_exit(&port->fp_mutex); 457 converse = FC_CMD_RESUME; 458 if (fctl_detach_ulps(port, FC_CMD_SUSPEND, 459 &modlinkage) != FC_SUCCESS) { 460 rval = DDI_FAILURE; 461 break; 462 } 463 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) { 464 (void) callb_generic_cpr(&port->fp_cpr_info, 465 CB_CODE_CPR_RESUME); 466 } 467 break; 468 469 default: 470 mutex_exit(&port->fp_mutex); 471 break; 472 } 473 474 /* 475 * Use softint to perform reattach. Mark fp_ulp_attach so we 476 * don't attempt to do this repeatedly on behalf of some persistent 477 * caller. 478 */ 479 if (rval != DDI_SUCCESS) { 480 mutex_enter(&port->fp_mutex); 481 port->fp_ulp_attach = 1; 482 483 /* 484 * If the port is in the low power mode then there is 485 * possibility that fca too could be in low power mode. 486 * Try to raise the power before calling attach ulps. 
487 */ 488 489 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 490 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 491 mutex_exit(&port->fp_mutex); 492 (void) pm_raise_power(port->fp_port_dip, 493 FP_PM_COMPONENT, FP_PM_PORT_UP); 494 } else { 495 mutex_exit(&port->fp_mutex); 496 } 497 498 499 fp_attach_ulps(port, converse); 500 501 mutex_enter(&port->fp_mutex); 502 while (port->fp_ulp_attach) { 503 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 504 } 505 506 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 507 508 /* 509 * Mark state as detach failed so asynchronous ULP attach 510 * events (downstream, not the ones we're initiating with 511 * the call to fp_attach_ulps) are not honored. We're 512 * really still in pending detach. 513 */ 514 port->fp_soft_state |= FP_DETACH_FAILED; 515 516 mutex_exit(&port->fp_mutex); 517 } 518 519 return (rval); 520 } 521 522 523 /* 524 * fp_getinfo: 525 * Given the device number, return either the 526 * dev_info_t pointer or the instance number. 527 */ 528 529 /* ARGSUSED */ 530 static int 531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 532 { 533 int rval; 534 minor_t instance; 535 fc_local_port_t *port; 536 537 rval = DDI_SUCCESS; 538 instance = getminor((dev_t)arg); 539 540 switch (cmd) { 541 case DDI_INFO_DEVT2DEVINFO: 542 if ((port = ddi_get_soft_state(fp_driver_softstate, 543 instance)) == NULL) { 544 rval = DDI_FAILURE; 545 break; 546 } 547 *result = (void *)port->fp_port_dip; 548 break; 549 550 case DDI_INFO_DEVT2INSTANCE: 551 *result = (void *)(uintptr_t)instance; 552 break; 553 554 default: 555 rval = DDI_FAILURE; 556 break; 557 } 558 559 return (rval); 560 } 561 562 563 /* 564 * Entry point for power up and power down request from kernel 565 */ 566 static int 567 fp_power(dev_info_t *dip, int comp, int level) 568 { 569 int rval = DDI_FAILURE; 570 fc_local_port_t *port; 571 572 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 573 if (port == NULL || comp != FP_PM_COMPONENT) { 574 return (rval); 575 } 576 577 switch (level) { 578 case FP_PM_PORT_UP: 579 rval = DDI_SUCCESS; 580 581 /* 582 * If the port is DDI_SUSPENDed, let the DDI_RESUME 583 * code complete the rediscovery. 584 */ 585 mutex_enter(&port->fp_mutex); 586 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 587 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 588 port->fp_pm_level = FP_PM_PORT_UP; 589 mutex_exit(&port->fp_mutex); 590 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage); 591 break; 592 } 593 594 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 595 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 596 597 port->fp_pm_level = FP_PM_PORT_UP; 598 rval = fp_power_up(port); 599 if (rval != DDI_SUCCESS) { 600 port->fp_pm_level = FP_PM_PORT_DOWN; 601 } 602 } else { 603 port->fp_pm_level = FP_PM_PORT_UP; 604 } 605 mutex_exit(&port->fp_mutex); 606 break; 607 608 case FP_PM_PORT_DOWN: 609 mutex_enter(&port->fp_mutex); 610 611 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP)); 612 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) { 613 /* 614 * PM framework goofed up. We have don't 615 * have any PM components. Let's never go down. 
616 */ 617 mutex_exit(&port->fp_mutex); 618 break; 619 620 } 621 622 if (port->fp_ulp_attach) { 623 /* We shouldn't let the power go down */ 624 mutex_exit(&port->fp_mutex); 625 break; 626 } 627 628 /* 629 * Not a whole lot to do if we are detaching 630 */ 631 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 632 port->fp_pm_level = FP_PM_PORT_DOWN; 633 mutex_exit(&port->fp_mutex); 634 rval = DDI_SUCCESS; 635 break; 636 } 637 638 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 639 port->fp_pm_level = FP_PM_PORT_DOWN; 640 641 rval = fp_power_down(port); 642 if (rval != DDI_SUCCESS) { 643 port->fp_pm_level = FP_PM_PORT_UP; 644 ASSERT(!(port->fp_soft_state & 645 FP_SOFT_POWER_DOWN)); 646 } else { 647 ASSERT(port->fp_soft_state & 648 FP_SOFT_POWER_DOWN); 649 } 650 } 651 mutex_exit(&port->fp_mutex); 652 break; 653 654 default: 655 break; 656 } 657 658 return (rval); 659 } 660 661 662 /* 663 * Open FC port devctl node 664 */ 665 static int 666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 667 { 668 int instance; 669 fc_local_port_t *port; 670 671 if (otype != OTYP_CHR) { 672 return (EINVAL); 673 } 674 675 /* 676 * This is not a toy to play with. Allow only powerful 677 * users (hopefully knowledgeable) to access the port 678 * (A hacker potentially could download a sick binary 679 * file into FCA) 680 */ 681 if (drv_priv(credp)) { 682 return (EPERM); 683 } 684 685 instance = (int)getminor(*devp); 686 687 port = ddi_get_soft_state(fp_driver_softstate, instance); 688 if (port == NULL) { 689 return (ENXIO); 690 } 691 692 mutex_enter(&port->fp_mutex); 693 if (port->fp_flag & FP_EXCL) { 694 /* 695 * It is already open for exclusive access. 696 * So shut the door on this caller. 697 */ 698 mutex_exit(&port->fp_mutex); 699 return (EBUSY); 700 } 701 702 if (flag & FEXCL) { 703 if (port->fp_flag & FP_OPEN) { 704 /* 705 * Exclusive operation not possible 706 * as it is already opened 707 */ 708 mutex_exit(&port->fp_mutex); 709 return (EBUSY); 710 } 711 port->fp_flag |= FP_EXCL; 712 } 713 port->fp_flag |= FP_OPEN; 714 mutex_exit(&port->fp_mutex); 715 716 return (0); 717 } 718 719 720 /* 721 * The driver close entry point is called on the last close() 722 * of a device. So it is perfectly alright to just clobber the 723 * open flag and reset it to idle (instead of having to reset 724 * each flag bits). For any confusion, check out close(9E). 
725 */ 726 727 /* ARGSUSED */ 728 static int 729 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 730 { 731 int instance; 732 fc_local_port_t *port; 733 734 if (otype != OTYP_CHR) { 735 return (EINVAL); 736 } 737 738 instance = (int)getminor(dev); 739 740 port = ddi_get_soft_state(fp_driver_softstate, instance); 741 if (port == NULL) { 742 return (ENXIO); 743 } 744 745 mutex_enter(&port->fp_mutex); 746 if ((port->fp_flag & FP_OPEN) == 0) { 747 mutex_exit(&port->fp_mutex); 748 return (ENODEV); 749 } 750 port->fp_flag = FP_IDLE; 751 mutex_exit(&port->fp_mutex); 752 753 return (0); 754 } 755 756 /* 757 * Handle IOCTL requests 758 */ 759 760 /* ARGSUSED */ 761 static int 762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 763 { 764 int instance; 765 int ret = 0; 766 fcio_t fcio; 767 fc_local_port_t *port; 768 769 instance = (int)getminor(dev); 770 771 port = ddi_get_soft_state(fp_driver_softstate, instance); 772 if (port == NULL) { 773 return (ENXIO); 774 } 775 776 mutex_enter(&port->fp_mutex); 777 if ((port->fp_flag & FP_OPEN) == 0) { 778 mutex_exit(&port->fp_mutex); 779 return (ENXIO); 780 } 781 782 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 783 mutex_exit(&port->fp_mutex); 784 return (ENXIO); 785 } 786 787 mutex_exit(&port->fp_mutex); 788 789 /* this will raise power if necessary */ 790 ret = fctl_busy_port(port); 791 if (ret != 0) { 792 return (ret); 793 } 794 795 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 796 797 798 switch (cmd) { 799 case FCIO_CMD: { 800 #ifdef _MULTI_DATAMODEL 801 switch (ddi_model_convert_from(mode & FMODELS)) { 802 case DDI_MODEL_ILP32: { 803 struct fcio32 fcio32; 804 805 if (ddi_copyin((void *)data, (void *)&fcio32, 806 sizeof (struct fcio32), mode)) { 807 ret = EFAULT; 808 break; 809 } 810 fcio.fcio_xfer = fcio32.fcio_xfer; 811 fcio.fcio_cmd = fcio32.fcio_cmd; 812 fcio.fcio_flags = fcio32.fcio_flags; 813 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 814 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 815 fcio.fcio_ibuf = 816 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 817 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 818 fcio.fcio_obuf = 819 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 820 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 821 fcio.fcio_abuf = 822 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 823 fcio.fcio_errno = fcio32.fcio_errno; 824 break; 825 } 826 827 case DDI_MODEL_NONE: 828 if (ddi_copyin((void *)data, (void *)&fcio, 829 sizeof (fcio_t), mode)) { 830 ret = EFAULT; 831 } 832 break; 833 } 834 #else /* _MULTI_DATAMODEL */ 835 if (ddi_copyin((void *)data, (void *)&fcio, 836 sizeof (fcio_t), mode)) { 837 ret = EFAULT; 838 break; 839 } 840 #endif /* _MULTI_DATAMODEL */ 841 if (!ret) { 842 ret = fp_fciocmd(port, data, mode, &fcio); 843 } 844 break; 845 } 846 847 default: 848 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 849 mode, credp, rval); 850 } 851 852 fctl_idle_port(port); 853 854 return (ret); 855 } 856 857 858 /* 859 * Init Symbolic Port Name and Node Name 860 * LV will try to get symbolic names from FCA driver 861 * and register these to name server, 862 * if LV fails to get these, 863 * LV will register its default symbolic names to name server. 
864 * The Default symbolic node name format is : 865 * <hostname>:<hba driver name>(instance) 866 * The Default symbolic port name format is : 867 * <fp path name> 868 */ 869 static void 870 fp_init_symbolic_names(fc_local_port_t *port) 871 { 872 const char *vendorname = ddi_driver_name(port->fp_fca_dip); 873 char *sym_name; 874 char fcaname[50] = {0}; 875 int hostnlen, fcanlen; 876 877 if (port->fp_sym_node_namelen == 0) { 878 hostnlen = strlen(utsname.nodename); 879 (void) snprintf(fcaname, sizeof (fcaname), 880 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip)); 881 fcanlen = strlen(fcaname); 882 883 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP); 884 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname); 885 port->fp_sym_node_namelen = strlen(sym_name); 886 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) { 887 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN; 888 } 889 (void) strncpy(port->fp_sym_node_name, sym_name, 890 port->fp_sym_node_namelen); 891 kmem_free(sym_name, hostnlen + fcanlen + 2); 892 } 893 894 if (port->fp_sym_port_namelen == 0) { 895 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 896 897 (void) ddi_pathname(port->fp_port_dip, pathname); 898 port->fp_sym_port_namelen = strlen(pathname); 899 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) { 900 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN; 901 } 902 (void) strncpy(port->fp_sym_port_name, pathname, 903 port->fp_sym_port_namelen); 904 kmem_free(pathname, MAXPATHLEN); 905 } 906 } 907 908 909 /* 910 * Perform port attach 911 */ 912 static int 913 fp_attach_handler(dev_info_t *dip) 914 { 915 int rval; 916 int instance; 917 int port_num; 918 int port_len; 919 char name[30]; 920 char i_pwwn[17]; 921 fp_cmd_t *pkt; 922 uint32_t ub_count; 923 fc_local_port_t *port; 924 job_request_t *job; 925 fc_local_port_t *phyport = NULL; 926 int portpro1; 927 char pwwn[17], nwwn[17]; 928 929 instance = ddi_get_instance(dip); 930 931 port_len = sizeof (port_num); 932 933 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 934 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 935 (caddr_t)&port_num, &port_len); 936 937 if (rval != DDI_SUCCESS) { 938 cmn_err(CE_WARN, "fp(%d): No port property in devinfo", 939 instance); 940 return (DDI_FAILURE); 941 } 942 943 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance, 944 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 945 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node", 946 instance); 947 return (DDI_FAILURE); 948 } 949 950 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance, 951 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 952 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment" 953 " point minor node", instance); 954 ddi_remove_minor_node(dip, NULL); 955 return (DDI_FAILURE); 956 } 957 958 if (ddi_soft_state_zalloc(fp_driver_softstate, instance) 959 != DDI_SUCCESS) { 960 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state", 961 instance); 962 ddi_remove_minor_node(dip, NULL); 963 return (DDI_FAILURE); 964 } 965 port = ddi_get_soft_state(fp_driver_softstate, instance); 966 967 (void) sprintf(port->fp_ibuf, "fp(%d)", instance); 968 969 port->fp_instance = instance; 970 port->fp_ulp_attach = 1; 971 port->fp_port_num = port_num; 972 port->fp_verbose = fp_verbosity; 973 port->fp_options = fp_options; 974 975 port->fp_fca_dip = ddi_get_parent(dip); 976 port->fp_port_dip = dip; 977 port->fp_fca_tran = (fc_fca_tran_t *) 978 ddi_get_driver_private(port->fp_fca_dip); 979 980 port->fp_task = port->fp_last_task = FP_TASK_IDLE; 981 982 /* 983 * 
Init the starting value of fp_rscn_count. Note that if 984 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the 985 * actual # of RSCNs will be (fp_rscn_count - 1) 986 */ 987 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1; 988 989 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL); 990 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL); 991 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL); 992 993 (void) sprintf(name, "fp%d_cache", instance); 994 995 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY, 996 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 997 "phyport-instance", -1)) != -1) { 998 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1); 999 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn); 1000 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn); 1001 port->fp_npiv_type = FC_NPIV_PORT; 1002 } 1003 1004 /* 1005 * Allocate the pool of fc_packet_t structs to be used with 1006 * this fp instance. 1007 */ 1008 port->fp_pkt_cache = kmem_cache_create(name, 1009 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8, 1010 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port, 1011 NULL, 0); 1012 port->fp_out_fpcmds = 0; 1013 if (port->fp_pkt_cache == NULL) { 1014 goto cache_alloc_failed; 1015 } 1016 1017 1018 /* 1019 * Allocate the d_id and pwwn hash tables for all remote ports 1020 * connected to this local port. 1021 */ 1022 port->fp_did_table = kmem_zalloc(did_table_size * 1023 sizeof (struct d_id_hash), KM_SLEEP); 1024 1025 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size * 1026 sizeof (struct pwwn_hash), KM_SLEEP); 1027 1028 port->fp_taskq = taskq_create("fp_ulp_callback", 1, 1029 MINCLSYSPRI, 1, 16, 0); 1030 1031 /* Indicate that don't have the pm components yet */ 1032 port->fp_soft_state |= FP_SOFT_NO_PMCOMP; 1033 1034 /* 1035 * Bind the callbacks with the FCA driver. This will open the gate 1036 * for asynchronous callbacks, so after this call the fp_mutex 1037 * must be held when updating the fc_local_port_t struct. 1038 * 1039 * This is done _before_ setting up the job thread so we can avoid 1040 * cleaning up after the thread_create() in the error path. This 1041 * also means fp will be operating with fp_els_resp_pkt set to NULL. 
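	 *
	 * In rough order, the remainder of attach: bind the FCA callbacks,
	 * allocate the ELS response packet, start the job thread, retrieve
	 * the FCA capabilities and unsolicited buffers, queue the port
	 * startup job, wait for ULP attach to finish and then create the
	 * PM components.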
1042 */ 1043 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1044 goto bind_callbacks_failed; 1045 } 1046 1047 if (phyport) { 1048 mutex_enter(&phyport->fp_mutex); 1049 if (phyport->fp_port_next) { 1050 phyport->fp_port_next->fp_port_prev = port; 1051 port->fp_port_next = phyport->fp_port_next; 1052 phyport->fp_port_next = port; 1053 port->fp_port_prev = phyport; 1054 } else { 1055 phyport->fp_port_next = port; 1056 phyport->fp_port_prev = port; 1057 port->fp_port_next = phyport; 1058 port->fp_port_prev = phyport; 1059 } 1060 mutex_exit(&phyport->fp_mutex); 1061 } 1062 1063 /* 1064 * Init Symbolic Names 1065 */ 1066 fp_init_symbolic_names(port); 1067 1068 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t), 1069 KM_SLEEP, NULL); 1070 1071 if (pkt == NULL) { 1072 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet", 1073 instance); 1074 goto alloc_els_packet_failed; 1075 } 1076 1077 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN, 1078 v.v_maxsyspri - 2); 1079 1080 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn); 1081 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port", 1082 i_pwwn) != DDI_PROP_SUCCESS) { 1083 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1084 "fp(%d): Updating 'initiator-port' property" 1085 " on fp dev_info node failed", instance); 1086 } 1087 1088 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn); 1089 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node", 1090 i_pwwn) != DDI_PROP_SUCCESS) { 1091 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1092 "fp(%d): Updating 'initiator-node' property" 1093 " on fp dev_info node failed", instance); 1094 } 1095 1096 mutex_enter(&port->fp_mutex); 1097 port->fp_els_resp_pkt = pkt; 1098 mutex_exit(&port->fp_mutex); 1099 1100 /* 1101 * Determine the count of unsolicited buffers this FCA can support 1102 */ 1103 fp_retrieve_caps(port); 1104 1105 /* 1106 * Allocate unsolicited buffer tokens 1107 */ 1108 if (port->fp_ub_count) { 1109 ub_count = port->fp_ub_count; 1110 port->fp_ub_tokens = kmem_zalloc(ub_count * 1111 sizeof (*port->fp_ub_tokens), KM_SLEEP); 1112 /* 1113 * Do not fail the attach if unsolicited buffer allocation 1114 * fails; Just try to get along with whatever the FCA can do. 1115 */ 1116 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size, 1117 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) != 1118 FC_SUCCESS || ub_count != port->fp_ub_count) { 1119 cmn_err(CE_WARN, "fp(%d): failed to allocate " 1120 " Unsolicited buffers. proceeding with attach...", 1121 instance); 1122 kmem_free(port->fp_ub_tokens, 1123 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1124 port->fp_ub_tokens = NULL; 1125 } 1126 } 1127 1128 fp_load_ulp_modules(dip, port); 1129 1130 /* 1131 * Enable DDI_SUSPEND and DDI_RESUME for this instance. 1132 */ 1133 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1134 "pm-hardware-state", "needs-suspend-resume", 1135 strlen("needs-suspend-resume") + 1); 1136 1137 /* 1138 * fctl maintains a list of all port handles, so 1139 * help fctl add this one to its list now. 
	 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */

		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
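	 *
	 * The labels below undo the allocations in reverse order: first the
	 * FCA binding, unsolicited buffer tokens and ELS response packet,
	 * then the taskq, hash tables and packet cache, and finally the
	 * CVs, mutex, minor nodes, soft state and properties.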
	 */

alloc_els_packet_failed:

	if (port->fp_fca_handle != NULL) {
		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
		port->fp_fca_handle = NULL;
	}

	if (port->fp_ub_tokens != NULL) {
		(void) fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens);
		kmem_free(port->fp_ub_tokens,
		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_els_resp_pkt != NULL) {
		fp_free_pkt(port->fp_els_resp_pkt);
		port->fp_els_resp_pkt = NULL;
	}

bind_callbacks_failed:

	if (port->fp_taskq != NULL) {
		taskq_destroy(port->fp_taskq);
	}

	if (port->fp_pwwn_table != NULL) {
		kmem_free(port->fp_pwwn_table,
		    pwwn_table_size * sizeof (struct pwwn_hash));
		port->fp_pwwn_table = NULL;
	}

	if (port->fp_did_table != NULL) {
		kmem_free(port->fp_did_table,
		    did_table_size * sizeof (struct d_id_hash));
		port->fp_did_table = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
		port->fp_pkt_cache = NULL;
	}

cache_alloc_failed:

	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	mutex_destroy(&port->fp_mutex);
	ddi_remove_minor_node(port->fp_port_dip, NULL);
	ddi_soft_state_free(fp_driver_softstate, instance);
	ddi_prop_remove_all(dip);

	return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
	int rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef	DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All the discovery is initiated and handled by the per-port thread.
	 * Further, all the discovery is handled in callback mode
	 * (not polled mode); in a specific case such as this, the discovery
	 * is required to happen in polled mode. The easiest way out is
	 * to bail out port thread and get started. Come back and fix this
	 * to do on demand discovery initiated by ULPs. ULPs such as FCP
	 * will do on-demand discovery during pre-power-up busctl handling
	 * which will only be possible when SCSA provides a new HBA vector
	 * for sending down the PM busctl requests.
1327 */ 1328 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME); 1329 1330 rval = fp_resume_all(port, FC_CMD_RESUME); 1331 if (rval != DDI_SUCCESS) { 1332 mutex_enter(&port->fp_mutex); 1333 port->fp_soft_state |= FP_SOFT_SUSPEND; 1334 mutex_exit(&port->fp_mutex); 1335 (void) callb_generic_cpr(&port->fp_cpr_info, 1336 CB_CODE_CPR_CHKPT); 1337 } 1338 1339 return (rval); 1340 } 1341 1342 /* 1343 * Perform FC Port power on initialization 1344 */ 1345 static int 1346 fp_power_up(fc_local_port_t *port) 1347 { 1348 int rval; 1349 1350 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1351 1352 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0); 1353 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN); 1354 1355 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1356 1357 mutex_exit(&port->fp_mutex); 1358 1359 rval = fp_resume_all(port, FC_CMD_POWER_UP); 1360 if (rval != DDI_SUCCESS) { 1361 mutex_enter(&port->fp_mutex); 1362 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1363 } else { 1364 mutex_enter(&port->fp_mutex); 1365 } 1366 1367 return (rval); 1368 } 1369 1370 1371 /* 1372 * It is important to note that the power may possibly be removed between 1373 * SUSPEND and the ensuing RESUME operation. In such a context the underlying 1374 * FC port hardware would have gone through an OFFLINE to ONLINE transition 1375 * (hardware state). In this case, the port driver may need to rediscover the 1376 * topology, perform LOGINs, register with the name server again and perform 1377 * any such port initialization procedures. To perform LOGINs, the driver could 1378 * use the port device handle to see if a LOGIN needs to be performed and use 1379 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured 1380 * or removed) which will be reflected in the map the ULPs will see. 1381 */ 1382 static int 1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd) 1384 { 1385 1386 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1387 1388 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1389 return (DDI_FAILURE); 1390 } 1391 1392 mutex_enter(&port->fp_mutex); 1393 1394 /* 1395 * If there are commands queued for delayed retry, instead of 1396 * working the hard way to figure out which ones are good for 1397 * restart and which ones not (ELSs are definitely not good 1398 * as the port will have to go through a new spin of rediscovery 1399 * now), so just flush them out. 1400 */ 1401 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) { 1402 fp_cmd_t *cmd; 1403 1404 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT; 1405 1406 mutex_exit(&port->fp_mutex); 1407 while ((cmd = fp_deque_cmd(port)) != NULL) { 1408 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 1409 fp_iodone(cmd); 1410 } 1411 mutex_enter(&port->fp_mutex); 1412 } 1413 1414 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) { 1415 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) || 1416 port->fp_dev_count) { 1417 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1418 port->fp_offline_tid = timeout(fp_offline_timeout, 1419 (caddr_t)port, fp_offline_ticks); 1420 } 1421 if (port->fp_job_head) { 1422 cv_signal(&port->fp_cv); 1423 } 1424 mutex_exit(&port->fp_mutex); 1425 fctl_attach_ulps(port, cmd, &modlinkage); 1426 } else { 1427 struct job_request *job; 1428 1429 /* 1430 * If an OFFLINE timer was running at the time of 1431 * suspending, there is no need to restart it as 1432 * the port is ONLINE now. 
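		 *
		 * Instead, bump fp_statec_busy and run a JOB_PORT_ONLINE
		 * job (with ULP notification cancelled), prune devices
		 * that did not come back, and then attach the ULPs.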
		 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+ Grab the port driver mutex, check if the unsolicited
 *	  callback is currently under processing. If true, fail
 *	  the DDI_DETACH request by printing a message; If false
 *	  mark the DDI_DETACH as under progress, so that any
 *	  further unsolicited callbacks get bounced.
 *	+ Perform PRLO/LOGO if necessary, cleanup all the data
 *	  structures.
 *	+ Get the job_handler thread to gracefully exit.
 *	+ Unregister callbacks with the FCA port.
 *	+ Now that some peace is found, notify all the ULPs of
 *	  DDI_DETACH request (using ulp_port_detach entry point)
 *	+ Free all mutexes, semaphores, conditional variables.
 *	+ Free the soft state, return success.
 *
 * Important considerations:
 *	Port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested. It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t *job;
	uint32_t delay_count;
	fc_orphan_t *orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach by taking care of such cases a delay of about
	 * 30 seconds is introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands, because
		 * sd/ssd/scsi_vhci should have finished all their commands,
		 * fcp/fcip/fcsm should have finished all their commands.
		 *
		 * It seems that all fp internal commands are asynchronous now.
1510 */ 1511 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1512 mutex_exit(&port->fp_mutex); 1513 1514 cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress" 1515 " Failing detach", port->fp_instance, port->fp_out_fpcmds); 1516 return (DDI_FAILURE); 1517 } 1518 1519 while ((port->fp_soft_state & 1520 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) && 1521 (delay_count < 30)) { 1522 mutex_exit(&port->fp_mutex); 1523 delay_count++; 1524 delay(drv_usectohz(1000000)); 1525 mutex_enter(&port->fp_mutex); 1526 } 1527 1528 if (port->fp_soft_state & 1529 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) { 1530 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1531 mutex_exit(&port->fp_mutex); 1532 1533 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1534 " Failing detach", port->fp_instance); 1535 return (DDI_FAILURE); 1536 } 1537 1538 port->fp_soft_state |= FP_SOFT_IN_DETACH; 1539 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1540 mutex_exit(&port->fp_mutex); 1541 1542 /* 1543 * If we're powered down, we need to raise power prior to submitting 1544 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never 1545 * process the shutdown job. 1546 */ 1547 if (fctl_busy_port(port) != 0) { 1548 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed", 1549 port->fp_instance); 1550 mutex_enter(&port->fp_mutex); 1551 port->fp_soft_state &= ~FP_SOFT_IN_DETACH; 1552 mutex_exit(&port->fp_mutex); 1553 return (DDI_FAILURE); 1554 } 1555 1556 /* 1557 * This will deallocate data structs and cause the "job" thread 1558 * to exit, in preparation for DDI_DETACH on the instance. 1559 * This can sleep for an arbitrary duration, since it waits for 1560 * commands over the wire, timeout(9F) callbacks, etc. 1561 * 1562 * CAUTION: There is still a race here, where the "job" thread 1563 * can still be executing code even tho the fctl_jobwait() call 1564 * below has returned to us. In theory the fp driver could even be 1565 * modunloaded even tho the job thread isn't done executing. 1566 * without creating the race condition. 
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);


	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}


/*
 * Steps to perform DDI_SUSPEND operation on a FC port
 *
 *	- If already suspended return DDI_FAILURE
 *	- If already power-suspended return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	  in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	  taken care of by the CPR frame work)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console)
	 *
	 * Busy waiting for a short interval (500 milliseconds?) to see
	 * whether the callback processing completes could be another option.
	 * Since most of the callback processing involves a lot of work, it
	 * is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if FC port thread is busy
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * Power down request followed by a DDI_SUSPEND should
	 * never happen; If it does return DDI_SUCCESS
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if FC port thread is busy
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * check if the ULPs are ready for power down
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	fp_suspend_all(port);

	return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int index;
	struct pwwn_hash *head;
	fc_remote_port_t *pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	if (port->fp_wait_tid != 0) {
		timeout_id_t tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	if (port->fp_offline_tid) {
		timeout_id_t tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME)
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
	int (*cb) (caddr_t);
	fc_packet_t *pkt;
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_local_port_t *port = (fc_local_port_t *)cdarg;

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd->cmd_next = NULL;
	cmd->cmd_flags = 0;
	cmd->cmd_dflags = 0;
	cmd->cmd_job = NULL;
	cmd->cmd_port = port;
	pkt = &cmd->cmd_pkt;

	if (ddi_dma_alloc_handle(port->fp_fca_dip,
	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
	    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (-1);
	}

	if (ddi_dma_alloc_handle(port->fp_fca_dip,
	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
	    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
		return (-1);
	}

	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
	    pkt->pkt_data_cookie_cnt = 0;
	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
	    pkt->pkt_data_cookie = NULL;
	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

	return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
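 * (For reference, fp_attach_handler() creates this cache as:
 *
 *	port->fp_pkt_cache = kmem_cache_create(name,
 *	    (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
 *	    fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
 *	    NULL, 0);
 *
 * so each cache object is an fp_cmd_t followed by the FCA's private
 * packet area, with DMA handles pre-allocated by the constructor.)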
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_packet_t *pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery and a system's inability to allocate memory
 * or DVMA resources while performing some of these critical ELSs
 * causes a lot of problems. While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers. So it
 * is desirable to have framework support to reserve a fragment
 * of DVMA. So until this is fixed the correct way, the suffering
 * is huge whenever a LIP happens at a time DVMA resources are
 * drained out completely - so an attempt needs to be made to
 * KM_SLEEP while requesting for these resources, hoping that
 * the requests won't hang forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 */

static fp_cmd_t *
fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
    fc_remote_port_t *pd)
{
	int rval;
	ulong_t real_len;
	fp_cmd_t *cmd;
	fc_packet_t *pkt;
	int (*cb) (caddr_t);
	ddi_dma_cookie_t pkt_cookie;
	ddi_dma_cookie_t *cp;
	uint32_t cnt;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
	if (cmd == NULL) {
		return (cmd);
	}

	cmd->cmd_ulp_pkt = NULL;
	cmd->cmd_flags = 0;
	pkt = &cmd->cmd_pkt;
	ASSERT(cmd->cmd_dflags == 0);

	pkt->pkt_datalen = 0;
	pkt->pkt_data = NULL;
	pkt->pkt_state = 0;
	pkt->pkt_action = 0;
	pkt->pkt_reason = 0;
	pkt->pkt_expln = 0;

	/*
	 * Init pkt_pd with the given pointer; this must be done _before_
	 * the call to fc_ulp_init_packet().
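	 *
	 * (After fc_ulp_init_packet() succeeds, the cmd and resp buffers
	 * are each set up in three steps: ddi_dma_mem_alloc(),
	 * ddi_dma_addr_bind_handle() and a copy of the resulting DMA
	 * cookies; any failure unwinds through alloc_pkt_failed below.)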
1992 */ 1993 pkt->pkt_pd = pd; 1994 1995 /* Now call the FCA driver to init its private, per-packet fields */ 1996 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1997 goto alloc_pkt_failed; 1998 } 1999 2000 if (cmd_len) { 2001 ASSERT(pkt->pkt_cmd_dma != NULL); 2002 2003 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2004 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2005 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2006 &pkt->pkt_cmd_acc); 2007 2008 if (rval != DDI_SUCCESS) { 2009 goto alloc_pkt_failed; 2010 } 2011 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2012 2013 if (real_len < cmd_len) { 2014 goto alloc_pkt_failed; 2015 } 2016 2017 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2018 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2019 DDI_DMA_CONSISTENT, cb, NULL, 2020 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2021 2022 if (rval != DDI_DMA_MAPPED) { 2023 goto alloc_pkt_failed; 2024 } 2025 2026 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2027 2028 if (pkt->pkt_cmd_cookie_cnt > 2029 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2030 goto alloc_pkt_failed; 2031 } 2032 2033 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2034 2035 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2036 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2037 KM_NOSLEEP); 2038 2039 if (cp == NULL) { 2040 goto alloc_pkt_failed; 2041 } 2042 2043 *cp = pkt_cookie; 2044 cp++; 2045 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2046 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2047 *cp = pkt_cookie; 2048 } 2049 } 2050 2051 if (resp_len) { 2052 ASSERT(pkt->pkt_resp_dma != NULL); 2053 2054 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2055 port->fp_fca_tran->fca_acc_attr, 2056 DDI_DMA_CONSISTENT, cb, NULL, 2057 (caddr_t *)&pkt->pkt_resp, &real_len, 2058 &pkt->pkt_resp_acc); 2059 2060 if (rval != DDI_SUCCESS) { 2061 goto alloc_pkt_failed; 2062 } 2063 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2064 2065 if (real_len < resp_len) { 2066 goto alloc_pkt_failed; 2067 } 2068 2069 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2070 pkt->pkt_resp, real_len, DDI_DMA_READ | 2071 DDI_DMA_CONSISTENT, cb, NULL, 2072 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2073 2074 if (rval != DDI_DMA_MAPPED) { 2075 goto alloc_pkt_failed; 2076 } 2077 2078 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2079 2080 if (pkt->pkt_resp_cookie_cnt > 2081 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2082 goto alloc_pkt_failed; 2083 } 2084 2085 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2086 2087 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2088 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2089 KM_NOSLEEP); 2090 2091 if (cp == NULL) { 2092 goto alloc_pkt_failed; 2093 } 2094 2095 *cp = pkt_cookie; 2096 cp++; 2097 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2098 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2099 *cp = pkt_cookie; 2100 } 2101 } 2102 2103 pkt->pkt_cmdlen = cmd_len; 2104 pkt->pkt_rsplen = resp_len; 2105 pkt->pkt_ulp_private = cmd; 2106 2107 return (cmd); 2108 2109 alloc_pkt_failed: 2110 2111 fp_free_dma(cmd); 2112 2113 if (pkt->pkt_cmd_cookie != NULL) { 2114 kmem_free(pkt->pkt_cmd_cookie, 2115 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2116 pkt->pkt_cmd_cookie = NULL; 2117 } 2118 2119 if (pkt->pkt_resp_cookie != NULL) { 2120 kmem_free(pkt->pkt_resp_cookie, 2121 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2122 pkt->pkt_resp_cookie = NULL; 2123 } 2124 2125 kmem_cache_free(port->fp_pkt_cache, cmd); 2126 2127 return 
(NULL); 2128 } 2129 2130 2131 /* 2132 * Free FC packet 2133 */ 2134 static void 2135 fp_free_pkt(fp_cmd_t *cmd) 2136 { 2137 fc_local_port_t *port; 2138 fc_packet_t *pkt; 2139 2140 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2141 2142 cmd->cmd_next = NULL; 2143 cmd->cmd_job = NULL; 2144 pkt = &cmd->cmd_pkt; 2145 pkt->pkt_ulp_private = 0; 2146 pkt->pkt_tran_flags = 0; 2147 pkt->pkt_tran_type = 0; 2148 port = cmd->cmd_port; 2149 2150 if (pkt->pkt_cmd_cookie != NULL) { 2151 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2152 sizeof (ddi_dma_cookie_t)); 2153 pkt->pkt_cmd_cookie = NULL; 2154 } 2155 2156 if (pkt->pkt_resp_cookie != NULL) { 2157 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2158 sizeof (ddi_dma_cookie_t)); 2159 pkt->pkt_resp_cookie = NULL; 2160 } 2161 2162 fp_free_dma(cmd); 2163 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2164 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2165 } 2166 2167 2168 /* 2169 * Release DVMA resources 2170 */ 2171 static void 2172 fp_free_dma(fp_cmd_t *cmd) 2173 { 2174 fc_packet_t *pkt = &cmd->cmd_pkt; 2175 2176 pkt->pkt_cmdlen = 0; 2177 pkt->pkt_rsplen = 0; 2178 pkt->pkt_tran_type = 0; 2179 pkt->pkt_tran_flags = 0; 2180 2181 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2182 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2183 } 2184 2185 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2186 if (pkt->pkt_cmd_acc) { 2187 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2188 } 2189 } 2190 2191 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2192 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2193 } 2194 2195 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2196 if (pkt->pkt_resp_acc) { 2197 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2198 } 2199 } 2200 cmd->cmd_dflags = 0; 2201 } 2202 2203 2204 /* 2205 * Dedicated thread to perform various activities. One thread for 2206 * each fc_local_port_t (driver soft state) instance. 2207 * Note, this effectively works out to one thread for each local 2208 * port, but there are also some Solaris taskq threads in use on a per-local 2209 * port basis; these also need to be taken into consideration. 2210 */ 2211 static void 2212 fp_job_handler(fc_local_port_t *port) 2213 { 2214 int rval; 2215 uint32_t *d_id; 2216 fc_remote_port_t *pd; 2217 job_request_t *job; 2218 2219 #ifndef __lock_lint 2220 /* 2221 * Solaris-internal stuff for proper operation of kernel threads 2222 * with Solaris CPR. 2223 */ 2224 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2225 callb_generic_cpr, "fp_job_handler"); 2226 #endif 2227 2228 2229 /* Loop forever waiting for work to do */ 2230 for (;;) { 2231 2232 mutex_enter(&port->fp_mutex); 2233 2234 /* 2235 * Sleep if no work to do right now, or if we want 2236 * to suspend or power-down. 2237 */ 2238 while (port->fp_job_head == NULL || 2239 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2240 FP_SOFT_SUSPEND))) { 2241 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2242 cv_wait(&port->fp_cv, &port->fp_mutex); 2243 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2244 } 2245 2246 /* 2247 * OK, we've just been woken up, so retrieve the next entry 2248 * from the head of the job queue for this local port. 2249 */ 2250 job = fctl_deque_job(port); 2251 2252 /* 2253 * Handle all the fp driver's supported job codes here 2254 * in this big honkin' switch. 2255 */ 2256 switch (job->job_code) { 2257 case JOB_PORT_SHUTDOWN: 2258 /* 2259 * fp_port_shutdown() is only called from here. This 2260 * will prepare the local port instance (softstate) 2261 * for detaching. 
This cancels timeout callbacks, 2262 * executes LOGOs with remote ports, cleans up tables, 2263 * and deallocates data structs. 2264 */ 2265 fp_port_shutdown(port, job); 2266 2267 /* 2268 * This will exit the job thread. 2269 */ 2270 #ifndef __lock_lint 2271 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2272 #else 2273 mutex_exit(&port->fp_mutex); 2274 #endif 2275 fctl_jobdone(job); 2276 thread_exit(); 2277 2278 /* NOTREACHED */ 2279 2280 case JOB_ATTACH_ULP: { 2281 /* 2282 * This job is spawned in response to a ULP calling 2283 * fc_ulp_add(). 2284 */ 2285 2286 boolean_t do_attach_ulps = B_TRUE; 2287 2288 /* 2289 * If fp is detaching, we don't want to call 2290 * fp_startup_done as this asynchronous 2291 * notification may interfere with the re-attach. 2292 */ 2293 2294 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2295 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2296 do_attach_ulps = B_FALSE; 2297 } else { 2298 /* 2299 * We are going to force the transport 2300 * to attach to the ULPs, so set 2301 * fp_ulp_attach. This will keep any 2302 * potential detach from occurring until 2303 * we are done. 2304 */ 2305 port->fp_ulp_attach = 1; 2306 } 2307 2308 mutex_exit(&port->fp_mutex); 2309 2310 /* 2311 * NOTE: Since we just dropped the mutex, there is now 2312 * a race window where the fp_soft_state check above 2313 * could change here. This race is covered because an 2314 * additional check was added in the functions hidden 2315 * under fp_startup_done(). 2316 */ 2317 if (do_attach_ulps == B_TRUE) { 2318 /* 2319 * This goes thru a bit of a convoluted call 2320 * chain before spawning off a DDI taskq 2321 * request to perform the actual attach 2322 * operations. Blocking can occur at a number 2323 * of points. 2324 */ 2325 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2326 } 2327 job->job_result = FC_SUCCESS; 2328 fctl_jobdone(job); 2329 break; 2330 } 2331 2332 case JOB_ULP_NOTIFY: { 2333 /* 2334 * Pass state change notifications up to any/all 2335 * registered ULPs. 2336 */ 2337 uint32_t statec; 2338 2339 statec = job->job_ulp_listlen; 2340 if (statec == FC_STATE_RESET_REQUESTED) { 2341 port->fp_last_task = port->fp_task; 2342 port->fp_task = FP_TASK_OFFLINE; 2343 fp_port_offline(port, 0); 2344 port->fp_task = port->fp_last_task; 2345 port->fp_last_task = FP_TASK_IDLE; 2346 } 2347 2348 if (--port->fp_statec_busy == 0) { 2349 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2350 } 2351 2352 mutex_exit(&port->fp_mutex); 2353 2354 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2355 fctl_jobdone(job); 2356 break; 2357 } 2358 2359 case JOB_PLOGI_ONE: 2360 /* 2361 * Issue a PLOGI to a single remote port. Multiple 2362 * PLOGIs to different remote ports may occur in 2363 * parallel. 2364 * This can create the fc_remote_port_t if it does not 2365 * already exist. 
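			 *
			 * A condensed sketch of the flow handled below (the
			 * already-logged-in short cut and error paths are
			 * omitted):
			 *
			 *	d_id = (uint32_t *)job->job_private;
			 *	pd = fctl_get_remote_port_by_did(port, *d_id);
			 *	if (pd == NULL &&
			 *	    FC_IS_TOP_SWITCH(port->fp_topology))
			 *		pd = fp_create_remote_port_by_ns(port,
			 *		    *d_id, KM_SLEEP);
			 *	job->job_flags |= JOB_TYPE_FP_ASYNC;
			 *	job->job_counter = 1;
			 *	(void) fp_port_login(port, *d_id, job,
			 *	    FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL);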
2366 */ 2367 2368 mutex_exit(&port->fp_mutex); 2369 d_id = (uint32_t *)job->job_private; 2370 pd = fctl_get_remote_port_by_did(port, *d_id); 2371 2372 if (pd) { 2373 mutex_enter(&pd->pd_mutex); 2374 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2375 pd->pd_login_count++; 2376 mutex_exit(&pd->pd_mutex); 2377 job->job_result = FC_SUCCESS; 2378 fctl_jobdone(job); 2379 break; 2380 } 2381 mutex_exit(&pd->pd_mutex); 2382 } else { 2383 mutex_enter(&port->fp_mutex); 2384 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2385 mutex_exit(&port->fp_mutex); 2386 pd = fp_create_remote_port_by_ns(port, 2387 *d_id, KM_SLEEP); 2388 if (pd == NULL) { 2389 job->job_result = FC_FAILURE; 2390 fctl_jobdone(job); 2391 break; 2392 } 2393 } else { 2394 mutex_exit(&port->fp_mutex); 2395 } 2396 } 2397 2398 job->job_flags |= JOB_TYPE_FP_ASYNC; 2399 job->job_counter = 1; 2400 2401 rval = fp_port_login(port, *d_id, job, 2402 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2403 2404 if (rval != FC_SUCCESS) { 2405 job->job_result = rval; 2406 fctl_jobdone(job); 2407 } 2408 break; 2409 2410 case JOB_LOGO_ONE: { 2411 /* 2412 * Issue a PLOGO to a single remote port. Multiple 2413 * PLOGOs to different remote ports may occur in 2414 * parallel. 2415 */ 2416 fc_remote_port_t *pd; 2417 2418 #ifndef __lock_lint 2419 ASSERT(job->job_counter > 0); 2420 #endif 2421 2422 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2423 2424 mutex_enter(&pd->pd_mutex); 2425 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2426 mutex_exit(&pd->pd_mutex); 2427 job->job_result = FC_LOGINREQ; 2428 mutex_exit(&port->fp_mutex); 2429 fctl_jobdone(job); 2430 break; 2431 } 2432 if (pd->pd_login_count > 1) { 2433 pd->pd_login_count--; 2434 mutex_exit(&pd->pd_mutex); 2435 job->job_result = FC_SUCCESS; 2436 mutex_exit(&port->fp_mutex); 2437 fctl_jobdone(job); 2438 break; 2439 } 2440 mutex_exit(&pd->pd_mutex); 2441 mutex_exit(&port->fp_mutex); 2442 job->job_flags |= JOB_TYPE_FP_ASYNC; 2443 (void) fp_logout(port, pd, job); 2444 break; 2445 } 2446 2447 case JOB_FCIO_LOGIN: 2448 /* 2449 * PLOGI initiated at ioctl request. 2450 */ 2451 mutex_exit(&port->fp_mutex); 2452 job->job_result = 2453 fp_fcio_login(port, job->job_private, job); 2454 fctl_jobdone(job); 2455 break; 2456 2457 case JOB_FCIO_LOGOUT: 2458 /* 2459 * PLOGO initiated at ioctl request. 
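			 * (fp_fcio_logout() is called directly from this job
			 * thread; its return value becomes the job result.)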
2460 */ 2461 mutex_exit(&port->fp_mutex); 2462 job->job_result = 2463 fp_fcio_logout(port, job->job_private, job); 2464 fctl_jobdone(job); 2465 break; 2466 2467 case JOB_PORT_GETMAP: 2468 case JOB_PORT_GETMAP_PLOGI_ALL: { 2469 port->fp_last_task = port->fp_task; 2470 port->fp_task = FP_TASK_GETMAP; 2471 2472 switch (port->fp_topology) { 2473 case FC_TOP_PRIVATE_LOOP: 2474 job->job_counter = 1; 2475 2476 fp_get_loopmap(port, job); 2477 mutex_exit(&port->fp_mutex); 2478 fp_jobwait(job); 2479 fctl_fillout_map(port, 2480 (fc_portmap_t **)job->job_private, 2481 (uint32_t *)job->job_arg, 1, 0, 0); 2482 fctl_jobdone(job); 2483 mutex_enter(&port->fp_mutex); 2484 break; 2485 2486 case FC_TOP_PUBLIC_LOOP: 2487 case FC_TOP_FABRIC: 2488 mutex_exit(&port->fp_mutex); 2489 job->job_counter = 1; 2490 2491 job->job_result = fp_ns_getmap(port, 2492 job, (fc_portmap_t **)job->job_private, 2493 (uint32_t *)job->job_arg, 2494 FCTL_GAN_START_ID); 2495 fctl_jobdone(job); 2496 mutex_enter(&port->fp_mutex); 2497 break; 2498 2499 case FC_TOP_PT_PT: 2500 mutex_exit(&port->fp_mutex); 2501 fctl_fillout_map(port, 2502 (fc_portmap_t **)job->job_private, 2503 (uint32_t *)job->job_arg, 1, 0, 0); 2504 fctl_jobdone(job); 2505 mutex_enter(&port->fp_mutex); 2506 break; 2507 2508 default: 2509 mutex_exit(&port->fp_mutex); 2510 fctl_jobdone(job); 2511 mutex_enter(&port->fp_mutex); 2512 break; 2513 } 2514 port->fp_task = port->fp_last_task; 2515 port->fp_last_task = FP_TASK_IDLE; 2516 mutex_exit(&port->fp_mutex); 2517 break; 2518 } 2519 2520 case JOB_PORT_OFFLINE: { 2521 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2522 2523 port->fp_last_task = port->fp_task; 2524 port->fp_task = FP_TASK_OFFLINE; 2525 2526 if (port->fp_statec_busy > 2) { 2527 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2528 fp_port_offline(port, 0); 2529 if (--port->fp_statec_busy == 0) { 2530 port->fp_soft_state &= 2531 ~FP_SOFT_IN_STATEC_CB; 2532 } 2533 } else { 2534 fp_port_offline(port, 1); 2535 } 2536 2537 port->fp_task = port->fp_last_task; 2538 port->fp_last_task = FP_TASK_IDLE; 2539 2540 mutex_exit(&port->fp_mutex); 2541 2542 fctl_jobdone(job); 2543 break; 2544 } 2545 2546 case JOB_PORT_STARTUP: { 2547 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2548 if (port->fp_statec_busy > 1) { 2549 mutex_exit(&port->fp_mutex); 2550 break; 2551 } 2552 mutex_exit(&port->fp_mutex); 2553 2554 FP_TRACE(FP_NHEAD2(9, rval), 2555 "Topology discovery failed"); 2556 break; 2557 } 2558 2559 /* 2560 * Attempt building device handles in case 2561 * of private Loop. 
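			 * (fp_get_loopmap() below sets job_counter to the
			 * LILP map length and attempts a PLOGI to each AL_PA
			 * that is not already known; fp_jobwait() then blocks
			 * until all of them have been accounted for.)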
2562 */ 2563 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2564 job->job_counter = 1; 2565 2566 fp_get_loopmap(port, job); 2567 mutex_exit(&port->fp_mutex); 2568 fp_jobwait(job); 2569 mutex_enter(&port->fp_mutex); 2570 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2571 ASSERT(port->fp_total_devices == 0); 2572 port->fp_total_devices = 2573 port->fp_dev_count; 2574 } 2575 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2576 /* 2577 * Hack to avoid state changes going up early 2578 */ 2579 port->fp_statec_busy++; 2580 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2581 2582 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2583 fp_fabric_online(port, job); 2584 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2585 } 2586 mutex_exit(&port->fp_mutex); 2587 fctl_jobdone(job); 2588 break; 2589 } 2590 2591 case JOB_PORT_ONLINE: { 2592 char *newtop; 2593 char *oldtop; 2594 uint32_t old_top; 2595 2596 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2597 2598 /* 2599 * Bail out early if there are a lot of 2600 * state changes in the pipeline 2601 */ 2602 if (port->fp_statec_busy > 1) { 2603 --port->fp_statec_busy; 2604 mutex_exit(&port->fp_mutex); 2605 fctl_jobdone(job); 2606 break; 2607 } 2608 2609 switch (old_top = port->fp_topology) { 2610 case FC_TOP_PRIVATE_LOOP: 2611 oldtop = "Private Loop"; 2612 break; 2613 2614 case FC_TOP_PUBLIC_LOOP: 2615 oldtop = "Public Loop"; 2616 break; 2617 2618 case FC_TOP_PT_PT: 2619 oldtop = "Point to Point"; 2620 break; 2621 2622 case FC_TOP_FABRIC: 2623 oldtop = "Fabric"; 2624 break; 2625 2626 default: 2627 oldtop = NULL; 2628 break; 2629 } 2630 2631 port->fp_last_task = port->fp_task; 2632 port->fp_task = FP_TASK_ONLINE; 2633 2634 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2635 2636 port->fp_task = port->fp_last_task; 2637 port->fp_last_task = FP_TASK_IDLE; 2638 2639 if (port->fp_statec_busy > 1) { 2640 --port->fp_statec_busy; 2641 mutex_exit(&port->fp_mutex); 2642 break; 2643 } 2644 2645 port->fp_state = FC_STATE_OFFLINE; 2646 2647 FP_TRACE(FP_NHEAD2(9, rval), 2648 "Topology discovery failed"); 2649 2650 if (--port->fp_statec_busy == 0) { 2651 port->fp_soft_state &= 2652 ~FP_SOFT_IN_STATEC_CB; 2653 } 2654 2655 if (port->fp_offline_tid == NULL) { 2656 port->fp_offline_tid = 2657 timeout(fp_offline_timeout, 2658 (caddr_t)port, fp_offline_ticks); 2659 } 2660 2661 mutex_exit(&port->fp_mutex); 2662 break; 2663 } 2664 2665 switch (port->fp_topology) { 2666 case FC_TOP_PRIVATE_LOOP: 2667 newtop = "Private Loop"; 2668 break; 2669 2670 case FC_TOP_PUBLIC_LOOP: 2671 newtop = "Public Loop"; 2672 break; 2673 2674 case FC_TOP_PT_PT: 2675 newtop = "Point to Point"; 2676 break; 2677 2678 case FC_TOP_FABRIC: 2679 newtop = "Fabric"; 2680 break; 2681 2682 default: 2683 newtop = NULL; 2684 break; 2685 } 2686 2687 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2688 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2689 "Change in FC Topology old = %s new = %s", 2690 oldtop, newtop); 2691 } 2692 2693 switch (port->fp_topology) { 2694 case FC_TOP_PRIVATE_LOOP: { 2695 int orphan = (old_top == FC_TOP_FABRIC || 2696 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2697 2698 mutex_exit(&port->fp_mutex); 2699 fp_loop_online(port, job, orphan); 2700 break; 2701 } 2702 2703 case FC_TOP_PUBLIC_LOOP: 2704 /* FALLTHROUGH */ 2705 case FC_TOP_FABRIC: 2706 fp_fabric_online(port, job); 2707 mutex_exit(&port->fp_mutex); 2708 break; 2709 2710 case FC_TOP_PT_PT: 2711 fp_p2p_online(port, job); 2712 mutex_exit(&port->fp_mutex); 2713 break; 2714 2715 default: 2716 if (--port->fp_statec_busy != 0) { 2717 /* 2718 * Watch curiously at what the next 2719 * state transition can do. 2720 */ 2721 mutex_exit(&port->fp_mutex); 2722 break; 2723 } 2724 2725 FP_TRACE(FP_NHEAD2(9, 0), 2726 "Topology Unknown, Offlining the port.."); 2727 2728 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2729 port->fp_state = FC_STATE_OFFLINE; 2730 2731 if (port->fp_offline_tid == NULL) { 2732 port->fp_offline_tid = 2733 timeout(fp_offline_timeout, 2734 (caddr_t)port, fp_offline_ticks); 2735 } 2736 mutex_exit(&port->fp_mutex); 2737 break; 2738 } 2739 2740 mutex_enter(&port->fp_mutex); 2741 2742 port->fp_task = port->fp_last_task; 2743 port->fp_last_task = FP_TASK_IDLE; 2744 2745 mutex_exit(&port->fp_mutex); 2746 2747 fctl_jobdone(job); 2748 break; 2749 } 2750 2751 case JOB_PLOGI_GROUP: { 2752 mutex_exit(&port->fp_mutex); 2753 fp_plogi_group(port, job); 2754 break; 2755 } 2756 2757 case JOB_UNSOL_REQUEST: { 2758 mutex_exit(&port->fp_mutex); 2759 fp_handle_unsol_buf(port, 2760 (fc_unsol_buf_t *)job->job_private, job); 2761 fctl_dealloc_job(job); 2762 break; 2763 } 2764 2765 case JOB_NS_CMD: { 2766 fctl_ns_req_t *ns_cmd; 2767 2768 mutex_exit(&port->fp_mutex); 2769 2770 job->job_flags |= JOB_TYPE_FP_ASYNC; 2771 ns_cmd = (fctl_ns_req_t *)job->job_private; 2772 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2773 ns_cmd->ns_cmd_code > NS_DA_ID) { 2774 job->job_result = FC_BADCMD; 2775 fctl_jobdone(job); 2776 break; 2777 } 2778 2779 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2780 if (ns_cmd->ns_pd != NULL) { 2781 job->job_result = FC_BADOBJECT; 2782 fctl_jobdone(job); 2783 break; 2784 } 2785 2786 job->job_counter = 1; 2787 2788 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2789 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2790 2791 if (rval != FC_SUCCESS) { 2792 job->job_result = rval; 2793 fctl_jobdone(job); 2794 } 2795 break; 2796 } 2797 job->job_result = FC_SUCCESS; 2798 job->job_counter = 1; 2799 2800 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2801 if (rval != FC_SUCCESS) { 2802 fctl_jobdone(job); 2803 } 2804 break; 2805 } 2806 2807 case JOB_LINK_RESET: { 2808 la_wwn_t *pwwn; 2809 uint32_t topology; 2810 2811 pwwn = (la_wwn_t *)job->job_private; 2812 ASSERT(pwwn != NULL); 2813 2814 topology = port->fp_topology; 2815 mutex_exit(&port->fp_mutex); 2816 2817 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2818 topology == FC_TOP_PRIVATE_LOOP) { 2819 job->job_flags |= JOB_TYPE_FP_ASYNC; 2820 rval = port->fp_fca_tran->fca_reset( 2821 port->fp_fca_handle, FC_FCA_LINK_RESET); 2822 job->job_result = rval; 2823 fp_jobdone(job); 2824 } else { 2825 ASSERT((job->job_flags & 2826 JOB_TYPE_FP_ASYNC) == 0); 2827 2828 if (FC_IS_TOP_SWITCH(topology)) { 2829 rval = fp_remote_lip(port, pwwn, 2830 KM_SLEEP, job); 2831 } else { 2832 rval = FC_FAILURE; 2833 } 2834 if (rval != FC_SUCCESS) { 2835 job->job_result = rval; 2836 } 2837 fctl_jobdone(job); 2838 } 2839 break; 2840 } 2841 2842 default: 2843 mutex_exit(&port->fp_mutex); 2844 job->job_result = FC_BADCMD; 2845 fctl_jobdone(job); 2846 break; 2847 } 2848 } 2849 /* NOTREACHED */ 2850 } 2851 2852 2853 /* 2854 * Perform FC port bring up initialization 2855 */ 2856 static int 2857 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2858 { 2859 int rval; 2860 uint32_t state; 2861 uint32_t src_id; 2862 fc_lilpmap_t *lilp_map; 2863 2864 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2865 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2866 2867 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2868 " port=%p, job=%p", port, job); 2869 2870 port->fp_topology = FC_TOP_UNKNOWN; 2871 port->fp_port_id.port_id = 0; 2872 state = FC_PORT_STATE_MASK(port->fp_state); 2873 2874 if (state == FC_STATE_OFFLINE) { 2875 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2876 job->job_result = FC_OFFLINE; 2877 mutex_exit(&port->fp_mutex); 2878 fctl_jobdone(job); 2879 mutex_enter(&port->fp_mutex); 2880 return (FC_OFFLINE); 2881 } 2882 2883 if (state == FC_STATE_LOOP) { 2884 port->fp_port_type.port_type = FC_NS_PORT_NL; 2885 mutex_exit(&port->fp_mutex); 2886 2887 lilp_map = &port->fp_lilp_map; 2888 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2889 job->job_result = FC_FAILURE; 2890 fctl_jobdone(job); 2891 2892 FP_TRACE(FP_NHEAD1(9, rval), 2893 "LILP map Invalid or not present"); 2894 mutex_enter(&port->fp_mutex); 2895 return (FC_FAILURE); 2896 } 2897 2898 if (lilp_map->lilp_length == 0) { 2899 job->job_result = FC_NO_MAP; 2900 fctl_jobdone(job); 2901 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2902 "LILP map length zero"); 2903 mutex_enter(&port->fp_mutex); 2904 return (FC_NO_MAP); 2905 } 2906 src_id = lilp_map->lilp_myalpa & 0xFF; 2907 } else { 2908 fc_remote_port_t *pd; 2909 fc_fca_pm_t pm; 2910 fc_fca_p2p_info_t p2p_info; 2911 int pd_recepient; 2912 2913 /* 2914 * Get P2P remote port info if possible 2915 */ 2916 bzero((caddr_t)&pm, sizeof (pm)); 2917 2918 pm.pm_cmd_flags = FC_FCA_PM_READ; 2919 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2920 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2921 pm.pm_data_buf = (caddr_t)&p2p_info; 2922 2923 rval = port->fp_fca_tran->fca_port_manage( 2924 port->fp_fca_handle, &pm); 2925 2926 if (rval == FC_SUCCESS) { 2927 port->fp_port_id.port_id = p2p_info.fca_d_id; 2928 port->fp_port_type.port_type = FC_NS_PORT_N; 2929 port->fp_topology = FC_TOP_PT_PT; 2930 port->fp_total_devices = 1; 2931 pd_recepient = fctl_wwn_cmp( 2932 &port->fp_service_params.nport_ww_name, 2933 &p2p_info.pwwn) < 0 ? 
2934 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2935 mutex_exit(&port->fp_mutex); 2936 pd = fctl_create_remote_port(port, 2937 &p2p_info.nwwn, 2938 &p2p_info.pwwn, 2939 p2p_info.d_id, 2940 pd_recepient, KM_NOSLEEP); 2941 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2942 " P2P port=%p pd=%p", port, pd); 2943 mutex_enter(&port->fp_mutex); 2944 return (FC_SUCCESS); 2945 } 2946 port->fp_port_type.port_type = FC_NS_PORT_N; 2947 mutex_exit(&port->fp_mutex); 2948 src_id = 0; 2949 } 2950 2951 job->job_counter = 1; 2952 job->job_result = FC_SUCCESS; 2953 2954 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2955 KM_SLEEP)) != FC_SUCCESS) { 2956 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2957 job->job_result = FC_FAILURE; 2958 fctl_jobdone(job); 2959 2960 mutex_enter(&port->fp_mutex); 2961 if (port->fp_statec_busy <= 1) { 2962 mutex_exit(&port->fp_mutex); 2963 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 2964 "Couldn't transport FLOGI"); 2965 mutex_enter(&port->fp_mutex); 2966 } 2967 return (FC_FAILURE); 2968 } 2969 2970 fp_jobwait(job); 2971 2972 mutex_enter(&port->fp_mutex); 2973 if (job->job_result == FC_SUCCESS) { 2974 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2975 mutex_exit(&port->fp_mutex); 2976 fp_ns_init(port, job, KM_SLEEP); 2977 mutex_enter(&port->fp_mutex); 2978 } 2979 } else { 2980 if (state == FC_STATE_LOOP) { 2981 port->fp_topology = FC_TOP_PRIVATE_LOOP; 2982 port->fp_port_id.port_id = 2983 port->fp_lilp_map.lilp_myalpa & 0xFF; 2984 } 2985 } 2986 2987 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 2988 port, job); 2989 2990 return (FC_SUCCESS); 2991 } 2992 2993 2994 /* 2995 * Perform ULP invocations following FC port startup 2996 */ 2997 /* ARGSUSED */ 2998 static void 2999 fp_startup_done(opaque_t arg, uchar_t result) 3000 { 3001 fc_local_port_t *port = arg; 3002 3003 fp_attach_ulps(port, FC_CMD_ATTACH); 3004 3005 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 3006 } 3007 3008 3009 /* 3010 * Perform ULP port attach 3011 */ 3012 static void 3013 fp_ulp_port_attach(void *arg) 3014 { 3015 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 3016 fc_local_port_t *port = att->att_port; 3017 3018 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3019 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3020 3021 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3022 3023 if (att->att_need_pm_idle == B_TRUE) { 3024 fctl_idle_port(port); 3025 } 3026 3027 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3028 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3029 3030 mutex_enter(&att->att_port->fp_mutex); 3031 att->att_port->fp_ulp_attach = 0; 3032 3033 port->fp_task = port->fp_last_task; 3034 port->fp_last_task = FP_TASK_IDLE; 3035 3036 cv_signal(&att->att_port->fp_attach_cv); 3037 3038 mutex_exit(&att->att_port->fp_mutex); 3039 3040 kmem_free(att, sizeof (fp_soft_attach_t)); 3041 } 3042 3043 /* 3044 * Entry point to funnel all requests down to FCAs 3045 */ 3046 static int 3047 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3048 { 3049 int rval; 3050 3051 mutex_enter(&port->fp_mutex); 3052 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3053 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3054 FC_STATE_OFFLINE))) { 3055 /* 3056 * This means there is more than one state change 3057 * at this point of time - Since they are processed 3058 * serially, any processing of the current one should 3059 * be failed, failed and move up in processing the next 3060 */ 
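		/*
		 * Fail the command back to the caller right here; it is not
		 * handed to the FCA at all, and the packet state and reason
		 * set below tell the ULP why.
		 */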
3061 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3062 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3063 if (cmd->cmd_job) { 3064 /* 3065 * A state change that is going to be invalidated 3066 * by another one already in the port driver's queue 3067 * need not go up to all ULPs. This will minimize 3068 * needless processing and ripples in ULP modules 3069 */ 3070 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3071 } 3072 mutex_exit(&port->fp_mutex); 3073 return (FC_STATEC_BUSY); 3074 } 3075 3076 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3077 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3078 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3079 mutex_exit(&port->fp_mutex); 3080 3081 return (FC_OFFLINE); 3082 } 3083 mutex_exit(&port->fp_mutex); 3084 3085 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3086 if (rval != FC_SUCCESS) { 3087 if (rval == FC_TRAN_BUSY) { 3088 cmd->cmd_retry_interval = fp_retry_delay; 3089 rval = fp_retry_cmd(&cmd->cmd_pkt); 3090 if (rval == FC_FAILURE) { 3091 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3092 } 3093 } 3094 } else { 3095 mutex_enter(&port->fp_mutex); 3096 port->fp_out_fpcmds++; 3097 mutex_exit(&port->fp_mutex); 3098 } 3099 3100 return (rval); 3101 } 3102 3103 3104 /* 3105 * Each time a timeout kicks in, walk the wait queue, decrement the 3106 * the retry_interval, when the retry_interval becomes less than 3107 * or equal to zero, re-transport the command: If the re-transport 3108 * fails with BUSY, enqueue the command in the wait queue. 3109 * 3110 * In order to prevent looping forever because of commands enqueued 3111 * from within this function itself, save the current tail pointer 3112 * (in cur_tail) and exit the loop after serving this command. 3113 */ 3114 static void 3115 fp_resendcmd(void *port_handle) 3116 { 3117 int rval; 3118 fc_local_port_t *port; 3119 fp_cmd_t *cmd; 3120 fp_cmd_t *cur_tail; 3121 3122 port = port_handle; 3123 mutex_enter(&port->fp_mutex); 3124 cur_tail = port->fp_wait_tail; 3125 mutex_exit(&port->fp_mutex); 3126 3127 while ((cmd = fp_deque_cmd(port)) != NULL) { 3128 cmd->cmd_retry_interval -= fp_retry_ticker; 3129 /* Check if we are detaching */ 3130 if (port->fp_soft_state & 3131 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3132 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3133 cmd->cmd_pkt.pkt_reason = 0; 3134 fp_iodone(cmd); 3135 } else if (cmd->cmd_retry_interval <= 0) { 3136 rval = cmd->cmd_transport(port->fp_fca_handle, 3137 &cmd->cmd_pkt); 3138 3139 if (rval != FC_SUCCESS) { 3140 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3141 if (--cmd->cmd_retry_count) { 3142 fp_enque_cmd(port, cmd); 3143 if (cmd == cur_tail) { 3144 break; 3145 } 3146 continue; 3147 } 3148 cmd->cmd_pkt.pkt_state = 3149 FC_PKT_TRAN_BSY; 3150 } else { 3151 cmd->cmd_pkt.pkt_state = 3152 FC_PKT_TRAN_ERROR; 3153 } 3154 cmd->cmd_pkt.pkt_reason = 0; 3155 fp_iodone(cmd); 3156 } else { 3157 mutex_enter(&port->fp_mutex); 3158 port->fp_out_fpcmds++; 3159 mutex_exit(&port->fp_mutex); 3160 } 3161 } else { 3162 fp_enque_cmd(port, cmd); 3163 } 3164 3165 if (cmd == cur_tail) { 3166 break; 3167 } 3168 } 3169 3170 mutex_enter(&port->fp_mutex); 3171 if (port->fp_wait_head) { 3172 timeout_id_t tid; 3173 3174 mutex_exit(&port->fp_mutex); 3175 tid = timeout(fp_resendcmd, (caddr_t)port, 3176 fp_retry_ticks); 3177 mutex_enter(&port->fp_mutex); 3178 port->fp_wait_tid = tid; 3179 } else { 3180 port->fp_wait_tid = NULL; 3181 } 3182 mutex_exit(&port->fp_mutex); 3183 } 3184 3185 3186 /* 3187 * Handle Local, Fabric, N_Port, Transport 
(whatever that means) BUSY here. 3188 * 3189 * Yes, as you can see below, cmd_retry_count is used here too. That means 3190 * the retries for BUSY are less if there were transport failures (transport 3191 * failure means fca_transport failure). The goal is not to exceed overall 3192 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3193 * 3194 * Return Values: 3195 * FC_SUCCESS 3196 * FC_FAILURE 3197 */ 3198 static int 3199 fp_retry_cmd(fc_packet_t *pkt) 3200 { 3201 fp_cmd_t *cmd; 3202 3203 cmd = pkt->pkt_ulp_private; 3204 3205 if (--cmd->cmd_retry_count) { 3206 fp_enque_cmd(cmd->cmd_port, cmd); 3207 return (FC_SUCCESS); 3208 } else { 3209 return (FC_FAILURE); 3210 } 3211 } 3212 3213 3214 /* 3215 * Queue up FC packet for deferred retry 3216 */ 3217 static void 3218 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3219 { 3220 timeout_id_t tid; 3221 3222 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3223 3224 #ifdef DEBUG 3225 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3226 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3227 #endif 3228 3229 mutex_enter(&port->fp_mutex); 3230 if (port->fp_wait_tail) { 3231 port->fp_wait_tail->cmd_next = cmd; 3232 port->fp_wait_tail = cmd; 3233 } else { 3234 ASSERT(port->fp_wait_head == NULL); 3235 port->fp_wait_head = port->fp_wait_tail = cmd; 3236 if (port->fp_wait_tid == NULL) { 3237 mutex_exit(&port->fp_mutex); 3238 tid = timeout(fp_resendcmd, (caddr_t)port, 3239 fp_retry_ticks); 3240 mutex_enter(&port->fp_mutex); 3241 port->fp_wait_tid = tid; 3242 } 3243 } 3244 mutex_exit(&port->fp_mutex); 3245 } 3246 3247 3248 /* 3249 * Handle all RJT codes 3250 */ 3251 static int 3252 fp_handle_reject(fc_packet_t *pkt) 3253 { 3254 int rval = FC_FAILURE; 3255 uchar_t next_class; 3256 fp_cmd_t *cmd; 3257 fc_local_port_t *port; 3258 3259 cmd = pkt->pkt_ulp_private; 3260 port = cmd->cmd_port; 3261 3262 switch (pkt->pkt_state) { 3263 case FC_PKT_FABRIC_RJT: 3264 case FC_PKT_NPORT_RJT: 3265 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3266 next_class = fp_get_nextclass(cmd->cmd_port, 3267 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3268 3269 if (next_class == FC_TRAN_CLASS_INVALID) { 3270 return (rval); 3271 } 3272 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3273 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3274 3275 rval = fp_sendcmd(cmd->cmd_port, cmd, 3276 cmd->cmd_port->fp_fca_handle); 3277 3278 if (rval != FC_SUCCESS) { 3279 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3280 } 3281 } 3282 break; 3283 3284 case FC_PKT_LS_RJT: 3285 case FC_PKT_BA_RJT: 3286 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3287 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3288 cmd->cmd_retry_interval = fp_retry_delay; 3289 rval = fp_retry_cmd(pkt); 3290 } 3291 break; 3292 3293 case FC_PKT_FS_RJT: 3294 if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) { 3295 cmd->cmd_retry_interval = fp_retry_delay; 3296 rval = fp_retry_cmd(pkt); 3297 } 3298 break; 3299 3300 case FC_PKT_LOCAL_RJT: 3301 if (pkt->pkt_reason == FC_REASON_QFULL) { 3302 cmd->cmd_retry_interval = fp_retry_delay; 3303 rval = fp_retry_cmd(pkt); 3304 } 3305 break; 3306 3307 default: 3308 FP_TRACE(FP_NHEAD1(1, 0), 3309 "fp_handle_reject(): Invalid pkt_state"); 3310 break; 3311 } 3312 3313 return (rval); 3314 } 3315 3316 3317 /* 3318 * Return the next class of service supported by the FCA 3319 */ 3320 static uchar_t 3321 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3322 { 3323 uchar_t next_class; 3324 3325 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3326 3327 switch (cur_class) { 3328 case 
FC_TRAN_CLASS_INVALID: 3329 if (port->fp_cos & FC_NS_CLASS1) { 3330 next_class = FC_TRAN_CLASS1; 3331 break; 3332 } 3333 /* FALLTHROUGH */ 3334 3335 case FC_TRAN_CLASS1: 3336 if (port->fp_cos & FC_NS_CLASS2) { 3337 next_class = FC_TRAN_CLASS2; 3338 break; 3339 } 3340 /* FALLTHROUGH */ 3341 3342 case FC_TRAN_CLASS2: 3343 if (port->fp_cos & FC_NS_CLASS3) { 3344 next_class = FC_TRAN_CLASS3; 3345 break; 3346 } 3347 /* FALLTHROUGH */ 3348 3349 case FC_TRAN_CLASS3: 3350 default: 3351 next_class = FC_TRAN_CLASS_INVALID; 3352 break; 3353 } 3354 3355 return (next_class); 3356 } 3357 3358 3359 /* 3360 * Determine if a class of service is supported by the FCA 3361 */ 3362 static int 3363 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3364 { 3365 int rval; 3366 3367 switch (tran_class) { 3368 case FC_TRAN_CLASS1: 3369 if (cos & FC_NS_CLASS1) { 3370 rval = FC_SUCCESS; 3371 } else { 3372 rval = FC_FAILURE; 3373 } 3374 break; 3375 3376 case FC_TRAN_CLASS2: 3377 if (cos & FC_NS_CLASS2) { 3378 rval = FC_SUCCESS; 3379 } else { 3380 rval = FC_FAILURE; 3381 } 3382 break; 3383 3384 case FC_TRAN_CLASS3: 3385 if (cos & FC_NS_CLASS3) { 3386 rval = FC_SUCCESS; 3387 } else { 3388 rval = FC_FAILURE; 3389 } 3390 break; 3391 3392 default: 3393 rval = FC_FAILURE; 3394 break; 3395 } 3396 3397 return (rval); 3398 } 3399 3400 3401 /* 3402 * Dequeue FC packet for retry 3403 */ 3404 static fp_cmd_t * 3405 fp_deque_cmd(fc_local_port_t *port) 3406 { 3407 fp_cmd_t *cmd; 3408 3409 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3410 3411 mutex_enter(&port->fp_mutex); 3412 3413 if (port->fp_wait_head == NULL) { 3414 /* 3415 * To avoid races, NULL the fp_wait_tid as 3416 * we are about to exit the timeout thread. 3417 */ 3418 port->fp_wait_tid = NULL; 3419 mutex_exit(&port->fp_mutex); 3420 return (NULL); 3421 } 3422 3423 cmd = port->fp_wait_head; 3424 port->fp_wait_head = cmd->cmd_next; 3425 cmd->cmd_next = NULL; 3426 3427 if (port->fp_wait_head == NULL) { 3428 port->fp_wait_tail = NULL; 3429 } 3430 mutex_exit(&port->fp_mutex); 3431 3432 return (cmd); 3433 } 3434 3435 3436 /* 3437 * Wait for job completion 3438 */ 3439 static void 3440 fp_jobwait(job_request_t *job) 3441 { 3442 sema_p(&job->job_port_sema); 3443 } 3444 3445 3446 /* 3447 * Convert FC packet state to FC errno 3448 */ 3449 int 3450 fp_state_to_rval(uchar_t state) 3451 { 3452 int count; 3453 3454 for (count = 0; count < sizeof (fp_xlat) / 3455 sizeof (fp_xlat[0]); count++) { 3456 if (fp_xlat[count].xlat_state == state) { 3457 return (fp_xlat[count].xlat_rval); 3458 } 3459 } 3460 3461 return (FC_FAILURE); 3462 } 3463 3464 3465 /* 3466 * For Synchronous I/O requests, the caller is 3467 * expected to do fctl_jobdone(if necessary) 3468 * 3469 * We want to preserve at least one failure in the 3470 * job_result if it happens. 
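 *
 * On completion, if job_result is still FC_SUCCESS it is derived from the
 * packet state via fp_state_to_rval() (the fp_xlat table); the packet is
 * then released with fp_free_pkt() and one reference on the job is dropped
 * through fp_jobdone().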
3471 * 3472 */ 3473 static void 3474 fp_iodone(fp_cmd_t *cmd) 3475 { 3476 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3477 job_request_t *job = cmd->cmd_job; 3478 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3479 3480 ASSERT(job != NULL); 3481 ASSERT(cmd->cmd_port != NULL); 3482 ASSERT(&cmd->cmd_pkt != NULL); 3483 3484 mutex_enter(&job->job_mutex); 3485 if (job->job_result == FC_SUCCESS) { 3486 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3487 } 3488 mutex_exit(&job->job_mutex); 3489 3490 if (pd) { 3491 mutex_enter(&pd->pd_mutex); 3492 pd->pd_flags = PD_IDLE; 3493 mutex_exit(&pd->pd_mutex); 3494 } 3495 3496 if (ulp_pkt) { 3497 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3498 FP_IS_PKT_ERROR(ulp_pkt)) { 3499 fc_local_port_t *port; 3500 fc_remote_node_t *node; 3501 3502 port = cmd->cmd_port; 3503 3504 mutex_enter(&pd->pd_mutex); 3505 pd->pd_state = PORT_DEVICE_INVALID; 3506 pd->pd_ref_count--; 3507 node = pd->pd_remote_nodep; 3508 mutex_exit(&pd->pd_mutex); 3509 3510 ASSERT(node != NULL); 3511 ASSERT(port != NULL); 3512 3513 if (fctl_destroy_remote_port(port, pd) == 0) { 3514 fctl_destroy_remote_node(node); 3515 } 3516 3517 ulp_pkt->pkt_pd = NULL; 3518 } 3519 3520 ulp_pkt->pkt_comp(ulp_pkt); 3521 } 3522 3523 fp_free_pkt(cmd); 3524 fp_jobdone(job); 3525 } 3526 3527 3528 /* 3529 * Job completion handler 3530 */ 3531 static void 3532 fp_jobdone(job_request_t *job) 3533 { 3534 mutex_enter(&job->job_mutex); 3535 ASSERT(job->job_counter > 0); 3536 3537 if (--job->job_counter != 0) { 3538 mutex_exit(&job->job_mutex); 3539 return; 3540 } 3541 3542 if (job->job_ulp_pkts) { 3543 ASSERT(job->job_ulp_listlen > 0); 3544 kmem_free(job->job_ulp_pkts, 3545 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3546 } 3547 3548 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3549 mutex_exit(&job->job_mutex); 3550 fctl_jobdone(job); 3551 } else { 3552 mutex_exit(&job->job_mutex); 3553 sema_v(&job->job_port_sema); 3554 } 3555 } 3556 3557 3558 /* 3559 * Try to perform shutdown of a port during a detach. No return 3560 * value since the detach should not fail because the port shutdown 3561 * failed. 3562 */ 3563 static void 3564 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3565 { 3566 int index; 3567 int count; 3568 int flags; 3569 fp_cmd_t *cmd; 3570 struct pwwn_hash *head; 3571 fc_remote_port_t *pd; 3572 3573 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3574 3575 job->job_result = FC_SUCCESS; 3576 3577 if (port->fp_taskq) { 3578 /* 3579 * We must release the mutex here to ensure that other 3580 * potential jobs can complete their processing. Many 3581 * also need this mutex. 3582 */ 3583 mutex_exit(&port->fp_mutex); 3584 taskq_wait(port->fp_taskq); 3585 mutex_enter(&port->fp_mutex); 3586 } 3587 3588 if (port->fp_offline_tid) { 3589 timeout_id_t tid; 3590 3591 tid = port->fp_offline_tid; 3592 port->fp_offline_tid = NULL; 3593 mutex_exit(&port->fp_mutex); 3594 (void) untimeout(tid); 3595 mutex_enter(&port->fp_mutex); 3596 } 3597 3598 if (port->fp_wait_tid) { 3599 timeout_id_t tid; 3600 3601 tid = port->fp_wait_tid; 3602 port->fp_wait_tid = NULL; 3603 mutex_exit(&port->fp_mutex); 3604 (void) untimeout(tid); 3605 } else { 3606 mutex_exit(&port->fp_mutex); 3607 } 3608 3609 /* 3610 * While we cancel the timeout, let's also return the 3611 * the outstanding requests back to the callers. 
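	 * Each command taken off the wait queue below is completed with
	 * FC_OFFLINE through fp_iodone(), which wakes up the waiting job.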
3612 */ 3613 while ((cmd = fp_deque_cmd(port)) != NULL) { 3614 ASSERT(cmd->cmd_job != NULL); 3615 cmd->cmd_job->job_result = FC_OFFLINE; 3616 fp_iodone(cmd); 3617 } 3618 3619 /* 3620 * Gracefully LOGO with all the devices logged in. 3621 */ 3622 mutex_enter(&port->fp_mutex); 3623 3624 for (count = index = 0; index < pwwn_table_size; index++) { 3625 head = &port->fp_pwwn_table[index]; 3626 pd = head->pwwn_head; 3627 while (pd != NULL) { 3628 mutex_enter(&pd->pd_mutex); 3629 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3630 count++; 3631 } 3632 mutex_exit(&pd->pd_mutex); 3633 pd = pd->pd_wwn_hnext; 3634 } 3635 } 3636 3637 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3638 flags = job->job_flags; 3639 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3640 } else { 3641 flags = 0; 3642 } 3643 if (count) { 3644 job->job_counter = count; 3645 3646 for (index = 0; index < pwwn_table_size; index++) { 3647 head = &port->fp_pwwn_table[index]; 3648 pd = head->pwwn_head; 3649 while (pd != NULL) { 3650 mutex_enter(&pd->pd_mutex); 3651 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3652 ASSERT(pd->pd_login_count > 0); 3653 /* 3654 * Force the counter to ONE in order 3655 * for us to really send LOGO els. 3656 */ 3657 pd->pd_login_count = 1; 3658 mutex_exit(&pd->pd_mutex); 3659 mutex_exit(&port->fp_mutex); 3660 (void) fp_logout(port, pd, job); 3661 mutex_enter(&port->fp_mutex); 3662 } else { 3663 mutex_exit(&pd->pd_mutex); 3664 } 3665 pd = pd->pd_wwn_hnext; 3666 } 3667 } 3668 mutex_exit(&port->fp_mutex); 3669 fp_jobwait(job); 3670 } else { 3671 mutex_exit(&port->fp_mutex); 3672 } 3673 3674 if (job->job_result != FC_SUCCESS) { 3675 FP_TRACE(FP_NHEAD1(9, 0), 3676 "Can't logout all devices. Proceeding with" 3677 " port shutdown"); 3678 job->job_result = FC_SUCCESS; 3679 } 3680 3681 fctl_destroy_all_remote_ports(port); 3682 3683 mutex_enter(&port->fp_mutex); 3684 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3685 mutex_exit(&port->fp_mutex); 3686 fp_ns_fini(port, job); 3687 } else { 3688 mutex_exit(&port->fp_mutex); 3689 } 3690 3691 if (flags) { 3692 job->job_flags = flags; 3693 } 3694 3695 mutex_enter(&port->fp_mutex); 3696 3697 } 3698 3699 3700 /* 3701 * Build the port driver's data structures based on the AL_PA list 3702 */ 3703 static void 3704 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3705 { 3706 int rval; 3707 int flag; 3708 int count; 3709 uint32_t d_id; 3710 fc_remote_port_t *pd; 3711 fc_lilpmap_t *lilp_map; 3712 3713 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3714 3715 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3716 job->job_result = FC_OFFLINE; 3717 mutex_exit(&port->fp_mutex); 3718 fp_jobdone(job); 3719 mutex_enter(&port->fp_mutex); 3720 return; 3721 } 3722 3723 if (port->fp_lilp_map.lilp_length == 0) { 3724 mutex_exit(&port->fp_mutex); 3725 job->job_result = FC_NO_MAP; 3726 fp_jobdone(job); 3727 mutex_enter(&port->fp_mutex); 3728 return; 3729 } 3730 mutex_exit(&port->fp_mutex); 3731 3732 lilp_map = &port->fp_lilp_map; 3733 job->job_counter = lilp_map->lilp_length; 3734 3735 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3736 flag = FP_CMD_PLOGI_RETAIN; 3737 } else { 3738 flag = FP_CMD_PLOGI_DONT_CARE; 3739 } 3740 3741 for (count = 0; count < lilp_map->lilp_length; count++) { 3742 d_id = lilp_map->lilp_alpalist[count]; 3743 3744 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3745 fp_jobdone(job); 3746 continue; 3747 } 3748 3749 pd = fctl_get_remote_port_by_did(port, d_id); 3750 if (pd) { 3751 mutex_enter(&pd->pd_mutex); 3752 if (flag == FP_CMD_PLOGI_DONT_CARE || 3753 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3754 mutex_exit(&pd->pd_mutex); 3755 fp_jobdone(job); 3756 continue; 3757 } 3758 mutex_exit(&pd->pd_mutex); 3759 } 3760 3761 rval = fp_port_login(port, d_id, job, flag, 3762 KM_SLEEP, pd, NULL); 3763 if (rval != FC_SUCCESS) { 3764 fp_jobdone(job); 3765 } 3766 } 3767 3768 mutex_enter(&port->fp_mutex); 3769 } 3770 3771 3772 /* 3773 * Perform loop ONLINE processing 3774 */ 3775 static void 3776 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3777 { 3778 int count; 3779 int rval; 3780 uint32_t d_id; 3781 uint32_t listlen; 3782 fc_lilpmap_t *lilp_map; 3783 fc_remote_port_t *pd; 3784 fc_portmap_t *changelist; 3785 3786 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3787 3788 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3789 port, job); 3790 3791 lilp_map = &port->fp_lilp_map; 3792 3793 if (lilp_map->lilp_length) { 3794 mutex_enter(&port->fp_mutex); 3795 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3796 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3797 mutex_exit(&port->fp_mutex); 3798 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3799 } else { 3800 mutex_exit(&port->fp_mutex); 3801 } 3802 3803 job->job_counter = lilp_map->lilp_length; 3804 3805 for (count = 0; count < lilp_map->lilp_length; count++) { 3806 d_id = lilp_map->lilp_alpalist[count]; 3807 3808 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3809 fp_jobdone(job); 3810 continue; 3811 } 3812 3813 pd = fctl_get_remote_port_by_did(port, d_id); 3814 if (pd != NULL) { 3815 #ifdef DEBUG 3816 mutex_enter(&pd->pd_mutex); 3817 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3818 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3819 } 3820 mutex_exit(&pd->pd_mutex); 3821 #endif 3822 fp_jobdone(job); 3823 continue; 3824 } 3825 3826 rval = fp_port_login(port, d_id, job, 3827 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3828 3829 if (rval != FC_SUCCESS) { 3830 fp_jobdone(job); 3831 } 3832 } 3833 fp_jobwait(job); 3834 } 3835 listlen = 0; 3836 changelist = NULL; 3837 3838 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3839 mutex_enter(&port->fp_mutex); 3840 ASSERT(port->fp_statec_busy > 0); 3841 if (port->fp_statec_busy == 1) { 3842 mutex_exit(&port->fp_mutex); 3843 fctl_fillout_map(port, &changelist, &listlen, 3844 1, 0, orphan); 3845 3846 mutex_enter(&port->fp_mutex); 3847 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3848 ASSERT(port->fp_total_devices == 0); 3849 port->fp_total_devices = port->fp_dev_count; 3850 } 3851 } else { 3852 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3853 } 3854 mutex_exit(&port->fp_mutex); 3855 } 3856 3857 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3858 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3859 listlen, listlen, KM_SLEEP); 3860 } else { 3861 mutex_enter(&port->fp_mutex); 3862 if (--port->fp_statec_busy == 0) { 3863 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3864 } 3865 ASSERT(changelist == NULL && listlen == 0); 3866 mutex_exit(&port->fp_mutex); 3867 } 3868 3869 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3870 port, job); 3871 } 3872 3873 3874 /* 3875 * Get an Arbitrated Loop map from the underlying FCA 3876 */ 3877 static int 3878 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3879 { 3880 int rval; 3881 3882 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3883 port, lilp_map); 3884 3885 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3886 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3887 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3888 3889 if (rval != FC_SUCCESS) { 3890 rval = FC_NO_MAP; 3891 } else if (lilp_map->lilp_length == 0 && 3892 (lilp_map->lilp_magic >= MAGIC_LISM && 3893 lilp_map->lilp_magic < MAGIC_LIRP)) { 3894 uchar_t lilp_length; 3895 3896 /* 3897 * Since the map length is zero, provide all 3898 * the valid AL_PAs for NL_ports discovery. 3899 */ 3900 lilp_length = sizeof (fp_valid_alpas) / 3901 sizeof (fp_valid_alpas[0]); 3902 lilp_map->lilp_length = lilp_length; 3903 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3904 lilp_length); 3905 } else { 3906 rval = fp_validate_lilp_map(lilp_map); 3907 3908 if (rval == FC_SUCCESS) { 3909 mutex_enter(&port->fp_mutex); 3910 port->fp_total_devices = lilp_map->lilp_length - 1; 3911 mutex_exit(&port->fp_mutex); 3912 } 3913 } 3914 3915 mutex_enter(&port->fp_mutex); 3916 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3917 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3918 mutex_exit(&port->fp_mutex); 3919 3920 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3921 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3922 FP_TRACE(FP_NHEAD1(9, 0), 3923 "FCA reset failed after LILP map was found" 3924 " to be invalid"); 3925 } 3926 } else if (rval == FC_SUCCESS) { 3927 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3928 mutex_exit(&port->fp_mutex); 3929 } else { 3930 mutex_exit(&port->fp_mutex); 3931 } 3932 3933 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3934 lilp_map); 3935 3936 return (rval); 3937 } 3938 3939 3940 /* 3941 * Perform Fabric Login: 3942 * 3943 * Return Values: 3944 * FC_SUCCESS 3945 * FC_FAILURE 3946 * FC_NOMEM 3947 * FC_TRANSPORT_ERROR 3948 * and a lot others defined in fc_error.h 3949 */ 3950 static int 3951 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3952 int flag, int sleep) 3953 { 3954 int rval; 3955 fp_cmd_t *cmd; 3956 uchar_t class; 3957 3958 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3959 3960 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3961 port, job); 3962 3963 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3964 if (class == FC_TRAN_CLASS_INVALID) { 3965 return (FC_ELS_BAD); 3966 } 3967 3968 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3969 sizeof (la_els_logi_t), sleep, NULL); 3970 if (cmd == NULL) { 3971 return (FC_NOMEM); 3972 } 3973 3974 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3975 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3976 cmd->cmd_flags = flag; 3977 cmd->cmd_retry_count = fp_retry_count; 3978 cmd->cmd_ulp_pkt = NULL; 3979 3980 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3981 job, LA_ELS_FLOGI); 3982 3983 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3984 if (rval != FC_SUCCESS) { 3985 fp_free_pkt(cmd); 3986 } 3987 3988 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3989 port, job); 3990 3991 return (rval); 3992 } 3993 3994 3995 /* 3996 * In some scenarios such as private loop device discovery period 3997 * the fc_remote_port_t data structure isn't allocated. The allocation 3998 * is done when the PLOGI is successful. 
In some other scenarios 3999 * such as Fabric topology, the fc_remote_port_t is already created 4000 * and initialized with appropriate values (as the NS provides 4001 * them) 4002 */ 4003 static int 4004 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4005 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4006 { 4007 uchar_t class; 4008 fp_cmd_t *cmd; 4009 uint32_t src_id; 4010 fc_remote_port_t *tmp_pd; 4011 int relogin; 4012 int found = 0; 4013 4014 #ifdef DEBUG 4015 if (pd == NULL) { 4016 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4017 } 4018 #endif 4019 ASSERT(job->job_counter > 0); 4020 4021 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4022 if (class == FC_TRAN_CLASS_INVALID) { 4023 return (FC_ELS_BAD); 4024 } 4025 4026 mutex_enter(&port->fp_mutex); 4027 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4028 mutex_exit(&port->fp_mutex); 4029 4030 relogin = 1; 4031 if (tmp_pd) { 4032 mutex_enter(&tmp_pd->pd_mutex); 4033 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4034 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4035 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4036 relogin = 0; 4037 } 4038 mutex_exit(&tmp_pd->pd_mutex); 4039 } 4040 4041 if (!relogin) { 4042 mutex_enter(&tmp_pd->pd_mutex); 4043 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4044 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4045 } 4046 mutex_exit(&tmp_pd->pd_mutex); 4047 4048 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4049 sizeof (la_els_adisc_t), sleep, tmp_pd); 4050 if (cmd == NULL) { 4051 return (FC_NOMEM); 4052 } 4053 4054 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4055 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4056 cmd->cmd_flags = cmd_flag; 4057 cmd->cmd_retry_count = fp_retry_count; 4058 cmd->cmd_ulp_pkt = ulp_pkt; 4059 4060 mutex_enter(&port->fp_mutex); 4061 mutex_enter(&tmp_pd->pd_mutex); 4062 fp_adisc_init(cmd, job); 4063 mutex_exit(&tmp_pd->pd_mutex); 4064 mutex_exit(&port->fp_mutex); 4065 4066 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4067 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4068 4069 } else { 4070 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4071 sizeof (la_els_logi_t), sleep, pd); 4072 if (cmd == NULL) { 4073 return (FC_NOMEM); 4074 } 4075 4076 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4077 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4078 cmd->cmd_flags = cmd_flag; 4079 cmd->cmd_retry_count = fp_retry_count; 4080 cmd->cmd_ulp_pkt = ulp_pkt; 4081 4082 mutex_enter(&port->fp_mutex); 4083 src_id = port->fp_port_id.port_id; 4084 mutex_exit(&port->fp_mutex); 4085 4086 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4087 job, LA_ELS_PLOGI); 4088 } 4089 4090 if (pd) { 4091 mutex_enter(&pd->pd_mutex); 4092 pd->pd_flags = PD_ELS_IN_PROGRESS; 4093 mutex_exit(&pd->pd_mutex); 4094 } 4095 4096 /* npiv check to make sure we don't log into ourself */ 4097 if (relogin && (port->fp_topology == FC_TOP_FABRIC)) { 4098 if ((d_id & 0xffff00) == 4099 (port->fp_port_id.port_id & 0xffff00)) { 4100 found = 1; 4101 } 4102 } 4103 4104 if (found || 4105 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4106 if (found) { 4107 fc_packet_t *pkt = &cmd->cmd_pkt; 4108 pkt->pkt_state = FC_PKT_NPORT_RJT; 4109 } 4110 if (pd) { 4111 mutex_enter(&pd->pd_mutex); 4112 pd->pd_flags = PD_IDLE; 4113 mutex_exit(&pd->pd_mutex); 4114 } 4115 4116 if (ulp_pkt) { 4117 fc_packet_t *pkt = &cmd->cmd_pkt; 4118 4119 ulp_pkt->pkt_state = pkt->pkt_state; 4120 ulp_pkt->pkt_reason = pkt->pkt_reason; 4121 ulp_pkt->pkt_action = 
pkt->pkt_action; 4122 ulp_pkt->pkt_expln = pkt->pkt_expln; 4123 } 4124 4125 fp_iodone(cmd); 4126 } 4127 4128 return (FC_SUCCESS); 4129 } 4130 4131 4132 /* 4133 * Register the LOGIN parameters with a port device 4134 */ 4135 static void 4136 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4137 la_els_logi_t *acc, uchar_t class) 4138 { 4139 fc_remote_node_t *node; 4140 4141 ASSERT(pd != NULL); 4142 4143 mutex_enter(&pd->pd_mutex); 4144 node = pd->pd_remote_nodep; 4145 if (pd->pd_login_count == 0) { 4146 pd->pd_login_count++; 4147 } 4148 4149 if (handle) { 4150 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4151 (uint8_t *)&acc->common_service, 4152 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4153 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4154 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4155 DDI_DEV_AUTOINCR); 4156 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4157 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4158 DDI_DEV_AUTOINCR); 4159 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4160 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4161 DDI_DEV_AUTOINCR); 4162 } else { 4163 pd->pd_csp = acc->common_service; 4164 pd->pd_clsp1 = acc->class_1; 4165 pd->pd_clsp2 = acc->class_2; 4166 pd->pd_clsp3 = acc->class_3; 4167 } 4168 4169 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4170 pd->pd_login_class = class; 4171 mutex_exit(&pd->pd_mutex); 4172 4173 #ifndef __lock_lint 4174 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4175 pd->pd_port_id.port_id) == pd); 4176 #endif 4177 4178 mutex_enter(&node->fd_mutex); 4179 if (handle) { 4180 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4181 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4182 DDI_DEV_AUTOINCR); 4183 } else { 4184 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4185 } 4186 mutex_exit(&node->fd_mutex); 4187 } 4188 4189 4190 /* 4191 * Mark the remote port as OFFLINE 4192 */ 4193 static void 4194 fp_remote_port_offline(fc_remote_port_t *pd) 4195 { 4196 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4197 if (pd->pd_login_count && 4198 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4199 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4200 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4201 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4202 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4203 pd->pd_login_class = 0; 4204 } 4205 pd->pd_type = PORT_DEVICE_OLD; 4206 pd->pd_flags = PD_IDLE; 4207 fctl_tc_reset(&pd->pd_logo_tc); 4208 } 4209 4210 4211 /* 4212 * Deregistration of a port device 4213 */ 4214 static void 4215 fp_unregister_login(fc_remote_port_t *pd) 4216 { 4217 fc_remote_node_t *node; 4218 4219 ASSERT(pd != NULL); 4220 4221 mutex_enter(&pd->pd_mutex); 4222 pd->pd_login_count = 0; 4223 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4224 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4225 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4226 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4227 4228 pd->pd_state = PORT_DEVICE_VALID; 4229 pd->pd_login_class = 0; 4230 node = pd->pd_remote_nodep; 4231 mutex_exit(&pd->pd_mutex); 4232 4233 mutex_enter(&node->fd_mutex); 4234 bzero(node->fd_vv, sizeof (node->fd_vv)); 4235 mutex_exit(&node->fd_mutex); 4236 } 4237 4238 4239 /* 4240 * Handle OFFLINE state of an FCA port 4241 */ 4242 static void 4243 fp_port_offline(fc_local_port_t *port, int notify) 4244 { 4245 int index; 4246 int statec; 4247 timeout_id_t tid; 4248 struct pwwn_hash *head; 4249 
fc_remote_port_t *pd; 4250 4251 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4252 4253 for (index = 0; index < pwwn_table_size; index++) { 4254 head = &port->fp_pwwn_table[index]; 4255 pd = head->pwwn_head; 4256 while (pd != NULL) { 4257 mutex_enter(&pd->pd_mutex); 4258 fp_remote_port_offline(pd); 4259 fctl_delist_did_table(port, pd); 4260 mutex_exit(&pd->pd_mutex); 4261 pd = pd->pd_wwn_hnext; 4262 } 4263 } 4264 port->fp_total_devices = 0; 4265 4266 statec = 0; 4267 if (notify) { 4268 /* 4269 * Decrement the statec busy counter as we 4270 * are almost done with handling the state 4271 * change 4272 */ 4273 ASSERT(port->fp_statec_busy > 0); 4274 if (--port->fp_statec_busy == 0) { 4275 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4276 } 4277 mutex_exit(&port->fp_mutex); 4278 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4279 0, 0, KM_SLEEP); 4280 mutex_enter(&port->fp_mutex); 4281 4282 if (port->fp_statec_busy) { 4283 statec++; 4284 } 4285 } else if (port->fp_statec_busy > 1) { 4286 statec++; 4287 } 4288 4289 if ((tid = port->fp_offline_tid) != NULL) { 4290 mutex_exit(&port->fp_mutex); 4291 (void) untimeout(tid); 4292 mutex_enter(&port->fp_mutex); 4293 } 4294 4295 if (!statec) { 4296 port->fp_offline_tid = timeout(fp_offline_timeout, 4297 (caddr_t)port, fp_offline_ticks); 4298 } 4299 } 4300 4301 4302 /* 4303 * Offline devices and send up a state change notification to ULPs 4304 */ 4305 static void 4306 fp_offline_timeout(void *port_handle) 4307 { 4308 int ret; 4309 fc_local_port_t *port = port_handle; 4310 uint32_t listlen = 0; 4311 fc_portmap_t *changelist = NULL; 4312 4313 mutex_enter(&port->fp_mutex); 4314 4315 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4316 (port->fp_soft_state & 4317 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4318 port->fp_dev_count == 0 || port->fp_statec_busy) { 4319 port->fp_offline_tid = NULL; 4320 mutex_exit(&port->fp_mutex); 4321 return; 4322 } 4323 4324 mutex_exit(&port->fp_mutex); 4325 4326 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4327 4328 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4329 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4330 FC_FCA_CORE)) != FC_SUCCESS) { 4331 FP_TRACE(FP_NHEAD1(9, ret), 4332 "Failed to force adapter dump"); 4333 } else { 4334 FP_TRACE(FP_NHEAD1(9, 0), 4335 "Forced adapter dump successfully"); 4336 } 4337 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4338 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4339 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4340 FP_TRACE(FP_NHEAD1(9, ret), 4341 "Failed to force adapter dump and reset"); 4342 } else { 4343 FP_TRACE(FP_NHEAD1(9, 0), 4344 "Forced adapter dump and reset successfully"); 4345 } 4346 } 4347 4348 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4349 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4350 listlen, listlen, KM_SLEEP); 4351 4352 mutex_enter(&port->fp_mutex); 4353 port->fp_offline_tid = NULL; 4354 mutex_exit(&port->fp_mutex); 4355 } 4356 4357 4358 /* 4359 * Perform general purpose ELS request initialization 4360 */ 4361 static void 4362 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4363 void (*comp) (), job_request_t *job) 4364 { 4365 fc_packet_t *pkt; 4366 4367 pkt = &cmd->cmd_pkt; 4368 cmd->cmd_job = job; 4369 4370 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4371 pkt->pkt_cmd_fhdr.d_id = d_id; 4372 pkt->pkt_cmd_fhdr.s_id = s_id; 4373 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4374 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 
F_CTL_FIRST_SEQ; 4375 pkt->pkt_cmd_fhdr.seq_id = 0; 4376 pkt->pkt_cmd_fhdr.df_ctl = 0; 4377 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4378 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4379 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4380 pkt->pkt_cmd_fhdr.ro = 0; 4381 pkt->pkt_cmd_fhdr.rsvd = 0; 4382 pkt->pkt_comp = comp; 4383 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4384 } 4385 4386 4387 /* 4388 * Initialize PLOGI/FLOGI ELS request 4389 */ 4390 static void 4391 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4392 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4393 { 4394 ls_code_t payload; 4395 4396 fp_els_init(cmd, s_id, d_id, intr, job); 4397 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4398 4399 payload.ls_code = ls_code; 4400 payload.mbz = 0; 4401 4402 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4403 (uint8_t *)&port->fp_service_params, 4404 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4405 DDI_DEV_AUTOINCR); 4406 4407 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4408 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4409 DDI_DEV_AUTOINCR); 4410 } 4411 4412 4413 /* 4414 * Initialize LOGO ELS request 4415 */ 4416 static void 4417 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4418 { 4419 fc_local_port_t *port; 4420 fc_packet_t *pkt; 4421 la_els_logo_t payload; 4422 4423 port = pd->pd_port; 4424 pkt = &cmd->cmd_pkt; 4425 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4426 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4427 4428 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4429 fp_logo_intr, job); 4430 4431 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4432 4433 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4434 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4435 4436 payload.ls_code.ls_code = LA_ELS_LOGO; 4437 payload.ls_code.mbz = 0; 4438 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4439 payload.nport_id = port->fp_port_id; 4440 4441 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4442 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4443 } 4444 4445 /* 4446 * Initialize RNID ELS request 4447 */ 4448 static void 4449 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4450 { 4451 fc_local_port_t *port; 4452 fc_packet_t *pkt; 4453 la_els_rnid_t payload; 4454 fc_remote_port_t *pd; 4455 4456 pkt = &cmd->cmd_pkt; 4457 pd = pkt->pkt_pd; 4458 port = pd->pd_port; 4459 4460 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4461 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4462 4463 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4464 fp_rnid_intr, job); 4465 4466 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4467 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4468 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4469 4470 payload.ls_code.ls_code = LA_ELS_RNID; 4471 payload.ls_code.mbz = 0; 4472 payload.data_format = flag; 4473 4474 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4475 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4476 } 4477 4478 /* 4479 * Initialize RLS ELS request 4480 */ 4481 static void 4482 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4483 { 4484 fc_local_port_t *port; 4485 fc_packet_t *pkt; 4486 la_els_rls_t payload; 4487 fc_remote_port_t *pd; 4488 4489 pkt = &cmd->cmd_pkt; 4490 pd = pkt->pkt_pd; 4491 port = pd->pd_port; 4492 4493 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4494 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4495 4496 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4497 fp_rls_intr, job); 4498 4499 
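/*
 * As with the other ELS initializers here, fp_els_init() above fills in
 * the common frame header and completion routine; only the RLS-specific
 * payload (the requesting port identifier) is built below.
 */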
cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4500 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4501 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4502 4503 payload.ls_code.ls_code = LA_ELS_RLS; 4504 payload.ls_code.mbz = 0; 4505 payload.rls_portid = port->fp_port_id; 4506 4507 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4508 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4509 } 4510 4511 4512 /* 4513 * Initialize an ADISC ELS request 4514 */ 4515 static void 4516 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4517 { 4518 fc_local_port_t *port; 4519 fc_packet_t *pkt; 4520 la_els_adisc_t payload; 4521 fc_remote_port_t *pd; 4522 4523 pkt = &cmd->cmd_pkt; 4524 pd = pkt->pkt_pd; 4525 port = pd->pd_port; 4526 4527 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4528 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4529 4530 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4531 fp_adisc_intr, job); 4532 4533 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4534 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4535 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4536 4537 payload.ls_code.ls_code = LA_ELS_ADISC; 4538 payload.ls_code.mbz = 0; 4539 payload.nport_id = port->fp_port_id; 4540 payload.port_wwn = port->fp_service_params.nport_ww_name; 4541 payload.node_wwn = port->fp_service_params.node_ww_name; 4542 payload.hard_addr = port->fp_hard_addr; 4543 4544 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4545 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4546 } 4547 4548 4549 /* 4550 * Send up a state change notification to ULPs. 4551 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4552 */ 4553 static int 4554 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4555 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4556 { 4557 fc_port_clist_t *clist; 4558 fc_remote_port_t *pd; 4559 int count; 4560 4561 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4562 4563 clist = kmem_zalloc(sizeof (*clist), sleep); 4564 if (clist == NULL) { 4565 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4566 return (FC_NOMEM); 4567 } 4568 4569 clist->clist_state = state; 4570 4571 mutex_enter(&port->fp_mutex); 4572 clist->clist_flags = port->fp_topology; 4573 mutex_exit(&port->fp_mutex); 4574 4575 clist->clist_port = (opaque_t)port; 4576 clist->clist_len = listlen; 4577 clist->clist_size = alloc_len; 4578 clist->clist_map = changelist; 4579 4580 /* 4581 * Bump the reference count of each fc_remote_port_t in this changelist. 4582 * This is necessary since these devices will be sitting in a taskq 4583 * and referenced later. When the state change notification is 4584 * complete, the reference counts will be decremented. 
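 * (The matching decrements are expected to happen once the taskq
 * callback, fctl_ulp_statec_cb(), has finished notifying the ULPs.)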
4585 */ 4586 for (count = 0; count < clist->clist_len; count++) { 4587 pd = clist->clist_map[count].map_pd; 4588 4589 if (pd != NULL) { 4590 mutex_enter(&pd->pd_mutex); 4591 ASSERT((pd->pd_ref_count >= 0) || 4592 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4593 pd->pd_ref_count++; 4594 4595 if (clist->clist_map[count].map_state != 4596 PORT_DEVICE_INVALID) { 4597 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4598 } 4599 4600 mutex_exit(&pd->pd_mutex); 4601 } 4602 } 4603 4604 #ifdef DEBUG 4605 /* 4606 * Sanity check for presence of OLD devices in the hash lists 4607 */ 4608 if (clist->clist_size) { 4609 ASSERT(clist->clist_map != NULL); 4610 for (count = 0; count < clist->clist_len; count++) { 4611 if (clist->clist_map[count].map_state == 4612 PORT_DEVICE_INVALID) { 4613 la_wwn_t pwwn; 4614 fc_portid_t d_id; 4615 4616 pd = clist->clist_map[count].map_pd; 4617 ASSERT(pd != NULL); 4618 4619 mutex_enter(&pd->pd_mutex); 4620 pwwn = pd->pd_port_name; 4621 d_id = pd->pd_port_id; 4622 mutex_exit(&pd->pd_mutex); 4623 4624 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4625 ASSERT(pd != clist->clist_map[count].map_pd); 4626 4627 pd = fctl_get_remote_port_by_did(port, 4628 d_id.port_id); 4629 ASSERT(pd != clist->clist_map[count].map_pd); 4630 } 4631 } 4632 } 4633 #endif 4634 4635 mutex_enter(&port->fp_mutex); 4636 4637 if (state == FC_STATE_ONLINE) { 4638 if (--port->fp_statec_busy == 0) { 4639 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4640 } 4641 } 4642 mutex_exit(&port->fp_mutex); 4643 4644 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4645 clist, KM_SLEEP); 4646 4647 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4648 "state=%x, len=%d", port, state, listlen); 4649 4650 return (FC_SUCCESS); 4651 } 4652 4653 4654 /* 4655 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4656 */ 4657 static int 4658 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4659 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4660 { 4661 int ret; 4662 fc_port_clist_t *clist; 4663 4664 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4665 4666 clist = kmem_zalloc(sizeof (*clist), sleep); 4667 if (clist == NULL) { 4668 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4669 return (FC_NOMEM); 4670 } 4671 4672 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4673 4674 mutex_enter(&port->fp_mutex); 4675 clist->clist_flags = port->fp_topology; 4676 mutex_exit(&port->fp_mutex); 4677 4678 clist->clist_port = (opaque_t)port; 4679 clist->clist_len = listlen; 4680 clist->clist_size = alloc_len; 4681 clist->clist_map = changelist; 4682 4683 /* Send sysevents for target state changes */ 4684 4685 if (clist->clist_size) { 4686 int count; 4687 fc_remote_port_t *pd; 4688 4689 ASSERT(clist->clist_map != NULL); 4690 for (count = 0; count < clist->clist_len; count++) { 4691 pd = clist->clist_map[count].map_pd; 4692 4693 /* 4694 * Bump reference counts on all fc_remote_port_t 4695 * structs in this list. We don't know when the task 4696 * will fire, and we don't need these fc_remote_port_t 4697 * structs going away behind our back. 
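 * The references taken here are matched by decrements after the ULP
 * callbacks have run; for removals that is deliberately deferred until
 * the callback completes (see the note further down in this loop).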
4698 */ 4699 if (pd) { 4700 mutex_enter(&pd->pd_mutex); 4701 ASSERT((pd->pd_ref_count >= 0) || 4702 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4703 pd->pd_ref_count++; 4704 mutex_exit(&pd->pd_mutex); 4705 } 4706 4707 if (clist->clist_map[count].map_state == 4708 PORT_DEVICE_VALID) { 4709 if (clist->clist_map[count].map_type == 4710 PORT_DEVICE_NEW) { 4711 /* Update our state change counter */ 4712 mutex_enter(&port->fp_mutex); 4713 port->fp_last_change++; 4714 mutex_exit(&port->fp_mutex); 4715 4716 /* Additions */ 4717 fp_log_target_event(port, 4718 ESC_SUNFC_TARGET_ADD, 4719 clist->clist_map[count].map_pwwn, 4720 clist->clist_map[count].map_did. 4721 port_id); 4722 } 4723 4724 } else if ((clist->clist_map[count].map_type == 4725 PORT_DEVICE_OLD) && 4726 (clist->clist_map[count].map_state == 4727 PORT_DEVICE_INVALID)) { 4728 /* Update our state change counter */ 4729 mutex_enter(&port->fp_mutex); 4730 port->fp_last_change++; 4731 mutex_exit(&port->fp_mutex); 4732 4733 /* 4734 * For removals, we don't decrement 4735 * pd_ref_count until after the ULP's 4736 * state change callback function has 4737 * completed. 4738 */ 4739 4740 /* Removals */ 4741 fp_log_target_event(port, 4742 ESC_SUNFC_TARGET_REMOVE, 4743 clist->clist_map[count].map_pwwn, 4744 clist->clist_map[count].map_did.port_id); 4745 } 4746 4747 if (clist->clist_map[count].map_state != 4748 PORT_DEVICE_INVALID) { 4749 /* 4750 * Indicate that the ULPs are now aware of 4751 * this device. 4752 */ 4753 4754 mutex_enter(&pd->pd_mutex); 4755 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4756 mutex_exit(&pd->pd_mutex); 4757 } 4758 4759 #ifdef DEBUG 4760 /* 4761 * Sanity check for OLD devices in the hash lists 4762 */ 4763 if (pd && clist->clist_map[count].map_state == 4764 PORT_DEVICE_INVALID) { 4765 la_wwn_t pwwn; 4766 fc_portid_t d_id; 4767 4768 mutex_enter(&pd->pd_mutex); 4769 pwwn = pd->pd_port_name; 4770 d_id = pd->pd_port_id; 4771 mutex_exit(&pd->pd_mutex); 4772 4773 /* 4774 * This overwrites the 'pd' local variable. 4775 * Beware of this if 'pd' ever gets 4776 * referenced below this block. 
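 * As the code stands today, 'pd' is reassigned at the top of the next
 * loop iteration and is not used again after this DEBUG-only check, so
 * this is only a caution for future changes.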
4777 */ 4778 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4779 ASSERT(pd != clist->clist_map[count].map_pd); 4780 4781 pd = fctl_get_remote_port_by_did(port, 4782 d_id.port_id); 4783 ASSERT(pd != clist->clist_map[count].map_pd); 4784 } 4785 #endif 4786 } 4787 } 4788 4789 if (sync) { 4790 clist->clist_wait = 1; 4791 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4792 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4793 } 4794 4795 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4796 if (sync && ret) { 4797 mutex_enter(&clist->clist_mutex); 4798 while (clist->clist_wait) { 4799 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4800 } 4801 mutex_exit(&clist->clist_mutex); 4802 4803 mutex_destroy(&clist->clist_mutex); 4804 cv_destroy(&clist->clist_cv); 4805 kmem_free(clist, sizeof (*clist)); 4806 } 4807 4808 if (!ret) { 4809 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4810 "port=%p", port); 4811 kmem_free(clist->clist_map, 4812 sizeof (*(clist->clist_map)) * clist->clist_size); 4813 kmem_free(clist, sizeof (*clist)); 4814 } else { 4815 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4816 port, listlen); 4817 } 4818 4819 return (FC_SUCCESS); 4820 } 4821 4822 4823 /* 4824 * Perform PLOGI to the group of devices for ULPs 4825 */ 4826 static void 4827 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4828 { 4829 int offline; 4830 int count; 4831 int rval; 4832 uint32_t listlen; 4833 uint32_t done; 4834 uint32_t d_id; 4835 fc_remote_node_t *node; 4836 fc_remote_port_t *pd; 4837 fc_remote_port_t *tmp_pd; 4838 fc_packet_t *ulp_pkt; 4839 la_els_logi_t *els_data; 4840 ls_code_t ls_code; 4841 4842 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4843 port, job); 4844 4845 done = 0; 4846 listlen = job->job_ulp_listlen; 4847 job->job_counter = job->job_ulp_listlen; 4848 4849 mutex_enter(&port->fp_mutex); 4850 offline = (port->fp_statec_busy || 4851 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4852 mutex_exit(&port->fp_mutex); 4853 4854 for (count = 0; count < listlen; count++) { 4855 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4856 sizeof (la_els_logi_t)); 4857 4858 ulp_pkt = job->job_ulp_pkts[count]; 4859 pd = ulp_pkt->pkt_pd; 4860 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4861 4862 if (offline) { 4863 done++; 4864 4865 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4866 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4867 ulp_pkt->pkt_pd = NULL; 4868 ulp_pkt->pkt_comp(ulp_pkt); 4869 4870 job->job_ulp_pkts[count] = NULL; 4871 4872 fp_jobdone(job); 4873 continue; 4874 } 4875 4876 if (pd == NULL) { 4877 pd = fctl_get_remote_port_by_did(port, d_id); 4878 if (pd == NULL) { 4879 /* reset later */ 4880 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4881 continue; 4882 } 4883 mutex_enter(&pd->pd_mutex); 4884 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4885 mutex_exit(&pd->pd_mutex); 4886 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4887 done++; 4888 ulp_pkt->pkt_comp(ulp_pkt); 4889 job->job_ulp_pkts[count] = NULL; 4890 fp_jobdone(job); 4891 } else { 4892 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4893 mutex_exit(&pd->pd_mutex); 4894 } 4895 continue; 4896 } 4897 4898 switch (ulp_pkt->pkt_state) { 4899 case FC_PKT_ELS_IN_PROGRESS: 4900 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4901 /* FALLTHRU */ 4902 case FC_PKT_LOCAL_RJT: 4903 done++; 4904 ulp_pkt->pkt_comp(ulp_pkt); 4905 job->job_ulp_pkts[count] = NULL; 4906 fp_jobdone(job); 4907 continue; 4908 default: 4909 break; 4910 } 4911 4912 /* 4913 * Validate the pd corresponding to the d_id passed 4914 * by the ULPs 4915 */ 4916 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4917 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4918 done++; 4919 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4920 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4921 ulp_pkt->pkt_pd = NULL; 4922 ulp_pkt->pkt_comp(ulp_pkt); 4923 job->job_ulp_pkts[count] = NULL; 4924 fp_jobdone(job); 4925 continue; 4926 } 4927 4928 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4929 "port=%p, pd=%p", port, pd); 4930 4931 mutex_enter(&pd->pd_mutex); 4932 4933 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4934 done++; 4935 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4936 4937 ls_code.ls_code = LA_ELS_ACC; 4938 ls_code.mbz = 0; 4939 4940 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4941 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4942 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4943 4944 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4945 (uint8_t *)&pd->pd_csp, 4946 (uint8_t *)&els_data->common_service, 4947 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4948 4949 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4950 (uint8_t *)&pd->pd_port_name, 4951 (uint8_t *)&els_data->nport_ww_name, 4952 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4953 4954 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4955 (uint8_t *)&pd->pd_clsp1, 4956 (uint8_t *)&els_data->class_1, 4957 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4958 4959 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4960 (uint8_t *)&pd->pd_clsp2, 4961 (uint8_t *)&els_data->class_2, 4962 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4963 4964 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4965 (uint8_t *)&pd->pd_clsp3, 4966 (uint8_t *)&els_data->class_3, 4967 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4968 4969 node = pd->pd_remote_nodep; 4970 pd->pd_login_count++; 4971 pd->pd_flags = PD_IDLE; 4972 ulp_pkt->pkt_pd = pd; 4973 mutex_exit(&pd->pd_mutex); 4974 4975 mutex_enter(&node->fd_mutex); 4976 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4977 (uint8_t *)&node->fd_node_name, 4978 (uint8_t *)(&els_data->node_ww_name), 4979 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4980 4981 4982 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4983 (uint8_t *)&node->fd_vv, 4984 (uint8_t *)(&els_data->vendor_version), 4985 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4986 4987 mutex_exit(&node->fd_mutex); 4988 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4989 } else { 4990 4991 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4992 mutex_exit(&pd->pd_mutex); 4993 } 4994 4995 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 4996 ulp_pkt->pkt_comp(ulp_pkt); 4997 job->job_ulp_pkts[count] = NULL; 4998 fp_jobdone(job); 4999 } 5000 } 5001 5002 if (done == listlen) { 5003 fp_jobwait(job); 5004 fctl_jobdone(job); 5005 return; 5006 } 5007 5008 job->job_counter = listlen - done; 5009 5010 for (count = 0; count < listlen; count++) { 5011 int cmd_flags; 5012 5013 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5014 continue; 5015 } 5016 5017 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5018 5019 cmd_flags = FP_CMD_PLOGI_RETAIN; 5020 5021 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5022 ASSERT(d_id != 0); 5023 5024 pd = fctl_get_remote_port_by_did(port, d_id); 5025 5026 /* 5027 * We need to properly adjust the port device 5028 * reference counter before we assign the pd 5029 * to the ULP packets port device pointer. 5030 */ 5031 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5032 mutex_enter(&pd->pd_mutex); 5033 pd->pd_ref_count++; 5034 mutex_exit(&pd->pd_mutex); 5035 FP_TRACE(FP_NHEAD1(3, 0), 5036 "fp_plogi_group: DID = 0x%x using new pd %p \ 5037 old pd NULL\n", d_id, pd); 5038 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5039 ulp_pkt->pkt_pd != pd) { 5040 mutex_enter(&pd->pd_mutex); 5041 pd->pd_ref_count++; 5042 mutex_exit(&pd->pd_mutex); 5043 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5044 ulp_pkt->pkt_pd->pd_ref_count--; 5045 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5046 FP_TRACE(FP_NHEAD1(3, 0), 5047 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5048 d_id, ulp_pkt->pkt_pd, pd); 5049 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5050 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5051 ulp_pkt->pkt_pd->pd_ref_count--; 5052 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5053 FP_TRACE(FP_NHEAD1(3, 0), 5054 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5055 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5056 } 5057 5058 ulp_pkt->pkt_pd = pd; 5059 5060 if (pd != NULL) { 5061 mutex_enter(&pd->pd_mutex); 5062 d_id = pd->pd_port_id.port_id; 5063 pd->pd_flags = PD_ELS_IN_PROGRESS; 5064 mutex_exit(&pd->pd_mutex); 5065 } else { 5066 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5067 #ifdef DEBUG 5068 pd = fctl_get_remote_port_by_did(port, d_id); 5069 ASSERT(pd == NULL); 5070 #endif 5071 /* 5072 * In the Fabric topology, use NS to create 5073 * port device, and if that fails still try 5074 * with PLOGI - which will make yet another 5075 * attempt to create after successful PLOGI 5076 */ 5077 mutex_enter(&port->fp_mutex); 5078 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5079 mutex_exit(&port->fp_mutex); 5080 pd = fp_create_remote_port_by_ns(port, 5081 d_id, KM_SLEEP); 5082 if (pd) { 5083 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5084 5085 mutex_enter(&pd->pd_mutex); 5086 pd->pd_flags = PD_ELS_IN_PROGRESS; 5087 mutex_exit(&pd->pd_mutex); 5088 5089 FP_TRACE(FP_NHEAD1(3, 0), 5090 "fp_plogi_group;" 5091 " NS created PD port=%p, job=%p," 5092 " pd=%p", port, job, pd); 5093 } 5094 } else { 5095 mutex_exit(&port->fp_mutex); 5096 } 5097 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5098 FP_TRACE(FP_NHEAD1(3, 0), 5099 "fp_plogi_group;" 5100 "ulp_pkt's pd is NULL, get a pd %p", 5101 pd); 5102 mutex_enter(&pd->pd_mutex); 5103 
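/*
 * The ULP packet had no remote port reference before, so take a
 * new hold on this pd on its behalf.
 */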
pd->pd_ref_count++; 5104 mutex_exit(&pd->pd_mutex); 5105 } 5106 ulp_pkt->pkt_pd = pd; 5107 } 5108 5109 rval = fp_port_login(port, d_id, job, cmd_flags, 5110 KM_SLEEP, pd, ulp_pkt); 5111 5112 if (rval == FC_SUCCESS) { 5113 continue; 5114 } 5115 5116 if (rval == FC_STATEC_BUSY) { 5117 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5118 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5119 } else { 5120 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5121 } 5122 5123 if (pd) { 5124 mutex_enter(&pd->pd_mutex); 5125 pd->pd_flags = PD_IDLE; 5126 mutex_exit(&pd->pd_mutex); 5127 } 5128 5129 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5130 ASSERT(pd != NULL); 5131 5132 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5133 " PD removed; port=%p, job=%p", port, job); 5134 5135 mutex_enter(&pd->pd_mutex); 5136 pd->pd_ref_count--; 5137 node = pd->pd_remote_nodep; 5138 mutex_exit(&pd->pd_mutex); 5139 5140 ASSERT(node != NULL); 5141 5142 if (fctl_destroy_remote_port(port, pd) == 0) { 5143 fctl_destroy_remote_node(node); 5144 } 5145 ulp_pkt->pkt_pd = NULL; 5146 } 5147 ulp_pkt->pkt_comp(ulp_pkt); 5148 fp_jobdone(job); 5149 } 5150 5151 fp_jobwait(job); 5152 fctl_jobdone(job); 5153 5154 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5155 port, job); 5156 } 5157 5158 5159 /* 5160 * Name server request initialization 5161 */ 5162 static void 5163 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5164 { 5165 int rval; 5166 int count; 5167 int size; 5168 5169 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5170 5171 job->job_counter = 1; 5172 job->job_result = FC_SUCCESS; 5173 5174 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5175 KM_SLEEP, NULL, NULL); 5176 5177 if (rval != FC_SUCCESS) { 5178 mutex_enter(&port->fp_mutex); 5179 port->fp_topology = FC_TOP_NO_NS; 5180 mutex_exit(&port->fp_mutex); 5181 return; 5182 } 5183 5184 fp_jobwait(job); 5185 5186 if (job->job_result != FC_SUCCESS) { 5187 mutex_enter(&port->fp_mutex); 5188 port->fp_topology = FC_TOP_NO_NS; 5189 mutex_exit(&port->fp_mutex); 5190 return; 5191 } 5192 5193 /* 5194 * At this time, we'll do NS registration for objects in the 5195 * ns_reg_cmds (see top of this file) array. 5196 * 5197 * Each time a ULP module registers with the transport, the 5198 * appropriate fc4 bit is set in the fc4 types and registered with 5199 * the NS for this support. Also, ULPs and FC admin utilities 5200 * may do registration for objects like IP address, symbolic 5201 * port/node name, Initial process associator at run time.
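 * The loop below issues one fp_ns_reg() request per ns_reg_cmds[] entry
 * against a shared job counter, and fp_jobwait() then blocks until each
 * of those registrations has either completed or failed.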
5202 */ 5203 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5204 job->job_counter = size; 5205 job->job_result = FC_SUCCESS; 5206 5207 for (count = 0; count < size; count++) { 5208 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5209 job, 0, sleep) != FC_SUCCESS) { 5210 fp_jobdone(job); 5211 } 5212 } 5213 if (size) { 5214 fp_jobwait(job); 5215 } 5216 5217 job->job_result = FC_SUCCESS; 5218 5219 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5220 5221 if (port->fp_dev_count < FP_MAX_DEVICES) { 5222 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5223 } 5224 5225 job->job_counter = 1; 5226 5227 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5228 sleep) == FC_SUCCESS) { 5229 fp_jobwait(job); 5230 } 5231 } 5232 5233 5234 /* 5235 * Name server finish: 5236 * Unregister for RSCNs 5237 * Unregister all the host port objects in the Name Server 5238 * Perform LOGO with the NS; 5239 */ 5240 static void 5241 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5242 { 5243 fp_cmd_t *cmd; 5244 uchar_t class; 5245 uint32_t s_id; 5246 fc_packet_t *pkt; 5247 la_els_logo_t payload; 5248 5249 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5250 5251 job->job_counter = 1; 5252 5253 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5254 FC_SUCCESS) { 5255 fp_jobdone(job); 5256 } 5257 fp_jobwait(job); 5258 5259 job->job_counter = 1; 5260 5261 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5262 fp_jobdone(job); 5263 } 5264 fp_jobwait(job); 5265 5266 job->job_counter = 1; 5267 5268 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5269 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5270 pkt = &cmd->cmd_pkt; 5271 5272 mutex_enter(&port->fp_mutex); 5273 class = port->fp_ns_login_class; 5274 s_id = port->fp_port_id.port_id; 5275 payload.nport_id = port->fp_port_id; 5276 mutex_exit(&port->fp_mutex); 5277 5278 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5279 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5280 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5281 cmd->cmd_retry_count = 1; 5282 cmd->cmd_ulp_pkt = NULL; 5283 5284 if (port->fp_npiv_type == FC_NPIV_PORT) { 5285 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5286 } else { 5287 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5288 } 5289 5290 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5291 5292 payload.ls_code.ls_code = LA_ELS_LOGO; 5293 payload.ls_code.mbz = 0; 5294 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5295 5296 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5297 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5298 5299 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5300 fp_iodone(cmd); 5301 } 5302 fp_jobwait(job); 5303 } 5304 5305 5306 /* 5307 * NS Registration function. 5308 * 5309 * It should be seriously noted that FC-GS-2 currently doesn't support 5310 * an Object Registration by a D_ID other than the owner of the object. 5311 * What we are aiming at currently is to at least allow Symbolic Node/Port 5312 * Name registration for any N_Port Identifier by the host software. 5313 * 5314 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5315 * function treats the request as Host NS Object. 
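 * In practice that means: when pd is NULL the object values come from
 * the local port's own service parameters and port identifier, while a
 * non-NULL pd causes them to be taken from that remote port (and, where
 * needed, its remote node) instead.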
5316 */ 5317 static int 5318 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5319 job_request_t *job, int polled, int sleep) 5320 { 5321 int rval; 5322 fc_portid_t s_id; 5323 fc_packet_t *pkt; 5324 fp_cmd_t *cmd; 5325 5326 if (pd == NULL) { 5327 mutex_enter(&port->fp_mutex); 5328 s_id = port->fp_port_id; 5329 mutex_exit(&port->fp_mutex); 5330 } else { 5331 mutex_enter(&pd->pd_mutex); 5332 s_id = pd->pd_port_id; 5333 mutex_exit(&pd->pd_mutex); 5334 } 5335 5336 if (polled) { 5337 job->job_counter = 1; 5338 } 5339 5340 switch (cmd_code) { 5341 case NS_RPN_ID: 5342 case NS_RNN_ID: { 5343 ns_rxn_req_t rxn; 5344 5345 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5346 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5347 if (cmd == NULL) { 5348 return (FC_NOMEM); 5349 } 5350 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5351 pkt = &cmd->cmd_pkt; 5352 5353 if (pd == NULL) { 5354 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5355 (port->fp_service_params.nport_ww_name) : 5356 (port->fp_service_params.node_ww_name)); 5357 } else { 5358 if (cmd_code == NS_RPN_ID) { 5359 mutex_enter(&pd->pd_mutex); 5360 rxn.rxn_xname = pd->pd_port_name; 5361 mutex_exit(&pd->pd_mutex); 5362 } else { 5363 fc_remote_node_t *node; 5364 5365 mutex_enter(&pd->pd_mutex); 5366 node = pd->pd_remote_nodep; 5367 mutex_exit(&pd->pd_mutex); 5368 5369 mutex_enter(&node->fd_mutex); 5370 rxn.rxn_xname = node->fd_node_name; 5371 mutex_exit(&node->fd_mutex); 5372 } 5373 } 5374 rxn.rxn_port_id = s_id; 5375 5376 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5377 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5378 sizeof (rxn), DDI_DEV_AUTOINCR); 5379 5380 break; 5381 } 5382 5383 case NS_RCS_ID: { 5384 ns_rcos_t rcos; 5385 5386 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5387 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5388 if (cmd == NULL) { 5389 return (FC_NOMEM); 5390 } 5391 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5392 pkt = &cmd->cmd_pkt; 5393 5394 if (pd == NULL) { 5395 rcos.rcos_cos = port->fp_cos; 5396 } else { 5397 mutex_enter(&pd->pd_mutex); 5398 rcos.rcos_cos = pd->pd_cos; 5399 mutex_exit(&pd->pd_mutex); 5400 } 5401 rcos.rcos_port_id = s_id; 5402 5403 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5404 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5405 sizeof (rcos), DDI_DEV_AUTOINCR); 5406 5407 break; 5408 } 5409 5410 case NS_RFT_ID: { 5411 ns_rfc_type_t rfc; 5412 5413 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5414 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5415 NULL); 5416 if (cmd == NULL) { 5417 return (FC_NOMEM); 5418 } 5419 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5420 pkt = &cmd->cmd_pkt; 5421 5422 if (pd == NULL) { 5423 mutex_enter(&port->fp_mutex); 5424 bcopy(port->fp_fc4_types, rfc.rfc_types, 5425 sizeof (port->fp_fc4_types)); 5426 mutex_exit(&port->fp_mutex); 5427 } else { 5428 mutex_enter(&pd->pd_mutex); 5429 bcopy(pd->pd_fc4types, rfc.rfc_types, 5430 sizeof (pd->pd_fc4types)); 5431 mutex_exit(&pd->pd_mutex); 5432 } 5433 rfc.rfc_port_id = s_id; 5434 5435 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5436 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5437 sizeof (rfc), DDI_DEV_AUTOINCR); 5438 5439 break; 5440 } 5441 5442 case NS_RSPN_ID: { 5443 uchar_t name_len; 5444 int pl_size; 5445 fc_portid_t spn; 5446 5447 if (pd == NULL) { 5448 mutex_enter(&port->fp_mutex); 5449 name_len = port->fp_sym_port_namelen; 5450 mutex_exit(&port->fp_mutex); 5451 } else { 5452 
mutex_enter(&pd->pd_mutex); 5453 name_len = pd->pd_spn_len; 5454 mutex_exit(&pd->pd_mutex); 5455 } 5456 5457 pl_size = sizeof (fc_portid_t) + name_len + 1; 5458 5459 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5460 sizeof (fc_reg_resp_t), sleep, NULL); 5461 if (cmd == NULL) { 5462 return (FC_NOMEM); 5463 } 5464 5465 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5466 5467 pkt = &cmd->cmd_pkt; 5468 5469 spn = s_id; 5470 5471 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5472 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5473 DDI_DEV_AUTOINCR); 5474 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5475 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5476 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5477 5478 if (pd == NULL) { 5479 mutex_enter(&port->fp_mutex); 5480 ddi_rep_put8(pkt->pkt_cmd_acc, 5481 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5482 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5483 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5484 mutex_exit(&port->fp_mutex); 5485 } else { 5486 mutex_enter(&pd->pd_mutex); 5487 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5488 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5489 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5490 mutex_exit(&pd->pd_mutex); 5491 } 5492 break; 5493 } 5494 5495 case NS_RPT_ID: { 5496 ns_rpt_t rpt; 5497 5498 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5499 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5500 if (cmd == NULL) { 5501 return (FC_NOMEM); 5502 } 5503 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5504 pkt = &cmd->cmd_pkt; 5505 5506 if (pd == NULL) { 5507 rpt.rpt_type = port->fp_port_type; 5508 } else { 5509 mutex_enter(&pd->pd_mutex); 5510 rpt.rpt_type = pd->pd_porttype; 5511 mutex_exit(&pd->pd_mutex); 5512 } 5513 rpt.rpt_port_id = s_id; 5514 5515 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5516 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5517 sizeof (rpt), DDI_DEV_AUTOINCR); 5518 5519 break; 5520 } 5521 5522 case NS_RIP_NN: { 5523 ns_rip_t rip; 5524 5525 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5526 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5527 if (cmd == NULL) { 5528 return (FC_NOMEM); 5529 } 5530 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5531 pkt = &cmd->cmd_pkt; 5532 5533 if (pd == NULL) { 5534 rip.rip_node_name = 5535 port->fp_service_params.node_ww_name; 5536 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5537 sizeof (port->fp_ip_addr)); 5538 } else { 5539 fc_remote_node_t *node; 5540 5541 /* 5542 * The most correct implementation should have the IP 5543 * address in the fc_remote_node_t structure; I believe 5544 * Node WWN and IP address should have one to one 5545 * correlation (but guess what this is changing in 5546 * FC-GS-2 latest draft) 5547 */ 5548 mutex_enter(&pd->pd_mutex); 5549 node = pd->pd_remote_nodep; 5550 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5551 sizeof (pd->pd_ip_addr)); 5552 mutex_exit(&pd->pd_mutex); 5553 5554 mutex_enter(&node->fd_mutex); 5555 rip.rip_node_name = node->fd_node_name; 5556 mutex_exit(&node->fd_mutex); 5557 } 5558 5559 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5560 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5561 sizeof (rip), DDI_DEV_AUTOINCR); 5562 5563 break; 5564 } 5565 5566 case NS_RIPA_NN: { 5567 ns_ipa_t ipa; 5568 5569 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5570 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5571 if (cmd == NULL) { 5572 return (FC_NOMEM); 5573 } 5574 
fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5575 pkt = &cmd->cmd_pkt; 5576 5577 if (pd == NULL) { 5578 ipa.ipa_node_name = 5579 port->fp_service_params.node_ww_name; 5580 bcopy(port->fp_ipa, ipa.ipa_value, 5581 sizeof (port->fp_ipa)); 5582 } else { 5583 fc_remote_node_t *node; 5584 5585 mutex_enter(&pd->pd_mutex); 5586 node = pd->pd_remote_nodep; 5587 mutex_exit(&pd->pd_mutex); 5588 5589 mutex_enter(&node->fd_mutex); 5590 ipa.ipa_node_name = node->fd_node_name; 5591 bcopy(node->fd_ipa, ipa.ipa_value, 5592 sizeof (node->fd_ipa)); 5593 mutex_exit(&node->fd_mutex); 5594 } 5595 5596 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5597 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5598 sizeof (ipa), DDI_DEV_AUTOINCR); 5599 5600 break; 5601 } 5602 5603 case NS_RSNN_NN: { 5604 uchar_t name_len; 5605 int pl_size; 5606 la_wwn_t snn; 5607 fc_remote_node_t *node = NULL; 5608 5609 if (pd == NULL) { 5610 mutex_enter(&port->fp_mutex); 5611 name_len = port->fp_sym_node_namelen; 5612 mutex_exit(&port->fp_mutex); 5613 } else { 5614 mutex_enter(&pd->pd_mutex); 5615 node = pd->pd_remote_nodep; 5616 mutex_exit(&pd->pd_mutex); 5617 5618 mutex_enter(&node->fd_mutex); 5619 name_len = node->fd_snn_len; 5620 mutex_exit(&node->fd_mutex); 5621 } 5622 5623 pl_size = sizeof (la_wwn_t) + name_len + 1; 5624 5625 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5626 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5627 if (cmd == NULL) { 5628 return (FC_NOMEM); 5629 } 5630 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5631 5632 pkt = &cmd->cmd_pkt; 5633 5634 bcopy(&port->fp_service_params.node_ww_name, 5635 &snn, sizeof (la_wwn_t)); 5636 5637 if (pd == NULL) { 5638 mutex_enter(&port->fp_mutex); 5639 ddi_rep_put8(pkt->pkt_cmd_acc, 5640 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5641 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5642 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5643 mutex_exit(&port->fp_mutex); 5644 } else { 5645 ASSERT(node != NULL); 5646 mutex_enter(&node->fd_mutex); 5647 ddi_rep_put8(pkt->pkt_cmd_acc, 5648 (uint8_t *)node->fd_snn, 5649 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5650 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5651 mutex_exit(&node->fd_mutex); 5652 } 5653 5654 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5655 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5656 sizeof (snn), DDI_DEV_AUTOINCR); 5657 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5658 (uint8_t *)(pkt->pkt_cmd 5659 + sizeof (fc_ct_header_t) + sizeof (snn)), 5660 1, DDI_DEV_AUTOINCR); 5661 5662 break; 5663 } 5664 5665 case NS_DA_ID: { 5666 ns_remall_t rall; 5667 char tmp[4] = {0}; 5668 char *ptr; 5669 5670 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5671 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5672 5673 if (cmd == NULL) { 5674 return (FC_NOMEM); 5675 } 5676 5677 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5678 pkt = &cmd->cmd_pkt; 5679 5680 ptr = (char *)(&s_id); 5681 tmp[3] = *ptr++; 5682 tmp[2] = *ptr++; 5683 tmp[1] = *ptr++; 5684 tmp[0] = *ptr; 5685 #if defined(_BIT_FIELDS_LTOH) 5686 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5687 #else 5688 rall.rem_port_id = s_id; 5689 #endif 5690 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5691 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5692 sizeof (rall), DDI_DEV_AUTOINCR); 5693 5694 break; 5695 } 5696 5697 default: 5698 return (FC_FAILURE); 5699 } 5700 5701 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5702 5703 if (rval != FC_SUCCESS) { 5704 
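/*
 * fp_sendcmd() failed, so no completion routine will be called for
 * this packet; note the failure in the job and complete the command
 * right here.
 */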
job->job_result = rval; 5705 fp_iodone(cmd); 5706 } 5707 5708 if (polled) { 5709 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5710 fp_jobwait(job); 5711 } else { 5712 rval = FC_SUCCESS; 5713 } 5714 5715 return (rval); 5716 } 5717 5718 5719 /* 5720 * Common interrupt handler 5721 */ 5722 static int 5723 fp_common_intr(fc_packet_t *pkt, int iodone) 5724 { 5725 int rval = FC_FAILURE; 5726 fp_cmd_t *cmd; 5727 fc_local_port_t *port; 5728 5729 cmd = pkt->pkt_ulp_private; 5730 port = cmd->cmd_port; 5731 5732 /* 5733 * Fail fast the upper layer requests if 5734 * a state change has occurred amidst. 5735 */ 5736 mutex_enter(&port->fp_mutex); 5737 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5738 mutex_exit(&port->fp_mutex); 5739 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5740 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5741 } else if (!(port->fp_soft_state & 5742 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5743 mutex_exit(&port->fp_mutex); 5744 5745 switch (pkt->pkt_state) { 5746 case FC_PKT_LOCAL_BSY: 5747 case FC_PKT_FABRIC_BSY: 5748 case FC_PKT_NPORT_BSY: 5749 case FC_PKT_TIMEOUT: 5750 cmd->cmd_retry_interval = (pkt->pkt_state == 5751 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5752 rval = fp_retry_cmd(pkt); 5753 break; 5754 5755 case FC_PKT_FABRIC_RJT: 5756 case FC_PKT_NPORT_RJT: 5757 case FC_PKT_LOCAL_RJT: 5758 case FC_PKT_LS_RJT: 5759 case FC_PKT_FS_RJT: 5760 case FC_PKT_BA_RJT: 5761 rval = fp_handle_reject(pkt); 5762 break; 5763 5764 default: 5765 if (pkt->pkt_resp_resid) { 5766 cmd->cmd_retry_interval = 0; 5767 rval = fp_retry_cmd(pkt); 5768 } 5769 break; 5770 } 5771 } else { 5772 mutex_exit(&port->fp_mutex); 5773 } 5774 5775 if (rval != FC_SUCCESS && iodone) { 5776 fp_iodone(cmd); 5777 rval = FC_SUCCESS; 5778 } 5779 5780 return (rval); 5781 } 5782 5783 5784 /* 5785 * Some not so long winding theory on point to point topology: 5786 * 5787 * In the ACC payload, if the D_ID is ZERO and the common service 5788 * parameters indicate N_Port, then the topology is POINT TO POINT. 5789 * 5790 * In a point to point topology with an N_Port, during Fabric Login, 5791 * the destination N_Port will check with our WWN and decide if it 5792 * needs to issue PLOGI or not. That means, FLOGI could potentially 5793 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5794 * PLOGI creates the device handles. 5795 * 5796 * Assuming that the host port WWN is greater than the other N_Port 5797 * WWN, then we become the master (be aware that this isn't the word 5798 * used in the FC standards) and initiate the PLOGI. 5799 * 5800 */ 5801 static void 5802 fp_flogi_intr(fc_packet_t *pkt) 5803 { 5804 int state; 5805 int f_port; 5806 uint32_t s_id; 5807 uint32_t d_id; 5808 fp_cmd_t *cmd; 5809 fc_local_port_t *port; 5810 la_wwn_t *swwn; 5811 la_wwn_t dwwn; 5812 la_wwn_t nwwn; 5813 fc_remote_port_t *pd; 5814 la_els_logi_t *acc; 5815 com_svc_t csp; 5816 ls_code_t resp; 5817 5818 cmd = pkt->pkt_ulp_private; 5819 port = cmd->cmd_port; 5820 5821 mutex_enter(&port->fp_mutex); 5822 port->fp_out_fpcmds--; 5823 mutex_exit(&port->fp_mutex); 5824 5825 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5826 port, pkt, pkt->pkt_state); 5827 5828 if (FP_IS_PKT_ERROR(pkt)) { 5829 (void) fp_common_intr(pkt, 1); 5830 return; 5831 } 5832 5833 /* 5834 * Currently, we don't need to swap bytes here because qlc is faking the 5835 * response for us and so endianness is getting taken care of. 
But we 5836 * have to fix this and generalize this at some point 5837 */ 5838 acc = (la_els_logi_t *)pkt->pkt_resp; 5839 5840 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5841 sizeof (resp), DDI_DEV_AUTOINCR); 5842 5843 ASSERT(resp.ls_code == LA_ELS_ACC); 5844 if (resp.ls_code != LA_ELS_ACC) { 5845 (void) fp_common_intr(pkt, 1); 5846 return; 5847 } 5848 5849 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5850 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5851 5852 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5853 5854 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5855 5856 mutex_enter(&port->fp_mutex); 5857 state = FC_PORT_STATE_MASK(port->fp_state); 5858 mutex_exit(&port->fp_mutex); 5859 5860 if (pkt->pkt_resp_fhdr.d_id == 0) { 5861 if (f_port == 0 && state != FC_STATE_LOOP) { 5862 swwn = &port->fp_service_params.nport_ww_name; 5863 5864 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5865 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5866 DDI_DEV_AUTOINCR); 5867 5868 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5869 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5870 DDI_DEV_AUTOINCR); 5871 5872 mutex_enter(&port->fp_mutex); 5873 5874 port->fp_topology = FC_TOP_PT_PT; 5875 port->fp_total_devices = 1; 5876 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5877 port->fp_ptpt_master = 1; 5878 /* 5879 * Let us choose 'X' as S_ID and 'Y' 5880 * as D_ID and that'll work; hopefully 5881 * If not, it will get changed. 5882 */ 5883 s_id = port->fp_instance + FP_DEFAULT_SID; 5884 d_id = port->fp_instance + FP_DEFAULT_DID; 5885 port->fp_port_id.port_id = s_id; 5886 mutex_exit(&port->fp_mutex); 5887 5888 pd = fctl_create_remote_port(port, 5889 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5890 KM_NOSLEEP); 5891 if (pd == NULL) { 5892 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5893 0, NULL, "couldn't create device" 5894 " d_id=%X", d_id); 5895 fp_iodone(cmd); 5896 return; 5897 } 5898 5899 cmd->cmd_pkt.pkt_tran_flags = 5900 pkt->pkt_tran_flags; 5901 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5902 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5903 cmd->cmd_retry_count = fp_retry_count; 5904 5905 fp_xlogi_init(port, cmd, s_id, d_id, 5906 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5907 5908 (&cmd->cmd_pkt)->pkt_pd = pd; 5909 5910 /* 5911 * We've just created this fc_remote_port_t, and 5912 * we're about to use it to send a PLOGI, so 5913 * bump the reference count right now. When 5914 * the packet is freed, the reference count will 5915 * be decremented. The ULP may also start using 5916 * it, so mark it as given away as well. 
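 * (This is the point-to-point master path: the same command packet is
 * reused to carry the PLOGI that fp_xlogi_init() set up above, so the
 * reference taken here is dropped again whenever that packet is freed.)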
5917 */ 5918 pd->pd_ref_count++; 5919 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5920 5921 if (fp_sendcmd(port, cmd, 5922 port->fp_fca_handle) == FC_SUCCESS) { 5923 return; 5924 } 5925 } else { 5926 /* 5927 * The device handles will be created when the 5928 * unsolicited PLOGI is completed successfully 5929 */ 5930 port->fp_ptpt_master = 0; 5931 mutex_exit(&port->fp_mutex); 5932 } 5933 } 5934 pkt->pkt_state = FC_PKT_FAILURE; 5935 } else { 5936 if (f_port) { 5937 mutex_enter(&port->fp_mutex); 5938 if (state == FC_STATE_LOOP) { 5939 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5940 } else { 5941 port->fp_topology = FC_TOP_FABRIC; 5942 5943 ddi_rep_get8(pkt->pkt_resp_acc, 5944 (uint8_t *)&port->fp_fabric_name, 5945 (uint8_t *)&acc->node_ww_name, 5946 sizeof (la_wwn_t), 5947 DDI_DEV_AUTOINCR); 5948 } 5949 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5950 mutex_exit(&port->fp_mutex); 5951 } else { 5952 pkt->pkt_state = FC_PKT_FAILURE; 5953 } 5954 } 5955 fp_iodone(cmd); 5956 } 5957 5958 5959 /* 5960 * Handle solicited PLOGI response 5961 */ 5962 static void 5963 fp_plogi_intr(fc_packet_t *pkt) 5964 { 5965 int nl_port; 5966 int bailout; 5967 uint32_t d_id; 5968 fp_cmd_t *cmd; 5969 la_els_logi_t *acc; 5970 fc_local_port_t *port; 5971 fc_remote_port_t *pd; 5972 la_wwn_t nwwn; 5973 la_wwn_t pwwn; 5974 ls_code_t resp; 5975 5976 nl_port = 0; 5977 cmd = pkt->pkt_ulp_private; 5978 port = cmd->cmd_port; 5979 d_id = pkt->pkt_cmd_fhdr.d_id; 5980 5981 #ifndef __lock_lint 5982 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5983 #endif 5984 5985 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5986 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5987 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5988 5989 /* 5990 * Bail out early on ULP initiated requests if the 5991 * state change has occurred 5992 */ 5993 mutex_enter(&port->fp_mutex); 5994 port->fp_out_fpcmds--; 5995 bailout = ((port->fp_statec_busy || 5996 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 5997 cmd->cmd_ulp_pkt) ? 1 : 0; 5998 mutex_exit(&port->fp_mutex); 5999 6000 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6001 int skip_msg = 0; 6002 int giveup = 0; 6003 6004 if (cmd->cmd_ulp_pkt) { 6005 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6006 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6007 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6008 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6009 } 6010 6011 /* 6012 * If an unsolicited cross login already created 6013 * a device speed up the discovery by not retrying 6014 * the command mindlessly. 6015 */ 6016 if (pkt->pkt_pd == NULL && 6017 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6018 fp_iodone(cmd); 6019 return; 6020 } 6021 6022 if (pkt->pkt_pd != NULL) { 6023 giveup = (pkt->pkt_pd->pd_recepient == 6024 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6025 if (giveup) { 6026 /* 6027 * This pd is marked as plogi 6028 * recipient, stop retrying 6029 */ 6030 FP_TRACE(FP_NHEAD1(3, 0), 6031 "fp_plogi_intr: stop retry as" 6032 " a cross login was accepted" 6033 " from d_id=%x, port=%p.", 6034 d_id, port); 6035 fp_iodone(cmd); 6036 return; 6037 } 6038 } 6039 6040 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6041 return; 6042 } 6043 6044 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6045 mutex_enter(&pd->pd_mutex); 6046 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6047 skip_msg++; 6048 } 6049 mutex_exit(&pd->pd_mutex); 6050 } 6051 6052 mutex_enter(&port->fp_mutex); 6053 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6054 port->fp_statec_busy <= 1 && 6055 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6056 mutex_exit(&port->fp_mutex); 6057 /* 6058 * In case of Login Collisions, JNI HBAs returns the 6059 * FC pkt back to the Initiator with the state set to 6060 * FC_PKT_LS_RJT and reason to FC_REASON_LOGICAL_ERROR. 6061 * QLC HBAs handles such cases in the FW and doesnot 6062 * return the LS_RJT with Logical error when 6063 * login collision happens. 6064 */ 6065 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6066 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6067 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6068 "PLOGI to %x failed", d_id); 6069 } 6070 FP_TRACE(FP_NHEAD2(9, 0), 6071 "PLOGI to %x failed. state=%x reason=%x.", 6072 d_id, pkt->pkt_state, pkt->pkt_reason); 6073 } else { 6074 mutex_exit(&port->fp_mutex); 6075 } 6076 6077 fp_iodone(cmd); 6078 return; 6079 } 6080 6081 acc = (la_els_logi_t *)pkt->pkt_resp; 6082 6083 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6084 sizeof (resp), DDI_DEV_AUTOINCR); 6085 6086 ASSERT(resp.ls_code == LA_ELS_ACC); 6087 if (resp.ls_code != LA_ELS_ACC) { 6088 (void) fp_common_intr(pkt, 1); 6089 return; 6090 } 6091 6092 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6093 mutex_enter(&port->fp_mutex); 6094 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6095 mutex_exit(&port->fp_mutex); 6096 fp_iodone(cmd); 6097 return; 6098 } 6099 6100 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6101 6102 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6103 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6104 DDI_DEV_AUTOINCR); 6105 6106 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6107 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6108 DDI_DEV_AUTOINCR); 6109 6110 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6111 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6112 6113 if ((pd = pkt->pkt_pd) == NULL) { 6114 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6115 if (pd == NULL) { 6116 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6117 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6118 if (pd == NULL) { 6119 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6120 "couldn't create port device handles" 6121 " d_id=%x", d_id); 6122 fp_iodone(cmd); 6123 return; 6124 } 6125 } else { 6126 fc_remote_port_t *tmp_pd; 6127 6128 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6129 if (tmp_pd != NULL) { 6130 fp_iodone(cmd); 6131 return; 6132 } 6133 6134 mutex_enter(&port->fp_mutex); 6135 mutex_enter(&pd->pd_mutex); 6136 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6137 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6138 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6139 } 6140 6141 if (pd->pd_type == PORT_DEVICE_OLD) { 6142 if (pd->pd_port_id.port_id != d_id) { 6143 fctl_delist_did_table(port, pd); 6144 pd->pd_type = PORT_DEVICE_CHANGED; 6145 pd->pd_port_id.port_id = 
d_id; 6146 } else { 6147 pd->pd_type = PORT_DEVICE_NOCHANGE; 6148 } 6149 } 6150 6151 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6152 char ww_name[17]; 6153 6154 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6155 6156 mutex_exit(&pd->pd_mutex); 6157 mutex_exit(&port->fp_mutex); 6158 FP_TRACE(FP_NHEAD2(9, 0), 6159 "Possible Duplicate name or address" 6160 " identifiers in the PLOGI response" 6161 " D_ID=%x, PWWN=%s: Please check the" 6162 " configuration", d_id, ww_name); 6163 fp_iodone(cmd); 6164 return; 6165 } 6166 fctl_enlist_did_table(port, pd); 6167 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6168 mutex_exit(&pd->pd_mutex); 6169 mutex_exit(&port->fp_mutex); 6170 } 6171 } else { 6172 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6173 6174 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6175 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6176 6177 mutex_enter(&port->fp_mutex); 6178 mutex_enter(&pd->pd_mutex); 6179 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6180 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6181 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6182 pd->pd_type); 6183 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6184 pd->pd_type == PORT_DEVICE_OLD) || 6185 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6186 pd->pd_type = PORT_DEVICE_NOCHANGE; 6187 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6188 pd->pd_type = PORT_DEVICE_NEW; 6189 } 6190 } else { 6191 char old_name[17]; 6192 char new_name[17]; 6193 6194 fc_wwn_to_str(&pd->pd_port_name, old_name); 6195 fc_wwn_to_str(&pwwn, new_name); 6196 6197 FP_TRACE(FP_NHEAD1(9, 0), 6198 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6199 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6200 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6201 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6202 cmd->cmd_ulp_pkt, bailout); 6203 6204 FP_TRACE(FP_NHEAD2(9, 0), 6205 "PWWN of a device with D_ID=%x changed." 6206 " New PWWN = %s, OLD PWWN = %s", d_id, 6207 new_name, old_name); 6208 6209 if (cmd->cmd_ulp_pkt && !bailout) { 6210 fc_remote_node_t *rnodep; 6211 fc_portmap_t *changelist; 6212 fc_portmap_t *listptr; 6213 int len = 1; 6214 /* # entries in changelist */ 6215 6216 fctl_delist_pwwn_table(port, pd); 6217 6218 /* 6219 * Lets now check if there already is a pd with 6220 * this new WWN in the table. If so, we'll mark 6221 * it as invalid 6222 */ 6223 6224 if (new_wwn_pd) { 6225 /* 6226 * There is another pd with in the pwwn 6227 * table with the same WWN that we got 6228 * in the PLOGI payload. We have to get 6229 * it out of the pwwn table, update the 6230 * pd's state (fp_fillout_old_map does 6231 * this for us) and add it to the 6232 * changelist that goes up to ULPs. 6233 * 6234 * len is length of changelist and so 6235 * increment it. 6236 */ 6237 len++; 6238 6239 if (tmp_pd != pd) { 6240 /* 6241 * Odd case where pwwn and did 6242 * tables are out of sync but 6243 * we will handle that too. See 6244 * more comments below. 6245 * 6246 * One more device that ULPs 6247 * should know about and so len 6248 * gets incremented again. 6249 */ 6250 len++; 6251 } 6252 6253 listptr = changelist = kmem_zalloc(len * 6254 sizeof (*changelist), KM_SLEEP); 6255 6256 mutex_enter(&new_wwn_pd->pd_mutex); 6257 rnodep = new_wwn_pd->pd_remote_nodep; 6258 mutex_exit(&new_wwn_pd->pd_mutex); 6259 6260 /* 6261 * Hold the fd_mutex since 6262 * fctl_copy_portmap_held expects it. 
6263 * Preserve lock hierarchy by grabbing 6264 * fd_mutex before pd_mutex 6265 */ 6266 if (rnodep) { 6267 mutex_enter(&rnodep->fd_mutex); 6268 } 6269 mutex_enter(&new_wwn_pd->pd_mutex); 6270 fp_fillout_old_map_held(listptr++, 6271 new_wwn_pd, 0); 6272 mutex_exit(&new_wwn_pd->pd_mutex); 6273 if (rnodep) { 6274 mutex_exit(&rnodep->fd_mutex); 6275 } 6276 6277 /* 6278 * Safety check : 6279 * Lets ensure that the pwwn and did 6280 * tables are in sync. Ideally, we 6281 * should not find that these two pd's 6282 * are different. 6283 */ 6284 if (tmp_pd != pd) { 6285 mutex_enter(&tmp_pd->pd_mutex); 6286 rnodep = 6287 tmp_pd->pd_remote_nodep; 6288 mutex_exit(&tmp_pd->pd_mutex); 6289 6290 /* As above grab fd_mutex */ 6291 if (rnodep) { 6292 mutex_enter(&rnodep-> 6293 fd_mutex); 6294 } 6295 mutex_enter(&tmp_pd->pd_mutex); 6296 6297 fp_fillout_old_map_held( 6298 listptr++, tmp_pd, 0); 6299 6300 mutex_exit(&tmp_pd->pd_mutex); 6301 if (rnodep) { 6302 mutex_exit(&rnodep-> 6303 fd_mutex); 6304 } 6305 6306 /* 6307 * Now add "pd" (not tmp_pd) 6308 * to fp_did_table to sync it up 6309 * with fp_pwwn_table 6310 * 6311 * pd->pd_mutex is already held 6312 * at this point 6313 */ 6314 fctl_enlist_did_table(port, pd); 6315 } 6316 } else { 6317 listptr = changelist = kmem_zalloc( 6318 sizeof (*changelist), KM_SLEEP); 6319 } 6320 6321 ASSERT(changelist != NULL); 6322 6323 fp_fillout_changed_map(listptr, pd, &d_id, 6324 &pwwn); 6325 fctl_enlist_pwwn_table(port, pd); 6326 6327 mutex_exit(&pd->pd_mutex); 6328 mutex_exit(&port->fp_mutex); 6329 6330 fp_iodone(cmd); 6331 6332 (void) fp_ulp_devc_cb(port, changelist, len, 6333 len, KM_NOSLEEP, 0); 6334 6335 return; 6336 } 6337 } 6338 6339 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6340 nl_port = 1; 6341 } 6342 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6343 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6344 } 6345 6346 mutex_exit(&pd->pd_mutex); 6347 mutex_exit(&port->fp_mutex); 6348 6349 if (tmp_pd == NULL) { 6350 mutex_enter(&port->fp_mutex); 6351 mutex_enter(&pd->pd_mutex); 6352 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6353 char ww_name[17]; 6354 6355 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6356 mutex_exit(&pd->pd_mutex); 6357 mutex_exit(&port->fp_mutex); 6358 FP_TRACE(FP_NHEAD2(9, 0), 6359 "Possible Duplicate name or address" 6360 " identifiers in the PLOGI response" 6361 " D_ID=%x, PWWN=%s: Please check the" 6362 " configuration", d_id, ww_name); 6363 fp_iodone(cmd); 6364 return; 6365 } 6366 fctl_enlist_did_table(port, pd); 6367 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6368 mutex_exit(&pd->pd_mutex); 6369 mutex_exit(&port->fp_mutex); 6370 } 6371 } 6372 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6373 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6374 6375 if (cmd->cmd_ulp_pkt) { 6376 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6377 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6378 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6379 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6380 if (pd != NULL) { 6381 FP_TRACE(FP_NHEAD1(9, 0), 6382 "fp_plogi_intr;" 6383 "ulp_pkt's pd is NULL, get a pd %p", 6384 pd); 6385 mutex_enter(&pd->pd_mutex); 6386 pd->pd_ref_count++; 6387 mutex_exit(&pd->pd_mutex); 6388 } 6389 cmd->cmd_ulp_pkt->pkt_pd = pd; 6390 } 6391 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6392 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6393 sizeof (fc_frame_hdr_t)); 6394 bcopy((caddr_t)pkt->pkt_resp, 6395 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6396 sizeof (la_els_logi_t)); 6397 } 6398 6399 mutex_enter(&port->fp_mutex); 6400 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6401 
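/*
 * On a private loop (or when the remote port is an NL_Port), follow
 * the PLOGI up with an ADISC by reusing this command packet, so that
 * the device's hard address can be cross-checked in fp_adisc_intr().
 */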
mutex_enter(&pd->pd_mutex); 6402 6403 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6404 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6405 cmd->cmd_retry_count = fp_retry_count; 6406 6407 /* 6408 * If the fc_remote_port_t pointer is not set in the given 6409 * fc_packet_t, then this fc_remote_port_t must have just 6410 * been created. Save the pointer and also increment the 6411 * fc_remote_port_t reference count. 6412 */ 6413 if (pkt->pkt_pd == NULL) { 6414 pkt->pkt_pd = pd; 6415 pd->pd_ref_count++; /* It's in use! */ 6416 } 6417 6418 fp_adisc_init(cmd, cmd->cmd_job); 6419 6420 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6421 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6422 6423 mutex_exit(&pd->pd_mutex); 6424 mutex_exit(&port->fp_mutex); 6425 6426 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6427 return; 6428 } 6429 } else { 6430 mutex_exit(&port->fp_mutex); 6431 } 6432 6433 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6434 mutex_enter(&port->fp_mutex); 6435 mutex_enter(&pd->pd_mutex); 6436 6437 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6438 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6439 cmd->cmd_retry_count = fp_retry_count; 6440 6441 fp_logo_init(pd, cmd, cmd->cmd_job); 6442 6443 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6444 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6445 6446 mutex_exit(&pd->pd_mutex); 6447 mutex_exit(&port->fp_mutex); 6448 6449 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6450 return; 6451 } 6452 6453 } 6454 fp_iodone(cmd); 6455 } 6456 6457 6458 /* 6459 * Handle solicited ADISC response 6460 */ 6461 static void 6462 fp_adisc_intr(fc_packet_t *pkt) 6463 { 6464 int rval; 6465 int bailout; 6466 fp_cmd_t *cmd; 6467 fc_local_port_t *port; 6468 fc_remote_port_t *pd; 6469 la_els_adisc_t *acc; 6470 ls_code_t resp; 6471 fc_hardaddr_t ha; 6472 fc_portmap_t *changelist; 6473 int initiator, adiscfail = 0; 6474 6475 pd = pkt->pkt_pd; 6476 cmd = pkt->pkt_ulp_private; 6477 port = cmd->cmd_port; 6478 6479 #ifndef __lock_lint 6480 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6481 #endif 6482 6483 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6484 6485 mutex_enter(&port->fp_mutex); 6486 port->fp_out_fpcmds--; 6487 bailout = ((port->fp_statec_busy || 6488 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6489 cmd->cmd_ulp_pkt) ? 1 : 0; 6490 mutex_exit(&port->fp_mutex); 6491 6492 if (bailout) { 6493 fp_iodone(cmd); 6494 return; 6495 } 6496 6497 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6498 acc = (la_els_adisc_t *)pkt->pkt_resp; 6499 6500 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6501 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6502 6503 if (resp.ls_code == LA_ELS_ACC) { 6504 int is_private; 6505 6506 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6507 (uint8_t *)&acc->hard_addr, sizeof (ha), 6508 DDI_DEV_AUTOINCR); 6509 6510 mutex_enter(&port->fp_mutex); 6511 6512 is_private = 6513 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6514 6515 mutex_enter(&pd->pd_mutex); 6516 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6517 fctl_enlist_did_table(port, pd); 6518 } 6519 mutex_exit(&pd->pd_mutex); 6520 6521 mutex_exit(&port->fp_mutex); 6522 6523 mutex_enter(&pd->pd_mutex); 6524 if (pd->pd_type != PORT_DEVICE_NEW) { 6525 if (is_private && (pd->pd_hard_addr.hard_addr != 6526 ha.hard_addr)) { 6527 pd->pd_type = PORT_DEVICE_CHANGED; 6528 } else { 6529 pd->pd_type = PORT_DEVICE_NOCHANGE; 6530 } 6531 } 6532 6533 if (is_private && (ha.hard_addr && 6534 pd->pd_port_id.port_id != ha.hard_addr)) { 6535 char ww_name[17]; 6536 6537 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6538 6539 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6540 "NL_Port Identifier %x doesn't match" 6541 " with Hard Address %x, Will use Port" 6542 " WWN %s", pd->pd_port_id.port_id, 6543 ha.hard_addr, ww_name); 6544 6545 pd->pd_hard_addr.hard_addr = 0; 6546 } else { 6547 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6548 } 6549 mutex_exit(&pd->pd_mutex); 6550 } else { 6551 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6552 return; 6553 } 6554 } 6555 } else { 6556 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6557 return; 6558 } 6559 6560 mutex_enter(&port->fp_mutex); 6561 if (port->fp_statec_busy <= 1) { 6562 mutex_exit(&port->fp_mutex); 6563 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6564 "ADISC to %x failed, cmd_flags=%x", 6565 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6566 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6567 adiscfail = 1; 6568 } else { 6569 mutex_exit(&port->fp_mutex); 6570 } 6571 } 6572 6573 if (cmd->cmd_ulp_pkt) { 6574 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6575 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6576 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6577 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6578 cmd->cmd_ulp_pkt->pkt_pd = pd; 6579 FP_TRACE(FP_NHEAD1(9, 0), 6580 "fp_adisc__intr;" 6581 "ulp_pkt's pd is NULL, get a pd %p", 6582 pd); 6583 6584 } 6585 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6586 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6587 sizeof (fc_frame_hdr_t)); 6588 bcopy((caddr_t)pkt->pkt_resp, 6589 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6590 sizeof (la_els_logi_t)); 6591 } 6592 6593 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6594 FP_TRACE(FP_NHEAD1(9, 0), 6595 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6596 "fp_retry_count=%x, ulp_pkt=%p", 6597 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6598 6599 mutex_enter(&port->fp_mutex); 6600 mutex_enter(&pd->pd_mutex); 6601 6602 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6603 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6604 cmd->cmd_retry_count = fp_retry_count; 6605 6606 fp_logo_init(pd, cmd, cmd->cmd_job); 6607 6608 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6609 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6610 6611 mutex_exit(&pd->pd_mutex); 6612 mutex_exit(&port->fp_mutex); 6613 6614 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6615 if (adiscfail) { 6616 mutex_enter(&pd->pd_mutex); 6617 initiator = 6618 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 6619 pd->pd_state = PORT_DEVICE_VALID; 6620 pd->pd_aux_flags |= PD_LOGGED_OUT; 6621 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6622 pd->pd_type = PORT_DEVICE_NEW; 6623 } else { 6624 pd->pd_type = PORT_DEVICE_NOCHANGE; 6625 } 6626 mutex_exit(&pd->pd_mutex); 6627 6628 changelist = 6629 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6630 6631 if (initiator) { 6632 fp_unregister_login(pd); 6633 fctl_copy_portmap(changelist, pd); 6634 } else { 6635 fp_fillout_old_map(changelist, pd, 0); 6636 } 6637 6638 FP_TRACE(FP_NHEAD1(9, 0), 6639 "fp_adisc_intr: Dev change notification " 6640 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6641 "map_flags=%x initiator=%d", port, pd, 6642 changelist->map_type, changelist->map_state, 6643 changelist->map_flags, initiator); 6644 6645 (void) fp_ulp_devc_cb(port, changelist, 6646 1, 1, KM_SLEEP, 0); 6647 } 6648 if (rval == FC_SUCCESS) { 6649 return; 6650 } 6651 } 6652 fp_iodone(cmd); 6653 } 6654 6655 6656 /* 6657 * Handle solicited LOGO response 6658 */ 6659 static void 6660 fp_logo_intr(fc_packet_t *pkt) 6661 { 6662 ls_code_t resp; 6663 6664 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6665 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6666 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6667 6668 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6669 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6670 6671 if (FP_IS_PKT_ERROR(pkt)) { 6672 (void) fp_common_intr(pkt, 1); 6673 return; 6674 } 6675 6676 ASSERT(resp.ls_code == LA_ELS_ACC); 6677 if (resp.ls_code != LA_ELS_ACC) { 6678 (void) fp_common_intr(pkt, 1); 6679 return; 6680 } 6681 6682 if (pkt->pkt_pd != NULL) { 6683 fp_unregister_login(pkt->pkt_pd); 6684 } 6685 6686 fp_iodone(pkt->pkt_ulp_private); 6687 } 6688 6689 6690 /* 6691 * Handle solicited RNID response 6692 */ 6693 static void 6694 fp_rnid_intr(fc_packet_t *pkt) 6695 { 6696 ls_code_t resp; 6697 job_request_t *job; 6698 fp_cmd_t *cmd; 6699 la_els_rnid_acc_t *acc; 6700 6701 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6702 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6703 cmd = pkt->pkt_ulp_private; 6704 6705 mutex_enter(&cmd->cmd_port->fp_mutex); 6706 cmd->cmd_port->fp_out_fpcmds--; 6707 mutex_exit(&cmd->cmd_port->fp_mutex); 6708 6709 job = cmd->cmd_job; 6710 ASSERT(job->job_private != NULL); 6711 6712 /* If failure or LS_RJT then retry the packet, if needed */ 6713 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6714 (void) fp_common_intr(pkt, 1); 6715 return; 6716 } 6717 6718 /* Save node_id memory allocated in ioctl code */ 6719 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6720 6721 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6722 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6723 6724 /* wakeup the ioctl thread and free the pkt */ 6725 fp_iodone(cmd); 6726 } 6727 6728 6729 /* 6730 * Handle solicited RLS response 6731 */ 6732 static void 6733 fp_rls_intr(fc_packet_t *pkt) 6734 { 6735 ls_code_t resp; 6736 job_request_t *job; 6737 fp_cmd_t *cmd; 6738 la_els_rls_acc_t *acc; 6739 6740 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6741 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6742 cmd = pkt->pkt_ulp_private; 6743 6744 mutex_enter(&cmd->cmd_port->fp_mutex); 6745 cmd->cmd_port->fp_out_fpcmds--; 6746 mutex_exit(&cmd->cmd_port->fp_mutex); 6747 6748 job = cmd->cmd_job; 6749 ASSERT(job->job_private != NULL); 6750 6751 /* If failure or LS_RJT then retry the packet, if needed */ 
6752 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6753 (void) fp_common_intr(pkt, 1); 6754 return; 6755 } 6756 6757 /* Save link error status block in memory allocated in ioctl code */ 6758 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6759 6760 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6761 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6762 DDI_DEV_AUTOINCR); 6763 6764 /* wakeup the ioctl thread and free the pkt */ 6765 fp_iodone(cmd); 6766 } 6767 6768 6769 /* 6770 * A solicited command completion interrupt (mostly for commands 6771 * that require almost no post processing such as SCR ELS) 6772 */ 6773 static void 6774 fp_intr(fc_packet_t *pkt) 6775 { 6776 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6777 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6778 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6779 6780 if (FP_IS_PKT_ERROR(pkt)) { 6781 (void) fp_common_intr(pkt, 1); 6782 return; 6783 } 6784 fp_iodone(pkt->pkt_ulp_private); 6785 } 6786 6787 6788 /* 6789 * Handle the underlying port's state change 6790 */ 6791 static void 6792 fp_statec_cb(opaque_t port_handle, uint32_t state) 6793 { 6794 fc_local_port_t *port = port_handle; 6795 job_request_t *job; 6796 6797 /* 6798 * If it is not possible to process the callbacks 6799 * just drop the callback on the floor; Don't bother 6800 * to do something that isn't safe at this time 6801 */ 6802 mutex_enter(&port->fp_mutex); 6803 if ((port->fp_soft_state & 6804 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6805 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6806 mutex_exit(&port->fp_mutex); 6807 return; 6808 } 6809 6810 if (port->fp_statec_busy == 0) { 6811 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6812 #ifdef DEBUG 6813 } else { 6814 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6815 #endif 6816 } 6817 6818 port->fp_statec_busy++; 6819 6820 /* 6821 * For now, force the trusted method of device authentication (by 6822 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
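	 * A LIP is treated here as a transition to LOOP state at the
	 * current link speed, and the remote ports are first marked
	 * offline so that they are re-verified with explicit PLOGIs.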
6823 */ 6824 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6825 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6826 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6827 fp_port_offline(port, 0); 6828 } 6829 mutex_exit(&port->fp_mutex); 6830 6831 switch (FC_PORT_STATE_MASK(state)) { 6832 case FC_STATE_OFFLINE: 6833 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6834 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6835 if (job == NULL) { 6836 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6837 " fp_statec_cb() couldn't submit a job " 6838 " to the thread: failing.."); 6839 mutex_enter(&port->fp_mutex); 6840 if (--port->fp_statec_busy == 0) { 6841 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6842 } 6843 mutex_exit(&port->fp_mutex); 6844 return; 6845 } 6846 mutex_enter(&port->fp_mutex); 6847 /* 6848 * Zero out this field so that we do not retain 6849 * the fabric name as its no longer valid 6850 */ 6851 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6852 port->fp_state = state; 6853 mutex_exit(&port->fp_mutex); 6854 6855 fctl_enque_job(port, job); 6856 break; 6857 6858 case FC_STATE_ONLINE: 6859 case FC_STATE_LOOP: 6860 mutex_enter(&port->fp_mutex); 6861 port->fp_state = state; 6862 6863 if (port->fp_offline_tid) { 6864 timeout_id_t tid; 6865 6866 tid = port->fp_offline_tid; 6867 port->fp_offline_tid = NULL; 6868 mutex_exit(&port->fp_mutex); 6869 (void) untimeout(tid); 6870 } else { 6871 mutex_exit(&port->fp_mutex); 6872 } 6873 6874 job = fctl_alloc_job(JOB_PORT_ONLINE, 6875 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6876 if (job == NULL) { 6877 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6878 "fp_statec_cb() couldn't submit a job " 6879 "to the thread: failing.."); 6880 6881 mutex_enter(&port->fp_mutex); 6882 if (--port->fp_statec_busy == 0) { 6883 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6884 } 6885 mutex_exit(&port->fp_mutex); 6886 return; 6887 } 6888 fctl_enque_job(port, job); 6889 break; 6890 6891 case FC_STATE_RESET_REQUESTED: 6892 mutex_enter(&port->fp_mutex); 6893 port->fp_state = FC_STATE_OFFLINE; 6894 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6895 mutex_exit(&port->fp_mutex); 6896 /* FALLTHROUGH */ 6897 6898 case FC_STATE_RESET: 6899 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6900 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6901 if (job == NULL) { 6902 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6903 "fp_statec_cb() couldn't submit a job" 6904 " to the thread: failing.."); 6905 6906 mutex_enter(&port->fp_mutex); 6907 if (--port->fp_statec_busy == 0) { 6908 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6909 } 6910 mutex_exit(&port->fp_mutex); 6911 return; 6912 } 6913 6914 /* squeeze into some field in the job structure */ 6915 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6916 fctl_enque_job(port, job); 6917 break; 6918 6919 case FC_STATE_TARGET_PORT_RESET: 6920 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6921 /* FALLTHROUGH */ 6922 6923 case FC_STATE_NAMESERVICE: 6924 /* FALLTHROUGH */ 6925 6926 default: 6927 mutex_enter(&port->fp_mutex); 6928 if (--port->fp_statec_busy == 0) { 6929 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6930 } 6931 mutex_exit(&port->fp_mutex); 6932 break; 6933 } 6934 } 6935 6936 6937 /* 6938 * Register with the Name Server for RSCNs 6939 */ 6940 static int 6941 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6942 int sleep) 6943 { 6944 uint32_t s_id; 6945 uchar_t class; 6946 fc_scr_req_t payload; 6947 fp_cmd_t *cmd; 6948 fc_packet_t *pkt; 6949 6950 mutex_enter(&port->fp_mutex); 6951 s_id = 
port->fp_port_id.port_id; 6952 class = port->fp_ns_login_class; 6953 mutex_exit(&port->fp_mutex); 6954 6955 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6956 sizeof (fc_scr_resp_t), sleep, NULL); 6957 if (cmd == NULL) { 6958 return (FC_NOMEM); 6959 } 6960 6961 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6962 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6963 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6964 cmd->cmd_retry_count = fp_retry_count; 6965 cmd->cmd_ulp_pkt = NULL; 6966 6967 pkt = &cmd->cmd_pkt; 6968 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6969 6970 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6971 6972 payload.ls_code.ls_code = LA_ELS_SCR; 6973 payload.ls_code.mbz = 0; 6974 payload.scr_rsvd = 0; 6975 payload.scr_func = scr_func; 6976 6977 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6978 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6979 6980 job->job_counter = 1; 6981 6982 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6983 fp_iodone(cmd); 6984 } 6985 6986 return (FC_SUCCESS); 6987 } 6988 6989 6990 /* 6991 * There are basically two methods to determine the total number of 6992 * devices out in the NS database; Reading the details of the two 6993 * methods described below, it shouldn't be hard to identify which 6994 * of the two methods is better. 6995 * 6996 * Method 1. 6997 * Iteratively issue GANs until all ports identifiers are walked 6998 * 6999 * Method 2. 7000 * Issue GID_PT (get port Identifiers) with Maximum residual 7001 * field in the request CT HEADER set to accommodate only the 7002 * CT HEADER in the response frame. And if FC-GS2 has been 7003 * carefully read, the NS here has a chance to FS_ACC the 7004 * request and indicate the residual size in the FS_ACC. 7005 * 7006 * Method 2 is wonderful, although it's not mandatory for the NS 7007 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7008 * (note with particular care the use of the auxiliary verb 'may') 7009 * 7010 */ 7011 static int 7012 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7013 int sleep) 7014 { 7015 int flags; 7016 int rval; 7017 uint32_t src_id; 7018 fctl_ns_req_t *ns_cmd; 7019 7020 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7021 7022 mutex_enter(&port->fp_mutex); 7023 src_id = port->fp_port_id.port_id; 7024 mutex_exit(&port->fp_mutex); 7025 7026 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7027 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7028 sizeof (ns_resp_gid_pt_t), 0, 7029 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7030 7031 if (ns_cmd == NULL) { 7032 return (FC_NOMEM); 7033 } 7034 7035 ns_cmd->ns_cmd_code = NS_GID_PT; 7036 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7037 = FC_NS_PORT_NX; /* All port types */ 7038 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7039 7040 } else { 7041 uint32_t ns_flags; 7042 7043 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7044 if (create) { 7045 ns_flags |= FCTL_NS_CREATE_DEVICE; 7046 } 7047 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7048 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7049 7050 if (ns_cmd == NULL) { 7051 return (FC_NOMEM); 7052 } 7053 ns_cmd->ns_gan_index = 0; 7054 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7055 ns_cmd->ns_cmd_code = NS_GA_NXT; 7056 ns_cmd->ns_gan_max = 0xFFFF; 7057 7058 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7059 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7060 } 7061 7062 flags = 
job->job_flags; 7063 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7064 job->job_counter = 1; 7065 7066 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7067 job->job_flags = flags; 7068 7069 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7070 uint16_t max_resid; 7071 7072 /* 7073 * Revert to scanning the NS if NS_GID_PT isn't 7074 * helping us figure out total number of devices. 7075 */ 7076 if (job->job_result != FC_SUCCESS || 7077 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7078 mutex_enter(&port->fp_mutex); 7079 port->fp_options &= ~FP_NS_SMART_COUNT; 7080 mutex_exit(&port->fp_mutex); 7081 7082 fctl_free_ns_cmd(ns_cmd); 7083 return (fp_ns_get_devcount(port, job, create, sleep)); 7084 } 7085 7086 mutex_enter(&port->fp_mutex); 7087 port->fp_total_devices = 1; 7088 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7089 if (max_resid) { 7090 /* 7091 * Since port identifier is 4 bytes and max_resid 7092 * is also in WORDS, max_resid simply indicates 7093 * the total number of port identifiers not 7094 * transferred 7095 */ 7096 port->fp_total_devices += max_resid; 7097 } 7098 mutex_exit(&port->fp_mutex); 7099 } 7100 mutex_enter(&port->fp_mutex); 7101 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7102 mutex_exit(&port->fp_mutex); 7103 fctl_free_ns_cmd(ns_cmd); 7104 7105 return (rval); 7106 } 7107 7108 /* 7109 * One heck of a function to serve userland. 7110 */ 7111 static int 7112 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7113 { 7114 int rval = 0; 7115 int jcode; 7116 uint32_t ret; 7117 uchar_t open_flag; 7118 fcio_t *kfcio; 7119 job_request_t *job; 7120 boolean_t use32 = B_FALSE; 7121 7122 #ifdef _MULTI_DATAMODEL 7123 switch (ddi_model_convert_from(mode & FMODELS)) { 7124 case DDI_MODEL_ILP32: 7125 use32 = B_TRUE; 7126 break; 7127 7128 case DDI_MODEL_NONE: 7129 default: 7130 break; 7131 } 7132 #endif 7133 7134 mutex_enter(&port->fp_mutex); 7135 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7136 FP_SOFT_IN_UNSOL_CB)) { 7137 fcio->fcio_errno = FC_STATEC_BUSY; 7138 mutex_exit(&port->fp_mutex); 7139 rval = EAGAIN; 7140 if (fp_fcio_copyout(fcio, data, mode)) { 7141 rval = EFAULT; 7142 } 7143 return (rval); 7144 } 7145 open_flag = port->fp_flag; 7146 mutex_exit(&port->fp_mutex); 7147 7148 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7149 fcio->fcio_errno = FC_FAILURE; 7150 rval = EACCES; 7151 if (fp_fcio_copyout(fcio, data, mode)) { 7152 rval = EFAULT; 7153 } 7154 return (rval); 7155 } 7156 7157 /* 7158 * If an exclusive open was demanded during open, don't let 7159 * either innocuous or devil threads to share the file 7160 * descriptor and fire down exclusive access commands 7161 */ 7162 mutex_enter(&port->fp_mutex); 7163 if (port->fp_flag & FP_EXCL) { 7164 if (port->fp_flag & FP_EXCL_BUSY) { 7165 mutex_exit(&port->fp_mutex); 7166 fcio->fcio_errno = FC_FAILURE; 7167 return (EBUSY); 7168 } 7169 port->fp_flag |= FP_EXCL_BUSY; 7170 } 7171 mutex_exit(&port->fp_mutex); 7172 7173 switch (fcio->fcio_cmd) { 7174 case FCIO_GET_HOST_PARAMS: { 7175 fc_port_dev_t *val; 7176 fc_port_dev32_t *val32; 7177 int index; 7178 int lilp_device_count; 7179 fc_lilpmap_t *lilp_map; 7180 uchar_t *alpa_list; 7181 7182 if (use32 == B_TRUE) { 7183 if (fcio->fcio_olen != sizeof (*val32) || 7184 fcio->fcio_xfer != FCIO_XFER_READ) { 7185 rval = EINVAL; 7186 break; 7187 } 7188 } else { 7189 if (fcio->fcio_olen != sizeof (*val) || 7190 fcio->fcio_xfer != FCIO_XFER_READ) { 7191 rval = EINVAL; 7192 break; 7193 } 7194 } 7195 7196 val = kmem_zalloc(sizeof (*val), 
KM_SLEEP); 7197 7198 mutex_enter(&port->fp_mutex); 7199 val->dev_did = port->fp_port_id; 7200 val->dev_hard_addr = port->fp_hard_addr; 7201 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7202 val->dev_nwwn = port->fp_service_params.node_ww_name; 7203 val->dev_state = port->fp_state; 7204 7205 lilp_map = &port->fp_lilp_map; 7206 alpa_list = &lilp_map->lilp_alpalist[0]; 7207 lilp_device_count = lilp_map->lilp_length; 7208 for (index = 0; index < lilp_device_count; index++) { 7209 uint32_t d_id; 7210 7211 d_id = alpa_list[index]; 7212 if (d_id == port->fp_port_id.port_id) { 7213 break; 7214 } 7215 } 7216 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7217 7218 bcopy(port->fp_fc4_types, val->dev_type, 7219 sizeof (port->fp_fc4_types)); 7220 mutex_exit(&port->fp_mutex); 7221 7222 if (use32 == B_TRUE) { 7223 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7224 7225 val32->dev_did = val->dev_did; 7226 val32->dev_hard_addr = val->dev_hard_addr; 7227 val32->dev_pwwn = val->dev_pwwn; 7228 val32->dev_nwwn = val->dev_nwwn; 7229 val32->dev_state = val->dev_state; 7230 val32->dev_did.priv_lilp_posit = 7231 val->dev_did.priv_lilp_posit; 7232 7233 bcopy(val->dev_type, val32->dev_type, 7234 sizeof (port->fp_fc4_types)); 7235 7236 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7237 fcio->fcio_olen, mode) == 0) { 7238 if (fp_fcio_copyout(fcio, data, mode)) { 7239 rval = EFAULT; 7240 } 7241 } else { 7242 rval = EFAULT; 7243 } 7244 7245 kmem_free(val32, sizeof (*val32)); 7246 } else { 7247 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7248 fcio->fcio_olen, mode) == 0) { 7249 if (fp_fcio_copyout(fcio, data, mode)) { 7250 rval = EFAULT; 7251 } 7252 } else { 7253 rval = EFAULT; 7254 } 7255 } 7256 7257 /* need to free "val" here */ 7258 kmem_free(val, sizeof (*val)); 7259 break; 7260 } 7261 7262 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7263 uint32_t index; 7264 char *tmpPath; 7265 fc_local_port_t *tmpPort; 7266 7267 if (fcio->fcio_olen < MAXPATHLEN || 7268 fcio->fcio_ilen != sizeof (uint32_t)) { 7269 rval = EINVAL; 7270 break; 7271 } 7272 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7273 rval = EFAULT; 7274 break; 7275 } 7276 7277 tmpPort = fctl_get_adapter_port_by_index(port, index); 7278 if (tmpPort == NULL) { 7279 FP_TRACE(FP_NHEAD1(9, 0), 7280 "User supplied index out of range"); 7281 fcio->fcio_errno = FC_BADPORT; 7282 rval = EFAULT; 7283 if (fp_fcio_copyout(fcio, data, mode)) { 7284 rval = EFAULT; 7285 } 7286 break; 7287 } 7288 7289 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7290 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7291 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7292 MAXPATHLEN, mode) == 0) { 7293 if (fp_fcio_copyout(fcio, data, mode)) { 7294 rval = EFAULT; 7295 } 7296 } else { 7297 rval = EFAULT; 7298 } 7299 kmem_free(tmpPath, MAXPATHLEN); 7300 break; 7301 } 7302 7303 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7304 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7305 fc_hba_adapter_attributes_t *val; 7306 fc_hba_adapter_attributes32_t *val32; 7307 7308 if (use32 == B_TRUE) { 7309 if (fcio->fcio_olen < sizeof (*val32) || 7310 fcio->fcio_xfer != FCIO_XFER_READ) { 7311 rval = EINVAL; 7312 break; 7313 } 7314 } else { 7315 if (fcio->fcio_olen < sizeof (*val) || 7316 fcio->fcio_xfer != FCIO_XFER_READ) { 7317 rval = EINVAL; 7318 break; 7319 } 7320 } 7321 7322 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7323 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7324 mutex_enter(&port->fp_mutex); 7325 bcopy(port->fp_hba_port_attrs.manufacturer, 7326 
val->Manufacturer, 7327 sizeof (val->Manufacturer)); 7328 bcopy(port->fp_hba_port_attrs.serial_number, 7329 val->SerialNumber, 7330 sizeof (val->SerialNumber)); 7331 bcopy(port->fp_hba_port_attrs.model, 7332 val->Model, 7333 sizeof (val->Model)); 7334 bcopy(port->fp_hba_port_attrs.model_description, 7335 val->ModelDescription, 7336 sizeof (val->ModelDescription)); 7337 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7338 sizeof (val->NodeSymbolicName)); 7339 bcopy(port->fp_hba_port_attrs.hardware_version, 7340 val->HardwareVersion, 7341 sizeof (val->HardwareVersion)); 7342 bcopy(port->fp_hba_port_attrs.option_rom_version, 7343 val->OptionROMVersion, 7344 sizeof (val->OptionROMVersion)); 7345 bcopy(port->fp_hba_port_attrs.firmware_version, 7346 val->FirmwareVersion, 7347 sizeof (val->FirmwareVersion)); 7348 val->VendorSpecificID = 7349 port->fp_hba_port_attrs.vendor_specific_id; 7350 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7351 &val->NodeWWN.raw_wwn, 7352 sizeof (val->NodeWWN.raw_wwn)); 7353 7354 7355 bcopy(port->fp_hba_port_attrs.driver_name, 7356 val->DriverName, 7357 sizeof (val->DriverName)); 7358 bcopy(port->fp_hba_port_attrs.driver_version, 7359 val->DriverVersion, 7360 sizeof (val->DriverVersion)); 7361 mutex_exit(&port->fp_mutex); 7362 7363 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7364 val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7365 } else { 7366 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7367 } 7368 7369 if (use32 == B_TRUE) { 7370 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7371 val32->version = val->version; 7372 bcopy(val->Manufacturer, val32->Manufacturer, 7373 sizeof (val->Manufacturer)); 7374 bcopy(val->SerialNumber, val32->SerialNumber, 7375 sizeof (val->SerialNumber)); 7376 bcopy(val->Model, val32->Model, 7377 sizeof (val->Model)); 7378 bcopy(val->ModelDescription, val32->ModelDescription, 7379 sizeof (val->ModelDescription)); 7380 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7381 sizeof (val->NodeSymbolicName)); 7382 bcopy(val->HardwareVersion, val32->HardwareVersion, 7383 sizeof (val->HardwareVersion)); 7384 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7385 sizeof (val->OptionROMVersion)); 7386 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7387 sizeof (val->FirmwareVersion)); 7388 val32->VendorSpecificID = val->VendorSpecificID; 7389 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7390 sizeof (val->NodeWWN.raw_wwn)); 7391 bcopy(val->DriverName, val32->DriverName, 7392 sizeof (val->DriverName)); 7393 bcopy(val->DriverVersion, val32->DriverVersion, 7394 sizeof (val->DriverVersion)); 7395 7396 val32->NumberOfPorts = val->NumberOfPorts; 7397 7398 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7399 fcio->fcio_olen, mode) == 0) { 7400 if (fp_fcio_copyout(fcio, data, mode)) { 7401 rval = EFAULT; 7402 } 7403 } else { 7404 rval = EFAULT; 7405 } 7406 7407 kmem_free(val32, sizeof (*val32)); 7408 } else { 7409 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7410 fcio->fcio_olen, mode) == 0) { 7411 if (fp_fcio_copyout(fcio, data, mode)) { 7412 rval = EFAULT; 7413 } 7414 } else { 7415 rval = EFAULT; 7416 } 7417 } 7418 7419 kmem_free(val, sizeof (*val)); 7420 break; 7421 } 7422 7423 case FCIO_GET_NPIV_ATTRIBUTES: { 7424 fc_hba_npiv_attributes_t *attrs; 7425 7426 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7427 mutex_enter(&port->fp_mutex); 7428 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7429 &attrs->NodeWWN.raw_wwn, 7430 sizeof (attrs->NodeWWN.raw_wwn)); 7431 
		bcopy(&port->fp_service_params.nport_ww_name.raw_wwn,
		    &attrs->PortWWN.raw_wwn,
		    sizeof (attrs->PortWWN.raw_wwn));
		mutex_exit(&port->fp_mutex);
		if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf,
		    fcio->fcio_olen, mode) == 0) {
			if (fp_fcio_copyout(fcio, data, mode)) {
				rval = EFAULT;
			}
		} else {
			rval = EFAULT;
		}
		kmem_free(attrs, sizeof (*attrs));
		break;
	}

	case FCIO_DELETE_NPIV_PORT: {
		fc_local_port_t	*tmpport;
		char		ww_pname[17];
		la_wwn_t	vwwn[1];

		FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port");
		if (ddi_copyin(fcio->fcio_ibuf,
		    &vwwn, sizeof (la_wwn_t), mode)) {
			rval = EFAULT;
			break;
		}

		fc_wwn_to_str(&vwwn[0], ww_pname);
		FP_TRACE(FP_NHEAD1(3, 0),
		    "Delete NPIV Port %s", ww_pname);
		tmpport = fc_delete_npiv_port(port, &vwwn[0]);
		if (tmpport == NULL) {
			FP_TRACE(FP_NHEAD1(3, 0),
			    "Delete NPIV Port: not found");
			rval = EFAULT;
		} else {
			fc_local_port_t *nextport = tmpport->fp_port_next;
			fc_local_port_t *prevport = tmpport->fp_port_prev;
			int portlen, portindex, ret;

			portlen = sizeof (portindex);
			ret = ddi_prop_op(DDI_DEV_T_ANY,
			    tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF,
			    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
			    (caddr_t)&portindex, &portlen);
			if (ret != DDI_SUCCESS) {
				rval = EFAULT;
				break;
			}
			if (ndi_devi_offline(tmpport->fp_port_dip,
			    NDI_DEVI_REMOVE) != DDI_SUCCESS) {
				FP_TRACE(FP_NHEAD1(1, 0),
				    "Delete NPIV Port failed");
				mutex_enter(&port->fp_mutex);
				tmpport->fp_npiv_state = 0;
				mutex_exit(&port->fp_mutex);
				rval = EFAULT;
			} else {
				mutex_enter(&port->fp_mutex);
				nextport->fp_port_prev = prevport;
				prevport->fp_port_next = nextport;
				if (port == port->fp_port_next) {
					port->fp_port_next =
					    port->fp_port_prev = NULL;
				}
				port->fp_npiv_portnum--;
				FP_TRACE(FP_NHEAD1(3, 0),
				    "Delete NPIV Port %d", portindex);
				port->fp_npiv_portindex[portindex-1] = 0;
				mutex_exit(&port->fp_mutex);
			}
		}
		break;
	}

	case FCIO_CREATE_NPIV_PORT: {
		char ww_nname[17], ww_pname[17];
		la_npiv_create_entry_t entrybuf;
		uint32_t vportindex = 0;
		int npiv_ret = 0;
		char *portname, *fcaname;

		portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(port->fp_port_dip, portname);
		fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
		(void) ddi_pathname(port->fp_fca_dip, fcaname);
		FP_TRACE(FP_NHEAD1(1, 0),
		    "Create NPIV port %s %s %s", portname, fcaname,
		    ddi_driver_name(port->fp_fca_dip));
		kmem_free(portname, MAXPATHLEN);
		kmem_free(fcaname, MAXPATHLEN);
		if (ddi_copyin(fcio->fcio_ibuf,
		    &entrybuf, sizeof (la_npiv_create_entry_t), mode)) {
			rval = EFAULT;
			break;
		}

		fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname);
		fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname);
		vportindex = entrybuf.vindex;
		FP_TRACE(FP_NHEAD1(3, 0),
		    "Create NPIV Port %s %s %d",
		    ww_nname, ww_pname, vportindex);

		if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) {
			rval = EFAULT;
			break;
		}
		npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip,
		    port->fp_port_dip, ww_nname, ww_pname, &vportindex);
		if (npiv_ret == NDI_SUCCESS) {
			mutex_enter(&port->fp_mutex);
			port->fp_npiv_portnum++;
			mutex_exit(&port->fp_mutex);
			if (fp_copyout((void *)&vportindex,
7547 (void *)fcio->fcio_obuf, 7548 fcio->fcio_olen, mode) == 0) { 7549 if (fp_fcio_copyout(fcio, data, mode)) { 7550 rval = EFAULT; 7551 } 7552 } else { 7553 rval = EFAULT; 7554 } 7555 } else { 7556 rval = EFAULT; 7557 } 7558 FP_TRACE(FP_NHEAD1(3, 0), 7559 "Create NPIV Port %d %d", npiv_ret, vportindex); 7560 break; 7561 } 7562 7563 case FCIO_GET_NPIV_PORT_LIST: { 7564 fc_hba_npiv_port_list_t *list; 7565 int count; 7566 7567 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7568 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7569 rval = EINVAL; 7570 break; 7571 } 7572 7573 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7574 list->version = FC_HBA_LIST_VERSION; 7575 /* build npiv port list */ 7576 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7577 if (count < 0) { 7578 rval = ENXIO; 7579 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7580 kmem_free(list, fcio->fcio_olen); 7581 break; 7582 } 7583 list->numAdapters = count; 7584 7585 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7586 fcio->fcio_olen, mode) == 0) { 7587 if (fp_fcio_copyout(fcio, data, mode)) { 7588 FP_TRACE(FP_NHEAD1(1, 0), 7589 "Copy NPIV Port data error"); 7590 rval = EFAULT; 7591 } 7592 } else { 7593 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7594 rval = EFAULT; 7595 } 7596 kmem_free(list, fcio->fcio_olen); 7597 break; 7598 } 7599 7600 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7601 fc_hba_port_npiv_attributes_t *val; 7602 7603 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7604 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7605 7606 mutex_enter(&port->fp_mutex); 7607 val->npivflag = port->fp_npiv_flag; 7608 val->lastChange = port->fp_last_change; 7609 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7610 &val->PortWWN.raw_wwn, 7611 sizeof (val->PortWWN.raw_wwn)); 7612 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7613 &val->NodeWWN.raw_wwn, 7614 sizeof (val->NodeWWN.raw_wwn)); 7615 mutex_exit(&port->fp_mutex); 7616 7617 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7618 if (port->fp_npiv_type != FC_NPIV_PORT) { 7619 val->MaxNumberOfNPIVPorts = 7620 port->fp_fca_tran->fca_num_npivports; 7621 } else { 7622 val->MaxNumberOfNPIVPorts = 0; 7623 } 7624 7625 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7626 fcio->fcio_olen, mode) == 0) { 7627 if (fp_fcio_copyout(fcio, data, mode)) { 7628 rval = EFAULT; 7629 } 7630 } else { 7631 rval = EFAULT; 7632 } 7633 kmem_free(val, sizeof (*val)); 7634 break; 7635 } 7636 7637 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7638 fc_hba_port_attributes_t *val; 7639 fc_hba_port_attributes32_t *val32; 7640 7641 if (use32 == B_TRUE) { 7642 if (fcio->fcio_olen < sizeof (*val32) || 7643 fcio->fcio_xfer != FCIO_XFER_READ) { 7644 rval = EINVAL; 7645 break; 7646 } 7647 } else { 7648 if (fcio->fcio_olen < sizeof (*val) || 7649 fcio->fcio_xfer != FCIO_XFER_READ) { 7650 rval = EINVAL; 7651 break; 7652 } 7653 } 7654 7655 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7656 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7657 mutex_enter(&port->fp_mutex); 7658 val->lastChange = port->fp_last_change; 7659 val->fp_minor = port->fp_instance; 7660 7661 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7662 &val->PortWWN.raw_wwn, 7663 sizeof (val->PortWWN.raw_wwn)); 7664 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7665 &val->NodeWWN.raw_wwn, 7666 sizeof (val->NodeWWN.raw_wwn)); 7667 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7668 sizeof (val->FabricName.raw_wwn)); 7669 7670 val->PortFcId = port->fp_port_id.port_id; 
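
		/*
		 * Map the current link state onto the FC-HBA API
		 * port state constants expected by userland.
		 */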
		switch (FC_PORT_STATE_MASK(port->fp_state)) {
		case FC_STATE_OFFLINE:
			val->PortState = FC_HBA_PORTSTATE_OFFLINE;
			break;
		case FC_STATE_ONLINE:
		case FC_STATE_LOOP:
		case FC_STATE_NAMESERVICE:
			val->PortState = FC_HBA_PORTSTATE_ONLINE;
			break;
		default:
			val->PortState = FC_HBA_PORTSTATE_UNKNOWN;
			break;
		}

		/* Translate from LV to FC-HBA port type codes */
		switch (port->fp_port_type.port_type) {
		case FC_NS_PORT_N:
			val->PortType = FC_HBA_PORTTYPE_NPORT;
			break;
		case FC_NS_PORT_NL:
			/* Actually means loop for us */
			val->PortType = FC_HBA_PORTTYPE_LPORT;
			break;
		case FC_NS_PORT_F:
			val->PortType = FC_HBA_PORTTYPE_FPORT;
			break;
		case FC_NS_PORT_FL:
			val->PortType = FC_HBA_PORTTYPE_FLPORT;
			break;
		case FC_NS_PORT_E:
			val->PortType = FC_HBA_PORTTYPE_EPORT;
			break;
		default:
			val->PortType = FC_HBA_PORTTYPE_OTHER;
			break;
		}

		/*
		 * If fp has decided that the topology is public loop,
		 * we will indicate that using the appropriate
		 * FC HBA API constant.
		 */
		switch (port->fp_topology) {
		case FC_TOP_PUBLIC_LOOP:
			val->PortType = FC_HBA_PORTTYPE_NLPORT;
			break;

		case FC_TOP_PT_PT:
			val->PortType = FC_HBA_PORTTYPE_PTP;
			break;

		case FC_TOP_UNKNOWN:
			/*
			 * This should cover the case where nothing is
			 * connected to the port. Crystal+ is probably an
			 * exception here. For Crystal+, port 0 will come up
			 * as private loop (i.e. fp_bind_state will be
			 * FC_STATE_LOOP) even when nothing is connected to
			 * it. The current plan is to let userland handle
			 * this.
			 */
			if (port->fp_bind_state == FC_STATE_OFFLINE) {
				val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
			}
			break;

		default:
			/*
			 * Do Nothing.
7741 * Unused: 7742 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7743 */ 7744 break; 7745 } 7746 7747 val->PortSupportedClassofService = 7748 port->fp_hba_port_attrs.supported_cos; 7749 val->PortSupportedFc4Types[0] = 0; 7750 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7751 sizeof (val->PortActiveFc4Types)); 7752 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7753 sizeof (val->PortSymbolicName)); 7754 val->PortSupportedSpeed = 7755 port->fp_hba_port_attrs.supported_speed; 7756 7757 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7758 case FC_STATE_1GBIT_SPEED: 7759 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7760 break; 7761 case FC_STATE_2GBIT_SPEED: 7762 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7763 break; 7764 case FC_STATE_4GBIT_SPEED: 7765 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7766 break; 7767 case FC_STATE_8GBIT_SPEED: 7768 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7769 break; 7770 case FC_STATE_10GBIT_SPEED: 7771 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7772 break; 7773 case FC_STATE_16GBIT_SPEED: 7774 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7775 break; 7776 default: 7777 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7778 break; 7779 } 7780 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7781 val->NumberofDiscoveredPorts = port->fp_dev_count; 7782 mutex_exit(&port->fp_mutex); 7783 7784 if (use32 == B_TRUE) { 7785 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7786 val32->version = val->version; 7787 val32->lastChange = val->lastChange; 7788 val32->fp_minor = val->fp_minor; 7789 7790 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7791 sizeof (val->PortWWN.raw_wwn)); 7792 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7793 sizeof (val->NodeWWN.raw_wwn)); 7794 val32->PortFcId = val->PortFcId; 7795 val32->PortState = val->PortState; 7796 val32->PortType = val->PortType; 7797 7798 val32->PortSupportedClassofService = 7799 val->PortSupportedClassofService; 7800 bcopy(val->PortActiveFc4Types, 7801 val32->PortActiveFc4Types, 7802 sizeof (val->PortActiveFc4Types)); 7803 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7804 sizeof (val->PortSymbolicName)); 7805 bcopy(&val->FabricName, &val32->FabricName, 7806 sizeof (val->FabricName.raw_wwn)); 7807 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7808 val32->PortSpeed = val->PortSpeed; 7809 7810 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7811 val32->NumberofDiscoveredPorts = 7812 val->NumberofDiscoveredPorts; 7813 7814 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7815 fcio->fcio_olen, mode) == 0) { 7816 if (fp_fcio_copyout(fcio, data, mode)) { 7817 rval = EFAULT; 7818 } 7819 } else { 7820 rval = EFAULT; 7821 } 7822 7823 kmem_free(val32, sizeof (*val32)); 7824 } else { 7825 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7826 fcio->fcio_olen, mode) == 0) { 7827 if (fp_fcio_copyout(fcio, data, mode)) { 7828 rval = EFAULT; 7829 } 7830 } else { 7831 rval = EFAULT; 7832 } 7833 } 7834 7835 kmem_free(val, sizeof (*val)); 7836 break; 7837 } 7838 7839 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7840 fc_hba_port_attributes_t *val; 7841 fc_hba_port_attributes32_t *val32; 7842 uint32_t index = 0; 7843 fc_remote_port_t *tmp_pd; 7844 7845 if (use32 == B_TRUE) { 7846 if (fcio->fcio_olen < sizeof (*val32) || 7847 fcio->fcio_xfer != FCIO_XFER_READ) { 7848 rval = EINVAL; 7849 break; 7850 } 7851 } else { 7852 if (fcio->fcio_olen < sizeof (*val) || 7853 fcio->fcio_xfer != FCIO_XFER_READ) { 7854 rval = EINVAL; 7855 break; 7856 } 7857 } 7858 7859 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof 
(index), mode)) { 7860 rval = EFAULT; 7861 break; 7862 } 7863 7864 if (index >= port->fp_dev_count) { 7865 FP_TRACE(FP_NHEAD1(9, 0), 7866 "User supplied index out of range"); 7867 fcio->fcio_errno = FC_OUTOFBOUNDS; 7868 rval = EINVAL; 7869 if (fp_fcio_copyout(fcio, data, mode)) { 7870 rval = EFAULT; 7871 } 7872 break; 7873 } 7874 7875 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7876 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7877 7878 mutex_enter(&port->fp_mutex); 7879 tmp_pd = fctl_lookup_pd_by_index(port, index); 7880 7881 if (tmp_pd == NULL) { 7882 fcio->fcio_errno = FC_BADPORT; 7883 rval = EINVAL; 7884 } else { 7885 val->lastChange = port->fp_last_change; 7886 val->fp_minor = port->fp_instance; 7887 7888 mutex_enter(&tmp_pd->pd_mutex); 7889 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7890 &val->PortWWN.raw_wwn, 7891 sizeof (val->PortWWN.raw_wwn)); 7892 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7893 &val->NodeWWN.raw_wwn, 7894 sizeof (val->NodeWWN.raw_wwn)); 7895 val->PortFcId = tmp_pd->pd_port_id.port_id; 7896 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7897 sizeof (val->PortSymbolicName)); 7898 val->PortSupportedClassofService = tmp_pd->pd_cos; 7899 /* 7900 * we will assume the sizeof these pd_fc4types and 7901 * portActiveFc4Types will remain the same. we could 7902 * add in a check for it, but we decided it was unneeded 7903 */ 7904 bcopy((caddr_t)tmp_pd->pd_fc4types, 7905 val->PortActiveFc4Types, 7906 sizeof (tmp_pd->pd_fc4types)); 7907 val->PortState = 7908 fp_map_remote_port_state(tmp_pd->pd_state); 7909 mutex_exit(&tmp_pd->pd_mutex); 7910 7911 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7912 val->PortSupportedFc4Types[0] = 0; 7913 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7914 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7915 val->PortMaxFrameSize = 0; 7916 val->NumberofDiscoveredPorts = 0; 7917 7918 if (use32 == B_TRUE) { 7919 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7920 val32->version = val->version; 7921 val32->lastChange = val->lastChange; 7922 val32->fp_minor = val->fp_minor; 7923 7924 bcopy(&val->PortWWN.raw_wwn, 7925 &val32->PortWWN.raw_wwn, 7926 sizeof (val->PortWWN.raw_wwn)); 7927 bcopy(&val->NodeWWN.raw_wwn, 7928 &val32->NodeWWN.raw_wwn, 7929 sizeof (val->NodeWWN.raw_wwn)); 7930 val32->PortFcId = val->PortFcId; 7931 bcopy(val->PortSymbolicName, 7932 val32->PortSymbolicName, 7933 sizeof (val->PortSymbolicName)); 7934 val32->PortSupportedClassofService = 7935 val->PortSupportedClassofService; 7936 bcopy(val->PortActiveFc4Types, 7937 val32->PortActiveFc4Types, 7938 sizeof (tmp_pd->pd_fc4types)); 7939 7940 val32->PortType = val->PortType; 7941 val32->PortState = val->PortState; 7942 val32->PortSupportedFc4Types[0] = 7943 val->PortSupportedFc4Types[0]; 7944 val32->PortSupportedSpeed = 7945 val->PortSupportedSpeed; 7946 val32->PortSpeed = val->PortSpeed; 7947 val32->PortMaxFrameSize = 7948 val->PortMaxFrameSize; 7949 val32->NumberofDiscoveredPorts = 7950 val->NumberofDiscoveredPorts; 7951 7952 if (fp_copyout((void *)val32, 7953 (void *)fcio->fcio_obuf, 7954 fcio->fcio_olen, mode) == 0) { 7955 if (fp_fcio_copyout(fcio, 7956 data, mode)) { 7957 rval = EFAULT; 7958 } 7959 } else { 7960 rval = EFAULT; 7961 } 7962 7963 kmem_free(val32, sizeof (*val32)); 7964 } else { 7965 if (fp_copyout((void *)val, 7966 (void *)fcio->fcio_obuf, 7967 fcio->fcio_olen, mode) == 0) { 7968 if (fp_fcio_copyout(fcio, data, mode)) { 7969 rval = EFAULT; 7970 } 7971 } else { 7972 rval = EFAULT; 7973 } 7974 } 7975 } 7976 7977 mutex_exit(&port->fp_mutex); 7978 kmem_free(val, sizeof 
(*val)); 7979 break; 7980 } 7981 7982 case FCIO_GET_PORT_ATTRIBUTES: { 7983 fc_hba_port_attributes_t *val; 7984 fc_hba_port_attributes32_t *val32; 7985 la_wwn_t wwn; 7986 fc_remote_port_t *tmp_pd; 7987 7988 if (use32 == B_TRUE) { 7989 if (fcio->fcio_olen < sizeof (*val32) || 7990 fcio->fcio_xfer != FCIO_XFER_READ) { 7991 rval = EINVAL; 7992 break; 7993 } 7994 } else { 7995 if (fcio->fcio_olen < sizeof (*val) || 7996 fcio->fcio_xfer != FCIO_XFER_READ) { 7997 rval = EINVAL; 7998 break; 7999 } 8000 } 8001 8002 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8003 rval = EFAULT; 8004 break; 8005 } 8006 8007 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8008 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8009 8010 mutex_enter(&port->fp_mutex); 8011 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8012 val->lastChange = port->fp_last_change; 8013 val->fp_minor = port->fp_instance; 8014 mutex_exit(&port->fp_mutex); 8015 8016 if (tmp_pd == NULL) { 8017 fcio->fcio_errno = FC_BADWWN; 8018 rval = EINVAL; 8019 } else { 8020 mutex_enter(&tmp_pd->pd_mutex); 8021 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8022 &val->PortWWN.raw_wwn, 8023 sizeof (val->PortWWN.raw_wwn)); 8024 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8025 &val->NodeWWN.raw_wwn, 8026 sizeof (val->NodeWWN.raw_wwn)); 8027 val->PortFcId = tmp_pd->pd_port_id.port_id; 8028 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8029 sizeof (val->PortSymbolicName)); 8030 val->PortSupportedClassofService = tmp_pd->pd_cos; 8031 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8032 val->PortState = 8033 fp_map_remote_port_state(tmp_pd->pd_state); 8034 val->PortSupportedFc4Types[0] = 0; 8035 /* 8036 * we will assume the sizeof these pd_fc4types and 8037 * portActiveFc4Types will remain the same. we could 8038 * add in a check for it, but we decided it was unneeded 8039 */ 8040 bcopy((caddr_t)tmp_pd->pd_fc4types, 8041 val->PortActiveFc4Types, 8042 sizeof (tmp_pd->pd_fc4types)); 8043 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8044 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8045 val->PortMaxFrameSize = 0; 8046 val->NumberofDiscoveredPorts = 0; 8047 mutex_exit(&tmp_pd->pd_mutex); 8048 8049 if (use32 == B_TRUE) { 8050 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8051 val32->version = val->version; 8052 val32->lastChange = val->lastChange; 8053 val32->fp_minor = val->fp_minor; 8054 bcopy(&val->PortWWN.raw_wwn, 8055 &val32->PortWWN.raw_wwn, 8056 sizeof (val->PortWWN.raw_wwn)); 8057 bcopy(&val->NodeWWN.raw_wwn, 8058 &val32->NodeWWN.raw_wwn, 8059 sizeof (val->NodeWWN.raw_wwn)); 8060 val32->PortFcId = val->PortFcId; 8061 bcopy(val->PortSymbolicName, 8062 val32->PortSymbolicName, 8063 sizeof (val->PortSymbolicName)); 8064 val32->PortSupportedClassofService = 8065 val->PortSupportedClassofService; 8066 val32->PortType = val->PortType; 8067 val32->PortState = val->PortState; 8068 val32->PortSupportedFc4Types[0] = 8069 val->PortSupportedFc4Types[0]; 8070 bcopy(val->PortActiveFc4Types, 8071 val32->PortActiveFc4Types, 8072 sizeof (tmp_pd->pd_fc4types)); 8073 val32->PortSupportedSpeed = 8074 val->PortSupportedSpeed; 8075 val32->PortSpeed = val->PortSpeed; 8076 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8077 val32->NumberofDiscoveredPorts = 8078 val->NumberofDiscoveredPorts; 8079 8080 if (fp_copyout((void *)val32, 8081 (void *)fcio->fcio_obuf, 8082 fcio->fcio_olen, mode) == 0) { 8083 if (fp_fcio_copyout(fcio, data, mode)) { 8084 rval = EFAULT; 8085 } 8086 } else { 8087 rval = EFAULT; 8088 } 8089 8090 kmem_free(val32, sizeof (*val32)); 8091 } else { 8092 if 
(fp_copyout((void *)val, 8093 (void *)fcio->fcio_obuf, 8094 fcio->fcio_olen, mode) == 0) { 8095 if (fp_fcio_copyout(fcio, data, mode)) { 8096 rval = EFAULT; 8097 } 8098 } else { 8099 rval = EFAULT; 8100 } 8101 } 8102 } 8103 kmem_free(val, sizeof (*val)); 8104 break; 8105 } 8106 8107 case FCIO_GET_NUM_DEVS: { 8108 int num_devices; 8109 8110 if (fcio->fcio_olen != sizeof (num_devices) || 8111 fcio->fcio_xfer != FCIO_XFER_READ) { 8112 rval = EINVAL; 8113 break; 8114 } 8115 8116 mutex_enter(&port->fp_mutex); 8117 switch (port->fp_topology) { 8118 case FC_TOP_PRIVATE_LOOP: 8119 case FC_TOP_PT_PT: 8120 num_devices = port->fp_total_devices; 8121 fcio->fcio_errno = FC_SUCCESS; 8122 break; 8123 8124 case FC_TOP_PUBLIC_LOOP: 8125 case FC_TOP_FABRIC: 8126 mutex_exit(&port->fp_mutex); 8127 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8128 NULL, KM_SLEEP); 8129 ASSERT(job != NULL); 8130 8131 /* 8132 * In FC-GS-2 the Name Server doesn't send out 8133 * RSCNs for any Name Server Database updates 8134 * When it is finally fixed there is no need 8135 * to probe as below and should be removed. 8136 */ 8137 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8138 fctl_dealloc_job(job); 8139 8140 mutex_enter(&port->fp_mutex); 8141 num_devices = port->fp_total_devices; 8142 fcio->fcio_errno = FC_SUCCESS; 8143 break; 8144 8145 case FC_TOP_NO_NS: 8146 /* FALLTHROUGH */ 8147 case FC_TOP_UNKNOWN: 8148 /* FALLTHROUGH */ 8149 default: 8150 num_devices = 0; 8151 fcio->fcio_errno = FC_SUCCESS; 8152 break; 8153 } 8154 mutex_exit(&port->fp_mutex); 8155 8156 if (fp_copyout((void *)&num_devices, 8157 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8158 mode) == 0) { 8159 if (fp_fcio_copyout(fcio, data, mode)) { 8160 rval = EFAULT; 8161 } 8162 } else { 8163 rval = EFAULT; 8164 } 8165 break; 8166 } 8167 8168 case FCIO_GET_DEV_LIST: { 8169 int num_devices; 8170 int new_count; 8171 int map_size; 8172 8173 if (fcio->fcio_xfer != FCIO_XFER_READ || 8174 fcio->fcio_alen != sizeof (new_count)) { 8175 rval = EINVAL; 8176 break; 8177 } 8178 8179 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8180 8181 mutex_enter(&port->fp_mutex); 8182 if (num_devices < port->fp_total_devices) { 8183 fcio->fcio_errno = FC_TOOMANY; 8184 new_count = port->fp_total_devices; 8185 mutex_exit(&port->fp_mutex); 8186 8187 if (fp_copyout((void *)&new_count, 8188 (void *)fcio->fcio_abuf, 8189 sizeof (new_count), mode)) { 8190 rval = EFAULT; 8191 break; 8192 } 8193 8194 if (fp_fcio_copyout(fcio, data, mode)) { 8195 rval = EFAULT; 8196 break; 8197 } 8198 rval = EINVAL; 8199 break; 8200 } 8201 8202 if (port->fp_total_devices <= 0) { 8203 fcio->fcio_errno = FC_NO_MAP; 8204 new_count = port->fp_total_devices; 8205 mutex_exit(&port->fp_mutex); 8206 8207 if (fp_copyout((void *)&new_count, 8208 (void *)fcio->fcio_abuf, 8209 sizeof (new_count), mode)) { 8210 rval = EFAULT; 8211 break; 8212 } 8213 8214 if (fp_fcio_copyout(fcio, data, mode)) { 8215 rval = EFAULT; 8216 break; 8217 } 8218 rval = EINVAL; 8219 break; 8220 } 8221 8222 switch (port->fp_topology) { 8223 case FC_TOP_PRIVATE_LOOP: 8224 if (fp_fillout_loopmap(port, fcio, 8225 mode) != FC_SUCCESS) { 8226 rval = EFAULT; 8227 break; 8228 } 8229 if (fp_fcio_copyout(fcio, data, mode)) { 8230 rval = EFAULT; 8231 } 8232 break; 8233 8234 case FC_TOP_PT_PT: 8235 if (fp_fillout_p2pmap(port, fcio, 8236 mode) != FC_SUCCESS) { 8237 rval = EFAULT; 8238 break; 8239 } 8240 if (fp_fcio_copyout(fcio, data, mode)) { 8241 rval = EFAULT; 8242 } 8243 break; 8244 8245 case FC_TOP_PUBLIC_LOOP: 8246 case FC_TOP_FABRIC: { 8247 
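			/*
			 * For public loop and fabric topologies the device
			 * list is built by walking the Name Server database
			 * with GA_NXT queries; the results are then copied
			 * out to the caller's buffer.
			 */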
fctl_ns_req_t *ns_cmd; 8248 8249 map_size = 8250 sizeof (fc_port_dev_t) * port->fp_total_devices; 8251 8252 mutex_exit(&port->fp_mutex); 8253 8254 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8255 sizeof (ns_resp_gan_t), map_size, 8256 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8257 KM_SLEEP); 8258 ASSERT(ns_cmd != NULL); 8259 8260 ns_cmd->ns_gan_index = 0; 8261 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8262 ns_cmd->ns_cmd_code = NS_GA_NXT; 8263 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8264 8265 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8266 NULL, KM_SLEEP); 8267 ASSERT(job != NULL); 8268 8269 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8270 8271 if (ret != FC_SUCCESS || 8272 job->job_result != FC_SUCCESS) { 8273 fctl_free_ns_cmd(ns_cmd); 8274 8275 fcio->fcio_errno = job->job_result; 8276 new_count = 0; 8277 if (fp_copyout((void *)&new_count, 8278 (void *)fcio->fcio_abuf, 8279 sizeof (new_count), mode)) { 8280 fctl_dealloc_job(job); 8281 mutex_enter(&port->fp_mutex); 8282 rval = EFAULT; 8283 break; 8284 } 8285 8286 if (fp_fcio_copyout(fcio, data, mode)) { 8287 fctl_dealloc_job(job); 8288 mutex_enter(&port->fp_mutex); 8289 rval = EFAULT; 8290 break; 8291 } 8292 rval = EIO; 8293 mutex_enter(&port->fp_mutex); 8294 break; 8295 } 8296 fctl_dealloc_job(job); 8297 8298 new_count = ns_cmd->ns_gan_index; 8299 if (fp_copyout((void *)&new_count, 8300 (void *)fcio->fcio_abuf, sizeof (new_count), 8301 mode)) { 8302 rval = EFAULT; 8303 fctl_free_ns_cmd(ns_cmd); 8304 mutex_enter(&port->fp_mutex); 8305 break; 8306 } 8307 8308 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8309 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8310 ns_cmd->ns_gan_index, mode)) { 8311 rval = EFAULT; 8312 fctl_free_ns_cmd(ns_cmd); 8313 mutex_enter(&port->fp_mutex); 8314 break; 8315 } 8316 fctl_free_ns_cmd(ns_cmd); 8317 8318 if (fp_fcio_copyout(fcio, data, mode)) { 8319 rval = EFAULT; 8320 } 8321 mutex_enter(&port->fp_mutex); 8322 break; 8323 } 8324 8325 case FC_TOP_NO_NS: 8326 /* FALLTHROUGH */ 8327 case FC_TOP_UNKNOWN: 8328 /* FALLTHROUGH */ 8329 default: 8330 fcio->fcio_errno = FC_NO_MAP; 8331 num_devices = port->fp_total_devices; 8332 8333 if (fp_copyout((void *)&new_count, 8334 (void *)fcio->fcio_abuf, 8335 sizeof (new_count), mode)) { 8336 rval = EFAULT; 8337 break; 8338 } 8339 8340 if (fp_fcio_copyout(fcio, data, mode)) { 8341 rval = EFAULT; 8342 break; 8343 } 8344 rval = EINVAL; 8345 break; 8346 } 8347 mutex_exit(&port->fp_mutex); 8348 break; 8349 } 8350 8351 case FCIO_GET_SYM_PNAME: { 8352 rval = ENOTSUP; 8353 break; 8354 } 8355 8356 case FCIO_GET_SYM_NNAME: { 8357 rval = ENOTSUP; 8358 break; 8359 } 8360 8361 case FCIO_SET_SYM_PNAME: { 8362 rval = ENOTSUP; 8363 break; 8364 } 8365 8366 case FCIO_SET_SYM_NNAME: { 8367 rval = ENOTSUP; 8368 break; 8369 } 8370 8371 case FCIO_GET_LOGI_PARAMS: { 8372 la_wwn_t pwwn; 8373 la_wwn_t *my_pwwn; 8374 la_els_logi_t *params; 8375 la_els_logi32_t *params32; 8376 fc_remote_node_t *node; 8377 fc_remote_port_t *pd; 8378 8379 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8380 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8381 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8382 rval = EINVAL; 8383 break; 8384 } 8385 8386 if (use32 == B_TRUE) { 8387 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8388 rval = EINVAL; 8389 break; 8390 } 8391 } else { 8392 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8393 rval = EINVAL; 8394 break; 8395 } 8396 } 8397 8398 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8399 rval = EFAULT; 8400 break; 8401 } 8402 
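		/*
		 * Look up the remote port by the port WWN supplied by the
		 * caller.  If no such remote port is known, the request may
		 * be about the local port itself, in which case the local
		 * login parameters are returned instead.
		 */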
8403 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8404 if (pd == NULL) { 8405 mutex_enter(&port->fp_mutex); 8406 my_pwwn = &port->fp_service_params.nport_ww_name; 8407 mutex_exit(&port->fp_mutex); 8408 8409 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8410 rval = ENXIO; 8411 break; 8412 } 8413 8414 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8415 mutex_enter(&port->fp_mutex); 8416 *params = port->fp_service_params; 8417 mutex_exit(&port->fp_mutex); 8418 } else { 8419 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8420 8421 mutex_enter(&pd->pd_mutex); 8422 params->ls_code.mbz = params->ls_code.ls_code = 0; 8423 params->common_service = pd->pd_csp; 8424 params->nport_ww_name = pd->pd_port_name; 8425 params->class_1 = pd->pd_clsp1; 8426 params->class_2 = pd->pd_clsp2; 8427 params->class_3 = pd->pd_clsp3; 8428 node = pd->pd_remote_nodep; 8429 mutex_exit(&pd->pd_mutex); 8430 8431 bzero(params->reserved, sizeof (params->reserved)); 8432 8433 mutex_enter(&node->fd_mutex); 8434 bcopy(node->fd_vv, params->vendor_version, 8435 sizeof (node->fd_vv)); 8436 params->node_ww_name = node->fd_node_name; 8437 mutex_exit(&node->fd_mutex); 8438 8439 fctl_release_remote_port(pd); 8440 } 8441 8442 if (use32 == B_TRUE) { 8443 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8444 8445 params32->ls_code.mbz = params->ls_code.mbz; 8446 params32->common_service = params->common_service; 8447 params32->nport_ww_name = params->nport_ww_name; 8448 params32->class_1 = params->class_1; 8449 params32->class_2 = params->class_2; 8450 params32->class_3 = params->class_3; 8451 bzero(params32->reserved, sizeof (params32->reserved)); 8452 bcopy(params->vendor_version, params32->vendor_version, 8453 sizeof (node->fd_vv)); 8454 params32->node_ww_name = params->node_ww_name; 8455 8456 if (ddi_copyout((void *)params32, 8457 (void *)fcio->fcio_obuf, 8458 sizeof (*params32), mode)) { 8459 rval = EFAULT; 8460 } 8461 8462 kmem_free(params32, sizeof (*params32)); 8463 } else { 8464 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8465 sizeof (*params), mode)) { 8466 rval = EFAULT; 8467 } 8468 } 8469 8470 kmem_free(params, sizeof (*params)); 8471 if (fp_fcio_copyout(fcio, data, mode)) { 8472 rval = EFAULT; 8473 } 8474 break; 8475 } 8476 8477 case FCIO_DEV_LOGOUT: 8478 case FCIO_DEV_LOGIN: 8479 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8480 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8481 rval = EINVAL; 8482 8483 if (fp_fcio_copyout(fcio, data, mode)) { 8484 rval = EFAULT; 8485 } 8486 break; 8487 } 8488 8489 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8490 jcode = JOB_FCIO_LOGIN; 8491 } else { 8492 jcode = JOB_FCIO_LOGOUT; 8493 } 8494 8495 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8496 bcopy(fcio, kfcio, sizeof (*fcio)); 8497 8498 if (kfcio->fcio_ilen) { 8499 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8500 KM_SLEEP); 8501 8502 if (ddi_copyin((void *)fcio->fcio_ibuf, 8503 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8504 mode)) { 8505 rval = EFAULT; 8506 8507 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8508 kmem_free(kfcio, sizeof (*kfcio)); 8509 fcio->fcio_errno = job->job_result; 8510 if (fp_fcio_copyout(fcio, data, mode)) { 8511 rval = EFAULT; 8512 } 8513 break; 8514 } 8515 } 8516 8517 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8518 job->job_private = kfcio; 8519 8520 fctl_enque_job(port, job); 8521 fctl_jobwait(job); 8522 8523 rval = job->job_result; 8524 8525 fcio->fcio_errno = kfcio->fcio_errno; 8526 if (fp_fcio_copyout(fcio, data, mode)) { 8527 rval = EFAULT; 8528 } 8529 8530 
kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8531 kmem_free(kfcio, sizeof (*kfcio)); 8532 fctl_dealloc_job(job); 8533 break; 8534 8535 case FCIO_GET_STATE: { 8536 la_wwn_t pwwn; 8537 uint32_t state; 8538 fc_remote_port_t *pd; 8539 fctl_ns_req_t *ns_cmd; 8540 8541 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8542 fcio->fcio_olen != sizeof (state) || 8543 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8544 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8545 rval = EINVAL; 8546 break; 8547 } 8548 8549 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8550 rval = EFAULT; 8551 break; 8552 } 8553 fcio->fcio_errno = 0; 8554 8555 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8556 if (pd == NULL) { 8557 mutex_enter(&port->fp_mutex); 8558 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8559 mutex_exit(&port->fp_mutex); 8560 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8561 NULL, NULL, KM_SLEEP); 8562 8563 job->job_counter = 1; 8564 job->job_result = FC_SUCCESS; 8565 8566 ns_cmd = fctl_alloc_ns_cmd( 8567 sizeof (ns_req_gid_pn_t), 8568 sizeof (ns_resp_gid_pn_t), 8569 sizeof (ns_resp_gid_pn_t), 8570 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8571 ASSERT(ns_cmd != NULL); 8572 8573 ns_cmd->ns_cmd_code = NS_GID_PN; 8574 ((ns_req_gid_pn_t *) 8575 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8576 8577 ret = fp_ns_query(port, ns_cmd, job, 8578 1, KM_SLEEP); 8579 8580 if (ret != FC_SUCCESS || job->job_result != 8581 FC_SUCCESS) { 8582 if (ret != FC_SUCCESS) { 8583 fcio->fcio_errno = ret; 8584 } else { 8585 fcio->fcio_errno = 8586 job->job_result; 8587 } 8588 rval = EIO; 8589 } else { 8590 state = PORT_DEVICE_INVALID; 8591 } 8592 fctl_free_ns_cmd(ns_cmd); 8593 fctl_dealloc_job(job); 8594 } else { 8595 mutex_exit(&port->fp_mutex); 8596 fcio->fcio_errno = FC_BADWWN; 8597 rval = ENXIO; 8598 } 8599 } else { 8600 mutex_enter(&pd->pd_mutex); 8601 state = pd->pd_state; 8602 mutex_exit(&pd->pd_mutex); 8603 8604 fctl_release_remote_port(pd); 8605 } 8606 8607 if (!rval) { 8608 if (ddi_copyout((void *)&state, 8609 (void *)fcio->fcio_obuf, sizeof (state), 8610 mode)) { 8611 rval = EFAULT; 8612 } 8613 } 8614 if (fp_fcio_copyout(fcio, data, mode)) { 8615 rval = EFAULT; 8616 } 8617 break; 8618 } 8619 8620 case FCIO_DEV_REMOVE: { 8621 la_wwn_t pwwn; 8622 fc_portmap_t *changelist; 8623 fc_remote_port_t *pd; 8624 8625 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8626 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8627 rval = EINVAL; 8628 break; 8629 } 8630 8631 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8632 rval = EFAULT; 8633 break; 8634 } 8635 8636 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8637 if (pd == NULL) { 8638 rval = ENXIO; 8639 fcio->fcio_errno = FC_BADWWN; 8640 if (fp_fcio_copyout(fcio, data, mode)) { 8641 rval = EFAULT; 8642 } 8643 break; 8644 } 8645 8646 mutex_enter(&pd->pd_mutex); 8647 if (pd->pd_ref_count > 1) { 8648 mutex_exit(&pd->pd_mutex); 8649 8650 rval = EBUSY; 8651 fcio->fcio_errno = FC_FAILURE; 8652 fctl_release_remote_port(pd); 8653 8654 if (fp_fcio_copyout(fcio, data, mode)) { 8655 rval = EFAULT; 8656 } 8657 break; 8658 } 8659 mutex_exit(&pd->pd_mutex); 8660 8661 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8662 8663 fctl_copy_portmap(changelist, pd); 8664 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8665 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8666 8667 fctl_release_remote_port(pd); 8668 break; 8669 } 8670 8671 case FCIO_GET_FCODE_REV: { 8672 caddr_t fcode_rev; 8673 fc_fca_pm_t pm; 8674 8675 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8676 fcio->fcio_xfer != 
FCIO_XFER_READ) { 8677 rval = EINVAL; 8678 break; 8679 } 8680 bzero((caddr_t)&pm, sizeof (pm)); 8681 8682 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8683 8684 pm.pm_cmd_flags = FC_FCA_PM_READ; 8685 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8686 pm.pm_data_len = fcio->fcio_olen; 8687 pm.pm_data_buf = fcode_rev; 8688 8689 ret = port->fp_fca_tran->fca_port_manage( 8690 port->fp_fca_handle, &pm); 8691 8692 if (ret == FC_SUCCESS) { 8693 if (ddi_copyout((void *)fcode_rev, 8694 (void *)fcio->fcio_obuf, 8695 fcio->fcio_olen, mode) == 0) { 8696 if (fp_fcio_copyout(fcio, data, mode)) { 8697 rval = EFAULT; 8698 } 8699 } else { 8700 rval = EFAULT; 8701 } 8702 } else { 8703 /* 8704 * check if buffer was not large enough to obtain 8705 * FCODE version. 8706 */ 8707 if (pm.pm_data_len > fcio->fcio_olen) { 8708 rval = ENOMEM; 8709 } else { 8710 rval = EIO; 8711 } 8712 fcio->fcio_errno = ret; 8713 if (fp_fcio_copyout(fcio, data, mode)) { 8714 rval = EFAULT; 8715 } 8716 } 8717 kmem_free(fcode_rev, fcio->fcio_olen); 8718 break; 8719 } 8720 8721 case FCIO_GET_FW_REV: { 8722 caddr_t fw_rev; 8723 fc_fca_pm_t pm; 8724 8725 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8726 fcio->fcio_xfer != FCIO_XFER_READ) { 8727 rval = EINVAL; 8728 break; 8729 } 8730 bzero((caddr_t)&pm, sizeof (pm)); 8731 8732 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8733 8734 pm.pm_cmd_flags = FC_FCA_PM_READ; 8735 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8736 pm.pm_data_len = fcio->fcio_olen; 8737 pm.pm_data_buf = fw_rev; 8738 8739 ret = port->fp_fca_tran->fca_port_manage( 8740 port->fp_fca_handle, &pm); 8741 8742 if (ret == FC_SUCCESS) { 8743 if (ddi_copyout((void *)fw_rev, 8744 (void *)fcio->fcio_obuf, 8745 fcio->fcio_olen, mode) == 0) { 8746 if (fp_fcio_copyout(fcio, data, mode)) { 8747 rval = EFAULT; 8748 } 8749 } else { 8750 rval = EFAULT; 8751 } 8752 } else { 8753 if (fp_fcio_copyout(fcio, data, mode)) { 8754 rval = EFAULT; 8755 } 8756 rval = EIO; 8757 } 8758 kmem_free(fw_rev, fcio->fcio_olen); 8759 break; 8760 } 8761 8762 case FCIO_GET_DUMP_SIZE: { 8763 uint32_t dump_size; 8764 fc_fca_pm_t pm; 8765 8766 if (fcio->fcio_olen != sizeof (dump_size) || 8767 fcio->fcio_xfer != FCIO_XFER_READ) { 8768 rval = EINVAL; 8769 break; 8770 } 8771 bzero((caddr_t)&pm, sizeof (pm)); 8772 pm.pm_cmd_flags = FC_FCA_PM_READ; 8773 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8774 pm.pm_data_len = sizeof (dump_size); 8775 pm.pm_data_buf = (caddr_t)&dump_size; 8776 8777 ret = port->fp_fca_tran->fca_port_manage( 8778 port->fp_fca_handle, &pm); 8779 8780 if (ret == FC_SUCCESS) { 8781 if (ddi_copyout((void *)&dump_size, 8782 (void *)fcio->fcio_obuf, sizeof (dump_size), 8783 mode) == 0) { 8784 if (fp_fcio_copyout(fcio, data, mode)) { 8785 rval = EFAULT; 8786 } 8787 } else { 8788 rval = EFAULT; 8789 } 8790 } else { 8791 fcio->fcio_errno = ret; 8792 rval = EIO; 8793 if (fp_fcio_copyout(fcio, data, mode)) { 8794 rval = EFAULT; 8795 } 8796 } 8797 break; 8798 } 8799 8800 case FCIO_DOWNLOAD_FW: { 8801 caddr_t firmware; 8802 fc_fca_pm_t pm; 8803 8804 if (fcio->fcio_ilen <= 0 || 8805 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8806 rval = EINVAL; 8807 break; 8808 } 8809 8810 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8811 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8812 fcio->fcio_ilen, mode)) { 8813 rval = EFAULT; 8814 kmem_free(firmware, fcio->fcio_ilen); 8815 break; 8816 } 8817 8818 bzero((caddr_t)&pm, sizeof (pm)); 8819 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8820 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8821 pm.pm_data_len = fcio->fcio_ilen; 8822 pm.pm_data_buf = firmware; 
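		/*
		 * The firmware image now sits in a kernel scratch buffer
		 * described by the fc_fca_pm_t above; the FCA driver's
		 * fca_port_manage() entry point performs the actual download,
		 * and the scratch copy is freed regardless of the outcome.
		 */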
8823 8824 ret = port->fp_fca_tran->fca_port_manage( 8825 port->fp_fca_handle, &pm); 8826 8827 kmem_free(firmware, fcio->fcio_ilen); 8828 8829 if (ret != FC_SUCCESS) { 8830 fcio->fcio_errno = ret; 8831 rval = EIO; 8832 if (fp_fcio_copyout(fcio, data, mode)) { 8833 rval = EFAULT; 8834 } 8835 } 8836 break; 8837 } 8838 8839 case FCIO_DOWNLOAD_FCODE: { 8840 caddr_t fcode; 8841 fc_fca_pm_t pm; 8842 8843 if (fcio->fcio_ilen <= 0 || 8844 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8845 rval = EINVAL; 8846 break; 8847 } 8848 8849 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8850 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8851 fcio->fcio_ilen, mode)) { 8852 rval = EFAULT; 8853 kmem_free(fcode, fcio->fcio_ilen); 8854 break; 8855 } 8856 8857 bzero((caddr_t)&pm, sizeof (pm)); 8858 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8859 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8860 pm.pm_data_len = fcio->fcio_ilen; 8861 pm.pm_data_buf = fcode; 8862 8863 ret = port->fp_fca_tran->fca_port_manage( 8864 port->fp_fca_handle, &pm); 8865 8866 kmem_free(fcode, fcio->fcio_ilen); 8867 8868 if (ret != FC_SUCCESS) { 8869 fcio->fcio_errno = ret; 8870 rval = EIO; 8871 if (fp_fcio_copyout(fcio, data, mode)) { 8872 rval = EFAULT; 8873 } 8874 } 8875 break; 8876 } 8877 8878 case FCIO_FORCE_DUMP: 8879 ret = port->fp_fca_tran->fca_reset( 8880 port->fp_fca_handle, FC_FCA_CORE); 8881 8882 if (ret != FC_SUCCESS) { 8883 fcio->fcio_errno = ret; 8884 rval = EIO; 8885 if (fp_fcio_copyout(fcio, data, mode)) { 8886 rval = EFAULT; 8887 } 8888 } 8889 break; 8890 8891 case FCIO_GET_DUMP: { 8892 caddr_t dump; 8893 uint32_t dump_size; 8894 fc_fca_pm_t pm; 8895 8896 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8897 rval = EINVAL; 8898 break; 8899 } 8900 bzero((caddr_t)&pm, sizeof (pm)); 8901 8902 pm.pm_cmd_flags = FC_FCA_PM_READ; 8903 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8904 pm.pm_data_len = sizeof (dump_size); 8905 pm.pm_data_buf = (caddr_t)&dump_size; 8906 8907 ret = port->fp_fca_tran->fca_port_manage( 8908 port->fp_fca_handle, &pm); 8909 8910 if (ret != FC_SUCCESS) { 8911 fcio->fcio_errno = ret; 8912 rval = EIO; 8913 if (fp_fcio_copyout(fcio, data, mode)) { 8914 rval = EFAULT; 8915 } 8916 break; 8917 } 8918 if (fcio->fcio_olen != dump_size) { 8919 fcio->fcio_errno = FC_NOMEM; 8920 rval = EINVAL; 8921 if (fp_fcio_copyout(fcio, data, mode)) { 8922 rval = EFAULT; 8923 } 8924 break; 8925 } 8926 8927 dump = kmem_zalloc(dump_size, KM_SLEEP); 8928 8929 bzero((caddr_t)&pm, sizeof (pm)); 8930 pm.pm_cmd_flags = FC_FCA_PM_READ; 8931 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8932 pm.pm_data_len = dump_size; 8933 pm.pm_data_buf = dump; 8934 8935 ret = port->fp_fca_tran->fca_port_manage( 8936 port->fp_fca_handle, &pm); 8937 8938 if (ret == FC_SUCCESS) { 8939 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8940 dump_size, mode) == 0) { 8941 if (fp_fcio_copyout(fcio, data, mode)) { 8942 rval = EFAULT; 8943 } 8944 } else { 8945 rval = EFAULT; 8946 } 8947 } else { 8948 fcio->fcio_errno = ret; 8949 rval = EIO; 8950 if (fp_fcio_copyout(fcio, data, mode)) { 8951 rval = EFAULT; 8952 } 8953 } 8954 kmem_free(dump, dump_size); 8955 break; 8956 } 8957 8958 case FCIO_GET_TOPOLOGY: { 8959 uint32_t user_topology; 8960 8961 if (fcio->fcio_xfer != FCIO_XFER_READ || 8962 fcio->fcio_olen != sizeof (user_topology)) { 8963 rval = EINVAL; 8964 break; 8965 } 8966 8967 mutex_enter(&port->fp_mutex); 8968 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8969 user_topology = FC_TOP_UNKNOWN; 8970 } else { 8971 user_topology = port->fp_topology; 8972 } 8973 mutex_exit(&port->fp_mutex); 
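	/*
	 * An offline port deliberately reports FC_TOP_UNKNOWN instead of a
	 * stale topology value.  A minimal userland sketch of this ioctl
	 * (illustrative only; the device path, open() and the FCIO_CMD
	 * wrapper are outside this driver and error handling is omitted):
	 *
	 *	fcio_t		fcio;
	 *	uint32_t	topology;
	 *
	 *	bzero(&fcio, sizeof (fcio));
	 *	fcio.fcio_cmd  = FCIO_GET_TOPOLOGY;
	 *	fcio.fcio_xfer = FCIO_XFER_READ;
	 *	fcio.fcio_olen = sizeof (topology);
	 *	fcio.fcio_obuf = (caddr_t)&topology;
	 *	(void) ioctl(fd, FCIO_CMD, &fcio);
	 */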
8974 8975 if (ddi_copyout((void *)&user_topology, 8976 (void *)fcio->fcio_obuf, sizeof (user_topology), 8977 mode)) { 8978 rval = EFAULT; 8979 } 8980 break; 8981 } 8982 8983 case FCIO_RESET_LINK: { 8984 la_wwn_t pwwn; 8985 8986 /* 8987 * Look at the output buffer field; if this field has zero 8988 * bytes then attempt to reset the local link/loop. If the 8989 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 8990 * and if yes, determine the LFA and reset the remote LIP 8991 * by LINIT ELS. 8992 */ 8993 8994 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 8995 fcio->fcio_ilen != sizeof (pwwn)) { 8996 rval = EINVAL; 8997 break; 8998 } 8999 9000 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9001 sizeof (pwwn), mode)) { 9002 rval = EFAULT; 9003 break; 9004 } 9005 9006 mutex_enter(&port->fp_mutex); 9007 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9008 mutex_exit(&port->fp_mutex); 9009 break; 9010 } 9011 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9012 mutex_exit(&port->fp_mutex); 9013 9014 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9015 if (job == NULL) { 9016 rval = ENOMEM; 9017 break; 9018 } 9019 job->job_counter = 1; 9020 job->job_private = (void *)&pwwn; 9021 9022 fctl_enque_job(port, job); 9023 fctl_jobwait(job); 9024 9025 mutex_enter(&port->fp_mutex); 9026 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9027 mutex_exit(&port->fp_mutex); 9028 9029 if (job->job_result != FC_SUCCESS) { 9030 fcio->fcio_errno = job->job_result; 9031 rval = EIO; 9032 if (fp_fcio_copyout(fcio, data, mode)) { 9033 rval = EFAULT; 9034 } 9035 } 9036 fctl_dealloc_job(job); 9037 break; 9038 } 9039 9040 case FCIO_RESET_HARD: 9041 ret = port->fp_fca_tran->fca_reset( 9042 port->fp_fca_handle, FC_FCA_RESET); 9043 if (ret != FC_SUCCESS) { 9044 fcio->fcio_errno = ret; 9045 rval = EIO; 9046 if (fp_fcio_copyout(fcio, data, mode)) { 9047 rval = EFAULT; 9048 } 9049 } 9050 break; 9051 9052 case FCIO_RESET_HARD_CORE: 9053 ret = port->fp_fca_tran->fca_reset( 9054 port->fp_fca_handle, FC_FCA_RESET_CORE); 9055 if (ret != FC_SUCCESS) { 9056 rval = EIO; 9057 fcio->fcio_errno = ret; 9058 if (fp_fcio_copyout(fcio, data, mode)) { 9059 rval = EFAULT; 9060 } 9061 } 9062 break; 9063 9064 case FCIO_DIAG: { 9065 fc_fca_pm_t pm; 9066 9067 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9068 9069 /* Validate user buffer from ioctl call. 
*/ 9070 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9071 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9072 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9073 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9074 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9075 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9076 rval = EFAULT; 9077 break; 9078 } 9079 9080 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9081 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9082 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9083 fcio->fcio_ilen, mode)) { 9084 rval = EFAULT; 9085 goto fp_fcio_diag_cleanup; 9086 } 9087 } 9088 9089 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9090 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9091 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9092 fcio->fcio_alen, mode)) { 9093 rval = EFAULT; 9094 goto fp_fcio_diag_cleanup; 9095 } 9096 } 9097 9098 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9099 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9100 } 9101 9102 pm.pm_cmd_code = FC_PORT_DIAG; 9103 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9104 9105 ret = port->fp_fca_tran->fca_port_manage( 9106 port->fp_fca_handle, &pm); 9107 9108 if (ret != FC_SUCCESS) { 9109 if (ret == FC_INVALID_REQUEST) { 9110 rval = ENOTTY; 9111 } else { 9112 rval = EIO; 9113 } 9114 9115 fcio->fcio_errno = ret; 9116 if (fp_fcio_copyout(fcio, data, mode)) { 9117 rval = EFAULT; 9118 } 9119 goto fp_fcio_diag_cleanup; 9120 } 9121 9122 /* 9123 * pm_stat_len will contain the number of status bytes 9124 * an FCA driver requires to return the complete status 9125 * of the requested diag operation. If the user buffer 9126 * is not large enough to hold the entire status, we 9127 * copy only the portion of the data that fits in the buffer and 9128 * return ENOMEM to the user application.
9129 */ 9130 if (pm.pm_stat_len > fcio->fcio_olen) { 9131 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9132 "fp:FCIO_DIAG:status buffer too small\n"); 9133 9134 rval = ENOMEM; 9135 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9136 fcio->fcio_olen, mode)) { 9137 rval = EFAULT; 9138 goto fp_fcio_diag_cleanup; 9139 } 9140 } else { 9141 /* 9142 * Copy only data pm_stat_len bytes of data 9143 */ 9144 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9145 pm.pm_stat_len, mode)) { 9146 rval = EFAULT; 9147 goto fp_fcio_diag_cleanup; 9148 } 9149 } 9150 9151 if (fp_fcio_copyout(fcio, data, mode)) { 9152 rval = EFAULT; 9153 } 9154 9155 fp_fcio_diag_cleanup: 9156 if (pm.pm_cmd_buf != NULL) { 9157 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9158 } 9159 if (pm.pm_data_buf != NULL) { 9160 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9161 } 9162 if (pm.pm_stat_buf != NULL) { 9163 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9164 } 9165 9166 break; 9167 } 9168 9169 case FCIO_GET_NODE_ID: { 9170 /* validate parameters */ 9171 if (fcio->fcio_xfer != FCIO_XFER_READ || 9172 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9173 rval = EINVAL; 9174 break; 9175 } 9176 9177 rval = fp_get_rnid(port, data, mode, fcio); 9178 9179 /* ioctl handling is over */ 9180 break; 9181 } 9182 9183 case FCIO_SEND_NODE_ID: { 9184 la_wwn_t pwwn; 9185 9186 /* validate parameters */ 9187 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9188 fcio->fcio_xfer != FCIO_XFER_READ) { 9189 rval = EINVAL; 9190 break; 9191 } 9192 9193 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9194 sizeof (la_wwn_t), mode)) { 9195 rval = EFAULT; 9196 break; 9197 } 9198 9199 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9200 9201 /* ioctl handling is over */ 9202 break; 9203 } 9204 9205 case FCIO_SET_NODE_ID: { 9206 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9207 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9208 rval = EINVAL; 9209 break; 9210 } 9211 9212 rval = fp_set_rnid(port, data, mode, fcio); 9213 break; 9214 } 9215 9216 case FCIO_LINK_STATUS: { 9217 fc_portid_t rls_req; 9218 fc_rls_acc_t *rls_acc; 9219 fc_fca_pm_t pm; 9220 uint32_t dest, src_id; 9221 fp_cmd_t *cmd; 9222 fc_remote_port_t *pd; 9223 uchar_t pd_flags; 9224 9225 /* validate parameters */ 9226 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9227 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9228 fcio->fcio_xfer != FCIO_XFER_RW) { 9229 rval = EINVAL; 9230 break; 9231 } 9232 9233 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9234 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9235 rval = EINVAL; 9236 break; 9237 } 9238 9239 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9240 sizeof (fc_portid_t), mode)) { 9241 rval = EFAULT; 9242 break; 9243 } 9244 9245 9246 /* Determine the destination of the RLS frame */ 9247 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9248 dest = FS_FABRIC_F_PORT; 9249 } else { 9250 dest = rls_req.port_id; 9251 } 9252 9253 mutex_enter(&port->fp_mutex); 9254 src_id = port->fp_port_id.port_id; 9255 mutex_exit(&port->fp_mutex); 9256 9257 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9258 if (dest == 0 || dest == src_id) { 9259 9260 /* Allocate memory for link error status block */ 9261 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9262 ASSERT(rls_acc != NULL); 9263 9264 /* Prepare the port management structure */ 9265 bzero((caddr_t)&pm, sizeof (pm)); 9266 9267 pm.pm_cmd_flags = FC_FCA_PM_READ; 9268 pm.pm_cmd_code = FC_PORT_RLS; 9269 pm.pm_data_len = sizeof (*rls_acc); 9270 pm.pm_data_buf = (caddr_t)rls_acc; 9271 9272 /* 
Get the adapter's link error status block */ 9273 ret = port->fp_fca_tran->fca_port_manage( 9274 port->fp_fca_handle, &pm); 9275 9276 if (ret == FC_SUCCESS) { 9277 /* xfer link status block to userland */ 9278 if (ddi_copyout((void *)rls_acc, 9279 (void *)fcio->fcio_obuf, 9280 sizeof (*rls_acc), mode) == 0) { 9281 if (fp_fcio_copyout(fcio, data, 9282 mode)) { 9283 rval = EFAULT; 9284 } 9285 } else { 9286 rval = EFAULT; 9287 } 9288 } else { 9289 rval = EIO; 9290 fcio->fcio_errno = ret; 9291 if (fp_fcio_copyout(fcio, data, mode)) { 9292 rval = EFAULT; 9293 } 9294 } 9295 9296 kmem_free(rls_acc, sizeof (*rls_acc)); 9297 9298 /* ioctl handling is over */ 9299 break; 9300 } 9301 9302 /* 9303 * Send RLS to the destination port. 9304 * Having RLS frame destination is as FPORT is not yet 9305 * supported and will be implemented in future, if needed. 9306 * Following call to get "pd" will fail if dest is FPORT 9307 */ 9308 pd = fctl_hold_remote_port_by_did(port, dest); 9309 if (pd == NULL) { 9310 fcio->fcio_errno = FC_BADOBJECT; 9311 rval = ENXIO; 9312 if (fp_fcio_copyout(fcio, data, mode)) { 9313 rval = EFAULT; 9314 } 9315 break; 9316 } 9317 9318 mutex_enter(&pd->pd_mutex); 9319 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9320 mutex_exit(&pd->pd_mutex); 9321 fctl_release_remote_port(pd); 9322 9323 fcio->fcio_errno = FC_LOGINREQ; 9324 rval = EINVAL; 9325 if (fp_fcio_copyout(fcio, data, mode)) { 9326 rval = EFAULT; 9327 } 9328 break; 9329 } 9330 ASSERT(pd->pd_login_count >= 1); 9331 mutex_exit(&pd->pd_mutex); 9332 9333 /* 9334 * Allocate job structure and set job_code as DUMMY, 9335 * because we will not go through the job thread. 9336 * Instead fp_sendcmd() is called directly here. 9337 */ 9338 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9339 NULL, NULL, KM_SLEEP); 9340 ASSERT(job != NULL); 9341 9342 job->job_counter = 1; 9343 9344 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9345 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9346 if (cmd == NULL) { 9347 fcio->fcio_errno = FC_NOMEM; 9348 rval = ENOMEM; 9349 9350 fctl_release_remote_port(pd); 9351 9352 fctl_dealloc_job(job); 9353 if (fp_fcio_copyout(fcio, data, mode)) { 9354 rval = EFAULT; 9355 } 9356 break; 9357 } 9358 9359 /* Allocate memory for link error status block */ 9360 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9361 9362 mutex_enter(&port->fp_mutex); 9363 mutex_enter(&pd->pd_mutex); 9364 9365 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9366 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9367 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9368 cmd->cmd_retry_count = 1; 9369 cmd->cmd_ulp_pkt = NULL; 9370 9371 fp_rls_init(cmd, job); 9372 9373 job->job_private = (void *)rls_acc; 9374 9375 pd_flags = pd->pd_flags; 9376 pd->pd_flags = PD_ELS_IN_PROGRESS; 9377 9378 mutex_exit(&pd->pd_mutex); 9379 mutex_exit(&port->fp_mutex); 9380 9381 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9382 fctl_jobwait(job); 9383 9384 fcio->fcio_errno = job->job_result; 9385 if (job->job_result == FC_SUCCESS) { 9386 ASSERT(pd != NULL); 9387 /* 9388 * link error status block is now available. 
9389 * Copy it to userland 9390 */ 9391 ASSERT(job->job_private == (void *)rls_acc); 9392 if (ddi_copyout((void *)rls_acc, 9393 (void *)fcio->fcio_obuf, 9394 sizeof (*rls_acc), mode) == 0) { 9395 if (fp_fcio_copyout(fcio, data, 9396 mode)) { 9397 rval = EFAULT; 9398 } 9399 } else { 9400 rval = EFAULT; 9401 } 9402 } else { 9403 rval = EIO; 9404 } 9405 } else { 9406 rval = EIO; 9407 fp_free_pkt(cmd); 9408 } 9409 9410 if (rval) { 9411 mutex_enter(&port->fp_mutex); 9412 mutex_enter(&pd->pd_mutex); 9413 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9414 pd->pd_flags = pd_flags; 9415 } 9416 mutex_exit(&pd->pd_mutex); 9417 mutex_exit(&port->fp_mutex); 9418 } 9419 9420 fctl_release_remote_port(pd); 9421 fctl_dealloc_job(job); 9422 kmem_free(rls_acc, sizeof (*rls_acc)); 9423 9424 if (fp_fcio_copyout(fcio, data, mode)) { 9425 rval = EFAULT; 9426 } 9427 break; 9428 } 9429 9430 case FCIO_NS: { 9431 fc_ns_cmd_t *ns_req; 9432 fc_ns_cmd32_t *ns_req32; 9433 fctl_ns_req_t *ns_cmd; 9434 9435 if (use32 == B_TRUE) { 9436 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9437 rval = EINVAL; 9438 break; 9439 } 9440 9441 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9442 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9443 9444 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9445 sizeof (*ns_req32), mode)) { 9446 rval = EFAULT; 9447 kmem_free(ns_req, sizeof (*ns_req)); 9448 kmem_free(ns_req32, sizeof (*ns_req32)); 9449 break; 9450 } 9451 9452 ns_req->ns_flags = ns_req32->ns_flags; 9453 ns_req->ns_cmd = ns_req32->ns_cmd; 9454 ns_req->ns_req_len = ns_req32->ns_req_len; 9455 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9456 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9457 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9458 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9459 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9460 9461 kmem_free(ns_req32, sizeof (*ns_req32)); 9462 } else { 9463 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9464 rval = EINVAL; 9465 break; 9466 } 9467 9468 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9469 9470 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9471 sizeof (fc_ns_cmd_t), mode)) { 9472 rval = EFAULT; 9473 kmem_free(ns_req, sizeof (*ns_req)); 9474 break; 9475 } 9476 } 9477 9478 if (ns_req->ns_req_len <= 0) { 9479 rval = EINVAL; 9480 kmem_free(ns_req, sizeof (*ns_req)); 9481 break; 9482 } 9483 9484 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9485 ASSERT(job != NULL); 9486 9487 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9488 ns_req->ns_resp_len, ns_req->ns_resp_len, 9489 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9490 ASSERT(ns_cmd != NULL); 9491 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9492 9493 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9494 ns_cmd->ns_gan_max = 1; 9495 ns_cmd->ns_gan_index = 0; 9496 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9497 } 9498 9499 if (ddi_copyin(ns_req->ns_req_payload, 9500 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9501 rval = EFAULT; 9502 fctl_free_ns_cmd(ns_cmd); 9503 fctl_dealloc_job(job); 9504 kmem_free(ns_req, sizeof (*ns_req)); 9505 break; 9506 } 9507 9508 job->job_private = (void *)ns_cmd; 9509 fctl_enque_job(port, job); 9510 fctl_jobwait(job); 9511 rval = job->job_result; 9512 9513 if (rval == FC_SUCCESS) { 9514 if (ns_req->ns_resp_len) { 9515 if (ddi_copyout(ns_cmd->ns_data_buf, 9516 ns_req->ns_resp_payload, 9517 ns_cmd->ns_data_len, mode)) { 9518 rval = EFAULT; 9519 fctl_free_ns_cmd(ns_cmd); 9520 fctl_dealloc_job(job); 9521 kmem_free(ns_req, sizeof (*ns_req)); 9522 break; 9523 } 9524 } 9525 } else { 9526 rval = EIO; 9527 } 9528 
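		/*
		 * Whether or not the name server command succeeded, the CT
		 * response header is copied back into the scratch request
		 * before the NS command, the job and the scratch request are
		 * released and the fcio header is copied back to the caller.
		 */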
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9529 fctl_free_ns_cmd(ns_cmd); 9530 fctl_dealloc_job(job); 9531 kmem_free(ns_req, sizeof (*ns_req)); 9532 9533 if (fp_fcio_copyout(fcio, data, mode)) { 9534 rval = EFAULT; 9535 } 9536 break; 9537 } 9538 9539 default: 9540 rval = ENOTTY; 9541 break; 9542 } 9543 9544 /* 9545 * If set, reset the EXCL busy bit to 9546 * receive other exclusive access commands 9547 */ 9548 mutex_enter(&port->fp_mutex); 9549 if (port->fp_flag & FP_EXCL_BUSY) { 9550 port->fp_flag &= ~FP_EXCL_BUSY; 9551 } 9552 mutex_exit(&port->fp_mutex); 9553 9554 return (rval); 9555 } 9556 9557 9558 /* 9559 * This function assumes that the response length 9560 * is same regardless of data model (LP32 or LP64) 9561 * which is true for all the ioctls currently 9562 * supported. 9563 */ 9564 static int 9565 fp_copyout(void *from, void *to, size_t len, int mode) 9566 { 9567 return (ddi_copyout(from, to, len, mode)); 9568 } 9569 9570 /* 9571 * This function does the set rnid 9572 */ 9573 static int 9574 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9575 { 9576 int rval = 0; 9577 fc_rnid_t *rnid; 9578 fc_fca_pm_t pm; 9579 9580 /* Allocate memory for node id block */ 9581 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9582 9583 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9584 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9585 kmem_free(rnid, sizeof (fc_rnid_t)); 9586 return (EFAULT); 9587 } 9588 9589 /* Prepare the port management structure */ 9590 bzero((caddr_t)&pm, sizeof (pm)); 9591 9592 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9593 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9594 pm.pm_data_len = sizeof (*rnid); 9595 pm.pm_data_buf = (caddr_t)rnid; 9596 9597 /* Get the adapter's node data */ 9598 rval = port->fp_fca_tran->fca_port_manage( 9599 port->fp_fca_handle, &pm); 9600 9601 if (rval != FC_SUCCESS) { 9602 fcio->fcio_errno = rval; 9603 rval = EIO; 9604 if (fp_fcio_copyout(fcio, data, mode)) { 9605 rval = EFAULT; 9606 } 9607 } else { 9608 mutex_enter(&port->fp_mutex); 9609 /* copy to the port structure */ 9610 bcopy(rnid, &port->fp_rnid_params, 9611 sizeof (port->fp_rnid_params)); 9612 mutex_exit(&port->fp_mutex); 9613 } 9614 9615 kmem_free(rnid, sizeof (fc_rnid_t)); 9616 9617 if (rval != FC_SUCCESS) { 9618 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9619 } 9620 9621 return (rval); 9622 } 9623 9624 /* 9625 * This function does the local pwwn get rnid 9626 */ 9627 static int 9628 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9629 { 9630 fc_rnid_t *rnid; 9631 fc_fca_pm_t pm; 9632 int rval = 0; 9633 uint32_t ret; 9634 9635 /* Allocate memory for rnid data block */ 9636 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9637 9638 mutex_enter(&port->fp_mutex); 9639 if (port->fp_rnid_init == 1) { 9640 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9641 mutex_exit(&port->fp_mutex); 9642 /* xfer node info to userland */ 9643 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9644 sizeof (*rnid), mode) == 0) { 9645 if (fp_fcio_copyout(fcio, data, mode)) { 9646 rval = EFAULT; 9647 } 9648 } else { 9649 rval = EFAULT; 9650 } 9651 9652 kmem_free(rnid, sizeof (fc_rnid_t)); 9653 9654 if (rval != FC_SUCCESS) { 9655 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9656 rval); 9657 } 9658 9659 return (rval); 9660 } 9661 mutex_exit(&port->fp_mutex); 9662 9663 /* Prepare the port management structure */ 9664 bzero((caddr_t)&pm, sizeof (pm)); 9665 9666 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9667 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9668 pm.pm_data_len = sizeof (fc_rnid_t); 9669 pm.pm_data_buf = (caddr_t)rnid; 9670 9671 /* Get the adapter's node data */ 9672 ret = port->fp_fca_tran->fca_port_manage( 9673 port->fp_fca_handle, 9674 &pm); 9675 9676 if (ret == FC_SUCCESS) { 9677 /* initialize in the port_info */ 9678 mutex_enter(&port->fp_mutex); 9679 port->fp_rnid_init = 1; 9680 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9681 mutex_exit(&port->fp_mutex); 9682 9683 /* xfer node info to userland */ 9684 if (ddi_copyout((void *)rnid, 9685 (void *)fcio->fcio_obuf, 9686 sizeof (*rnid), mode) == 0) { 9687 if (fp_fcio_copyout(fcio, data, 9688 mode)) { 9689 rval = EFAULT; 9690 } 9691 } else { 9692 rval = EFAULT; 9693 } 9694 } else { 9695 rval = EIO; 9696 fcio->fcio_errno = ret; 9697 if (fp_fcio_copyout(fcio, data, mode)) { 9698 rval = EFAULT; 9699 } 9700 } 9701 9702 kmem_free(rnid, sizeof (fc_rnid_t)); 9703 9704 if (rval != FC_SUCCESS) { 9705 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9706 } 9707 9708 return (rval); 9709 } 9710 9711 static int 9712 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9713 la_wwn_t *pwwn) 9714 { 9715 int rval = 0; 9716 fc_remote_port_t *pd; 9717 fp_cmd_t *cmd; 9718 job_request_t *job; 9719 la_els_rnid_acc_t *rnid_acc; 9720 9721 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9722 if (pd == NULL) { 9723 /* 9724 * We can safely assume that the destination port 9725 * is logged in. Either the user land will explicitly 9726 * login before issuing RNID ioctl or the device would 9727 * have been configured, meaning already logged in. 9728 */ 9729 9730 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9731 9732 return (ENXIO); 9733 } 9734 /* 9735 * Allocate job structure and set job_code as DUMMY, 9736 * because we will not go thorugh the job thread. 9737 * Instead fp_sendcmd() is called directly here. 9738 */ 9739 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9740 NULL, NULL, KM_SLEEP); 9741 9742 ASSERT(job != NULL); 9743 9744 job->job_counter = 1; 9745 9746 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9747 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9748 if (cmd == NULL) { 9749 fcio->fcio_errno = FC_NOMEM; 9750 rval = ENOMEM; 9751 9752 fctl_dealloc_job(job); 9753 if (fp_fcio_copyout(fcio, data, mode)) { 9754 rval = EFAULT; 9755 } 9756 9757 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9758 9759 return (rval); 9760 } 9761 9762 /* Allocate memory for node id accept block */ 9763 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9764 9765 mutex_enter(&port->fp_mutex); 9766 mutex_enter(&pd->pd_mutex); 9767 9768 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9769 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9770 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9771 cmd->cmd_retry_count = 1; 9772 cmd->cmd_ulp_pkt = NULL; 9773 9774 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9775 9776 job->job_private = (void *)rnid_acc; 9777 9778 pd->pd_flags = PD_ELS_IN_PROGRESS; 9779 9780 mutex_exit(&pd->pd_mutex); 9781 mutex_exit(&port->fp_mutex); 9782 9783 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9784 fctl_jobwait(job); 9785 fcio->fcio_errno = job->job_result; 9786 if (job->job_result == FC_SUCCESS) { 9787 int rnid_cnt; 9788 ASSERT(pd != NULL); 9789 /* 9790 * node id block is now available. 
9791 * Copy it to userland 9792 */ 9793 ASSERT(job->job_private == (void *)rnid_acc); 9794 9795 /* get the response length */ 9796 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9797 rnid_acc->hdr.cmn_len + 9798 rnid_acc->hdr.specific_len; 9799 9800 if (fcio->fcio_olen < rnid_cnt) { 9801 rval = EINVAL; 9802 } else if (ddi_copyout((void *)rnid_acc, 9803 (void *)fcio->fcio_obuf, 9804 rnid_cnt, mode) == 0) { 9805 if (fp_fcio_copyout(fcio, data, 9806 mode)) { 9807 rval = EFAULT; 9808 } 9809 } else { 9810 rval = EFAULT; 9811 } 9812 } else { 9813 rval = EIO; 9814 } 9815 } else { 9816 rval = EIO; 9817 if (pd) { 9818 mutex_enter(&pd->pd_mutex); 9819 pd->pd_flags = PD_IDLE; 9820 mutex_exit(&pd->pd_mutex); 9821 } 9822 fp_free_pkt(cmd); 9823 } 9824 9825 fctl_dealloc_job(job); 9826 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9827 9828 if (fp_fcio_copyout(fcio, data, mode)) { 9829 rval = EFAULT; 9830 } 9831 9832 if (rval != FC_SUCCESS) { 9833 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9834 } 9835 9836 return (rval); 9837 } 9838 9839 /* 9840 * Copy out to userland 9841 */ 9842 static int 9843 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9844 { 9845 int rval; 9846 9847 #ifdef _MULTI_DATAMODEL 9848 switch (ddi_model_convert_from(mode & FMODELS)) { 9849 case DDI_MODEL_ILP32: { 9850 struct fcio32 fcio32; 9851 9852 fcio32.fcio_xfer = fcio->fcio_xfer; 9853 fcio32.fcio_cmd = fcio->fcio_cmd; 9854 fcio32.fcio_flags = fcio->fcio_flags; 9855 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9856 fcio32.fcio_ilen = fcio->fcio_ilen; 9857 fcio32.fcio_ibuf = 9858 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9859 fcio32.fcio_olen = fcio->fcio_olen; 9860 fcio32.fcio_obuf = 9861 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9862 fcio32.fcio_alen = fcio->fcio_alen; 9863 fcio32.fcio_abuf = 9864 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9865 fcio32.fcio_errno = fcio->fcio_errno; 9866 9867 rval = ddi_copyout((void *)&fcio32, (void *)data, 9868 sizeof (struct fcio32), mode); 9869 break; 9870 } 9871 case DDI_MODEL_NONE: 9872 rval = ddi_copyout((void *)fcio, (void *)data, 9873 sizeof (fcio_t), mode); 9874 break; 9875 } 9876 #else 9877 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9878 #endif 9879 9880 return (rval); 9881 } 9882 9883 9884 static void 9885 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9886 { 9887 uint32_t listlen; 9888 fc_portmap_t *changelist; 9889 9890 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9891 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9892 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9893 9894 listlen = 0; 9895 changelist = NULL; 9896 9897 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9898 if (port->fp_statec_busy > 1) { 9899 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9900 } 9901 } 9902 mutex_exit(&port->fp_mutex); 9903 9904 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9905 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9906 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9907 listlen, listlen, KM_SLEEP); 9908 9909 mutex_enter(&port->fp_mutex); 9910 } else { 9911 ASSERT(changelist == NULL && listlen == 0); 9912 mutex_enter(&port->fp_mutex); 9913 if (--port->fp_statec_busy == 0) { 9914 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9915 } 9916 } 9917 } 9918 9919 static int 9920 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9921 { 9922 int rval; 9923 int count; 9924 int index; 9925 int num_devices; 9926 fc_remote_node_t *node; 9927 fc_port_dev_t *devlist; 9928 struct pwwn_hash 
*head; 9929 fc_remote_port_t *pd; 9930 9931 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9932 9933 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9934 9935 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9936 9937 for (count = index = 0; index < pwwn_table_size; index++) { 9938 head = &port->fp_pwwn_table[index]; 9939 pd = head->pwwn_head; 9940 while (pd != NULL) { 9941 mutex_enter(&pd->pd_mutex); 9942 if (pd->pd_state == PORT_DEVICE_INVALID) { 9943 mutex_exit(&pd->pd_mutex); 9944 pd = pd->pd_wwn_hnext; 9945 continue; 9946 } 9947 9948 devlist[count].dev_state = pd->pd_state; 9949 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9950 devlist[count].dev_did = pd->pd_port_id; 9951 devlist[count].dev_did.priv_lilp_posit = 9952 (uint8_t)(index & 0xff); 9953 bcopy((caddr_t)pd->pd_fc4types, 9954 (caddr_t)devlist[count].dev_type, 9955 sizeof (pd->pd_fc4types)); 9956 9957 bcopy((caddr_t)&pd->pd_port_name, 9958 (caddr_t)&devlist[count].dev_pwwn, 9959 sizeof (la_wwn_t)); 9960 9961 node = pd->pd_remote_nodep; 9962 mutex_exit(&pd->pd_mutex); 9963 9964 if (node) { 9965 mutex_enter(&node->fd_mutex); 9966 bcopy((caddr_t)&node->fd_node_name, 9967 (caddr_t)&devlist[count].dev_nwwn, 9968 sizeof (la_wwn_t)); 9969 mutex_exit(&node->fd_mutex); 9970 } 9971 count++; 9972 if (count >= num_devices) { 9973 goto found; 9974 } 9975 } 9976 } 9977 found: 9978 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9979 sizeof (count), mode)) { 9980 rval = FC_FAILURE; 9981 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9982 sizeof (fc_port_dev_t) * num_devices, mode)) { 9983 rval = FC_FAILURE; 9984 } else { 9985 rval = FC_SUCCESS; 9986 } 9987 9988 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9989 9990 return (rval); 9991 } 9992 9993 9994 /* 9995 * Handle Fabric ONLINE 9996 */ 9997 static void 9998 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 9999 { 10000 int index; 10001 int rval; 10002 int dbg_count; 10003 int count = 0; 10004 char ww_name[17]; 10005 uint32_t d_id; 10006 uint32_t listlen; 10007 fctl_ns_req_t *ns_cmd; 10008 struct pwwn_hash *head; 10009 fc_remote_port_t *pd; 10010 fc_remote_port_t *npd; 10011 fc_portmap_t *changelist; 10012 10013 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10014 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10015 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10016 10017 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10018 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10019 0, KM_SLEEP); 10020 10021 ASSERT(ns_cmd != NULL); 10022 10023 ns_cmd->ns_cmd_code = NS_GID_PN; 10024 10025 /* 10026 * Check if orphans are showing up now 10027 */ 10028 if (port->fp_orphan_count) { 10029 fc_orphan_t *orp; 10030 fc_orphan_t *norp = NULL; 10031 fc_orphan_t *prev = NULL; 10032 10033 for (orp = port->fp_orphan_list; orp; orp = norp) { 10034 norp = orp->orp_next; 10035 mutex_exit(&port->fp_mutex); 10036 orp->orp_nscan++; 10037 10038 job->job_counter = 1; 10039 job->job_result = FC_SUCCESS; 10040 10041 ((ns_req_gid_pn_t *) 10042 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10043 ((ns_resp_gid_pn_t *) 10044 ns_cmd->ns_data_buf)->pid.port_id = 0; 10045 ((ns_resp_gid_pn_t *) 10046 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10047 10048 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10049 if (rval == FC_SUCCESS) { 10050 d_id = 10051 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10052 pd = fp_create_remote_port_by_ns(port, 10053 d_id, KM_SLEEP); 10054 10055 if (pd != NULL) { 10056 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10057 
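			/*
			 * The orphan's port WWN resolved to a D_ID through
			 * the name server: log the reappearance, unlink the
			 * entry from the orphan list and mark the new remote
			 * port PD_ELS_MARK so the PLOGI pass further below
			 * picks it up.
			 */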
10058 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10059 0, NULL, "N_x Port with D_ID=%x," 10060 " PWWN=%s reappeared in fabric", 10061 d_id, ww_name); 10062 10063 mutex_enter(&port->fp_mutex); 10064 if (prev) { 10065 prev->orp_next = orp->orp_next; 10066 } else { 10067 ASSERT(orp == 10068 port->fp_orphan_list); 10069 port->fp_orphan_list = 10070 orp->orp_next; 10071 } 10072 port->fp_orphan_count--; 10073 mutex_exit(&port->fp_mutex); 10074 kmem_free(orp, sizeof (*orp)); 10075 count++; 10076 10077 mutex_enter(&pd->pd_mutex); 10078 pd->pd_flags = PD_ELS_MARK; 10079 10080 mutex_exit(&pd->pd_mutex); 10081 } else { 10082 prev = orp; 10083 } 10084 } else { 10085 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10086 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10087 10088 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10089 NULL, 10090 " Port WWN %s removed from orphan" 10091 " list after %d scans", ww_name, 10092 orp->orp_nscan); 10093 10094 mutex_enter(&port->fp_mutex); 10095 if (prev) { 10096 prev->orp_next = orp->orp_next; 10097 } else { 10098 ASSERT(orp == 10099 port->fp_orphan_list); 10100 port->fp_orphan_list = 10101 orp->orp_next; 10102 } 10103 port->fp_orphan_count--; 10104 mutex_exit(&port->fp_mutex); 10105 10106 kmem_free(orp, sizeof (*orp)); 10107 } else { 10108 prev = orp; 10109 } 10110 } 10111 mutex_enter(&port->fp_mutex); 10112 } 10113 } 10114 10115 /* 10116 * Walk the Port WWN hash table, reestablish LOGIN 10117 * if a LOGIN is already performed on a particular 10118 * device; Any failure to LOGIN should mark the 10119 * port device OLD. 10120 */ 10121 for (index = 0; index < pwwn_table_size; index++) { 10122 head = &port->fp_pwwn_table[index]; 10123 npd = head->pwwn_head; 10124 10125 while ((pd = npd) != NULL) { 10126 la_wwn_t *pwwn; 10127 10128 npd = pd->pd_wwn_hnext; 10129 10130 /* 10131 * Don't count in the port devices that are new 10132 * unless the total number of devices visible 10133 * through this port is less than FP_MAX_DEVICES 10134 */ 10135 mutex_enter(&pd->pd_mutex); 10136 if (port->fp_dev_count >= FP_MAX_DEVICES || 10137 (port->fp_options & FP_TARGET_MODE)) { 10138 if (pd->pd_type == PORT_DEVICE_NEW || 10139 pd->pd_flags == PD_ELS_MARK || 10140 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10141 mutex_exit(&pd->pd_mutex); 10142 continue; 10143 } 10144 } else { 10145 if (pd->pd_flags == PD_ELS_MARK || 10146 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10147 mutex_exit(&pd->pd_mutex); 10148 continue; 10149 } 10150 pd->pd_type = PORT_DEVICE_OLD; 10151 } 10152 count++; 10153 10154 /* 10155 * Consult with the name server about D_ID changes 10156 */ 10157 job->job_counter = 1; 10158 job->job_result = FC_SUCCESS; 10159 10160 ((ns_req_gid_pn_t *) 10161 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10162 ((ns_resp_gid_pn_t *) 10163 ns_cmd->ns_data_buf)->pid.port_id = 0; 10164 10165 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10166 pid.priv_lilp_posit = 0; 10167 10168 pwwn = &pd->pd_port_name; 10169 pd->pd_flags = PD_ELS_MARK; 10170 10171 mutex_exit(&pd->pd_mutex); 10172 mutex_exit(&port->fp_mutex); 10173 10174 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10175 if (rval != FC_SUCCESS) { 10176 fc_wwn_to_str(pwwn, ww_name); 10177 10178 mutex_enter(&pd->pd_mutex); 10179 d_id = pd->pd_port_id.port_id; 10180 pd->pd_type = PORT_DEVICE_DELETE; 10181 mutex_exit(&pd->pd_mutex); 10182 10183 FP_TRACE(FP_NHEAD1(3, 0), 10184 "fp_fabric_online: PD " 10185 "disappeared; d_id=%x, PWWN=%s", 10186 d_id, ww_name); 10187 10188 FP_TRACE(FP_NHEAD2(9, 0), 10189 "N_x Port with D_ID=%x, PWWN=%s" 10190 " disappeared 
from fabric", d_id, 10191 ww_name); 10192 10193 mutex_enter(&port->fp_mutex); 10194 continue; 10195 } 10196 10197 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10198 10199 mutex_enter(&port->fp_mutex); 10200 mutex_enter(&pd->pd_mutex); 10201 if (d_id != pd->pd_port_id.port_id) { 10202 fctl_delist_did_table(port, pd); 10203 fc_wwn_to_str(pwwn, ww_name); 10204 10205 FP_TRACE(FP_NHEAD2(9, 0), 10206 "D_ID of a device with PWWN %s changed." 10207 " New D_ID = %x, OLD D_ID = %x", ww_name, 10208 d_id, pd->pd_port_id.port_id); 10209 10210 pd->pd_port_id.port_id = BE_32(d_id); 10211 pd->pd_type = PORT_DEVICE_CHANGED; 10212 fctl_enlist_did_table(port, pd); 10213 } 10214 mutex_exit(&pd->pd_mutex); 10215 10216 } 10217 } 10218 10219 if (ns_cmd) { 10220 fctl_free_ns_cmd(ns_cmd); 10221 } 10222 10223 listlen = 0; 10224 changelist = NULL; 10225 if (count) { 10226 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10227 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10228 mutex_exit(&port->fp_mutex); 10229 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10230 mutex_enter(&port->fp_mutex); 10231 } 10232 10233 dbg_count = 0; 10234 10235 job->job_counter = count; 10236 10237 for (index = 0; index < pwwn_table_size; index++) { 10238 head = &port->fp_pwwn_table[index]; 10239 npd = head->pwwn_head; 10240 10241 while ((pd = npd) != NULL) { 10242 npd = pd->pd_wwn_hnext; 10243 10244 mutex_enter(&pd->pd_mutex); 10245 if (pd->pd_flags != PD_ELS_MARK) { 10246 mutex_exit(&pd->pd_mutex); 10247 continue; 10248 } 10249 10250 dbg_count++; 10251 10252 /* 10253 * If it is already marked deletion, nothing 10254 * else to do. 10255 */ 10256 if (pd->pd_type == PORT_DEVICE_DELETE) { 10257 pd->pd_type = PORT_DEVICE_OLD; 10258 10259 mutex_exit(&pd->pd_mutex); 10260 mutex_exit(&port->fp_mutex); 10261 fp_jobdone(job); 10262 mutex_enter(&port->fp_mutex); 10263 10264 continue; 10265 } 10266 10267 /* 10268 * If it is freshly discovered out of 10269 * the orphan list, nothing else to do 10270 */ 10271 if (pd->pd_type == PORT_DEVICE_NEW) { 10272 pd->pd_flags = PD_IDLE; 10273 10274 mutex_exit(&pd->pd_mutex); 10275 mutex_exit(&port->fp_mutex); 10276 fp_jobdone(job); 10277 mutex_enter(&port->fp_mutex); 10278 10279 continue; 10280 } 10281 10282 pd->pd_flags = PD_IDLE; 10283 d_id = pd->pd_port_id.port_id; 10284 10285 /* 10286 * Explicitly mark all devices OLD; successful 10287 * PLOGI should reset this to either NO_CHANGE 10288 * or CHANGED. 
10289 */ 10290 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10291 pd->pd_type = PORT_DEVICE_OLD; 10292 } 10293 10294 mutex_exit(&pd->pd_mutex); 10295 mutex_exit(&port->fp_mutex); 10296 10297 rval = fp_port_login(port, d_id, job, 10298 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10299 10300 if (rval != FC_SUCCESS) { 10301 fp_jobdone(job); 10302 } 10303 mutex_enter(&port->fp_mutex); 10304 } 10305 } 10306 mutex_exit(&port->fp_mutex); 10307 10308 ASSERT(dbg_count == count); 10309 fp_jobwait(job); 10310 10311 mutex_enter(&port->fp_mutex); 10312 10313 ASSERT(port->fp_statec_busy > 0); 10314 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10315 if (port->fp_statec_busy > 1) { 10316 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10317 } 10318 } 10319 mutex_exit(&port->fp_mutex); 10320 } else { 10321 ASSERT(port->fp_statec_busy > 0); 10322 if (port->fp_statec_busy > 1) { 10323 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10324 } 10325 mutex_exit(&port->fp_mutex); 10326 } 10327 10328 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10329 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10330 10331 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10332 listlen, listlen, KM_SLEEP); 10333 10334 mutex_enter(&port->fp_mutex); 10335 } else { 10336 ASSERT(changelist == NULL && listlen == 0); 10337 mutex_enter(&port->fp_mutex); 10338 if (--port->fp_statec_busy == 0) { 10339 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10340 } 10341 } 10342 } 10343 10344 10345 /* 10346 * Fill out device list for userland ioctl in private loop 10347 */ 10348 static int 10349 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10350 { 10351 int rval; 10352 int count; 10353 int index; 10354 int num_devices; 10355 fc_remote_node_t *node; 10356 fc_port_dev_t *devlist; 10357 int lilp_device_count; 10358 fc_lilpmap_t *lilp_map; 10359 uchar_t *alpa_list; 10360 10361 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10362 10363 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10364 if (port->fp_total_devices > port->fp_dev_count && 10365 num_devices >= port->fp_total_devices) { 10366 job_request_t *job; 10367 10368 mutex_exit(&port->fp_mutex); 10369 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10370 job->job_counter = 1; 10371 10372 mutex_enter(&port->fp_mutex); 10373 fp_get_loopmap(port, job); 10374 mutex_exit(&port->fp_mutex); 10375 10376 fp_jobwait(job); 10377 fctl_dealloc_job(job); 10378 } else { 10379 mutex_exit(&port->fp_mutex); 10380 } 10381 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10382 10383 mutex_enter(&port->fp_mutex); 10384 10385 /* 10386 * Applications are accustomed to getting the device list in 10387 * LILP map order. The HBA firmware usually returns the device 10388 * map in the LILP map order and diagnostic applications would 10389 * prefer to receive in the device list in that order too 10390 */ 10391 lilp_map = &port->fp_lilp_map; 10392 alpa_list = &lilp_map->lilp_alpalist[0]; 10393 10394 /* 10395 * the length field corresponds to the offset in the LILP frame 10396 * which begins with 1. The thing to note here is that the 10397 * lilp_device_count is 1 more than fp->fp_total_devices since 10398 * the host adapter's alpa also shows up in the lilp map. We 10399 * don't however return details of the host adapter since 10400 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10401 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10402 * ioctl to obtain details about the host adapter port. 
10403 */ 10404 lilp_device_count = lilp_map->lilp_length; 10405 10406 for (count = index = 0; index < lilp_device_count && 10407 count < num_devices; index++) { 10408 uint32_t d_id; 10409 fc_remote_port_t *pd; 10410 10411 d_id = alpa_list[index]; 10412 10413 mutex_exit(&port->fp_mutex); 10414 pd = fctl_get_remote_port_by_did(port, d_id); 10415 mutex_enter(&port->fp_mutex); 10416 10417 if (pd != NULL) { 10418 mutex_enter(&pd->pd_mutex); 10419 10420 if (pd->pd_state == PORT_DEVICE_INVALID) { 10421 mutex_exit(&pd->pd_mutex); 10422 continue; 10423 } 10424 10425 devlist[count].dev_state = pd->pd_state; 10426 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10427 devlist[count].dev_did = pd->pd_port_id; 10428 devlist[count].dev_did.priv_lilp_posit = 10429 (uint8_t)(index & 0xff); 10430 bcopy((caddr_t)pd->pd_fc4types, 10431 (caddr_t)devlist[count].dev_type, 10432 sizeof (pd->pd_fc4types)); 10433 10434 bcopy((caddr_t)&pd->pd_port_name, 10435 (caddr_t)&devlist[count].dev_pwwn, 10436 sizeof (la_wwn_t)); 10437 10438 node = pd->pd_remote_nodep; 10439 mutex_exit(&pd->pd_mutex); 10440 10441 if (node) { 10442 mutex_enter(&node->fd_mutex); 10443 bcopy((caddr_t)&node->fd_node_name, 10444 (caddr_t)&devlist[count].dev_nwwn, 10445 sizeof (la_wwn_t)); 10446 mutex_exit(&node->fd_mutex); 10447 } 10448 count++; 10449 } 10450 } 10451 10452 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10453 sizeof (count), mode)) { 10454 rval = FC_FAILURE; 10455 } 10456 10457 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10458 sizeof (fc_port_dev_t) * num_devices, mode)) { 10459 rval = FC_FAILURE; 10460 } else { 10461 rval = FC_SUCCESS; 10462 } 10463 10464 kmem_free(devlist, sizeof (*devlist) * num_devices); 10465 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10466 10467 return (rval); 10468 } 10469 10470 10471 /* 10472 * Completion function for responses to unsolicited commands 10473 */ 10474 static void 10475 fp_unsol_intr(fc_packet_t *pkt) 10476 { 10477 fp_cmd_t *cmd; 10478 fc_local_port_t *port; 10479 10480 cmd = pkt->pkt_ulp_private; 10481 port = cmd->cmd_port; 10482 10483 mutex_enter(&port->fp_mutex); 10484 port->fp_out_fpcmds--; 10485 mutex_exit(&port->fp_mutex); 10486 10487 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10488 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10489 "couldn't post response to unsolicited request;" 10490 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10491 pkt->pkt_resp_fhdr.rx_id); 10492 } 10493 10494 if (cmd == port->fp_els_resp_pkt) { 10495 mutex_enter(&port->fp_mutex); 10496 port->fp_els_resp_pkt_busy = 0; 10497 mutex_exit(&port->fp_mutex); 10498 return; 10499 } 10500 10501 fp_free_pkt(cmd); 10502 } 10503 10504 10505 /* 10506 * solicited LINIT ELS completion function 10507 */ 10508 static void 10509 fp_linit_intr(fc_packet_t *pkt) 10510 { 10511 fp_cmd_t *cmd; 10512 job_request_t *job; 10513 fc_linit_resp_t acc; 10514 10515 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10516 10517 mutex_enter(&cmd->cmd_port->fp_mutex); 10518 cmd->cmd_port->fp_out_fpcmds--; 10519 mutex_exit(&cmd->cmd_port->fp_mutex); 10520 10521 if (FP_IS_PKT_ERROR(pkt)) { 10522 (void) fp_common_intr(pkt, 1); 10523 return; 10524 } 10525 10526 job = cmd->cmd_job; 10527 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10528 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10529 if (acc.status != FC_LINIT_SUCCESS) { 10530 job->job_result = FC_FAILURE; 10531 } else { 10532 job->job_result = FC_SUCCESS; 10533 } 10534 10535 fp_iodone(cmd); 10536 } 10537 10538 10539 /* 10540 * Decode the unsolicited request; For FC-4 Device 
and Link data frames 10541 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10542 * ELS requests, submit a request to the job_handler thread to work on it. 10543 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10544 * and save much of the interrupt time processing of unsolicited ELS requests 10545 * and hand it off to the job_handler thread. 10546 */ 10547 static void 10548 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10549 { 10550 uchar_t r_ctl; 10551 uchar_t ls_code; 10552 uint32_t s_id; 10553 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10554 uint32_t cb_arg; 10555 fp_cmd_t *cmd; 10556 fc_local_port_t *port; 10557 job_request_t *job; 10558 fc_remote_port_t *pd; 10559 10560 port = port_handle; 10561 10562 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10563 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10564 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10565 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10566 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10567 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10568 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10569 buf->ub_buffer[0]); 10570 10571 if (type & 0x80000000) { 10572 /* 10573 * Huh ? Nothing much can be done without 10574 * a valid buffer. So just exit. 10575 */ 10576 return; 10577 } 10578 /* 10579 * If the unsolicited interrupts arrive while it isn't 10580 * safe to handle unsolicited callbacks; Drop them, yes, 10581 * drop them on the floor 10582 */ 10583 mutex_enter(&port->fp_mutex); 10584 port->fp_active_ubs++; 10585 if ((port->fp_soft_state & 10586 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10587 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10588 10589 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10590 "not ONLINE. 
s_id=%x, d_id=%x, type=%x, " 10591 "seq_id=%x, ox_id=%x, rx_id=%x" 10592 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10593 buf->ub_frame.type, buf->ub_frame.seq_id, 10594 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10595 10596 ASSERT(port->fp_active_ubs > 0); 10597 if (--(port->fp_active_ubs) == 0) { 10598 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10599 } 10600 10601 mutex_exit(&port->fp_mutex); 10602 10603 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10604 1, &buf->ub_token); 10605 10606 return; 10607 } 10608 10609 r_ctl = buf->ub_frame.r_ctl; 10610 s_id = buf->ub_frame.s_id; 10611 if (port->fp_active_ubs == 1) { 10612 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10613 } 10614 10615 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10616 port->fp_statec_busy) { 10617 mutex_exit(&port->fp_mutex); 10618 pd = fctl_get_remote_port_by_did(port, s_id); 10619 if (pd) { 10620 mutex_enter(&pd->pd_mutex); 10621 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10622 FP_TRACE(FP_NHEAD1(3, 0), 10623 "LOGO for LOGGED IN D_ID %x", 10624 buf->ub_frame.s_id); 10625 pd->pd_state = PORT_DEVICE_VALID; 10626 } 10627 mutex_exit(&pd->pd_mutex); 10628 } 10629 10630 mutex_enter(&port->fp_mutex); 10631 ASSERT(port->fp_active_ubs > 0); 10632 if (--(port->fp_active_ubs) == 0) { 10633 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10634 } 10635 mutex_exit(&port->fp_mutex); 10636 10637 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10638 1, &buf->ub_token); 10639 10640 FP_TRACE(FP_NHEAD1(3, 0), 10641 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10642 buf->ub_frame.s_id); 10643 return; 10644 } 10645 10646 if (port->fp_els_resp_pkt_busy == 0) { 10647 if (r_ctl == R_CTL_ELS_REQ) { 10648 ls_code = buf->ub_buffer[0]; 10649 10650 switch (ls_code) { 10651 case LA_ELS_PLOGI: 10652 case LA_ELS_FLOGI: 10653 port->fp_els_resp_pkt_busy = 1; 10654 mutex_exit(&port->fp_mutex); 10655 fp_i_handle_unsol_els(port, buf); 10656 10657 mutex_enter(&port->fp_mutex); 10658 ASSERT(port->fp_active_ubs > 0); 10659 if (--(port->fp_active_ubs) == 0) { 10660 port->fp_soft_state &= 10661 ~FP_SOFT_IN_UNSOL_CB; 10662 } 10663 mutex_exit(&port->fp_mutex); 10664 port->fp_fca_tran->fca_ub_release( 10665 port->fp_fca_handle, 1, &buf->ub_token); 10666 10667 return; 10668 case LA_ELS_RSCN: 10669 if (++(port)->fp_rscn_count == 10670 FC_INVALID_RSCN_COUNT) { 10671 ++(port)->fp_rscn_count; 10672 } 10673 rscn_count = port->fp_rscn_count; 10674 break; 10675 10676 default: 10677 break; 10678 } 10679 } 10680 } else if ((r_ctl == R_CTL_ELS_REQ) && 10681 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10682 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10683 ++port->fp_rscn_count; 10684 } 10685 rscn_count = port->fp_rscn_count; 10686 } 10687 10688 mutex_exit(&port->fp_mutex); 10689 10690 switch (r_ctl & R_CTL_ROUTING) { 10691 case R_CTL_DEVICE_DATA: 10692 /* 10693 * If the unsolicited buffer is a CT IU, 10694 * have the job_handler thread work on it. 10695 */ 10696 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10697 break; 10698 } 10699 /* FALLTHROUGH */ 10700 10701 case R_CTL_FC4_SVC: { 10702 int sendup = 0; 10703 10704 /* 10705 * If a LOGIN isn't performed before this request 10706 * shut the door on this port with a reply that a 10707 * LOGIN is required. We make an exception however 10708 * for IP broadcast packets and pass them through 10709 * to the IP ULP(s) to handle broadcast requests. 
10710 * This is not a problem for private loop devices 10711 * but for fabric topologies we don't log into the 10712 * remote ports during port initialization and 10713 * the ULPs need to log into requesting ports on 10714 * demand. 10715 */ 10716 pd = fctl_get_remote_port_by_did(port, s_id); 10717 if (pd) { 10718 mutex_enter(&pd->pd_mutex); 10719 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10720 sendup++; 10721 } 10722 mutex_exit(&pd->pd_mutex); 10723 } else if ((pd == NULL) && 10724 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10725 (buf->ub_frame.d_id == 0xffffff || 10726 buf->ub_frame.d_id == 0x00)) { 10727 /* brodacst IP frame - so sendup via job thread */ 10728 break; 10729 } 10730 10731 /* 10732 * Send all FC4 services via job thread too 10733 */ 10734 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10735 break; 10736 } 10737 10738 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10739 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10740 return; 10741 } 10742 10743 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10744 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10745 0, KM_NOSLEEP, pd); 10746 if (cmd != NULL) { 10747 fp_els_rjt_init(port, cmd, buf, 10748 FC_ACTION_NON_RETRYABLE, 10749 FC_REASON_LOGIN_REQUIRED, NULL); 10750 10751 if (fp_sendcmd(port, cmd, 10752 port->fp_fca_handle) != FC_SUCCESS) { 10753 fp_free_pkt(cmd); 10754 } 10755 } 10756 } 10757 10758 mutex_enter(&port->fp_mutex); 10759 ASSERT(port->fp_active_ubs > 0); 10760 if (--(port->fp_active_ubs) == 0) { 10761 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10762 } 10763 mutex_exit(&port->fp_mutex); 10764 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10765 1, &buf->ub_token); 10766 10767 return; 10768 } 10769 10770 default: 10771 break; 10772 } 10773 10774 /* 10775 * Submit a Request to the job_handler thread to work 10776 * on the unsolicited request. The potential side effect 10777 * of this is that the unsolicited buffer takes a little 10778 * longer to get released but we save interrupt time in 10779 * the bargain. 10780 */ 10781 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10782 10783 /* 10784 * One way that the rscn_count will get used is described below : 10785 * 10786 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10787 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10788 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10789 * by overloading the job_cb_arg to pass the rscn_count 10790 * 4. When one of the routines processing the RSCN picks it up (ex: 10791 * fp_validate_rscn_page()), it passes this count in the map 10792 * structure (as part of the map_rscn_info structure member) to the 10793 * ULPs. 10794 * 5. When ULPs make calls back to the transport (example interfaces for 10795 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10796 * can now pass back this count as part of the fc_packet's 10797 * pkt_ulp_rscn_count member. fcp does this currently. 10798 * 6. When transport gets a call to transport a command on the wire, it 10799 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10800 * fc_packet. If there is, it will match that info with the current 10801 * rscn_count on that instance of the port. If they don't match up 10802 * then there was a newer RSCN. The ULP gets back an error code which 10803 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN. 10804 * 7. At this point the ULP is free to make up its own mind as to how to 10805 * handle this. 
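 *    (For illustration only -- this is not transport code: a ULP that
 *    gets back FC_DEVICE_BUSY_NEW_RSCN could simply treat it as a
 *    retryable condition, e.g.
 *        if (rval == FC_DEVICE_BUSY_NEW_RSCN)
 *                goto retry;        -- hypothetical ULP-side handling
 *    rather than failing the I/O outright.)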
Currently, fcp will reset its retry counters and keep 10806 * retrying the operation it was doing in anticipation of getting a 10807 * new state change call back for the new RSCN. 10808 */ 10809 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10810 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10811 if (job == NULL) { 10812 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10813 "couldn't submit a job to the thread, failing.."); 10814 10815 mutex_enter(&port->fp_mutex); 10816 10817 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10818 --port->fp_rscn_count; 10819 } 10820 10821 ASSERT(port->fp_active_ubs > 0); 10822 if (--(port->fp_active_ubs) == 0) { 10823 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10824 } 10825 10826 mutex_exit(&port->fp_mutex); 10827 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10828 1, &buf->ub_token); 10829 10830 return; 10831 } 10832 job->job_private = (void *)buf; 10833 fctl_enque_job(port, job); 10834 } 10835 10836 10837 /* 10838 * Handle unsolicited requests 10839 */ 10840 static void 10841 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10842 job_request_t *job) 10843 { 10844 uchar_t r_ctl; 10845 uchar_t ls_code; 10846 uint32_t s_id; 10847 fp_cmd_t *cmd; 10848 fc_remote_port_t *pd; 10849 fp_unsol_spec_t *ub_spec; 10850 10851 r_ctl = buf->ub_frame.r_ctl; 10852 s_id = buf->ub_frame.s_id; 10853 10854 switch (r_ctl & R_CTL_ROUTING) { 10855 case R_CTL_EXTENDED_SVC: 10856 if (r_ctl != R_CTL_ELS_REQ) { 10857 break; 10858 } 10859 10860 ls_code = buf->ub_buffer[0]; 10861 switch (ls_code) { 10862 case LA_ELS_LOGO: 10863 case LA_ELS_ADISC: 10864 case LA_ELS_PRLO: 10865 pd = fctl_get_remote_port_by_did(port, s_id); 10866 if (pd == NULL) { 10867 if (!FC_IS_REAL_DEVICE(s_id)) { 10868 break; 10869 } 10870 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10871 break; 10872 } 10873 if ((cmd = fp_alloc_pkt(port, 10874 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10875 NULL)) == NULL) { 10876 /* 10877 * Can this actually fail when 10878 * given KM_SLEEP? (Could be used 10879 * this way in a number of places.) 10880 */ 10881 break; 10882 } 10883 10884 fp_els_rjt_init(port, cmd, buf, 10885 FC_ACTION_NON_RETRYABLE, 10886 FC_REASON_INVALID_LINK_CTRL, job); 10887 10888 if (fp_sendcmd(port, cmd, 10889 port->fp_fca_handle) != FC_SUCCESS) { 10890 fp_free_pkt(cmd); 10891 } 10892 10893 break; 10894 } 10895 if (ls_code == LA_ELS_LOGO) { 10896 fp_handle_unsol_logo(port, buf, pd, job); 10897 } else if (ls_code == LA_ELS_ADISC) { 10898 fp_handle_unsol_adisc(port, buf, pd, job); 10899 } else { 10900 fp_handle_unsol_prlo(port, buf, pd, job); 10901 } 10902 break; 10903 10904 case LA_ELS_PLOGI: 10905 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10906 break; 10907 10908 case LA_ELS_FLOGI: 10909 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10910 break; 10911 10912 case LA_ELS_RSCN: 10913 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10914 break; 10915 10916 default: 10917 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10918 ub_spec->port = port; 10919 ub_spec->buf = buf; 10920 10921 (void) taskq_dispatch(port->fp_taskq, 10922 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10923 return; 10924 } 10925 break; 10926 10927 case R_CTL_BASIC_SVC: 10928 /* 10929 * The unsolicited basic link services could be ABTS 10930 * and RMC (Or even a NOP). Just BA_RJT them until 10931 * such time there arises a need to handle them more 10932 * carefully. 
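 * (The rejection is built by fp_ba_rjt_init() below with reason code
 * FC_REASON_CMD_UNSUPPORTED; only Class 1 and 2 frames are answered,
 * Class 3 frames are quietly dropped.)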
10933 */ 10934 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10935 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10936 0, KM_SLEEP, NULL); 10937 if (cmd != NULL) { 10938 fp_ba_rjt_init(port, cmd, buf, job); 10939 if (fp_sendcmd(port, cmd, 10940 port->fp_fca_handle) != FC_SUCCESS) { 10941 fp_free_pkt(cmd); 10942 } 10943 } 10944 } 10945 break; 10946 10947 case R_CTL_DEVICE_DATA: 10948 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10949 /* 10950 * Mostly this is of type FC_TYPE_FC_SERVICES. 10951 * As we don't like any Unsolicited FC services 10952 * requests, we would do well to RJT them as 10953 * well. 10954 */ 10955 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10956 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10957 0, KM_SLEEP, NULL); 10958 if (cmd != NULL) { 10959 fp_els_rjt_init(port, cmd, buf, 10960 FC_ACTION_NON_RETRYABLE, 10961 FC_REASON_INVALID_LINK_CTRL, job); 10962 10963 if (fp_sendcmd(port, cmd, 10964 port->fp_fca_handle) != 10965 FC_SUCCESS) { 10966 fp_free_pkt(cmd); 10967 } 10968 } 10969 } 10970 break; 10971 } 10972 /* FALLTHROUGH */ 10973 10974 case R_CTL_FC4_SVC: 10975 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10976 ub_spec->port = port; 10977 ub_spec->buf = buf; 10978 10979 (void) taskq_dispatch(port->fp_taskq, 10980 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10981 return; 10982 10983 case R_CTL_LINK_CTL: 10984 /* 10985 * Turn deaf ear on unsolicited link control frames. 10986 * Typical unsolicited link control Frame is an LCR 10987 * (to reset End to End credit to the default login 10988 * value and abort current sequences for all classes) 10989 * An intelligent microcode/firmware should handle 10990 * this transparently at its level and not pass all 10991 * the way up here. 10992 * 10993 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 10994 * or F_BSY. P_RJT is chosen to be the most appropriate 10995 * at this time. 10996 */ 10997 /* FALLTHROUGH */ 10998 10999 default: 11000 /* 11001 * Just reject everything else as an invalid request. 11002 */ 11003 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11004 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11005 0, KM_SLEEP, NULL); 11006 if (cmd != NULL) { 11007 fp_els_rjt_init(port, cmd, buf, 11008 FC_ACTION_NON_RETRYABLE, 11009 FC_REASON_INVALID_LINK_CTRL, job); 11010 11011 if (fp_sendcmd(port, cmd, 11012 port->fp_fca_handle) != FC_SUCCESS) { 11013 fp_free_pkt(cmd); 11014 } 11015 } 11016 } 11017 break; 11018 } 11019 11020 mutex_enter(&port->fp_mutex); 11021 ASSERT(port->fp_active_ubs > 0); 11022 if (--(port->fp_active_ubs) == 0) { 11023 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11024 } 11025 mutex_exit(&port->fp_mutex); 11026 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11027 1, &buf->ub_token); 11028 } 11029 11030 11031 /* 11032 * Prepare a BA_RJT and send it over. 
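 * The response reuses the exchange and sequence identifiers of the
 * unsolicited request, with s_id and d_id swapped (see
 * fp_unsol_resp_init()), and carries a fixed payload: reason code
 * FC_REASON_CMD_UNSUPPORTED with explanation FC_EXPLN_NONE.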
11033 */ 11034 static void 11035 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11036 job_request_t *job) 11037 { 11038 fc_packet_t *pkt; 11039 la_ba_rjt_t payload; 11040 11041 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11042 11043 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11044 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11045 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11046 cmd->cmd_retry_count = 1; 11047 cmd->cmd_ulp_pkt = NULL; 11048 11049 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11050 cmd->cmd_job = job; 11051 11052 pkt = &cmd->cmd_pkt; 11053 11054 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11055 11056 payload.reserved = 0; 11057 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11058 payload.explanation = FC_EXPLN_NONE; 11059 payload.vendor = 0; 11060 11061 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11062 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11063 } 11064 11065 11066 /* 11067 * Prepare an LS_RJT and send it over 11068 */ 11069 static void 11070 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11071 uchar_t action, uchar_t reason, job_request_t *job) 11072 { 11073 fc_packet_t *pkt; 11074 la_els_rjt_t payload; 11075 11076 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11077 11078 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11079 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11080 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11081 cmd->cmd_retry_count = 1; 11082 cmd->cmd_ulp_pkt = NULL; 11083 11084 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11085 cmd->cmd_job = job; 11086 11087 pkt = &cmd->cmd_pkt; 11088 11089 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11090 11091 payload.ls_code.ls_code = LA_ELS_RJT; 11092 payload.ls_code.mbz = 0; 11093 payload.action = action; 11094 payload.reason = reason; 11095 payload.reserved = 0; 11096 payload.vu = 0; 11097 11098 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11099 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11100 } 11101 11102 /* 11103 * Function: fp_prlo_acc_init 11104 * 11105 * Description: Initializes an Link Service Accept for a PRLO. 11106 * 11107 * Arguments: *port Local port through which the PRLO was 11108 * received. 11109 * cmd Command that will carry the accept. 11110 * *buf Unsolicited buffer containing the PRLO 11111 * request. 11112 * job Job request. 11113 * sleep Allocation mode. 11114 * 11115 * Return Value: *cmd Command containing the response. 11116 * 11117 * Context: Depends on the parameter sleep. 11118 */ 11119 fp_cmd_t * 11120 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11121 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11122 { 11123 fp_cmd_t *cmd; 11124 fc_packet_t *pkt; 11125 la_els_prlo_t *req; 11126 size_t len; 11127 uint16_t flags; 11128 11129 req = (la_els_prlo_t *)buf->ub_buffer; 11130 len = (size_t)ntohs(req->payload_length); 11131 11132 /* 11133 * The payload of the accept to a PRLO has to be the exact match of 11134 * the payload of the request (at the exception of the code). 11135 */ 11136 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11137 11138 if (cmd) { 11139 /* 11140 * The fp command was successfully allocated. 
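 * The accept is built by copying the PRLO request payload back,
 * overwriting only the ls_code (to LA_ELS_ACC) and the response code
 * bits of the flags word (SP_RESP_CODE_REQ_EXECUTED).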
11141 */ 11142 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11143 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11144 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11145 cmd->cmd_retry_count = 1; 11146 cmd->cmd_ulp_pkt = NULL; 11147 11148 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11149 cmd->cmd_job = job; 11150 11151 pkt = &cmd->cmd_pkt; 11152 11153 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11154 FC_TYPE_EXTENDED_LS); 11155 11156 /* The code is overwritten for the copy. */ 11157 req->ls_code = LA_ELS_ACC; 11158 /* Response code is set. */ 11159 flags = ntohs(req->flags); 11160 flags &= ~SP_RESP_CODE_MASK; 11161 flags |= SP_RESP_CODE_REQ_EXECUTED; 11162 req->flags = htons(flags); 11163 11164 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11165 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11166 } 11167 return (cmd); 11168 } 11169 11170 /* 11171 * Prepare an ACC response to an ELS request 11172 */ 11173 static void 11174 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11175 job_request_t *job) 11176 { 11177 fc_packet_t *pkt; 11178 ls_code_t payload; 11179 11180 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11181 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11182 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11183 cmd->cmd_retry_count = 1; 11184 cmd->cmd_ulp_pkt = NULL; 11185 11186 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11187 cmd->cmd_job = job; 11188 11189 pkt = &cmd->cmd_pkt; 11190 11191 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11192 11193 payload.ls_code = LA_ELS_ACC; 11194 payload.mbz = 0; 11195 11196 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11197 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11198 } 11199 11200 /* 11201 * Unsolicited PRLO handler 11202 * 11203 * A Process Logout should be handled by the ULP that established it. However, 11204 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11205 * when a device implicitly logs out an initiator (for whatever reason) and 11206 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11207 * The logical thing to do for the device would be to send a LOGO in response 11208 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11209 * a PRLO instead. 11210 * 11211 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11212 * think that the Port Login has been lost. If we follow the Fibre Channel 11213 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11214 * the Port Login has also been lost, the remote port will reject the PRLI 11215 * indicating that we must PLOGI first. The initiator will then turn around and 11216 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11217 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11218 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11219 * needed would be received by FCP. FCP would have, then, to tell the transport 11220 * (fp) to PLOGI. The problem is, the transport would still think the Port 11221 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11222 * if you think it's not necessary". To work around that difficulty, the PRLO 11223 * is treated by the transport as a LOGO. The downside to it is a Port Login 11224 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11225 * has nothing to do with the PRLO) may be impacted. 
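 * (Concretely: treating the PRLO as a LOGO tears down the Port Login
 * state shared by every ULP bound to that remote port, so a ULP that
 * never uses PRLI/PRLO would still see the login drop and a device
 * state change callback.)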
However, this is a 11226 * scenario very unlikely to happen. As of today the only ULP in Leadville 11227 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11228 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11229 * unlikely). 11230 */ 11231 static void 11232 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11233 fc_remote_port_t *pd, job_request_t *job) 11234 { 11235 int busy; 11236 int rval; 11237 int retain; 11238 fp_cmd_t *cmd; 11239 fc_portmap_t *listptr; 11240 boolean_t tolerance; 11241 la_els_prlo_t *req; 11242 11243 req = (la_els_prlo_t *)buf->ub_buffer; 11244 11245 if ((ntohs(req->payload_length) != 11246 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11247 (req->page_length != sizeof (service_parameter_page_t))) { 11248 /* 11249 * We are being very restrictive. Only on page per 11250 * payload. If it is not the case we reject the ELS although 11251 * we should reply indicating we handle only single page 11252 * per PRLO. 11253 */ 11254 goto fp_reject_prlo; 11255 } 11256 11257 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11258 /* 11259 * This is in case the payload advertizes a size bigger than 11260 * what it really is. 11261 */ 11262 goto fp_reject_prlo; 11263 } 11264 11265 mutex_enter(&port->fp_mutex); 11266 busy = port->fp_statec_busy; 11267 mutex_exit(&port->fp_mutex); 11268 11269 mutex_enter(&pd->pd_mutex); 11270 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11271 if (!busy) { 11272 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11273 pd->pd_state == PORT_DEVICE_INVALID || 11274 pd->pd_flags == PD_ELS_IN_PROGRESS || 11275 pd->pd_type == PORT_DEVICE_OLD) { 11276 busy++; 11277 } 11278 } 11279 11280 if (busy) { 11281 mutex_exit(&pd->pd_mutex); 11282 11283 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11284 "pd=%p - busy", 11285 pd->pd_port_id.port_id, pd); 11286 11287 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11288 goto fp_reject_prlo; 11289 } 11290 } else { 11291 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11292 11293 if (tolerance) { 11294 fctl_tc_reset(&pd->pd_logo_tc); 11295 retain = 0; 11296 pd->pd_state = PORT_DEVICE_INVALID; 11297 } 11298 11299 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11300 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11301 tolerance, retain); 11302 11303 pd->pd_aux_flags |= PD_LOGGED_OUT; 11304 mutex_exit(&pd->pd_mutex); 11305 11306 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11307 if (cmd == NULL) { 11308 return; 11309 } 11310 11311 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11312 if (rval != FC_SUCCESS) { 11313 fp_free_pkt(cmd); 11314 return; 11315 } 11316 11317 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11318 11319 if (retain) { 11320 fp_unregister_login(pd); 11321 fctl_copy_portmap(listptr, pd); 11322 } else { 11323 uint32_t d_id; 11324 char ww_name[17]; 11325 11326 mutex_enter(&pd->pd_mutex); 11327 d_id = pd->pd_port_id.port_id; 11328 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11329 mutex_exit(&pd->pd_mutex); 11330 11331 FP_TRACE(FP_NHEAD2(9, 0), 11332 "N_x Port with D_ID=%x, PWWN=%s logged out" 11333 " %d times in %d us; Giving up", d_id, ww_name, 11334 FC_LOGO_TOLERANCE_LIMIT, 11335 FC_LOGO_TOLERANCE_TIME_LIMIT); 11336 11337 fp_fillout_old_map(listptr, pd, 0); 11338 listptr->map_type = PORT_DEVICE_OLD; 11339 } 11340 11341 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11342 return; 11343 } 11344 11345 fp_reject_prlo: 11346 11347 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11348 if (cmd != NULL) { 11349 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11350 FC_REASON_INVALID_LINK_CTRL, job); 11351 11352 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11353 fp_free_pkt(cmd); 11354 } 11355 } 11356 } 11357 11358 /* 11359 * Unsolicited LOGO handler 11360 */ 11361 static void 11362 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11363 fc_remote_port_t *pd, job_request_t *job) 11364 { 11365 int busy; 11366 int rval; 11367 int retain; 11368 fp_cmd_t *cmd; 11369 fc_portmap_t *listptr; 11370 boolean_t tolerance; 11371 11372 mutex_enter(&port->fp_mutex); 11373 busy = port->fp_statec_busy; 11374 mutex_exit(&port->fp_mutex); 11375 11376 mutex_enter(&pd->pd_mutex); 11377 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11378 if (!busy) { 11379 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11380 pd->pd_state == PORT_DEVICE_INVALID || 11381 pd->pd_flags == PD_ELS_IN_PROGRESS || 11382 pd->pd_type == PORT_DEVICE_OLD) { 11383 busy++; 11384 } 11385 } 11386 11387 if (busy) { 11388 mutex_exit(&pd->pd_mutex); 11389 11390 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11391 "pd=%p - busy", 11392 pd->pd_port_id.port_id, pd); 11393 11394 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11395 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11396 0, KM_SLEEP, pd); 11397 if (cmd != NULL) { 11398 fp_els_rjt_init(port, cmd, buf, 11399 FC_ACTION_NON_RETRYABLE, 11400 FC_REASON_INVALID_LINK_CTRL, job); 11401 11402 if (fp_sendcmd(port, cmd, 11403 port->fp_fca_handle) != FC_SUCCESS) { 11404 fp_free_pkt(cmd); 11405 } 11406 } 11407 } 11408 } else { 11409 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11410 11411 if (tolerance) { 11412 fctl_tc_reset(&pd->pd_logo_tc); 11413 retain = 0; 11414 pd->pd_state = PORT_DEVICE_INVALID; 11415 } 11416 11417 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11418 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11419 tolerance, retain); 11420 11421 pd->pd_aux_flags |= PD_LOGGED_OUT; 11422 mutex_exit(&pd->pd_mutex); 11423 11424 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11425 KM_SLEEP, pd); 11426 if (cmd == NULL) { 11427 return; 11428 } 11429 11430 fp_els_acc_init(port, cmd, buf, job); 11431 11432 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11433 if (rval != FC_SUCCESS) { 11434 fp_free_pkt(cmd); 11435 return; 11436 } 11437 11438 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11439 11440 if (retain) { 11441 job_request_t *job; 11442 fctl_ns_req_t *ns_cmd; 11443 11444 /* 11445 * when get LOGO, first try to get PID from nameserver 11446 * if failed, then we do not need 11447 * send PLOGI to that remote port 11448 */ 11449 job = fctl_alloc_job( 11450 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11451 11452 if (job != NULL) { 11453 ns_cmd = fctl_alloc_ns_cmd( 11454 sizeof (ns_req_gid_pn_t), 11455 sizeof (ns_resp_gid_pn_t), 11456 sizeof (ns_resp_gid_pn_t), 11457 0, KM_SLEEP); 11458 if (ns_cmd != NULL) { 11459 int ret; 11460 job->job_result = FC_SUCCESS; 11461 ns_cmd->ns_cmd_code = NS_GID_PN; 11462 ((ns_req_gid_pn_t *) 11463 (ns_cmd->ns_cmd_buf))->pwwn = 11464 pd->pd_port_name; 11465 ret = fp_ns_query( 11466 port, ns_cmd, job, 1, KM_SLEEP); 11467 if ((ret != FC_SUCCESS) || 11468 (job->job_result != FC_SUCCESS)) { 11469 fctl_free_ns_cmd(ns_cmd); 11470 fctl_dealloc_job(job); 11471 FP_TRACE(FP_NHEAD2(9, 0), 11472 "NS query failed,", 11473 " delete pd"); 11474 goto delete_pd; 11475 } 11476 fctl_free_ns_cmd(ns_cmd); 11477 } 11478 fctl_dealloc_job(job); 11479 } 11480 fp_unregister_login(pd); 11481 fctl_copy_portmap(listptr, pd); 11482 } else { 11483 uint32_t d_id; 11484 char ww_name[17]; 11485 11486 delete_pd: 11487 mutex_enter(&pd->pd_mutex); 11488 d_id = pd->pd_port_id.port_id; 11489 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11490 mutex_exit(&pd->pd_mutex); 11491 11492 FP_TRACE(FP_NHEAD2(9, 0), 11493 "N_x Port with D_ID=%x, PWWN=%s logged out" 11494 " %d times in %d us; Giving up", d_id, ww_name, 11495 FC_LOGO_TOLERANCE_LIMIT, 11496 FC_LOGO_TOLERANCE_TIME_LIMIT); 11497 11498 fp_fillout_old_map(listptr, pd, 0); 11499 listptr->map_type = PORT_DEVICE_OLD; 11500 } 11501 11502 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11503 } 11504 } 11505 11506 11507 /* 11508 * Perform general purpose preparation of a response to an unsolicited request 11509 */ 11510 static void 11511 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11512 uchar_t r_ctl, uchar_t type) 11513 { 11514 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11515 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11516 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11517 pkt->pkt_cmd_fhdr.type = type; 11518 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11519 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11520 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11521 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11522 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11523 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11524 pkt->pkt_cmd_fhdr.ro = 0; 11525 pkt->pkt_cmd_fhdr.rsvd = 0; 11526 pkt->pkt_comp = fp_unsol_intr; 11527 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11528 } 11529 11530 /* 11531 * Immediate handling of unsolicited FLOGI and PLOGI 
requests. In the 11532 * early development days of public loop soc+ firmware, numerous problems 11533 * were encountered (the details are undocumented and history now) which 11534 * led to the birth of this function. 11535 * 11536 * If a pre-allocated unsolicited response packet is free, send out an 11537 * immediate response, otherwise submit the request to the port thread 11538 * to do the deferred processing. 11539 */ 11540 static void 11541 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11542 { 11543 int sent; 11544 int f_port; 11545 int do_acc; 11546 fp_cmd_t *cmd; 11547 la_els_logi_t *payload; 11548 fc_remote_port_t *pd; 11549 char dww_name[17]; 11550 11551 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11552 11553 cmd = port->fp_els_resp_pkt; 11554 11555 mutex_enter(&port->fp_mutex); 11556 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11557 mutex_exit(&port->fp_mutex); 11558 11559 switch (buf->ub_buffer[0]) { 11560 case LA_ELS_PLOGI: { 11561 int small; 11562 11563 payload = (la_els_logi_t *)buf->ub_buffer; 11564 11565 f_port = FP_IS_F_PORT(payload-> 11566 common_service.cmn_features) ? 1 : 0; 11567 11568 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11569 &payload->nport_ww_name); 11570 pd = fctl_get_remote_port_by_pwwn(port, 11571 &payload->nport_ww_name); 11572 if (pd) { 11573 mutex_enter(&pd->pd_mutex); 11574 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11575 /* 11576 * Most likely this means a cross login is in 11577 * progress or a device about to be yanked out. 11578 * Only accept the plogi if my wwn is smaller. 11579 */ 11580 if (pd->pd_type == PORT_DEVICE_OLD) { 11581 sent = 1; 11582 } 11583 /* 11584 * Stop plogi request (if any) 11585 * attempt from local side to speedup 11586 * the discovery progress. 11587 * Mark the pd as PD_PLOGI_RECEPIENT. 11588 */ 11589 if (f_port == 0 && small < 0) { 11590 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11591 } 11592 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11593 11594 mutex_exit(&pd->pd_mutex); 11595 11596 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11597 "Unsol PLOGI received. PD still exists in the " 11598 "PWWN list. pd=%p PWWN=%s, sent=%x", 11599 pd, dww_name, sent); 11600 11601 if (f_port == 0 && small < 0) { 11602 FP_TRACE(FP_NHEAD1(3, 0), 11603 "fp_i_handle_unsol_els: Mark the pd" 11604 " as plogi recipient, pd=%p, PWWN=%s" 11605 ", sent=%x", 11606 pd, dww_name, sent); 11607 } 11608 } else { 11609 sent = 0; 11610 } 11611 11612 /* 11613 * To avoid Login collisions, accept only if my WWN 11614 * is smaller than the requester (A curious side note 11615 * would be that this rule may not satisfy the PLOGIs 11616 * initiated by the switch from not-so-well known 11617 * ports such as 0xFFFC41) 11618 */ 11619 if ((f_port == 0 && small < 0) || 11620 (((small > 0 && do_acc) || 11621 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11622 if (fp_is_class_supported(port->fp_cos, 11623 buf->ub_class) == FC_FAILURE) { 11624 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11625 cmd->cmd_pkt.pkt_cmdlen = 11626 sizeof (la_els_rjt_t); 11627 cmd->cmd_pkt.pkt_rsplen = 0; 11628 fp_els_rjt_init(port, cmd, buf, 11629 FC_ACTION_NON_RETRYABLE, 11630 FC_REASON_CLASS_NOT_SUPP, NULL); 11631 FP_TRACE(FP_NHEAD1(3, 0), 11632 "fp_i_handle_unsol_els: " 11633 "Unsupported class. 
" 11634 "Rejecting PLOGI"); 11635 11636 } else { 11637 mutex_enter(&port->fp_mutex); 11638 port->fp_els_resp_pkt_busy = 0; 11639 mutex_exit(&port->fp_mutex); 11640 return; 11641 } 11642 } else { 11643 cmd->cmd_pkt.pkt_cmdlen = 11644 sizeof (la_els_logi_t); 11645 cmd->cmd_pkt.pkt_rsplen = 0; 11646 11647 /* 11648 * Sometime later, we should validate 11649 * the service parameters instead of 11650 * just accepting it. 11651 */ 11652 fp_login_acc_init(port, cmd, buf, NULL, 11653 KM_NOSLEEP); 11654 FP_TRACE(FP_NHEAD1(3, 0), 11655 "fp_i_handle_unsol_els: Accepting PLOGI," 11656 " f_port=%d, small=%d, do_acc=%d," 11657 " sent=%d.", f_port, small, do_acc, 11658 sent); 11659 /* 11660 * If fp_port_id is zero and topology is 11661 * Point-to-Point, get the local port id from 11662 * the d_id in the PLOGI request. 11663 * If the outgoing FLOGI hasn't been accepted, 11664 * the topology will be unknown here. But it's 11665 * still safe to save the d_id to fp_port_id, 11666 * just because it will be overwritten later 11667 * if the topology is not Point-to-Point. 11668 */ 11669 mutex_enter(&port->fp_mutex); 11670 if ((port->fp_port_id.port_id == 0) && 11671 (port->fp_topology == FC_TOP_PT_PT || 11672 port->fp_topology == FC_TOP_UNKNOWN)) { 11673 port->fp_port_id.port_id = 11674 buf->ub_frame.d_id; 11675 } 11676 mutex_exit(&port->fp_mutex); 11677 } 11678 } else { 11679 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11680 port->fp_options & FP_SEND_RJT) { 11681 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11682 cmd->cmd_pkt.pkt_rsplen = 0; 11683 fp_els_rjt_init(port, cmd, buf, 11684 FC_ACTION_NON_RETRYABLE, 11685 FC_REASON_LOGICAL_BSY, NULL); 11686 FP_TRACE(FP_NHEAD1(3, 0), 11687 "fp_i_handle_unsol_els: " 11688 "Rejecting PLOGI with Logical Busy." 11689 "Possible Login collision."); 11690 } else { 11691 mutex_enter(&port->fp_mutex); 11692 port->fp_els_resp_pkt_busy = 0; 11693 mutex_exit(&port->fp_mutex); 11694 return; 11695 } 11696 } 11697 break; 11698 } 11699 11700 case LA_ELS_FLOGI: 11701 if (fp_is_class_supported(port->fp_cos, 11702 buf->ub_class) == FC_FAILURE) { 11703 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11704 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11705 cmd->cmd_pkt.pkt_rsplen = 0; 11706 fp_els_rjt_init(port, cmd, buf, 11707 FC_ACTION_NON_RETRYABLE, 11708 FC_REASON_CLASS_NOT_SUPP, NULL); 11709 FP_TRACE(FP_NHEAD1(3, 0), 11710 "fp_i_handle_unsol_els: " 11711 "Unsupported Class. Rejecting FLOGI."); 11712 } else { 11713 mutex_enter(&port->fp_mutex); 11714 port->fp_els_resp_pkt_busy = 0; 11715 mutex_exit(&port->fp_mutex); 11716 return; 11717 } 11718 } else { 11719 mutex_enter(&port->fp_mutex); 11720 if (FC_PORT_STATE_MASK(port->fp_state) != 11721 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11722 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11723 mutex_exit(&port->fp_mutex); 11724 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11725 cmd->cmd_pkt.pkt_cmdlen = 11726 sizeof (la_els_rjt_t); 11727 cmd->cmd_pkt.pkt_rsplen = 0; 11728 fp_els_rjt_init(port, cmd, buf, 11729 FC_ACTION_NON_RETRYABLE, 11730 FC_REASON_INVALID_LINK_CTRL, 11731 NULL); 11732 FP_TRACE(FP_NHEAD1(3, 0), 11733 "fp_i_handle_unsol_els: " 11734 "Invalid Link Ctrl. 
" 11735 "Rejecting FLOGI."); 11736 } else { 11737 mutex_enter(&port->fp_mutex); 11738 port->fp_els_resp_pkt_busy = 0; 11739 mutex_exit(&port->fp_mutex); 11740 return; 11741 } 11742 } else { 11743 mutex_exit(&port->fp_mutex); 11744 cmd->cmd_pkt.pkt_cmdlen = 11745 sizeof (la_els_logi_t); 11746 cmd->cmd_pkt.pkt_rsplen = 0; 11747 /* 11748 * Let's not aggressively validate the N_Port's 11749 * service parameters until PLOGI. Suffice it 11750 * to give a hint that we are an N_Port and we 11751 * are game to some serious stuff here. 11752 */ 11753 fp_login_acc_init(port, cmd, buf, 11754 NULL, KM_NOSLEEP); 11755 FP_TRACE(FP_NHEAD1(3, 0), 11756 "fp_i_handle_unsol_els: " 11757 "Accepting FLOGI."); 11758 } 11759 } 11760 break; 11761 11762 default: 11763 return; 11764 } 11765 11766 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11767 mutex_enter(&port->fp_mutex); 11768 port->fp_els_resp_pkt_busy = 0; 11769 mutex_exit(&port->fp_mutex); 11770 } 11771 } 11772 11773 11774 /* 11775 * Handle unsolicited PLOGI request 11776 */ 11777 static void 11778 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11779 job_request_t *job, int sleep) 11780 { 11781 int sent; 11782 int small; 11783 int f_port; 11784 int do_acc; 11785 fp_cmd_t *cmd; 11786 la_wwn_t *swwn; 11787 la_wwn_t *dwwn; 11788 la_els_logi_t *payload; 11789 fc_remote_port_t *pd; 11790 char dww_name[17]; 11791 11792 payload = (la_els_logi_t *)buf->ub_buffer; 11793 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11794 11795 mutex_enter(&port->fp_mutex); 11796 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11797 mutex_exit(&port->fp_mutex); 11798 11799 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11800 "type=%x, f_ctl=%x" 11801 " seq_id=%x, ox_id=%x, rx_id=%x" 11802 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11803 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11804 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11805 11806 swwn = &port->fp_service_params.nport_ww_name; 11807 dwwn = &payload->nport_ww_name; 11808 small = fctl_wwn_cmp(swwn, dwwn); 11809 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11810 if (pd) { 11811 mutex_enter(&pd->pd_mutex); 11812 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11813 /* 11814 * Most likely this means a cross login is in 11815 * progress or a device about to be yanked out. 11816 * Only accept the plogi if my wwn is smaller. 11817 */ 11818 11819 if (pd->pd_type == PORT_DEVICE_OLD) { 11820 sent = 1; 11821 } 11822 /* 11823 * Stop plogi request (if any) 11824 * attempt from local side to speedup 11825 * the discovery progress. 11826 * Mark the pd as PD_PLOGI_RECEPIENT. 11827 */ 11828 if (f_port == 0 && small < 0) { 11829 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11830 } 11831 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11832 11833 mutex_exit(&pd->pd_mutex); 11834 11835 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11836 " received. PD still exists in the PWWN list. pd=%p " 11837 "PWWN=%s, sent=%x", pd, dww_name, sent); 11838 11839 if (f_port == 0 && small < 0) { 11840 FP_TRACE(FP_NHEAD1(3, 0), 11841 "fp_handle_unsol_plogi: Mark the pd" 11842 " as plogi recipient, pd=%p, PWWN=%s" 11843 ", sent=%x", 11844 pd, dww_name, sent); 11845 } 11846 } else { 11847 sent = 0; 11848 } 11849 11850 /* 11851 * Avoid Login collisions by accepting only if my WWN is smaller. 
11852 * 11853 * A side note: There is no need to start a PLOGI from this end in 11854 * this context if login isn't going to be accepted for the 11855 * above reason as either a LIP (in private loop), RSCN (in 11856 * fabric topology), or an FLOGI (in point to point - Huh ? 11857 * check FC-PH) would normally drive the PLOGI from this end. 11858 * At this point of time there is no need for an inbound PLOGI 11859 * to kick an outbound PLOGI when it is going to be rejected 11860 * for the reason of WWN being smaller. However it isn't hard 11861 * to do that either (when such a need arises, start a timer 11862 * for a duration that extends beyond a normal device discovery 11863 * time and check if an outbound PLOGI did go before that, if 11864 * none fire one) 11865 * 11866 * Unfortunately, as it turned out, during booting, it is possible 11867 * to miss another initiator in the same loop as port driver 11868 * instances are serially attached. While preserving the above 11869 * comments for belly laughs, please kick an outbound PLOGI in 11870 * a non-switch environment (which is a pt pt between N_Ports or 11871 * a private loop) 11872 * 11873 * While preserving the above comments for amusement, send an 11874 * ACC if the PLOGI is going to be rejected for WWN being smaller 11875 * when no discovery is in progress at this end. Turn around 11876 * and make the port device as the PLOGI initiator, so that 11877 * during subsequent link/loop initialization, this end drives 11878 * the PLOGI (In fact both ends do in this particular case, but 11879 * only one wins) 11880 * 11881 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11882 * ports (such as 0xFFFC41) are accepted too. 11883 */ 11884 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11885 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11886 if (fp_is_class_supported(port->fp_cos, 11887 buf->ub_class) == FC_FAILURE) { 11888 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11889 cmd = fp_alloc_pkt(port, 11890 sizeof (la_els_logi_t), 0, sleep, pd); 11891 if (cmd == NULL) { 11892 return; 11893 } 11894 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11895 cmd->cmd_pkt.pkt_rsplen = 0; 11896 fp_els_rjt_init(port, cmd, buf, 11897 FC_ACTION_NON_RETRYABLE, 11898 FC_REASON_CLASS_NOT_SUPP, job); 11899 FP_TRACE(FP_NHEAD1(3, 0), 11900 "fp_handle_unsol_plogi: " 11901 "Unsupported class. rejecting PLOGI"); 11902 } 11903 } else { 11904 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11905 0, sleep, pd); 11906 if (cmd == NULL) { 11907 return; 11908 } 11909 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11910 cmd->cmd_pkt.pkt_rsplen = 0; 11911 11912 /* 11913 * Sometime later, we should validate the service 11914 * parameters instead of just accepting it. 11915 */ 11916 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11917 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11918 "Accepting PLOGI, f_port=%d, small=%d, " 11919 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11920 sent); 11921 11922 /* 11923 * If fp_port_id is zero and topology is 11924 * Point-to-Point, get the local port id from 11925 * the d_id in the PLOGI request. 11926 * If the outgoing FLOGI hasn't been accepted, 11927 * the topology will be unknown here. But it's 11928 * still safe to save the d_id to fp_port_id, 11929 * just because it will be overwritten later 11930 * if the topology is not Point-to-Point. 
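 * (That is what the fp_mutex protected assignment just below does: in
 * FC_TOP_PT_PT or FC_TOP_UNKNOWN topologies a zero fp_port_id is seeded
 * from the d_id carried by the incoming PLOGI.)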
11931 */ 11932 mutex_enter(&port->fp_mutex); 11933 if ((port->fp_port_id.port_id == 0) && 11934 (port->fp_topology == FC_TOP_PT_PT || 11935 port->fp_topology == FC_TOP_UNKNOWN)) { 11936 port->fp_port_id.port_id = 11937 buf->ub_frame.d_id; 11938 } 11939 mutex_exit(&port->fp_mutex); 11940 } 11941 } else { 11942 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11943 port->fp_options & FP_SEND_RJT) { 11944 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11945 0, sleep, pd); 11946 if (cmd == NULL) { 11947 return; 11948 } 11949 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11950 cmd->cmd_pkt.pkt_rsplen = 0; 11951 /* 11952 * Send out Logical busy to indicate 11953 * the detection of PLOGI collision 11954 */ 11955 fp_els_rjt_init(port, cmd, buf, 11956 FC_ACTION_NON_RETRYABLE, 11957 FC_REASON_LOGICAL_BSY, job); 11958 11959 fc_wwn_to_str(dwwn, dww_name); 11960 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11961 "Rejecting Unsol PLOGI with Logical Busy." 11962 "possible PLOGI collision. PWWN=%s, sent=%x", 11963 dww_name, sent); 11964 } else { 11965 return; 11966 } 11967 } 11968 11969 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11970 fp_free_pkt(cmd); 11971 } 11972 } 11973 11974 11975 /* 11976 * Handle mischievous turning over of our own FLOGI requests back to 11977 * us by the SOC+ microcode. In other words, look at the class of such 11978 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11979 * on the floor 11980 */ 11981 static void 11982 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11983 job_request_t *job, int sleep) 11984 { 11985 uint32_t state; 11986 uint32_t s_id; 11987 fp_cmd_t *cmd; 11988 11989 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 11990 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11991 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11992 0, sleep, NULL); 11993 if (cmd == NULL) { 11994 return; 11995 } 11996 fp_els_rjt_init(port, cmd, buf, 11997 FC_ACTION_NON_RETRYABLE, 11998 FC_REASON_CLASS_NOT_SUPP, job); 11999 } else { 12000 return; 12001 } 12002 } else { 12003 12004 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12005 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12006 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12007 buf->ub_frame.s_id, buf->ub_frame.d_id, 12008 buf->ub_frame.type, buf->ub_frame.f_ctl, 12009 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12010 buf->ub_frame.rx_id, buf->ub_frame.ro); 12011 12012 mutex_enter(&port->fp_mutex); 12013 state = FC_PORT_STATE_MASK(port->fp_state); 12014 s_id = port->fp_port_id.port_id; 12015 mutex_exit(&port->fp_mutex); 12016 12017 if (state != FC_STATE_ONLINE || 12018 (s_id && buf->ub_frame.s_id == s_id)) { 12019 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12020 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12021 0, sleep, NULL); 12022 if (cmd == NULL) { 12023 return; 12024 } 12025 fp_els_rjt_init(port, cmd, buf, 12026 FC_ACTION_NON_RETRYABLE, 12027 FC_REASON_INVALID_LINK_CTRL, job); 12028 FP_TRACE(FP_NHEAD1(3, 0), 12029 "fp_handle_unsol_flogi: " 12030 "Rejecting PLOGI. Invalid Link CTRL"); 12031 } else { 12032 return; 12033 } 12034 } else { 12035 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12036 0, sleep, NULL); 12037 if (cmd == NULL) { 12038 return; 12039 } 12040 /* 12041 * Let's not aggressively validate the N_Port's 12042 * service parameters until PLOGI. Suffice it 12043 * to give a hint that we are an N_Port and we 12044 * are game to some serious stuff here. 
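 * (The accept sent below is simply our own service parameters with the
 * ls_code overwritten to LA_ELS_ACC; see fp_login_acc_init().)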
12045 */ 12046 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12047 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12048 "Accepting PLOGI"); 12049 } 12050 } 12051 12052 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12053 fp_free_pkt(cmd); 12054 } 12055 } 12056 12057 12058 /* 12059 * Perform PLOGI accept 12060 */ 12061 static void 12062 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12063 job_request_t *job, int sleep) 12064 { 12065 fc_packet_t *pkt; 12066 fc_portmap_t *listptr; 12067 la_els_logi_t payload; 12068 12069 ASSERT(buf != NULL); 12070 12071 /* 12072 * If we are sending ACC to PLOGI and we haven't already 12073 * create port and node device handles, let's create them 12074 * here. 12075 */ 12076 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12077 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12078 int small; 12079 int do_acc; 12080 fc_remote_port_t *pd; 12081 la_els_logi_t *req; 12082 12083 req = (la_els_logi_t *)buf->ub_buffer; 12084 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12085 &req->nport_ww_name); 12086 12087 mutex_enter(&port->fp_mutex); 12088 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12089 mutex_exit(&port->fp_mutex); 12090 12091 pd = fctl_create_remote_port(port, &req->node_ww_name, 12092 &req->nport_ww_name, buf->ub_frame.s_id, 12093 PD_PLOGI_RECEPIENT, sleep); 12094 if (pd == NULL) { 12095 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12096 "Couldn't create port device for d_id:0x%x", 12097 buf->ub_frame.s_id); 12098 12099 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12100 "couldn't create port device d_id=%x", 12101 buf->ub_frame.s_id); 12102 } else { 12103 /* 12104 * usoc currently returns PLOGIs inline and 12105 * the maximum buffer size is 60 bytes or so. 12106 * So attempt not to look beyond what is in 12107 * the unsolicited buffer 12108 * 12109 * JNI also traverses this path sometimes 12110 */ 12111 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12112 fp_register_login(NULL, pd, req, buf->ub_class); 12113 } else { 12114 mutex_enter(&pd->pd_mutex); 12115 if (pd->pd_login_count == 0) { 12116 pd->pd_login_count++; 12117 } 12118 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12119 pd->pd_login_class = buf->ub_class; 12120 mutex_exit(&pd->pd_mutex); 12121 } 12122 12123 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12124 if (listptr != NULL) { 12125 fctl_copy_portmap(listptr, pd); 12126 (void) fp_ulp_devc_cb(port, listptr, 12127 1, 1, sleep, 0); 12128 } 12129 12130 if (small > 0 && do_acc) { 12131 mutex_enter(&pd->pd_mutex); 12132 pd->pd_recepient = PD_PLOGI_INITIATOR; 12133 mutex_exit(&pd->pd_mutex); 12134 } 12135 } 12136 } 12137 12138 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12139 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12140 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12141 cmd->cmd_retry_count = 1; 12142 cmd->cmd_ulp_pkt = NULL; 12143 12144 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12145 cmd->cmd_job = job; 12146 12147 pkt = &cmd->cmd_pkt; 12148 12149 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12150 12151 payload = port->fp_service_params; 12152 payload.ls_code.ls_code = LA_ELS_ACC; 12153 12154 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12155 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12156 12157 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12158 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12159 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12160 "statec_busy:0x%x", buf->ub_buffer[0], 
buf->ub_frame.s_id, 12161 buf->ub_bufsize, sizeof (la_els_logi_t), 12162 port->fp_service_params.nport_ww_name.w.naa_id, 12163 port->fp_service_params.nport_ww_name.w.nport_id, 12164 port->fp_service_params.nport_ww_name.w.wwn_hi, 12165 port->fp_service_params.nport_ww_name.w.wwn_lo, 12166 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12167 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12168 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12169 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12170 port->fp_statec_busy); 12171 } 12172 12173 12174 #define RSCN_EVENT_NAME_LEN 256 12175 12176 /* 12177 * Handle RSCNs 12178 */ 12179 static void 12180 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12181 job_request_t *job, int sleep) 12182 { 12183 uint32_t mask; 12184 fp_cmd_t *cmd; 12185 uint32_t count; 12186 int listindex; 12187 int16_t len; 12188 fc_rscn_t *payload; 12189 fc_portmap_t *listptr; 12190 fctl_ns_req_t *ns_cmd; 12191 fc_affected_id_t *page; 12192 caddr_t nvname; 12193 nvlist_t *attr_list = NULL; 12194 12195 mutex_enter(&port->fp_mutex); 12196 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12197 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12198 --port->fp_rscn_count; 12199 } 12200 mutex_exit(&port->fp_mutex); 12201 return; 12202 } 12203 mutex_exit(&port->fp_mutex); 12204 12205 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12206 if (cmd != NULL) { 12207 fp_els_acc_init(port, cmd, buf, job); 12208 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12209 fp_free_pkt(cmd); 12210 } 12211 } 12212 12213 payload = (fc_rscn_t *)buf->ub_buffer; 12214 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12215 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN); 12216 12217 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12218 12219 if (len <= 0) { 12220 mutex_enter(&port->fp_mutex); 12221 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12222 --port->fp_rscn_count; 12223 } 12224 mutex_exit(&port->fp_mutex); 12225 12226 return; 12227 } 12228 12229 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12230 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12231 12232 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12233 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12234 12235 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12236 12237 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12238 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12239 0, sleep); 12240 if (ns_cmd == NULL) { 12241 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12242 12243 mutex_enter(&port->fp_mutex); 12244 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12245 --port->fp_rscn_count; 12246 } 12247 mutex_exit(&port->fp_mutex); 12248 12249 return; 12250 } 12251 12252 ns_cmd->ns_cmd_code = NS_GPN_ID; 12253 12254 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12255 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12256 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12257 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12258 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12259 12260 /* Only proceed if we can allocate nvname and the nvlist */ 12261 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12262 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12263 KM_NOSLEEP) == DDI_SUCCESS) { 12264 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12265 port->fp_instance) == 
DDI_SUCCESS && 12266 nvlist_add_byte_array(attr_list, "port-wwn", 12267 port->fp_service_params.nport_ww_name.raw_wwn, 12268 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12269 nvlist_free(attr_list); 12270 attr_list = NULL; 12271 } 12272 } 12273 12274 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12275 /* Add affected page to the event payload */ 12276 if (attr_list != NULL) { 12277 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12278 "affected_page_%d", listindex); 12279 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12280 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12281 /* We don't send a partial event, so dump it */ 12282 nvlist_free(attr_list); 12283 attr_list = NULL; 12284 } 12285 } 12286 /* 12287 * Query the NS to get the Port WWN for this 12288 * affected D_ID. 12289 */ 12290 mask = 0; 12291 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12292 case FC_RSCN_PORT_ADDRESS: 12293 fp_validate_rscn_page(port, page, job, ns_cmd, 12294 listptr, &listindex, sleep); 12295 12296 if (listindex == 0) { 12297 /* 12298 * We essentially did not process this RSCN. So, 12299 * ULPs are not going to be called and so we 12300 * decrement the rscn_count 12301 */ 12302 mutex_enter(&port->fp_mutex); 12303 if (--port->fp_rscn_count == 12304 FC_INVALID_RSCN_COUNT) { 12305 --port->fp_rscn_count; 12306 } 12307 mutex_exit(&port->fp_mutex); 12308 } 12309 break; 12310 12311 case FC_RSCN_AREA_ADDRESS: 12312 mask = 0xFFFF00; 12313 /* FALLTHROUGH */ 12314 12315 case FC_RSCN_DOMAIN_ADDRESS: 12316 if (!mask) { 12317 mask = 0xFF0000; 12318 } 12319 fp_validate_area_domain(port, page->aff_d_id, mask, 12320 job, sleep); 12321 break; 12322 12323 case FC_RSCN_FABRIC_ADDRESS: 12324 /* 12325 * We need to discover all the devices on this 12326 * port. 12327 */ 12328 fp_validate_area_domain(port, 0, 0, job, sleep); 12329 break; 12330 12331 default: 12332 break; 12333 } 12334 } 12335 if (attr_list != NULL) { 12336 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12337 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12338 NULL, DDI_SLEEP); 12339 nvlist_free(attr_list); 12340 } else { 12341 FP_TRACE(FP_NHEAD1(9, 0), 12342 "RSCN handled, but event not sent to userland"); 12343 } 12344 if (nvname != NULL) { 12345 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12346 } 12347 12348 if (ns_cmd) { 12349 fctl_free_ns_cmd(ns_cmd); 12350 } 12351 12352 if (listindex) { 12353 #ifdef DEBUG 12354 page = (fc_affected_id_t *)(buf->ub_buffer + 12355 sizeof (fc_rscn_t)); 12356 12357 if (listptr->map_did.port_id != page->aff_d_id) { 12358 FP_TRACE(FP_NHEAD1(9, 0), 12359 "PORT RSCN: processed=%x, reporting=%x", 12360 listptr->map_did.port_id, page->aff_d_id); 12361 } 12362 #endif 12363 12364 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12365 sleep, 0); 12366 } else { 12367 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12368 } 12369 } 12370 12371 12372 /* 12373 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12374 */ 12375 static void 12376 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12377 { 12378 int is_switch; 12379 int initiator; 12380 fc_local_port_t *port; 12381 12382 port = pd->pd_port; 12383 12384 /* This function has the following bunch of assumptions */ 12385 ASSERT(port != NULL); 12386 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12387 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12388 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12389 12390 pd->pd_state = PORT_DEVICE_INVALID; 12391 pd->pd_type = PORT_DEVICE_OLD; 12392 initiator = (pd->pd_recepient == 
PD_PLOGI_INITIATOR) ? 1 : 0; 12393 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12394 12395 fctl_delist_did_table(port, pd); 12396 fctl_delist_pwwn_table(port, pd); 12397 12398 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12399 " removed the PD=%p from DID and PWWN tables", 12400 port, pd->pd_port_id.port_id, pd); 12401 12402 if ((!flag) && port && initiator && is_switch) { 12403 (void) fctl_add_orphan_held(port, pd); 12404 } 12405 fctl_copy_portmap_held(map, pd); 12406 map->map_pd = pd; 12407 } 12408 12409 /* 12410 * Fill out old map for ULPs 12411 */ 12412 static void 12413 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12414 { 12415 int is_switch; 12416 int initiator; 12417 fc_local_port_t *port; 12418 12419 mutex_enter(&pd->pd_mutex); 12420 port = pd->pd_port; 12421 mutex_exit(&pd->pd_mutex); 12422 12423 mutex_enter(&port->fp_mutex); 12424 mutex_enter(&pd->pd_mutex); 12425 12426 pd->pd_state = PORT_DEVICE_INVALID; 12427 pd->pd_type = PORT_DEVICE_OLD; 12428 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12429 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12430 12431 fctl_delist_did_table(port, pd); 12432 fctl_delist_pwwn_table(port, pd); 12433 12434 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12435 " removed the PD=%p from DID and PWWN tables", 12436 port, pd->pd_port_id.port_id, pd); 12437 12438 mutex_exit(&pd->pd_mutex); 12439 mutex_exit(&port->fp_mutex); 12440 12441 ASSERT(port != NULL); 12442 if ((!flag) && port && initiator && is_switch) { 12443 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12444 } 12445 fctl_copy_portmap(map, pd); 12446 map->map_pd = pd; 12447 } 12448 12449 12450 /* 12451 * Fillout Changed Map for ULPs 12452 */ 12453 static void 12454 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12455 uint32_t *new_did, la_wwn_t *new_pwwn) 12456 { 12457 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12458 12459 pd->pd_type = PORT_DEVICE_CHANGED; 12460 if (new_did) { 12461 pd->pd_port_id.port_id = *new_did; 12462 } 12463 if (new_pwwn) { 12464 pd->pd_port_name = *new_pwwn; 12465 } 12466 mutex_exit(&pd->pd_mutex); 12467 12468 fctl_copy_portmap(map, pd); 12469 12470 mutex_enter(&pd->pd_mutex); 12471 pd->pd_type = PORT_DEVICE_NOCHANGE; 12472 } 12473 12474 12475 /* 12476 * Fillout New Name Server map 12477 */ 12478 static void 12479 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12480 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12481 { 12482 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12483 12484 if (handle) { 12485 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12486 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12487 DDI_DEV_AUTOINCR); 12488 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12489 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12490 DDI_DEV_AUTOINCR); 12491 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12492 (uint8_t *)gan_resp->gan_fc4types, 12493 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12494 } else { 12495 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12496 sizeof (gan_resp->gan_pwwn)); 12497 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12498 sizeof (gan_resp->gan_nwwn)); 12499 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12500 sizeof (gan_resp->gan_fc4types)); 12501 } 12502 port_map->map_did.port_id = d_id; 12503 port_map->map_did.priv_lilp_posit = 0; 12504 port_map->map_hard_addr.hard_addr = 0; 12505 port_map->map_hard_addr.rsvd = 0; 12506 
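	/*
	 * The remaining fields mark this as a brand new, not yet probed
	 * entry: no pd is attached, the state is left INVALID and the
	 * entry is typed PORT_DEVICE_NEW, so the ULPs are expected to
	 * treat it as a freshly discovered device.
	 */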
port_map->map_state = PORT_DEVICE_INVALID; 12507 port_map->map_type = PORT_DEVICE_NEW; 12508 port_map->map_flags = 0; 12509 port_map->map_pd = NULL; 12510 12511 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12512 12513 ASSERT(port != NULL); 12514 } 12515 12516 12517 /* 12518 * Perform LINIT ELS 12519 */ 12520 static int 12521 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12522 job_request_t *job) 12523 { 12524 int rval; 12525 uint32_t d_id; 12526 uint32_t s_id; 12527 uint32_t lfa; 12528 uchar_t class; 12529 uint32_t ret; 12530 fp_cmd_t *cmd; 12531 fc_porttype_t ptype; 12532 fc_packet_t *pkt; 12533 fc_linit_req_t payload; 12534 fc_remote_port_t *pd; 12535 12536 rval = 0; 12537 12538 ASSERT(job != NULL); 12539 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12540 12541 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12542 if (pd == NULL) { 12543 fctl_ns_req_t *ns_cmd; 12544 12545 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12546 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12547 0, sleep); 12548 12549 if (ns_cmd == NULL) { 12550 return (FC_NOMEM); 12551 } 12552 job->job_result = FC_SUCCESS; 12553 ns_cmd->ns_cmd_code = NS_GID_PN; 12554 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12555 12556 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12557 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12558 fctl_free_ns_cmd(ns_cmd); 12559 return (FC_FAILURE); 12560 } 12561 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12562 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12563 12564 fctl_free_ns_cmd(ns_cmd); 12565 lfa = d_id & 0xFFFF00; 12566 12567 /* 12568 * Given this D_ID, get the port type to see if 12569 * we can do LINIT on the LFA 12570 */ 12571 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12572 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12573 0, sleep); 12574 12575 if (ns_cmd == NULL) { 12576 return (FC_NOMEM); 12577 } 12578 12579 job->job_result = FC_SUCCESS; 12580 ns_cmd->ns_cmd_code = NS_GPT_ID; 12581 12582 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12583 ((ns_req_gpt_id_t *) 12584 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12585 12586 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12587 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12588 fctl_free_ns_cmd(ns_cmd); 12589 return (FC_FAILURE); 12590 } 12591 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12592 12593 fctl_free_ns_cmd(ns_cmd); 12594 12595 switch (ptype.port_type) { 12596 case FC_NS_PORT_NL: 12597 case FC_NS_PORT_F_NL: 12598 case FC_NS_PORT_FL: 12599 break; 12600 12601 default: 12602 return (FC_FAILURE); 12603 } 12604 } else { 12605 mutex_enter(&pd->pd_mutex); 12606 ptype = pd->pd_porttype; 12607 12608 switch (pd->pd_porttype.port_type) { 12609 case FC_NS_PORT_NL: 12610 case FC_NS_PORT_F_NL: 12611 case FC_NS_PORT_FL: 12612 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12613 break; 12614 12615 default: 12616 mutex_exit(&pd->pd_mutex); 12617 return (FC_FAILURE); 12618 } 12619 mutex_exit(&pd->pd_mutex); 12620 } 12621 12622 mutex_enter(&port->fp_mutex); 12623 s_id = port->fp_port_id.port_id; 12624 class = port->fp_ns_login_class; 12625 mutex_exit(&port->fp_mutex); 12626 12627 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12628 sizeof (fc_linit_resp_t), sleep, pd); 12629 if (cmd == NULL) { 12630 return (FC_NOMEM); 12631 } 12632 12633 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12634 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12635 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 
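	/*
	 * Completion is interrupt driven (FC_TRAN_INTR above) in the class
	 * we logged in to the name server with; completion is reported
	 * through fp_linit_intr(), installed by fp_els_init() below, and
	 * this routine then waits for it with fp_jobwait().
	 */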
12636 cmd->cmd_retry_count = fp_retry_count; 12637 cmd->cmd_ulp_pkt = NULL; 12638 12639 pkt = &cmd->cmd_pkt; 12640 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12641 12642 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12643 12644 /* 12645 * How does LIP work by the way ? 12646 * If the L_Port receives three consecutive identical ordered 12647 * sets whose first two characters (fully decoded) are equal to 12648 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12649 * recognize a Loop Initialization Primitive sequence. The 12650 * character 3 determines the type of lip: 12651 * LIP(F7) Normal LIP 12652 * LIP(F8) Loop Failure LIP 12653 * 12654 * The possible combination for the 3rd and 4th bytes are: 12655 * F7, F7 Normal Lip - No valid AL_PA 12656 * F8, F8 Loop Failure - No valid AL_PA 12657 * F7, AL_PS Normal Lip - Valid source AL_PA 12658 * F8, AL_PS Loop Failure - Valid source AL_PA 12659 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12660 * And Normal Lip for all other loop members 12661 * 0xFF AL_PS Vendor specific reset of all loop members 12662 * 12663 * Now, it may not always be that we, at the source, may have an 12664 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12665 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12666 * payload we are going to set: 12667 * lip_b3 = 0xF7; Normal LIP 12668 * lip_b4 = 0xF7; No valid source AL_PA 12669 */ 12670 payload.ls_code.ls_code = LA_ELS_LINIT; 12671 payload.ls_code.mbz = 0; 12672 payload.rsvd = 0; 12673 payload.func = 0; /* Let Fabric determine the best way */ 12674 payload.lip_b3 = 0xF7; /* Normal LIP */ 12675 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12676 12677 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12678 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12679 12680 job->job_counter = 1; 12681 12682 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12683 if (ret == FC_SUCCESS) { 12684 fp_jobwait(job); 12685 rval = job->job_result; 12686 } else { 12687 rval = FC_FAILURE; 12688 fp_free_pkt(cmd); 12689 } 12690 12691 return (rval); 12692 } 12693 12694 12695 /* 12696 * Fill out the device handles with GAN response 12697 */ 12698 static void 12699 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12700 ns_resp_gan_t *gan_resp) 12701 { 12702 fc_remote_node_t *node; 12703 fc_porttype_t type; 12704 fc_local_port_t *port; 12705 12706 ASSERT(pd != NULL); 12707 ASSERT(handle != NULL); 12708 12709 port = pd->pd_port; 12710 12711 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12712 " port_id=%x, sym_len=%d fc4-type=%x", 12713 pd, gan_resp->gan_type_id.rsvd, 12714 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12715 12716 mutex_enter(&pd->pd_mutex); 12717 12718 ddi_rep_get8(*handle, (uint8_t *)&type, 12719 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12720 12721 pd->pd_porttype.port_type = type.port_type; 12722 pd->pd_porttype.rsvd = 0; 12723 12724 pd->pd_spn_len = gan_resp->gan_spnlen; 12725 if (pd->pd_spn_len) { 12726 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12727 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12728 DDI_DEV_AUTOINCR); 12729 } 12730 12731 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12732 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12733 DDI_DEV_AUTOINCR); 12734 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12735 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12736 DDI_DEV_AUTOINCR); 12737 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12738 (uint8_t 
*)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12739 DDI_DEV_AUTOINCR); 12740 12741 node = pd->pd_remote_nodep; 12742 mutex_exit(&pd->pd_mutex); 12743 12744 mutex_enter(&node->fd_mutex); 12745 12746 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12747 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12748 DDI_DEV_AUTOINCR); 12749 12750 node->fd_snn_len = gan_resp->gan_snnlen; 12751 if (node->fd_snn_len) { 12752 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12753 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12754 DDI_DEV_AUTOINCR); 12755 } 12756 12757 mutex_exit(&node->fd_mutex); 12758 } 12759 12760 12761 /* 12762 * Handles all NS Queries (also means that this function 12763 * doesn't handle NS object registration) 12764 */ 12765 static int 12766 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12767 int polled, int sleep) 12768 { 12769 int rval; 12770 fp_cmd_t *cmd; 12771 12772 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12773 12774 if (ns_cmd->ns_cmd_size == 0) { 12775 return (FC_FAILURE); 12776 } 12777 12778 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12779 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12780 ns_cmd->ns_resp_size, sleep, NULL); 12781 if (cmd == NULL) { 12782 return (FC_NOMEM); 12783 } 12784 12785 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12786 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12787 12788 if (polled) { 12789 job->job_counter = 1; 12790 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12791 } 12792 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12793 if (rval != FC_SUCCESS) { 12794 job->job_result = rval; 12795 fp_iodone(cmd); 12796 if (polled == 0) { 12797 /* 12798 * Return FC_SUCCESS to indicate that 12799 * fp_iodone is performed already. 
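			 * The caller must therefore not complete or
			 * free this command again on this path.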
12800 */ 12801 rval = FC_SUCCESS; 12802 } 12803 } 12804 12805 if (polled) { 12806 fp_jobwait(job); 12807 rval = job->job_result; 12808 } 12809 12810 return (rval); 12811 } 12812 12813 12814 /* 12815 * Initialize Common Transport request 12816 */ 12817 static void 12818 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12819 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12820 uint16_t resp_len, job_request_t *job) 12821 { 12822 uint32_t s_id; 12823 uchar_t class; 12824 fc_packet_t *pkt; 12825 fc_ct_header_t ct; 12826 12827 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12828 12829 mutex_enter(&port->fp_mutex); 12830 s_id = port->fp_port_id.port_id; 12831 class = port->fp_ns_login_class; 12832 mutex_exit(&port->fp_mutex); 12833 12834 cmd->cmd_job = job; 12835 cmd->cmd_private = ns_cmd; 12836 pkt = &cmd->cmd_pkt; 12837 12838 ct.ct_rev = CT_REV; 12839 ct.ct_inid = 0; 12840 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12841 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12842 ct.ct_options = 0; 12843 ct.ct_reserved1 = 0; 12844 ct.ct_cmdrsp = cmd_code; 12845 ct.ct_aiusize = resp_len >> 2; 12846 ct.ct_reserved2 = 0; 12847 ct.ct_reason = 0; 12848 ct.ct_expln = 0; 12849 ct.ct_vendor = 0; 12850 12851 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12852 sizeof (ct), DDI_DEV_AUTOINCR); 12853 12854 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12855 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12856 pkt->pkt_cmd_fhdr.s_id = s_id; 12857 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12858 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12859 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12860 pkt->pkt_cmd_fhdr.seq_id = 0; 12861 pkt->pkt_cmd_fhdr.df_ctl = 0; 12862 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12863 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12864 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12865 pkt->pkt_cmd_fhdr.ro = 0; 12866 pkt->pkt_cmd_fhdr.rsvd = 0; 12867 12868 pkt->pkt_comp = fp_ns_intr; 12869 pkt->pkt_ulp_private = (opaque_t)cmd; 12870 pkt->pkt_timeout = FP_NS_TIMEOUT; 12871 12872 if (cmd_buf) { 12873 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12874 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12875 cmd_len, DDI_DEV_AUTOINCR); 12876 } 12877 12878 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12879 12880 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12881 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12882 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12883 cmd->cmd_retry_count = fp_retry_count; 12884 cmd->cmd_ulp_pkt = NULL; 12885 } 12886 12887 12888 /* 12889 * Name Server request interrupt routine 12890 */ 12891 static void 12892 fp_ns_intr(fc_packet_t *pkt) 12893 { 12894 fp_cmd_t *cmd; 12895 fc_local_port_t *port; 12896 fc_ct_header_t resp_hdr; 12897 fc_ct_header_t cmd_hdr; 12898 fctl_ns_req_t *ns_cmd; 12899 12900 cmd = pkt->pkt_ulp_private; 12901 port = cmd->cmd_port; 12902 12903 mutex_enter(&port->fp_mutex); 12904 port->fp_out_fpcmds--; 12905 mutex_exit(&port->fp_mutex); 12906 12907 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12908 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12909 ns_cmd = (fctl_ns_req_t *) 12910 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12911 if (!FP_IS_PKT_ERROR(pkt)) { 12912 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12913 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12914 DDI_DEV_AUTOINCR); 12915 12916 /* 12917 * On x86 architectures, make sure the resp_hdr is big endian. 
* This macro is a NOP on sparc architectures mainly because 12919 * we don't want to end up wasting time since the end result 12920 * is going to be the same. 12921 */ 12922 MAKE_BE_32(&resp_hdr); 12923 12924 if (ns_cmd) { 12925 /* 12926 * Always copy out the response CT_HDR 12927 */ 12928 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12929 sizeof (resp_hdr)); 12930 } 12931 12932 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12933 pkt->pkt_state = FC_PKT_FS_RJT; 12934 pkt->pkt_reason = resp_hdr.ct_reason; 12935 pkt->pkt_expln = resp_hdr.ct_expln; 12936 } 12937 } 12938 12939 if (FP_IS_PKT_ERROR(pkt)) { 12940 if (ns_cmd) { 12941 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12942 ASSERT(ns_cmd->ns_pd != NULL); 12943 12944 /* Mark it OLD if not already done */ 12945 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12946 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12947 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12948 } 12949 12950 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12951 fctl_free_ns_cmd(ns_cmd); 12952 ((fp_cmd_t *) 12953 (pkt->pkt_ulp_private))->cmd_private = NULL; 12954 } 12955 12956 } 12957 12958 FP_TRACE(FP_NHEAD1(4, 0), "NS failure; pkt state=%x reason=%x", 12959 pkt->pkt_state, pkt->pkt_reason); 12960 12961 (void) fp_common_intr(pkt, 1); 12962 12963 return; 12964 } 12965 12966 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12967 uint32_t d_id; 12968 fc_local_port_t *port; 12969 fp_cmd_t *cmd; 12970 12971 d_id = pkt->pkt_cmd_fhdr.d_id; 12972 cmd = pkt->pkt_ulp_private; 12973 port = cmd->cmd_port; 12974 FP_TRACE(FP_NHEAD2(9, 0), 12975 "Bogus NS response received for D_ID=%x", d_id); 12976 } 12977 12978 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12979 fp_gan_handler(pkt, ns_cmd); 12980 return; 12981 } 12982 12983 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 12984 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 12985 if (ns_cmd) { 12986 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 12987 fp_ns_query_handler(pkt, ns_cmd); 12988 return; 12989 } 12990 } 12991 } 12992 12993 fp_iodone(pkt->pkt_ulp_private); 12994 } 12995 12996 12997 /* 12998 * Process NS_GAN response 12999 */ 13000 static void 13001 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13002 { 13003 int my_did; 13004 fc_portid_t d_id; 13005 fp_cmd_t *cmd; 13006 fc_local_port_t *port; 13007 fc_remote_port_t *pd; 13008 ns_req_gan_t gan_req; 13009 ns_resp_gan_t *gan_resp; 13010 13011 ASSERT(ns_cmd != NULL); 13012 13013 cmd = pkt->pkt_ulp_private; 13014 port = cmd->cmd_port; 13015 13016 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13017 13018 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 13019 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13020 13021 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13022 13023 /* 13024 * In this case the priv_lilp_posit field actually 13025 * represents the relative position on a private loop. 13026 * So zero it while dealing with Port Identifiers. 13027 */ 13028 d_id.priv_lilp_posit = 0; 13029 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13030 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13031 /* 13032 * We've come full circle; time to get out. 13033 */ 13034 fp_iodone(cmd); 13035 return; 13036 } 13037 13038 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13039 ns_cmd->ns_gan_sid = d_id.port_id; 13040 } 13041 13042 mutex_enter(&port->fp_mutex); 13043 my_did = (d_id.port_id == port->fp_port_id.port_id) ?
1 : 0; 13044 mutex_exit(&port->fp_mutex); 13045 13046 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, d_id=%x", port, 13047 d_id.port_id); 13048 13049 if (my_did == 0) { 13050 la_wwn_t pwwn; 13051 la_wwn_t nwwn; 13052 13053 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13054 "port=%p, d_id=%x, type_id=%x, " 13055 "pwwn=%x %x %x %x %x %x %x %x, " 13056 "nwwn=%x %x %x %x %x %x %x %x", 13057 port, d_id.port_id, gan_resp->gan_type_id, 13058 13059 gan_resp->gan_pwwn.raw_wwn[0], 13060 gan_resp->gan_pwwn.raw_wwn[1], 13061 gan_resp->gan_pwwn.raw_wwn[2], 13062 gan_resp->gan_pwwn.raw_wwn[3], 13063 gan_resp->gan_pwwn.raw_wwn[4], 13064 gan_resp->gan_pwwn.raw_wwn[5], 13065 gan_resp->gan_pwwn.raw_wwn[6], 13066 gan_resp->gan_pwwn.raw_wwn[7], 13067 13068 gan_resp->gan_nwwn.raw_wwn[0], 13069 gan_resp->gan_nwwn.raw_wwn[1], 13070 gan_resp->gan_nwwn.raw_wwn[2], 13071 gan_resp->gan_nwwn.raw_wwn[3], 13072 gan_resp->gan_nwwn.raw_wwn[4], 13073 gan_resp->gan_nwwn.raw_wwn[5], 13074 gan_resp->gan_nwwn.raw_wwn[6], 13075 gan_resp->gan_nwwn.raw_wwn[7]); 13076 13077 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13078 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13079 DDI_DEV_AUTOINCR); 13080 13081 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13082 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13083 DDI_DEV_AUTOINCR); 13084 13085 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13086 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13087 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13088 } 13089 if (pd != NULL) { 13090 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13091 pd, gan_resp); 13092 } 13093 13094 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13095 *((int *)ns_cmd->ns_data_buf) += 1; 13096 } 13097 13098 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13099 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13100 13101 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13102 fc_port_dev_t *userbuf; 13103 13104 userbuf = ((fc_port_dev_t *) 13105 ns_cmd->ns_data_buf) + 13106 ns_cmd->ns_gan_index++; 13107 13108 userbuf->dev_did = d_id; 13109 13110 ddi_rep_get8(pkt->pkt_resp_acc, 13111 (uint8_t *)userbuf->dev_type, 13112 (uint8_t *)gan_resp->gan_fc4types, 13113 sizeof (userbuf->dev_type), 13114 DDI_DEV_AUTOINCR); 13115 13116 userbuf->dev_nwwn = nwwn; 13117 userbuf->dev_pwwn = pwwn; 13118 13119 if (pd != NULL) { 13120 mutex_enter(&pd->pd_mutex); 13121 userbuf->dev_state = pd->pd_state; 13122 userbuf->dev_hard_addr = 13123 pd->pd_hard_addr; 13124 mutex_exit(&pd->pd_mutex); 13125 } else { 13126 userbuf->dev_state = 13127 PORT_DEVICE_INVALID; 13128 } 13129 } else if (ns_cmd->ns_flags & 13130 FCTL_NS_BUF_IS_FC_PORTMAP) { 13131 fc_portmap_t *map; 13132 13133 map = ((fc_portmap_t *) 13134 ns_cmd->ns_data_buf) + 13135 ns_cmd->ns_gan_index++; 13136 13137 /* 13138 * First fill it like any new map 13139 * and update the port device info 13140 * below. 
13141 */ 13142 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13143 map, gan_resp, d_id.port_id); 13144 if (pd != NULL) { 13145 fctl_copy_portmap(map, pd); 13146 } else { 13147 map->map_state = PORT_DEVICE_INVALID; 13148 map->map_type = PORT_DEVICE_NOCHANGE; 13149 } 13150 } else { 13151 caddr_t dst_ptr; 13152 13153 dst_ptr = ns_cmd->ns_data_buf + 13154 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13155 13156 ddi_rep_get8(pkt->pkt_resp_acc, 13157 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13158 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13159 } 13160 } else { 13161 ns_cmd->ns_gan_index++; 13162 } 13163 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13164 fp_iodone(cmd); 13165 return; 13166 } 13167 } 13168 13169 gan_req.pid = d_id; 13170 13171 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13172 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13173 sizeof (gan_req), DDI_DEV_AUTOINCR); 13174 13175 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13176 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13177 fp_iodone(cmd); 13178 } else { 13179 mutex_enter(&port->fp_mutex); 13180 port->fp_out_fpcmds++; 13181 mutex_exit(&port->fp_mutex); 13182 } 13183 } 13184 13185 13186 /* 13187 * Handle NS Query interrupt 13188 */ 13189 static void 13190 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13191 { 13192 fp_cmd_t *cmd; 13193 fc_local_port_t *port; 13194 caddr_t src_ptr; 13195 uint32_t xfer_len; 13196 13197 cmd = pkt->pkt_ulp_private; 13198 port = cmd->cmd_port; 13199 13200 xfer_len = ns_cmd->ns_resp_size; 13201 13202 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13203 ns_cmd->ns_cmd_code, xfer_len); 13204 13205 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13206 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13207 13208 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13209 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13210 } 13211 13212 if (xfer_len <= ns_cmd->ns_data_len) { 13213 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13214 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13215 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13216 } 13217 13218 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13219 ASSERT(ns_cmd->ns_pd != NULL); 13220 13221 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13222 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13223 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13224 } 13225 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13226 } 13227 13228 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13229 fctl_free_ns_cmd(ns_cmd); 13230 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13231 } 13232 fp_iodone(cmd); 13233 } 13234 13235 13236 /* 13237 * Handle unsolicited ADISC ELS request 13238 */ 13239 static void 13240 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13241 fc_remote_port_t *pd, job_request_t *job) 13242 { 13243 int rval; 13244 fp_cmd_t *cmd; 13245 13246 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13247 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13248 mutex_enter(&pd->pd_mutex); 13249 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13250 mutex_exit(&pd->pd_mutex); 13251 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13252 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13253 0, KM_SLEEP, pd); 13254 if (cmd != NULL) { 13255 fp_els_rjt_init(port, cmd, buf, 13256 FC_ACTION_NON_RETRYABLE, 13257 FC_REASON_INVALID_LINK_CTRL, job); 13258 13259 if (fp_sendcmd(port, cmd, 13260 port->fp_fca_handle) != FC_SUCCESS) { 13261 
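				/*
				 * The FCA did not accept the packet, so
				 * its completion routine will never run;
				 * release it right here.
				 */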
fp_free_pkt(cmd); 13262 } 13263 } 13264 } 13265 } else { 13266 mutex_exit(&pd->pd_mutex); 13267 /* 13268 * We may not have a hard address, but we 13269 * should still respond. See section 21.19.2 13270 * of FC-PH-2, which essentially says that if an 13271 * NL_Port doesn't have a hard address, or if a port 13272 * does not have FC-AL capability, it shall report 13273 * zeroes in this field. 13274 */ 13275 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13276 0, KM_SLEEP, pd); 13277 if (cmd == NULL) { 13278 return; 13279 } 13280 fp_adisc_acc_init(port, cmd, buf, job); 13281 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13282 if (rval != FC_SUCCESS) { 13283 fp_free_pkt(cmd); 13284 } 13285 } 13286 } 13287 13288 13289 /* 13290 * Initialize ADISC response. 13291 */ 13292 static void 13293 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13294 job_request_t *job) 13295 { 13296 fc_packet_t *pkt; 13297 la_els_adisc_t payload; 13298 13299 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13300 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13301 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13302 cmd->cmd_retry_count = 1; 13303 cmd->cmd_ulp_pkt = NULL; 13304 13305 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13306 cmd->cmd_job = job; 13307 13308 pkt = &cmd->cmd_pkt; 13309 13310 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13311 13312 payload.ls_code.ls_code = LA_ELS_ACC; 13313 payload.ls_code.mbz = 0; 13314 13315 mutex_enter(&port->fp_mutex); 13316 payload.nport_id = port->fp_port_id; 13317 payload.hard_addr = port->fp_hard_addr; 13318 mutex_exit(&port->fp_mutex); 13319 13320 payload.port_wwn = port->fp_service_params.nport_ww_name; 13321 payload.node_wwn = port->fp_service_params.node_ww_name; 13322 13323 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 13324 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13325 } 13326 13327 13328 /* 13329 * Hold and install the requested ULP drivers 13330 */ 13331 static void 13332 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13333 { 13334 int len; 13335 int count; 13336 int data_len; 13337 major_t ulp_major; 13338 caddr_t ulp_name; 13339 caddr_t data_ptr; 13340 caddr_t data_buf; 13341 13342 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13343 13344 data_buf = NULL; 13345 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13346 DDI_PROP_DONTPASS, "load-ulp-list", 13347 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13348 return; 13349 } 13350 13351 len = strlen(data_buf); 13352 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13353 13354 data_ptr = data_buf + len + 1; 13355 for (count = 0; count < port->fp_ulp_nload; count++) { 13356 len = strlen(data_ptr) + 1; 13357 ulp_name = kmem_zalloc(len, KM_SLEEP); 13358 bcopy(data_ptr, ulp_name, len); 13359 13360 ulp_major = ddi_name_to_major(ulp_name); 13361 13362 if (ulp_major != (major_t)-1) { 13363 if (modload("drv", ulp_name) < 0) { 13364 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13365 0, NULL, "failed to load %s", 13366 ulp_name); 13367 } 13368 } else { 13369 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13370 "%s isn't a valid driver", ulp_name); 13371 } 13372 13373 kmem_free(ulp_name, len); 13374 data_ptr += len; /* Skip to next field */ 13375 } 13376 13377 /* 13378 * Free the memory allocated by DDI 13379 */ 13380 if (data_buf != NULL) { 13381 kmem_free(data_buf, data_len); 13382 } 13383 } 13384 13385 13386 /* 13387 * Perform LOGO operation 13388 */ 13389 static int 13390 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13391 { 13392 int rval; 13393 fp_cmd_t *cmd; 13394 13395 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13396 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13397 13398 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13399 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13400 13401 mutex_enter(&port->fp_mutex); 13402 mutex_enter(&pd->pd_mutex); 13403 13404 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13405 ASSERT(pd->pd_login_count == 1); 13406 13407 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13408 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13409 cmd->cmd_flags = 0; 13410 cmd->cmd_retry_count = 1; 13411 cmd->cmd_ulp_pkt = NULL; 13412 13413 fp_logo_init(pd, cmd, job); 13414 13415 mutex_exit(&pd->pd_mutex); 13416 mutex_exit(&port->fp_mutex); 13417 13418 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13419 if (rval != FC_SUCCESS) { 13420 fp_iodone(cmd); 13421 } 13422 13423 return (rval); 13424 } 13425 13426 13427 /* 13428 * Perform Port attach callbacks to registered ULPs 13429 */ 13430 static void 13431 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13432 { 13433 fp_soft_attach_t *att; 13434 13435 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13436 att->att_cmd = cmd; 13437 att->att_port = port; 13438 13439 /* 13440 * We need to remember whether or not fctl_busy_port 13441 * succeeded so we know whether or not to call 13442 * fctl_idle_port when the task is complete. 13443 */ 13444 13445 if (fctl_busy_port(port) == 0) { 13446 att->att_need_pm_idle = B_TRUE; 13447 } else { 13448 att->att_need_pm_idle = B_FALSE; 13449 } 13450 13451 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13452 att, KM_SLEEP); 13453 } 13454 13455 13456 /* 13457 * Forward state change notifications on to interested ULPs. 13458 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13459 * real work. 13460 */ 13461 static int 13462 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13463 { 13464 fc_port_clist_t *clist; 13465 13466 clist = kmem_zalloc(sizeof (*clist), sleep); 13467 if (clist == NULL) { 13468 return (FC_NOMEM); 13469 } 13470 13471 clist->clist_state = statec; 13472 13473 mutex_enter(&port->fp_mutex); 13474 clist->clist_flags = port->fp_topology; 13475 mutex_exit(&port->fp_mutex); 13476 13477 clist->clist_port = (opaque_t)port; 13478 clist->clist_len = 0; 13479 clist->clist_size = 0; 13480 clist->clist_map = NULL; 13481 13482 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13483 clist, KM_SLEEP); 13484 13485 return (FC_SUCCESS); 13486 } 13487 13488 13489 /* 13490 * Get name server map 13491 */ 13492 static int 13493 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13494 uint32_t *len, uint32_t sid) 13495 { 13496 int ret; 13497 fctl_ns_req_t *ns_cmd; 13498 13499 /* 13500 * Don't let the allocator do anything for response; 13501 * we have a buffer ready to fill out.
13502 */ 13503 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13504 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13505 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13506 13507 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13508 ns_cmd->ns_data_buf = (caddr_t)*map; 13509 13510 ASSERT(ns_cmd != NULL); 13511 13512 ns_cmd->ns_gan_index = 0; 13513 ns_cmd->ns_gan_sid = sid; 13514 ns_cmd->ns_cmd_code = NS_GA_NXT; 13515 ns_cmd->ns_gan_max = *len; 13516 13517 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13518 13519 if (ns_cmd->ns_gan_index != *len) { 13520 *len = ns_cmd->ns_gan_index; 13521 } 13522 ns_cmd->ns_data_len = 0; 13523 ns_cmd->ns_data_buf = NULL; 13524 fctl_free_ns_cmd(ns_cmd); 13525 13526 return (ret); 13527 } 13528 13529 13530 /* 13531 * Create a remote port in Fabric topology by using NS services 13532 */ 13533 static fc_remote_port_t * 13534 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13535 { 13536 int rval; 13537 job_request_t *job; 13538 fctl_ns_req_t *ns_cmd; 13539 fc_remote_port_t *pd; 13540 13541 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13542 13543 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13544 port, d_id); 13545 13546 #ifdef DEBUG 13547 mutex_enter(&port->fp_mutex); 13548 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13549 mutex_exit(&port->fp_mutex); 13550 #endif 13551 13552 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13553 if (job == NULL) { 13554 return (NULL); 13555 } 13556 13557 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13558 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13559 FCTL_NS_NO_DATA_BUF), sleep); 13560 if (ns_cmd == NULL) { 13561 return (NULL); 13562 } 13563 13564 job->job_result = FC_SUCCESS; 13565 ns_cmd->ns_gan_max = 1; 13566 ns_cmd->ns_cmd_code = NS_GA_NXT; 13567 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13568 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13569 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13570 13571 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13572 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13573 fctl_free_ns_cmd(ns_cmd); 13574 13575 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13576 fctl_dealloc_job(job); 13577 return (NULL); 13578 } 13579 fctl_dealloc_job(job); 13580 13581 pd = fctl_get_remote_port_by_did(port, d_id); 13582 13583 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13584 port, d_id, pd); 13585 13586 return (pd); 13587 } 13588 13589 13590 /* 13591 * Check for the permissions on an ioctl command. If it is required to have an 13592 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13593 * the ioctl command isn't in one of the list built, shut the door on that too. 13594 * 13595 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13596 * to be made sure that users open the port for an exclusive access while 13597 * performing those operations. 13598 * 13599 * This can prevent a casual user from inflicting damage on the port by 13600 * sending these ioctls from multiple processes/threads (there is no good 13601 * reason why one would need to do that) without actually realizing how 13602 * expensive such commands could turn out to be. 13603 * 13604 * It is also important to note that, even with an exclusive access, 13605 * multiple threads can share the same file descriptor and fire down 13606 * commands in parallel. 
To prevent that the driver needs to make sure 13607 * that such commands aren't in progress already. This is taken care of 13608 * in the FP_EXCL_BUSY bit of fp_flag. 13609 */ 13610 static int 13611 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13612 { 13613 int ret = FC_FAILURE; 13614 int count; 13615 13616 for (count = 0; 13617 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13618 count++) { 13619 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13620 if (fp_perm_list[count].fp_open_flag & open_flag) { 13621 ret = FC_SUCCESS; 13622 } 13623 break; 13624 } 13625 } 13626 13627 return (ret); 13628 } 13629 13630 13631 /* 13632 * Bind Port driver's unsolicited, state change callbacks 13633 */ 13634 static int 13635 fp_bind_callbacks(fc_local_port_t *port) 13636 { 13637 fc_fca_bind_info_t bind_info = {0}; 13638 fc_fca_port_info_t *port_info; 13639 int rval = DDI_SUCCESS; 13640 uint16_t class; 13641 int node_namelen, port_namelen; 13642 char *nname = NULL, *pname = NULL; 13643 13644 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13645 13646 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13647 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13648 "node-name", &nname) != DDI_PROP_SUCCESS) { 13649 FP_TRACE(FP_NHEAD1(1, 0), 13650 "fp_bind_callback fail to get node-name"); 13651 } 13652 if (nname) { 13653 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13654 } 13655 13656 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13657 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13658 "port-name", &pname) != DDI_PROP_SUCCESS) { 13659 FP_TRACE(FP_NHEAD1(1, 0), 13660 "fp_bind_callback fail to get port-name"); 13661 } 13662 if (pname) { 13663 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13664 } 13665 13666 if (port->fp_npiv_type == FC_NPIV_PORT) { 13667 bind_info.port_npiv = 1; 13668 } 13669 13670 /* 13671 * fca_bind_port returns the FCA driver's handle for the local 13672 * port instance. If the port number isn't supported it returns NULL. 13673 * It also sets up callback in the FCA for various 13674 * things like state change, ELS etc.. 
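 * If the bind fails the handle stays NULL and this routine returns
 * DDI_FAILURE without copying any of the FCA-supplied port information.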
13675 */ 13676 bind_info.port_statec_cb = fp_statec_cb; 13677 bind_info.port_unsol_cb = fp_unsol_cb; 13678 bind_info.port_num = port->fp_port_num; 13679 bind_info.port_handle = (opaque_t)port; 13680 13681 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13682 13683 /* 13684 * Hold the port driver mutex as the callbacks are bound until the 13685 * service parameters are properly filled in (in order to be able to 13686 * properly respond to unsolicited ELS requests) 13687 */ 13688 mutex_enter(&port->fp_mutex); 13689 13690 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13691 port->fp_fca_dip, port_info, &bind_info); 13692 13693 if (port->fp_fca_handle == NULL) { 13694 rval = DDI_FAILURE; 13695 goto exit; 13696 } 13697 13698 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13699 port->fp_service_params = port_info->pi_login_params; 13700 port->fp_hard_addr = port_info->pi_hard_addr; 13701 13702 /* Copy from the FCA structure to the FP structure */ 13703 port->fp_hba_port_attrs = port_info->pi_attrs; 13704 13705 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13706 port->fp_rnid_init = 1; 13707 bcopy(&port_info->pi_rnid_params.params, 13708 &port->fp_rnid_params, 13709 sizeof (port->fp_rnid_params)); 13710 } else { 13711 port->fp_rnid_init = 0; 13712 } 13713 13714 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13715 if (node_namelen) { 13716 bcopy(&port_info->pi_attrs.sym_node_name, 13717 &port->fp_sym_node_name, 13718 node_namelen); 13719 port->fp_sym_node_namelen = node_namelen; 13720 } 13721 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13722 if (port_namelen) { 13723 bcopy(&port_info->pi_attrs.sym_port_name, 13724 &port->fp_sym_port_name, 13725 port_namelen); 13726 port->fp_sym_port_namelen = port_namelen; 13727 } 13728 13729 /* zero out the normally unused fields right away */ 13730 port->fp_service_params.ls_code.mbz = 0; 13731 port->fp_service_params.ls_code.ls_code = 0; 13732 bzero(&port->fp_service_params.reserved, 13733 sizeof (port->fp_service_params.reserved)); 13734 13735 class = port_info->pi_login_params.class_1.class_opt; 13736 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13737 13738 class = port_info->pi_login_params.class_2.class_opt; 13739 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13740 13741 class = port_info->pi_login_params.class_3.class_opt; 13742 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13743 13744 exit: 13745 if (nname) { 13746 ddi_prop_free(nname); 13747 } 13748 if (pname) { 13749 ddi_prop_free(pname); 13750 } 13751 mutex_exit(&port->fp_mutex); 13752 kmem_free(port_info, sizeof (*port_info)); 13753 13754 return (rval); 13755 } 13756 13757 13758 /* 13759 * Retrieve FCA capabilities 13760 */ 13761 static void 13762 fp_retrieve_caps(fc_local_port_t *port) 13763 { 13764 int rval; 13765 int ub_count; 13766 fc_fcp_dma_t fcp_dma; 13767 fc_reset_action_t action; 13768 fc_dma_behavior_t dma_behavior; 13769 13770 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13771 13772 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13773 FC_CAP_UNSOL_BUF, &ub_count); 13774 13775 switch (rval) { 13776 case FC_CAP_FOUND: 13777 case FC_CAP_SETTABLE: 13778 switch (ub_count) { 13779 case 0: 13780 break; 13781 13782 case -1: 13783 ub_count = fp_unsol_buf_count; 13784 break; 13785 13786 default: 13787 /* 1/4th of total buffers is my share */ 13788 ub_count = 13789 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13790 break; 13791 } 13792 break; 13793 13794 default: 13795 ub_count = 0; 13796 break; 13797 } 13798 13799 mutex_enter(&port->fp_mutex); 13800 port->fp_ub_count = ub_count; 13801 mutex_exit(&port->fp_mutex); 13802 13803 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13804 FC_CAP_POST_RESET_BEHAVIOR, &action); 13805 13806 switch (rval) { 13807 case FC_CAP_FOUND: 13808 case FC_CAP_SETTABLE: 13809 switch (action) { 13810 case FC_RESET_RETURN_NONE: 13811 case FC_RESET_RETURN_ALL: 13812 case FC_RESET_RETURN_OUTSTANDING: 13813 break; 13814 13815 default: 13816 action = FC_RESET_RETURN_NONE; 13817 break; 13818 } 13819 break; 13820 13821 default: 13822 action = FC_RESET_RETURN_NONE; 13823 break; 13824 } 13825 mutex_enter(&port->fp_mutex); 13826 port->fp_reset_action = action; 13827 mutex_exit(&port->fp_mutex); 13828 13829 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13830 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13831 13832 switch (rval) { 13833 case FC_CAP_FOUND: 13834 switch (dma_behavior) { 13835 case FC_ALLOW_STREAMING: 13836 /* FALLTHROUGH */ 13837 case FC_NO_STREAMING: 13838 break; 13839 13840 default: 13841 /* 13842 * If capability was found and the value 13843 * was incorrect assume the worst 13844 */ 13845 dma_behavior = FC_NO_STREAMING; 13846 break; 13847 } 13848 break; 13849 13850 default: 13851 /* 13852 * If capability was not defined - allow streaming; existing 13853 * FCAs should not be affected. 13854 */ 13855 dma_behavior = FC_ALLOW_STREAMING; 13856 break; 13857 } 13858 mutex_enter(&port->fp_mutex); 13859 port->fp_dma_behavior = dma_behavior; 13860 mutex_exit(&port->fp_mutex); 13861 13862 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13863 FC_CAP_FCP_DMA, &fcp_dma); 13864 13865 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13866 fcp_dma != FC_DVMA_SPACE)) { 13867 fcp_dma = FC_DVMA_SPACE; 13868 } 13869 13870 mutex_enter(&port->fp_mutex); 13871 port->fp_fcp_dma = fcp_dma; 13872 mutex_exit(&port->fp_mutex); 13873 } 13874 13875 13876 /* 13877 * Handle Domain, Area changes in the Fabric. 
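 *
 * Roughly: refresh the name server device count, mark every known
 * remote port whose D_ID falls inside the affected domain/area as OLD,
 * and then re-validate each one with a GID_PN query (re-doing PLOGI
 * where this port was the initiator and already logged in). Orphans
 * whose PWWNs reappear in the fabric are re-created, anything that
 * stays OLD is delisted (and possibly re-orphaned), and the resulting
 * change list is handed to the ULPs.
 *
 * The (id, mask) pair selects the scope: for example, a remote port
 * with D_ID 0x0A1B2C is covered by an area RSCN for id 0x0A1B00 with
 * mask 0xFFFF00, and by a domain RSCN for id 0x0A0000 with mask
 * 0xFF0000.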
13878 */ 13879 static void 13880 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13881 job_request_t *job, int sleep) 13882 { 13883 #ifdef DEBUG 13884 uint32_t dcnt; 13885 #endif 13886 int rval; 13887 int send; 13888 int index; 13889 int listindex; 13890 int login; 13891 int job_flags; 13892 char ww_name[17]; 13893 uint32_t d_id; 13894 uint32_t count; 13895 fctl_ns_req_t *ns_cmd; 13896 fc_portmap_t *list; 13897 fc_orphan_t *orp; 13898 fc_orphan_t *norp; 13899 fc_orphan_t *prev; 13900 fc_remote_port_t *pd; 13901 fc_remote_port_t *npd; 13902 struct pwwn_hash *head; 13903 13904 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13905 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13906 0, sleep); 13907 if (ns_cmd == NULL) { 13908 mutex_enter(&port->fp_mutex); 13909 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13910 --port->fp_rscn_count; 13911 } 13912 mutex_exit(&port->fp_mutex); 13913 13914 return; 13915 } 13916 ns_cmd->ns_cmd_code = NS_GID_PN; 13917 13918 /* 13919 * We need to get a new count of devices from the 13920 * name server, which will also create any new devices 13921 * as needed. 13922 */ 13923 13924 (void) fp_ns_get_devcount(port, job, 1, sleep); 13925 13926 FP_TRACE(FP_NHEAD1(3, 0), 13927 "fp_validate_area_domain: get_devcount found %d devices", 13928 port->fp_total_devices); 13929 13930 mutex_enter(&port->fp_mutex); 13931 13932 for (count = index = 0; index < pwwn_table_size; index++) { 13933 head = &port->fp_pwwn_table[index]; 13934 pd = head->pwwn_head; 13935 while (pd != NULL) { 13936 mutex_enter(&pd->pd_mutex); 13937 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13938 if ((pd->pd_port_id.port_id & mask) == id && 13939 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13940 count++; 13941 pd->pd_type = PORT_DEVICE_OLD; 13942 pd->pd_flags = PD_ELS_MARK; 13943 } 13944 } 13945 mutex_exit(&pd->pd_mutex); 13946 pd = pd->pd_wwn_hnext; 13947 } 13948 } 13949 13950 #ifdef DEBUG 13951 dcnt = count; 13952 #endif /* DEBUG */ 13953 13954 /* 13955 * Since port->fp_orphan_count is declared an 'int' it is 13956 * theoretically possible that the count could go negative. 13957 * 13958 * This would be bad and if that happens we really do want 13959 * to know. 13960 */ 13961 13962 ASSERT(port->fp_orphan_count >= 0); 13963 13964 count += port->fp_orphan_count; 13965 13966 /* 13967 * We add the port->fp_total_devices value to the count 13968 * in the case where our port is newly attached. This is 13969 * because we haven't done any discovery and we don't have 13970 * any orphans in the port's orphan list. If we do not do 13971 * this addition to count then we won't alloc enough kmem 13972 * to do discovery with. 
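	 * For example, on a freshly attached port with no marked entries
	 * and an empty orphan list, count would otherwise be zero;
	 * falling back to fp_total_devices sizes the list for whatever
	 * the fabric reported.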
13973 */ 13974 13975 if (count == 0) { 13976 count += port->fp_total_devices; 13977 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13978 "0x%x orphans found, using 0x%x", 13979 port->fp_orphan_count, count); 13980 } 13981 13982 mutex_exit(&port->fp_mutex); 13983 13984 /* 13985 * Allocate the change list 13986 */ 13987 13988 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 13989 if (list == NULL) { 13990 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13991 " Not enough memory to service RSCNs" 13992 " for %d ports, continuing...", count); 13993 13994 fctl_free_ns_cmd(ns_cmd); 13995 13996 mutex_enter(&port->fp_mutex); 13997 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13998 --port->fp_rscn_count; 13999 } 14000 mutex_exit(&port->fp_mutex); 14001 14002 return; 14003 } 14004 14005 /* 14006 * Attempt to validate or invalidate the devices that were 14007 * already in the pwwn hash table. 14008 */ 14009 14010 mutex_enter(&port->fp_mutex); 14011 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14012 head = &port->fp_pwwn_table[index]; 14013 npd = head->pwwn_head; 14014 14015 while ((pd = npd) != NULL) { 14016 npd = pd->pd_wwn_hnext; 14017 14018 mutex_enter(&pd->pd_mutex); 14019 if ((pd->pd_port_id.port_id & mask) == id && 14020 pd->pd_flags == PD_ELS_MARK) { 14021 la_wwn_t *pwwn; 14022 14023 job->job_result = FC_SUCCESS; 14024 14025 ((ns_req_gid_pn_t *) 14026 (ns_cmd->ns_cmd_buf))->pwwn = 14027 pd->pd_port_name; 14028 14029 pwwn = &pd->pd_port_name; 14030 d_id = pd->pd_port_id.port_id; 14031 14032 mutex_exit(&pd->pd_mutex); 14033 mutex_exit(&port->fp_mutex); 14034 14035 rval = fp_ns_query(port, ns_cmd, job, 1, 14036 sleep); 14037 if (rval != FC_SUCCESS) { 14038 fc_wwn_to_str(pwwn, ww_name); 14039 14040 FP_TRACE(FP_NHEAD1(3, 0), 14041 "AREA RSCN: PD disappeared; " 14042 "d_id=%x, PWWN=%s", d_id, ww_name); 14043 14044 FP_TRACE(FP_NHEAD2(9, 0), 14045 "N_x Port with D_ID=%x," 14046 " PWWN=%s disappeared from fabric", 14047 d_id, ww_name); 14048 14049 fp_fillout_old_map(list + listindex++, 14050 pd, 1); 14051 } else { 14052 fctl_copy_portmap(list + listindex++, 14053 pd); 14054 14055 mutex_enter(&pd->pd_mutex); 14056 pd->pd_flags = PD_ELS_IN_PROGRESS; 14057 mutex_exit(&pd->pd_mutex); 14058 } 14059 14060 mutex_enter(&port->fp_mutex); 14061 } else { 14062 mutex_exit(&pd->pd_mutex); 14063 } 14064 } 14065 } 14066 14067 mutex_exit(&port->fp_mutex); 14068 14069 ASSERT(listindex == dcnt); 14070 14071 job->job_counter = listindex; 14072 job_flags = job->job_flags; 14073 job->job_flags |= JOB_TYPE_FP_ASYNC; 14074 14075 /* 14076 * Login (if we were the initiator) or validate devices in the 14077 * port map. 14078 */ 14079 14080 for (index = 0; index < listindex; index++) { 14081 pd = list[index].map_pd; 14082 14083 mutex_enter(&pd->pd_mutex); 14084 ASSERT((pd->pd_port_id.port_id & mask) == id); 14085 14086 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14087 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14088 mutex_exit(&pd->pd_mutex); 14089 fp_jobdone(job); 14090 continue; 14091 } 14092 14093 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14094 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14095 d_id = pd->pd_port_id.port_id; 14096 mutex_exit(&pd->pd_mutex); 14097 14098 if ((d_id & mask) == id && send) { 14099 if (login) { 14100 FP_TRACE(FP_NHEAD1(6, 0), 14101 "RSCN and PLOGI request;" 14102 " pd=%p, job=%p d_id=%x, index=%d", pd, 14103 job, d_id, index); 14104 14105 rval = fp_port_login(port, d_id, job, 14106 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14107 if (rval != FC_SUCCESS) { 14108 mutex_enter(&pd->pd_mutex); 14109 pd->pd_flags = PD_IDLE; 14110 mutex_exit(&pd->pd_mutex); 14111 14112 job->job_result = rval; 14113 fp_jobdone(job); 14114 } 14115 14116 FP_TRACE(FP_NHEAD2(4, 0), 14117 "PLOGI succeeded:no skip(1) for " 14118 "D_ID %x", d_id); 14119 list[index].map_flags |= 14120 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14121 } else { 14122 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14123 " pd=%p, job=%p d_id=%x, index=%d", pd, 14124 job, d_id, index); 14125 14126 rval = fp_ns_validate_device(port, pd, job, 14127 0, sleep); 14128 if (rval != FC_SUCCESS) { 14129 fp_jobdone(job); 14130 } 14131 mutex_enter(&pd->pd_mutex); 14132 pd->pd_flags = PD_IDLE; 14133 mutex_exit(&pd->pd_mutex); 14134 } 14135 } else { 14136 FP_TRACE(FP_NHEAD1(6, 0), 14137 "RSCN and NO request sent; pd=%p," 14138 " d_id=%x, index=%d", pd, d_id, index); 14139 14140 mutex_enter(&pd->pd_mutex); 14141 pd->pd_flags = PD_IDLE; 14142 mutex_exit(&pd->pd_mutex); 14143 14144 fp_jobdone(job); 14145 } 14146 } 14147 14148 if (listindex) { 14149 fctl_jobwait(job); 14150 } 14151 job->job_flags = job_flags; 14152 14153 /* 14154 * Orphan list validation. 14155 */ 14156 mutex_enter(&port->fp_mutex); 14157 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14158 orp != NULL; orp = norp) { 14159 norp = orp->orp_next; 14160 mutex_exit(&port->fp_mutex); 14161 14162 job->job_counter = 1; 14163 job->job_result = FC_SUCCESS; 14164 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14165 14166 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14167 14168 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14169 ((ns_resp_gid_pn_t *) 14170 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14171 14172 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14173 if (rval == FC_SUCCESS) { 14174 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14175 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14176 if (pd != NULL) { 14177 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14178 14179 FP_TRACE(FP_NHEAD1(6, 0), 14180 "RSCN and ORPHAN list " 14181 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14182 14183 FP_TRACE(FP_NHEAD2(6, 0), 14184 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14185 " in fabric", d_id, ww_name); 14186 14187 mutex_enter(&port->fp_mutex); 14188 if (prev) { 14189 prev->orp_next = orp->orp_next; 14190 } else { 14191 ASSERT(orp == port->fp_orphan_list); 14192 port->fp_orphan_list = orp->orp_next; 14193 } 14194 port->fp_orphan_count--; 14195 mutex_exit(&port->fp_mutex); 14196 14197 kmem_free(orp, sizeof (*orp)); 14198 fctl_copy_portmap(list + listindex++, pd); 14199 } else { 14200 prev = orp; 14201 } 14202 } else { 14203 prev = orp; 14204 } 14205 mutex_enter(&port->fp_mutex); 14206 } 14207 mutex_exit(&port->fp_mutex); 14208 14209 /* 14210 * One more pass through the list to delist old devices from 14211 * the d_id and pwwn tables and possibly add to the orphan list. 
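	 * Entries still marked OLD at this point are invalidated and
	 * delisted; if this port was the PLOGI initiator for such a
	 * device, it goes back on the orphan list so a later scan can
	 * resurrect it.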
14212 */ 14213 14214 for (index = 0; index < listindex; index++) { 14215 pd = list[index].map_pd; 14216 ASSERT(pd != NULL); 14217 14218 /* 14219 * Update PLOGI results; For NS validation 14220 * of orphan list, it is redundant 14221 * 14222 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14223 * appropriate as fctl_copy_portmap() will clear map_flags. 14224 */ 14225 if (list[index].map_flags & 14226 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14227 fctl_copy_portmap(list + index, pd); 14228 list[index].map_flags |= 14229 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14230 } else { 14231 fctl_copy_portmap(list + index, pd); 14232 } 14233 14234 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14235 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14236 pd, pd->pd_port_id.port_id, 14237 pd->pd_port_name.raw_wwn[0], 14238 pd->pd_port_name.raw_wwn[1], 14239 pd->pd_port_name.raw_wwn[2], 14240 pd->pd_port_name.raw_wwn[3], 14241 pd->pd_port_name.raw_wwn[4], 14242 pd->pd_port_name.raw_wwn[5], 14243 pd->pd_port_name.raw_wwn[6], 14244 pd->pd_port_name.raw_wwn[7]); 14245 14246 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14247 "results continued, pd=%p type=%x, flags=%x, state=%x", 14248 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14249 14250 mutex_enter(&pd->pd_mutex); 14251 if (pd->pd_type == PORT_DEVICE_OLD) { 14252 int initiator; 14253 14254 pd->pd_flags = PD_IDLE; 14255 initiator = (pd->pd_recepient == 14256 PD_PLOGI_INITIATOR) ? 1 : 0; 14257 14258 mutex_exit(&pd->pd_mutex); 14259 14260 mutex_enter(&port->fp_mutex); 14261 mutex_enter(&pd->pd_mutex); 14262 14263 pd->pd_state = PORT_DEVICE_INVALID; 14264 fctl_delist_did_table(port, pd); 14265 fctl_delist_pwwn_table(port, pd); 14266 14267 mutex_exit(&pd->pd_mutex); 14268 mutex_exit(&port->fp_mutex); 14269 14270 if (initiator) { 14271 (void) fctl_add_orphan(port, pd, sleep); 14272 } 14273 list[index].map_pd = pd; 14274 } else { 14275 ASSERT(pd->pd_flags == PD_IDLE); 14276 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14277 /* 14278 * Reset LOGO tolerance to zero 14279 */ 14280 fctl_tc_reset(&pd->pd_logo_tc); 14281 } 14282 mutex_exit(&pd->pd_mutex); 14283 } 14284 } 14285 14286 if (ns_cmd) { 14287 fctl_free_ns_cmd(ns_cmd); 14288 } 14289 if (listindex) { 14290 (void) fp_ulp_devc_cb(port, list, listindex, count, 14291 sleep, 0); 14292 } else { 14293 kmem_free(list, sizeof (*list) * count); 14294 14295 mutex_enter(&port->fp_mutex); 14296 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14297 --port->fp_rscn_count; 14298 } 14299 mutex_exit(&port->fp_mutex); 14300 } 14301 } 14302 14303 14304 /* 14305 * Work hard to make sense out of an RSCN page. 
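 *
 * The page's D_ID is first resolved to a Port WWN with GPN_ID and the
 * result is matched against what is already known:
 *
 *	GPN_ID fails		the device left the fabric; mark the
 *				existing PD (if any) as OLD.
 *	same PD by D_ID/PWWN	no change; re-PLOGI if it was logged in.
 *	neither one known	check the orphan list, then create a
 *				fresh PD through the name server.
 *	PWWN known, new D_ID	re-list the PD under the new D_ID.
 *	D_ID known, new PWWN	mark the old PD as OLD and report the
 *				new PWWN/D_ID pair as a new entry.
 *	both known, mismatched	the "weird" case handled at the end.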
14306 */ 14307 static void 14308 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14309 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14310 int *listindex, int sleep) 14311 { 14312 int rval; 14313 char ww_name[17]; 14314 la_wwn_t *pwwn; 14315 fc_remote_port_t *pwwn_pd; 14316 fc_remote_port_t *did_pd; 14317 14318 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14319 14320 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14321 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14322 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14323 14324 if (did_pd != NULL) { 14325 mutex_enter(&did_pd->pd_mutex); 14326 if (did_pd->pd_flags != PD_IDLE) { 14327 mutex_exit(&did_pd->pd_mutex); 14328 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14329 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14330 port, page->aff_d_id, did_pd); 14331 return; 14332 } 14333 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14334 mutex_exit(&did_pd->pd_mutex); 14335 } 14336 14337 job->job_counter = 1; 14338 14339 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14340 14341 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14342 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14343 14344 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14345 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14346 14347 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14348 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14349 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14350 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14351 ns_cmd->ns_resp_hdr.ct_expln); 14352 14353 job->job_counter = 1; 14354 14355 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14356 /* 14357 * What this means is that the D_ID 14358 * disappeared from the Fabric. 14359 */ 14360 if (did_pd == NULL) { 14361 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14362 " NULL PD disappeared, rval=%x", rval); 14363 return; 14364 } 14365 14366 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14367 14368 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14369 (uint32_t)(uintptr_t)job->job_cb_arg; 14370 14371 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14372 14373 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14374 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14375 14376 FP_TRACE(FP_NHEAD2(9, 0), 14377 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14378 14379 FP_TRACE(FP_NHEAD2(9, 0), 14380 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14381 " fabric", page->aff_d_id, ww_name); 14382 14383 mutex_enter(&did_pd->pd_mutex); 14384 did_pd->pd_flags = PD_IDLE; 14385 mutex_exit(&did_pd->pd_mutex); 14386 14387 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14388 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14389 14390 return; 14391 } 14392 14393 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14394 14395 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14396 /* 14397 * There is no change. Do PLOGI again and add it to 14398 * ULP portmap baggage and return. Note: When RSCNs 14399 * arrive with per page states, the need for PLOGI 14400 * can be determined correctly. 
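	 * For now PLOGI is re-driven only if the device is (or just
	 * was) logged in, i.e. PORT_DEVICE_LOGGED_IN or PD_LOGGED_OUT.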
14401 */ 14402 mutex_enter(&pwwn_pd->pd_mutex); 14403 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14404 mutex_exit(&pwwn_pd->pd_mutex); 14405 14406 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14407 (uint32_t)(uintptr_t)job->job_cb_arg; 14408 14409 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14410 14411 mutex_enter(&pwwn_pd->pd_mutex); 14412 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14413 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14414 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14415 mutex_exit(&pwwn_pd->pd_mutex); 14416 14417 rval = fp_port_login(port, page->aff_d_id, job, 14418 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14419 if (rval == FC_SUCCESS) { 14420 fp_jobwait(job); 14421 rval = job->job_result; 14422 14423 /* 14424 * Reset LOGO tolerance to zero 14425 * Also we are the PLOGI initiator now. 14426 */ 14427 mutex_enter(&pwwn_pd->pd_mutex); 14428 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14429 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14430 mutex_exit(&pwwn_pd->pd_mutex); 14431 } 14432 14433 if (rval == FC_SUCCESS) { 14434 struct fc_portmap *map = 14435 listptr + *listindex - 1; 14436 14437 FP_TRACE(FP_NHEAD2(4, 0), 14438 "PLOGI succeeded: no skip(2)" 14439 " for D_ID %x", page->aff_d_id); 14440 map->map_flags |= 14441 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14442 } else { 14443 FP_TRACE(FP_NHEAD2(9, rval), 14444 "PLOGI to D_ID=%x failed", page->aff_d_id); 14445 14446 FP_TRACE(FP_NHEAD2(9, 0), 14447 "N_x Port with D_ID=%x, PWWN=%s" 14448 " disappeared from fabric", 14449 page->aff_d_id, ww_name); 14450 14451 fp_fillout_old_map(listptr + 14452 *listindex - 1, pwwn_pd, 0); 14453 } 14454 } else { 14455 mutex_exit(&pwwn_pd->pd_mutex); 14456 } 14457 14458 mutex_enter(&did_pd->pd_mutex); 14459 did_pd->pd_flags = PD_IDLE; 14460 mutex_exit(&did_pd->pd_mutex); 14461 14462 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14463 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14464 job->job_result, pwwn_pd); 14465 14466 return; 14467 } 14468 14469 if (did_pd == NULL && pwwn_pd == NULL) { 14470 14471 fc_orphan_t *orp = NULL; 14472 fc_orphan_t *norp = NULL; 14473 fc_orphan_t *prev = NULL; 14474 14475 /* 14476 * Hunt down the orphan list before giving up. 
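	 * If the PWWN turns out to be on that list, the orphan entry is
	 * unlinked and a remote port is created afresh through the name
	 * server.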
14477 */ 14478 14479 mutex_enter(&port->fp_mutex); 14480 if (port->fp_orphan_count) { 14481 14482 for (orp = port->fp_orphan_list; orp; orp = norp) { 14483 norp = orp->orp_next; 14484 14485 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14486 prev = orp; 14487 continue; 14488 } 14489 14490 if (prev) { 14491 prev->orp_next = orp->orp_next; 14492 } else { 14493 ASSERT(orp == 14494 port->fp_orphan_list); 14495 port->fp_orphan_list = 14496 orp->orp_next; 14497 } 14498 port->fp_orphan_count--; 14499 break; 14500 } 14501 } 14502 14503 mutex_exit(&port->fp_mutex); 14504 pwwn_pd = fp_create_remote_port_by_ns(port, 14505 page->aff_d_id, sleep); 14506 14507 if (pwwn_pd != NULL) { 14508 14509 if (orp) { 14510 fc_wwn_to_str(&orp->orp_pwwn, 14511 ww_name); 14512 14513 FP_TRACE(FP_NHEAD2(9, 0), 14514 "N_x Port with D_ID=%x," 14515 " PWWN=%s reappeared in fabric", 14516 page->aff_d_id, ww_name); 14517 14518 kmem_free(orp, sizeof (*orp)); 14519 } 14520 14521 (listptr + *listindex)-> 14522 map_rscn_info.ulp_rscn_count = 14523 (uint32_t)(uintptr_t)job->job_cb_arg; 14524 14525 fctl_copy_portmap(listptr + 14526 (*listindex)++, pwwn_pd); 14527 } 14528 14529 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14530 "Case TWO", page->aff_d_id); 14531 14532 return; 14533 } 14534 14535 if (pwwn_pd != NULL && did_pd == NULL) { 14536 uint32_t old_d_id; 14537 uint32_t d_id = page->aff_d_id; 14538 14539 /* 14540 * What this means is there is a new D_ID for this 14541 * Port WWN. Take out the port device off D_ID 14542 * list and put it back with a new D_ID. Perform 14543 * PLOGI if already logged in. 14544 */ 14545 mutex_enter(&port->fp_mutex); 14546 mutex_enter(&pwwn_pd->pd_mutex); 14547 14548 old_d_id = pwwn_pd->pd_port_id.port_id; 14549 14550 fctl_delist_did_table(port, pwwn_pd); 14551 14552 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14553 (uint32_t)(uintptr_t)job->job_cb_arg; 14554 14555 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14556 &d_id, NULL); 14557 fctl_enlist_did_table(port, pwwn_pd); 14558 14559 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14560 " Case THREE, pd=%p," 14561 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14562 14563 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14564 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14565 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14566 14567 mutex_exit(&pwwn_pd->pd_mutex); 14568 mutex_exit(&port->fp_mutex); 14569 14570 FP_TRACE(FP_NHEAD2(9, 0), 14571 "N_x Port with D_ID=%x, PWWN=%s has a new" 14572 " D_ID=%x now", old_d_id, ww_name, d_id); 14573 14574 rval = fp_port_login(port, page->aff_d_id, job, 14575 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14576 if (rval == FC_SUCCESS) { 14577 fp_jobwait(job); 14578 rval = job->job_result; 14579 } 14580 14581 if (rval != FC_SUCCESS) { 14582 fp_fillout_old_map(listptr + 14583 *listindex - 1, pwwn_pd, 0); 14584 } 14585 } else { 14586 mutex_exit(&pwwn_pd->pd_mutex); 14587 mutex_exit(&port->fp_mutex); 14588 } 14589 14590 return; 14591 } 14592 14593 if (pwwn_pd == NULL && did_pd != NULL) { 14594 fc_portmap_t *ptr; 14595 uint32_t len = 1; 14596 char old_ww_name[17]; 14597 14598 mutex_enter(&did_pd->pd_mutex); 14599 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14600 mutex_exit(&did_pd->pd_mutex); 14601 14602 fc_wwn_to_str(pwwn, ww_name); 14603 14604 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14605 (uint32_t)(uintptr_t)job->job_cb_arg; 14606 14607 /* 14608 * What this means is that there is a new Port WWN for 14609 * this D_ID; Mark the Port device as old and provide 

	if (pwwn_pd == NULL && did_pd != NULL) {
		fc_portmap_t *ptr;
		uint32_t len = 1;
		char old_ww_name[17];

		mutex_enter(&did_pd->pd_mutex);
		fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name);
		mutex_exit(&did_pd->pd_mutex);

		fc_wwn_to_str(pwwn, ww_name);

		(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
		    (uint32_t)(uintptr_t)job->job_cb_arg;

		/*
		 * What this means is that there is a new Port WWN for
		 * this D_ID; Mark the Port device as old and provide
		 * the new PWWN and D_ID combination as new.
		 */
		fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
		    page->aff_d_id, old_ww_name, ww_name);

		(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
		    (uint32_t)(uintptr_t)job->job_cb_arg;

		ptr = listptr + (*listindex)++;

		job->job_counter = 1;

		if (fp_ns_getmap(port, job, &ptr, &len,
		    page->aff_d_id - 1) != FC_SUCCESS) {
			(*listindex)--;
		}

		mutex_enter(&did_pd->pd_mutex);
		did_pd->pd_flags = PD_IDLE;
		mutex_exit(&did_pd->pd_mutex);

		return;
	}

	/*
	 * An odd case: both the Port WWN and the D_ID are known, but they
	 * belong to two different port devices.  Mark the device that used
	 * to own this Port WWN as OLD, then take the device at the affected
	 * D_ID off the Port WWN table, fix it up with the new Port WWN and
	 * put it back.
	 */
	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
	    " did_pd=%p", pwwn_pd, did_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pwwn_pd->pd_mutex);

	pwwn_pd->pd_type = PORT_DEVICE_OLD;
	pwwn_pd->pd_state = PORT_DEVICE_INVALID;
	fctl_delist_did_table(port, pwwn_pd);
	fctl_delist_pwwn_table(port, pwwn_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
	    pwwn_pd->pd_port_id.port_id,
	    pwwn_pd->pd_port_name.raw_wwn[0],
	    pwwn_pd->pd_port_name.raw_wwn[1],
	    pwwn_pd->pd_port_name.raw_wwn[2],
	    pwwn_pd->pd_port_name.raw_wwn[3],
	    pwwn_pd->pd_port_name.raw_wwn[4],
	    pwwn_pd->pd_port_name.raw_wwn[5],
	    pwwn_pd->pd_port_name.raw_wwn[6],
	    pwwn_pd->pd_port_name.raw_wwn[7]);

	mutex_exit(&pwwn_pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&did_pd->pd_mutex);

	fctl_delist_pwwn_table(port, did_pd);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
	fctl_enlist_pwwn_table(port, did_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
	    did_pd->pd_port_id.port_id, did_pd->pd_state,
	    did_pd->pd_port_name.raw_wwn[0],
	    did_pd->pd_port_name.raw_wwn[1],
	    did_pd->pd_port_name.raw_wwn[2],
	    did_pd->pd_port_name.raw_wwn[3],
	    did_pd->pd_port_name.raw_wwn[4],
	    did_pd->pd_port_name.raw_wwn[5],
	    did_pd->pd_port_name.raw_wwn[6],
	    did_pd->pd_port_name.raw_wwn[7]);

	if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
	    (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);

		rval = fp_port_login(port, page->aff_d_id, job,
		    FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
		if (rval == FC_SUCCESS) {
			fp_jobwait(job);
			if (job->job_result != FC_SUCCESS) {
				fp_fillout_old_map(listptr +
				    *listindex - 1, did_pd, 0);
			}
		} else {
			fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
		}
	} else {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&did_pd->pd_mutex);
	did_pd->pd_flags = PD_IDLE;
	mutex_exit(&did_pd->pd_mutex);
}
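
/*
 * The RSCN page handling above keys off whether the affected D_ID and its
 * Port WWN are already known to this port:
 *
 *	Port WWN found	D_ID found	handling
 *	--------------	----------	--------
 *	yes (same pd)	yes		Case ONE: revalidate, PLOGI again
 *	no		no		Case TWO: orphan list, then Name Server
 *	yes		no		Case THREE: PWWN moved to a new D_ID
 *	no		yes		new Port WWN for an existing D_ID
 *	yes (other pd)	yes		"WEIRD": both exist but do not match
 */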


/*
 * Check with NS for the presence of this port WWN
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
	la_wwn_t	pwwn;
	uint32_t	flags;
	fctl_ns_req_t	*ns_cmd;

	flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
	    flags, sleep);
	if (ns_cmd == NULL) {
		return (FC_NOMEM);
	}

	mutex_enter(&pd->pd_mutex);
	pwwn = pd->pd_port_name;
	mutex_exit(&pd->pd_mutex);

	ns_cmd->ns_cmd_code = NS_GID_PN;
	ns_cmd->ns_pd = pd;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

	return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int	count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check if the AL_PA is a valid address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int	count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}
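
/*
 * For instance (illustrative values only), a two entry loop map such as
 *
 *	fc_lilpmap_t map;
 *
 *	map.lilp_length = 2;
 *	map.lilp_alpalist[0] = 0x01;
 *	map.lilp_alpalist[1] = 0xEF;
 *
 * passes fp_validate_lilp_map() since both AL_PAs appear in the
 * fp_valid_alpas table; an AL_PA of 0 (the FL_Port) is also accepted by
 * fp_is_valid_alpa(), while a zero-length map is rejected outright.
 */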
14823 { 14824 caddr_t buf; 14825 va_list ap; 14826 14827 switch (level) { 14828 case CE_NOTE: 14829 if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) { 14830 return; 14831 } 14832 break; 14833 14834 case CE_WARN: 14835 if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) { 14836 return; 14837 } 14838 break; 14839 } 14840 14841 buf = kmem_zalloc(256, KM_NOSLEEP); 14842 if (buf == NULL) { 14843 return; 14844 } 14845 14846 (void) sprintf(buf, "fp(%d): ", port->fp_instance); 14847 14848 va_start(ap, fmt); 14849 (void) vsprintf(buf + strlen(buf), fmt, ap); 14850 va_end(ap); 14851 14852 if (fc_errno) { 14853 char *errmsg; 14854 14855 (void) fc_ulp_error(fc_errno, &errmsg); 14856 (void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg); 14857 } else { 14858 if (pkt) { 14859 caddr_t state, reason, action, expln; 14860 14861 (void) fc_ulp_pkt_error(pkt, &state, &reason, 14862 &action, &expln); 14863 14864 (void) sprintf(buf + strlen(buf), 14865 " state=%s, reason=%s", state, reason); 14866 14867 if (pkt->pkt_resp_resid) { 14868 (void) sprintf(buf + strlen(buf), 14869 " resp resid=%x\n", pkt->pkt_resp_resid); 14870 } 14871 } 14872 } 14873 14874 switch (dest) { 14875 case FP_CONSOLE_ONLY: 14876 cmn_err(level, "^%s", buf); 14877 break; 14878 14879 case FP_LOG_ONLY: 14880 cmn_err(level, "!%s", buf); 14881 break; 14882 14883 default: 14884 cmn_err(level, "%s", buf); 14885 break; 14886 } 14887 14888 kmem_free(buf, 256); 14889 } 14890 14891 static int 14892 fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job) 14893 { 14894 int ret; 14895 uint32_t d_id; 14896 la_wwn_t pwwn; 14897 fc_remote_port_t *pd = NULL; 14898 fc_remote_port_t *held_pd = NULL; 14899 fctl_ns_req_t *ns_cmd; 14900 fc_portmap_t *changelist; 14901 14902 bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn)); 14903 14904 mutex_enter(&port->fp_mutex); 14905 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 14906 mutex_exit(&port->fp_mutex); 14907 job->job_counter = 1; 14908 14909 job->job_result = FC_SUCCESS; 14910 14911 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14912 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14913 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 14914 14915 ASSERT(ns_cmd != NULL); 14916 14917 ns_cmd->ns_cmd_code = NS_GID_PN; 14918 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn; 14919 14920 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14921 14922 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 14923 if (ret != FC_SUCCESS) { 14924 fcio->fcio_errno = ret; 14925 } else { 14926 fcio->fcio_errno = job->job_result; 14927 } 14928 fctl_free_ns_cmd(ns_cmd); 14929 return (EIO); 14930 } 14931 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14932 fctl_free_ns_cmd(ns_cmd); 14933 } else { 14934 mutex_exit(&port->fp_mutex); 14935 14936 held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 14937 if (held_pd == NULL) { 14938 fcio->fcio_errno = FC_BADWWN; 14939 return (EIO); 14940 } 14941 pd = held_pd; 14942 14943 mutex_enter(&pd->pd_mutex); 14944 d_id = pd->pd_port_id.port_id; 14945 mutex_exit(&pd->pd_mutex); 14946 } 14947 14948 job->job_counter = 1; 14949 14950 pd = fctl_get_remote_port_by_did(port, d_id); 14951 14952 if (pd) { 14953 mutex_enter(&pd->pd_mutex); 14954 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14955 pd->pd_login_count++; 14956 mutex_exit(&pd->pd_mutex); 14957 14958 fcio->fcio_errno = FC_SUCCESS; 14959 if (held_pd) { 14960 fctl_release_remote_port(held_pd); 14961 } 14962 14963 return (0); 14964 } 14965 mutex_exit(&pd->pd_mutex); 14966 } else { 14967 mutex_enter(&port->fp_mutex); 14968 if 

static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int			ret;
	uint32_t		d_id;
	la_wwn_t		pwwn;
	fc_remote_port_t	*pd = NULL;
	fc_remote_port_t	*held_pd = NULL;
	fctl_ns_req_t		*ns_cmd;
	fc_portmap_t		*changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}
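
/*
 * fp_fcio_login() and fp_fcio_logout() behave like a reference counted
 * pair: logging in to an already logged-in remote port simply bumps
 * pd_login_count, and the logout path below only sends a LOGO for the
 * last holder; earlier logouts just decrement the count and post a
 * PORT_DEVICE_USER_LOGOUT change to the ULPs.
 */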


static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t		pwwn;
	fp_cmd_t		*cmd;
	fc_portmap_t		*changelist;
	fc_remote_port_t	*pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		fctl_release_remote_port(pd);

		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Send a syslog event for adapter port level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}
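
/*
 * Target level events use the same EC_SUNFC event class but carry two
 * additional attributes, "target-port-wwn" and "target-port-id", on top
 * of the adapter instance and local port WWN posted above.
 */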

static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}