/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * NOT a DDI compliant Sun Fibre Channel port driver(fp)
 *
 */

#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/thread.h>
#include <sys/var.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/promif.h>
#include <sys/nvpair.h>
#include <sys/byteorder.h>
#include <sys/scsi/scsi.h>
#include <sys/fibre-channel/fc.h>
#include <sys/fibre-channel/impl/fc_ulpif.h>
#include <sys/fibre-channel/impl/fc_fcaif.h>
#include <sys/fibre-channel/impl/fctl_private.h>
#include <sys/fibre-channel/impl/fc_portif.h>
#include <sys/fibre-channel/impl/fp.h>

/* These are defined in fctl.c! */
extern int did_table_size;
extern int pwwn_table_size;

static struct cb_ops fp_cb_ops = {
    fp_open,                        /* open */
    fp_close,                       /* close */
    nodev,                          /* strategy */
    nodev,                          /* print */
    nodev,                          /* dump */
    nodev,                          /* read */
    nodev,                          /* write */
    fp_ioctl,                       /* ioctl */
    nodev,                          /* devmap */
    nodev,                          /* mmap */
    nodev,                          /* segmap */
    nochpoll,                       /* chpoll */
    ddi_prop_op,                    /* cb_prop_op */
    0,                              /* streamtab */
    D_NEW | D_MP | D_HOTPLUG,       /* cb_flag */
    CB_REV,                         /* rev */
    nodev,                          /* aread */
    nodev                           /* awrite */
};

static struct dev_ops fp_ops = {
    DEVO_REV,                       /* build revision */
    0,                              /* reference count */
    fp_getinfo,                     /* getinfo */
    nulldev,                        /* identify - Obsoleted */
    nulldev,                        /* probe */
    fp_attach,                      /* attach */
    fp_detach,                      /* detach */
    nodev,                          /* reset */
    &fp_cb_ops,                     /* cb_ops */
    NULL,                           /* bus_ops */
    fp_power,                       /* power */
    ddi_quiesce_not_needed          /* quiesce */
};

#define FP_VERSION          "20091123-1.101"
#define FP_NAME_VERSION     "SunFC Port v" FP_VERSION

char *fp_version = FP_NAME_VERSION;

static struct modldrv modldrv = {
    &mod_driverops,                 /* Type of Module */
    FP_NAME_VERSION,                /* Name/Version of fp */
    &fp_ops                         /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,       /* Rev of the loadable modules system */
    &modldrv,       /* NULL terminated list of */
    NULL            /* Linkage structures */
};



static uint16_t ns_reg_cmds[] = {
    NS_RPN_ID,
    NS_RNN_ID,
    NS_RCS_ID,
    NS_RFT_ID,
    NS_RPT_ID,
    NS_RSPN_ID,
    NS_RSNN_NN
};

struct fp_xlat {
    uchar_t xlat_state;
    int     xlat_rval;
} fp_xlat[] = {
    { FC_PKT_SUCCESS,       FC_SUCCESS },
    { FC_PKT_REMOTE_STOP,   FC_FAILURE },
    { FC_PKT_LOCAL_RJT,     FC_FAILURE },
    { FC_PKT_NPORT_RJT,     FC_ELS_PREJECT },
    { FC_PKT_FABRIC_RJT,    FC_ELS_FREJECT },
    { FC_PKT_LOCAL_BSY,     FC_TRAN_BUSY },
    { FC_PKT_TRAN_BSY,      FC_TRAN_BUSY },
    { FC_PKT_NPORT_BSY,     FC_PBUSY },
    { FC_PKT_FABRIC_BSY,    FC_FBUSY },
    { FC_PKT_LS_RJT,        FC_FAILURE },
    { FC_PKT_BA_RJT,        FC_FAILURE },
    { FC_PKT_TIMEOUT,       FC_FAILURE },
    { FC_PKT_TRAN_ERROR,    FC_TRANSPORT_ERROR },
    { FC_PKT_FAILURE,       FC_FAILURE },
    { FC_PKT_PORT_OFFLINE,  FC_OFFLINE }
};
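
/*
 * For reference, fp_xlat above is what lets fp translate an FCA packet
 * completion state into the fctl error code handed back to ULPs.  A
 * minimal lookup sketch (illustrative only; fp's actual translation
 * helper may differ):
 *
 *	static int
 *	fp_state_to_rval(uchar_t state)
 *	{
 *		int i;
 *
 *		for (i = 0; i < sizeof (fp_xlat) / sizeof (fp_xlat[0]); i++) {
 *			if (fp_xlat[i].xlat_state == state)
 *				return (fp_xlat[i].xlat_rval);
 *		}
 *		return (FC_FAILURE);
 *	}
 */
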
static uchar_t fp_valid_alpas[] = {
    0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B,
    0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A,
    0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35,
    0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49,
    0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54,
    0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67,
    0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73,
    0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82,
    0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E,
    0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC,
    0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9,
    0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB,
    0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
    0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF
};

static struct fp_perms {
    uint16_t    fp_ioctl_cmd;
    uchar_t     fp_open_flag;
} fp_perm_list[] = {
    { FCIO_GET_NUM_DEVS,                        FP_OPEN },
    { FCIO_GET_DEV_LIST,                        FP_OPEN },
    { FCIO_GET_SYM_PNAME,                       FP_OPEN },
    { FCIO_GET_SYM_NNAME,                       FP_OPEN },
    { FCIO_SET_SYM_PNAME,                       FP_EXCL },
    { FCIO_SET_SYM_NNAME,                       FP_EXCL },
    { FCIO_GET_LOGI_PARAMS,                     FP_OPEN },
    { FCIO_DEV_LOGIN,                           FP_EXCL },
    { FCIO_DEV_LOGOUT,                          FP_EXCL },
    { FCIO_GET_STATE,                           FP_OPEN },
    { FCIO_DEV_REMOVE,                          FP_EXCL },
    { FCIO_GET_FCODE_REV,                       FP_OPEN },
    { FCIO_GET_FW_REV,                          FP_OPEN },
    { FCIO_GET_DUMP_SIZE,                       FP_OPEN },
    { FCIO_FORCE_DUMP,                          FP_EXCL },
    { FCIO_GET_DUMP,                            FP_OPEN },
    { FCIO_GET_TOPOLOGY,                        FP_OPEN },
    { FCIO_RESET_LINK,                          FP_EXCL },
    { FCIO_RESET_HARD,                          FP_EXCL },
    { FCIO_RESET_HARD_CORE,                     FP_EXCL },
    { FCIO_DIAG,                                FP_OPEN },
    { FCIO_NS,                                  FP_EXCL },
    { FCIO_DOWNLOAD_FW,                         FP_EXCL },
    { FCIO_DOWNLOAD_FCODE,                      FP_EXCL },
    { FCIO_LINK_STATUS,                         FP_OPEN },
    { FCIO_GET_HOST_PARAMS,                     FP_OPEN },
    { FCIO_GET_NODE_ID,                         FP_OPEN },
    { FCIO_SET_NODE_ID,                         FP_EXCL },
    { FCIO_SEND_NODE_ID,                        FP_OPEN },
    { FCIO_GET_ADAPTER_ATTRIBUTES,              FP_OPEN },
    { FCIO_GET_OTHER_ADAPTER_PORTS,             FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_ATTRIBUTES,         FP_OPEN },
    { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES,      FP_OPEN },
    { FCIO_GET_PORT_ATTRIBUTES,                 FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_STATS,              FP_OPEN },
    { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES,    FP_OPEN },
    { FCIO_GET_NPIV_PORT_LIST,                  FP_OPEN },
    { FCIO_DELETE_NPIV_PORT,                    FP_OPEN },
    { FCIO_GET_NPIV_ATTRIBUTES,                 FP_OPEN },
    { FCIO_CREATE_NPIV_PORT,                    FP_OPEN },
    { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES,         FP_OPEN }
};

static char *fp_pm_comps[] = {
    "NAME=FC Port",
    "0=Port Down",
    "1=Port Up"
};


#ifdef _LITTLE_ENDIAN
#define MAKE_BE_32(x)   {                                               \
        uint32_t *ptr1, i;                                              \
        ptr1 = (uint32_t *)(x);                                         \
        for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) {       \
            *ptr1 = BE_32(*ptr1);                                       \
            ptr1++;                                                     \
        }                                                               \
    }
#else
#define MAKE_BE_32(x)
#endif

static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES);
static uint32_t fp_options = 0;

static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY;
static int fp_retry_delay = FP_RETRY_DELAY;     /* retry after this delay */
static int fp_retry_count = FP_RETRY_COUNT;     /* number of retries */
unsigned int fp_offline_ticker;                 /* seconds */
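
/*
 * Several of the globals above are intended as tunables.  fp_offline_ticker,
 * for example, may be set either through an fp.conf property or by patching
 * the driver global in /etc/system (see the range check in fp_attach()
 * below).  A hypothetical /etc/system entry would look like:
 *
 *	set fp:fp_offline_ticker = 45
 */
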
243 */ 244 static void *fp_driver_softstate; 245 246 static clock_t fp_retry_ticks; 247 static clock_t fp_offline_ticks; 248 249 static int fp_retry_ticker; 250 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 251 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 252 253 static int fp_log_size = FP_LOG_SIZE; 254 static int fp_trace = FP_TRACE_DEFAULT; 255 static fc_trace_logq_t *fp_logq = NULL; 256 257 int fp_get_adapter_paths(char *pathList, int count); 258 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 259 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 260 la_wwn_t tgt_pwwn, uint32_t port_id); 261 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 262 static void fp_init_symbolic_names(fc_local_port_t *port); 263 264 265 /* 266 * Perform global initialization 267 */ 268 int 269 _init(void) 270 { 271 int ret; 272 273 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 274 sizeof (struct fc_local_port), 8)) != 0) { 275 return (ret); 276 } 277 278 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 279 ddi_soft_state_fini(&fp_driver_softstate); 280 return (ret); 281 } 282 283 fp_logq = fc_trace_alloc_logq(fp_log_size); 284 285 if ((ret = mod_install(&modlinkage)) != 0) { 286 fc_trace_free_logq(fp_logq); 287 ddi_soft_state_fini(&fp_driver_softstate); 288 scsi_hba_fini(&modlinkage); 289 } 290 291 return (ret); 292 } 293 294 295 /* 296 * Prepare for driver unload 297 */ 298 int 299 _fini(void) 300 { 301 int ret; 302 303 if ((ret = mod_remove(&modlinkage)) == 0) { 304 fc_trace_free_logq(fp_logq); 305 ddi_soft_state_fini(&fp_driver_softstate); 306 scsi_hba_fini(&modlinkage); 307 } 308 309 return (ret); 310 } 311 312 313 /* 314 * Request mod_info() to handle all cases 315 */ 316 int 317 _info(struct modinfo *modinfo) 318 { 319 return (mod_info(&modlinkage, modinfo)); 320 } 321 322 323 /* 324 * fp_attach: 325 * 326 * The respective cmd handlers take care of performing 327 * ULP related invocations 328 */ 329 static int 330 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 331 { 332 int rval; 333 334 /* 335 * We check the value of fp_offline_ticker at this 336 * point. The variable is global for the driver and 337 * not specific to an instance. 338 * 339 * If there is no user-defined value found in /etc/system 340 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 341 * The minimum setting for this offline timeout according 342 * to the FC-FS2 standard (Fibre Channel Framing and 343 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 344 * 345 * We do not recommend setting the value to less than 10 346 * seconds (RA_TOV) or more than 90 seconds. If this 347 * variable is greater than 90 seconds then drivers above 348 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 349 */ 350 351 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 352 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 353 FP_OFFLINE_TICKER); 354 355 if ((fp_offline_ticker < 10) || 356 (fp_offline_ticker > 90)) { 357 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 358 "%d second(s). This is outside the " 359 "recommended range of 10..90 seconds", 360 fp_offline_ticker); 361 } 362 363 /* 364 * Tick every second when there are commands to retry. 
     * It should tick at the least granular value of pkt_timeout
     * (which is one second)
     */
    fp_retry_ticker = 1;

    fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000);
    fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000);

    switch (cmd) {
    case DDI_ATTACH:
        rval = fp_attach_handler(dip);
        break;

    case DDI_RESUME:
        rval = fp_resume_handler(dip);
        break;

    default:
        rval = DDI_FAILURE;
        break;
    }
    return (rval);
}


/*
 * fp_detach:
 *
 * If a ULP fails to handle the cmd request, the converse of
 * cmd is invoked for the ULPs that previously succeeded the
 * cmd request.
 */
static int
fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    int rval = DDI_FAILURE;
    fc_local_port_t *port;
    fc_attach_cmd_t converse;
    uint8_t cnt;

    if ((port = ddi_get_soft_state(fp_driver_softstate,
        ddi_get_instance(dip))) == NULL) {
        return (DDI_FAILURE);
    }

    mutex_enter(&port->fp_mutex);

    if (port->fp_ulp_attach) {
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    switch (cmd) {
    case DDI_DETACH:
        if (port->fp_task != FP_TASK_IDLE) {
            mutex_exit(&port->fp_mutex);
            return (DDI_FAILURE);
        }

        /* Let's attempt to quit the job handler gracefully */
        port->fp_soft_state |= FP_DETACH_INPROGRESS;

        mutex_exit(&port->fp_mutex);
        converse = FC_CMD_ATTACH;
        if (fctl_detach_ulps(port, FC_CMD_DETACH,
            &modlinkage) != FC_SUCCESS) {
            mutex_enter(&port->fp_mutex);
            port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
            mutex_exit(&port->fp_mutex);
            rval = DDI_FAILURE;
            break;
        }

        mutex_enter(&port->fp_mutex);
        for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt);
            cnt++) {
            mutex_exit(&port->fp_mutex);
            delay(drv_usectohz(1000000));
            mutex_enter(&port->fp_mutex);
        }

        if (port->fp_job_head) {
            mutex_exit(&port->fp_mutex);
            rval = DDI_FAILURE;
            break;
        }
        mutex_exit(&port->fp_mutex);

        rval = fp_detach_handler(port);
        break;

    case DDI_SUSPEND:
        mutex_exit(&port->fp_mutex);
        converse = FC_CMD_RESUME;
        if (fctl_detach_ulps(port, FC_CMD_SUSPEND,
            &modlinkage) != FC_SUCCESS) {
            rval = DDI_FAILURE;
            break;
        }
        if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) {
            (void) callb_generic_cpr(&port->fp_cpr_info,
                CB_CODE_CPR_RESUME);
        }
        break;

    default:
        mutex_exit(&port->fp_mutex);
        break;
    }

    /*
     * Use softint to perform reattach. Mark fp_ulp_attach so we
     * don't attempt to do this repeatedly on behalf of some persistent
     * caller.
     */
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_ulp_attach = 1;

        /*
         * If the port is in low power mode then there is a
         * possibility that the FCA too could be in low power mode.
         * Try to raise the power before calling attach ulps.
488 */ 489 490 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 491 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 492 mutex_exit(&port->fp_mutex); 493 (void) pm_raise_power(port->fp_port_dip, 494 FP_PM_COMPONENT, FP_PM_PORT_UP); 495 } else { 496 mutex_exit(&port->fp_mutex); 497 } 498 499 500 fp_attach_ulps(port, converse); 501 502 mutex_enter(&port->fp_mutex); 503 while (port->fp_ulp_attach) { 504 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 505 } 506 507 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 508 509 /* 510 * Mark state as detach failed so asynchronous ULP attach 511 * events (downstream, not the ones we're initiating with 512 * the call to fp_attach_ulps) are not honored. We're 513 * really still in pending detach. 514 */ 515 port->fp_soft_state |= FP_DETACH_FAILED; 516 517 mutex_exit(&port->fp_mutex); 518 } 519 520 return (rval); 521 } 522 523 524 /* 525 * fp_getinfo: 526 * Given the device number, return either the 527 * dev_info_t pointer or the instance number. 528 */ 529 530 /* ARGSUSED */ 531 static int 532 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 533 { 534 int rval; 535 minor_t instance; 536 fc_local_port_t *port; 537 538 rval = DDI_SUCCESS; 539 instance = getminor((dev_t)arg); 540 541 switch (cmd) { 542 case DDI_INFO_DEVT2DEVINFO: 543 if ((port = ddi_get_soft_state(fp_driver_softstate, 544 instance)) == NULL) { 545 rval = DDI_FAILURE; 546 break; 547 } 548 *result = (void *)port->fp_port_dip; 549 break; 550 551 case DDI_INFO_DEVT2INSTANCE: 552 *result = (void *)(uintptr_t)instance; 553 break; 554 555 default: 556 rval = DDI_FAILURE; 557 break; 558 } 559 560 return (rval); 561 } 562 563 564 /* 565 * Entry point for power up and power down request from kernel 566 */ 567 static int 568 fp_power(dev_info_t *dip, int comp, int level) 569 { 570 int rval = DDI_FAILURE; 571 fc_local_port_t *port; 572 573 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 574 if (port == NULL || comp != FP_PM_COMPONENT) { 575 return (rval); 576 } 577 578 switch (level) { 579 case FP_PM_PORT_UP: 580 rval = DDI_SUCCESS; 581 582 /* 583 * If the port is DDI_SUSPENDed, let the DDI_RESUME 584 * code complete the rediscovery. 585 */ 586 mutex_enter(&port->fp_mutex); 587 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 588 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 589 port->fp_pm_level = FP_PM_PORT_UP; 590 mutex_exit(&port->fp_mutex); 591 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage); 592 break; 593 } 594 595 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 596 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 597 598 port->fp_pm_level = FP_PM_PORT_UP; 599 rval = fp_power_up(port); 600 if (rval != DDI_SUCCESS) { 601 port->fp_pm_level = FP_PM_PORT_DOWN; 602 } 603 } else { 604 port->fp_pm_level = FP_PM_PORT_UP; 605 } 606 mutex_exit(&port->fp_mutex); 607 break; 608 609 case FP_PM_PORT_DOWN: 610 mutex_enter(&port->fp_mutex); 611 612 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP)); 613 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) { 614 /* 615 * PM framework goofed up. We have don't 616 * have any PM components. Let's never go down. 
617 */ 618 mutex_exit(&port->fp_mutex); 619 break; 620 621 } 622 623 if (port->fp_ulp_attach) { 624 /* We shouldn't let the power go down */ 625 mutex_exit(&port->fp_mutex); 626 break; 627 } 628 629 /* 630 * Not a whole lot to do if we are detaching 631 */ 632 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 633 port->fp_pm_level = FP_PM_PORT_DOWN; 634 mutex_exit(&port->fp_mutex); 635 rval = DDI_SUCCESS; 636 break; 637 } 638 639 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 640 port->fp_pm_level = FP_PM_PORT_DOWN; 641 642 rval = fp_power_down(port); 643 if (rval != DDI_SUCCESS) { 644 port->fp_pm_level = FP_PM_PORT_UP; 645 ASSERT(!(port->fp_soft_state & 646 FP_SOFT_POWER_DOWN)); 647 } else { 648 ASSERT(port->fp_soft_state & 649 FP_SOFT_POWER_DOWN); 650 } 651 } 652 mutex_exit(&port->fp_mutex); 653 break; 654 655 default: 656 break; 657 } 658 659 return (rval); 660 } 661 662 663 /* 664 * Open FC port devctl node 665 */ 666 static int 667 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 668 { 669 int instance; 670 fc_local_port_t *port; 671 672 if (otype != OTYP_CHR) { 673 return (EINVAL); 674 } 675 676 /* 677 * This is not a toy to play with. Allow only powerful 678 * users (hopefully knowledgeable) to access the port 679 * (A hacker potentially could download a sick binary 680 * file into FCA) 681 */ 682 if (drv_priv(credp)) { 683 return (EPERM); 684 } 685 686 instance = (int)getminor(*devp); 687 688 port = ddi_get_soft_state(fp_driver_softstate, instance); 689 if (port == NULL) { 690 return (ENXIO); 691 } 692 693 mutex_enter(&port->fp_mutex); 694 if (port->fp_flag & FP_EXCL) { 695 /* 696 * It is already open for exclusive access. 697 * So shut the door on this caller. 698 */ 699 mutex_exit(&port->fp_mutex); 700 return (EBUSY); 701 } 702 703 if (flag & FEXCL) { 704 if (port->fp_flag & FP_OPEN) { 705 /* 706 * Exclusive operation not possible 707 * as it is already opened 708 */ 709 mutex_exit(&port->fp_mutex); 710 return (EBUSY); 711 } 712 port->fp_flag |= FP_EXCL; 713 } 714 port->fp_flag |= FP_OPEN; 715 mutex_exit(&port->fp_mutex); 716 717 return (0); 718 } 719 720 721 /* 722 * The driver close entry point is called on the last close() 723 * of a device. So it is perfectly alright to just clobber the 724 * open flag and reset it to idle (instead of having to reset 725 * each flag bits). For any confusion, check out close(9E). 
726 */ 727 728 /* ARGSUSED */ 729 static int 730 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 731 { 732 int instance; 733 fc_local_port_t *port; 734 735 if (otype != OTYP_CHR) { 736 return (EINVAL); 737 } 738 739 instance = (int)getminor(dev); 740 741 port = ddi_get_soft_state(fp_driver_softstate, instance); 742 if (port == NULL) { 743 return (ENXIO); 744 } 745 746 mutex_enter(&port->fp_mutex); 747 if ((port->fp_flag & FP_OPEN) == 0) { 748 mutex_exit(&port->fp_mutex); 749 return (ENODEV); 750 } 751 port->fp_flag = FP_IDLE; 752 mutex_exit(&port->fp_mutex); 753 754 return (0); 755 } 756 757 /* 758 * Handle IOCTL requests 759 */ 760 761 /* ARGSUSED */ 762 static int 763 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 764 { 765 int instance; 766 int ret = 0; 767 fcio_t fcio; 768 fc_local_port_t *port; 769 770 instance = (int)getminor(dev); 771 772 port = ddi_get_soft_state(fp_driver_softstate, instance); 773 if (port == NULL) { 774 return (ENXIO); 775 } 776 777 mutex_enter(&port->fp_mutex); 778 if ((port->fp_flag & FP_OPEN) == 0) { 779 mutex_exit(&port->fp_mutex); 780 return (ENXIO); 781 } 782 783 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 784 mutex_exit(&port->fp_mutex); 785 return (ENXIO); 786 } 787 788 mutex_exit(&port->fp_mutex); 789 790 /* this will raise power if necessary */ 791 ret = fctl_busy_port(port); 792 if (ret != 0) { 793 return (ret); 794 } 795 796 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 797 798 799 switch (cmd) { 800 case FCIO_CMD: { 801 #ifdef _MULTI_DATAMODEL 802 switch (ddi_model_convert_from(mode & FMODELS)) { 803 case DDI_MODEL_ILP32: { 804 struct fcio32 fcio32; 805 806 if (ddi_copyin((void *)data, (void *)&fcio32, 807 sizeof (struct fcio32), mode)) { 808 ret = EFAULT; 809 break; 810 } 811 fcio.fcio_xfer = fcio32.fcio_xfer; 812 fcio.fcio_cmd = fcio32.fcio_cmd; 813 fcio.fcio_flags = fcio32.fcio_flags; 814 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 815 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 816 fcio.fcio_ibuf = 817 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 818 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 819 fcio.fcio_obuf = 820 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 821 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 822 fcio.fcio_abuf = 823 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 824 fcio.fcio_errno = fcio32.fcio_errno; 825 break; 826 } 827 828 case DDI_MODEL_NONE: 829 if (ddi_copyin((void *)data, (void *)&fcio, 830 sizeof (fcio_t), mode)) { 831 ret = EFAULT; 832 } 833 break; 834 } 835 #else /* _MULTI_DATAMODEL */ 836 if (ddi_copyin((void *)data, (void *)&fcio, 837 sizeof (fcio_t), mode)) { 838 ret = EFAULT; 839 break; 840 } 841 #endif /* _MULTI_DATAMODEL */ 842 if (!ret) { 843 ret = fp_fciocmd(port, data, mode, &fcio); 844 } 845 break; 846 } 847 848 default: 849 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 850 mode, credp, rval); 851 } 852 853 fctl_idle_port(port); 854 855 return (ret); 856 } 857 858 859 /* 860 * Init Symbolic Port Name and Node Name 861 * LV will try to get symbolic names from FCA driver 862 * and register these to name server, 863 * if LV fails to get these, 864 * LV will register its default symbolic names to name server. 
/*
 * Init Symbolic Port Name and Node Name.
 * LV will try to get the symbolic names from the FCA driver
 * and register them with the name server; if LV fails to get
 * them, it registers its default symbolic names with the
 * name server instead.
 * The default symbolic node name format is :
 *	<hostname>:<hba driver name>(instance)
 * The default symbolic port name format is :
 *	<fp path name>
 */
static void
fp_init_symbolic_names(fc_local_port_t *port)
{
    const char *vendorname = ddi_driver_name(port->fp_fca_dip);
    char *sym_name;
    char fcaname[50] = {0};
    int hostnlen, fcanlen;

    if (port->fp_sym_node_namelen == 0) {
        hostnlen = strlen(utsname.nodename);
        (void) snprintf(fcaname, sizeof (fcaname),
            "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip));
        fcanlen = strlen(fcaname);

        sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP);
        (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname);
        port->fp_sym_node_namelen = strlen(sym_name);
        if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) {
            port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN;
        }
        (void) strncpy(port->fp_sym_node_name, sym_name,
            port->fp_sym_node_namelen);
        kmem_free(sym_name, hostnlen + fcanlen + 2);
    }

    if (port->fp_sym_port_namelen == 0) {
        char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

        (void) ddi_pathname(port->fp_port_dip, pathname);
        port->fp_sym_port_namelen = strlen(pathname);
        if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) {
            port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN;
        }
        (void) strncpy(port->fp_sym_port_name, pathname,
            port->fp_sym_port_namelen);
        kmem_free(pathname, MAXPATHLEN);
    }
}


/*
 * Perform port attach
 */
static int
fp_attach_handler(dev_info_t *dip)
{
    int rval;
    int instance;
    int port_num;
    int port_len;
    char name[30];
    char i_pwwn[17];
    fp_cmd_t *pkt;
    uint32_t ub_count;
    fc_local_port_t *port;
    job_request_t *job;
    fc_local_port_t *phyport = NULL;
    int portpro1;
    char pwwn[17], nwwn[17];

    instance = ddi_get_instance(dip);
    port_len = sizeof (port_num);
    rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
        DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port",
        (caddr_t)&port_num, &port_len);
    if (rval != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): No port property in devinfo",
            instance);
        return (DDI_FAILURE);
    }

    if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance,
        DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node",
            instance);
        return (DDI_FAILURE);
    }

    if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance,
        DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to create fc attachment"
            " point minor node", instance);
        ddi_remove_minor_node(dip, NULL);
        return (DDI_FAILURE);
    }

    if (ddi_soft_state_zalloc(fp_driver_softstate, instance)
        != DDI_SUCCESS) {
        cmn_err(CE_WARN, "fp(%d): failed to alloc soft state",
            instance);
        ddi_remove_minor_node(dip, NULL);
        return (DDI_FAILURE);
    }
    port = ddi_get_soft_state(fp_driver_softstate, instance);

    (void) sprintf(port->fp_ibuf, "fp(%d)", instance);

    port->fp_instance = instance;
    port->fp_ulp_attach = 1;
    port->fp_port_num = port_num;
    port->fp_verbose = fp_verbosity;
    port->fp_options = fp_options;

    port->fp_fca_dip = ddi_get_parent(dip);
    port->fp_port_dip = dip;
    port->fp_fca_tran = (fc_fca_tran_t *)
        ddi_get_driver_private(port->fp_fca_dip);

    port->fp_task = port->fp_last_task = FP_TASK_IDLE;
    /*
     * Init the starting value of fp_rscn_count. Note that if
     * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the
     * actual # of RSCNs will be (fp_rscn_count - 1)
     */
    port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1;

    mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL);
    cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL);
    cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL);

    (void) sprintf(name, "fp%d_cache", instance);

    if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY,
        dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
        "phyport-instance", -1)) != -1) {
        phyport = ddi_get_soft_state(fp_driver_softstate, portpro1);
        fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn);
        fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn);
        port->fp_npiv_type = FC_NPIV_PORT;
    }

    /*
     * Allocate the pool of fc_packet_t structs to be used with
     * this fp instance.
     */
    port->fp_pkt_cache = kmem_cache_create(name,
        (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8,
        fp_cache_constructor, fp_cache_destructor, NULL, (void *)port,
        NULL, 0);
    port->fp_out_fpcmds = 0;
    if (port->fp_pkt_cache == NULL) {
        goto cache_alloc_failed;
    }


    /*
     * Allocate the d_id and pwwn hash tables for all remote ports
     * connected to this local port.
     */
    port->fp_did_table = kmem_zalloc(did_table_size *
        sizeof (struct d_id_hash), KM_SLEEP);

    port->fp_pwwn_table = kmem_zalloc(pwwn_table_size *
        sizeof (struct pwwn_hash), KM_SLEEP);

    port->fp_taskq = taskq_create("fp_ulp_callback", 1,
        MINCLSYSPRI, 1, 16, 0);

    /* Indicate that we don't have the PM components yet */
    port->fp_soft_state |= FP_SOFT_NO_PMCOMP;
    /*
     * Bind the callbacks with the FCA driver. This will open the gate
     * for asynchronous callbacks, so after this call the fp_mutex
     * must be held when updating the fc_local_port_t struct.
     *
     * This is done _before_ setting up the job thread so we can avoid
     * cleaning up after the thread_create() in the error path. This
     * also means fp will be operating with fp_els_resp_pkt set to NULL.
     */
    if (fp_bind_callbacks(port) != DDI_SUCCESS) {
        goto bind_callbacks_failed;
    }

    if (phyport) {
        mutex_enter(&phyport->fp_mutex);
        if (phyport->fp_port_next) {
            phyport->fp_port_next->fp_port_prev = port;
            port->fp_port_next = phyport->fp_port_next;
            phyport->fp_port_next = port;
            port->fp_port_prev = phyport;
        } else {
            phyport->fp_port_next = port;
            phyport->fp_port_prev = port;
            port->fp_port_next = phyport;
            port->fp_port_prev = phyport;
        }
        mutex_exit(&phyport->fp_mutex);
    }

    /*
     * Init Symbolic Names
     */
    fp_init_symbolic_names(port);

    pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t),
        KM_SLEEP, NULL);

    if (pkt == NULL) {
        cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet",
            instance);
        goto alloc_els_packet_failed;
    }

    (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN,
        v.v_maxsyspri - 2);

    fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn);
    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port",
        i_pwwn) != DDI_PROP_SUCCESS) {
        fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
            "fp(%d): Updating 'initiator-port' property"
            " on fp dev_info node failed", instance);
    }

    fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn);
    if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node",
        i_pwwn) != DDI_PROP_SUCCESS) {
        fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
            "fp(%d): Updating 'initiator-node' property"
            " on fp dev_info node failed", instance);
    }

    mutex_enter(&port->fp_mutex);
    port->fp_els_resp_pkt = pkt;
    mutex_exit(&port->fp_mutex);

    /*
     * Determine the count of unsolicited buffers this FCA can support
     */
    fp_retrieve_caps(port);

    /*
     * Allocate unsolicited buffer tokens
     */
    if (port->fp_ub_count) {
        ub_count = port->fp_ub_count;
        port->fp_ub_tokens = kmem_zalloc(ub_count *
            sizeof (*port->fp_ub_tokens), KM_SLEEP);
        /*
         * Do not fail the attach if unsolicited buffer allocation
         * fails; just try to get along with whatever the FCA can do.
         */
        if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size,
            FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) !=
            FC_SUCCESS || ub_count != port->fp_ub_count) {
            cmn_err(CE_WARN, "fp(%d): failed to allocate "
                " Unsolicited buffers. proceeding with attach...",
                instance);
            kmem_free(port->fp_ub_tokens,
                sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
            port->fp_ub_tokens = NULL;
        }
    }

    fp_load_ulp_modules(dip, port);

    /*
     * Enable DDI_SUSPEND and DDI_RESUME for this instance.
     */
    (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP,
        "pm-hardware-state", "needs-suspend-resume",
        strlen("needs-suspend-resume") + 1);

    /*
     * fctl maintains a list of all port handles, so
     * help fctl add this one to its list now.
     */
    mutex_enter(&port->fp_mutex);
    fctl_add_port(port);

    /*
     * If a state change is already in progress, set the bind state to
     * OFFLINE as well, so further state change callbacks into ULPs
     * will pass the appropriate states
     */
    if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
        port->fp_statec_busy) {
        port->fp_bind_state = FC_STATE_OFFLINE;
        mutex_exit(&port->fp_mutex);

        fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
    } else {
        /*
         * Without dropping the mutex, ensure that the port
         * startup happens ahead of state change callback
         * processing
         */
        ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

        port->fp_last_task = port->fp_task;
        port->fp_task = FP_TASK_PORT_STARTUP;

        job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
            fp_startup_done, (opaque_t)port, KM_SLEEP);

        port->fp_job_head = port->fp_job_tail = job;

        cv_signal(&port->fp_cv);

        mutex_exit(&port->fp_mutex);
    }

    mutex_enter(&port->fp_mutex);
    while (port->fp_ulp_attach) {
        cv_wait(&port->fp_attach_cv, &port->fp_mutex);
    }
    mutex_exit(&port->fp_mutex);

    if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
        "pm-components", fp_pm_comps,
        sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
        DDI_PROP_SUCCESS) {
        FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
            " components property, PM disabled on this port.");
        mutex_enter(&port->fp_mutex);
        port->fp_pm_level = FP_PM_PORT_UP;
        mutex_exit(&port->fp_mutex);
    } else {
        if (pm_raise_power(dip, FP_PM_COMPONENT,
            FP_PM_PORT_UP) != DDI_SUCCESS) {
            FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
                " power level");
            mutex_enter(&port->fp_mutex);
            port->fp_pm_level = FP_PM_PORT_UP;
            mutex_exit(&port->fp_mutex);
        }

        /*
         * Don't unset the FP_SOFT_NO_PMCOMP flag until after
         * the call to pm_raise_power. The PM framework can't
         * handle multiple threads calling into it during attach.
         */

        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
        mutex_exit(&port->fp_mutex);
    }

    ddi_report_dev(dip);

    fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

    return (DDI_SUCCESS);
    /*
     * Unwind any/all preceding allocations in the event of an error.
     */

alloc_els_packet_failed:

    if (port->fp_fca_handle != NULL) {
        port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
        port->fp_fca_handle = NULL;
    }

    if (port->fp_ub_tokens != NULL) {
        (void) fc_ulp_ubfree(port, port->fp_ub_count,
            port->fp_ub_tokens);
        kmem_free(port->fp_ub_tokens,
            port->fp_ub_count * sizeof (*port->fp_ub_tokens));
        port->fp_ub_tokens = NULL;
    }

    if (port->fp_els_resp_pkt != NULL) {
        fp_free_pkt(port->fp_els_resp_pkt);
        port->fp_els_resp_pkt = NULL;
    }

bind_callbacks_failed:

    if (port->fp_taskq != NULL) {
        taskq_destroy(port->fp_taskq);
    }

    if (port->fp_pwwn_table != NULL) {
        kmem_free(port->fp_pwwn_table,
            pwwn_table_size * sizeof (struct pwwn_hash));
        port->fp_pwwn_table = NULL;
    }

    if (port->fp_did_table != NULL) {
        kmem_free(port->fp_did_table,
            did_table_size * sizeof (struct d_id_hash));
        port->fp_did_table = NULL;
    }

    if (port->fp_pkt_cache != NULL) {
        kmem_cache_destroy(port->fp_pkt_cache);
        port->fp_pkt_cache = NULL;
    }

cache_alloc_failed:

    cv_destroy(&port->fp_attach_cv);
    cv_destroy(&port->fp_cv);
    mutex_destroy(&port->fp_mutex);
    ddi_remove_minor_node(port->fp_port_dip, NULL);
    ddi_soft_state_free(fp_driver_softstate, instance);
    ddi_prop_remove_all(dip);

    return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
    int rval;
    fc_local_port_t *port;

    port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

    ASSERT(port != NULL);

#ifdef DEBUG
    mutex_enter(&port->fp_mutex);
    ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
    mutex_exit(&port->fp_mutex);
#endif

    /*
     * If the port was power suspended, raise the power level
     */
    mutex_enter(&port->fp_mutex);
    if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
        (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
        ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

        mutex_exit(&port->fp_mutex);
        if (pm_raise_power(dip, FP_PM_COMPONENT,
            FP_PM_PORT_UP) != DDI_SUCCESS) {
            FP_TRACE(FP_NHEAD2(9, 0),
                "Failed to raise the power level");
            return (DDI_FAILURE);
        }
        mutex_enter(&port->fp_mutex);
    }
    port->fp_soft_state &= ~FP_SOFT_SUSPEND;
    mutex_exit(&port->fp_mutex);

    /*
     * All the discovery is initiated and handled by the per-port thread.
     * Further, all the discovery is handled in callback mode
     * (not polled mode); in a specific case such as this, the discovery
     * is required to happen in polled mode. The easiest way out is
     * to bail out the port thread and get started. Come back and fix this
     * to do on demand discovery initiated by ULPs. ULPs such as FCP
     * will do on-demand discovery during pre-power-up busctl handling
     * which will only be possible when SCSA provides a new HBA vector
     * for sending down the PM busctl requests.
     */
    (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME);

    rval = fp_resume_all(port, FC_CMD_RESUME);
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state |= FP_SOFT_SUSPEND;
        mutex_exit(&port->fp_mutex);
        (void) callb_generic_cpr(&port->fp_cpr_info,
            CB_CODE_CPR_CHKPT);
    }

    return (rval);
}

/*
 * Perform FC Port power on initialization
 */
static int
fp_power_up(fc_local_port_t *port)
{
    int rval;

    ASSERT(MUTEX_HELD(&port->fp_mutex));

    ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0);
    ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN);

    port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;

    mutex_exit(&port->fp_mutex);

    rval = fp_resume_all(port, FC_CMD_POWER_UP);
    if (rval != DDI_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state |= FP_SOFT_POWER_DOWN;
    } else {
        mutex_enter(&port->fp_mutex);
    }

    return (rval);
}


/*
 * It is important to note that the power may possibly be removed between
 * SUSPEND and the ensuing RESUME operation. In such a context the underlying
 * FC port hardware would have gone through an OFFLINE to ONLINE transition
 * (hardware state). In this case, the port driver may need to rediscover the
 * topology, perform LOGINs, register with the name server again and perform
 * any such port initialization procedures. To perform LOGINs, the driver could
 * use the port device handle to see if a LOGIN needs to be performed and use
 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured
 * or removed) which will be reflected in the map the ULPs will see.
 */
static int
fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd)
{

    ASSERT(!MUTEX_HELD(&port->fp_mutex));

    if (fp_bind_callbacks(port) != DDI_SUCCESS) {
        return (DDI_FAILURE);
    }

    mutex_enter(&port->fp_mutex);

    /*
     * If there are commands queued for delayed retry, instead of
     * working the hard way to figure out which ones are good for
     * restart and which ones are not (ELSs are definitely not good,
     * as the port will have to go through a new spin of rediscovery
     * now), just flush them out.
     */
    if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) {
        fp_cmd_t *cmd;

        port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT;

        mutex_exit(&port->fp_mutex);
        while ((cmd = fp_deque_cmd(port)) != NULL) {
            cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
            fp_iodone(cmd);
        }
        mutex_enter(&port->fp_mutex);
    }

    if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) {
        if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) ||
            port->fp_dev_count) {
            port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
            port->fp_offline_tid = timeout(fp_offline_timeout,
                (caddr_t)port, fp_offline_ticks);
        }
        if (port->fp_job_head) {
            cv_signal(&port->fp_cv);
        }
        mutex_exit(&port->fp_mutex);
        fctl_attach_ulps(port, cmd, &modlinkage);
    } else {
        struct job_request *job;

        /*
         * If an OFFLINE timer was running at the time of
         * suspending, there is no need to restart it as
         * the port is ONLINE now.
         */
        port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
        if (port->fp_statec_busy == 0) {
            port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
        }
        port->fp_statec_busy++;
        mutex_exit(&port->fp_mutex);

        job = fctl_alloc_job(JOB_PORT_ONLINE,
            JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
        fctl_enque_job(port, job);

        fctl_jobwait(job);
        fctl_remove_oldies(port);

        fctl_attach_ulps(port, cmd, &modlinkage);
        fctl_dealloc_job(job);
    }

    return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *  + Grab the port driver mutex, check if the unsolicited
 *    callback is currently under processing. If true, fail
 *    the DDI_DETACH request by printing a message; if false
 *    mark the DDI_DETACH as under progress, so that any
 *    further unsolicited callbacks get bounced.
 *  + Perform PRLO/LOGO if necessary, cleanup all the data
 *    structures.
 *  + Get the job_handler thread to gracefully exit.
 *  + Unregister callbacks with the FCA port.
 *  + Now that some peace is found, notify all the ULPs of
 *    DDI_DETACH request (using ulp_port_detach entry point)
 *  + Free all mutexes, semaphores, conditional variables.
 *  + Free the soft state, return success.
 *
 * Important considerations:
 *  Port driver de-registers state change and unsolicited
 *  callbacks before taking up the task of notifying ULPs
 *  and performing PRLO and LOGOs.
 *
 *  A port may go offline at the time PRLO/LOGO is being
 *  requested. It is expected of all FCA drivers to fail
 *  such requests either immediately with a FC_OFFLINE
 *  return code to fc_fca_transport() or return the packet
 *  asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
    job_request_t *job;
    uint32_t delay_count;
    fc_orphan_t *orp, *tmporp;

    /*
     * In a Fabric topology with many host ports connected to
     * a switch, another detaching instance of fp might have
     * triggered a LOGO (which is an unsolicited request to
     * this instance). So in order to be able to successfully
     * detach by taking care of such cases a delay of about
     * 30 seconds is introduced.
     */
    delay_count = 0;
    mutex_enter(&port->fp_mutex);
    if (port->fp_out_fpcmds != 0) {
        /*
         * At this time we can only check fp internal commands, because
         * sd/ssd/scsi_vhci should have finished all their commands,
         * fcp/fcip/fcsm should have finished all their commands.
         *
         * It seems that all fp internal commands are asynchronous now.
         */
        port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
        mutex_exit(&port->fp_mutex);

        cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
            " Failing detach", port->fp_instance, port->fp_out_fpcmds);
        return (DDI_FAILURE);
    }

    while ((port->fp_soft_state &
        (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
        (delay_count < 30)) {
        mutex_exit(&port->fp_mutex);
        delay_count++;
        delay(drv_usectohz(1000000));
        mutex_enter(&port->fp_mutex);
    }

    if (port->fp_soft_state &
        (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
        port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
        mutex_exit(&port->fp_mutex);

        cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
            " Failing detach", port->fp_instance);
        return (DDI_FAILURE);
    }

    port->fp_soft_state |= FP_SOFT_IN_DETACH;
    port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
    mutex_exit(&port->fp_mutex);

    /*
     * If we're powered down, we need to raise power prior to submitting
     * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
     * process the shutdown job.
     */
    if (fctl_busy_port(port) != 0) {
        cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
            port->fp_instance);
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    /*
     * This will deallocate data structs and cause the "job" thread
     * to exit, in preparation for DDI_DETACH on the instance.
     * This can sleep for an arbitrary duration, since it waits for
     * commands over the wire, timeout(9F) callbacks, etc.
     *
     * CAUTION: There is still a race here, where the "job" thread
     * can still be executing code even tho the fctl_jobwait() call
     * below has returned to us. In theory the fp driver could even be
     * modunloaded even tho the job thread isn't done executing.
     * without creating the race condition.
     */
    job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
        (opaque_t)port, KM_SLEEP);
    fctl_enque_job(port, job);
    fctl_jobwait(job);
    fctl_dealloc_job(job);


    (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
        FP_PM_PORT_DOWN);

    if (port->fp_taskq) {
        taskq_destroy(port->fp_taskq);
    }

    ddi_prop_remove_all(port->fp_port_dip);

    ddi_remove_minor_node(port->fp_port_dip, NULL);

    fctl_remove_port(port);

    fp_free_pkt(port->fp_els_resp_pkt);

    if (port->fp_ub_tokens) {
        if (fc_ulp_ubfree(port, port->fp_ub_count,
            port->fp_ub_tokens) != FC_SUCCESS) {
            cmn_err(CE_WARN, "fp(%d): couldn't free "
                " unsolicited buffers", port->fp_instance);
        }
        kmem_free(port->fp_ub_tokens,
            sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
        port->fp_ub_tokens = NULL;
    }

    if (port->fp_pkt_cache != NULL) {
        kmem_cache_destroy(port->fp_pkt_cache);
    }

    port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

    mutex_enter(&port->fp_mutex);
    if (port->fp_did_table) {
        kmem_free(port->fp_did_table, did_table_size *
            sizeof (struct d_id_hash));
    }

    if (port->fp_pwwn_table) {
        kmem_free(port->fp_pwwn_table, pwwn_table_size *
            sizeof (struct pwwn_hash));
    }
    orp = port->fp_orphan_list;
    while (orp) {
        tmporp = orp;
        orp = orp->orp_next;
        kmem_free(tmporp, sizeof (*orp));
    }

    mutex_exit(&port->fp_mutex);

    fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

    mutex_destroy(&port->fp_mutex);
    cv_destroy(&port->fp_attach_cv);
    cv_destroy(&port->fp_cv);
    ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

    return (DDI_SUCCESS);
}
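
/*
 * Note that the detach path above and the suspend/power-down paths below
 * all drain FCA callbacks the same way: poll fp_soft_state for
 * FP_SOFT_IN_STATEC_CB/FP_SOFT_IN_UNSOL_CB roughly once a second, for up
 * to about 30 seconds, and fail the operation if a callback is still in
 * progress.
 */
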
/*
 * Steps to perform the DDI_SUSPEND operation on a FC port
 *
 *  - If already suspended return DDI_FAILURE
 *  - If already power-suspended return DDI_SUCCESS
 *  - If an unsolicited callback or state change handling is in
 *    progress, throw a warning message, return DDI_FAILURE
 *  - Cancel timeouts
 *  - SUSPEND the job_handler thread (means do nothing as it is
 *    taken care of by the CPR framework)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
    uint32_t delay_count;

    mutex_enter(&port->fp_mutex);

    /*
     * The following should never happen, but
     * let the driver be more defensive here
     */
    if (port->fp_soft_state & FP_SOFT_SUSPEND) {
        mutex_exit(&port->fp_mutex);
        return (DDI_FAILURE);
    }

    /*
     * If the port is already power suspended, there
     * is nothing else to do, so return DDI_SUCCESS,
     * but mark the SUSPEND bit in the soft state
     * before leaving.
     */
    if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
        port->fp_soft_state |= FP_SOFT_SUSPEND;
        mutex_exit(&port->fp_mutex);
        return (DDI_SUCCESS);
    }

    /*
     * Check if an unsolicited callback or state change handling is
     * in progress. If true, fail the suspend operation; also throw
     * a warning message notifying the failure. Note that the Sun PCI
     * hotplug spec recommends messages in cases of failure (but
     * not flooding the console)
     *
     * Busy waiting for a short interval (500 milliseconds?) to see
     * if the callback processing completes may be another idea. Since
     * most of the callback processing involves a lot of work, it
     * is safe to just fail the SUSPEND operation. It is definitely
     * not bad to fail the SUSPEND operation if the driver is busy.
     */
    delay_count = 0;
    while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
        FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
        mutex_exit(&port->fp_mutex);
        delay_count++;
        delay(drv_usectohz(1000000));
        mutex_enter(&port->fp_mutex);
    }

    if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
        FP_SOFT_IN_UNSOL_CB)) {
        mutex_exit(&port->fp_mutex);
        cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
            " Failing suspend", port->fp_instance);
        return (DDI_FAILURE);
    }

    /*
     * Check if the FC port thread is busy
     */
    if (port->fp_job_head) {
        mutex_exit(&port->fp_mutex);
        FP_TRACE(FP_NHEAD2(9, 0),
            "FC port thread is busy: Failing suspend");
        return (DDI_FAILURE);
    }
    port->fp_soft_state |= FP_SOFT_SUSPEND;

    fp_suspend_all(port);
    mutex_exit(&port->fp_mutex);

    return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of a FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
    ASSERT(MUTEX_HELD(&port->fp_mutex));

    /*
     * A power down request followed by a DDI_SUSPEND should
     * never happen; if it does, return DDI_SUCCESS
     */
    if (port->fp_soft_state & FP_SOFT_SUSPEND) {
        port->fp_soft_state |= FP_SOFT_POWER_DOWN;
        return (DDI_SUCCESS);
    }

    /*
     * If the port is already power suspended, there
     * is nothing else to do, so return DDI_SUCCESS.
     */
    if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
        return (DDI_SUCCESS);
    }

    /*
     * Check if an unsolicited callback or state change handling
     * is in progress. If true, fail the PM suspend operation.
     * But don't print a message unless the verbosity of the
     * driver desires otherwise.
     */
    if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
        (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
        FP_TRACE(FP_NHEAD2(9, 0),
            "Unsolicited callback in progress: Failing power down");
        return (DDI_FAILURE);
    }

    /*
     * Check if the FC port thread is busy
     */
    if (port->fp_job_head) {
        FP_TRACE(FP_NHEAD2(9, 0),
            "FC port thread is busy: Failing power down");
        return (DDI_FAILURE);
    }
    port->fp_soft_state |= FP_SOFT_POWER_DOWN;

    /*
     * Check if the ULPs are ready for power down
     */
    mutex_exit(&port->fp_mutex);
    if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
        &modlinkage) != FC_SUCCESS) {
        mutex_enter(&port->fp_mutex);
        port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
        mutex_exit(&port->fp_mutex);

        /*
         * Power back up the obedient ULPs that went down
         */
        fp_attach_ulps(port, FC_CMD_POWER_UP);

        FP_TRACE(FP_NHEAD2(9, 0),
            "ULP(s) busy, detach_ulps failed. Failing power down");
        mutex_enter(&port->fp_mutex);
        return (DDI_FAILURE);
    }
    mutex_enter(&port->fp_mutex);

    fp_suspend_all(port);

    return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
    int index;
    struct pwwn_hash *head;
    fc_remote_port_t *pd;

    ASSERT(MUTEX_HELD(&port->fp_mutex));

    if (port->fp_wait_tid != 0) {
        timeout_id_t tid;

        tid = port->fp_wait_tid;
        port->fp_wait_tid = (timeout_id_t)NULL;
        mutex_exit(&port->fp_mutex);
        (void) untimeout(tid);
        mutex_enter(&port->fp_mutex);
        port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
    }

    if (port->fp_offline_tid) {
        timeout_id_t tid;

        tid = port->fp_offline_tid;
        port->fp_offline_tid = (timeout_id_t)NULL;
        mutex_exit(&port->fp_mutex);
        (void) untimeout(tid);
        mutex_enter(&port->fp_mutex);
        port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
    }
    mutex_exit(&port->fp_mutex);
    port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
    mutex_enter(&port->fp_mutex);

    /*
     * Mark all devices as OLD, and reset the LOGIN state as well
     * (this will force the ULPs to perform a LOGIN after calling
     * fc_portgetmap() during RESUME/PM_RESUME)
     */
    for (index = 0; index < pwwn_table_size; index++) {
        head = &port->fp_pwwn_table[index];
        pd = head->pwwn_head;
        while (pd != NULL) {
            mutex_enter(&pd->pd_mutex);
            fp_remote_port_offline(pd);
            fctl_delist_did_table(port, pd);
            pd->pd_state = PORT_DEVICE_VALID;
            pd->pd_login_count = 0;
            mutex_exit(&pd->pd_mutex);
            pd = pd->pd_wwn_hnext;
        }
    }
}
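
/*
 * The constructor/destructor below are the ones handed to
 * kmem_cache_create() in fp_attach_handler(), with the fc_local_port_t
 * passed back here as cdarg; pre-allocating the command and response DMA
 * handles at cache-construction time keeps that work out of the
 * fp_alloc_pkt()/fp_free_pkt() path.
 */
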
/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both command and responses.
 * Most of the ELSs used have both command and responses so it is strongly
 * desired to move them to the cache constructor routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
    int (*cb) (caddr_t);
    fc_packet_t *pkt;
    fp_cmd_t *cmd = (fp_cmd_t *)buf;
    fc_local_port_t *port = (fc_local_port_t *)cdarg;

    cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

    cmd->cmd_next = NULL;
    cmd->cmd_flags = 0;
    cmd->cmd_dflags = 0;
    cmd->cmd_job = NULL;
    cmd->cmd_port = port;
    pkt = &cmd->cmd_pkt;

    if (!(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) {
        if (ddi_dma_alloc_handle(port->fp_fca_dip,
            port->fp_fca_tran->fca_dma_attr, cb, NULL,
            &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
            return (-1);
        }

        if (ddi_dma_alloc_handle(port->fp_fca_dip,
            port->fp_fca_tran->fca_dma_attr, cb, NULL,
            &pkt->pkt_resp_dma) != DDI_SUCCESS) {
            ddi_dma_free_handle(&pkt->pkt_cmd_dma);
            return (-1);
        }
    } else {
        pkt->pkt_cmd_dma = 0;
        pkt->pkt_resp_dma = 0;
    }

    pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
    pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
        pkt->pkt_data_cookie_cnt = 0;
    pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
        pkt->pkt_data_cookie = NULL;
    pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

    return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
    fp_cmd_t *cmd = (fp_cmd_t *)buf;
    fc_packet_t *pkt;

    pkt = &cmd->cmd_pkt;
    if (pkt->pkt_cmd_dma) {
        ddi_dma_free_handle(&pkt->pkt_cmd_dma);
    }

    if (pkt->pkt_resp_dma) {
        ddi_dma_free_handle(&pkt->pkt_resp_dma);
    }
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery, and a system's inability to allocate memory
 * or DVMA resources while performing some of these critical ELSs
 * causes a lot of problems. While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers. So it
 * is desirable to have framework support to reserve a fragment
 * of DVMA. So until this is fixed the correct way, the suffering
 * is huge whenever a LIP happens at a time DVMA resources are
 * drained out completely - so an attempt needs to be made to
 * KM_SLEEP while requesting these resources, hoping that
 * the requests won't hang forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 *
 * fp/fctl will reuse fp_cmd_t somewhere, and change pkt_cmdlen/rsplen;
 * actually, it's a design fault. There's no problem for physical FCAs,
 * but it will cause a memory leak or panic for virtual FCAs like fcoei.
 *
 * For FCAs that don't support DMA, such as fcoei, we will use
 * pkt_fctl_rsvd1/rsvd2 to keep the real cmd_len/resp_len.
 */
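
/*
 * For reference, a typical internal caller allocates both the command and
 * response payloads in one shot, the way fp_attach_handler() sets up the
 * cached ELS response packet:
 *
 *	pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t),
 *	    sizeof (la_els_logi_t), KM_SLEEP, NULL);
 *
 * with a NULL fc_remote_port_t since no remote port is associated yet.
 */
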
1963 */ 1964 1965 static fp_cmd_t * 1966 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags, 1967 fc_remote_port_t *pd) 1968 { 1969 int rval; 1970 ulong_t real_len; 1971 fp_cmd_t *cmd; 1972 fc_packet_t *pkt; 1973 int (*cb) (caddr_t); 1974 ddi_dma_cookie_t pkt_cookie; 1975 ddi_dma_cookie_t *cp; 1976 uint32_t cnt; 1977 1978 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1979 1980 cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1981 1982 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags); 1983 if (cmd == NULL) { 1984 return (cmd); 1985 } 1986 1987 cmd->cmd_ulp_pkt = NULL; 1988 cmd->cmd_flags = 0; 1989 pkt = &cmd->cmd_pkt; 1990 ASSERT(cmd->cmd_dflags == 0); 1991 1992 pkt->pkt_datalen = 0; 1993 pkt->pkt_data = NULL; 1994 pkt->pkt_state = 0; 1995 pkt->pkt_action = 0; 1996 pkt->pkt_reason = 0; 1997 pkt->pkt_expln = 0; 1998 pkt->pkt_cmd = NULL; 1999 pkt->pkt_resp = NULL; 2000 pkt->pkt_fctl_rsvd1 = NULL; 2001 pkt->pkt_fctl_rsvd2 = NULL; 2002 2003 /* 2004 * Init pkt_pd with the given pointer; this must be done _before_ 2005 * the call to fc_ulp_init_packet(). 2006 */ 2007 pkt->pkt_pd = pd; 2008 2009 /* Now call the FCA driver to init its private, per-packet fields */ 2010 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 2011 goto alloc_pkt_failed; 2012 } 2013 2014 if (cmd_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2015 ASSERT(pkt->pkt_cmd_dma != NULL); 2016 2017 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2018 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2019 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2020 &pkt->pkt_cmd_acc); 2021 2022 if (rval != DDI_SUCCESS) { 2023 goto alloc_pkt_failed; 2024 } 2025 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2026 2027 if (real_len < cmd_len) { 2028 goto alloc_pkt_failed; 2029 } 2030 2031 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2032 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2033 DDI_DMA_CONSISTENT, cb, NULL, 2034 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2035 2036 if (rval != DDI_DMA_MAPPED) { 2037 goto alloc_pkt_failed; 2038 } 2039 2040 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2041 2042 if (pkt->pkt_cmd_cookie_cnt > 2043 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2044 goto alloc_pkt_failed; 2045 } 2046 2047 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2048 2049 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2050 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2051 KM_NOSLEEP); 2052 2053 if (cp == NULL) { 2054 goto alloc_pkt_failed; 2055 } 2056 2057 *cp = pkt_cookie; 2058 cp++; 2059 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2060 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2061 *cp = pkt_cookie; 2062 } 2063 } else if (cmd_len != 0) { 2064 pkt->pkt_cmd = kmem_alloc(cmd_len, KM_SLEEP); 2065 pkt->pkt_fctl_rsvd1 = (opaque_t)(uintptr_t)cmd_len; 2066 } 2067 2068 if (resp_len && !(port->fp_soft_state & FP_SOFT_FCA_IS_NODMA)) { 2069 ASSERT(pkt->pkt_resp_dma != NULL); 2070 2071 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2072 port->fp_fca_tran->fca_acc_attr, 2073 DDI_DMA_CONSISTENT, cb, NULL, 2074 (caddr_t *)&pkt->pkt_resp, &real_len, 2075 &pkt->pkt_resp_acc); 2076 2077 if (rval != DDI_SUCCESS) { 2078 goto alloc_pkt_failed; 2079 } 2080 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2081 2082 if (real_len < resp_len) { 2083 goto alloc_pkt_failed; 2084 } 2085 2086 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2087 pkt->pkt_resp, real_len, DDI_DMA_READ | 2088 DDI_DMA_CONSISTENT, cb, NULL, 2089 &pkt_cookie, 
&pkt->pkt_resp_cookie_cnt); 2090 2091 if (rval != DDI_DMA_MAPPED) { 2092 goto alloc_pkt_failed; 2093 } 2094 2095 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2096 2097 if (pkt->pkt_resp_cookie_cnt > 2098 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2099 goto alloc_pkt_failed; 2100 } 2101 2102 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2103 2104 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2105 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2106 KM_NOSLEEP); 2107 2108 if (cp == NULL) { 2109 goto alloc_pkt_failed; 2110 } 2111 2112 *cp = pkt_cookie; 2113 cp++; 2114 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2115 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2116 *cp = pkt_cookie; 2117 } 2118 } else if (resp_len != 0) { 2119 pkt->pkt_resp = kmem_alloc(resp_len, KM_SLEEP); 2120 pkt->pkt_fctl_rsvd2 = (opaque_t)(uintptr_t)resp_len; 2121 } 2122 2123 pkt->pkt_cmdlen = cmd_len; 2124 pkt->pkt_rsplen = resp_len; 2125 pkt->pkt_ulp_private = cmd; 2126 2127 return (cmd); 2128 2129 alloc_pkt_failed: 2130 2131 fp_free_dma(cmd); 2132 2133 if (pkt->pkt_cmd_cookie != NULL) { 2134 kmem_free(pkt->pkt_cmd_cookie, 2135 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2136 pkt->pkt_cmd_cookie = NULL; 2137 } 2138 2139 if (pkt->pkt_resp_cookie != NULL) { 2140 kmem_free(pkt->pkt_resp_cookie, 2141 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2142 pkt->pkt_resp_cookie = NULL; 2143 } 2144 2145 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2146 if (pkt->pkt_cmd) { 2147 kmem_free(pkt->pkt_cmd, cmd_len); 2148 } 2149 2150 if (pkt->pkt_resp) { 2151 kmem_free(pkt->pkt_resp, resp_len); 2152 } 2153 } 2154 2155 kmem_cache_free(port->fp_pkt_cache, cmd); 2156 2157 return (NULL); 2158 } 2159 2160 2161 /* 2162 * Free FC packet 2163 */ 2164 static void 2165 fp_free_pkt(fp_cmd_t *cmd) 2166 { 2167 fc_local_port_t *port; 2168 fc_packet_t *pkt; 2169 2170 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2171 2172 cmd->cmd_next = NULL; 2173 cmd->cmd_job = NULL; 2174 pkt = &cmd->cmd_pkt; 2175 pkt->pkt_ulp_private = 0; 2176 pkt->pkt_tran_flags = 0; 2177 pkt->pkt_tran_type = 0; 2178 port = cmd->cmd_port; 2179 2180 if (pkt->pkt_cmd_cookie != NULL) { 2181 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2182 sizeof (ddi_dma_cookie_t)); 2183 pkt->pkt_cmd_cookie = NULL; 2184 } 2185 2186 if (pkt->pkt_resp_cookie != NULL) { 2187 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2188 sizeof (ddi_dma_cookie_t)); 2189 pkt->pkt_resp_cookie = NULL; 2190 } 2191 2192 if (port->fp_soft_state & FP_SOFT_FCA_IS_NODMA) { 2193 if (pkt->pkt_cmd) { 2194 kmem_free(pkt->pkt_cmd, 2195 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd1); 2196 } 2197 2198 if (pkt->pkt_resp) { 2199 kmem_free(pkt->pkt_resp, 2200 (uint32_t)(uintptr_t)pkt->pkt_fctl_rsvd2); 2201 } 2202 } 2203 2204 fp_free_dma(cmd); 2205 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2206 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2207 } 2208 2209 2210 /* 2211 * Release DVMA resources 2212 */ 2213 static void 2214 fp_free_dma(fp_cmd_t *cmd) 2215 { 2216 fc_packet_t *pkt = &cmd->cmd_pkt; 2217 2218 pkt->pkt_cmdlen = 0; 2219 pkt->pkt_rsplen = 0; 2220 pkt->pkt_tran_type = 0; 2221 pkt->pkt_tran_flags = 0; 2222 2223 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2224 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2225 } 2226 2227 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2228 if (pkt->pkt_cmd_acc) { 2229 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2230 } 2231 } 2232 2233 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2234 (void) 
ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2235 } 2236 2237 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2238 if (pkt->pkt_resp_acc) { 2239 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2240 } 2241 } 2242 cmd->cmd_dflags = 0; 2243 } 2244 2245 2246 /* 2247 * Dedicated thread to perform various activities. One thread for 2248 * each fc_local_port_t (driver soft state) instance. 2249 * Note, this effectively works out to one thread for each local 2250 * port, but there are also some Solaris taskq threads in use on a per-local 2251 * port basis; these also need to be taken into consideration. 2252 */ 2253 static void 2254 fp_job_handler(fc_local_port_t *port) 2255 { 2256 int rval; 2257 uint32_t *d_id; 2258 fc_remote_port_t *pd; 2259 job_request_t *job; 2260 2261 #ifndef __lock_lint 2262 /* 2263 * Solaris-internal stuff for proper operation of kernel threads 2264 * with Solaris CPR. 2265 */ 2266 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2267 callb_generic_cpr, "fp_job_handler"); 2268 #endif 2269 2270 2271 /* Loop forever waiting for work to do */ 2272 for (;;) { 2273 2274 mutex_enter(&port->fp_mutex); 2275 2276 /* 2277 * Sleep if no work to do right now, or if we want 2278 * to suspend or power-down. 2279 */ 2280 while (port->fp_job_head == NULL || 2281 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2282 FP_SOFT_SUSPEND))) { 2283 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2284 cv_wait(&port->fp_cv, &port->fp_mutex); 2285 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2286 } 2287 2288 /* 2289 * OK, we've just been woken up, so retrieve the next entry 2290 * from the head of the job queue for this local port. 2291 */ 2292 job = fctl_deque_job(port); 2293 2294 /* 2295 * Handle all the fp driver's supported job codes here 2296 * in this big honkin' switch. 2297 */ 2298 switch (job->job_code) { 2299 case JOB_PORT_SHUTDOWN: 2300 /* 2301 * fp_port_shutdown() is only called from here. This 2302 * will prepare the local port instance (softstate) 2303 * for detaching. This cancels timeout callbacks, 2304 * executes LOGOs with remote ports, cleans up tables, 2305 * and deallocates data structs. 2306 */ 2307 fp_port_shutdown(port, job); 2308 2309 /* 2310 * This will exit the job thread. 2311 */ 2312 #ifndef __lock_lint 2313 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2314 #else 2315 mutex_exit(&port->fp_mutex); 2316 #endif 2317 fctl_jobdone(job); 2318 thread_exit(); 2319 2320 /* NOTREACHED */ 2321 2322 case JOB_ATTACH_ULP: { 2323 /* 2324 * This job is spawned in response to a ULP calling 2325 * fc_ulp_add(). 2326 */ 2327 2328 boolean_t do_attach_ulps = B_TRUE; 2329 2330 /* 2331 * If fp is detaching, we don't want to call 2332 * fp_startup_done as this asynchronous 2333 * notification may interfere with the re-attach. 2334 */ 2335 2336 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2337 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2338 do_attach_ulps = B_FALSE; 2339 } else { 2340 /* 2341 * We are going to force the transport 2342 * to attach to the ULPs, so set 2343 * fp_ulp_attach. This will keep any 2344 * potential detach from occurring until 2345 * we are done. 2346 */ 2347 port->fp_ulp_attach = 1; 2348 } 2349 2350 mutex_exit(&port->fp_mutex); 2351 2352 /* 2353 * NOTE: Since we just dropped the mutex, there is now 2354 * a race window where the fp_soft_state check above 2355 * could change here. This race is covered because an 2356 * additional check was added in the functions hidden 2357 * under fp_startup_done(). 
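 * (The fp_ulp_attach flag set above is cleared again in
 * fp_ulp_port_attach(), which also signals fp_attach_cv so that any
 * detach waiting on the ULP attach work can proceed.)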
2358 */ 2359 if (do_attach_ulps == B_TRUE) { 2360 /* 2361 * This goes thru a bit of a convoluted call 2362 * chain before spawning off a DDI taskq 2363 * request to perform the actual attach 2364 * operations. Blocking can occur at a number 2365 * of points. 2366 */ 2367 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2368 } 2369 job->job_result = FC_SUCCESS; 2370 fctl_jobdone(job); 2371 break; 2372 } 2373 2374 case JOB_ULP_NOTIFY: { 2375 /* 2376 * Pass state change notifications up to any/all 2377 * registered ULPs. 2378 */ 2379 uint32_t statec; 2380 2381 statec = job->job_ulp_listlen; 2382 if (statec == FC_STATE_RESET_REQUESTED) { 2383 port->fp_last_task = port->fp_task; 2384 port->fp_task = FP_TASK_OFFLINE; 2385 fp_port_offline(port, 0); 2386 port->fp_task = port->fp_last_task; 2387 port->fp_last_task = FP_TASK_IDLE; 2388 } 2389 2390 if (--port->fp_statec_busy == 0) { 2391 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2392 } 2393 2394 mutex_exit(&port->fp_mutex); 2395 2396 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2397 fctl_jobdone(job); 2398 break; 2399 } 2400 2401 case JOB_PLOGI_ONE: 2402 /* 2403 * Issue a PLOGI to a single remote port. Multiple 2404 * PLOGIs to different remote ports may occur in 2405 * parallel. 2406 * This can create the fc_remote_port_t if it does not 2407 * already exist. 2408 */ 2409 2410 mutex_exit(&port->fp_mutex); 2411 d_id = (uint32_t *)job->job_private; 2412 pd = fctl_get_remote_port_by_did(port, *d_id); 2413 2414 if (pd) { 2415 mutex_enter(&pd->pd_mutex); 2416 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2417 pd->pd_login_count++; 2418 mutex_exit(&pd->pd_mutex); 2419 job->job_result = FC_SUCCESS; 2420 fctl_jobdone(job); 2421 break; 2422 } 2423 mutex_exit(&pd->pd_mutex); 2424 } else { 2425 mutex_enter(&port->fp_mutex); 2426 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2427 mutex_exit(&port->fp_mutex); 2428 pd = fp_create_remote_port_by_ns(port, 2429 *d_id, KM_SLEEP); 2430 if (pd == NULL) { 2431 job->job_result = FC_FAILURE; 2432 fctl_jobdone(job); 2433 break; 2434 } 2435 } else { 2436 mutex_exit(&port->fp_mutex); 2437 } 2438 } 2439 2440 job->job_flags |= JOB_TYPE_FP_ASYNC; 2441 job->job_counter = 1; 2442 2443 rval = fp_port_login(port, *d_id, job, 2444 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2445 2446 if (rval != FC_SUCCESS) { 2447 job->job_result = rval; 2448 fctl_jobdone(job); 2449 } 2450 break; 2451 2452 case JOB_LOGO_ONE: { 2453 /* 2454 * Issue a PLOGO to a single remote port. Multiple 2455 * PLOGOs to different remote ports may occur in 2456 * parallel. 2457 */ 2458 fc_remote_port_t *pd; 2459 2460 #ifndef __lock_lint 2461 ASSERT(job->job_counter > 0); 2462 #endif 2463 2464 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2465 2466 mutex_enter(&pd->pd_mutex); 2467 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2468 mutex_exit(&pd->pd_mutex); 2469 job->job_result = FC_LOGINREQ; 2470 mutex_exit(&port->fp_mutex); 2471 fctl_jobdone(job); 2472 break; 2473 } 2474 if (pd->pd_login_count > 1) { 2475 pd->pd_login_count--; 2476 mutex_exit(&pd->pd_mutex); 2477 job->job_result = FC_SUCCESS; 2478 mutex_exit(&port->fp_mutex); 2479 fctl_jobdone(job); 2480 break; 2481 } 2482 mutex_exit(&pd->pd_mutex); 2483 mutex_exit(&port->fp_mutex); 2484 job->job_flags |= JOB_TYPE_FP_ASYNC; 2485 (void) fp_logout(port, pd, job); 2486 break; 2487 } 2488 2489 case JOB_FCIO_LOGIN: 2490 /* 2491 * PLOGI initiated at ioctl request. 
2492 */ 2493 mutex_exit(&port->fp_mutex); 2494 job->job_result = 2495 fp_fcio_login(port, job->job_private, job); 2496 fctl_jobdone(job); 2497 break; 2498 2499 case JOB_FCIO_LOGOUT: 2500 /* 2501 * PLOGO initiated at ioctl request. 2502 */ 2503 mutex_exit(&port->fp_mutex); 2504 job->job_result = 2505 fp_fcio_logout(port, job->job_private, job); 2506 fctl_jobdone(job); 2507 break; 2508 2509 case JOB_PORT_GETMAP: 2510 case JOB_PORT_GETMAP_PLOGI_ALL: { 2511 port->fp_last_task = port->fp_task; 2512 port->fp_task = FP_TASK_GETMAP; 2513 2514 switch (port->fp_topology) { 2515 case FC_TOP_PRIVATE_LOOP: 2516 job->job_counter = 1; 2517 2518 fp_get_loopmap(port, job); 2519 mutex_exit(&port->fp_mutex); 2520 fp_jobwait(job); 2521 fctl_fillout_map(port, 2522 (fc_portmap_t **)job->job_private, 2523 (uint32_t *)job->job_arg, 1, 0, 0); 2524 fctl_jobdone(job); 2525 mutex_enter(&port->fp_mutex); 2526 break; 2527 2528 case FC_TOP_PUBLIC_LOOP: 2529 case FC_TOP_FABRIC: 2530 mutex_exit(&port->fp_mutex); 2531 job->job_counter = 1; 2532 2533 job->job_result = fp_ns_getmap(port, 2534 job, (fc_portmap_t **)job->job_private, 2535 (uint32_t *)job->job_arg, 2536 FCTL_GAN_START_ID); 2537 fctl_jobdone(job); 2538 mutex_enter(&port->fp_mutex); 2539 break; 2540 2541 case FC_TOP_PT_PT: 2542 mutex_exit(&port->fp_mutex); 2543 fctl_fillout_map(port, 2544 (fc_portmap_t **)job->job_private, 2545 (uint32_t *)job->job_arg, 1, 0, 0); 2546 fctl_jobdone(job); 2547 mutex_enter(&port->fp_mutex); 2548 break; 2549 2550 default: 2551 mutex_exit(&port->fp_mutex); 2552 fctl_jobdone(job); 2553 mutex_enter(&port->fp_mutex); 2554 break; 2555 } 2556 port->fp_task = port->fp_last_task; 2557 port->fp_last_task = FP_TASK_IDLE; 2558 mutex_exit(&port->fp_mutex); 2559 break; 2560 } 2561 2562 case JOB_PORT_OFFLINE: { 2563 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2564 2565 port->fp_last_task = port->fp_task; 2566 port->fp_task = FP_TASK_OFFLINE; 2567 2568 if (port->fp_statec_busy > 2) { 2569 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2570 fp_port_offline(port, 0); 2571 if (--port->fp_statec_busy == 0) { 2572 port->fp_soft_state &= 2573 ~FP_SOFT_IN_STATEC_CB; 2574 } 2575 } else { 2576 fp_port_offline(port, 1); 2577 } 2578 2579 port->fp_task = port->fp_last_task; 2580 port->fp_last_task = FP_TASK_IDLE; 2581 2582 mutex_exit(&port->fp_mutex); 2583 2584 fctl_jobdone(job); 2585 break; 2586 } 2587 2588 case JOB_PORT_STARTUP: { 2589 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2590 if (port->fp_statec_busy > 1) { 2591 mutex_exit(&port->fp_mutex); 2592 break; 2593 } 2594 mutex_exit(&port->fp_mutex); 2595 2596 FP_TRACE(FP_NHEAD2(9, rval), 2597 "Topology discovery failed"); 2598 break; 2599 } 2600 2601 /* 2602 * Attempt building device handles in case 2603 * of private Loop. 
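 * (The loop map fetched via fp_get_loopmap() drives that discovery;
 * for switch topologies the fp_fabric_online() path below is taken
 * instead.)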
2604 */ 2605 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2606 job->job_counter = 1; 2607 2608 fp_get_loopmap(port, job); 2609 mutex_exit(&port->fp_mutex); 2610 fp_jobwait(job); 2611 mutex_enter(&port->fp_mutex); 2612 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2613 ASSERT(port->fp_total_devices == 0); 2614 port->fp_total_devices = 2615 port->fp_dev_count; 2616 } 2617 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2618 /* 2619 * Hack to avoid state changes going up early 2620 */ 2621 port->fp_statec_busy++; 2622 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2623 2624 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2625 fp_fabric_online(port, job); 2626 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2627 } 2628 mutex_exit(&port->fp_mutex); 2629 fctl_jobdone(job); 2630 break; 2631 } 2632 2633 case JOB_PORT_ONLINE: { 2634 char *newtop; 2635 char *oldtop; 2636 uint32_t old_top; 2637 2638 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2639 2640 /* 2641 * Bail out early if there are a lot of 2642 * state changes in the pipeline 2643 */ 2644 if (port->fp_statec_busy > 1) { 2645 --port->fp_statec_busy; 2646 mutex_exit(&port->fp_mutex); 2647 fctl_jobdone(job); 2648 break; 2649 } 2650 2651 switch (old_top = port->fp_topology) { 2652 case FC_TOP_PRIVATE_LOOP: 2653 oldtop = "Private Loop"; 2654 break; 2655 2656 case FC_TOP_PUBLIC_LOOP: 2657 oldtop = "Public Loop"; 2658 break; 2659 2660 case FC_TOP_PT_PT: 2661 oldtop = "Point to Point"; 2662 break; 2663 2664 case FC_TOP_FABRIC: 2665 oldtop = "Fabric"; 2666 break; 2667 2668 default: 2669 oldtop = NULL; 2670 break; 2671 } 2672 2673 port->fp_last_task = port->fp_task; 2674 port->fp_task = FP_TASK_ONLINE; 2675 2676 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2677 2678 port->fp_task = port->fp_last_task; 2679 port->fp_last_task = FP_TASK_IDLE; 2680 2681 if (port->fp_statec_busy > 1) { 2682 --port->fp_statec_busy; 2683 mutex_exit(&port->fp_mutex); 2684 break; 2685 } 2686 2687 port->fp_state = FC_STATE_OFFLINE; 2688 2689 FP_TRACE(FP_NHEAD2(9, rval), 2690 "Topology discovery failed"); 2691 2692 if (--port->fp_statec_busy == 0) { 2693 port->fp_soft_state &= 2694 ~FP_SOFT_IN_STATEC_CB; 2695 } 2696 2697 if (port->fp_offline_tid == NULL) { 2698 port->fp_offline_tid = 2699 timeout(fp_offline_timeout, 2700 (caddr_t)port, fp_offline_ticks); 2701 } 2702 2703 mutex_exit(&port->fp_mutex); 2704 break; 2705 } 2706 2707 switch (port->fp_topology) { 2708 case FC_TOP_PRIVATE_LOOP: 2709 newtop = "Private Loop"; 2710 break; 2711 2712 case FC_TOP_PUBLIC_LOOP: 2713 newtop = "Public Loop"; 2714 break; 2715 2716 case FC_TOP_PT_PT: 2717 newtop = "Point to Point"; 2718 break; 2719 2720 case FC_TOP_FABRIC: 2721 newtop = "Fabric"; 2722 break; 2723 2724 default: 2725 newtop = NULL; 2726 break; 2727 } 2728 2729 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2730 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2731 "Change in FC Topology old = %s new = %s", 2732 oldtop, newtop); 2733 } 2734 2735 switch (port->fp_topology) { 2736 case FC_TOP_PRIVATE_LOOP: { 2737 int orphan = (old_top == FC_TOP_FABRIC || 2738 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2739 2740 mutex_exit(&port->fp_mutex); 2741 fp_loop_online(port, job, orphan); 2742 break; 2743 } 2744 2745 case FC_TOP_PUBLIC_LOOP: 2746 /* FALLTHROUGH */ 2747 case FC_TOP_FABRIC: 2748 fp_fabric_online(port, job); 2749 mutex_exit(&port->fp_mutex); 2750 break; 2751 2752 case FC_TOP_PT_PT: 2753 fp_p2p_online(port, job); 2754 mutex_exit(&port->fp_mutex); 2755 break; 2756 2757 default: 2758 if (--port->fp_statec_busy != 0) { 2759 /* 2760 * Watch curiously at what the next 2761 * state transition can do. 2762 */ 2763 mutex_exit(&port->fp_mutex); 2764 break; 2765 } 2766 2767 FP_TRACE(FP_NHEAD2(9, 0), 2768 "Topology Unknown, Offlining the port.."); 2769 2770 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2771 port->fp_state = FC_STATE_OFFLINE; 2772 2773 if (port->fp_offline_tid == NULL) { 2774 port->fp_offline_tid = 2775 timeout(fp_offline_timeout, 2776 (caddr_t)port, fp_offline_ticks); 2777 } 2778 mutex_exit(&port->fp_mutex); 2779 break; 2780 } 2781 2782 mutex_enter(&port->fp_mutex); 2783 2784 port->fp_task = port->fp_last_task; 2785 port->fp_last_task = FP_TASK_IDLE; 2786 2787 mutex_exit(&port->fp_mutex); 2788 2789 fctl_jobdone(job); 2790 break; 2791 } 2792 2793 case JOB_PLOGI_GROUP: { 2794 mutex_exit(&port->fp_mutex); 2795 fp_plogi_group(port, job); 2796 break; 2797 } 2798 2799 case JOB_UNSOL_REQUEST: { 2800 mutex_exit(&port->fp_mutex); 2801 fp_handle_unsol_buf(port, 2802 (fc_unsol_buf_t *)job->job_private, job); 2803 fctl_dealloc_job(job); 2804 break; 2805 } 2806 2807 case JOB_NS_CMD: { 2808 fctl_ns_req_t *ns_cmd; 2809 2810 mutex_exit(&port->fp_mutex); 2811 2812 job->job_flags |= JOB_TYPE_FP_ASYNC; 2813 ns_cmd = (fctl_ns_req_t *)job->job_private; 2814 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2815 ns_cmd->ns_cmd_code > NS_DA_ID) { 2816 job->job_result = FC_BADCMD; 2817 fctl_jobdone(job); 2818 break; 2819 } 2820 2821 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2822 if (ns_cmd->ns_pd != NULL) { 2823 job->job_result = FC_BADOBJECT; 2824 fctl_jobdone(job); 2825 break; 2826 } 2827 2828 job->job_counter = 1; 2829 2830 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2831 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2832 2833 if (rval != FC_SUCCESS) { 2834 job->job_result = rval; 2835 fctl_jobdone(job); 2836 } 2837 break; 2838 } 2839 job->job_result = FC_SUCCESS; 2840 job->job_counter = 1; 2841 2842 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2843 if (rval != FC_SUCCESS) { 2844 fctl_jobdone(job); 2845 } 2846 break; 2847 } 2848 2849 case JOB_LINK_RESET: { 2850 la_wwn_t *pwwn; 2851 uint32_t topology; 2852 2853 pwwn = (la_wwn_t *)job->job_private; 2854 ASSERT(pwwn != NULL); 2855 2856 topology = port->fp_topology; 2857 mutex_exit(&port->fp_mutex); 2858 2859 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2860 topology == FC_TOP_PRIVATE_LOOP) { 2861 job->job_flags |= JOB_TYPE_FP_ASYNC; 2862 rval = port->fp_fca_tran->fca_reset( 2863 port->fp_fca_handle, FC_FCA_LINK_RESET); 2864 job->job_result = rval; 2865 fp_jobdone(job); 2866 } else { 2867 ASSERT((job->job_flags & 2868 JOB_TYPE_FP_ASYNC) == 0); 2869 2870 if (FC_IS_TOP_SWITCH(topology)) { 2871 rval = fp_remote_lip(port, pwwn, 2872 KM_SLEEP, job); 2873 } else { 2874 rval = FC_FAILURE; 2875 } 2876 if (rval != FC_SUCCESS) { 2877 job->job_result = rval; 2878 } 2879 fctl_jobdone(job); 2880 } 2881 break; 2882 } 2883 2884 default: 2885 mutex_exit(&port->fp_mutex); 2886 job->job_result = FC_BADCMD; 2887 fctl_jobdone(job); 2888 break; 2889 } 2890 } 2891 /* NOTREACHED */ 2892 } 2893 2894 2895 /* 2896 * Perform FC port bring up initialization 2897 */ 2898 static int 2899 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2900 { 2901 int rval; 2902 uint32_t state; 2903 uint32_t src_id; 2904 fc_lilpmap_t *lilp_map; 2905 2906 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2907 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2908 2909 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2910 " port=%p, job=%p", port, job); 2911 2912 port->fp_topology = FC_TOP_UNKNOWN; 2913 port->fp_port_id.port_id = 0; 2914 state = FC_PORT_STATE_MASK(port->fp_state); 2915 2916 if (state == FC_STATE_OFFLINE) { 2917 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2918 job->job_result = FC_OFFLINE; 2919 mutex_exit(&port->fp_mutex); 2920 fctl_jobdone(job); 2921 mutex_enter(&port->fp_mutex); 2922 return (FC_OFFLINE); 2923 } 2924 2925 if (state == FC_STATE_LOOP) { 2926 port->fp_port_type.port_type = FC_NS_PORT_NL; 2927 mutex_exit(&port->fp_mutex); 2928 2929 lilp_map = &port->fp_lilp_map; 2930 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2931 job->job_result = FC_FAILURE; 2932 fctl_jobdone(job); 2933 2934 FP_TRACE(FP_NHEAD1(9, rval), 2935 "LILP map Invalid or not present"); 2936 mutex_enter(&port->fp_mutex); 2937 return (FC_FAILURE); 2938 } 2939 2940 if (lilp_map->lilp_length == 0) { 2941 job->job_result = FC_NO_MAP; 2942 fctl_jobdone(job); 2943 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2944 "LILP map length zero"); 2945 mutex_enter(&port->fp_mutex); 2946 return (FC_NO_MAP); 2947 } 2948 src_id = lilp_map->lilp_myalpa & 0xFF; 2949 } else { 2950 fc_remote_port_t *pd; 2951 fc_fca_pm_t pm; 2952 fc_fca_p2p_info_t p2p_info; 2953 int pd_recepient; 2954 2955 /* 2956 * Get P2P remote port info if possible 2957 */ 2958 bzero((caddr_t)&pm, sizeof (pm)); 2959 2960 pm.pm_cmd_flags = FC_FCA_PM_READ; 2961 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2962 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2963 pm.pm_data_buf = (caddr_t)&p2p_info; 2964 2965 rval = port->fp_fca_tran->fca_port_manage( 2966 port->fp_fca_handle, &pm); 2967 2968 if (rval == FC_SUCCESS) { 2969 port->fp_port_id.port_id = p2p_info.fca_d_id; 2970 port->fp_port_type.port_type = FC_NS_PORT_N; 2971 port->fp_topology = FC_TOP_PT_PT; 2972 port->fp_total_devices = 1; 2973 pd_recepient = fctl_wwn_cmp( 2974 &port->fp_service_params.nport_ww_name, 2975 &p2p_info.pwwn) < 0 ? 
2976 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2977 mutex_exit(&port->fp_mutex); 2978 pd = fctl_create_remote_port(port, 2979 &p2p_info.nwwn, 2980 &p2p_info.pwwn, 2981 p2p_info.d_id, 2982 pd_recepient, KM_NOSLEEP); 2983 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2984 " P2P port=%p pd=%p fp %x pd %x", port, pd, 2985 port->fp_port_id.port_id, p2p_info.d_id); 2986 mutex_enter(&port->fp_mutex); 2987 return (FC_SUCCESS); 2988 } 2989 port->fp_port_type.port_type = FC_NS_PORT_N; 2990 mutex_exit(&port->fp_mutex); 2991 src_id = 0; 2992 } 2993 2994 job->job_counter = 1; 2995 job->job_result = FC_SUCCESS; 2996 2997 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2998 KM_SLEEP)) != FC_SUCCESS) { 2999 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 3000 job->job_result = FC_FAILURE; 3001 fctl_jobdone(job); 3002 3003 mutex_enter(&port->fp_mutex); 3004 if (port->fp_statec_busy <= 1) { 3005 mutex_exit(&port->fp_mutex); 3006 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 3007 "Couldn't transport FLOGI"); 3008 mutex_enter(&port->fp_mutex); 3009 } 3010 return (FC_FAILURE); 3011 } 3012 3013 fp_jobwait(job); 3014 3015 mutex_enter(&port->fp_mutex); 3016 if (job->job_result == FC_SUCCESS) { 3017 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3018 mutex_exit(&port->fp_mutex); 3019 fp_ns_init(port, job, KM_SLEEP); 3020 mutex_enter(&port->fp_mutex); 3021 } 3022 } else { 3023 if (state == FC_STATE_LOOP) { 3024 port->fp_topology = FC_TOP_PRIVATE_LOOP; 3025 port->fp_port_id.port_id = 3026 port->fp_lilp_map.lilp_myalpa & 0xFF; 3027 } 3028 } 3029 3030 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 3031 port, job); 3032 3033 return (FC_SUCCESS); 3034 } 3035 3036 3037 /* 3038 * Perform ULP invocations following FC port startup 3039 */ 3040 /* ARGSUSED */ 3041 static void 3042 fp_startup_done(opaque_t arg, uchar_t result) 3043 { 3044 fc_local_port_t *port = arg; 3045 3046 fp_attach_ulps(port, FC_CMD_ATTACH); 3047 3048 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 3049 } 3050 3051 3052 /* 3053 * Perform ULP port attach 3054 */ 3055 static void 3056 fp_ulp_port_attach(void *arg) 3057 { 3058 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 3059 fc_local_port_t *port = att->att_port; 3060 3061 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3062 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3063 3064 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3065 3066 if (att->att_need_pm_idle == B_TRUE) { 3067 fctl_idle_port(port); 3068 } 3069 3070 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3071 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3072 3073 mutex_enter(&att->att_port->fp_mutex); 3074 att->att_port->fp_ulp_attach = 0; 3075 3076 port->fp_task = port->fp_last_task; 3077 port->fp_last_task = FP_TASK_IDLE; 3078 3079 cv_signal(&att->att_port->fp_attach_cv); 3080 3081 mutex_exit(&att->att_port->fp_mutex); 3082 3083 kmem_free(att, sizeof (fp_soft_attach_t)); 3084 } 3085 3086 /* 3087 * Entry point to funnel all requests down to FCAs 3088 */ 3089 static int 3090 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3091 { 3092 int rval; 3093 3094 mutex_enter(&port->fp_mutex); 3095 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3096 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3097 FC_STATE_OFFLINE))) { 3098 /* 3099 * This means there is more than one state change 3100 * at this point of time - Since they are processed 3101 * serially, any processing of the current one should 3102 * be 
failed, and processing should move on to the next one 3103 */ 3104 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3105 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3106 if (cmd->cmd_job) { 3107 /* 3108 * A state change that is going to be invalidated 3109 * by another one already in the port driver's queue 3110 * need not go up to all ULPs. This will minimize 3111 * needless processing and ripples in ULP modules 3112 */ 3113 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3114 } 3115 mutex_exit(&port->fp_mutex); 3116 return (FC_STATEC_BUSY); 3117 } 3118 3119 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3120 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3121 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3122 mutex_exit(&port->fp_mutex); 3123 3124 return (FC_OFFLINE); 3125 } 3126 mutex_exit(&port->fp_mutex); 3127 3128 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3129 if (rval != FC_SUCCESS) { 3130 if (rval == FC_TRAN_BUSY) { 3131 cmd->cmd_retry_interval = fp_retry_delay; 3132 rval = fp_retry_cmd(&cmd->cmd_pkt); 3133 if (rval == FC_FAILURE) { 3134 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3135 } 3136 } 3137 } else { 3138 mutex_enter(&port->fp_mutex); 3139 port->fp_out_fpcmds++; 3140 mutex_exit(&port->fp_mutex); 3141 } 3142 3143 return (rval); 3144 } 3145 3146 3147 /* 3148 * Each time a timeout kicks in, walk the wait queue and decrement 3149 * the retry_interval; when the retry_interval becomes less than 3150 * or equal to zero, re-transport the command. If the re-transport 3151 * fails with BUSY, enqueue the command in the wait queue again. 3152 * 3153 * In order to prevent looping forever because of commands enqueued 3154 * from within this function itself, save the current tail pointer 3155 * (in cur_tail) and exit the loop after serving this command.
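 *
 * Purely as an illustration (the actual values of the fp_retry_*
 * tunables are defined elsewhere in the driver): if fp_retry_delay
 * amounted to two ticks of fp_retry_ticker, a command queued by
 * fp_enque_cmd() after a BUSY would merely be re-queued on the first
 * pass of this routine and re-transported on the second, once its
 * cmd_retry_interval has been decremented to zero.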
3156 */ 3157 static void 3158 fp_resendcmd(void *port_handle) 3159 { 3160 int rval; 3161 fc_local_port_t *port; 3162 fp_cmd_t *cmd; 3163 fp_cmd_t *cur_tail; 3164 3165 port = port_handle; 3166 mutex_enter(&port->fp_mutex); 3167 cur_tail = port->fp_wait_tail; 3168 mutex_exit(&port->fp_mutex); 3169 3170 while ((cmd = fp_deque_cmd(port)) != NULL) { 3171 cmd->cmd_retry_interval -= fp_retry_ticker; 3172 /* Check if we are detaching */ 3173 if (port->fp_soft_state & 3174 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3175 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3176 cmd->cmd_pkt.pkt_reason = 0; 3177 fp_iodone(cmd); 3178 } else if (cmd->cmd_retry_interval <= 0) { 3179 rval = cmd->cmd_transport(port->fp_fca_handle, 3180 &cmd->cmd_pkt); 3181 3182 if (rval != FC_SUCCESS) { 3183 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3184 if (--cmd->cmd_retry_count) { 3185 fp_enque_cmd(port, cmd); 3186 if (cmd == cur_tail) { 3187 break; 3188 } 3189 continue; 3190 } 3191 cmd->cmd_pkt.pkt_state = 3192 FC_PKT_TRAN_BSY; 3193 } else { 3194 cmd->cmd_pkt.pkt_state = 3195 FC_PKT_TRAN_ERROR; 3196 } 3197 cmd->cmd_pkt.pkt_reason = 0; 3198 fp_iodone(cmd); 3199 } else { 3200 mutex_enter(&port->fp_mutex); 3201 port->fp_out_fpcmds++; 3202 mutex_exit(&port->fp_mutex); 3203 } 3204 } else { 3205 fp_enque_cmd(port, cmd); 3206 } 3207 3208 if (cmd == cur_tail) { 3209 break; 3210 } 3211 } 3212 3213 mutex_enter(&port->fp_mutex); 3214 if (port->fp_wait_head) { 3215 timeout_id_t tid; 3216 3217 mutex_exit(&port->fp_mutex); 3218 tid = timeout(fp_resendcmd, (caddr_t)port, 3219 fp_retry_ticks); 3220 mutex_enter(&port->fp_mutex); 3221 port->fp_wait_tid = tid; 3222 } else { 3223 port->fp_wait_tid = NULL; 3224 } 3225 mutex_exit(&port->fp_mutex); 3226 } 3227 3228 3229 /* 3230 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here. 3231 * 3232 * Yes, as you can see below, cmd_retry_count is used here too. That means 3233 * the retries for BUSY are less if there were transport failures (transport 3234 * failure means fca_transport failure). 
The goal is not to exceed overall 3235 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3236 * 3237 * Return Values: 3238 * FC_SUCCESS 3239 * FC_FAILURE 3240 */ 3241 static int 3242 fp_retry_cmd(fc_packet_t *pkt) 3243 { 3244 fp_cmd_t *cmd; 3245 3246 cmd = pkt->pkt_ulp_private; 3247 3248 if (--cmd->cmd_retry_count) { 3249 fp_enque_cmd(cmd->cmd_port, cmd); 3250 return (FC_SUCCESS); 3251 } else { 3252 return (FC_FAILURE); 3253 } 3254 } 3255 3256 3257 /* 3258 * Queue up FC packet for deferred retry 3259 */ 3260 static void 3261 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3262 { 3263 timeout_id_t tid; 3264 3265 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3266 3267 #ifdef DEBUG 3268 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3269 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3270 #endif 3271 3272 mutex_enter(&port->fp_mutex); 3273 if (port->fp_wait_tail) { 3274 port->fp_wait_tail->cmd_next = cmd; 3275 port->fp_wait_tail = cmd; 3276 } else { 3277 ASSERT(port->fp_wait_head == NULL); 3278 port->fp_wait_head = port->fp_wait_tail = cmd; 3279 if (port->fp_wait_tid == NULL) { 3280 mutex_exit(&port->fp_mutex); 3281 tid = timeout(fp_resendcmd, (caddr_t)port, 3282 fp_retry_ticks); 3283 mutex_enter(&port->fp_mutex); 3284 port->fp_wait_tid = tid; 3285 } 3286 } 3287 mutex_exit(&port->fp_mutex); 3288 } 3289 3290 3291 /* 3292 * Handle all RJT codes 3293 */ 3294 static int 3295 fp_handle_reject(fc_packet_t *pkt) 3296 { 3297 int rval = FC_FAILURE; 3298 uchar_t next_class; 3299 fp_cmd_t *cmd; 3300 fc_local_port_t *port; 3301 3302 cmd = pkt->pkt_ulp_private; 3303 port = cmd->cmd_port; 3304 3305 switch (pkt->pkt_state) { 3306 case FC_PKT_FABRIC_RJT: 3307 case FC_PKT_NPORT_RJT: 3308 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3309 next_class = fp_get_nextclass(cmd->cmd_port, 3310 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3311 3312 if (next_class == FC_TRAN_CLASS_INVALID) { 3313 return (rval); 3314 } 3315 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3316 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3317 3318 rval = fp_sendcmd(cmd->cmd_port, cmd, 3319 cmd->cmd_port->fp_fca_handle); 3320 3321 if (rval != FC_SUCCESS) { 3322 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3323 } 3324 } 3325 break; 3326 3327 case FC_PKT_LS_RJT: 3328 case FC_PKT_BA_RJT: 3329 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3330 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3331 cmd->cmd_retry_interval = fp_retry_delay; 3332 rval = fp_retry_cmd(pkt); 3333 } 3334 break; 3335 3336 case FC_PKT_FS_RJT: 3337 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) || 3338 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) && 3339 (pkt->pkt_expln == 0x00))) { 3340 cmd->cmd_retry_interval = fp_retry_delay; 3341 rval = fp_retry_cmd(pkt); 3342 } 3343 break; 3344 3345 case FC_PKT_LOCAL_RJT: 3346 if (pkt->pkt_reason == FC_REASON_QFULL) { 3347 cmd->cmd_retry_interval = fp_retry_delay; 3348 rval = fp_retry_cmd(pkt); 3349 } 3350 break; 3351 3352 default: 3353 FP_TRACE(FP_NHEAD1(1, 0), 3354 "fp_handle_reject(): Invalid pkt_state"); 3355 break; 3356 } 3357 3358 return (rval); 3359 } 3360 3361 3362 /* 3363 * Return the next class of service supported by the FCA 3364 */ 3365 static uchar_t 3366 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3367 { 3368 uchar_t next_class; 3369 3370 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3371 3372 switch (cur_class) { 3373 case FC_TRAN_CLASS_INVALID: 3374 if (port->fp_cos & FC_NS_CLASS1) { 3375 next_class = FC_TRAN_CLASS1; 3376 break; 3377 } 3378 /* FALLTHROUGH */ 3379 3380 case 
FC_TRAN_CLASS1: 3381 if (port->fp_cos & FC_NS_CLASS2) { 3382 next_class = FC_TRAN_CLASS2; 3383 break; 3384 } 3385 /* FALLTHROUGH */ 3386 3387 case FC_TRAN_CLASS2: 3388 if (port->fp_cos & FC_NS_CLASS3) { 3389 next_class = FC_TRAN_CLASS3; 3390 break; 3391 } 3392 /* FALLTHROUGH */ 3393 3394 case FC_TRAN_CLASS3: 3395 default: 3396 next_class = FC_TRAN_CLASS_INVALID; 3397 break; 3398 } 3399 3400 return (next_class); 3401 } 3402 3403 3404 /* 3405 * Determine if a class of service is supported by the FCA 3406 */ 3407 static int 3408 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3409 { 3410 int rval; 3411 3412 switch (tran_class) { 3413 case FC_TRAN_CLASS1: 3414 if (cos & FC_NS_CLASS1) { 3415 rval = FC_SUCCESS; 3416 } else { 3417 rval = FC_FAILURE; 3418 } 3419 break; 3420 3421 case FC_TRAN_CLASS2: 3422 if (cos & FC_NS_CLASS2) { 3423 rval = FC_SUCCESS; 3424 } else { 3425 rval = FC_FAILURE; 3426 } 3427 break; 3428 3429 case FC_TRAN_CLASS3: 3430 if (cos & FC_NS_CLASS3) { 3431 rval = FC_SUCCESS; 3432 } else { 3433 rval = FC_FAILURE; 3434 } 3435 break; 3436 3437 default: 3438 rval = FC_FAILURE; 3439 break; 3440 } 3441 3442 return (rval); 3443 } 3444 3445 3446 /* 3447 * Dequeue FC packet for retry 3448 */ 3449 static fp_cmd_t * 3450 fp_deque_cmd(fc_local_port_t *port) 3451 { 3452 fp_cmd_t *cmd; 3453 3454 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3455 3456 mutex_enter(&port->fp_mutex); 3457 3458 if (port->fp_wait_head == NULL) { 3459 /* 3460 * To avoid races, NULL the fp_wait_tid as 3461 * we are about to exit the timeout thread. 3462 */ 3463 port->fp_wait_tid = NULL; 3464 mutex_exit(&port->fp_mutex); 3465 return (NULL); 3466 } 3467 3468 cmd = port->fp_wait_head; 3469 port->fp_wait_head = cmd->cmd_next; 3470 cmd->cmd_next = NULL; 3471 3472 if (port->fp_wait_head == NULL) { 3473 port->fp_wait_tail = NULL; 3474 } 3475 mutex_exit(&port->fp_mutex); 3476 3477 return (cmd); 3478 } 3479 3480 3481 /* 3482 * Wait for job completion 3483 */ 3484 static void 3485 fp_jobwait(job_request_t *job) 3486 { 3487 sema_p(&job->job_port_sema); 3488 } 3489 3490 3491 /* 3492 * Convert FC packet state to FC errno 3493 */ 3494 int 3495 fp_state_to_rval(uchar_t state) 3496 { 3497 int count; 3498 3499 for (count = 0; count < sizeof (fp_xlat) / 3500 sizeof (fp_xlat[0]); count++) { 3501 if (fp_xlat[count].xlat_state == state) { 3502 return (fp_xlat[count].xlat_rval); 3503 } 3504 } 3505 3506 return (FC_FAILURE); 3507 } 3508 3509 3510 /* 3511 * For Synchronous I/O requests, the caller is 3512 * expected to do fctl_jobdone(if necessary) 3513 * 3514 * We want to preserve at least one failure in the 3515 * job_result if it happens. 
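 * In other words, job_result is overwritten only while it still reads
 * FC_SUCCESS, so the first packet state that fp_state_to_rval()
 * translates to an error is the one the waiting caller eventually sees.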
3516 * 3517 */ 3518 static void 3519 fp_iodone(fp_cmd_t *cmd) 3520 { 3521 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3522 job_request_t *job = cmd->cmd_job; 3523 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3524 3525 ASSERT(job != NULL); 3526 ASSERT(cmd->cmd_port != NULL); 3527 ASSERT(&cmd->cmd_pkt != NULL); 3528 3529 mutex_enter(&job->job_mutex); 3530 if (job->job_result == FC_SUCCESS) { 3531 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3532 } 3533 mutex_exit(&job->job_mutex); 3534 3535 if (pd) { 3536 mutex_enter(&pd->pd_mutex); 3537 pd->pd_flags = PD_IDLE; 3538 mutex_exit(&pd->pd_mutex); 3539 } 3540 3541 if (ulp_pkt) { 3542 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3543 FP_IS_PKT_ERROR(ulp_pkt)) { 3544 fc_local_port_t *port; 3545 fc_remote_node_t *node; 3546 3547 port = cmd->cmd_port; 3548 3549 mutex_enter(&pd->pd_mutex); 3550 pd->pd_state = PORT_DEVICE_INVALID; 3551 pd->pd_ref_count--; 3552 node = pd->pd_remote_nodep; 3553 mutex_exit(&pd->pd_mutex); 3554 3555 ASSERT(node != NULL); 3556 ASSERT(port != NULL); 3557 3558 if (fctl_destroy_remote_port(port, pd) == 0) { 3559 fctl_destroy_remote_node(node); 3560 } 3561 3562 ulp_pkt->pkt_pd = NULL; 3563 } 3564 3565 ulp_pkt->pkt_comp(ulp_pkt); 3566 } 3567 3568 fp_free_pkt(cmd); 3569 fp_jobdone(job); 3570 } 3571 3572 3573 /* 3574 * Job completion handler 3575 */ 3576 static void 3577 fp_jobdone(job_request_t *job) 3578 { 3579 mutex_enter(&job->job_mutex); 3580 ASSERT(job->job_counter > 0); 3581 3582 if (--job->job_counter != 0) { 3583 mutex_exit(&job->job_mutex); 3584 return; 3585 } 3586 3587 if (job->job_ulp_pkts) { 3588 ASSERT(job->job_ulp_listlen > 0); 3589 kmem_free(job->job_ulp_pkts, 3590 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3591 } 3592 3593 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3594 mutex_exit(&job->job_mutex); 3595 fctl_jobdone(job); 3596 } else { 3597 mutex_exit(&job->job_mutex); 3598 sema_v(&job->job_port_sema); 3599 } 3600 } 3601 3602 3603 /* 3604 * Try to perform shutdown of a port during a detach. No return 3605 * value since the detach should not fail because the port shutdown 3606 * failed. 3607 */ 3608 static void 3609 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3610 { 3611 int index; 3612 int count; 3613 int flags; 3614 fp_cmd_t *cmd; 3615 struct pwwn_hash *head; 3616 fc_remote_port_t *pd; 3617 3618 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3619 3620 job->job_result = FC_SUCCESS; 3621 3622 if (port->fp_taskq) { 3623 /* 3624 * We must release the mutex here to ensure that other 3625 * potential jobs can complete their processing. Many 3626 * also need this mutex. 3627 */ 3628 mutex_exit(&port->fp_mutex); 3629 taskq_wait(port->fp_taskq); 3630 mutex_enter(&port->fp_mutex); 3631 } 3632 3633 if (port->fp_offline_tid) { 3634 timeout_id_t tid; 3635 3636 tid = port->fp_offline_tid; 3637 port->fp_offline_tid = NULL; 3638 mutex_exit(&port->fp_mutex); 3639 (void) untimeout(tid); 3640 mutex_enter(&port->fp_mutex); 3641 } 3642 3643 if (port->fp_wait_tid) { 3644 timeout_id_t tid; 3645 3646 tid = port->fp_wait_tid; 3647 port->fp_wait_tid = NULL; 3648 mutex_exit(&port->fp_mutex); 3649 (void) untimeout(tid); 3650 } else { 3651 mutex_exit(&port->fp_mutex); 3652 } 3653 3654 /* 3655 * While we cancel the timeout, let's also return the 3656 * the outstanding requests back to the callers. 
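 * Each command still sitting on the wait queue is completed with
 * FC_OFFLINE below, so its originator is not left waiting on a job
 * that would otherwise never finish.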
3657 */ 3658 while ((cmd = fp_deque_cmd(port)) != NULL) { 3659 ASSERT(cmd->cmd_job != NULL); 3660 cmd->cmd_job->job_result = FC_OFFLINE; 3661 fp_iodone(cmd); 3662 } 3663 3664 /* 3665 * Gracefully LOGO with all the devices logged in. 3666 */ 3667 mutex_enter(&port->fp_mutex); 3668 3669 for (count = index = 0; index < pwwn_table_size; index++) { 3670 head = &port->fp_pwwn_table[index]; 3671 pd = head->pwwn_head; 3672 while (pd != NULL) { 3673 mutex_enter(&pd->pd_mutex); 3674 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3675 count++; 3676 } 3677 mutex_exit(&pd->pd_mutex); 3678 pd = pd->pd_wwn_hnext; 3679 } 3680 } 3681 3682 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3683 flags = job->job_flags; 3684 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3685 } else { 3686 flags = 0; 3687 } 3688 if (count) { 3689 job->job_counter = count; 3690 3691 for (index = 0; index < pwwn_table_size; index++) { 3692 head = &port->fp_pwwn_table[index]; 3693 pd = head->pwwn_head; 3694 while (pd != NULL) { 3695 mutex_enter(&pd->pd_mutex); 3696 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3697 ASSERT(pd->pd_login_count > 0); 3698 /* 3699 * Force the counter to ONE in order 3700 * for us to really send LOGO els. 3701 */ 3702 pd->pd_login_count = 1; 3703 mutex_exit(&pd->pd_mutex); 3704 mutex_exit(&port->fp_mutex); 3705 (void) fp_logout(port, pd, job); 3706 mutex_enter(&port->fp_mutex); 3707 } else { 3708 mutex_exit(&pd->pd_mutex); 3709 } 3710 pd = pd->pd_wwn_hnext; 3711 } 3712 } 3713 mutex_exit(&port->fp_mutex); 3714 fp_jobwait(job); 3715 } else { 3716 mutex_exit(&port->fp_mutex); 3717 } 3718 3719 if (job->job_result != FC_SUCCESS) { 3720 FP_TRACE(FP_NHEAD1(9, 0), 3721 "Can't logout all devices. Proceeding with" 3722 " port shutdown"); 3723 job->job_result = FC_SUCCESS; 3724 } 3725 3726 fctl_destroy_all_remote_ports(port); 3727 3728 mutex_enter(&port->fp_mutex); 3729 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3730 mutex_exit(&port->fp_mutex); 3731 fp_ns_fini(port, job); 3732 } else { 3733 mutex_exit(&port->fp_mutex); 3734 } 3735 3736 if (flags) { 3737 job->job_flags = flags; 3738 } 3739 3740 mutex_enter(&port->fp_mutex); 3741 3742 } 3743 3744 3745 /* 3746 * Build the port driver's data structures based on the AL_PA list 3747 */ 3748 static void 3749 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3750 { 3751 int rval; 3752 int flag; 3753 int count; 3754 uint32_t d_id; 3755 fc_remote_port_t *pd; 3756 fc_lilpmap_t *lilp_map; 3757 3758 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3759 3760 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3761 job->job_result = FC_OFFLINE; 3762 mutex_exit(&port->fp_mutex); 3763 fp_jobdone(job); 3764 mutex_enter(&port->fp_mutex); 3765 return; 3766 } 3767 3768 if (port->fp_lilp_map.lilp_length == 0) { 3769 mutex_exit(&port->fp_mutex); 3770 job->job_result = FC_NO_MAP; 3771 fp_jobdone(job); 3772 mutex_enter(&port->fp_mutex); 3773 return; 3774 } 3775 mutex_exit(&port->fp_mutex); 3776 3777 lilp_map = &port->fp_lilp_map; 3778 job->job_counter = lilp_map->lilp_length; 3779 3780 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3781 flag = FP_CMD_PLOGI_RETAIN; 3782 } else { 3783 flag = FP_CMD_PLOGI_DONT_CARE; 3784 } 3785 3786 for (count = 0; count < lilp_map->lilp_length; count++) { 3787 d_id = lilp_map->lilp_alpalist[count]; 3788 3789 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3790 fp_jobdone(job); 3791 continue; 3792 } 3793 3794 pd = fctl_get_remote_port_by_did(port, d_id); 3795 if (pd) { 3796 mutex_enter(&pd->pd_mutex); 3797 if (flag == FP_CMD_PLOGI_DONT_CARE || 3798 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3799 mutex_exit(&pd->pd_mutex); 3800 fp_jobdone(job); 3801 continue; 3802 } 3803 mutex_exit(&pd->pd_mutex); 3804 } 3805 3806 rval = fp_port_login(port, d_id, job, flag, 3807 KM_SLEEP, pd, NULL); 3808 if (rval != FC_SUCCESS) { 3809 fp_jobdone(job); 3810 } 3811 } 3812 3813 mutex_enter(&port->fp_mutex); 3814 } 3815 3816 3817 /* 3818 * Perform loop ONLINE processing 3819 */ 3820 static void 3821 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3822 { 3823 int count; 3824 int rval; 3825 uint32_t d_id; 3826 uint32_t listlen; 3827 fc_lilpmap_t *lilp_map; 3828 fc_remote_port_t *pd; 3829 fc_portmap_t *changelist; 3830 3831 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3832 3833 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3834 port, job); 3835 3836 lilp_map = &port->fp_lilp_map; 3837 3838 if (lilp_map->lilp_length) { 3839 mutex_enter(&port->fp_mutex); 3840 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3841 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3842 mutex_exit(&port->fp_mutex); 3843 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3844 } else { 3845 mutex_exit(&port->fp_mutex); 3846 } 3847 3848 job->job_counter = lilp_map->lilp_length; 3849 3850 for (count = 0; count < lilp_map->lilp_length; count++) { 3851 d_id = lilp_map->lilp_alpalist[count]; 3852 3853 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3854 fp_jobdone(job); 3855 continue; 3856 } 3857 3858 pd = fctl_get_remote_port_by_did(port, d_id); 3859 if (pd != NULL) { 3860 #ifdef DEBUG 3861 mutex_enter(&pd->pd_mutex); 3862 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3863 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3864 } 3865 mutex_exit(&pd->pd_mutex); 3866 #endif 3867 fp_jobdone(job); 3868 continue; 3869 } 3870 3871 rval = fp_port_login(port, d_id, job, 3872 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3873 3874 if (rval != FC_SUCCESS) { 3875 fp_jobdone(job); 3876 } 3877 } 3878 fp_jobwait(job); 3879 } 3880 listlen = 0; 3881 changelist = NULL; 3882 3883 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3884 mutex_enter(&port->fp_mutex); 3885 ASSERT(port->fp_statec_busy > 0); 3886 if (port->fp_statec_busy == 1) { 3887 mutex_exit(&port->fp_mutex); 3888 fctl_fillout_map(port, &changelist, &listlen, 3889 1, 0, orphan); 3890 3891 mutex_enter(&port->fp_mutex); 3892 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3893 ASSERT(port->fp_total_devices == 0); 3894 port->fp_total_devices = port->fp_dev_count; 3895 } 3896 } else { 3897 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3898 } 3899 mutex_exit(&port->fp_mutex); 3900 } 3901 3902 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3903 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3904 listlen, listlen, KM_SLEEP); 3905 } else { 3906 mutex_enter(&port->fp_mutex); 3907 if (--port->fp_statec_busy == 0) { 3908 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3909 } 3910 ASSERT(changelist == NULL && listlen == 0); 3911 mutex_exit(&port->fp_mutex); 3912 } 3913 3914 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3915 port, job); 3916 } 3917 3918 3919 /* 3920 * Get an Arbitrated Loop map from the underlying FCA 3921 */ 3922 static int 3923 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3924 { 3925 int rval; 3926 3927 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3928 port, lilp_map); 3929 3930 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3931 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3932 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3933 3934 if (rval != FC_SUCCESS) { 3935 rval = FC_NO_MAP; 3936 } else if (lilp_map->lilp_length == 0 && 3937 (lilp_map->lilp_magic >= MAGIC_LISM && 3938 lilp_map->lilp_magic < MAGIC_LIRP)) { 3939 uchar_t lilp_length; 3940 3941 /* 3942 * Since the map length is zero, provide all 3943 * the valid AL_PAs for NL_ports discovery. 3944 */ 3945 lilp_length = sizeof (fp_valid_alpas) / 3946 sizeof (fp_valid_alpas[0]); 3947 lilp_map->lilp_length = lilp_length; 3948 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3949 lilp_length); 3950 } else { 3951 rval = fp_validate_lilp_map(lilp_map); 3952 3953 if (rval == FC_SUCCESS) { 3954 mutex_enter(&port->fp_mutex); 3955 port->fp_total_devices = lilp_map->lilp_length - 1; 3956 mutex_exit(&port->fp_mutex); 3957 } 3958 } 3959 3960 mutex_enter(&port->fp_mutex); 3961 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3962 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3963 mutex_exit(&port->fp_mutex); 3964 3965 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3966 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3967 FP_TRACE(FP_NHEAD1(9, 0), 3968 "FCA reset failed after LILP map was found" 3969 " to be invalid"); 3970 } 3971 } else if (rval == FC_SUCCESS) { 3972 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3973 mutex_exit(&port->fp_mutex); 3974 } else { 3975 mutex_exit(&port->fp_mutex); 3976 } 3977 3978 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3979 lilp_map); 3980 3981 return (rval); 3982 } 3983 3984 3985 /* 3986 * Perform Fabric Login: 3987 * 3988 * Return Values: 3989 * FC_SUCCESS 3990 * FC_FAILURE 3991 * FC_NOMEM 3992 * FC_TRANSPORT_ERROR 3993 * and a lot others defined in fc_error.h 3994 */ 3995 static int 3996 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3997 int flag, int sleep) 3998 { 3999 int rval; 4000 fp_cmd_t *cmd; 4001 uchar_t class; 4002 4003 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4004 4005 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 4006 port, job); 4007 4008 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4009 if (class == FC_TRAN_CLASS_INVALID) { 4010 return (FC_ELS_BAD); 4011 } 4012 4013 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4014 sizeof (la_els_logi_t), sleep, NULL); 4015 if (cmd == NULL) { 4016 return (FC_NOMEM); 4017 } 4018 4019 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4020 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4021 cmd->cmd_flags = flag; 4022 cmd->cmd_retry_count = fp_retry_count; 4023 cmd->cmd_ulp_pkt = NULL; 4024 4025 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 4026 job, LA_ELS_FLOGI); 4027 4028 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 4029 if (rval != FC_SUCCESS) { 4030 fp_free_pkt(cmd); 4031 } 4032 4033 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 4034 port, job); 4035 4036 return (rval); 4037 } 4038 4039 4040 /* 4041 * In some scenarios such as private loop device discovery period 4042 * the fc_remote_port_t data structure isn't allocated. The allocation 4043 * is done when the PLOGI is successful. 
In some other scenarios 4044 * such as Fabric topology, the fc_remote_port_t is already created 4045 * and initialized with appropriate values (as the NS provides 4046 * them) 4047 */ 4048 static int 4049 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4050 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4051 { 4052 uchar_t class; 4053 fp_cmd_t *cmd; 4054 uint32_t src_id; 4055 fc_remote_port_t *tmp_pd; 4056 int relogin; 4057 int found = 0; 4058 4059 #ifdef DEBUG 4060 if (pd == NULL) { 4061 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4062 } 4063 #endif 4064 ASSERT(job->job_counter > 0); 4065 4066 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4067 if (class == FC_TRAN_CLASS_INVALID) { 4068 return (FC_ELS_BAD); 4069 } 4070 4071 mutex_enter(&port->fp_mutex); 4072 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4073 mutex_exit(&port->fp_mutex); 4074 4075 relogin = 1; 4076 if (tmp_pd) { 4077 mutex_enter(&tmp_pd->pd_mutex); 4078 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4079 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4080 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4081 relogin = 0; 4082 } 4083 mutex_exit(&tmp_pd->pd_mutex); 4084 } 4085 4086 if (!relogin) { 4087 mutex_enter(&tmp_pd->pd_mutex); 4088 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4089 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4090 } 4091 mutex_exit(&tmp_pd->pd_mutex); 4092 4093 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4094 sizeof (la_els_adisc_t), sleep, tmp_pd); 4095 if (cmd == NULL) { 4096 return (FC_NOMEM); 4097 } 4098 4099 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4100 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4101 cmd->cmd_flags = cmd_flag; 4102 cmd->cmd_retry_count = fp_retry_count; 4103 cmd->cmd_ulp_pkt = ulp_pkt; 4104 4105 mutex_enter(&port->fp_mutex); 4106 mutex_enter(&tmp_pd->pd_mutex); 4107 fp_adisc_init(cmd, job); 4108 mutex_exit(&tmp_pd->pd_mutex); 4109 mutex_exit(&port->fp_mutex); 4110 4111 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4112 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4113 4114 } else { 4115 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4116 sizeof (la_els_logi_t), sleep, pd); 4117 if (cmd == NULL) { 4118 return (FC_NOMEM); 4119 } 4120 4121 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4122 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4123 cmd->cmd_flags = cmd_flag; 4124 cmd->cmd_retry_count = fp_retry_count; 4125 cmd->cmd_ulp_pkt = ulp_pkt; 4126 4127 mutex_enter(&port->fp_mutex); 4128 src_id = port->fp_port_id.port_id; 4129 mutex_exit(&port->fp_mutex); 4130 4131 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4132 job, LA_ELS_PLOGI); 4133 } 4134 4135 if (pd) { 4136 mutex_enter(&pd->pd_mutex); 4137 pd->pd_flags = PD_ELS_IN_PROGRESS; 4138 mutex_exit(&pd->pd_mutex); 4139 } 4140 4141 /* npiv check to make sure we don't log into ourself */ 4142 if (relogin && 4143 ((port->fp_npiv_type == FC_NPIV_PORT) || 4144 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4145 if ((d_id & 0xffff00) == 4146 (port->fp_port_id.port_id & 0xffff00)) { 4147 found = 1; 4148 } 4149 } 4150 4151 if (found || 4152 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4153 if (found) { 4154 fc_packet_t *pkt = &cmd->cmd_pkt; 4155 pkt->pkt_state = FC_PKT_NPORT_RJT; 4156 } 4157 if (pd) { 4158 mutex_enter(&pd->pd_mutex); 4159 pd->pd_flags = PD_IDLE; 4160 mutex_exit(&pd->pd_mutex); 4161 } 4162 4163 if (ulp_pkt) { 4164 fc_packet_t *pkt = &cmd->cmd_pkt; 4165 4166 ulp_pkt->pkt_state = pkt->pkt_state; 4167 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4168 ulp_pkt->pkt_action = pkt->pkt_action; 4169 ulp_pkt->pkt_expln = pkt->pkt_expln; 4170 } 4171 4172 fp_iodone(cmd); 4173 } 4174 4175 return (FC_SUCCESS); 4176 } 4177 4178 4179 /* 4180 * Register the LOGIN parameters with a port device 4181 */ 4182 static void 4183 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4184 la_els_logi_t *acc, uchar_t class) 4185 { 4186 fc_remote_node_t *node; 4187 4188 ASSERT(pd != NULL); 4189 4190 mutex_enter(&pd->pd_mutex); 4191 node = pd->pd_remote_nodep; 4192 if (pd->pd_login_count == 0) { 4193 pd->pd_login_count++; 4194 } 4195 4196 if (handle) { 4197 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_csp, 4198 (uint8_t *)&acc->common_service, 4199 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4200 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp1, 4201 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4202 DDI_DEV_AUTOINCR); 4203 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp2, 4204 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4205 DDI_DEV_AUTOINCR); 4206 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)&pd->pd_clsp3, 4207 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4208 DDI_DEV_AUTOINCR); 4209 } else { 4210 pd->pd_csp = acc->common_service; 4211 pd->pd_clsp1 = acc->class_1; 4212 pd->pd_clsp2 = acc->class_2; 4213 pd->pd_clsp3 = acc->class_3; 4214 } 4215 4216 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4217 pd->pd_login_class = class; 4218 mutex_exit(&pd->pd_mutex); 4219 4220 #ifndef __lock_lint 4221 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4222 pd->pd_port_id.port_id) == pd); 4223 #endif 4224 4225 mutex_enter(&node->fd_mutex); 4226 if (handle) { 4227 FC_GET_RSP(pd->pd_port, *handle, (uint8_t *)node->fd_vv, 4228 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4229 DDI_DEV_AUTOINCR); 4230 } else { 4231 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4232 } 4233 mutex_exit(&node->fd_mutex); 4234 } 4235 4236 4237 /* 4238 * Mark the remote port as OFFLINE 4239 */ 4240 static void 4241 fp_remote_port_offline(fc_remote_port_t *pd) 4242 { 4243 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4244 if (pd->pd_login_count && 4245 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4246 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4247 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4248 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4249 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4250 pd->pd_login_class = 0; 4251 } 4252 pd->pd_type = PORT_DEVICE_OLD; 4253 pd->pd_flags = PD_IDLE; 4254 fctl_tc_reset(&pd->pd_logo_tc); 4255 } 4256 4257 4258 /* 4259 * Deregistration of a port device 4260 */ 4261 static void 4262 fp_unregister_login(fc_remote_port_t *pd) 4263 { 4264 fc_remote_node_t *node; 4265 4266 ASSERT(pd != NULL); 4267 4268 mutex_enter(&pd->pd_mutex); 4269 pd->pd_login_count = 0; 4270 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4271 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4272 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4273 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4274 4275 pd->pd_state = PORT_DEVICE_VALID; 4276 pd->pd_login_class = 0; 4277 node = pd->pd_remote_nodep; 4278 mutex_exit(&pd->pd_mutex); 4279 4280 mutex_enter(&node->fd_mutex); 4281 bzero(node->fd_vv, sizeof (node->fd_vv)); 4282 mutex_exit(&node->fd_mutex); 4283 } 4284 4285 4286 /* 4287 * Handle OFFLINE state of an FCA port 4288 */ 4289 static void 4290 fp_port_offline(fc_local_port_t *port, int 
notify) 4291 { 4292 int index; 4293 int statec; 4294 timeout_id_t tid; 4295 struct pwwn_hash *head; 4296 fc_remote_port_t *pd; 4297 4298 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4299 4300 for (index = 0; index < pwwn_table_size; index++) { 4301 head = &port->fp_pwwn_table[index]; 4302 pd = head->pwwn_head; 4303 while (pd != NULL) { 4304 mutex_enter(&pd->pd_mutex); 4305 fp_remote_port_offline(pd); 4306 fctl_delist_did_table(port, pd); 4307 mutex_exit(&pd->pd_mutex); 4308 pd = pd->pd_wwn_hnext; 4309 } 4310 } 4311 port->fp_total_devices = 0; 4312 4313 statec = 0; 4314 if (notify) { 4315 /* 4316 * Decrement the statec busy counter as we 4317 * are almost done with handling the state 4318 * change 4319 */ 4320 ASSERT(port->fp_statec_busy > 0); 4321 if (--port->fp_statec_busy == 0) { 4322 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4323 } 4324 mutex_exit(&port->fp_mutex); 4325 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4326 0, 0, KM_SLEEP); 4327 mutex_enter(&port->fp_mutex); 4328 4329 if (port->fp_statec_busy) { 4330 statec++; 4331 } 4332 } else if (port->fp_statec_busy > 1) { 4333 statec++; 4334 } 4335 4336 if ((tid = port->fp_offline_tid) != NULL) { 4337 mutex_exit(&port->fp_mutex); 4338 (void) untimeout(tid); 4339 mutex_enter(&port->fp_mutex); 4340 } 4341 4342 if (!statec) { 4343 port->fp_offline_tid = timeout(fp_offline_timeout, 4344 (caddr_t)port, fp_offline_ticks); 4345 } 4346 } 4347 4348 4349 /* 4350 * Offline devices and send up a state change notification to ULPs 4351 */ 4352 static void 4353 fp_offline_timeout(void *port_handle) 4354 { 4355 int ret; 4356 fc_local_port_t *port = port_handle; 4357 uint32_t listlen = 0; 4358 fc_portmap_t *changelist = NULL; 4359 4360 mutex_enter(&port->fp_mutex); 4361 4362 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4363 (port->fp_soft_state & 4364 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4365 port->fp_dev_count == 0 || port->fp_statec_busy) { 4366 port->fp_offline_tid = NULL; 4367 mutex_exit(&port->fp_mutex); 4368 return; 4369 } 4370 4371 mutex_exit(&port->fp_mutex); 4372 4373 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4374 4375 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4376 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4377 FC_FCA_CORE)) != FC_SUCCESS) { 4378 FP_TRACE(FP_NHEAD1(9, ret), 4379 "Failed to force adapter dump"); 4380 } else { 4381 FP_TRACE(FP_NHEAD1(9, 0), 4382 "Forced adapter dump successfully"); 4383 } 4384 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4385 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4386 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4387 FP_TRACE(FP_NHEAD1(9, ret), 4388 "Failed to force adapter dump and reset"); 4389 } else { 4390 FP_TRACE(FP_NHEAD1(9, 0), 4391 "Forced adapter dump and reset successfully"); 4392 } 4393 } 4394 4395 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4396 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4397 listlen, listlen, KM_SLEEP); 4398 4399 mutex_enter(&port->fp_mutex); 4400 port->fp_offline_tid = NULL; 4401 mutex_exit(&port->fp_mutex); 4402 } 4403 4404 4405 /* 4406 * Perform general purpose ELS request initialization 4407 */ 4408 static void 4409 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4410 void (*comp) (), job_request_t *job) 4411 { 4412 fc_packet_t *pkt; 4413 4414 pkt = &cmd->cmd_pkt; 4415 cmd->cmd_job = job; 4416 4417 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4418 pkt->pkt_cmd_fhdr.d_id = d_id; 4419 pkt->pkt_cmd_fhdr.s_id = s_id; 4420 
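	/*
	 * The remaining frame header fields set up below are the same for
	 * every ELS request built here: the payload is typed as Extended
	 * Link Service data, sequence initiative is taken on the first
	 * sequence, and OX_ID/RX_ID are left at 0xffff (unassigned),
	 * presumably for the FCA to fill in.  The caller's completion
	 * routine and the default ELS timeout are installed last.
	 */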
pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4421 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4422 pkt->pkt_cmd_fhdr.seq_id = 0; 4423 pkt->pkt_cmd_fhdr.df_ctl = 0; 4424 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4425 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4426 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4427 pkt->pkt_cmd_fhdr.ro = 0; 4428 pkt->pkt_cmd_fhdr.rsvd = 0; 4429 pkt->pkt_comp = comp; 4430 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4431 } 4432 4433 4434 /* 4435 * Initialize PLOGI/FLOGI ELS request 4436 */ 4437 static void 4438 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4439 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4440 { 4441 ls_code_t payload; 4442 4443 fp_els_init(cmd, s_id, d_id, intr, job); 4444 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4445 4446 payload.ls_code = ls_code; 4447 payload.mbz = 0; 4448 4449 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, 4450 (uint8_t *)&port->fp_service_params, 4451 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4452 DDI_DEV_AUTOINCR); 4453 4454 FC_SET_CMD(port, cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4455 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4456 DDI_DEV_AUTOINCR); 4457 } 4458 4459 4460 /* 4461 * Initialize LOGO ELS request 4462 */ 4463 static void 4464 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4465 { 4466 fc_local_port_t *port; 4467 fc_packet_t *pkt; 4468 la_els_logo_t payload; 4469 4470 port = pd->pd_port; 4471 pkt = &cmd->cmd_pkt; 4472 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4473 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4474 4475 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4476 fp_logo_intr, job); 4477 4478 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4479 4480 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4481 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4482 4483 payload.ls_code.ls_code = LA_ELS_LOGO; 4484 payload.ls_code.mbz = 0; 4485 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4486 payload.nport_id = port->fp_port_id; 4487 4488 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4489 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4490 } 4491 4492 /* 4493 * Initialize RNID ELS request 4494 */ 4495 static void 4496 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4497 { 4498 fc_local_port_t *port; 4499 fc_packet_t *pkt; 4500 la_els_rnid_t payload; 4501 fc_remote_port_t *pd; 4502 4503 pkt = &cmd->cmd_pkt; 4504 pd = pkt->pkt_pd; 4505 port = pd->pd_port; 4506 4507 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4508 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4509 4510 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4511 fp_rnid_intr, job); 4512 4513 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4514 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4515 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4516 4517 payload.ls_code.ls_code = LA_ELS_RNID; 4518 payload.ls_code.mbz = 0; 4519 payload.data_format = flag; 4520 4521 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4522 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4523 } 4524 4525 /* 4526 * Initialize RLS ELS request 4527 */ 4528 static void 4529 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4530 { 4531 fc_local_port_t *port; 4532 fc_packet_t *pkt; 4533 la_els_rls_t payload; 4534 fc_remote_port_t *pd; 4535 4536 pkt = &cmd->cmd_pkt; 4537 pd = pkt->pkt_pd; 4538 port = pd->pd_port; 4539 4540 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4541 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4542 
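	/*
	 * As with the other ELS initializers in this file, the caller is
	 * expected to hold both fp_mutex and pd_mutex (asserted above).
	 * RLS reads the remote port's link error status block; the request
	 * payload built below just carries the local port identifier.
	 */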
4543 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4544 fp_rls_intr, job); 4545 4546 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4547 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4548 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4549 4550 payload.ls_code.ls_code = LA_ELS_RLS; 4551 payload.ls_code.mbz = 0; 4552 payload.rls_portid = port->fp_port_id; 4553 4554 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4555 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4556 } 4557 4558 4559 /* 4560 * Initialize an ADISC ELS request 4561 */ 4562 static void 4563 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4564 { 4565 fc_local_port_t *port; 4566 fc_packet_t *pkt; 4567 la_els_adisc_t payload; 4568 fc_remote_port_t *pd; 4569 4570 pkt = &cmd->cmd_pkt; 4571 pd = pkt->pkt_pd; 4572 port = pd->pd_port; 4573 4574 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4575 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4576 4577 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4578 fp_adisc_intr, job); 4579 4580 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4581 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4582 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4583 4584 payload.ls_code.ls_code = LA_ELS_ADISC; 4585 payload.ls_code.mbz = 0; 4586 payload.nport_id = port->fp_port_id; 4587 payload.port_wwn = port->fp_service_params.nport_ww_name; 4588 payload.node_wwn = port->fp_service_params.node_ww_name; 4589 payload.hard_addr = port->fp_hard_addr; 4590 4591 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 4592 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4593 } 4594 4595 4596 /* 4597 * Send up a state change notification to ULPs. 4598 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4599 */ 4600 static int 4601 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4602 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4603 { 4604 fc_port_clist_t *clist; 4605 fc_remote_port_t *pd; 4606 int count; 4607 4608 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4609 4610 clist = kmem_zalloc(sizeof (*clist), sleep); 4611 if (clist == NULL) { 4612 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4613 return (FC_NOMEM); 4614 } 4615 4616 clist->clist_state = state; 4617 4618 mutex_enter(&port->fp_mutex); 4619 clist->clist_flags = port->fp_topology; 4620 mutex_exit(&port->fp_mutex); 4621 4622 clist->clist_port = (opaque_t)port; 4623 clist->clist_len = listlen; 4624 clist->clist_size = alloc_len; 4625 clist->clist_map = changelist; 4626 4627 /* 4628 * Bump the reference count of each fc_remote_port_t in this changelist. 4629 * This is necessary since these devices will be sitting in a taskq 4630 * and referenced later. When the state change notification is 4631 * complete, the reference counts will be decremented. 
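	 *
	 * Entries that are not PORT_DEVICE_INVALID are also flagged
	 * PD_GIVEN_TO_ULPS below, so the rest of the driver can tell that
	 * a ULP may still be holding a reference to the device.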
4632 */ 4633 for (count = 0; count < clist->clist_len; count++) { 4634 pd = clist->clist_map[count].map_pd; 4635 4636 if (pd != NULL) { 4637 mutex_enter(&pd->pd_mutex); 4638 ASSERT((pd->pd_ref_count >= 0) || 4639 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4640 pd->pd_ref_count++; 4641 4642 if (clist->clist_map[count].map_state != 4643 PORT_DEVICE_INVALID) { 4644 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4645 } 4646 4647 mutex_exit(&pd->pd_mutex); 4648 } 4649 } 4650 4651 #ifdef DEBUG 4652 /* 4653 * Sanity check for presence of OLD devices in the hash lists 4654 */ 4655 if (clist->clist_size) { 4656 ASSERT(clist->clist_map != NULL); 4657 for (count = 0; count < clist->clist_len; count++) { 4658 if (clist->clist_map[count].map_state == 4659 PORT_DEVICE_INVALID) { 4660 la_wwn_t pwwn; 4661 fc_portid_t d_id; 4662 4663 pd = clist->clist_map[count].map_pd; 4664 ASSERT(pd != NULL); 4665 4666 mutex_enter(&pd->pd_mutex); 4667 pwwn = pd->pd_port_name; 4668 d_id = pd->pd_port_id; 4669 mutex_exit(&pd->pd_mutex); 4670 4671 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4672 ASSERT(pd != clist->clist_map[count].map_pd); 4673 4674 pd = fctl_get_remote_port_by_did(port, 4675 d_id.port_id); 4676 ASSERT(pd != clist->clist_map[count].map_pd); 4677 } 4678 } 4679 } 4680 #endif 4681 4682 mutex_enter(&port->fp_mutex); 4683 4684 if (state == FC_STATE_ONLINE) { 4685 if (--port->fp_statec_busy == 0) { 4686 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4687 } 4688 } 4689 mutex_exit(&port->fp_mutex); 4690 4691 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4692 clist, KM_SLEEP); 4693 4694 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4695 "state=%x, len=%d", port, state, listlen); 4696 4697 return (FC_SUCCESS); 4698 } 4699 4700 4701 /* 4702 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4703 */ 4704 static int 4705 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4706 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4707 { 4708 int ret; 4709 fc_port_clist_t *clist; 4710 4711 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4712 4713 clist = kmem_zalloc(sizeof (*clist), sleep); 4714 if (clist == NULL) { 4715 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4716 return (FC_NOMEM); 4717 } 4718 4719 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4720 4721 mutex_enter(&port->fp_mutex); 4722 clist->clist_flags = port->fp_topology; 4723 mutex_exit(&port->fp_mutex); 4724 4725 clist->clist_port = (opaque_t)port; 4726 clist->clist_len = listlen; 4727 clist->clist_size = alloc_len; 4728 clist->clist_map = changelist; 4729 4730 /* Send sysevents for target state changes */ 4731 4732 if (clist->clist_size) { 4733 int count; 4734 fc_remote_port_t *pd; 4735 4736 ASSERT(clist->clist_map != NULL); 4737 for (count = 0; count < clist->clist_len; count++) { 4738 pd = clist->clist_map[count].map_pd; 4739 4740 /* 4741 * Bump reference counts on all fc_remote_port_t 4742 * structs in this list. We don't know when the task 4743 * will fire, and we don't need these fc_remote_port_t 4744 * structs going away behind our back. 
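			 *
			 * In sketch form (illustrative only): pd_ref_count is
			 * bumped here, the clist is handed to taskq_dispatch()
			 * below, and the count is dropped again once the ULP
			 * state change callback has completed (see the removal
			 * note further below).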
4745 */ 4746 if (pd) { 4747 mutex_enter(&pd->pd_mutex); 4748 ASSERT((pd->pd_ref_count >= 0) || 4749 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4750 pd->pd_ref_count++; 4751 mutex_exit(&pd->pd_mutex); 4752 } 4753 4754 if (clist->clist_map[count].map_state == 4755 PORT_DEVICE_VALID) { 4756 if (clist->clist_map[count].map_type == 4757 PORT_DEVICE_NEW) { 4758 /* Update our state change counter */ 4759 mutex_enter(&port->fp_mutex); 4760 port->fp_last_change++; 4761 mutex_exit(&port->fp_mutex); 4762 4763 /* Additions */ 4764 fp_log_target_event(port, 4765 ESC_SUNFC_TARGET_ADD, 4766 clist->clist_map[count].map_pwwn, 4767 clist->clist_map[count].map_did. 4768 port_id); 4769 } 4770 4771 } else if ((clist->clist_map[count].map_type == 4772 PORT_DEVICE_OLD) && 4773 (clist->clist_map[count].map_state == 4774 PORT_DEVICE_INVALID)) { 4775 /* Update our state change counter */ 4776 mutex_enter(&port->fp_mutex); 4777 port->fp_last_change++; 4778 mutex_exit(&port->fp_mutex); 4779 4780 /* 4781 * For removals, we don't decrement 4782 * pd_ref_count until after the ULP's 4783 * state change callback function has 4784 * completed. 4785 */ 4786 4787 /* Removals */ 4788 fp_log_target_event(port, 4789 ESC_SUNFC_TARGET_REMOVE, 4790 clist->clist_map[count].map_pwwn, 4791 clist->clist_map[count].map_did.port_id); 4792 } 4793 4794 if (clist->clist_map[count].map_state != 4795 PORT_DEVICE_INVALID) { 4796 /* 4797 * Indicate that the ULPs are now aware of 4798 * this device. 4799 */ 4800 4801 mutex_enter(&pd->pd_mutex); 4802 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4803 mutex_exit(&pd->pd_mutex); 4804 } 4805 4806 #ifdef DEBUG 4807 /* 4808 * Sanity check for OLD devices in the hash lists 4809 */ 4810 if (pd && clist->clist_map[count].map_state == 4811 PORT_DEVICE_INVALID) { 4812 la_wwn_t pwwn; 4813 fc_portid_t d_id; 4814 4815 mutex_enter(&pd->pd_mutex); 4816 pwwn = pd->pd_port_name; 4817 d_id = pd->pd_port_id; 4818 mutex_exit(&pd->pd_mutex); 4819 4820 /* 4821 * This overwrites the 'pd' local variable. 4822 * Beware of this if 'pd' ever gets 4823 * referenced below this block. 
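				 *
				 * The lookups below are purely a sanity check:
				 * for an entry in the PORT_DEVICE_INVALID
				 * state, neither the PWWN nor the D_ID lookup
				 * is expected to hand back the same
				 * fc_remote_port_t, which is what the ASSERTs
				 * verify.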
4824 */ 4825 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4826 ASSERT(pd != clist->clist_map[count].map_pd); 4827 4828 pd = fctl_get_remote_port_by_did(port, 4829 d_id.port_id); 4830 ASSERT(pd != clist->clist_map[count].map_pd); 4831 } 4832 #endif 4833 } 4834 } 4835 4836 if (sync) { 4837 clist->clist_wait = 1; 4838 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4839 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4840 } 4841 4842 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4843 if (sync && ret) { 4844 mutex_enter(&clist->clist_mutex); 4845 while (clist->clist_wait) { 4846 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4847 } 4848 mutex_exit(&clist->clist_mutex); 4849 4850 mutex_destroy(&clist->clist_mutex); 4851 cv_destroy(&clist->clist_cv); 4852 kmem_free(clist, sizeof (*clist)); 4853 } 4854 4855 if (!ret) { 4856 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4857 "port=%p", port); 4858 kmem_free(clist->clist_map, 4859 sizeof (*(clist->clist_map)) * clist->clist_size); 4860 kmem_free(clist, sizeof (*clist)); 4861 } else { 4862 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4863 port, listlen); 4864 } 4865 4866 return (FC_SUCCESS); 4867 } 4868 4869 4870 /* 4871 * Perform PLOGI to the group of devices for ULPs 4872 */ 4873 static void 4874 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4875 { 4876 int offline; 4877 int count; 4878 int rval; 4879 uint32_t listlen; 4880 uint32_t done; 4881 uint32_t d_id; 4882 fc_remote_node_t *node; 4883 fc_remote_port_t *pd; 4884 fc_remote_port_t *tmp_pd; 4885 fc_packet_t *ulp_pkt; 4886 la_els_logi_t *els_data; 4887 ls_code_t ls_code; 4888 4889 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4890 port, job); 4891 4892 done = 0; 4893 listlen = job->job_ulp_listlen; 4894 job->job_counter = job->job_ulp_listlen; 4895 4896 mutex_enter(&port->fp_mutex); 4897 offline = (port->fp_statec_busy || 4898 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4899 mutex_exit(&port->fp_mutex); 4900 4901 for (count = 0; count < listlen; count++) { 4902 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4903 sizeof (la_els_logi_t)); 4904 4905 ulp_pkt = job->job_ulp_pkts[count]; 4906 pd = ulp_pkt->pkt_pd; 4907 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4908 4909 if (offline) { 4910 done++; 4911 4912 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4913 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4914 ulp_pkt->pkt_pd = NULL; 4915 ulp_pkt->pkt_comp(ulp_pkt); 4916 4917 job->job_ulp_pkts[count] = NULL; 4918 4919 fp_jobdone(job); 4920 continue; 4921 } 4922 4923 if (pd == NULL) { 4924 pd = fctl_get_remote_port_by_did(port, d_id); 4925 if (pd == NULL) { 4926 /* reset later */ 4927 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4928 continue; 4929 } 4930 mutex_enter(&pd->pd_mutex); 4931 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4932 mutex_exit(&pd->pd_mutex); 4933 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4934 done++; 4935 ulp_pkt->pkt_comp(ulp_pkt); 4936 job->job_ulp_pkts[count] = NULL; 4937 fp_jobdone(job); 4938 } else { 4939 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4940 mutex_exit(&pd->pd_mutex); 4941 } 4942 continue; 4943 } 4944 4945 switch (ulp_pkt->pkt_state) { 4946 case FC_PKT_ELS_IN_PROGRESS: 4947 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4948 /* FALLTHRU */ 4949 case FC_PKT_LOCAL_RJT: 4950 done++; 4951 ulp_pkt->pkt_comp(ulp_pkt); 4952 job->job_ulp_pkts[count] = NULL; 4953 fp_jobdone(job); 4954 continue; 4955 default: 4956 break; 4957 } 4958 4959 /* 4960 * Validate the pd corresponding to the d_id passed 4961 * by the ULPs 4962 */ 4963 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4964 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4965 done++; 4966 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4967 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4968 ulp_pkt->pkt_pd = NULL; 4969 ulp_pkt->pkt_comp(ulp_pkt); 4970 job->job_ulp_pkts[count] = NULL; 4971 fp_jobdone(job); 4972 continue; 4973 } 4974 4975 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4976 "port=%p, pd=%p", port, pd); 4977 4978 mutex_enter(&pd->pd_mutex); 4979 4980 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4981 done++; 4982 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4983 4984 ls_code.ls_code = LA_ELS_ACC; 4985 ls_code.mbz = 0; 4986 4987 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4988 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4989 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4990 4991 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4992 (uint8_t *)&pd->pd_csp, 4993 (uint8_t *)&els_data->common_service, 4994 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4995 4996 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 4997 (uint8_t *)&pd->pd_port_name, 4998 (uint8_t *)&els_data->nport_ww_name, 4999 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 5000 5001 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5002 (uint8_t *)&pd->pd_clsp1, 5003 (uint8_t *)&els_data->class_1, 5004 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 5005 5006 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5007 (uint8_t *)&pd->pd_clsp2, 5008 (uint8_t *)&els_data->class_2, 5009 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 5010 5011 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5012 (uint8_t *)&pd->pd_clsp3, 5013 (uint8_t *)&els_data->class_3, 5014 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 5015 5016 node = pd->pd_remote_nodep; 5017 pd->pd_login_count++; 5018 pd->pd_flags = PD_IDLE; 5019 ulp_pkt->pkt_pd = pd; 5020 mutex_exit(&pd->pd_mutex); 5021 5022 mutex_enter(&node->fd_mutex); 5023 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5024 (uint8_t *)&node->fd_node_name, 
5025 (uint8_t *)(&els_data->node_ww_name), 5026 sizeof (node->fd_node_name), DDI_DEV_AUTOINCR); 5027 5028 FC_SET_CMD(pd->pd_port, ulp_pkt->pkt_resp_acc, 5029 (uint8_t *)&node->fd_vv, 5030 (uint8_t *)(&els_data->vendor_version), 5031 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 5032 5033 mutex_exit(&node->fd_mutex); 5034 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 5035 } else { 5036 5037 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 5038 mutex_exit(&pd->pd_mutex); 5039 } 5040 5041 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 5042 ulp_pkt->pkt_comp(ulp_pkt); 5043 job->job_ulp_pkts[count] = NULL; 5044 fp_jobdone(job); 5045 } 5046 } 5047 5048 if (done == listlen) { 5049 fp_jobwait(job); 5050 fctl_jobdone(job); 5051 return; 5052 } 5053 5054 job->job_counter = listlen - done; 5055 5056 for (count = 0; count < listlen; count++) { 5057 int cmd_flags; 5058 5059 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5060 continue; 5061 } 5062 5063 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5064 5065 cmd_flags = FP_CMD_PLOGI_RETAIN; 5066 5067 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5068 ASSERT(d_id != 0); 5069 5070 pd = fctl_get_remote_port_by_did(port, d_id); 5071 5072 /* 5073 * We need to properly adjust the port device 5074 * reference counter before we assign the pd 5075 * to the ULP packets port device pointer. 5076 */ 5077 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5078 mutex_enter(&pd->pd_mutex); 5079 pd->pd_ref_count++; 5080 mutex_exit(&pd->pd_mutex); 5081 FP_TRACE(FP_NHEAD1(3, 0), 5082 "fp_plogi_group: DID = 0x%x using new pd %p \ 5083 old pd NULL\n", d_id, pd); 5084 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5085 ulp_pkt->pkt_pd != pd) { 5086 mutex_enter(&pd->pd_mutex); 5087 pd->pd_ref_count++; 5088 mutex_exit(&pd->pd_mutex); 5089 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5090 ulp_pkt->pkt_pd->pd_ref_count--; 5091 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5092 FP_TRACE(FP_NHEAD1(3, 0), 5093 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5094 d_id, ulp_pkt->pkt_pd, pd); 5095 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5096 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5097 ulp_pkt->pkt_pd->pd_ref_count--; 5098 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5099 FP_TRACE(FP_NHEAD1(3, 0), 5100 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5101 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5102 } 5103 5104 ulp_pkt->pkt_pd = pd; 5105 5106 if (pd != NULL) { 5107 mutex_enter(&pd->pd_mutex); 5108 d_id = pd->pd_port_id.port_id; 5109 pd->pd_flags = PD_ELS_IN_PROGRESS; 5110 mutex_exit(&pd->pd_mutex); 5111 } else { 5112 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5113 #ifdef DEBUG 5114 pd = fctl_get_remote_port_by_did(port, d_id); 5115 ASSERT(pd == NULL); 5116 #endif 5117 /* 5118 * In the Fabric topology, use NS to create 5119 * port device, and if that fails still try 5120 * with PLOGI - which will make yet another 5121 * attempt to create after successful PLOGI 5122 */ 5123 mutex_enter(&port->fp_mutex); 5124 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5125 mutex_exit(&port->fp_mutex); 5126 pd = fp_create_remote_port_by_ns(port, 5127 d_id, KM_SLEEP); 5128 if (pd) { 5129 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5130 5131 mutex_enter(&pd->pd_mutex); 5132 pd->pd_flags = PD_ELS_IN_PROGRESS; 5133 mutex_exit(&pd->pd_mutex); 5134 5135 FP_TRACE(FP_NHEAD1(3, 0), 5136 "fp_plogi_group;" 5137 " NS created PD port=%p, job=%p," 5138 " pd=%p", port, job, pd); 5139 } 5140 } else { 5141 mutex_exit(&port->fp_mutex); 5142 } 5143 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5144 FP_TRACE(FP_NHEAD1(3, 0), 5145 "fp_plogi_group;" 5146 
"ulp_pkt's pd is NULL, get a pd %p", 5147 pd); 5148 mutex_enter(&pd->pd_mutex); 5149 pd->pd_ref_count++; 5150 mutex_exit(&pd->pd_mutex); 5151 } 5152 ulp_pkt->pkt_pd = pd; 5153 } 5154 5155 rval = fp_port_login(port, d_id, job, cmd_flags, 5156 KM_SLEEP, pd, ulp_pkt); 5157 5158 if (rval == FC_SUCCESS) { 5159 continue; 5160 } 5161 5162 if (rval == FC_STATEC_BUSY) { 5163 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5164 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5165 } else { 5166 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5167 } 5168 5169 if (pd) { 5170 mutex_enter(&pd->pd_mutex); 5171 pd->pd_flags = PD_IDLE; 5172 mutex_exit(&pd->pd_mutex); 5173 } 5174 5175 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5176 ASSERT(pd != NULL); 5177 5178 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5179 " PD removed; port=%p, job=%p", port, job); 5180 5181 mutex_enter(&pd->pd_mutex); 5182 pd->pd_ref_count--; 5183 node = pd->pd_remote_nodep; 5184 mutex_exit(&pd->pd_mutex); 5185 5186 ASSERT(node != NULL); 5187 5188 if (fctl_destroy_remote_port(port, pd) == 0) { 5189 fctl_destroy_remote_node(node); 5190 } 5191 ulp_pkt->pkt_pd = NULL; 5192 } 5193 ulp_pkt->pkt_comp(ulp_pkt); 5194 fp_jobdone(job); 5195 } 5196 5197 fp_jobwait(job); 5198 fctl_jobdone(job); 5199 5200 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5201 port, job); 5202 } 5203 5204 5205 /* 5206 * Name server request initialization 5207 */ 5208 static void 5209 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5210 { 5211 int rval; 5212 int count; 5213 int size; 5214 5215 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5216 5217 job->job_counter = 1; 5218 job->job_result = FC_SUCCESS; 5219 5220 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5221 KM_SLEEP, NULL, NULL); 5222 5223 if (rval != FC_SUCCESS) { 5224 mutex_enter(&port->fp_mutex); 5225 port->fp_topology = FC_TOP_NO_NS; 5226 mutex_exit(&port->fp_mutex); 5227 return; 5228 } 5229 5230 fp_jobwait(job); 5231 5232 if (job->job_result != FC_SUCCESS) { 5233 mutex_enter(&port->fp_mutex); 5234 port->fp_topology = FC_TOP_NO_NS; 5235 mutex_exit(&port->fp_mutex); 5236 return; 5237 } 5238 5239 /* 5240 * At this time, we'll do NS registration for objects in the 5241 * ns_reg_cmds (see top of this file) array. 5242 * 5243 * Each time a ULP module registers with the transport, the 5244 * appropriate fc4 bit is set fc4 types and registered with 5245 * the NS for this support. Also, ULPs and FC admin utilities 5246 * may do registration for objects like IP address, symbolic 5247 * port/node name, Initial process associator at run time. 
5248 */ 5249 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5250 job->job_counter = size; 5251 job->job_result = FC_SUCCESS; 5252 5253 for (count = 0; count < size; count++) { 5254 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5255 job, 0, sleep) != FC_SUCCESS) { 5256 fp_jobdone(job); 5257 } 5258 } 5259 if (size) { 5260 fp_jobwait(job); 5261 } 5262 5263 job->job_result = FC_SUCCESS; 5264 5265 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5266 5267 if (port->fp_dev_count < FP_MAX_DEVICES) { 5268 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5269 } 5270 5271 job->job_counter = 1; 5272 5273 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5274 sleep) == FC_SUCCESS) { 5275 fp_jobwait(job); 5276 } 5277 } 5278 5279 5280 /* 5281 * Name server finish: 5282 * Unregister for RSCNs 5283 * Unregister all the host port objects in the Name Server 5284 * Perform LOGO with the NS; 5285 */ 5286 static void 5287 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5288 { 5289 fp_cmd_t *cmd; 5290 uchar_t class; 5291 uint32_t s_id; 5292 fc_packet_t *pkt; 5293 la_els_logo_t payload; 5294 5295 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5296 5297 job->job_counter = 1; 5298 5299 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5300 FC_SUCCESS) { 5301 fp_jobdone(job); 5302 } 5303 fp_jobwait(job); 5304 5305 job->job_counter = 1; 5306 5307 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5308 fp_jobdone(job); 5309 } 5310 fp_jobwait(job); 5311 5312 job->job_counter = 1; 5313 5314 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5315 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5316 pkt = &cmd->cmd_pkt; 5317 5318 mutex_enter(&port->fp_mutex); 5319 class = port->fp_ns_login_class; 5320 s_id = port->fp_port_id.port_id; 5321 payload.nport_id = port->fp_port_id; 5322 mutex_exit(&port->fp_mutex); 5323 5324 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5325 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5326 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5327 cmd->cmd_retry_count = 1; 5328 cmd->cmd_ulp_pkt = NULL; 5329 5330 if (port->fp_npiv_type == FC_NPIV_PORT) { 5331 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5332 } else { 5333 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5334 } 5335 5336 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5337 5338 payload.ls_code.ls_code = LA_ELS_LOGO; 5339 payload.ls_code.mbz = 0; 5340 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5341 5342 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 5343 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5344 5345 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5346 fp_iodone(cmd); 5347 } 5348 fp_jobwait(job); 5349 } 5350 5351 5352 /* 5353 * NS Registration function. 5354 * 5355 * It should be seriously noted that FC-GS-2 currently doesn't support 5356 * an Object Registration by a D_ID other than the owner of the object. 5357 * What we are aiming at currently is to at least allow Symbolic Node/Port 5358 * Name registration for any N_Port Identifier by the host software. 5359 * 5360 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5361 * function treats the request as Host NS Object. 
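 *
 * The cmd_code switch below covers the objects this driver knows how to
 * register: port and node names (NS_RPN_ID/NS_RNN_ID), class of service
 * (NS_RCS_ID), FC-4 types (NS_RFT_ID), port type (NS_RPT_ID), symbolic
 * port and node names (NS_RSPN_ID/NS_RSNN_NN), IP address and initial
 * process associator (NS_RIP_NN/NS_RIPA_NN), and NS_DA_ID to remove all
 * objects registered for a port identifier.
 *
 * A typical host-object, non-polled call mirrors the callers above
 * (fp_ns_init()/fp_ns_fini()):
 *
 *	job->job_counter = 1;
 *	if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) !=
 *	    FC_SUCCESS) {
 *		fp_jobdone(job);
 *	}
 *	fp_jobwait(job);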
5362 */ 5363 static int 5364 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5365 job_request_t *job, int polled, int sleep) 5366 { 5367 int rval; 5368 fc_portid_t s_id; 5369 fc_packet_t *pkt; 5370 fp_cmd_t *cmd; 5371 5372 if (pd == NULL) { 5373 mutex_enter(&port->fp_mutex); 5374 s_id = port->fp_port_id; 5375 mutex_exit(&port->fp_mutex); 5376 } else { 5377 mutex_enter(&pd->pd_mutex); 5378 s_id = pd->pd_port_id; 5379 mutex_exit(&pd->pd_mutex); 5380 } 5381 5382 if (polled) { 5383 job->job_counter = 1; 5384 } 5385 5386 switch (cmd_code) { 5387 case NS_RPN_ID: 5388 case NS_RNN_ID: { 5389 ns_rxn_req_t rxn; 5390 5391 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5392 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5393 if (cmd == NULL) { 5394 return (FC_NOMEM); 5395 } 5396 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5397 pkt = &cmd->cmd_pkt; 5398 5399 if (pd == NULL) { 5400 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5401 (port->fp_service_params.nport_ww_name) : 5402 (port->fp_service_params.node_ww_name)); 5403 } else { 5404 if (cmd_code == NS_RPN_ID) { 5405 mutex_enter(&pd->pd_mutex); 5406 rxn.rxn_xname = pd->pd_port_name; 5407 mutex_exit(&pd->pd_mutex); 5408 } else { 5409 fc_remote_node_t *node; 5410 5411 mutex_enter(&pd->pd_mutex); 5412 node = pd->pd_remote_nodep; 5413 mutex_exit(&pd->pd_mutex); 5414 5415 mutex_enter(&node->fd_mutex); 5416 rxn.rxn_xname = node->fd_node_name; 5417 mutex_exit(&node->fd_mutex); 5418 } 5419 } 5420 rxn.rxn_port_id = s_id; 5421 5422 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5423 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5424 sizeof (rxn), DDI_DEV_AUTOINCR); 5425 5426 break; 5427 } 5428 5429 case NS_RCS_ID: { 5430 ns_rcos_t rcos; 5431 5432 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5433 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5434 if (cmd == NULL) { 5435 return (FC_NOMEM); 5436 } 5437 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5438 pkt = &cmd->cmd_pkt; 5439 5440 if (pd == NULL) { 5441 rcos.rcos_cos = port->fp_cos; 5442 } else { 5443 mutex_enter(&pd->pd_mutex); 5444 rcos.rcos_cos = pd->pd_cos; 5445 mutex_exit(&pd->pd_mutex); 5446 } 5447 rcos.rcos_port_id = s_id; 5448 5449 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5450 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5451 sizeof (rcos), DDI_DEV_AUTOINCR); 5452 5453 break; 5454 } 5455 5456 case NS_RFT_ID: { 5457 ns_rfc_type_t rfc; 5458 5459 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5460 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5461 NULL); 5462 if (cmd == NULL) { 5463 return (FC_NOMEM); 5464 } 5465 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5466 pkt = &cmd->cmd_pkt; 5467 5468 if (pd == NULL) { 5469 mutex_enter(&port->fp_mutex); 5470 bcopy(port->fp_fc4_types, rfc.rfc_types, 5471 sizeof (port->fp_fc4_types)); 5472 mutex_exit(&port->fp_mutex); 5473 } else { 5474 mutex_enter(&pd->pd_mutex); 5475 bcopy(pd->pd_fc4types, rfc.rfc_types, 5476 sizeof (pd->pd_fc4types)); 5477 mutex_exit(&pd->pd_mutex); 5478 } 5479 rfc.rfc_port_id = s_id; 5480 5481 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5482 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5483 sizeof (rfc), DDI_DEV_AUTOINCR); 5484 5485 break; 5486 } 5487 5488 case NS_RSPN_ID: { 5489 uchar_t name_len; 5490 int pl_size; 5491 fc_portid_t spn; 5492 5493 if (pd == NULL) { 5494 mutex_enter(&port->fp_mutex); 5495 name_len = port->fp_sym_port_namelen; 5496 mutex_exit(&port->fp_mutex); 5497 } else { 5498 
mutex_enter(&pd->pd_mutex); 5499 name_len = pd->pd_spn_len; 5500 mutex_exit(&pd->pd_mutex); 5501 } 5502 5503 pl_size = sizeof (fc_portid_t) + name_len + 1; 5504 5505 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5506 sizeof (fc_reg_resp_t), sleep, NULL); 5507 if (cmd == NULL) { 5508 return (FC_NOMEM); 5509 } 5510 5511 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5512 5513 pkt = &cmd->cmd_pkt; 5514 5515 spn = s_id; 5516 5517 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5518 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5519 DDI_DEV_AUTOINCR); 5520 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5521 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5522 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5523 5524 if (pd == NULL) { 5525 mutex_enter(&port->fp_mutex); 5526 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5527 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5528 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5529 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5530 mutex_exit(&port->fp_mutex); 5531 } else { 5532 mutex_enter(&pd->pd_mutex); 5533 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5534 (uint8_t *)pd->pd_spn, 5535 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5536 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5537 mutex_exit(&pd->pd_mutex); 5538 } 5539 break; 5540 } 5541 5542 case NS_RPT_ID: { 5543 ns_rpt_t rpt; 5544 5545 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5546 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5547 if (cmd == NULL) { 5548 return (FC_NOMEM); 5549 } 5550 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5551 pkt = &cmd->cmd_pkt; 5552 5553 if (pd == NULL) { 5554 rpt.rpt_type = port->fp_port_type; 5555 } else { 5556 mutex_enter(&pd->pd_mutex); 5557 rpt.rpt_type = pd->pd_porttype; 5558 mutex_exit(&pd->pd_mutex); 5559 } 5560 rpt.rpt_port_id = s_id; 5561 5562 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5563 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5564 sizeof (rpt), DDI_DEV_AUTOINCR); 5565 5566 break; 5567 } 5568 5569 case NS_RIP_NN: { 5570 ns_rip_t rip; 5571 5572 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5573 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5574 if (cmd == NULL) { 5575 return (FC_NOMEM); 5576 } 5577 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5578 pkt = &cmd->cmd_pkt; 5579 5580 if (pd == NULL) { 5581 rip.rip_node_name = 5582 port->fp_service_params.node_ww_name; 5583 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5584 sizeof (port->fp_ip_addr)); 5585 } else { 5586 fc_remote_node_t *node; 5587 5588 /* 5589 * The most correct implementation should have the IP 5590 * address in the fc_remote_node_t structure; I believe 5591 * Node WWN and IP address should have one to one 5592 * correlation (but guess what this is changing in 5593 * FC-GS-2 latest draft) 5594 */ 5595 mutex_enter(&pd->pd_mutex); 5596 node = pd->pd_remote_nodep; 5597 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5598 sizeof (pd->pd_ip_addr)); 5599 mutex_exit(&pd->pd_mutex); 5600 5601 mutex_enter(&node->fd_mutex); 5602 rip.rip_node_name = node->fd_node_name; 5603 mutex_exit(&node->fd_mutex); 5604 } 5605 5606 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rip, 5607 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5608 sizeof (rip), DDI_DEV_AUTOINCR); 5609 5610 break; 5611 } 5612 5613 case NS_RIPA_NN: { 5614 ns_ipa_t ipa; 5615 5616 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5617 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5618 if (cmd == NULL) { 5619 return 
(FC_NOMEM); 5620 } 5621 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5622 pkt = &cmd->cmd_pkt; 5623 5624 if (pd == NULL) { 5625 ipa.ipa_node_name = 5626 port->fp_service_params.node_ww_name; 5627 bcopy(port->fp_ipa, ipa.ipa_value, 5628 sizeof (port->fp_ipa)); 5629 } else { 5630 fc_remote_node_t *node; 5631 5632 mutex_enter(&pd->pd_mutex); 5633 node = pd->pd_remote_nodep; 5634 mutex_exit(&pd->pd_mutex); 5635 5636 mutex_enter(&node->fd_mutex); 5637 ipa.ipa_node_name = node->fd_node_name; 5638 bcopy(node->fd_ipa, ipa.ipa_value, 5639 sizeof (node->fd_ipa)); 5640 mutex_exit(&node->fd_mutex); 5641 } 5642 5643 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5644 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5645 sizeof (ipa), DDI_DEV_AUTOINCR); 5646 5647 break; 5648 } 5649 5650 case NS_RSNN_NN: { 5651 uchar_t name_len; 5652 int pl_size; 5653 la_wwn_t snn; 5654 fc_remote_node_t *node = NULL; 5655 5656 if (pd == NULL) { 5657 mutex_enter(&port->fp_mutex); 5658 name_len = port->fp_sym_node_namelen; 5659 mutex_exit(&port->fp_mutex); 5660 } else { 5661 mutex_enter(&pd->pd_mutex); 5662 node = pd->pd_remote_nodep; 5663 mutex_exit(&pd->pd_mutex); 5664 5665 mutex_enter(&node->fd_mutex); 5666 name_len = node->fd_snn_len; 5667 mutex_exit(&node->fd_mutex); 5668 } 5669 5670 pl_size = sizeof (la_wwn_t) + name_len + 1; 5671 5672 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5673 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5674 if (cmd == NULL) { 5675 return (FC_NOMEM); 5676 } 5677 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5678 5679 pkt = &cmd->cmd_pkt; 5680 5681 bcopy(&port->fp_service_params.node_ww_name, 5682 &snn, sizeof (la_wwn_t)); 5683 5684 if (pd == NULL) { 5685 mutex_enter(&port->fp_mutex); 5686 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5687 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5688 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5689 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5690 mutex_exit(&port->fp_mutex); 5691 } else { 5692 ASSERT(node != NULL); 5693 mutex_enter(&node->fd_mutex); 5694 FC_SET_CMD(port, pkt->pkt_cmd_acc, 5695 (uint8_t *)node->fd_snn, 5696 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5697 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5698 mutex_exit(&node->fd_mutex); 5699 } 5700 5701 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&snn, 5702 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5703 sizeof (snn), DDI_DEV_AUTOINCR); 5704 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5705 (uint8_t *)(pkt->pkt_cmd 5706 + sizeof (fc_ct_header_t) + sizeof (snn)), 5707 1, DDI_DEV_AUTOINCR); 5708 5709 break; 5710 } 5711 5712 case NS_DA_ID: { 5713 ns_remall_t rall; 5714 char tmp[4] = {0}; 5715 char *ptr; 5716 5717 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5718 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5719 5720 if (cmd == NULL) { 5721 return (FC_NOMEM); 5722 } 5723 5724 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5725 pkt = &cmd->cmd_pkt; 5726 5727 ptr = (char *)(&s_id); 5728 tmp[3] = *ptr++; 5729 tmp[2] = *ptr++; 5730 tmp[1] = *ptr++; 5731 tmp[0] = *ptr; 5732 #if defined(_BIT_FIELDS_LTOH) 5733 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5734 #else 5735 rall.rem_port_id = s_id; 5736 #endif 5737 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&rall, 5738 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5739 sizeof (rall), DDI_DEV_AUTOINCR); 5740 5741 break; 5742 } 5743 5744 default: 5745 return (FC_FAILURE); 5746 } 5747 5748 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5749 
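	/*
	 * If fp_sendcmd() failed, the result is noted in the job and the
	 * command is completed in-line below.  Polled callers then wait
	 * here for the whole job; non-polled callers always get FC_SUCCESS
	 * back and pick up any error through job_result and the normal
	 * completion path.
	 */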
5750 if (rval != FC_SUCCESS) { 5751 job->job_result = rval; 5752 fp_iodone(cmd); 5753 } 5754 5755 if (polled) { 5756 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5757 fp_jobwait(job); 5758 } else { 5759 rval = FC_SUCCESS; 5760 } 5761 5762 return (rval); 5763 } 5764 5765 5766 /* 5767 * Common interrupt handler 5768 */ 5769 static int 5770 fp_common_intr(fc_packet_t *pkt, int iodone) 5771 { 5772 int rval = FC_FAILURE; 5773 fp_cmd_t *cmd; 5774 fc_local_port_t *port; 5775 5776 cmd = pkt->pkt_ulp_private; 5777 port = cmd->cmd_port; 5778 5779 /* 5780 * Fail fast the upper layer requests if 5781 * a state change has occurred amidst. 5782 */ 5783 mutex_enter(&port->fp_mutex); 5784 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5785 mutex_exit(&port->fp_mutex); 5786 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5787 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5788 } else if (!(port->fp_soft_state & 5789 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5790 mutex_exit(&port->fp_mutex); 5791 5792 switch (pkt->pkt_state) { 5793 case FC_PKT_LOCAL_BSY: 5794 case FC_PKT_FABRIC_BSY: 5795 case FC_PKT_NPORT_BSY: 5796 case FC_PKT_TIMEOUT: 5797 cmd->cmd_retry_interval = (pkt->pkt_state == 5798 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5799 rval = fp_retry_cmd(pkt); 5800 break; 5801 5802 case FC_PKT_FABRIC_RJT: 5803 case FC_PKT_NPORT_RJT: 5804 case FC_PKT_LOCAL_RJT: 5805 case FC_PKT_LS_RJT: 5806 case FC_PKT_FS_RJT: 5807 case FC_PKT_BA_RJT: 5808 rval = fp_handle_reject(pkt); 5809 break; 5810 5811 default: 5812 if (pkt->pkt_resp_resid) { 5813 cmd->cmd_retry_interval = 0; 5814 rval = fp_retry_cmd(pkt); 5815 } 5816 break; 5817 } 5818 } else { 5819 mutex_exit(&port->fp_mutex); 5820 } 5821 5822 if (rval != FC_SUCCESS && iodone) { 5823 fp_iodone(cmd); 5824 rval = FC_SUCCESS; 5825 } 5826 5827 return (rval); 5828 } 5829 5830 5831 /* 5832 * Some not so long winding theory on point to point topology: 5833 * 5834 * In the ACC payload, if the D_ID is ZERO and the common service 5835 * parameters indicate N_Port, then the topology is POINT TO POINT. 5836 * 5837 * In a point to point topology with an N_Port, during Fabric Login, 5838 * the destination N_Port will check with our WWN and decide if it 5839 * needs to issue PLOGI or not. That means, FLOGI could potentially 5840 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5841 * PLOGI creates the device handles. 5842 * 5843 * Assuming that the host port WWN is greater than the other N_Port 5844 * WWN, then we become the master (be aware that this isn't the word 5845 * used in the FC standards) and initiate the PLOGI. 5846 * 5847 */ 5848 static void 5849 fp_flogi_intr(fc_packet_t *pkt) 5850 { 5851 int state; 5852 int f_port; 5853 uint32_t s_id; 5854 uint32_t d_id; 5855 fp_cmd_t *cmd; 5856 fc_local_port_t *port; 5857 la_wwn_t *swwn; 5858 la_wwn_t dwwn; 5859 la_wwn_t nwwn; 5860 fc_remote_port_t *pd; 5861 la_els_logi_t *acc; 5862 com_svc_t csp; 5863 ls_code_t resp; 5864 5865 cmd = pkt->pkt_ulp_private; 5866 port = cmd->cmd_port; 5867 5868 mutex_enter(&port->fp_mutex); 5869 port->fp_out_fpcmds--; 5870 mutex_exit(&port->fp_mutex); 5871 5872 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5873 port, pkt, pkt->pkt_state); 5874 5875 if (FP_IS_PKT_ERROR(pkt)) { 5876 (void) fp_common_intr(pkt, 1); 5877 return; 5878 } 5879 5880 /* 5881 * Currently, we don't need to swap bytes here because qlc is faking the 5882 * response for us and so endianness is getting taken care of. 
But we 5883 * have to fix this and generalize this at some point 5884 */ 5885 acc = (la_els_logi_t *)pkt->pkt_resp; 5886 5887 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5888 sizeof (resp), DDI_DEV_AUTOINCR); 5889 5890 ASSERT(resp.ls_code == LA_ELS_ACC); 5891 if (resp.ls_code != LA_ELS_ACC) { 5892 (void) fp_common_intr(pkt, 1); 5893 return; 5894 } 5895 5896 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&csp, 5897 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5898 5899 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5900 5901 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5902 5903 mutex_enter(&port->fp_mutex); 5904 state = FC_PORT_STATE_MASK(port->fp_state); 5905 mutex_exit(&port->fp_mutex); 5906 5907 if (f_port == 0) { 5908 if (state != FC_STATE_LOOP) { 5909 swwn = &port->fp_service_params.nport_ww_name; 5910 5911 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5912 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5913 DDI_DEV_AUTOINCR); 5914 5915 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5916 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5917 DDI_DEV_AUTOINCR); 5918 5919 mutex_enter(&port->fp_mutex); 5920 5921 port->fp_topology = FC_TOP_PT_PT; 5922 port->fp_total_devices = 1; 5923 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5924 port->fp_ptpt_master = 1; 5925 /* 5926 * Let us choose 'X' as S_ID and 'Y' 5927 * as D_ID and that'll work; hopefully 5928 * If not, it will get changed. 5929 */ 5930 s_id = port->fp_instance + FP_DEFAULT_SID; 5931 d_id = port->fp_instance + FP_DEFAULT_DID; 5932 port->fp_port_id.port_id = s_id; 5933 mutex_exit(&port->fp_mutex); 5934 5935 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x" 5936 "pd %x", port->fp_port_id.port_id, d_id); 5937 pd = fctl_create_remote_port(port, 5938 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5939 KM_NOSLEEP); 5940 if (pd == NULL) { 5941 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5942 0, NULL, "couldn't create device" 5943 " d_id=%X", d_id); 5944 fp_iodone(cmd); 5945 return; 5946 } 5947 5948 cmd->cmd_pkt.pkt_tran_flags = 5949 pkt->pkt_tran_flags; 5950 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5951 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5952 cmd->cmd_retry_count = fp_retry_count; 5953 5954 fp_xlogi_init(port, cmd, s_id, d_id, 5955 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5956 5957 (&cmd->cmd_pkt)->pkt_pd = pd; 5958 5959 /* 5960 * We've just created this fc_remote_port_t, and 5961 * we're about to use it to send a PLOGI, so 5962 * bump the reference count right now. When 5963 * the packet is freed, the reference count will 5964 * be decremented. The ULP may also start using 5965 * it, so mark it as given away as well. 
5966 */ 5967 pd->pd_ref_count++; 5968 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5969 5970 if (fp_sendcmd(port, cmd, 5971 port->fp_fca_handle) == FC_SUCCESS) { 5972 return; 5973 } 5974 } else { 5975 /* 5976 * The device handles will be created when the 5977 * unsolicited PLOGI is completed successfully 5978 */ 5979 port->fp_ptpt_master = 0; 5980 mutex_exit(&port->fp_mutex); 5981 } 5982 } 5983 pkt->pkt_state = FC_PKT_FAILURE; 5984 } else { 5985 if (f_port) { 5986 mutex_enter(&port->fp_mutex); 5987 if (state == FC_STATE_LOOP) { 5988 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5989 } else { 5990 port->fp_topology = FC_TOP_FABRIC; 5991 5992 FC_GET_RSP(port, pkt->pkt_resp_acc, 5993 (uint8_t *)&port->fp_fabric_name, 5994 (uint8_t *)&acc->node_ww_name, 5995 sizeof (la_wwn_t), 5996 DDI_DEV_AUTOINCR); 5997 } 5998 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5999 mutex_exit(&port->fp_mutex); 6000 } else { 6001 pkt->pkt_state = FC_PKT_FAILURE; 6002 } 6003 } 6004 fp_iodone(cmd); 6005 } 6006 6007 6008 /* 6009 * Handle solicited PLOGI response 6010 */ 6011 static void 6012 fp_plogi_intr(fc_packet_t *pkt) 6013 { 6014 int nl_port; 6015 int bailout; 6016 uint32_t d_id; 6017 fp_cmd_t *cmd; 6018 la_els_logi_t *acc; 6019 fc_local_port_t *port; 6020 fc_remote_port_t *pd; 6021 la_wwn_t nwwn; 6022 la_wwn_t pwwn; 6023 ls_code_t resp; 6024 6025 nl_port = 0; 6026 cmd = pkt->pkt_ulp_private; 6027 port = cmd->cmd_port; 6028 d_id = pkt->pkt_cmd_fhdr.d_id; 6029 6030 #ifndef __lock_lint 6031 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6032 #endif 6033 6034 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 6035 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 6036 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 6037 6038 /* 6039 * Bail out early on ULP initiated requests if the 6040 * state change has occurred 6041 */ 6042 mutex_enter(&port->fp_mutex); 6043 port->fp_out_fpcmds--; 6044 bailout = ((port->fp_statec_busy || 6045 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6046 cmd->cmd_ulp_pkt) ? 1 : 0; 6047 mutex_exit(&port->fp_mutex); 6048 6049 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6050 int skip_msg = 0; 6051 int giveup = 0; 6052 6053 if (cmd->cmd_ulp_pkt) { 6054 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6055 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6056 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6057 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6058 } 6059 6060 /* 6061 * If an unsolicited cross login already created 6062 * a device speed up the discovery by not retrying 6063 * the command mindlessly. 6064 */ 6065 if (pkt->pkt_pd == NULL && 6066 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6067 fp_iodone(cmd); 6068 return; 6069 } 6070 6071 if (pkt->pkt_pd != NULL) { 6072 giveup = (pkt->pkt_pd->pd_recepient == 6073 PD_PLOGI_RECEPIENT) ? 
			    1 : 0;
			if (giveup) {
				/*
				 * This pd is marked as plogi
				 * recipient, stop retrying
				 */
				FP_TRACE(FP_NHEAD1(3, 0),
				    "fp_plogi_intr: stop retry as"
				    " a cross login was accepted"
				    " from d_id=%x, port=%p.",
				    d_id, port);
				fp_iodone(cmd);
				return;
			}
		}

		if (fp_common_intr(pkt, 0) == FC_SUCCESS) {
			return;
		}

		if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) {
			mutex_enter(&pd->pd_mutex);
			if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
				skip_msg++;
			}
			mutex_exit(&pd->pd_mutex);
		}

		mutex_enter(&port->fp_mutex);
		if (!bailout && !(skip_msg && port->fp_statec_busy) &&
		    port->fp_statec_busy <= 1 &&
		    pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) {
			mutex_exit(&port->fp_mutex);
			/*
			 * In case of login collisions, JNI HBAs return the
			 * FC pkt back to the initiator with the state set to
			 * FC_PKT_LS_RJT and the reason set to
			 * FC_REASON_LOGICAL_ERROR.  QLC HBAs handle such
			 * cases in the FW and do not return the LS_RJT with
			 * a logical error when a login collision happens.
			 */
			if ((pkt->pkt_state != FC_PKT_LS_RJT) ||
			    (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) {
				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt,
				    "PLOGI to %x failed", d_id);
			}
			FP_TRACE(FP_NHEAD2(9, 0),
			    "PLOGI to %x failed. state=%x reason=%x.",
			    d_id, pkt->pkt_state, pkt->pkt_reason);
		} else {
			mutex_exit(&port->fp_mutex);
		}

		fp_iodone(cmd);
		return;
	}

	acc = (la_els_logi_t *)pkt->pkt_resp;

	FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc,
	    sizeof (resp), DDI_DEV_AUTOINCR);

	ASSERT(resp.ls_code == LA_ELS_ACC);
	if (resp.ls_code != LA_ELS_ACC) {
		(void) fp_common_intr(pkt, 1);
		return;
	}

	if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) {
		mutex_enter(&port->fp_mutex);
		port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags);
		mutex_exit(&port->fp_mutex);
		fp_iodone(cmd);
		return;
	}

	ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp);

	FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn,
	    (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t),
	    DDI_DEV_AUTOINCR);

	FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn,
	    (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t),
	    DDI_DEV_AUTOINCR);

	ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE);
	ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE);

	if ((pd = pkt->pkt_pd) == NULL) {
		pd = fctl_get_remote_port_by_pwwn(port, &pwwn);
		if (pd == NULL) {
			FP_TRACE(FP_NHEAD2(9, 0), "fp_plogi_intr: fp %x pd %x",
			    port->fp_port_id.port_id, d_id);
			pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id,
			    PD_PLOGI_INITIATOR, KM_NOSLEEP);
			if (pd == NULL) {
				fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL,
				    "couldn't create port device handles"
				    " d_id=%x", d_id);
				fp_iodone(cmd);
				return;
			}
		} else {
			fc_remote_port_t *tmp_pd;

			tmp_pd = fctl_get_remote_port_by_did(port, d_id);
			if (tmp_pd != NULL) {
				fp_iodone(cmd);
				return;
			}

			mutex_enter(&port->fp_mutex);
			mutex_enter(&pd->pd_mutex);
			if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
			    (pd->pd_aux_flags & PD_LOGGED_OUT)) {
				cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN;
			}

			if (pd->pd_type == PORT_DEVICE_OLD) {
				if (pd->pd_port_id.port_id != d_id)
{ 6194 fctl_delist_did_table(port, pd); 6195 pd->pd_type = PORT_DEVICE_CHANGED; 6196 pd->pd_port_id.port_id = d_id; 6197 } else { 6198 pd->pd_type = PORT_DEVICE_NOCHANGE; 6199 } 6200 } 6201 6202 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6203 char ww_name[17]; 6204 6205 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6206 6207 mutex_exit(&pd->pd_mutex); 6208 mutex_exit(&port->fp_mutex); 6209 FP_TRACE(FP_NHEAD2(9, 0), 6210 "Possible Duplicate name or address" 6211 " identifiers in the PLOGI response" 6212 " D_ID=%x, PWWN=%s: Please check the" 6213 " configuration", d_id, ww_name); 6214 fp_iodone(cmd); 6215 return; 6216 } 6217 fctl_enlist_did_table(port, pd); 6218 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6219 mutex_exit(&pd->pd_mutex); 6220 mutex_exit(&port->fp_mutex); 6221 } 6222 } else { 6223 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6224 6225 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6226 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6227 6228 mutex_enter(&port->fp_mutex); 6229 mutex_enter(&pd->pd_mutex); 6230 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6231 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6232 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6233 pd->pd_type); 6234 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6235 pd->pd_type == PORT_DEVICE_OLD) || 6236 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6237 pd->pd_type = PORT_DEVICE_NOCHANGE; 6238 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6239 pd->pd_type = PORT_DEVICE_NEW; 6240 } 6241 } else { 6242 char old_name[17]; 6243 char new_name[17]; 6244 6245 fc_wwn_to_str(&pd->pd_port_name, old_name); 6246 fc_wwn_to_str(&pwwn, new_name); 6247 6248 FP_TRACE(FP_NHEAD1(9, 0), 6249 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6250 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6251 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6252 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6253 cmd->cmd_ulp_pkt, bailout); 6254 6255 FP_TRACE(FP_NHEAD2(9, 0), 6256 "PWWN of a device with D_ID=%x changed." 6257 " New PWWN = %s, OLD PWWN = %s", d_id, 6258 new_name, old_name); 6259 6260 if (cmd->cmd_ulp_pkt && !bailout) { 6261 fc_remote_node_t *rnodep; 6262 fc_portmap_t *changelist; 6263 fc_portmap_t *listptr; 6264 int len = 1; 6265 /* # entries in changelist */ 6266 6267 fctl_delist_pwwn_table(port, pd); 6268 6269 /* 6270 * Lets now check if there already is a pd with 6271 * this new WWN in the table. If so, we'll mark 6272 * it as invalid 6273 */ 6274 6275 if (new_wwn_pd) { 6276 /* 6277 * There is another pd with in the pwwn 6278 * table with the same WWN that we got 6279 * in the PLOGI payload. We have to get 6280 * it out of the pwwn table, update the 6281 * pd's state (fp_fillout_old_map does 6282 * this for us) and add it to the 6283 * changelist that goes up to ULPs. 6284 * 6285 * len is length of changelist and so 6286 * increment it. 6287 */ 6288 len++; 6289 6290 if (tmp_pd != pd) { 6291 /* 6292 * Odd case where pwwn and did 6293 * tables are out of sync but 6294 * we will handle that too. See 6295 * more comments below. 6296 * 6297 * One more device that ULPs 6298 * should know about and so len 6299 * gets incremented again. 6300 */ 6301 len++; 6302 } 6303 6304 listptr = changelist = kmem_zalloc(len * 6305 sizeof (*changelist), KM_SLEEP); 6306 6307 mutex_enter(&new_wwn_pd->pd_mutex); 6308 rnodep = new_wwn_pd->pd_remote_nodep; 6309 mutex_exit(&new_wwn_pd->pd_mutex); 6310 6311 /* 6312 * Hold the fd_mutex since 6313 * fctl_copy_portmap_held expects it. 
6314 * Preserve lock hierarchy by grabbing 6315 * fd_mutex before pd_mutex 6316 */ 6317 if (rnodep) { 6318 mutex_enter(&rnodep->fd_mutex); 6319 } 6320 mutex_enter(&new_wwn_pd->pd_mutex); 6321 fp_fillout_old_map_held(listptr++, 6322 new_wwn_pd, 0); 6323 mutex_exit(&new_wwn_pd->pd_mutex); 6324 if (rnodep) { 6325 mutex_exit(&rnodep->fd_mutex); 6326 } 6327 6328 /* 6329 * Safety check : 6330 * Lets ensure that the pwwn and did 6331 * tables are in sync. Ideally, we 6332 * should not find that these two pd's 6333 * are different. 6334 */ 6335 if (tmp_pd != pd) { 6336 mutex_enter(&tmp_pd->pd_mutex); 6337 rnodep = 6338 tmp_pd->pd_remote_nodep; 6339 mutex_exit(&tmp_pd->pd_mutex); 6340 6341 /* As above grab fd_mutex */ 6342 if (rnodep) { 6343 mutex_enter(&rnodep-> 6344 fd_mutex); 6345 } 6346 mutex_enter(&tmp_pd->pd_mutex); 6347 6348 fp_fillout_old_map_held( 6349 listptr++, tmp_pd, 0); 6350 6351 mutex_exit(&tmp_pd->pd_mutex); 6352 if (rnodep) { 6353 mutex_exit(&rnodep-> 6354 fd_mutex); 6355 } 6356 6357 /* 6358 * Now add "pd" (not tmp_pd) 6359 * to fp_did_table to sync it up 6360 * with fp_pwwn_table 6361 * 6362 * pd->pd_mutex is already held 6363 * at this point 6364 */ 6365 fctl_enlist_did_table(port, pd); 6366 } 6367 } else { 6368 listptr = changelist = kmem_zalloc( 6369 sizeof (*changelist), KM_SLEEP); 6370 } 6371 6372 ASSERT(changelist != NULL); 6373 6374 fp_fillout_changed_map(listptr, pd, &d_id, 6375 &pwwn); 6376 fctl_enlist_pwwn_table(port, pd); 6377 6378 mutex_exit(&pd->pd_mutex); 6379 mutex_exit(&port->fp_mutex); 6380 6381 fp_iodone(cmd); 6382 6383 (void) fp_ulp_devc_cb(port, changelist, len, 6384 len, KM_NOSLEEP, 0); 6385 6386 return; 6387 } 6388 } 6389 6390 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6391 nl_port = 1; 6392 } 6393 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6394 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6395 } 6396 6397 mutex_exit(&pd->pd_mutex); 6398 mutex_exit(&port->fp_mutex); 6399 6400 if (tmp_pd == NULL) { 6401 mutex_enter(&port->fp_mutex); 6402 mutex_enter(&pd->pd_mutex); 6403 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6404 char ww_name[17]; 6405 6406 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6407 mutex_exit(&pd->pd_mutex); 6408 mutex_exit(&port->fp_mutex); 6409 FP_TRACE(FP_NHEAD2(9, 0), 6410 "Possible Duplicate name or address" 6411 " identifiers in the PLOGI response" 6412 " D_ID=%x, PWWN=%s: Please check the" 6413 " configuration", d_id, ww_name); 6414 fp_iodone(cmd); 6415 return; 6416 } 6417 fctl_enlist_did_table(port, pd); 6418 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6419 mutex_exit(&pd->pd_mutex); 6420 mutex_exit(&port->fp_mutex); 6421 } 6422 } 6423 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6424 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6425 6426 if (cmd->cmd_ulp_pkt) { 6427 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6428 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6429 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6430 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6431 if (pd != NULL) { 6432 FP_TRACE(FP_NHEAD1(9, 0), 6433 "fp_plogi_intr;" 6434 "ulp_pkt's pd is NULL, get a pd %p", 6435 pd); 6436 mutex_enter(&pd->pd_mutex); 6437 pd->pd_ref_count++; 6438 mutex_exit(&pd->pd_mutex); 6439 } 6440 cmd->cmd_ulp_pkt->pkt_pd = pd; 6441 } 6442 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6443 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6444 sizeof (fc_frame_hdr_t)); 6445 bcopy((caddr_t)pkt->pkt_resp, 6446 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6447 sizeof (la_els_logi_t)); 6448 } 6449 6450 mutex_enter(&port->fp_mutex); 6451 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6452 
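		/*
		 * On a private loop (or when the remote port is an NL_Port)
		 * the PLOGI is followed up with an ADISC: the same fp_cmd_t
		 * is re-initialized as an ADISC below, which among other
		 * things re-validates the hard address assignment (see
		 * fp_adisc_intr()).
		 */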
mutex_enter(&pd->pd_mutex); 6453 6454 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6455 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6456 cmd->cmd_retry_count = fp_retry_count; 6457 6458 /* 6459 * If the fc_remote_port_t pointer is not set in the given 6460 * fc_packet_t, then this fc_remote_port_t must have just 6461 * been created. Save the pointer and also increment the 6462 * fc_remote_port_t reference count. 6463 */ 6464 if (pkt->pkt_pd == NULL) { 6465 pkt->pkt_pd = pd; 6466 pd->pd_ref_count++; /* It's in use! */ 6467 } 6468 6469 fp_adisc_init(cmd, cmd->cmd_job); 6470 6471 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6472 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6473 6474 mutex_exit(&pd->pd_mutex); 6475 mutex_exit(&port->fp_mutex); 6476 6477 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6478 return; 6479 } 6480 } else { 6481 mutex_exit(&port->fp_mutex); 6482 } 6483 6484 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6485 mutex_enter(&port->fp_mutex); 6486 mutex_enter(&pd->pd_mutex); 6487 6488 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6489 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6490 cmd->cmd_retry_count = fp_retry_count; 6491 6492 fp_logo_init(pd, cmd, cmd->cmd_job); 6493 6494 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6495 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6496 6497 mutex_exit(&pd->pd_mutex); 6498 mutex_exit(&port->fp_mutex); 6499 6500 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6501 return; 6502 } 6503 6504 } 6505 fp_iodone(cmd); 6506 } 6507 6508 6509 /* 6510 * Handle solicited ADISC response 6511 */ 6512 static void 6513 fp_adisc_intr(fc_packet_t *pkt) 6514 { 6515 int rval; 6516 int bailout; 6517 fp_cmd_t *cmd, *logi_cmd; 6518 fc_local_port_t *port; 6519 fc_remote_port_t *pd; 6520 la_els_adisc_t *acc; 6521 ls_code_t resp; 6522 fc_hardaddr_t ha; 6523 fc_portmap_t *changelist; 6524 int initiator, adiscfail = 0; 6525 6526 pd = pkt->pkt_pd; 6527 cmd = pkt->pkt_ulp_private; 6528 port = cmd->cmd_port; 6529 6530 #ifndef __lock_lint 6531 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6532 #endif 6533 6534 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6535 6536 mutex_enter(&port->fp_mutex); 6537 port->fp_out_fpcmds--; 6538 bailout = ((port->fp_statec_busy || 6539 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6540 cmd->cmd_ulp_pkt) ? 1 : 0; 6541 mutex_exit(&port->fp_mutex); 6542 6543 if (bailout) { 6544 fp_iodone(cmd); 6545 return; 6546 } 6547 6548 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6549 acc = (la_els_adisc_t *)pkt->pkt_resp; 6550 6551 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6552 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6553 6554 if (resp.ls_code == LA_ELS_ACC) { 6555 int is_private; 6556 6557 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&ha, 6558 (uint8_t *)&acc->hard_addr, sizeof (ha), 6559 DDI_DEV_AUTOINCR); 6560 6561 mutex_enter(&port->fp_mutex); 6562 6563 is_private = 6564 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6565 6566 mutex_enter(&pd->pd_mutex); 6567 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6568 fctl_enlist_did_table(port, pd); 6569 } 6570 mutex_exit(&pd->pd_mutex); 6571 6572 mutex_exit(&port->fp_mutex); 6573 6574 mutex_enter(&pd->pd_mutex); 6575 if (pd->pd_type != PORT_DEVICE_NEW) { 6576 if (is_private && (pd->pd_hard_addr.hard_addr != 6577 ha.hard_addr)) { 6578 pd->pd_type = PORT_DEVICE_CHANGED; 6579 } else { 6580 pd->pd_type = PORT_DEVICE_NOCHANGE; 6581 } 6582 } 6583 6584 if (is_private && (ha.hard_addr && 6585 pd->pd_port_id.port_id != ha.hard_addr)) { 6586 char ww_name[17]; 6587 6588 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6589 6590 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6591 "NL_Port Identifier %x doesn't match" 6592 " with Hard Address %x, Will use Port" 6593 " WWN %s", pd->pd_port_id.port_id, 6594 ha.hard_addr, ww_name); 6595 6596 pd->pd_hard_addr.hard_addr = 0; 6597 } else { 6598 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6599 } 6600 mutex_exit(&pd->pd_mutex); 6601 } else { 6602 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6603 return; 6604 } 6605 } 6606 } else { 6607 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6608 return; 6609 } 6610 6611 mutex_enter(&port->fp_mutex); 6612 if (port->fp_statec_busy <= 1) { 6613 mutex_exit(&port->fp_mutex); 6614 if (pkt->pkt_state == FC_PKT_LS_RJT && 6615 pkt->pkt_reason == FC_REASON_CMD_UNABLE) { 6616 uchar_t class; 6617 int cmd_flag; 6618 uint32_t src_id; 6619 6620 class = fp_get_nextclass(port, 6621 FC_TRAN_CLASS_INVALID); 6622 if (class == FC_TRAN_CLASS_INVALID) { 6623 fp_iodone(cmd); 6624 return; 6625 } 6626 6627 FP_TRACE(FP_NHEAD1(1, 0), "ADISC re-login; " 6628 "fp_state=0x%x, pkt_state=0x%x, " 6629 "reason=0x%x, class=0x%x", 6630 port->fp_state, pkt->pkt_state, 6631 pkt->pkt_reason, class); 6632 cmd_flag = FP_CMD_PLOGI_RETAIN; 6633 6634 logi_cmd = fp_alloc_pkt(port, 6635 sizeof (la_els_logi_t), 6636 sizeof (la_els_logi_t), KM_SLEEP, pd); 6637 if (logi_cmd == NULL) { 6638 fp_iodone(cmd); 6639 return; 6640 } 6641 6642 logi_cmd->cmd_pkt.pkt_tran_flags = 6643 FC_TRAN_INTR | class; 6644 logi_cmd->cmd_pkt.pkt_tran_type = 6645 FC_PKT_EXCHANGE; 6646 logi_cmd->cmd_flags = cmd_flag; 6647 logi_cmd->cmd_retry_count = fp_retry_count; 6648 logi_cmd->cmd_ulp_pkt = NULL; 6649 6650 mutex_enter(&port->fp_mutex); 6651 src_id = port->fp_port_id.port_id; 6652 mutex_exit(&port->fp_mutex); 6653 6654 fp_xlogi_init(port, logi_cmd, src_id, 6655 pkt->pkt_cmd_fhdr.d_id, fp_plogi_intr, 6656 cmd->cmd_job, LA_ELS_PLOGI); 6657 if (pd) { 6658 mutex_enter(&pd->pd_mutex); 6659 pd->pd_flags = PD_ELS_IN_PROGRESS; 6660 mutex_exit(&pd->pd_mutex); 6661 } 6662 6663 if (fp_sendcmd(port, logi_cmd, 6664 port->fp_fca_handle) == FC_SUCCESS) { 6665 fp_free_pkt(cmd); 6666 return; 6667 } else { 6668 fp_free_pkt(logi_cmd); 6669 } 6670 } else { 6671 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6672 "ADISC to %x failed, cmd_flags=%x", 6673 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6674 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6675 adiscfail = 1; 6676 } 6677 } else { 6678 mutex_exit(&port->fp_mutex); 6679 } 6680 } 6681 6682 if (cmd->cmd_ulp_pkt) { 6683 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6684 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6685 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6686 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6687 cmd->cmd_ulp_pkt->pkt_pd = pd; 6688 FP_TRACE(FP_NHEAD1(9, 0), 6689 "fp_adisc__intr;" 6690 "ulp_pkt's pd is NULL, get a pd %p", 6691 pd); 6692 6693 } 6694 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6695 
(caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6696 sizeof (fc_frame_hdr_t)); 6697 bcopy((caddr_t)pkt->pkt_resp, 6698 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6699 sizeof (la_els_adisc_t)); 6700 } 6701 6702 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6703 FP_TRACE(FP_NHEAD1(9, 0), 6704 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6705 "fp_retry_count=%x, ulp_pkt=%p", 6706 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6707 6708 mutex_enter(&port->fp_mutex); 6709 mutex_enter(&pd->pd_mutex); 6710 6711 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6712 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6713 cmd->cmd_retry_count = fp_retry_count; 6714 6715 fp_logo_init(pd, cmd, cmd->cmd_job); 6716 6717 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6718 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6719 6720 mutex_exit(&pd->pd_mutex); 6721 mutex_exit(&port->fp_mutex); 6722 6723 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6724 if (adiscfail) { 6725 mutex_enter(&pd->pd_mutex); 6726 initiator = 6727 ((pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0); 6728 pd->pd_state = PORT_DEVICE_VALID; 6729 pd->pd_aux_flags |= PD_LOGGED_OUT; 6730 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6731 pd->pd_type = PORT_DEVICE_NEW; 6732 } else { 6733 pd->pd_type = PORT_DEVICE_NOCHANGE; 6734 } 6735 mutex_exit(&pd->pd_mutex); 6736 6737 changelist = 6738 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6739 6740 if (initiator) { 6741 fp_unregister_login(pd); 6742 fctl_copy_portmap(changelist, pd); 6743 } else { 6744 fp_fillout_old_map(changelist, pd, 0); 6745 } 6746 6747 FP_TRACE(FP_NHEAD1(9, 0), 6748 "fp_adisc_intr: Dev change notification " 6749 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6750 "map_flags=%x initiator=%d", port, pd, 6751 changelist->map_type, changelist->map_state, 6752 changelist->map_flags, initiator); 6753 6754 (void) fp_ulp_devc_cb(port, changelist, 6755 1, 1, KM_SLEEP, 0); 6756 } 6757 if (rval == FC_SUCCESS) { 6758 return; 6759 } 6760 } 6761 fp_iodone(cmd); 6762 } 6763 6764 6765 /* 6766 * Handle solicited LOGO response 6767 */ 6768 static void 6769 fp_logo_intr(fc_packet_t *pkt) 6770 { 6771 ls_code_t resp; 6772 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6773 6774 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6775 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6776 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6777 6778 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6779 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6780 6781 if (FP_IS_PKT_ERROR(pkt)) { 6782 (void) fp_common_intr(pkt, 1); 6783 return; 6784 } 6785 6786 ASSERT(resp.ls_code == LA_ELS_ACC); 6787 if (resp.ls_code != LA_ELS_ACC) { 6788 (void) fp_common_intr(pkt, 1); 6789 return; 6790 } 6791 6792 if (pkt->pkt_pd != NULL) { 6793 fp_unregister_login(pkt->pkt_pd); 6794 } 6795 6796 fp_iodone(pkt->pkt_ulp_private); 6797 } 6798 6799 6800 /* 6801 * Handle solicited RNID response 6802 */ 6803 static void 6804 fp_rnid_intr(fc_packet_t *pkt) 6805 { 6806 ls_code_t resp; 6807 job_request_t *job; 6808 fp_cmd_t *cmd; 6809 la_els_rnid_acc_t *acc; 6810 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6811 6812 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6813 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6814 cmd = pkt->pkt_ulp_private; 6815 6816 mutex_enter(&cmd->cmd_port->fp_mutex); 6817 cmd->cmd_port->fp_out_fpcmds--; 6818 mutex_exit(&cmd->cmd_port->fp_mutex); 6819 6820 job = 
cmd->cmd_job; 6821 ASSERT(job->job_private != NULL); 6822 6823 /* If failure or LS_RJT then retry the packet, if needed */ 6824 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6825 (void) fp_common_intr(pkt, 1); 6826 return; 6827 } 6828 6829 /* Save node_id memory allocated in ioctl code */ 6830 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6831 6832 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6833 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6834 6835 /* wakeup the ioctl thread and free the pkt */ 6836 fp_iodone(cmd); 6837 } 6838 6839 6840 /* 6841 * Handle solicited RLS response 6842 */ 6843 static void 6844 fp_rls_intr(fc_packet_t *pkt) 6845 { 6846 ls_code_t resp; 6847 job_request_t *job; 6848 fp_cmd_t *cmd; 6849 la_els_rls_acc_t *acc; 6850 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 6851 6852 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp, 6853 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6854 cmd = pkt->pkt_ulp_private; 6855 6856 mutex_enter(&cmd->cmd_port->fp_mutex); 6857 cmd->cmd_port->fp_out_fpcmds--; 6858 mutex_exit(&cmd->cmd_port->fp_mutex); 6859 6860 job = cmd->cmd_job; 6861 ASSERT(job->job_private != NULL); 6862 6863 /* If failure or LS_RJT then retry the packet, if needed */ 6864 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6865 (void) fp_common_intr(pkt, 1); 6866 return; 6867 } 6868 6869 /* Save link error status block in memory allocated in ioctl code */ 6870 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6871 6872 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6873 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6874 DDI_DEV_AUTOINCR); 6875 6876 /* wakeup the ioctl thread and free the pkt */ 6877 fp_iodone(cmd); 6878 } 6879 6880 6881 /* 6882 * A solicited command completion interrupt (mostly for commands 6883 * that require almost no post processing such as SCR ELS) 6884 */ 6885 static void 6886 fp_intr(fc_packet_t *pkt) 6887 { 6888 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6889 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6890 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6891 6892 if (FP_IS_PKT_ERROR(pkt)) { 6893 (void) fp_common_intr(pkt, 1); 6894 return; 6895 } 6896 fp_iodone(pkt->pkt_ulp_private); 6897 } 6898 6899 6900 /* 6901 * Handle the underlying port's state change 6902 */ 6903 static void 6904 fp_statec_cb(opaque_t port_handle, uint32_t state) 6905 { 6906 fc_local_port_t *port = port_handle; 6907 job_request_t *job; 6908 6909 /* 6910 * If it is not possible to process the callbacks 6911 * just drop the callback on the floor; Don't bother 6912 * to do something that isn't safe at this time 6913 */ 6914 mutex_enter(&port->fp_mutex); 6915 if ((port->fp_soft_state & 6916 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6917 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6918 mutex_exit(&port->fp_mutex); 6919 return; 6920 } 6921 6922 if (port->fp_statec_busy == 0) { 6923 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6924 #ifdef DEBUG 6925 } else { 6926 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6927 #endif 6928 } 6929 6930 port->fp_statec_busy++; 6931 6932 /* 6933 * For now, force the trusted method of device authentication (by 6934 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 
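	 *
	 * In practice a LIP is folded into the LOOP state just below,
	 * keeping only the speed bits of the current port state, roughly:
	 *
	 *	state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP;
	 *
	 * so the switch() that follows only has to deal with the canonical
	 * OFFLINE, ONLINE/LOOP and RESET style transitions rather than the
	 * raw LIP state codes.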
6935 */ 6936 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6937 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6938 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6939 fp_port_offline(port, 0); 6940 } 6941 mutex_exit(&port->fp_mutex); 6942 6943 switch (FC_PORT_STATE_MASK(state)) { 6944 case FC_STATE_OFFLINE: 6945 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6946 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6947 if (job == NULL) { 6948 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6949 " fp_statec_cb() couldn't submit a job " 6950 " to the thread: failing.."); 6951 mutex_enter(&port->fp_mutex); 6952 if (--port->fp_statec_busy == 0) { 6953 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6954 } 6955 mutex_exit(&port->fp_mutex); 6956 return; 6957 } 6958 mutex_enter(&port->fp_mutex); 6959 /* 6960 * Zero out this field so that we do not retain 6961 * the fabric name as its no longer valid 6962 */ 6963 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6964 port->fp_state = state; 6965 mutex_exit(&port->fp_mutex); 6966 6967 fctl_enque_job(port, job); 6968 break; 6969 6970 case FC_STATE_ONLINE: 6971 case FC_STATE_LOOP: 6972 mutex_enter(&port->fp_mutex); 6973 port->fp_state = state; 6974 6975 if (port->fp_offline_tid) { 6976 timeout_id_t tid; 6977 6978 tid = port->fp_offline_tid; 6979 port->fp_offline_tid = NULL; 6980 mutex_exit(&port->fp_mutex); 6981 (void) untimeout(tid); 6982 } else { 6983 mutex_exit(&port->fp_mutex); 6984 } 6985 6986 job = fctl_alloc_job(JOB_PORT_ONLINE, 6987 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6988 if (job == NULL) { 6989 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6990 "fp_statec_cb() couldn't submit a job " 6991 "to the thread: failing.."); 6992 6993 mutex_enter(&port->fp_mutex); 6994 if (--port->fp_statec_busy == 0) { 6995 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6996 } 6997 mutex_exit(&port->fp_mutex); 6998 return; 6999 } 7000 fctl_enque_job(port, job); 7001 break; 7002 7003 case FC_STATE_RESET_REQUESTED: 7004 mutex_enter(&port->fp_mutex); 7005 port->fp_state = FC_STATE_OFFLINE; 7006 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 7007 mutex_exit(&port->fp_mutex); 7008 /* FALLTHROUGH */ 7009 7010 case FC_STATE_RESET: 7011 job = fctl_alloc_job(JOB_ULP_NOTIFY, 7012 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 7013 if (job == NULL) { 7014 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 7015 "fp_statec_cb() couldn't submit a job" 7016 " to the thread: failing.."); 7017 7018 mutex_enter(&port->fp_mutex); 7019 if (--port->fp_statec_busy == 0) { 7020 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7021 } 7022 mutex_exit(&port->fp_mutex); 7023 return; 7024 } 7025 7026 /* squeeze into some field in the job structure */ 7027 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 7028 fctl_enque_job(port, job); 7029 break; 7030 7031 case FC_STATE_TARGET_PORT_RESET: 7032 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 7033 /* FALLTHROUGH */ 7034 7035 case FC_STATE_NAMESERVICE: 7036 /* FALLTHROUGH */ 7037 7038 default: 7039 mutex_enter(&port->fp_mutex); 7040 if (--port->fp_statec_busy == 0) { 7041 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 7042 } 7043 mutex_exit(&port->fp_mutex); 7044 break; 7045 } 7046 } 7047 7048 7049 /* 7050 * Register with the Name Server for RSCNs 7051 */ 7052 static int 7053 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 7054 int sleep) 7055 { 7056 uint32_t s_id; 7057 uchar_t class; 7058 fc_scr_req_t payload; 7059 fp_cmd_t *cmd; 7060 fc_packet_t *pkt; 7061 7062 mutex_enter(&port->fp_mutex); 7063 s_id = 
port->fp_port_id.port_id; 7064 class = port->fp_ns_login_class; 7065 mutex_exit(&port->fp_mutex); 7066 7067 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 7068 sizeof (fc_scr_resp_t), sleep, NULL); 7069 if (cmd == NULL) { 7070 return (FC_NOMEM); 7071 } 7072 7073 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 7074 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 7075 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 7076 cmd->cmd_retry_count = fp_retry_count; 7077 cmd->cmd_ulp_pkt = NULL; 7078 7079 pkt = &cmd->cmd_pkt; 7080 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 7081 7082 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 7083 7084 payload.ls_code.ls_code = LA_ELS_SCR; 7085 payload.ls_code.mbz = 0; 7086 payload.scr_rsvd = 0; 7087 payload.scr_func = scr_func; 7088 7089 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 7090 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 7091 7092 job->job_counter = 1; 7093 7094 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 7095 fp_iodone(cmd); 7096 } 7097 7098 return (FC_SUCCESS); 7099 } 7100 7101 7102 /* 7103 * There are basically two methods to determine the total number of 7104 * devices out in the NS database; Reading the details of the two 7105 * methods described below, it shouldn't be hard to identify which 7106 * of the two methods is better. 7107 * 7108 * Method 1. 7109 * Iteratively issue GANs until all ports identifiers are walked 7110 * 7111 * Method 2. 7112 * Issue GID_PT (get port Identifiers) with Maximum residual 7113 * field in the request CT HEADER set to accommodate only the 7114 * CT HEADER in the response frame. And if FC-GS2 has been 7115 * carefully read, the NS here has a chance to FS_ACC the 7116 * request and indicate the residual size in the FS_ACC. 
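 *
 * As a worked example of Method 2: if the FS_ACC comes back with a
 * residual of 11 words, then 11 port identifiers (one word each) were
 * not transferred, and fp_ns_get_devcount() below settles on
 * 1 + 11 = 12 devices through its max_resid handling.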
7117 * 7118 * Method 2 is wonderful, although it's not mandatory for the NS 7119 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7120 * (note with particular care the use of the auxiliary verb 'may') 7121 * 7122 */ 7123 static int 7124 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7125 int sleep) 7126 { 7127 int flags; 7128 int rval; 7129 uint32_t src_id; 7130 fctl_ns_req_t *ns_cmd; 7131 7132 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7133 7134 mutex_enter(&port->fp_mutex); 7135 src_id = port->fp_port_id.port_id; 7136 mutex_exit(&port->fp_mutex); 7137 7138 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7139 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7140 sizeof (ns_resp_gid_pt_t), 0, 7141 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7142 7143 if (ns_cmd == NULL) { 7144 return (FC_NOMEM); 7145 } 7146 7147 ns_cmd->ns_cmd_code = NS_GID_PT; 7148 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7149 = FC_NS_PORT_NX; /* All port types */ 7150 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7151 7152 } else { 7153 uint32_t ns_flags; 7154 7155 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7156 if (create) { 7157 ns_flags |= FCTL_NS_CREATE_DEVICE; 7158 } 7159 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7160 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7161 7162 if (ns_cmd == NULL) { 7163 return (FC_NOMEM); 7164 } 7165 ns_cmd->ns_gan_index = 0; 7166 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7167 ns_cmd->ns_cmd_code = NS_GA_NXT; 7168 ns_cmd->ns_gan_max = 0xFFFF; 7169 7170 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7171 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7172 } 7173 7174 flags = job->job_flags; 7175 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7176 job->job_counter = 1; 7177 7178 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7179 job->job_flags = flags; 7180 7181 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7182 uint16_t max_resid; 7183 7184 /* 7185 * Revert to scanning the NS if NS_GID_PT isn't 7186 * helping us figure out total number of devices. 7187 */ 7188 if (job->job_result != FC_SUCCESS || 7189 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7190 mutex_enter(&port->fp_mutex); 7191 port->fp_options &= ~FP_NS_SMART_COUNT; 7192 mutex_exit(&port->fp_mutex); 7193 7194 fctl_free_ns_cmd(ns_cmd); 7195 return (fp_ns_get_devcount(port, job, create, sleep)); 7196 } 7197 7198 mutex_enter(&port->fp_mutex); 7199 port->fp_total_devices = 1; 7200 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7201 if (max_resid) { 7202 /* 7203 * Since port identifier is 4 bytes and max_resid 7204 * is also in WORDS, max_resid simply indicates 7205 * the total number of port identifiers not 7206 * transferred 7207 */ 7208 port->fp_total_devices += max_resid; 7209 } 7210 mutex_exit(&port->fp_mutex); 7211 } 7212 mutex_enter(&port->fp_mutex); 7213 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7214 mutex_exit(&port->fp_mutex); 7215 fctl_free_ns_cmd(ns_cmd); 7216 7217 return (rval); 7218 } 7219 7220 /* 7221 * One heck of a function to serve userland. 
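 *
 * Most of the FCIO_* cases below follow the same shape, sketched here
 * with error handling elided (all names are from this file):
 *
 *	validate fcio_olen/fcio_xfer for the command;
 *	val = kmem_zalloc(sizeof (*val), KM_SLEEP);
 *	mutex_enter(&port->fp_mutex);
 *	... snapshot the requested state into val ...
 *	mutex_exit(&port->fp_mutex);
 *	if (fp_copyout((void *)val, (void *)fcio->fcio_obuf,
 *	    fcio->fcio_olen, mode) == 0)
 *		(void) fp_fcio_copyout(fcio, data, mode);
 *	kmem_free(val, sizeof (*val));
 *
 * with a 32-bit mirror structure (val32) filled in instead whenever the
 * caller is an ILP32 process (the use32 checks).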
7222 */ 7223 static int 7224 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7225 { 7226 int rval = 0; 7227 int jcode; 7228 uint32_t ret; 7229 uchar_t open_flag; 7230 fcio_t *kfcio; 7231 job_request_t *job; 7232 boolean_t use32 = B_FALSE; 7233 7234 #ifdef _MULTI_DATAMODEL 7235 switch (ddi_model_convert_from(mode & FMODELS)) { 7236 case DDI_MODEL_ILP32: 7237 use32 = B_TRUE; 7238 break; 7239 7240 case DDI_MODEL_NONE: 7241 default: 7242 break; 7243 } 7244 #endif 7245 7246 mutex_enter(&port->fp_mutex); 7247 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7248 FP_SOFT_IN_UNSOL_CB)) { 7249 fcio->fcio_errno = FC_STATEC_BUSY; 7250 mutex_exit(&port->fp_mutex); 7251 rval = EAGAIN; 7252 if (fp_fcio_copyout(fcio, data, mode)) { 7253 rval = EFAULT; 7254 } 7255 return (rval); 7256 } 7257 open_flag = port->fp_flag; 7258 mutex_exit(&port->fp_mutex); 7259 7260 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7261 fcio->fcio_errno = FC_FAILURE; 7262 rval = EACCES; 7263 if (fp_fcio_copyout(fcio, data, mode)) { 7264 rval = EFAULT; 7265 } 7266 return (rval); 7267 } 7268 7269 /* 7270 * If an exclusive open was demanded during open, don't let 7271 * either innocuous or devil threads to share the file 7272 * descriptor and fire down exclusive access commands 7273 */ 7274 mutex_enter(&port->fp_mutex); 7275 if (port->fp_flag & FP_EXCL) { 7276 if (port->fp_flag & FP_EXCL_BUSY) { 7277 mutex_exit(&port->fp_mutex); 7278 fcio->fcio_errno = FC_FAILURE; 7279 return (EBUSY); 7280 } 7281 port->fp_flag |= FP_EXCL_BUSY; 7282 } 7283 mutex_exit(&port->fp_mutex); 7284 7285 fcio->fcio_errno = FC_SUCCESS; 7286 7287 switch (fcio->fcio_cmd) { 7288 case FCIO_GET_HOST_PARAMS: { 7289 fc_port_dev_t *val; 7290 fc_port_dev32_t *val32; 7291 int index; 7292 int lilp_device_count; 7293 fc_lilpmap_t *lilp_map; 7294 uchar_t *alpa_list; 7295 7296 if (use32 == B_TRUE) { 7297 if (fcio->fcio_olen != sizeof (*val32) || 7298 fcio->fcio_xfer != FCIO_XFER_READ) { 7299 rval = EINVAL; 7300 break; 7301 } 7302 } else { 7303 if (fcio->fcio_olen != sizeof (*val) || 7304 fcio->fcio_xfer != FCIO_XFER_READ) { 7305 rval = EINVAL; 7306 break; 7307 } 7308 } 7309 7310 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7311 7312 mutex_enter(&port->fp_mutex); 7313 val->dev_did = port->fp_port_id; 7314 val->dev_hard_addr = port->fp_hard_addr; 7315 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7316 val->dev_nwwn = port->fp_service_params.node_ww_name; 7317 val->dev_state = port->fp_state; 7318 7319 lilp_map = &port->fp_lilp_map; 7320 alpa_list = &lilp_map->lilp_alpalist[0]; 7321 lilp_device_count = lilp_map->lilp_length; 7322 for (index = 0; index < lilp_device_count; index++) { 7323 uint32_t d_id; 7324 7325 d_id = alpa_list[index]; 7326 if (d_id == port->fp_port_id.port_id) { 7327 break; 7328 } 7329 } 7330 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7331 7332 bcopy(port->fp_fc4_types, val->dev_type, 7333 sizeof (port->fp_fc4_types)); 7334 mutex_exit(&port->fp_mutex); 7335 7336 if (use32 == B_TRUE) { 7337 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7338 7339 val32->dev_did = val->dev_did; 7340 val32->dev_hard_addr = val->dev_hard_addr; 7341 val32->dev_pwwn = val->dev_pwwn; 7342 val32->dev_nwwn = val->dev_nwwn; 7343 val32->dev_state = val->dev_state; 7344 val32->dev_did.priv_lilp_posit = 7345 val->dev_did.priv_lilp_posit; 7346 7347 bcopy(val->dev_type, val32->dev_type, 7348 sizeof (port->fp_fc4_types)); 7349 7350 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7351 fcio->fcio_olen, mode) == 0) { 7352 if 
(fp_fcio_copyout(fcio, data, mode)) { 7353 rval = EFAULT; 7354 } 7355 } else { 7356 rval = EFAULT; 7357 } 7358 7359 kmem_free(val32, sizeof (*val32)); 7360 } else { 7361 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7362 fcio->fcio_olen, mode) == 0) { 7363 if (fp_fcio_copyout(fcio, data, mode)) { 7364 rval = EFAULT; 7365 } 7366 } else { 7367 rval = EFAULT; 7368 } 7369 } 7370 7371 /* need to free "val" here */ 7372 kmem_free(val, sizeof (*val)); 7373 break; 7374 } 7375 7376 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7377 uint32_t index; 7378 char *tmpPath; 7379 fc_local_port_t *tmpPort; 7380 7381 if (fcio->fcio_olen < MAXPATHLEN || 7382 fcio->fcio_ilen != sizeof (uint32_t)) { 7383 rval = EINVAL; 7384 break; 7385 } 7386 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7387 rval = EFAULT; 7388 break; 7389 } 7390 7391 tmpPort = fctl_get_adapter_port_by_index(port, index); 7392 if (tmpPort == NULL) { 7393 FP_TRACE(FP_NHEAD1(9, 0), 7394 "User supplied index out of range"); 7395 fcio->fcio_errno = FC_BADPORT; 7396 rval = EFAULT; 7397 if (fp_fcio_copyout(fcio, data, mode)) { 7398 rval = EFAULT; 7399 } 7400 break; 7401 } 7402 7403 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7404 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7405 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7406 MAXPATHLEN, mode) == 0) { 7407 if (fp_fcio_copyout(fcio, data, mode)) { 7408 rval = EFAULT; 7409 } 7410 } else { 7411 rval = EFAULT; 7412 } 7413 kmem_free(tmpPath, MAXPATHLEN); 7414 break; 7415 } 7416 7417 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7418 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7419 fc_hba_adapter_attributes_t *val; 7420 fc_hba_adapter_attributes32_t *val32; 7421 7422 if (use32 == B_TRUE) { 7423 if (fcio->fcio_olen < sizeof (*val32) || 7424 fcio->fcio_xfer != FCIO_XFER_READ) { 7425 rval = EINVAL; 7426 break; 7427 } 7428 } else { 7429 if (fcio->fcio_olen < sizeof (*val) || 7430 fcio->fcio_xfer != FCIO_XFER_READ) { 7431 rval = EINVAL; 7432 break; 7433 } 7434 } 7435 7436 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7437 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7438 mutex_enter(&port->fp_mutex); 7439 bcopy(port->fp_hba_port_attrs.manufacturer, 7440 val->Manufacturer, 7441 sizeof (val->Manufacturer)); 7442 bcopy(port->fp_hba_port_attrs.serial_number, 7443 val->SerialNumber, 7444 sizeof (val->SerialNumber)); 7445 bcopy(port->fp_hba_port_attrs.model, 7446 val->Model, 7447 sizeof (val->Model)); 7448 bcopy(port->fp_hba_port_attrs.model_description, 7449 val->ModelDescription, 7450 sizeof (val->ModelDescription)); 7451 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7452 port->fp_sym_node_namelen); 7453 bcopy(port->fp_hba_port_attrs.hardware_version, 7454 val->HardwareVersion, 7455 sizeof (val->HardwareVersion)); 7456 bcopy(port->fp_hba_port_attrs.option_rom_version, 7457 val->OptionROMVersion, 7458 sizeof (val->OptionROMVersion)); 7459 bcopy(port->fp_hba_port_attrs.firmware_version, 7460 val->FirmwareVersion, 7461 sizeof (val->FirmwareVersion)); 7462 val->VendorSpecificID = 7463 port->fp_hba_port_attrs.vendor_specific_id; 7464 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7465 &val->NodeWWN.raw_wwn, 7466 sizeof (val->NodeWWN.raw_wwn)); 7467 7468 7469 bcopy(port->fp_hba_port_attrs.driver_name, 7470 val->DriverName, 7471 sizeof (val->DriverName)); 7472 bcopy(port->fp_hba_port_attrs.driver_version, 7473 val->DriverVersion, 7474 sizeof (val->DriverVersion)); 7475 mutex_exit(&port->fp_mutex); 7476 7477 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7478 
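		/*
		 * The only difference between the two ioctls is how the FRU
		 * ports are counted; judging by these two call sites, a
		 * non-zero second argument to fctl_count_fru_ports() asks
		 * for NPIV ports to be included in the count as well.
		 */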
val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7479 } else { 7480 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7481 } 7482 7483 if (use32 == B_TRUE) { 7484 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7485 val32->version = val->version; 7486 bcopy(val->Manufacturer, val32->Manufacturer, 7487 sizeof (val->Manufacturer)); 7488 bcopy(val->SerialNumber, val32->SerialNumber, 7489 sizeof (val->SerialNumber)); 7490 bcopy(val->Model, val32->Model, 7491 sizeof (val->Model)); 7492 bcopy(val->ModelDescription, val32->ModelDescription, 7493 sizeof (val->ModelDescription)); 7494 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7495 sizeof (val->NodeSymbolicName)); 7496 bcopy(val->HardwareVersion, val32->HardwareVersion, 7497 sizeof (val->HardwareVersion)); 7498 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7499 sizeof (val->OptionROMVersion)); 7500 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7501 sizeof (val->FirmwareVersion)); 7502 val32->VendorSpecificID = val->VendorSpecificID; 7503 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7504 sizeof (val->NodeWWN.raw_wwn)); 7505 bcopy(val->DriverName, val32->DriverName, 7506 sizeof (val->DriverName)); 7507 bcopy(val->DriverVersion, val32->DriverVersion, 7508 sizeof (val->DriverVersion)); 7509 7510 val32->NumberOfPorts = val->NumberOfPorts; 7511 7512 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7513 fcio->fcio_olen, mode) == 0) { 7514 if (fp_fcio_copyout(fcio, data, mode)) { 7515 rval = EFAULT; 7516 } 7517 } else { 7518 rval = EFAULT; 7519 } 7520 7521 kmem_free(val32, sizeof (*val32)); 7522 } else { 7523 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7524 fcio->fcio_olen, mode) == 0) { 7525 if (fp_fcio_copyout(fcio, data, mode)) { 7526 rval = EFAULT; 7527 } 7528 } else { 7529 rval = EFAULT; 7530 } 7531 } 7532 7533 kmem_free(val, sizeof (*val)); 7534 break; 7535 } 7536 7537 case FCIO_GET_NPIV_ATTRIBUTES: { 7538 fc_hba_npiv_attributes_t *attrs; 7539 7540 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7541 mutex_enter(&port->fp_mutex); 7542 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7543 &attrs->NodeWWN.raw_wwn, 7544 sizeof (attrs->NodeWWN.raw_wwn)); 7545 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7546 &attrs->PortWWN.raw_wwn, 7547 sizeof (attrs->PortWWN.raw_wwn)); 7548 mutex_exit(&port->fp_mutex); 7549 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7550 fcio->fcio_olen, mode) == 0) { 7551 if (fp_fcio_copyout(fcio, data, mode)) { 7552 rval = EFAULT; 7553 } 7554 } else { 7555 rval = EFAULT; 7556 } 7557 kmem_free(attrs, sizeof (*attrs)); 7558 break; 7559 } 7560 7561 case FCIO_DELETE_NPIV_PORT: { 7562 fc_local_port_t *tmpport; 7563 char ww_pname[17]; 7564 la_wwn_t vwwn[1]; 7565 7566 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7567 if (ddi_copyin(fcio->fcio_ibuf, 7568 &vwwn, sizeof (la_wwn_t), mode)) { 7569 rval = EFAULT; 7570 break; 7571 } 7572 7573 fc_wwn_to_str(&vwwn[0], ww_pname); 7574 FP_TRACE(FP_NHEAD1(3, 0), 7575 "Delete NPIV Port %s", ww_pname); 7576 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7577 if (tmpport == NULL) { 7578 FP_TRACE(FP_NHEAD1(3, 0), 7579 "Delete NPIV Port : no found"); 7580 rval = EFAULT; 7581 } else { 7582 fc_local_port_t *nextport = tmpport->fp_port_next; 7583 fc_local_port_t *prevport = tmpport->fp_port_prev; 7584 int portlen, portindex, ret; 7585 7586 portlen = sizeof (portindex); 7587 ret = ddi_prop_op(DDI_DEV_T_ANY, 7588 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7589 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7590 (caddr_t)&portindex, 
&portlen); 7591 if (ret != DDI_SUCCESS) { 7592 rval = EFAULT; 7593 break; 7594 } 7595 if (ndi_devi_offline(tmpport->fp_port_dip, 7596 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7597 FP_TRACE(FP_NHEAD1(1, 0), 7598 "Delete NPIV Port failed"); 7599 mutex_enter(&port->fp_mutex); 7600 tmpport->fp_npiv_state = 0; 7601 mutex_exit(&port->fp_mutex); 7602 rval = EFAULT; 7603 } else { 7604 mutex_enter(&port->fp_mutex); 7605 nextport->fp_port_prev = prevport; 7606 prevport->fp_port_next = nextport; 7607 if (port == port->fp_port_next) { 7608 port->fp_port_next = 7609 port->fp_port_prev = NULL; 7610 } 7611 port->fp_npiv_portnum--; 7612 FP_TRACE(FP_NHEAD1(3, 0), 7613 "Delete NPIV Port %d", portindex); 7614 port->fp_npiv_portindex[portindex-1] = 0; 7615 mutex_exit(&port->fp_mutex); 7616 } 7617 } 7618 break; 7619 } 7620 7621 case FCIO_CREATE_NPIV_PORT: { 7622 char ww_nname[17], ww_pname[17]; 7623 la_npiv_create_entry_t entrybuf; 7624 uint32_t vportindex = 0; 7625 int npiv_ret = 0; 7626 char *portname, *fcaname; 7627 7628 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7629 (void) ddi_pathname(port->fp_port_dip, portname); 7630 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7631 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7632 FP_TRACE(FP_NHEAD1(1, 0), 7633 "Create NPIV port %s %s %s", portname, fcaname, 7634 ddi_driver_name(port->fp_fca_dip)); 7635 kmem_free(portname, MAXPATHLEN); 7636 kmem_free(fcaname, MAXPATHLEN); 7637 if (ddi_copyin(fcio->fcio_ibuf, 7638 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7639 rval = EFAULT; 7640 break; 7641 } 7642 7643 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7644 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7645 vportindex = entrybuf.vindex; 7646 FP_TRACE(FP_NHEAD1(3, 0), 7647 "Create NPIV Port %s %s %d", 7648 ww_nname, ww_pname, vportindex); 7649 7650 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7651 rval = EFAULT; 7652 break; 7653 } 7654 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7655 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7656 if (npiv_ret == NDI_SUCCESS) { 7657 mutex_enter(&port->fp_mutex); 7658 port->fp_npiv_portnum++; 7659 mutex_exit(&port->fp_mutex); 7660 if (fp_copyout((void *)&vportindex, 7661 (void *)fcio->fcio_obuf, 7662 fcio->fcio_olen, mode) == 0) { 7663 if (fp_fcio_copyout(fcio, data, mode)) { 7664 rval = EFAULT; 7665 } 7666 } else { 7667 rval = EFAULT; 7668 } 7669 } else { 7670 rval = EFAULT; 7671 } 7672 FP_TRACE(FP_NHEAD1(3, 0), 7673 "Create NPIV Port %d %d", npiv_ret, vportindex); 7674 break; 7675 } 7676 7677 case FCIO_GET_NPIV_PORT_LIST: { 7678 fc_hba_npiv_port_list_t *list; 7679 int count; 7680 7681 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7682 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7683 rval = EINVAL; 7684 break; 7685 } 7686 7687 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7688 list->version = FC_HBA_LIST_VERSION; 7689 7690 count = (fcio->fcio_olen - 7691 (int)sizeof (fc_hba_npiv_port_list_t))/MAXPATHLEN + 1; 7692 if (port->fp_npiv_portnum > count) { 7693 list->numAdapters = port->fp_npiv_portnum; 7694 } else { 7695 /* build npiv port list */ 7696 count = fc_ulp_get_npiv_port_list(port, 7697 (char *)list->hbaPaths); 7698 if (count < 0) { 7699 rval = ENXIO; 7700 FP_TRACE(FP_NHEAD1(1, 0), 7701 "Build NPIV Port List error"); 7702 kmem_free(list, fcio->fcio_olen); 7703 break; 7704 } 7705 list->numAdapters = count; 7706 } 7707 7708 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7709 fcio->fcio_olen, mode) == 0) { 7710 if (fp_fcio_copyout(fcio, data, mode)) { 7711 FP_TRACE(FP_NHEAD1(1, 0), 7712 "Copy 
NPIV Port data error"); 7713 rval = EFAULT; 7714 } 7715 } else { 7716 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7717 rval = EFAULT; 7718 } 7719 kmem_free(list, fcio->fcio_olen); 7720 break; 7721 } 7722 7723 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7724 fc_hba_port_npiv_attributes_t *val; 7725 7726 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7727 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7728 7729 mutex_enter(&port->fp_mutex); 7730 val->npivflag = port->fp_npiv_flag; 7731 val->lastChange = port->fp_last_change; 7732 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7733 &val->PortWWN.raw_wwn, 7734 sizeof (val->PortWWN.raw_wwn)); 7735 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7736 &val->NodeWWN.raw_wwn, 7737 sizeof (val->NodeWWN.raw_wwn)); 7738 mutex_exit(&port->fp_mutex); 7739 7740 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7741 if (port->fp_npiv_type != FC_NPIV_PORT) { 7742 val->MaxNumberOfNPIVPorts = 7743 port->fp_fca_tran->fca_num_npivports; 7744 } else { 7745 val->MaxNumberOfNPIVPorts = 0; 7746 } 7747 7748 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7749 fcio->fcio_olen, mode) == 0) { 7750 if (fp_fcio_copyout(fcio, data, mode)) { 7751 rval = EFAULT; 7752 } 7753 } else { 7754 rval = EFAULT; 7755 } 7756 kmem_free(val, sizeof (*val)); 7757 break; 7758 } 7759 7760 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7761 fc_hba_port_attributes_t *val; 7762 fc_hba_port_attributes32_t *val32; 7763 7764 if (use32 == B_TRUE) { 7765 if (fcio->fcio_olen < sizeof (*val32) || 7766 fcio->fcio_xfer != FCIO_XFER_READ) { 7767 rval = EINVAL; 7768 break; 7769 } 7770 } else { 7771 if (fcio->fcio_olen < sizeof (*val) || 7772 fcio->fcio_xfer != FCIO_XFER_READ) { 7773 rval = EINVAL; 7774 break; 7775 } 7776 } 7777 7778 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7779 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7780 mutex_enter(&port->fp_mutex); 7781 val->lastChange = port->fp_last_change; 7782 val->fp_minor = port->fp_instance; 7783 7784 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7785 &val->PortWWN.raw_wwn, 7786 sizeof (val->PortWWN.raw_wwn)); 7787 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7788 &val->NodeWWN.raw_wwn, 7789 sizeof (val->NodeWWN.raw_wwn)); 7790 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7791 sizeof (val->FabricName.raw_wwn)); 7792 7793 val->PortFcId = port->fp_port_id.port_id; 7794 7795 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7796 case FC_STATE_OFFLINE: 7797 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7798 break; 7799 case FC_STATE_ONLINE: 7800 case FC_STATE_LOOP: 7801 case FC_STATE_NAMESERVICE: 7802 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7803 break; 7804 default: 7805 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7806 break; 7807 } 7808 7809 /* Translate from LV to FC-HBA port type codes */ 7810 switch (port->fp_port_type.port_type) { 7811 case FC_NS_PORT_N: 7812 val->PortType = FC_HBA_PORTTYPE_NPORT; 7813 break; 7814 case FC_NS_PORT_NL: 7815 /* Actually means loop for us */ 7816 val->PortType = FC_HBA_PORTTYPE_LPORT; 7817 break; 7818 case FC_NS_PORT_F: 7819 val->PortType = FC_HBA_PORTTYPE_FPORT; 7820 break; 7821 case FC_NS_PORT_FL: 7822 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7823 break; 7824 case FC_NS_PORT_E: 7825 val->PortType = FC_HBA_PORTTYPE_EPORT; 7826 break; 7827 default: 7828 val->PortType = FC_HBA_PORTTYPE_OTHER; 7829 break; 7830 } 7831 7832 7833 /* 7834 * If fp has decided that the topology is public loop, 7835 * we will indicate that using the appropriate 7836 * FC HBA API constant. 
7837 */ 7838 switch (port->fp_topology) { 7839 case FC_TOP_PUBLIC_LOOP: 7840 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7841 break; 7842 7843 case FC_TOP_PT_PT: 7844 val->PortType = FC_HBA_PORTTYPE_PTP; 7845 break; 7846 7847 case FC_TOP_UNKNOWN: 7848 /* 7849 * This should cover the case where nothing is connected 7850 * to the port. Crystal+ is p'bly an exception here. 7851 * For Crystal+, port 0 will come up as private loop 7852 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7853 * nothing is connected to it. 7854 * Current plan is to let userland handle this. 7855 */ 7856 if (port->fp_bind_state == FC_STATE_OFFLINE) { 7857 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7858 } 7859 break; 7860 7861 default: 7862 /* 7863 * Do Nothing. 7864 * Unused: 7865 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7866 */ 7867 break; 7868 } 7869 7870 val->PortSupportedClassofService = 7871 port->fp_hba_port_attrs.supported_cos; 7872 val->PortSupportedFc4Types[0] = 0; 7873 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7874 sizeof (val->PortActiveFc4Types)); 7875 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7876 port->fp_sym_port_namelen); 7877 val->PortSupportedSpeed = 7878 port->fp_hba_port_attrs.supported_speed; 7879 7880 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7881 case FC_STATE_1GBIT_SPEED: 7882 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7883 break; 7884 case FC_STATE_2GBIT_SPEED: 7885 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7886 break; 7887 case FC_STATE_4GBIT_SPEED: 7888 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7889 break; 7890 case FC_STATE_8GBIT_SPEED: 7891 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7892 break; 7893 case FC_STATE_10GBIT_SPEED: 7894 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7895 break; 7896 case FC_STATE_16GBIT_SPEED: 7897 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7898 break; 7899 default: 7900 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7901 break; 7902 } 7903 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7904 val->NumberofDiscoveredPorts = port->fp_dev_count; 7905 mutex_exit(&port->fp_mutex); 7906 7907 if (use32 == B_TRUE) { 7908 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7909 val32->version = val->version; 7910 val32->lastChange = val->lastChange; 7911 val32->fp_minor = val->fp_minor; 7912 7913 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7914 sizeof (val->PortWWN.raw_wwn)); 7915 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7916 sizeof (val->NodeWWN.raw_wwn)); 7917 val32->PortFcId = val->PortFcId; 7918 val32->PortState = val->PortState; 7919 val32->PortType = val->PortType; 7920 7921 val32->PortSupportedClassofService = 7922 val->PortSupportedClassofService; 7923 bcopy(val->PortActiveFc4Types, 7924 val32->PortActiveFc4Types, 7925 sizeof (val->PortActiveFc4Types)); 7926 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7927 sizeof (val->PortSymbolicName)); 7928 bcopy(&val->FabricName, &val32->FabricName, 7929 sizeof (val->FabricName.raw_wwn)); 7930 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7931 val32->PortSpeed = val->PortSpeed; 7932 7933 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7934 val32->NumberofDiscoveredPorts = 7935 val->NumberofDiscoveredPorts; 7936 7937 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7938 fcio->fcio_olen, mode) == 0) { 7939 if (fp_fcio_copyout(fcio, data, mode)) { 7940 rval = EFAULT; 7941 } 7942 } else { 7943 rval = EFAULT; 7944 } 7945 7946 kmem_free(val32, sizeof (*val32)); 7947 } else { 7948 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7949 fcio->fcio_olen, mode) == 0) 
{ 7950 if (fp_fcio_copyout(fcio, data, mode)) { 7951 rval = EFAULT; 7952 } 7953 } else { 7954 rval = EFAULT; 7955 } 7956 } 7957 7958 kmem_free(val, sizeof (*val)); 7959 break; 7960 } 7961 7962 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7963 fc_hba_port_attributes_t *val; 7964 fc_hba_port_attributes32_t *val32; 7965 uint32_t index = 0; 7966 fc_remote_port_t *tmp_pd; 7967 7968 if (use32 == B_TRUE) { 7969 if (fcio->fcio_olen < sizeof (*val32) || 7970 fcio->fcio_xfer != FCIO_XFER_READ) { 7971 rval = EINVAL; 7972 break; 7973 } 7974 } else { 7975 if (fcio->fcio_olen < sizeof (*val) || 7976 fcio->fcio_xfer != FCIO_XFER_READ) { 7977 rval = EINVAL; 7978 break; 7979 } 7980 } 7981 7982 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7983 rval = EFAULT; 7984 break; 7985 } 7986 7987 if (index >= port->fp_dev_count) { 7988 FP_TRACE(FP_NHEAD1(9, 0), 7989 "User supplied index out of range"); 7990 fcio->fcio_errno = FC_OUTOFBOUNDS; 7991 rval = EINVAL; 7992 if (fp_fcio_copyout(fcio, data, mode)) { 7993 rval = EFAULT; 7994 } 7995 break; 7996 } 7997 7998 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7999 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8000 8001 mutex_enter(&port->fp_mutex); 8002 tmp_pd = fctl_lookup_pd_by_index(port, index); 8003 8004 if (tmp_pd == NULL) { 8005 fcio->fcio_errno = FC_BADPORT; 8006 rval = EINVAL; 8007 } else { 8008 val->lastChange = port->fp_last_change; 8009 val->fp_minor = port->fp_instance; 8010 8011 mutex_enter(&tmp_pd->pd_mutex); 8012 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8013 &val->PortWWN.raw_wwn, 8014 sizeof (val->PortWWN.raw_wwn)); 8015 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8016 &val->NodeWWN.raw_wwn, 8017 sizeof (val->NodeWWN.raw_wwn)); 8018 val->PortFcId = tmp_pd->pd_port_id.port_id; 8019 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8020 tmp_pd->pd_spn_len); 8021 val->PortSupportedClassofService = tmp_pd->pd_cos; 8022 /* 8023 * we will assume the sizeof these pd_fc4types and 8024 * portActiveFc4Types will remain the same. 
we could 8025 * add in a check for it, but we decided it was unneeded 8026 */ 8027 bcopy((caddr_t)tmp_pd->pd_fc4types, 8028 val->PortActiveFc4Types, 8029 sizeof (tmp_pd->pd_fc4types)); 8030 val->PortState = 8031 fp_map_remote_port_state(tmp_pd->pd_state); 8032 mutex_exit(&tmp_pd->pd_mutex); 8033 8034 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8035 val->PortSupportedFc4Types[0] = 0; 8036 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8037 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8038 val->PortMaxFrameSize = 0; 8039 val->NumberofDiscoveredPorts = 0; 8040 8041 if (use32 == B_TRUE) { 8042 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8043 val32->version = val->version; 8044 val32->lastChange = val->lastChange; 8045 val32->fp_minor = val->fp_minor; 8046 8047 bcopy(&val->PortWWN.raw_wwn, 8048 &val32->PortWWN.raw_wwn, 8049 sizeof (val->PortWWN.raw_wwn)); 8050 bcopy(&val->NodeWWN.raw_wwn, 8051 &val32->NodeWWN.raw_wwn, 8052 sizeof (val->NodeWWN.raw_wwn)); 8053 val32->PortFcId = val->PortFcId; 8054 bcopy(val->PortSymbolicName, 8055 val32->PortSymbolicName, 8056 sizeof (val->PortSymbolicName)); 8057 val32->PortSupportedClassofService = 8058 val->PortSupportedClassofService; 8059 bcopy(val->PortActiveFc4Types, 8060 val32->PortActiveFc4Types, 8061 sizeof (tmp_pd->pd_fc4types)); 8062 8063 val32->PortType = val->PortType; 8064 val32->PortState = val->PortState; 8065 val32->PortSupportedFc4Types[0] = 8066 val->PortSupportedFc4Types[0]; 8067 val32->PortSupportedSpeed = 8068 val->PortSupportedSpeed; 8069 val32->PortSpeed = val->PortSpeed; 8070 val32->PortMaxFrameSize = 8071 val->PortMaxFrameSize; 8072 val32->NumberofDiscoveredPorts = 8073 val->NumberofDiscoveredPorts; 8074 8075 if (fp_copyout((void *)val32, 8076 (void *)fcio->fcio_obuf, 8077 fcio->fcio_olen, mode) == 0) { 8078 if (fp_fcio_copyout(fcio, 8079 data, mode)) { 8080 rval = EFAULT; 8081 } 8082 } else { 8083 rval = EFAULT; 8084 } 8085 8086 kmem_free(val32, sizeof (*val32)); 8087 } else { 8088 if (fp_copyout((void *)val, 8089 (void *)fcio->fcio_obuf, 8090 fcio->fcio_olen, mode) == 0) { 8091 if (fp_fcio_copyout(fcio, data, mode)) { 8092 rval = EFAULT; 8093 } 8094 } else { 8095 rval = EFAULT; 8096 } 8097 } 8098 } 8099 8100 mutex_exit(&port->fp_mutex); 8101 kmem_free(val, sizeof (*val)); 8102 break; 8103 } 8104 8105 case FCIO_GET_PORT_ATTRIBUTES: { 8106 fc_hba_port_attributes_t *val; 8107 fc_hba_port_attributes32_t *val32; 8108 la_wwn_t wwn; 8109 fc_remote_port_t *tmp_pd; 8110 8111 if (use32 == B_TRUE) { 8112 if (fcio->fcio_olen < sizeof (*val32) || 8113 fcio->fcio_xfer != FCIO_XFER_READ) { 8114 rval = EINVAL; 8115 break; 8116 } 8117 } else { 8118 if (fcio->fcio_olen < sizeof (*val) || 8119 fcio->fcio_xfer != FCIO_XFER_READ) { 8120 rval = EINVAL; 8121 break; 8122 } 8123 } 8124 8125 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8126 rval = EFAULT; 8127 break; 8128 } 8129 8130 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8131 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8132 8133 mutex_enter(&port->fp_mutex); 8134 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8135 val->lastChange = port->fp_last_change; 8136 val->fp_minor = port->fp_instance; 8137 mutex_exit(&port->fp_mutex); 8138 8139 if (tmp_pd == NULL) { 8140 fcio->fcio_errno = FC_BADWWN; 8141 rval = EINVAL; 8142 } else { 8143 mutex_enter(&tmp_pd->pd_mutex); 8144 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8145 &val->PortWWN.raw_wwn, 8146 sizeof (val->PortWWN.raw_wwn)); 8147 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8148 &val->NodeWWN.raw_wwn, 8149 sizeof 
(val->NodeWWN.raw_wwn)); 8150 val->PortFcId = tmp_pd->pd_port_id.port_id; 8151 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8152 tmp_pd->pd_spn_len); 8153 val->PortSupportedClassofService = tmp_pd->pd_cos; 8154 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8155 val->PortState = 8156 fp_map_remote_port_state(tmp_pd->pd_state); 8157 val->PortSupportedFc4Types[0] = 0; 8158 /* 8159 * we will assume the sizeof these pd_fc4types and 8160 * portActiveFc4Types will remain the same. we could 8161 * add in a check for it, but we decided it was unneeded 8162 */ 8163 bcopy((caddr_t)tmp_pd->pd_fc4types, 8164 val->PortActiveFc4Types, 8165 sizeof (tmp_pd->pd_fc4types)); 8166 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8167 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8168 val->PortMaxFrameSize = 0; 8169 val->NumberofDiscoveredPorts = 0; 8170 mutex_exit(&tmp_pd->pd_mutex); 8171 8172 if (use32 == B_TRUE) { 8173 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8174 val32->version = val->version; 8175 val32->lastChange = val->lastChange; 8176 val32->fp_minor = val->fp_minor; 8177 bcopy(&val->PortWWN.raw_wwn, 8178 &val32->PortWWN.raw_wwn, 8179 sizeof (val->PortWWN.raw_wwn)); 8180 bcopy(&val->NodeWWN.raw_wwn, 8181 &val32->NodeWWN.raw_wwn, 8182 sizeof (val->NodeWWN.raw_wwn)); 8183 val32->PortFcId = val->PortFcId; 8184 bcopy(val->PortSymbolicName, 8185 val32->PortSymbolicName, 8186 sizeof (val->PortSymbolicName)); 8187 val32->PortSupportedClassofService = 8188 val->PortSupportedClassofService; 8189 val32->PortType = val->PortType; 8190 val32->PortState = val->PortState; 8191 val32->PortSupportedFc4Types[0] = 8192 val->PortSupportedFc4Types[0]; 8193 bcopy(val->PortActiveFc4Types, 8194 val32->PortActiveFc4Types, 8195 sizeof (tmp_pd->pd_fc4types)); 8196 val32->PortSupportedSpeed = 8197 val->PortSupportedSpeed; 8198 val32->PortSpeed = val->PortSpeed; 8199 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8200 val32->NumberofDiscoveredPorts = 8201 val->NumberofDiscoveredPorts; 8202 8203 if (fp_copyout((void *)val32, 8204 (void *)fcio->fcio_obuf, 8205 fcio->fcio_olen, mode) == 0) { 8206 if (fp_fcio_copyout(fcio, data, mode)) { 8207 rval = EFAULT; 8208 } 8209 } else { 8210 rval = EFAULT; 8211 } 8212 8213 kmem_free(val32, sizeof (*val32)); 8214 } else { 8215 if (fp_copyout((void *)val, 8216 (void *)fcio->fcio_obuf, 8217 fcio->fcio_olen, mode) == 0) { 8218 if (fp_fcio_copyout(fcio, data, mode)) { 8219 rval = EFAULT; 8220 } 8221 } else { 8222 rval = EFAULT; 8223 } 8224 } 8225 } 8226 kmem_free(val, sizeof (*val)); 8227 break; 8228 } 8229 8230 case FCIO_GET_NUM_DEVS: { 8231 int num_devices; 8232 8233 if (fcio->fcio_olen != sizeof (num_devices) || 8234 fcio->fcio_xfer != FCIO_XFER_READ) { 8235 rval = EINVAL; 8236 break; 8237 } 8238 8239 mutex_enter(&port->fp_mutex); 8240 switch (port->fp_topology) { 8241 case FC_TOP_PRIVATE_LOOP: 8242 case FC_TOP_PT_PT: 8243 num_devices = port->fp_total_devices; 8244 fcio->fcio_errno = FC_SUCCESS; 8245 break; 8246 8247 case FC_TOP_PUBLIC_LOOP: 8248 case FC_TOP_FABRIC: 8249 mutex_exit(&port->fp_mutex); 8250 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8251 NULL, KM_SLEEP); 8252 ASSERT(job != NULL); 8253 8254 /* 8255 * In FC-GS-2 the Name Server doesn't send out 8256 * RSCNs for any Name Server Database updates 8257 * When it is finally fixed there is no need 8258 * to probe as below and should be removed. 
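			 *
			 * Until then the cached fp_total_devices cannot be
			 * trusted here, so fp_ns_get_devcount() is called
			 * synchronously below (GID_PT when FP_NS_SMART_COUNT
			 * is set, GA_NXT otherwise) to refresh the count
			 * before it is copied out to the caller.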
8259 */ 8260 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8261 fctl_dealloc_job(job); 8262 8263 mutex_enter(&port->fp_mutex); 8264 num_devices = port->fp_total_devices; 8265 fcio->fcio_errno = FC_SUCCESS; 8266 break; 8267 8268 case FC_TOP_NO_NS: 8269 /* FALLTHROUGH */ 8270 case FC_TOP_UNKNOWN: 8271 /* FALLTHROUGH */ 8272 default: 8273 num_devices = 0; 8274 fcio->fcio_errno = FC_SUCCESS; 8275 break; 8276 } 8277 mutex_exit(&port->fp_mutex); 8278 8279 if (fp_copyout((void *)&num_devices, 8280 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8281 mode) == 0) { 8282 if (fp_fcio_copyout(fcio, data, mode)) { 8283 rval = EFAULT; 8284 } 8285 } else { 8286 rval = EFAULT; 8287 } 8288 break; 8289 } 8290 8291 case FCIO_GET_DEV_LIST: { 8292 int num_devices; 8293 int new_count; 8294 int map_size; 8295 8296 if (fcio->fcio_xfer != FCIO_XFER_READ || 8297 fcio->fcio_alen != sizeof (new_count)) { 8298 rval = EINVAL; 8299 break; 8300 } 8301 8302 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8303 8304 mutex_enter(&port->fp_mutex); 8305 if (num_devices < port->fp_total_devices) { 8306 fcio->fcio_errno = FC_TOOMANY; 8307 new_count = port->fp_total_devices; 8308 mutex_exit(&port->fp_mutex); 8309 8310 if (fp_copyout((void *)&new_count, 8311 (void *)fcio->fcio_abuf, 8312 sizeof (new_count), mode)) { 8313 rval = EFAULT; 8314 break; 8315 } 8316 8317 if (fp_fcio_copyout(fcio, data, mode)) { 8318 rval = EFAULT; 8319 break; 8320 } 8321 rval = EINVAL; 8322 break; 8323 } 8324 8325 if (port->fp_total_devices <= 0) { 8326 fcio->fcio_errno = FC_NO_MAP; 8327 new_count = port->fp_total_devices; 8328 mutex_exit(&port->fp_mutex); 8329 8330 if (fp_copyout((void *)&new_count, 8331 (void *)fcio->fcio_abuf, 8332 sizeof (new_count), mode)) { 8333 rval = EFAULT; 8334 break; 8335 } 8336 8337 if (fp_fcio_copyout(fcio, data, mode)) { 8338 rval = EFAULT; 8339 break; 8340 } 8341 rval = EINVAL; 8342 break; 8343 } 8344 8345 switch (port->fp_topology) { 8346 case FC_TOP_PRIVATE_LOOP: 8347 if (fp_fillout_loopmap(port, fcio, 8348 mode) != FC_SUCCESS) { 8349 rval = EFAULT; 8350 break; 8351 } 8352 if (fp_fcio_copyout(fcio, data, mode)) { 8353 rval = EFAULT; 8354 } 8355 break; 8356 8357 case FC_TOP_PT_PT: 8358 if (fp_fillout_p2pmap(port, fcio, 8359 mode) != FC_SUCCESS) { 8360 rval = EFAULT; 8361 break; 8362 } 8363 if (fp_fcio_copyout(fcio, data, mode)) { 8364 rval = EFAULT; 8365 } 8366 break; 8367 8368 case FC_TOP_PUBLIC_LOOP: 8369 case FC_TOP_FABRIC: { 8370 fctl_ns_req_t *ns_cmd; 8371 8372 map_size = 8373 sizeof (fc_port_dev_t) * port->fp_total_devices; 8374 8375 mutex_exit(&port->fp_mutex); 8376 8377 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8378 sizeof (ns_resp_gan_t), map_size, 8379 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8380 KM_SLEEP); 8381 ASSERT(ns_cmd != NULL); 8382 8383 ns_cmd->ns_gan_index = 0; 8384 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8385 ns_cmd->ns_cmd_code = NS_GA_NXT; 8386 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8387 8388 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8389 NULL, KM_SLEEP); 8390 ASSERT(job != NULL); 8391 8392 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8393 8394 if (ret != FC_SUCCESS || 8395 job->job_result != FC_SUCCESS) { 8396 fctl_free_ns_cmd(ns_cmd); 8397 8398 fcio->fcio_errno = job->job_result; 8399 new_count = 0; 8400 if (fp_copyout((void *)&new_count, 8401 (void *)fcio->fcio_abuf, 8402 sizeof (new_count), mode)) { 8403 fctl_dealloc_job(job); 8404 mutex_enter(&port->fp_mutex); 8405 rval = EFAULT; 8406 break; 8407 } 8408 8409 if (fp_fcio_copyout(fcio, data, 
mode)) { 8410 fctl_dealloc_job(job); 8411 mutex_enter(&port->fp_mutex); 8412 rval = EFAULT; 8413 break; 8414 } 8415 rval = EIO; 8416 mutex_enter(&port->fp_mutex); 8417 break; 8418 } 8419 fctl_dealloc_job(job); 8420 8421 new_count = ns_cmd->ns_gan_index; 8422 if (fp_copyout((void *)&new_count, 8423 (void *)fcio->fcio_abuf, sizeof (new_count), 8424 mode)) { 8425 rval = EFAULT; 8426 fctl_free_ns_cmd(ns_cmd); 8427 mutex_enter(&port->fp_mutex); 8428 break; 8429 } 8430 8431 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8432 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8433 ns_cmd->ns_gan_index, mode)) { 8434 rval = EFAULT; 8435 fctl_free_ns_cmd(ns_cmd); 8436 mutex_enter(&port->fp_mutex); 8437 break; 8438 } 8439 fctl_free_ns_cmd(ns_cmd); 8440 8441 if (fp_fcio_copyout(fcio, data, mode)) { 8442 rval = EFAULT; 8443 } 8444 mutex_enter(&port->fp_mutex); 8445 break; 8446 } 8447 8448 case FC_TOP_NO_NS: 8449 /* FALLTHROUGH */ 8450 case FC_TOP_UNKNOWN: 8451 /* FALLTHROUGH */ 8452 default: 8453 fcio->fcio_errno = FC_NO_MAP; 8454 num_devices = port->fp_total_devices; 8455 8456 if (fp_copyout((void *)&new_count, 8457 (void *)fcio->fcio_abuf, 8458 sizeof (new_count), mode)) { 8459 rval = EFAULT; 8460 break; 8461 } 8462 8463 if (fp_fcio_copyout(fcio, data, mode)) { 8464 rval = EFAULT; 8465 break; 8466 } 8467 rval = EINVAL; 8468 break; 8469 } 8470 mutex_exit(&port->fp_mutex); 8471 break; 8472 } 8473 8474 case FCIO_GET_SYM_PNAME: { 8475 rval = ENOTSUP; 8476 break; 8477 } 8478 8479 case FCIO_GET_SYM_NNAME: { 8480 rval = ENOTSUP; 8481 break; 8482 } 8483 8484 case FCIO_SET_SYM_PNAME: { 8485 rval = ENOTSUP; 8486 break; 8487 } 8488 8489 case FCIO_SET_SYM_NNAME: { 8490 rval = ENOTSUP; 8491 break; 8492 } 8493 8494 case FCIO_GET_LOGI_PARAMS: { 8495 la_wwn_t pwwn; 8496 la_wwn_t *my_pwwn; 8497 la_els_logi_t *params; 8498 la_els_logi32_t *params32; 8499 fc_remote_node_t *node; 8500 fc_remote_port_t *pd; 8501 8502 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8503 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8504 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8505 rval = EINVAL; 8506 break; 8507 } 8508 8509 if (use32 == B_TRUE) { 8510 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8511 rval = EINVAL; 8512 break; 8513 } 8514 } else { 8515 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8516 rval = EINVAL; 8517 break; 8518 } 8519 } 8520 8521 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8522 rval = EFAULT; 8523 break; 8524 } 8525 8526 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8527 if (pd == NULL) { 8528 mutex_enter(&port->fp_mutex); 8529 my_pwwn = &port->fp_service_params.nport_ww_name; 8530 mutex_exit(&port->fp_mutex); 8531 8532 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8533 rval = ENXIO; 8534 break; 8535 } 8536 8537 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8538 mutex_enter(&port->fp_mutex); 8539 *params = port->fp_service_params; 8540 mutex_exit(&port->fp_mutex); 8541 } else { 8542 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8543 8544 mutex_enter(&pd->pd_mutex); 8545 params->ls_code.mbz = params->ls_code.ls_code = 0; 8546 params->common_service = pd->pd_csp; 8547 params->nport_ww_name = pd->pd_port_name; 8548 params->class_1 = pd->pd_clsp1; 8549 params->class_2 = pd->pd_clsp2; 8550 params->class_3 = pd->pd_clsp3; 8551 node = pd->pd_remote_nodep; 8552 mutex_exit(&pd->pd_mutex); 8553 8554 bzero(params->reserved, sizeof (params->reserved)); 8555 8556 mutex_enter(&node->fd_mutex); 8557 bcopy(node->fd_vv, params->vendor_version, 8558 sizeof (node->fd_vv)); 8559 params->node_ww_name = 
node->fd_node_name; 8560 mutex_exit(&node->fd_mutex); 8561 8562 fctl_release_remote_port(pd); 8563 } 8564 8565 if (use32 == B_TRUE) { 8566 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8567 8568 params32->ls_code.mbz = params->ls_code.mbz; 8569 params32->common_service = params->common_service; 8570 params32->nport_ww_name = params->nport_ww_name; 8571 params32->class_1 = params->class_1; 8572 params32->class_2 = params->class_2; 8573 params32->class_3 = params->class_3; 8574 bzero(params32->reserved, sizeof (params32->reserved)); 8575 bcopy(params->vendor_version, params32->vendor_version, 8576 sizeof (node->fd_vv)); 8577 params32->node_ww_name = params->node_ww_name; 8578 8579 if (ddi_copyout((void *)params32, 8580 (void *)fcio->fcio_obuf, 8581 sizeof (*params32), mode)) { 8582 rval = EFAULT; 8583 } 8584 8585 kmem_free(params32, sizeof (*params32)); 8586 } else { 8587 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8588 sizeof (*params), mode)) { 8589 rval = EFAULT; 8590 } 8591 } 8592 8593 kmem_free(params, sizeof (*params)); 8594 if (fp_fcio_copyout(fcio, data, mode)) { 8595 rval = EFAULT; 8596 } 8597 break; 8598 } 8599 8600 case FCIO_DEV_LOGOUT: 8601 case FCIO_DEV_LOGIN: 8602 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8603 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8604 rval = EINVAL; 8605 8606 if (fp_fcio_copyout(fcio, data, mode)) { 8607 rval = EFAULT; 8608 } 8609 break; 8610 } 8611 8612 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8613 jcode = JOB_FCIO_LOGIN; 8614 } else { 8615 jcode = JOB_FCIO_LOGOUT; 8616 } 8617 8618 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8619 bcopy(fcio, kfcio, sizeof (*fcio)); 8620 8621 if (kfcio->fcio_ilen) { 8622 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8623 KM_SLEEP); 8624 8625 if (ddi_copyin((void *)fcio->fcio_ibuf, 8626 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8627 mode)) { 8628 rval = EFAULT; 8629 8630 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8631 kmem_free(kfcio, sizeof (*kfcio)); 8632 fcio->fcio_errno = job->job_result; 8633 if (fp_fcio_copyout(fcio, data, mode)) { 8634 rval = EFAULT; 8635 } 8636 break; 8637 } 8638 } 8639 8640 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8641 job->job_private = kfcio; 8642 8643 fctl_enque_job(port, job); 8644 fctl_jobwait(job); 8645 8646 rval = job->job_result; 8647 8648 fcio->fcio_errno = kfcio->fcio_errno; 8649 if (fp_fcio_copyout(fcio, data, mode)) { 8650 rval = EFAULT; 8651 } 8652 8653 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8654 kmem_free(kfcio, sizeof (*kfcio)); 8655 fctl_dealloc_job(job); 8656 break; 8657 8658 case FCIO_GET_STATE: { 8659 la_wwn_t pwwn; 8660 uint32_t state; 8661 fc_remote_port_t *pd; 8662 fctl_ns_req_t *ns_cmd; 8663 8664 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8665 fcio->fcio_olen != sizeof (state) || 8666 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8667 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8668 rval = EINVAL; 8669 break; 8670 } 8671 8672 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8673 rval = EFAULT; 8674 break; 8675 } 8676 fcio->fcio_errno = 0; 8677 8678 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8679 if (pd == NULL) { 8680 mutex_enter(&port->fp_mutex); 8681 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8682 mutex_exit(&port->fp_mutex); 8683 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8684 NULL, NULL, KM_SLEEP); 8685 8686 job->job_counter = 1; 8687 job->job_result = FC_SUCCESS; 8688 8689 ns_cmd = fctl_alloc_ns_cmd( 8690 sizeof (ns_req_gid_pn_t), 8691 sizeof (ns_resp_gid_pn_t), 8692 sizeof (ns_resp_gid_pn_t), 8693 
FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8694 ASSERT(ns_cmd != NULL); 8695 8696 ns_cmd->ns_cmd_code = NS_GID_PN; 8697 ((ns_req_gid_pn_t *) 8698 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8699 8700 ret = fp_ns_query(port, ns_cmd, job, 8701 1, KM_SLEEP); 8702 8703 if (ret != FC_SUCCESS || job->job_result != 8704 FC_SUCCESS) { 8705 if (ret != FC_SUCCESS) { 8706 fcio->fcio_errno = ret; 8707 } else { 8708 fcio->fcio_errno = 8709 job->job_result; 8710 } 8711 rval = EIO; 8712 } else { 8713 state = PORT_DEVICE_INVALID; 8714 } 8715 fctl_free_ns_cmd(ns_cmd); 8716 fctl_dealloc_job(job); 8717 } else { 8718 mutex_exit(&port->fp_mutex); 8719 fcio->fcio_errno = FC_BADWWN; 8720 rval = ENXIO; 8721 } 8722 } else { 8723 mutex_enter(&pd->pd_mutex); 8724 state = pd->pd_state; 8725 mutex_exit(&pd->pd_mutex); 8726 8727 fctl_release_remote_port(pd); 8728 } 8729 8730 if (!rval) { 8731 if (ddi_copyout((void *)&state, 8732 (void *)fcio->fcio_obuf, sizeof (state), 8733 mode)) { 8734 rval = EFAULT; 8735 } 8736 } 8737 if (fp_fcio_copyout(fcio, data, mode)) { 8738 rval = EFAULT; 8739 } 8740 break; 8741 } 8742 8743 case FCIO_DEV_REMOVE: { 8744 la_wwn_t pwwn; 8745 fc_portmap_t *changelist; 8746 fc_remote_port_t *pd; 8747 8748 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8749 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8750 rval = EINVAL; 8751 break; 8752 } 8753 8754 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8755 rval = EFAULT; 8756 break; 8757 } 8758 8759 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8760 if (pd == NULL) { 8761 rval = ENXIO; 8762 fcio->fcio_errno = FC_BADWWN; 8763 if (fp_fcio_copyout(fcio, data, mode)) { 8764 rval = EFAULT; 8765 } 8766 break; 8767 } 8768 8769 mutex_enter(&pd->pd_mutex); 8770 if (pd->pd_ref_count > 1) { 8771 mutex_exit(&pd->pd_mutex); 8772 8773 rval = EBUSY; 8774 fcio->fcio_errno = FC_FAILURE; 8775 fctl_release_remote_port(pd); 8776 8777 if (fp_fcio_copyout(fcio, data, mode)) { 8778 rval = EFAULT; 8779 } 8780 break; 8781 } 8782 mutex_exit(&pd->pd_mutex); 8783 8784 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8785 8786 fctl_copy_portmap(changelist, pd); 8787 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8788 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8789 8790 fctl_release_remote_port(pd); 8791 break; 8792 } 8793 8794 case FCIO_GET_FCODE_REV: { 8795 caddr_t fcode_rev; 8796 fc_fca_pm_t pm; 8797 8798 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8799 fcio->fcio_xfer != FCIO_XFER_READ) { 8800 rval = EINVAL; 8801 break; 8802 } 8803 bzero((caddr_t)&pm, sizeof (pm)); 8804 8805 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8806 8807 pm.pm_cmd_flags = FC_FCA_PM_READ; 8808 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8809 pm.pm_data_len = fcio->fcio_olen; 8810 pm.pm_data_buf = fcode_rev; 8811 8812 ret = port->fp_fca_tran->fca_port_manage( 8813 port->fp_fca_handle, &pm); 8814 8815 if (ret == FC_SUCCESS) { 8816 if (ddi_copyout((void *)fcode_rev, 8817 (void *)fcio->fcio_obuf, 8818 fcio->fcio_olen, mode) == 0) { 8819 if (fp_fcio_copyout(fcio, data, mode)) { 8820 rval = EFAULT; 8821 } 8822 } else { 8823 rval = EFAULT; 8824 } 8825 } else { 8826 /* 8827 * check if buffer was not large enough to obtain 8828 * FCODE version. 
8829 */ 8830 if (pm.pm_data_len > fcio->fcio_olen) { 8831 rval = ENOMEM; 8832 } else { 8833 rval = EIO; 8834 } 8835 fcio->fcio_errno = ret; 8836 if (fp_fcio_copyout(fcio, data, mode)) { 8837 rval = EFAULT; 8838 } 8839 } 8840 kmem_free(fcode_rev, fcio->fcio_olen); 8841 break; 8842 } 8843 8844 case FCIO_GET_FW_REV: { 8845 caddr_t fw_rev; 8846 fc_fca_pm_t pm; 8847 8848 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8849 fcio->fcio_xfer != FCIO_XFER_READ) { 8850 rval = EINVAL; 8851 break; 8852 } 8853 bzero((caddr_t)&pm, sizeof (pm)); 8854 8855 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8856 8857 pm.pm_cmd_flags = FC_FCA_PM_READ; 8858 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8859 pm.pm_data_len = fcio->fcio_olen; 8860 pm.pm_data_buf = fw_rev; 8861 8862 ret = port->fp_fca_tran->fca_port_manage( 8863 port->fp_fca_handle, &pm); 8864 8865 if (ret == FC_SUCCESS) { 8866 if (ddi_copyout((void *)fw_rev, 8867 (void *)fcio->fcio_obuf, 8868 fcio->fcio_olen, mode) == 0) { 8869 if (fp_fcio_copyout(fcio, data, mode)) { 8870 rval = EFAULT; 8871 } 8872 } else { 8873 rval = EFAULT; 8874 } 8875 } else { 8876 if (fp_fcio_copyout(fcio, data, mode)) { 8877 rval = EFAULT; 8878 } 8879 rval = EIO; 8880 } 8881 kmem_free(fw_rev, fcio->fcio_olen); 8882 break; 8883 } 8884 8885 case FCIO_GET_DUMP_SIZE: { 8886 uint32_t dump_size; 8887 fc_fca_pm_t pm; 8888 8889 if (fcio->fcio_olen != sizeof (dump_size) || 8890 fcio->fcio_xfer != FCIO_XFER_READ) { 8891 rval = EINVAL; 8892 break; 8893 } 8894 bzero((caddr_t)&pm, sizeof (pm)); 8895 pm.pm_cmd_flags = FC_FCA_PM_READ; 8896 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8897 pm.pm_data_len = sizeof (dump_size); 8898 pm.pm_data_buf = (caddr_t)&dump_size; 8899 8900 ret = port->fp_fca_tran->fca_port_manage( 8901 port->fp_fca_handle, &pm); 8902 8903 if (ret == FC_SUCCESS) { 8904 if (ddi_copyout((void *)&dump_size, 8905 (void *)fcio->fcio_obuf, sizeof (dump_size), 8906 mode) == 0) { 8907 if (fp_fcio_copyout(fcio, data, mode)) { 8908 rval = EFAULT; 8909 } 8910 } else { 8911 rval = EFAULT; 8912 } 8913 } else { 8914 fcio->fcio_errno = ret; 8915 rval = EIO; 8916 if (fp_fcio_copyout(fcio, data, mode)) { 8917 rval = EFAULT; 8918 } 8919 } 8920 break; 8921 } 8922 8923 case FCIO_DOWNLOAD_FW: { 8924 caddr_t firmware; 8925 fc_fca_pm_t pm; 8926 8927 if (fcio->fcio_ilen <= 0 || 8928 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8929 rval = EINVAL; 8930 break; 8931 } 8932 8933 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8934 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8935 fcio->fcio_ilen, mode)) { 8936 rval = EFAULT; 8937 kmem_free(firmware, fcio->fcio_ilen); 8938 break; 8939 } 8940 8941 bzero((caddr_t)&pm, sizeof (pm)); 8942 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8943 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8944 pm.pm_data_len = fcio->fcio_ilen; 8945 pm.pm_data_buf = firmware; 8946 8947 ret = port->fp_fca_tran->fca_port_manage( 8948 port->fp_fca_handle, &pm); 8949 8950 kmem_free(firmware, fcio->fcio_ilen); 8951 8952 if (ret != FC_SUCCESS) { 8953 fcio->fcio_errno = ret; 8954 rval = EIO; 8955 if (fp_fcio_copyout(fcio, data, mode)) { 8956 rval = EFAULT; 8957 } 8958 } 8959 break; 8960 } 8961 8962 case FCIO_DOWNLOAD_FCODE: { 8963 caddr_t fcode; 8964 fc_fca_pm_t pm; 8965 8966 if (fcio->fcio_ilen <= 0 || 8967 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8968 rval = EINVAL; 8969 break; 8970 } 8971 8972 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8973 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8974 fcio->fcio_ilen, mode)) { 8975 rval = EFAULT; 8976 kmem_free(fcode, fcio->fcio_ilen); 8977 break; 8978 } 8979 8980 bzero((caddr_t)&pm, 
sizeof (pm)); 8981 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8982 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8983 pm.pm_data_len = fcio->fcio_ilen; 8984 pm.pm_data_buf = fcode; 8985 8986 ret = port->fp_fca_tran->fca_port_manage( 8987 port->fp_fca_handle, &pm); 8988 8989 kmem_free(fcode, fcio->fcio_ilen); 8990 8991 if (ret != FC_SUCCESS) { 8992 fcio->fcio_errno = ret; 8993 rval = EIO; 8994 if (fp_fcio_copyout(fcio, data, mode)) { 8995 rval = EFAULT; 8996 } 8997 } 8998 break; 8999 } 9000 9001 case FCIO_FORCE_DUMP: 9002 ret = port->fp_fca_tran->fca_reset( 9003 port->fp_fca_handle, FC_FCA_CORE); 9004 9005 if (ret != FC_SUCCESS) { 9006 fcio->fcio_errno = ret; 9007 rval = EIO; 9008 if (fp_fcio_copyout(fcio, data, mode)) { 9009 rval = EFAULT; 9010 } 9011 } 9012 break; 9013 9014 case FCIO_GET_DUMP: { 9015 caddr_t dump; 9016 uint32_t dump_size; 9017 fc_fca_pm_t pm; 9018 9019 if (fcio->fcio_xfer != FCIO_XFER_READ) { 9020 rval = EINVAL; 9021 break; 9022 } 9023 bzero((caddr_t)&pm, sizeof (pm)); 9024 9025 pm.pm_cmd_flags = FC_FCA_PM_READ; 9026 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 9027 pm.pm_data_len = sizeof (dump_size); 9028 pm.pm_data_buf = (caddr_t)&dump_size; 9029 9030 ret = port->fp_fca_tran->fca_port_manage( 9031 port->fp_fca_handle, &pm); 9032 9033 if (ret != FC_SUCCESS) { 9034 fcio->fcio_errno = ret; 9035 rval = EIO; 9036 if (fp_fcio_copyout(fcio, data, mode)) { 9037 rval = EFAULT; 9038 } 9039 break; 9040 } 9041 if (fcio->fcio_olen != dump_size) { 9042 fcio->fcio_errno = FC_NOMEM; 9043 rval = EINVAL; 9044 if (fp_fcio_copyout(fcio, data, mode)) { 9045 rval = EFAULT; 9046 } 9047 break; 9048 } 9049 9050 dump = kmem_zalloc(dump_size, KM_SLEEP); 9051 9052 bzero((caddr_t)&pm, sizeof (pm)); 9053 pm.pm_cmd_flags = FC_FCA_PM_READ; 9054 pm.pm_cmd_code = FC_PORT_GET_DUMP; 9055 pm.pm_data_len = dump_size; 9056 pm.pm_data_buf = dump; 9057 9058 ret = port->fp_fca_tran->fca_port_manage( 9059 port->fp_fca_handle, &pm); 9060 9061 if (ret == FC_SUCCESS) { 9062 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 9063 dump_size, mode) == 0) { 9064 if (fp_fcio_copyout(fcio, data, mode)) { 9065 rval = EFAULT; 9066 } 9067 } else { 9068 rval = EFAULT; 9069 } 9070 } else { 9071 fcio->fcio_errno = ret; 9072 rval = EIO; 9073 if (fp_fcio_copyout(fcio, data, mode)) { 9074 rval = EFAULT; 9075 } 9076 } 9077 kmem_free(dump, dump_size); 9078 break; 9079 } 9080 9081 case FCIO_GET_TOPOLOGY: { 9082 uint32_t user_topology; 9083 9084 if (fcio->fcio_xfer != FCIO_XFER_READ || 9085 fcio->fcio_olen != sizeof (user_topology)) { 9086 rval = EINVAL; 9087 break; 9088 } 9089 9090 mutex_enter(&port->fp_mutex); 9091 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 9092 user_topology = FC_TOP_UNKNOWN; 9093 } else { 9094 user_topology = port->fp_topology; 9095 } 9096 mutex_exit(&port->fp_mutex); 9097 9098 if (ddi_copyout((void *)&user_topology, 9099 (void *)fcio->fcio_obuf, sizeof (user_topology), 9100 mode)) { 9101 rval = EFAULT; 9102 } 9103 break; 9104 } 9105 9106 case FCIO_RESET_LINK: { 9107 la_wwn_t pwwn; 9108 9109 /* 9110 * Look at the output buffer field; if this field has zero 9111 * bytes then attempt to reset the local link/loop. If the 9112 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 9113 * and if yes, determine the LFA and reset the remote LIP 9114 * by LINIT ELS. 
9115 */ 9116 9117 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 9118 fcio->fcio_ilen != sizeof (pwwn)) { 9119 rval = EINVAL; 9120 break; 9121 } 9122 9123 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9124 sizeof (pwwn), mode)) { 9125 rval = EFAULT; 9126 break; 9127 } 9128 9129 mutex_enter(&port->fp_mutex); 9130 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9131 mutex_exit(&port->fp_mutex); 9132 break; 9133 } 9134 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9135 mutex_exit(&port->fp_mutex); 9136 9137 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9138 if (job == NULL) { 9139 rval = ENOMEM; 9140 break; 9141 } 9142 job->job_counter = 1; 9143 job->job_private = (void *)&pwwn; 9144 9145 fctl_enque_job(port, job); 9146 fctl_jobwait(job); 9147 9148 mutex_enter(&port->fp_mutex); 9149 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9150 mutex_exit(&port->fp_mutex); 9151 9152 if (job->job_result != FC_SUCCESS) { 9153 fcio->fcio_errno = job->job_result; 9154 rval = EIO; 9155 if (fp_fcio_copyout(fcio, data, mode)) { 9156 rval = EFAULT; 9157 } 9158 } 9159 fctl_dealloc_job(job); 9160 break; 9161 } 9162 9163 case FCIO_RESET_HARD: 9164 ret = port->fp_fca_tran->fca_reset( 9165 port->fp_fca_handle, FC_FCA_RESET); 9166 if (ret != FC_SUCCESS) { 9167 fcio->fcio_errno = ret; 9168 rval = EIO; 9169 if (fp_fcio_copyout(fcio, data, mode)) { 9170 rval = EFAULT; 9171 } 9172 } 9173 break; 9174 9175 case FCIO_RESET_HARD_CORE: 9176 ret = port->fp_fca_tran->fca_reset( 9177 port->fp_fca_handle, FC_FCA_RESET_CORE); 9178 if (ret != FC_SUCCESS) { 9179 rval = EIO; 9180 fcio->fcio_errno = ret; 9181 if (fp_fcio_copyout(fcio, data, mode)) { 9182 rval = EFAULT; 9183 } 9184 } 9185 break; 9186 9187 case FCIO_DIAG: { 9188 fc_fca_pm_t pm; 9189 9190 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9191 9192 /* Validate user buffer from ioctl call. */ 9193 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9194 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9195 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9196 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9197 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9198 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9199 rval = EFAULT; 9200 break; 9201 } 9202 9203 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9204 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9205 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9206 fcio->fcio_ilen, mode)) { 9207 rval = EFAULT; 9208 goto fp_fcio_diag_cleanup; 9209 } 9210 } 9211 9212 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9213 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9214 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9215 fcio->fcio_alen, mode)) { 9216 rval = EFAULT; 9217 goto fp_fcio_diag_cleanup; 9218 } 9219 } 9220 9221 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9222 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9223 } 9224 9225 pm.pm_cmd_code = FC_PORT_DIAG; 9226 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9227 9228 ret = port->fp_fca_tran->fca_port_manage( 9229 port->fp_fca_handle, &pm); 9230 9231 if (ret != FC_SUCCESS) { 9232 if (ret == FC_INVALID_REQUEST) { 9233 rval = ENOTTY; 9234 } else { 9235 rval = EIO; 9236 } 9237 9238 fcio->fcio_errno = ret; 9239 if (fp_fcio_copyout(fcio, data, mode)) { 9240 rval = EFAULT; 9241 } 9242 goto fp_fcio_diag_cleanup; 9243 } 9244 9245 /* 9246 * pm_stat_len will contain the number of status bytes 9247 * an FCA driver requires to return the complete status 9248 * of the requested diag operation. 
If the user buffer 9249 * is not large enough to hold the entire status, we 9250 * copy only the portion of the data that fits in the buffer and 9251 * return ENOMEM to the user application. 9252 */ 9253 if (pm.pm_stat_len > fcio->fcio_olen) { 9254 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9255 "fp:FCIO_DIAG:status buffer too small\n"); 9256 9257 rval = ENOMEM; 9258 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9259 fcio->fcio_olen, mode)) { 9260 rval = EFAULT; 9261 goto fp_fcio_diag_cleanup; 9262 } 9263 } else { 9264 /* 9265 * Copy only pm_stat_len bytes of data 9266 */ 9267 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9268 pm.pm_stat_len, mode)) { 9269 rval = EFAULT; 9270 goto fp_fcio_diag_cleanup; 9271 } 9272 } 9273 9274 if (fp_fcio_copyout(fcio, data, mode)) { 9275 rval = EFAULT; 9276 } 9277 9278 fp_fcio_diag_cleanup: 9279 if (pm.pm_cmd_buf != NULL) { 9280 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9281 } 9282 if (pm.pm_data_buf != NULL) { 9283 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9284 } 9285 if (pm.pm_stat_buf != NULL) { 9286 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9287 } 9288 9289 break; 9290 } 9291 9292 case FCIO_GET_NODE_ID: { 9293 /* validate parameters */ 9294 if (fcio->fcio_xfer != FCIO_XFER_READ || 9295 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9296 rval = EINVAL; 9297 break; 9298 } 9299 9300 rval = fp_get_rnid(port, data, mode, fcio); 9301 9302 /* ioctl handling is over */ 9303 break; 9304 } 9305 9306 case FCIO_SEND_NODE_ID: { 9307 la_wwn_t pwwn; 9308 9309 /* validate parameters */ 9310 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9311 fcio->fcio_xfer != FCIO_XFER_READ) { 9312 rval = EINVAL; 9313 break; 9314 } 9315 9316 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9317 sizeof (la_wwn_t), mode)) { 9318 rval = EFAULT; 9319 break; 9320 } 9321 9322 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9323 9324 /* ioctl handling is over */ 9325 break; 9326 } 9327 9328 case FCIO_SET_NODE_ID: { 9329 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9330 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9331 rval = EINVAL; 9332 break; 9333 } 9334 9335 rval = fp_set_rnid(port, data, mode, fcio); 9336 break; 9337 } 9338 9339 case FCIO_LINK_STATUS: { 9340 fc_portid_t rls_req; 9341 fc_rls_acc_t *rls_acc; 9342 fc_fca_pm_t pm; 9343 uint32_t dest, src_id; 9344 fp_cmd_t *cmd; 9345 fc_remote_port_t *pd; 9346 uchar_t pd_flags; 9347 9348 /* validate parameters */ 9349 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9350 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9351 fcio->fcio_xfer != FCIO_XFER_RW) { 9352 rval = EINVAL; 9353 break; 9354 } 9355 9356 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9357 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9358 rval = EINVAL; 9359 break; 9360 } 9361 9362 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9363 sizeof (fc_portid_t), mode)) { 9364 rval = EFAULT; 9365 break; 9366 } 9367 9368 9369 /* Determine the destination of the RLS frame */ 9370 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9371 dest = FS_FABRIC_F_PORT; 9372 } else { 9373 dest = rls_req.port_id; 9374 } 9375 9376 mutex_enter(&port->fp_mutex); 9377 src_id = port->fp_port_id.port_id; 9378 mutex_exit(&port->fp_mutex); 9379 9380 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9381 if (dest == 0 || dest == src_id) { 9382 9383 /* Allocate memory for link error status block */ 9384 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9385 ASSERT(rls_acc != NULL); 9386 9387 /* Prepare the port management structure */ 9388 bzero((caddr_t)&pm,
sizeof (pm)); 9389 9390 pm.pm_cmd_flags = FC_FCA_PM_READ; 9391 pm.pm_cmd_code = FC_PORT_RLS; 9392 pm.pm_data_len = sizeof (*rls_acc); 9393 pm.pm_data_buf = (caddr_t)rls_acc; 9394 9395 /* Get the adapter's link error status block */ 9396 ret = port->fp_fca_tran->fca_port_manage( 9397 port->fp_fca_handle, &pm); 9398 9399 if (ret == FC_SUCCESS) { 9400 /* xfer link status block to userland */ 9401 if (ddi_copyout((void *)rls_acc, 9402 (void *)fcio->fcio_obuf, 9403 sizeof (*rls_acc), mode) == 0) { 9404 if (fp_fcio_copyout(fcio, data, 9405 mode)) { 9406 rval = EFAULT; 9407 } 9408 } else { 9409 rval = EFAULT; 9410 } 9411 } else { 9412 rval = EIO; 9413 fcio->fcio_errno = ret; 9414 if (fp_fcio_copyout(fcio, data, mode)) { 9415 rval = EFAULT; 9416 } 9417 } 9418 9419 kmem_free(rls_acc, sizeof (*rls_acc)); 9420 9421 /* ioctl handling is over */ 9422 break; 9423 } 9424 9425 /* 9426 * Send RLS to the destination port. 9427 * Using the FPORT as the RLS frame destination is not yet 9428 * supported and will be implemented in the future, if needed. 9429 * The following call to get "pd" will fail if dest is the FPORT. 9430 */ 9431 pd = fctl_hold_remote_port_by_did(port, dest); 9432 if (pd == NULL) { 9433 fcio->fcio_errno = FC_BADOBJECT; 9434 rval = ENXIO; 9435 if (fp_fcio_copyout(fcio, data, mode)) { 9436 rval = EFAULT; 9437 } 9438 break; 9439 } 9440 9441 mutex_enter(&pd->pd_mutex); 9442 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9443 mutex_exit(&pd->pd_mutex); 9444 fctl_release_remote_port(pd); 9445 9446 fcio->fcio_errno = FC_LOGINREQ; 9447 rval = EINVAL; 9448 if (fp_fcio_copyout(fcio, data, mode)) { 9449 rval = EFAULT; 9450 } 9451 break; 9452 } 9453 ASSERT(pd->pd_login_count >= 1); 9454 mutex_exit(&pd->pd_mutex); 9455 9456 /* 9457 * Allocate job structure and set job_code as DUMMY, 9458 * because we will not go through the job thread. 9459 * Instead fp_sendcmd() is called directly here. 9460 */ 9461 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9462 NULL, NULL, KM_SLEEP); 9463 ASSERT(job != NULL); 9464 9465 job->job_counter = 1; 9466 9467 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9468 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9469 if (cmd == NULL) { 9470 fcio->fcio_errno = FC_NOMEM; 9471 rval = ENOMEM; 9472 9473 fctl_release_remote_port(pd); 9474 9475 fctl_dealloc_job(job); 9476 if (fp_fcio_copyout(fcio, data, mode)) { 9477 rval = EFAULT; 9478 } 9479 break; 9480 } 9481 9482 /* Allocate memory for link error status block */ 9483 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9484 9485 mutex_enter(&port->fp_mutex); 9486 mutex_enter(&pd->pd_mutex); 9487 9488 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9489 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9490 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9491 cmd->cmd_retry_count = 1; 9492 cmd->cmd_ulp_pkt = NULL; 9493 9494 fp_rls_init(cmd, job); 9495 9496 job->job_private = (void *)rls_acc; 9497 9498 pd_flags = pd->pd_flags; 9499 pd->pd_flags = PD_ELS_IN_PROGRESS; 9500 9501 mutex_exit(&pd->pd_mutex); 9502 mutex_exit(&port->fp_mutex); 9503 9504 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9505 fctl_jobwait(job); 9506 9507 fcio->fcio_errno = job->job_result; 9508 if (job->job_result == FC_SUCCESS) { 9509 ASSERT(pd != NULL); 9510 /* 9511 * link error status block is now available.
9512 * Copy it to userland 9513 */ 9514 ASSERT(job->job_private == (void *)rls_acc); 9515 if (ddi_copyout((void *)rls_acc, 9516 (void *)fcio->fcio_obuf, 9517 sizeof (*rls_acc), mode) == 0) { 9518 if (fp_fcio_copyout(fcio, data, 9519 mode)) { 9520 rval = EFAULT; 9521 } 9522 } else { 9523 rval = EFAULT; 9524 } 9525 } else { 9526 rval = EIO; 9527 } 9528 } else { 9529 rval = EIO; 9530 fp_free_pkt(cmd); 9531 } 9532 9533 if (rval) { 9534 mutex_enter(&port->fp_mutex); 9535 mutex_enter(&pd->pd_mutex); 9536 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9537 pd->pd_flags = pd_flags; 9538 } 9539 mutex_exit(&pd->pd_mutex); 9540 mutex_exit(&port->fp_mutex); 9541 } 9542 9543 fctl_release_remote_port(pd); 9544 fctl_dealloc_job(job); 9545 kmem_free(rls_acc, sizeof (*rls_acc)); 9546 9547 if (fp_fcio_copyout(fcio, data, mode)) { 9548 rval = EFAULT; 9549 } 9550 break; 9551 } 9552 9553 case FCIO_NS: { 9554 fc_ns_cmd_t *ns_req; 9555 fc_ns_cmd32_t *ns_req32; 9556 fctl_ns_req_t *ns_cmd; 9557 9558 if (use32 == B_TRUE) { 9559 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9560 rval = EINVAL; 9561 break; 9562 } 9563 9564 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9565 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9566 9567 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9568 sizeof (*ns_req32), mode)) { 9569 rval = EFAULT; 9570 kmem_free(ns_req, sizeof (*ns_req)); 9571 kmem_free(ns_req32, sizeof (*ns_req32)); 9572 break; 9573 } 9574 9575 ns_req->ns_flags = ns_req32->ns_flags; 9576 ns_req->ns_cmd = ns_req32->ns_cmd; 9577 ns_req->ns_req_len = ns_req32->ns_req_len; 9578 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9579 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9580 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9581 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9582 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9583 9584 kmem_free(ns_req32, sizeof (*ns_req32)); 9585 } else { 9586 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9587 rval = EINVAL; 9588 break; 9589 } 9590 9591 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9592 9593 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9594 sizeof (fc_ns_cmd_t), mode)) { 9595 rval = EFAULT; 9596 kmem_free(ns_req, sizeof (*ns_req)); 9597 break; 9598 } 9599 } 9600 9601 if (ns_req->ns_req_len <= 0) { 9602 rval = EINVAL; 9603 kmem_free(ns_req, sizeof (*ns_req)); 9604 break; 9605 } 9606 9607 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9608 ASSERT(job != NULL); 9609 9610 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9611 ns_req->ns_resp_len, ns_req->ns_resp_len, 9612 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9613 ASSERT(ns_cmd != NULL); 9614 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9615 9616 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9617 ns_cmd->ns_gan_max = 1; 9618 ns_cmd->ns_gan_index = 0; 9619 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9620 } 9621 9622 if (ddi_copyin(ns_req->ns_req_payload, 9623 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9624 rval = EFAULT; 9625 fctl_free_ns_cmd(ns_cmd); 9626 fctl_dealloc_job(job); 9627 kmem_free(ns_req, sizeof (*ns_req)); 9628 break; 9629 } 9630 9631 job->job_private = (void *)ns_cmd; 9632 fctl_enque_job(port, job); 9633 fctl_jobwait(job); 9634 rval = job->job_result; 9635 9636 if (rval == FC_SUCCESS) { 9637 if (ns_req->ns_resp_len) { 9638 if (ddi_copyout(ns_cmd->ns_data_buf, 9639 ns_req->ns_resp_payload, 9640 ns_cmd->ns_data_len, mode)) { 9641 rval = EFAULT; 9642 fctl_free_ns_cmd(ns_cmd); 9643 fctl_dealloc_job(job); 9644 kmem_free(ns_req, sizeof (*ns_req)); 9645 break; 9646 } 9647 } 9648 } else { 9649 rval = EIO; 9650 } 9651 
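	/*
	 * A minimal userland sketch of driving the FCIO_NS case above: the
	 * fc_ns_cmd_t is copied in from fcio_ibuf, the payload pointed to
	 * by ns_req_payload becomes the CT request, the job runs
	 * synchronously, and the response is copied back to
	 * ns_resp_payload.  The open(2) path below is hypothetical, and
	 * the enclosing FCIO_CMD ioctl number plus the fcio_t/fc_ns_cmd_t
	 * definitions are assumed to come from <sys/fibre-channel/fcio.h>
	 * (not part of this file); standard headers (fcntl.h, stdio.h,
	 * strings.h) are also assumed.
	 *
	 *	ns_req_gan_t gan;
	 *	ns_resp_gan_t gan_resp;
	 *	fc_ns_cmd_t ns;
	 *	fcio_t fcio;
	 *	int fd = open("/devices/...:devctl", O_RDWR);
	 *
	 *	bzero(&gan, sizeof (gan));	start the GA_NXT walk at ID 0
	 *	bzero(&gan_resp, sizeof (gan_resp));
	 *	bzero(&ns, sizeof (ns));
	 *	ns.ns_cmd = NS_GA_NXT;
	 *	ns.ns_req_len = sizeof (gan);
	 *	ns.ns_req_payload = (caddr_t)&gan;
	 *	ns.ns_resp_len = sizeof (gan_resp);
	 *	ns.ns_resp_payload = (caddr_t)&gan_resp;
	 *
	 *	bzero(&fcio, sizeof (fcio));
	 *	fcio.fcio_cmd = FCIO_NS;
	 *	fcio.fcio_xfer = FCIO_XFER_RW;
	 *	fcio.fcio_ilen = sizeof (ns);
	 *	fcio.fcio_ibuf = (caddr_t)&ns;
	 *	if (ioctl(fd, FCIO_CMD, &fcio) != 0)
	 *		(void) fprintf(stderr, "FCIO_NS failed, fcio_errno=%d\n",
	 *		    fcio.fcio_errno);
	 *
	 * A 32-bit application would pass an fc_ns_cmd32_t instead, as
	 * handled by the use32 path above.
	 */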
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9652 fctl_free_ns_cmd(ns_cmd); 9653 fctl_dealloc_job(job); 9654 kmem_free(ns_req, sizeof (*ns_req)); 9655 9656 if (fp_fcio_copyout(fcio, data, mode)) { 9657 rval = EFAULT; 9658 } 9659 break; 9660 } 9661 9662 default: 9663 rval = ENOTTY; 9664 break; 9665 } 9666 9667 /* 9668 * If set, reset the EXCL busy bit to 9669 * receive other exclusive access commands 9670 */ 9671 mutex_enter(&port->fp_mutex); 9672 if (port->fp_flag & FP_EXCL_BUSY) { 9673 port->fp_flag &= ~FP_EXCL_BUSY; 9674 } 9675 mutex_exit(&port->fp_mutex); 9676 9677 return (rval); 9678 } 9679 9680 9681 /* 9682 * This function assumes that the response length 9683 * is same regardless of data model (LP32 or LP64) 9684 * which is true for all the ioctls currently 9685 * supported. 9686 */ 9687 static int 9688 fp_copyout(void *from, void *to, size_t len, int mode) 9689 { 9690 return (ddi_copyout(from, to, len, mode)); 9691 } 9692 9693 /* 9694 * This function does the set rnid 9695 */ 9696 static int 9697 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9698 { 9699 int rval = 0; 9700 fc_rnid_t *rnid; 9701 fc_fca_pm_t pm; 9702 9703 /* Allocate memory for node id block */ 9704 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9705 9706 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9707 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9708 kmem_free(rnid, sizeof (fc_rnid_t)); 9709 return (EFAULT); 9710 } 9711 9712 /* Prepare the port management structure */ 9713 bzero((caddr_t)&pm, sizeof (pm)); 9714 9715 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9716 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9717 pm.pm_data_len = sizeof (*rnid); 9718 pm.pm_data_buf = (caddr_t)rnid; 9719 9720 /* Get the adapter's node data */ 9721 rval = port->fp_fca_tran->fca_port_manage( 9722 port->fp_fca_handle, &pm); 9723 9724 if (rval != FC_SUCCESS) { 9725 fcio->fcio_errno = rval; 9726 rval = EIO; 9727 if (fp_fcio_copyout(fcio, data, mode)) { 9728 rval = EFAULT; 9729 } 9730 } else { 9731 mutex_enter(&port->fp_mutex); 9732 /* copy to the port structure */ 9733 bcopy(rnid, &port->fp_rnid_params, 9734 sizeof (port->fp_rnid_params)); 9735 mutex_exit(&port->fp_mutex); 9736 } 9737 9738 kmem_free(rnid, sizeof (fc_rnid_t)); 9739 9740 if (rval != FC_SUCCESS) { 9741 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9742 } 9743 9744 return (rval); 9745 } 9746 9747 /* 9748 * This function does the local pwwn get rnid 9749 */ 9750 static int 9751 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9752 { 9753 fc_rnid_t *rnid; 9754 fc_fca_pm_t pm; 9755 int rval = 0; 9756 uint32_t ret; 9757 9758 /* Allocate memory for rnid data block */ 9759 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9760 9761 mutex_enter(&port->fp_mutex); 9762 if (port->fp_rnid_init == 1) { 9763 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9764 mutex_exit(&port->fp_mutex); 9765 /* xfer node info to userland */ 9766 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9767 sizeof (*rnid), mode) == 0) { 9768 if (fp_fcio_copyout(fcio, data, mode)) { 9769 rval = EFAULT; 9770 } 9771 } else { 9772 rval = EFAULT; 9773 } 9774 9775 kmem_free(rnid, sizeof (fc_rnid_t)); 9776 9777 if (rval != FC_SUCCESS) { 9778 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9779 rval); 9780 } 9781 9782 return (rval); 9783 } 9784 mutex_exit(&port->fp_mutex); 9785 9786 /* Prepare the port management structure */ 9787 bzero((caddr_t)&pm, sizeof (pm)); 9788 9789 pm.pm_cmd_flags = FC_FCA_PM_READ; 
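	/*
	 * Reached only when fp_rnid_init was clear above: ask the FCA for
	 * its node identification data with FC_PORT_GET_NODE_ID.  On
	 * success the reply is cached in fp_rnid_params (and fp_rnid_init
	 * is set), so later FCIO_GET_NODE_ID calls are answered from the
	 * cached copy without another fca_port_manage() round trip.
	 */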
9790 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9791 pm.pm_data_len = sizeof (fc_rnid_t); 9792 pm.pm_data_buf = (caddr_t)rnid; 9793 9794 /* Get the adapter's node data */ 9795 ret = port->fp_fca_tran->fca_port_manage( 9796 port->fp_fca_handle, 9797 &pm); 9798 9799 if (ret == FC_SUCCESS) { 9800 /* initialize in the port_info */ 9801 mutex_enter(&port->fp_mutex); 9802 port->fp_rnid_init = 1; 9803 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9804 mutex_exit(&port->fp_mutex); 9805 9806 /* xfer node info to userland */ 9807 if (ddi_copyout((void *)rnid, 9808 (void *)fcio->fcio_obuf, 9809 sizeof (*rnid), mode) == 0) { 9810 if (fp_fcio_copyout(fcio, data, 9811 mode)) { 9812 rval = EFAULT; 9813 } 9814 } else { 9815 rval = EFAULT; 9816 } 9817 } else { 9818 rval = EIO; 9819 fcio->fcio_errno = ret; 9820 if (fp_fcio_copyout(fcio, data, mode)) { 9821 rval = EFAULT; 9822 } 9823 } 9824 9825 kmem_free(rnid, sizeof (fc_rnid_t)); 9826 9827 if (rval != FC_SUCCESS) { 9828 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9829 } 9830 9831 return (rval); 9832 } 9833 9834 static int 9835 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9836 la_wwn_t *pwwn) 9837 { 9838 int rval = 0; 9839 fc_remote_port_t *pd; 9840 fp_cmd_t *cmd; 9841 job_request_t *job; 9842 la_els_rnid_acc_t *rnid_acc; 9843 9844 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9845 if (pd == NULL) { 9846 /* 9847 * We can safely assume that the destination port 9848 * is logged in. Either the user land will explicitly 9849 * login before issuing RNID ioctl or the device would 9850 * have been configured, meaning already logged in. 9851 */ 9852 9853 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9854 9855 return (ENXIO); 9856 } 9857 /* 9858 * Allocate job structure and set job_code as DUMMY, 9859 * because we will not go thorugh the job thread. 9860 * Instead fp_sendcmd() is called directly here. 9861 */ 9862 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9863 NULL, NULL, KM_SLEEP); 9864 9865 ASSERT(job != NULL); 9866 9867 job->job_counter = 1; 9868 9869 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9870 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9871 if (cmd == NULL) { 9872 fcio->fcio_errno = FC_NOMEM; 9873 rval = ENOMEM; 9874 9875 fctl_dealloc_job(job); 9876 if (fp_fcio_copyout(fcio, data, mode)) { 9877 rval = EFAULT; 9878 } 9879 9880 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9881 9882 return (rval); 9883 } 9884 9885 /* Allocate memory for node id accept block */ 9886 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9887 9888 mutex_enter(&port->fp_mutex); 9889 mutex_enter(&pd->pd_mutex); 9890 9891 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9892 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9893 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9894 cmd->cmd_retry_count = 1; 9895 cmd->cmd_ulp_pkt = NULL; 9896 9897 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9898 9899 job->job_private = (void *)rnid_acc; 9900 9901 pd->pd_flags = PD_ELS_IN_PROGRESS; 9902 9903 mutex_exit(&pd->pd_mutex); 9904 mutex_exit(&port->fp_mutex); 9905 9906 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9907 fctl_jobwait(job); 9908 fcio->fcio_errno = job->job_result; 9909 if (job->job_result == FC_SUCCESS) { 9910 int rnid_cnt; 9911 ASSERT(pd != NULL); 9912 /* 9913 * node id block is now available. 
9914 * Copy it to userland 9915 */ 9916 ASSERT(job->job_private == (void *)rnid_acc); 9917 9918 /* get the response length */ 9919 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9920 rnid_acc->hdr.cmn_len + 9921 rnid_acc->hdr.specific_len; 9922 9923 if (fcio->fcio_olen < rnid_cnt) { 9924 rval = EINVAL; 9925 } else if (ddi_copyout((void *)rnid_acc, 9926 (void *)fcio->fcio_obuf, 9927 rnid_cnt, mode) == 0) { 9928 if (fp_fcio_copyout(fcio, data, 9929 mode)) { 9930 rval = EFAULT; 9931 } 9932 } else { 9933 rval = EFAULT; 9934 } 9935 } else { 9936 rval = EIO; 9937 } 9938 } else { 9939 rval = EIO; 9940 if (pd) { 9941 mutex_enter(&pd->pd_mutex); 9942 pd->pd_flags = PD_IDLE; 9943 mutex_exit(&pd->pd_mutex); 9944 } 9945 fp_free_pkt(cmd); 9946 } 9947 9948 fctl_dealloc_job(job); 9949 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9950 9951 if (fp_fcio_copyout(fcio, data, mode)) { 9952 rval = EFAULT; 9953 } 9954 9955 if (rval != FC_SUCCESS) { 9956 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9957 } 9958 9959 return (rval); 9960 } 9961 9962 /* 9963 * Copy out to userland 9964 */ 9965 static int 9966 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9967 { 9968 int rval; 9969 9970 #ifdef _MULTI_DATAMODEL 9971 switch (ddi_model_convert_from(mode & FMODELS)) { 9972 case DDI_MODEL_ILP32: { 9973 struct fcio32 fcio32; 9974 9975 fcio32.fcio_xfer = fcio->fcio_xfer; 9976 fcio32.fcio_cmd = fcio->fcio_cmd; 9977 fcio32.fcio_flags = fcio->fcio_flags; 9978 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9979 fcio32.fcio_ilen = fcio->fcio_ilen; 9980 fcio32.fcio_ibuf = 9981 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9982 fcio32.fcio_olen = fcio->fcio_olen; 9983 fcio32.fcio_obuf = 9984 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9985 fcio32.fcio_alen = fcio->fcio_alen; 9986 fcio32.fcio_abuf = 9987 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9988 fcio32.fcio_errno = fcio->fcio_errno; 9989 9990 rval = ddi_copyout((void *)&fcio32, (void *)data, 9991 sizeof (struct fcio32), mode); 9992 break; 9993 } 9994 case DDI_MODEL_NONE: 9995 rval = ddi_copyout((void *)fcio, (void *)data, 9996 sizeof (fcio_t), mode); 9997 break; 9998 } 9999 #else 10000 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 10001 #endif 10002 10003 return (rval); 10004 } 10005 10006 10007 static void 10008 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 10009 { 10010 uint32_t listlen; 10011 fc_portmap_t *changelist; 10012 10013 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10014 ASSERT(port->fp_topology == FC_TOP_PT_PT); 10015 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10016 10017 listlen = 0; 10018 changelist = NULL; 10019 10020 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10021 if (port->fp_statec_busy > 1) { 10022 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10023 } 10024 } 10025 mutex_exit(&port->fp_mutex); 10026 10027 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10028 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10029 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10030 listlen, listlen, KM_SLEEP); 10031 10032 mutex_enter(&port->fp_mutex); 10033 } else { 10034 ASSERT(changelist == NULL && listlen == 0); 10035 mutex_enter(&port->fp_mutex); 10036 if (--port->fp_statec_busy == 0) { 10037 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10038 } 10039 } 10040 } 10041 10042 static int 10043 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10044 { 10045 int rval; 10046 int count; 10047 int index; 10048 int num_devices; 10049 fc_remote_node_t *node; 
10050 fc_port_dev_t *devlist; 10051 struct pwwn_hash *head; 10052 fc_remote_port_t *pd; 10053 10054 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10055 10056 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10057 10058 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 10059 10060 for (count = index = 0; index < pwwn_table_size; index++) { 10061 head = &port->fp_pwwn_table[index]; 10062 pd = head->pwwn_head; 10063 while (pd != NULL) { 10064 mutex_enter(&pd->pd_mutex); 10065 if (pd->pd_state == PORT_DEVICE_INVALID) { 10066 mutex_exit(&pd->pd_mutex); 10067 pd = pd->pd_wwn_hnext; 10068 continue; 10069 } 10070 10071 devlist[count].dev_state = pd->pd_state; 10072 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10073 devlist[count].dev_did = pd->pd_port_id; 10074 devlist[count].dev_did.priv_lilp_posit = 10075 (uint8_t)(index & 0xff); 10076 bcopy((caddr_t)pd->pd_fc4types, 10077 (caddr_t)devlist[count].dev_type, 10078 sizeof (pd->pd_fc4types)); 10079 10080 bcopy((caddr_t)&pd->pd_port_name, 10081 (caddr_t)&devlist[count].dev_pwwn, 10082 sizeof (la_wwn_t)); 10083 10084 node = pd->pd_remote_nodep; 10085 mutex_exit(&pd->pd_mutex); 10086 10087 if (node) { 10088 mutex_enter(&node->fd_mutex); 10089 bcopy((caddr_t)&node->fd_node_name, 10090 (caddr_t)&devlist[count].dev_nwwn, 10091 sizeof (la_wwn_t)); 10092 mutex_exit(&node->fd_mutex); 10093 } 10094 count++; 10095 if (count >= num_devices) { 10096 goto found; 10097 } 10098 } 10099 } 10100 found: 10101 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10102 sizeof (count), mode)) { 10103 rval = FC_FAILURE; 10104 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10105 sizeof (fc_port_dev_t) * num_devices, mode)) { 10106 rval = FC_FAILURE; 10107 } else { 10108 rval = FC_SUCCESS; 10109 } 10110 10111 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 10112 10113 return (rval); 10114 } 10115 10116 10117 /* 10118 * Handle Fabric ONLINE 10119 */ 10120 static void 10121 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10122 { 10123 int index; 10124 int rval; 10125 int dbg_count; 10126 int count = 0; 10127 char ww_name[17]; 10128 uint32_t d_id; 10129 uint32_t listlen; 10130 fctl_ns_req_t *ns_cmd; 10131 struct pwwn_hash *head; 10132 fc_remote_port_t *pd; 10133 fc_remote_port_t *npd; 10134 fc_portmap_t *changelist; 10135 10136 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10137 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10138 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10139 10140 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10141 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10142 0, KM_SLEEP); 10143 10144 ASSERT(ns_cmd != NULL); 10145 10146 ns_cmd->ns_cmd_code = NS_GID_PN; 10147 10148 /* 10149 * Check if orphans are showing up now 10150 */ 10151 if (port->fp_orphan_count) { 10152 fc_orphan_t *orp; 10153 fc_orphan_t *norp = NULL; 10154 fc_orphan_t *prev = NULL; 10155 10156 for (orp = port->fp_orphan_list; orp; orp = norp) { 10157 norp = orp->orp_next; 10158 mutex_exit(&port->fp_mutex); 10159 orp->orp_nscan++; 10160 10161 job->job_counter = 1; 10162 job->job_result = FC_SUCCESS; 10163 10164 ((ns_req_gid_pn_t *) 10165 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10166 ((ns_resp_gid_pn_t *) 10167 ns_cmd->ns_data_buf)->pid.port_id = 0; 10168 ((ns_resp_gid_pn_t *) 10169 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10170 10171 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10172 if (rval == FC_SUCCESS) { 10173 d_id = 10174 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10175 pd = 
fp_create_remote_port_by_ns(port, 10176 d_id, KM_SLEEP); 10177 10178 if (pd != NULL) { 10179 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10180 10181 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10182 0, NULL, "N_x Port with D_ID=%x," 10183 " PWWN=%s reappeared in fabric", 10184 d_id, ww_name); 10185 10186 mutex_enter(&port->fp_mutex); 10187 if (prev) { 10188 prev->orp_next = orp->orp_next; 10189 } else { 10190 ASSERT(orp == 10191 port->fp_orphan_list); 10192 port->fp_orphan_list = 10193 orp->orp_next; 10194 } 10195 port->fp_orphan_count--; 10196 mutex_exit(&port->fp_mutex); 10197 kmem_free(orp, sizeof (*orp)); 10198 count++; 10199 10200 mutex_enter(&pd->pd_mutex); 10201 pd->pd_flags = PD_ELS_MARK; 10202 10203 mutex_exit(&pd->pd_mutex); 10204 } else { 10205 prev = orp; 10206 } 10207 } else { 10208 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10209 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10210 10211 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10212 NULL, 10213 " Port WWN %s removed from orphan" 10214 " list after %d scans", ww_name, 10215 orp->orp_nscan); 10216 10217 mutex_enter(&port->fp_mutex); 10218 if (prev) { 10219 prev->orp_next = orp->orp_next; 10220 } else { 10221 ASSERT(orp == 10222 port->fp_orphan_list); 10223 port->fp_orphan_list = 10224 orp->orp_next; 10225 } 10226 port->fp_orphan_count--; 10227 mutex_exit(&port->fp_mutex); 10228 10229 kmem_free(orp, sizeof (*orp)); 10230 } else { 10231 prev = orp; 10232 } 10233 } 10234 mutex_enter(&port->fp_mutex); 10235 } 10236 } 10237 10238 /* 10239 * Walk the Port WWN hash table, reestablish LOGIN 10240 * if a LOGIN is already performed on a particular 10241 * device; Any failure to LOGIN should mark the 10242 * port device OLD. 10243 */ 10244 for (index = 0; index < pwwn_table_size; index++) { 10245 head = &port->fp_pwwn_table[index]; 10246 npd = head->pwwn_head; 10247 10248 while ((pd = npd) != NULL) { 10249 la_wwn_t *pwwn; 10250 10251 npd = pd->pd_wwn_hnext; 10252 10253 /* 10254 * Don't count in the port devices that are new 10255 * unless the total number of devices visible 10256 * through this port is less than FP_MAX_DEVICES 10257 */ 10258 mutex_enter(&pd->pd_mutex); 10259 if (port->fp_dev_count >= FP_MAX_DEVICES || 10260 (port->fp_options & FP_TARGET_MODE)) { 10261 if (pd->pd_type == PORT_DEVICE_NEW || 10262 pd->pd_flags == PD_ELS_MARK || 10263 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10264 mutex_exit(&pd->pd_mutex); 10265 continue; 10266 } 10267 } else { 10268 if (pd->pd_flags == PD_ELS_MARK || 10269 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10270 mutex_exit(&pd->pd_mutex); 10271 continue; 10272 } 10273 pd->pd_type = PORT_DEVICE_OLD; 10274 } 10275 count++; 10276 10277 /* 10278 * Consult with the name server about D_ID changes 10279 */ 10280 job->job_counter = 1; 10281 job->job_result = FC_SUCCESS; 10282 10283 ((ns_req_gid_pn_t *) 10284 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10285 ((ns_resp_gid_pn_t *) 10286 ns_cmd->ns_data_buf)->pid.port_id = 0; 10287 10288 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10289 pid.priv_lilp_posit = 0; 10290 10291 pwwn = &pd->pd_port_name; 10292 pd->pd_flags = PD_ELS_MARK; 10293 10294 mutex_exit(&pd->pd_mutex); 10295 mutex_exit(&port->fp_mutex); 10296 10297 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10298 if (rval != FC_SUCCESS) { 10299 fc_wwn_to_str(pwwn, ww_name); 10300 10301 mutex_enter(&pd->pd_mutex); 10302 d_id = pd->pd_port_id.port_id; 10303 pd->pd_type = PORT_DEVICE_DELETE; 10304 mutex_exit(&pd->pd_mutex); 10305 10306 FP_TRACE(FP_NHEAD1(3, 0), 10307 "fp_fabric_online: PD " 10308 "disappeared; 
d_id=%x, PWWN=%s", 10309 d_id, ww_name); 10310 10311 FP_TRACE(FP_NHEAD2(9, 0), 10312 "N_x Port with D_ID=%x, PWWN=%s" 10313 " disappeared from fabric", d_id, 10314 ww_name); 10315 10316 mutex_enter(&port->fp_mutex); 10317 continue; 10318 } 10319 10320 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10321 10322 mutex_enter(&port->fp_mutex); 10323 mutex_enter(&pd->pd_mutex); 10324 if (d_id != pd->pd_port_id.port_id) { 10325 fctl_delist_did_table(port, pd); 10326 fc_wwn_to_str(pwwn, ww_name); 10327 10328 FP_TRACE(FP_NHEAD2(9, 0), 10329 "D_ID of a device with PWWN %s changed." 10330 " New D_ID = %x, OLD D_ID = %x", ww_name, 10331 d_id, pd->pd_port_id.port_id); 10332 10333 pd->pd_port_id.port_id = BE_32(d_id); 10334 pd->pd_type = PORT_DEVICE_CHANGED; 10335 fctl_enlist_did_table(port, pd); 10336 } 10337 mutex_exit(&pd->pd_mutex); 10338 10339 } 10340 } 10341 10342 if (ns_cmd) { 10343 fctl_free_ns_cmd(ns_cmd); 10344 } 10345 10346 listlen = 0; 10347 changelist = NULL; 10348 if (count) { 10349 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10350 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10351 mutex_exit(&port->fp_mutex); 10352 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10353 mutex_enter(&port->fp_mutex); 10354 } 10355 10356 dbg_count = 0; 10357 10358 job->job_counter = count; 10359 10360 for (index = 0; index < pwwn_table_size; index++) { 10361 head = &port->fp_pwwn_table[index]; 10362 npd = head->pwwn_head; 10363 10364 while ((pd = npd) != NULL) { 10365 npd = pd->pd_wwn_hnext; 10366 10367 mutex_enter(&pd->pd_mutex); 10368 if (pd->pd_flags != PD_ELS_MARK) { 10369 mutex_exit(&pd->pd_mutex); 10370 continue; 10371 } 10372 10373 dbg_count++; 10374 10375 /* 10376 * If it is already marked deletion, nothing 10377 * else to do. 10378 */ 10379 if (pd->pd_type == PORT_DEVICE_DELETE) { 10380 pd->pd_type = PORT_DEVICE_OLD; 10381 10382 mutex_exit(&pd->pd_mutex); 10383 mutex_exit(&port->fp_mutex); 10384 fp_jobdone(job); 10385 mutex_enter(&port->fp_mutex); 10386 10387 continue; 10388 } 10389 10390 /* 10391 * If it is freshly discovered out of 10392 * the orphan list, nothing else to do 10393 */ 10394 if (pd->pd_type == PORT_DEVICE_NEW) { 10395 pd->pd_flags = PD_IDLE; 10396 10397 mutex_exit(&pd->pd_mutex); 10398 mutex_exit(&port->fp_mutex); 10399 fp_jobdone(job); 10400 mutex_enter(&port->fp_mutex); 10401 10402 continue; 10403 } 10404 10405 pd->pd_flags = PD_IDLE; 10406 d_id = pd->pd_port_id.port_id; 10407 10408 /* 10409 * Explicitly mark all devices OLD; successful 10410 * PLOGI should reset this to either NO_CHANGE 10411 * or CHANGED. 
10412 */ 10413 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10414 pd->pd_type = PORT_DEVICE_OLD; 10415 } 10416 10417 mutex_exit(&pd->pd_mutex); 10418 mutex_exit(&port->fp_mutex); 10419 10420 rval = fp_port_login(port, d_id, job, 10421 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10422 10423 if (rval != FC_SUCCESS) { 10424 fp_jobdone(job); 10425 } 10426 mutex_enter(&port->fp_mutex); 10427 } 10428 } 10429 mutex_exit(&port->fp_mutex); 10430 10431 ASSERT(dbg_count == count); 10432 fp_jobwait(job); 10433 10434 mutex_enter(&port->fp_mutex); 10435 10436 ASSERT(port->fp_statec_busy > 0); 10437 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10438 if (port->fp_statec_busy > 1) { 10439 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10440 } 10441 } 10442 mutex_exit(&port->fp_mutex); 10443 } else { 10444 ASSERT(port->fp_statec_busy > 0); 10445 if (port->fp_statec_busy > 1) { 10446 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10447 } 10448 mutex_exit(&port->fp_mutex); 10449 } 10450 10451 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10452 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10453 10454 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10455 listlen, listlen, KM_SLEEP); 10456 10457 mutex_enter(&port->fp_mutex); 10458 } else { 10459 ASSERT(changelist == NULL && listlen == 0); 10460 mutex_enter(&port->fp_mutex); 10461 if (--port->fp_statec_busy == 0) { 10462 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10463 } 10464 } 10465 } 10466 10467 10468 /* 10469 * Fill out device list for userland ioctl in private loop 10470 */ 10471 static int 10472 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10473 { 10474 int rval; 10475 int count; 10476 int index; 10477 int num_devices; 10478 fc_remote_node_t *node; 10479 fc_port_dev_t *devlist; 10480 int lilp_device_count; 10481 fc_lilpmap_t *lilp_map; 10482 uchar_t *alpa_list; 10483 10484 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10485 10486 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10487 if (port->fp_total_devices > port->fp_dev_count && 10488 num_devices >= port->fp_total_devices) { 10489 job_request_t *job; 10490 10491 mutex_exit(&port->fp_mutex); 10492 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10493 job->job_counter = 1; 10494 10495 mutex_enter(&port->fp_mutex); 10496 fp_get_loopmap(port, job); 10497 mutex_exit(&port->fp_mutex); 10498 10499 fp_jobwait(job); 10500 fctl_dealloc_job(job); 10501 } else { 10502 mutex_exit(&port->fp_mutex); 10503 } 10504 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10505 10506 mutex_enter(&port->fp_mutex); 10507 10508 /* 10509 * Applications are accustomed to getting the device list in 10510 * LILP map order. The HBA firmware usually returns the device 10511 * map in the LILP map order and diagnostic applications would 10512 * prefer to receive in the device list in that order too 10513 */ 10514 lilp_map = &port->fp_lilp_map; 10515 alpa_list = &lilp_map->lilp_alpalist[0]; 10516 10517 /* 10518 * the length field corresponds to the offset in the LILP frame 10519 * which begins with 1. The thing to note here is that the 10520 * lilp_device_count is 1 more than fp->fp_total_devices since 10521 * the host adapter's alpa also shows up in the lilp map. We 10522 * don't however return details of the host adapter since 10523 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10524 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10525 * ioctl to obtain details about the host adapter port. 
10526 */ 10527 lilp_device_count = lilp_map->lilp_length; 10528 10529 for (count = index = 0; index < lilp_device_count && 10530 count < num_devices; index++) { 10531 uint32_t d_id; 10532 fc_remote_port_t *pd; 10533 10534 d_id = alpa_list[index]; 10535 10536 mutex_exit(&port->fp_mutex); 10537 pd = fctl_get_remote_port_by_did(port, d_id); 10538 mutex_enter(&port->fp_mutex); 10539 10540 if (pd != NULL) { 10541 mutex_enter(&pd->pd_mutex); 10542 10543 if (pd->pd_state == PORT_DEVICE_INVALID) { 10544 mutex_exit(&pd->pd_mutex); 10545 continue; 10546 } 10547 10548 devlist[count].dev_state = pd->pd_state; 10549 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10550 devlist[count].dev_did = pd->pd_port_id; 10551 devlist[count].dev_did.priv_lilp_posit = 10552 (uint8_t)(index & 0xff); 10553 bcopy((caddr_t)pd->pd_fc4types, 10554 (caddr_t)devlist[count].dev_type, 10555 sizeof (pd->pd_fc4types)); 10556 10557 bcopy((caddr_t)&pd->pd_port_name, 10558 (caddr_t)&devlist[count].dev_pwwn, 10559 sizeof (la_wwn_t)); 10560 10561 node = pd->pd_remote_nodep; 10562 mutex_exit(&pd->pd_mutex); 10563 10564 if (node) { 10565 mutex_enter(&node->fd_mutex); 10566 bcopy((caddr_t)&node->fd_node_name, 10567 (caddr_t)&devlist[count].dev_nwwn, 10568 sizeof (la_wwn_t)); 10569 mutex_exit(&node->fd_mutex); 10570 } 10571 count++; 10572 } 10573 } 10574 10575 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10576 sizeof (count), mode)) { 10577 rval = FC_FAILURE; 10578 } 10579 10580 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10581 sizeof (fc_port_dev_t) * num_devices, mode)) { 10582 rval = FC_FAILURE; 10583 } else { 10584 rval = FC_SUCCESS; 10585 } 10586 10587 kmem_free(devlist, sizeof (*devlist) * num_devices); 10588 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10589 10590 return (rval); 10591 } 10592 10593 10594 /* 10595 * Completion function for responses to unsolicited commands 10596 */ 10597 static void 10598 fp_unsol_intr(fc_packet_t *pkt) 10599 { 10600 fp_cmd_t *cmd; 10601 fc_local_port_t *port; 10602 10603 cmd = pkt->pkt_ulp_private; 10604 port = cmd->cmd_port; 10605 10606 mutex_enter(&port->fp_mutex); 10607 port->fp_out_fpcmds--; 10608 mutex_exit(&port->fp_mutex); 10609 10610 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10611 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10612 "couldn't post response to unsolicited request;" 10613 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10614 pkt->pkt_resp_fhdr.rx_id); 10615 } 10616 10617 if (cmd == port->fp_els_resp_pkt) { 10618 mutex_enter(&port->fp_mutex); 10619 port->fp_els_resp_pkt_busy = 0; 10620 mutex_exit(&port->fp_mutex); 10621 return; 10622 } 10623 10624 fp_free_pkt(cmd); 10625 } 10626 10627 10628 /* 10629 * solicited LINIT ELS completion function 10630 */ 10631 static void 10632 fp_linit_intr(fc_packet_t *pkt) 10633 { 10634 fp_cmd_t *cmd; 10635 job_request_t *job; 10636 fc_linit_resp_t acc; 10637 fc_local_port_t *port = ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port; 10638 10639 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10640 10641 mutex_enter(&cmd->cmd_port->fp_mutex); 10642 cmd->cmd_port->fp_out_fpcmds--; 10643 mutex_exit(&cmd->cmd_port->fp_mutex); 10644 10645 if (FP_IS_PKT_ERROR(pkt)) { 10646 (void) fp_common_intr(pkt, 1); 10647 return; 10648 } 10649 10650 job = cmd->cmd_job; 10651 10652 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&acc, 10653 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10654 if (acc.status != FC_LINIT_SUCCESS) { 10655 job->job_result = FC_FAILURE; 10656 } else { 10657 job->job_result = FC_SUCCESS; 10658 } 10659 10660 fp_iodone(cmd); 
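	/*
	 * fp_iodone() is expected to complete the job waiting on this
	 * LINIT exchange (for example the FCIO_RESET_LINK path blocked in
	 * fctl_jobwait()) and to clean up the command packet.
	 */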
10661 } 10662 10663 10664 /* 10665 * Decode the unsolicited request; For FC-4 Device and Link data frames 10666 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10667 * ELS requests, submit a request to the job_handler thread to work on it. 10668 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10669 * and save much of the interrupt time processing of unsolicited ELS requests 10670 * and hand it off to the job_handler thread. 10671 */ 10672 static void 10673 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10674 { 10675 uchar_t r_ctl; 10676 uchar_t ls_code; 10677 uint32_t s_id; 10678 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10679 uint32_t cb_arg; 10680 fp_cmd_t *cmd; 10681 fc_local_port_t *port; 10682 job_request_t *job; 10683 fc_remote_port_t *pd; 10684 10685 port = port_handle; 10686 10687 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10688 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10689 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10690 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10691 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10692 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10693 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10694 buf->ub_buffer[0]); 10695 10696 if (type & 0x80000000) { 10697 /* 10698 * Huh ? Nothing much can be done without 10699 * a valid buffer. So just exit. 10700 */ 10701 return; 10702 } 10703 /* 10704 * If the unsolicited interrupts arrive while it isn't 10705 * safe to handle unsolicited callbacks; Drop them, yes, 10706 * drop them on the floor 10707 */ 10708 mutex_enter(&port->fp_mutex); 10709 port->fp_active_ubs++; 10710 if ((port->fp_soft_state & 10711 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10712 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10713 10714 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10715 "not ONLINE. 
s_id=%x, d_id=%x, type=%x, " 10716 "seq_id=%x, ox_id=%x, rx_id=%x" 10717 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10718 buf->ub_frame.type, buf->ub_frame.seq_id, 10719 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10720 10721 ASSERT(port->fp_active_ubs > 0); 10722 if (--(port->fp_active_ubs) == 0) { 10723 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10724 } 10725 10726 mutex_exit(&port->fp_mutex); 10727 10728 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10729 1, &buf->ub_token); 10730 10731 return; 10732 } 10733 10734 r_ctl = buf->ub_frame.r_ctl; 10735 s_id = buf->ub_frame.s_id; 10736 if (port->fp_active_ubs == 1) { 10737 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10738 } 10739 10740 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10741 port->fp_statec_busy) { 10742 mutex_exit(&port->fp_mutex); 10743 pd = fctl_get_remote_port_by_did(port, s_id); 10744 if (pd) { 10745 mutex_enter(&pd->pd_mutex); 10746 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10747 FP_TRACE(FP_NHEAD1(3, 0), 10748 "LOGO for LOGGED IN D_ID %x", 10749 buf->ub_frame.s_id); 10750 pd->pd_state = PORT_DEVICE_VALID; 10751 } 10752 mutex_exit(&pd->pd_mutex); 10753 } 10754 10755 mutex_enter(&port->fp_mutex); 10756 ASSERT(port->fp_active_ubs > 0); 10757 if (--(port->fp_active_ubs) == 0) { 10758 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10759 } 10760 mutex_exit(&port->fp_mutex); 10761 10762 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10763 1, &buf->ub_token); 10764 10765 FP_TRACE(FP_NHEAD1(3, 0), 10766 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10767 buf->ub_frame.s_id); 10768 return; 10769 } 10770 10771 if (port->fp_els_resp_pkt_busy == 0) { 10772 if (r_ctl == R_CTL_ELS_REQ) { 10773 ls_code = buf->ub_buffer[0]; 10774 10775 switch (ls_code) { 10776 case LA_ELS_PLOGI: 10777 case LA_ELS_FLOGI: 10778 port->fp_els_resp_pkt_busy = 1; 10779 mutex_exit(&port->fp_mutex); 10780 fp_i_handle_unsol_els(port, buf); 10781 10782 mutex_enter(&port->fp_mutex); 10783 ASSERT(port->fp_active_ubs > 0); 10784 if (--(port->fp_active_ubs) == 0) { 10785 port->fp_soft_state &= 10786 ~FP_SOFT_IN_UNSOL_CB; 10787 } 10788 mutex_exit(&port->fp_mutex); 10789 port->fp_fca_tran->fca_ub_release( 10790 port->fp_fca_handle, 1, &buf->ub_token); 10791 10792 return; 10793 case LA_ELS_RSCN: 10794 if (++(port)->fp_rscn_count == 10795 FC_INVALID_RSCN_COUNT) { 10796 ++(port)->fp_rscn_count; 10797 } 10798 rscn_count = port->fp_rscn_count; 10799 break; 10800 10801 default: 10802 break; 10803 } 10804 } 10805 } else if ((r_ctl == R_CTL_ELS_REQ) && 10806 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10807 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10808 ++port->fp_rscn_count; 10809 } 10810 rscn_count = port->fp_rscn_count; 10811 } 10812 10813 mutex_exit(&port->fp_mutex); 10814 10815 switch (r_ctl & R_CTL_ROUTING) { 10816 case R_CTL_DEVICE_DATA: 10817 /* 10818 * If the unsolicited buffer is a CT IU, 10819 * have the job_handler thread work on it. 10820 */ 10821 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10822 break; 10823 } 10824 /* FALLTHROUGH */ 10825 10826 case R_CTL_FC4_SVC: { 10827 int sendup = 0; 10828 10829 /* 10830 * If a LOGIN isn't performed before this request 10831 * shut the door on this port with a reply that a 10832 * LOGIN is required. We make an exception however 10833 * for IP broadcast packets and pass them through 10834 * to the IP ULP(s) to handle broadcast requests. 
10835 * This is not a problem for private loop devices 10836 * but for fabric topologies we don't log into the 10837 * remote ports during port initialization and 10838 * the ULPs need to log into requesting ports on 10839 * demand. 10840 */ 10841 pd = fctl_get_remote_port_by_did(port, s_id); 10842 if (pd) { 10843 mutex_enter(&pd->pd_mutex); 10844 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10845 sendup++; 10846 } 10847 mutex_exit(&pd->pd_mutex); 10848 } else if ((pd == NULL) && 10849 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10850 (buf->ub_frame.d_id == 0xffffff || 10851 buf->ub_frame.d_id == 0x00)) { 10852 /* broadcast IP frame - so send it up via the job thread */ 10853 break; 10854 } 10855 10856 /* 10857 * Send all FC4 services via the job thread too 10858 */ 10859 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10860 break; 10861 } 10862 10863 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10864 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10865 return; 10866 } 10867 10868 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10869 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10870 0, KM_NOSLEEP, pd); 10871 if (cmd != NULL) { 10872 fp_els_rjt_init(port, cmd, buf, 10873 FC_ACTION_NON_RETRYABLE, 10874 FC_REASON_LOGIN_REQUIRED, NULL); 10875 10876 if (fp_sendcmd(port, cmd, 10877 port->fp_fca_handle) != FC_SUCCESS) { 10878 fp_free_pkt(cmd); 10879 } 10880 } 10881 } 10882 10883 mutex_enter(&port->fp_mutex); 10884 ASSERT(port->fp_active_ubs > 0); 10885 if (--(port->fp_active_ubs) == 0) { 10886 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10887 } 10888 mutex_exit(&port->fp_mutex); 10889 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10890 1, &buf->ub_token); 10891 10892 return; 10893 } 10894 10895 default: 10896 break; 10897 } 10898 10899 /* 10900 * Submit a Request to the job_handler thread to work 10901 * on the unsolicited request. The potential side effect 10902 * of this is that the unsolicited buffer takes a little 10903 * longer to get released but we save interrupt time in 10904 * the bargain. 10905 */ 10906 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10907 10908 /* 10909 * One way that the rscn_count will get used is described below: 10910 * 10911 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10912 * 2. Before the mutex is released, a copy of it is stored in rscn_count. 10913 * 3. The count is passed to the job thread as JOB_UNSOL_REQUEST (below) 10914 * by overloading the job_cb_arg to pass the rscn_count. 10915 * 4. When one of the routines processing the RSCN picks it up (ex: 10916 * fp_validate_rscn_page()), it passes this count in the map 10917 * structure (as part of the map_rscn_info structure member) to the 10918 * ULPs. 10919 * 5. When ULPs make calls back to the transport (example interfaces for 10920 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10921 * can now pass back this count as part of the fc_packet's 10922 * pkt_ulp_rscn_count member. fcp does this currently. 10923 * 6. When the transport gets a call to transport a command on the wire, it 10924 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10925 * fc_packet. If there is, it will match that info with the current 10926 * rscn_count on that instance of the port. If they don't match up 10927 * then there was a newer RSCN. The ULP gets back an error code which 10928 * informs it of this - FC_DEVICE_BUSY_NEW_RSCN. 10929 * 7. At this point the ULP is free to make up its own mind as to how to 10930 * handle this.
Currently, fcp will reset its retry counters and keep 10931 * retrying the operation it was doing in anticipation of getting a 10932 * new state change call back for the new RSCN. 10933 */ 10934 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10935 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10936 if (job == NULL) { 10937 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10938 "couldn't submit a job to the thread, failing.."); 10939 10940 mutex_enter(&port->fp_mutex); 10941 10942 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10943 --port->fp_rscn_count; 10944 } 10945 10946 ASSERT(port->fp_active_ubs > 0); 10947 if (--(port->fp_active_ubs) == 0) { 10948 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10949 } 10950 10951 mutex_exit(&port->fp_mutex); 10952 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10953 1, &buf->ub_token); 10954 10955 return; 10956 } 10957 job->job_private = (void *)buf; 10958 fctl_enque_job(port, job); 10959 } 10960 10961 10962 /* 10963 * Handle unsolicited requests 10964 */ 10965 static void 10966 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10967 job_request_t *job) 10968 { 10969 uchar_t r_ctl; 10970 uchar_t ls_code; 10971 uint32_t s_id; 10972 fp_cmd_t *cmd; 10973 fc_remote_port_t *pd; 10974 fp_unsol_spec_t *ub_spec; 10975 10976 r_ctl = buf->ub_frame.r_ctl; 10977 s_id = buf->ub_frame.s_id; 10978 10979 switch (r_ctl & R_CTL_ROUTING) { 10980 case R_CTL_EXTENDED_SVC: 10981 if (r_ctl != R_CTL_ELS_REQ) { 10982 break; 10983 } 10984 10985 ls_code = buf->ub_buffer[0]; 10986 switch (ls_code) { 10987 case LA_ELS_LOGO: 10988 case LA_ELS_ADISC: 10989 case LA_ELS_PRLO: 10990 pd = fctl_get_remote_port_by_did(port, s_id); 10991 if (pd == NULL) { 10992 if (!FC_IS_REAL_DEVICE(s_id)) { 10993 break; 10994 } 10995 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10996 break; 10997 } 10998 if ((cmd = fp_alloc_pkt(port, 10999 sizeof (la_els_rjt_t), 0, KM_SLEEP, 11000 NULL)) == NULL) { 11001 /* 11002 * Can this actually fail when 11003 * given KM_SLEEP? (Could be used 11004 * this way in a number of places.) 11005 */ 11006 break; 11007 } 11008 11009 fp_els_rjt_init(port, cmd, buf, 11010 FC_ACTION_NON_RETRYABLE, 11011 FC_REASON_INVALID_LINK_CTRL, job); 11012 11013 if (fp_sendcmd(port, cmd, 11014 port->fp_fca_handle) != FC_SUCCESS) { 11015 fp_free_pkt(cmd); 11016 } 11017 11018 break; 11019 } 11020 if (ls_code == LA_ELS_LOGO) { 11021 fp_handle_unsol_logo(port, buf, pd, job); 11022 } else if (ls_code == LA_ELS_ADISC) { 11023 fp_handle_unsol_adisc(port, buf, pd, job); 11024 } else { 11025 fp_handle_unsol_prlo(port, buf, pd, job); 11026 } 11027 break; 11028 11029 case LA_ELS_PLOGI: 11030 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 11031 break; 11032 11033 case LA_ELS_FLOGI: 11034 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 11035 break; 11036 11037 case LA_ELS_RSCN: 11038 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 11039 break; 11040 11041 default: 11042 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11043 ub_spec->port = port; 11044 ub_spec->buf = buf; 11045 11046 (void) taskq_dispatch(port->fp_taskq, 11047 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11048 return; 11049 } 11050 break; 11051 11052 case R_CTL_BASIC_SVC: 11053 /* 11054 * The unsolicited basic link services could be ABTS 11055 * and RMC (Or even a NOP). Just BA_RJT them until 11056 * such time there arises a need to handle them more 11057 * carefully. 
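 * (Note that only Class 1 and Class 2 frames get the BA_RJT below; unsolicited Class 3 basic link services are simply dropped, since Class 3 is unacknowledged and the originator does not expect a response.)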
11058 */ 11059 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11060 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 11061 0, KM_SLEEP, NULL); 11062 if (cmd != NULL) { 11063 fp_ba_rjt_init(port, cmd, buf, job); 11064 if (fp_sendcmd(port, cmd, 11065 port->fp_fca_handle) != FC_SUCCESS) { 11066 fp_free_pkt(cmd); 11067 } 11068 } 11069 } 11070 break; 11071 11072 case R_CTL_DEVICE_DATA: 11073 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 11074 /* 11075 * Mostly this is of type FC_TYPE_FC_SERVICES. 11076 * As we don't like any Unsolicited FC services 11077 * requests, we would do well to RJT them as 11078 * well. 11079 */ 11080 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11081 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11082 0, KM_SLEEP, NULL); 11083 if (cmd != NULL) { 11084 fp_els_rjt_init(port, cmd, buf, 11085 FC_ACTION_NON_RETRYABLE, 11086 FC_REASON_INVALID_LINK_CTRL, job); 11087 11088 if (fp_sendcmd(port, cmd, 11089 port->fp_fca_handle) != 11090 FC_SUCCESS) { 11091 fp_free_pkt(cmd); 11092 } 11093 } 11094 } 11095 break; 11096 } 11097 /* FALLTHROUGH */ 11098 11099 case R_CTL_FC4_SVC: 11100 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 11101 ub_spec->port = port; 11102 ub_spec->buf = buf; 11103 11104 (void) taskq_dispatch(port->fp_taskq, 11105 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 11106 return; 11107 11108 case R_CTL_LINK_CTL: 11109 /* 11110 * Turn deaf ear on unsolicited link control frames. 11111 * Typical unsolicited link control Frame is an LCR 11112 * (to reset End to End credit to the default login 11113 * value and abort current sequences for all classes) 11114 * An intelligent microcode/firmware should handle 11115 * this transparently at its level and not pass all 11116 * the way up here. 11117 * 11118 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 11119 * or F_BSY. P_RJT is chosen to be the most appropriate 11120 * at this time. 11121 */ 11122 /* FALLTHROUGH */ 11123 11124 default: 11125 /* 11126 * Just reject everything else as an invalid request. 11127 */ 11128 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11129 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11130 0, KM_SLEEP, NULL); 11131 if (cmd != NULL) { 11132 fp_els_rjt_init(port, cmd, buf, 11133 FC_ACTION_NON_RETRYABLE, 11134 FC_REASON_INVALID_LINK_CTRL, job); 11135 11136 if (fp_sendcmd(port, cmd, 11137 port->fp_fca_handle) != FC_SUCCESS) { 11138 fp_free_pkt(cmd); 11139 } 11140 } 11141 } 11142 break; 11143 } 11144 11145 mutex_enter(&port->fp_mutex); 11146 ASSERT(port->fp_active_ubs > 0); 11147 if (--(port->fp_active_ubs) == 0) { 11148 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11149 } 11150 mutex_exit(&port->fp_mutex); 11151 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11152 1, &buf->ub_token); 11153 } 11154 11155 11156 /* 11157 * Prepare a BA_RJT and send it over. 
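 * The reject reuses the exchange of the offending frame: fp_unsol_resp_init() swaps the S_ID/D_ID and copies the OX_ID/RX_ID from the received header, and the payload built here carries reason FC_REASON_CMD_UNSUPPORTED with explanation FC_EXPLN_NONE.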
11158 */ 11159 static void 11160 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11161 job_request_t *job) 11162 { 11163 fc_packet_t *pkt; 11164 la_ba_rjt_t payload; 11165 11166 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11167 11168 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11169 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11170 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11171 cmd->cmd_retry_count = 1; 11172 cmd->cmd_ulp_pkt = NULL; 11173 11174 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11175 cmd->cmd_job = job; 11176 11177 pkt = &cmd->cmd_pkt; 11178 11179 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11180 11181 payload.reserved = 0; 11182 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11183 payload.explanation = FC_EXPLN_NONE; 11184 payload.vendor = 0; 11185 11186 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11187 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11188 } 11189 11190 11191 /* 11192 * Prepare an LS_RJT and send it over 11193 */ 11194 static void 11195 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11196 uchar_t action, uchar_t reason, job_request_t *job) 11197 { 11198 fc_packet_t *pkt; 11199 la_els_rjt_t payload; 11200 11201 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11202 11203 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11204 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11205 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11206 cmd->cmd_retry_count = 1; 11207 cmd->cmd_ulp_pkt = NULL; 11208 11209 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11210 cmd->cmd_job = job; 11211 11212 pkt = &cmd->cmd_pkt; 11213 11214 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11215 11216 payload.ls_code.ls_code = LA_ELS_RJT; 11217 payload.ls_code.mbz = 0; 11218 payload.action = action; 11219 payload.reason = reason; 11220 payload.reserved = 0; 11221 payload.vu = 0; 11222 11223 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11224 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11225 } 11226 11227 /* 11228 * Function: fp_prlo_acc_init 11229 * 11230 * Description: Initializes a Link Service Accept for a PRLO. 11231 * 11232 * Arguments: *port Local port through which the PRLO was 11233 * received. 11234 * cmd Command that will carry the accept. 11235 * *buf Unsolicited buffer containing the PRLO 11236 * request. 11237 * job Job request. 11238 * sleep Allocation mode. 11239 * 11240 * Return Value: *cmd Command containing the response. 11241 * 11242 * Context: Depends on the parameter sleep. 11243 */ 11244 fp_cmd_t * 11245 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11246 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11247 { 11248 fp_cmd_t *cmd; 11249 fc_packet_t *pkt; 11250 la_els_prlo_t *req; 11251 size_t len; 11252 uint16_t flags; 11253 11254 req = (la_els_prlo_t *)buf->ub_buffer; 11255 len = (size_t)ntohs(req->payload_length); 11256 11257 /* 11258 * The payload of the accept to a PRLO has to be an exact match of 11259 * the payload of the request (with the exception of the ls_code). 11260 */ 11261 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11262 11263 if (cmd) { 11264 /* 11265 * The fp command was successfully allocated.
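 * The ACC payload is built below by editing the request in place and copying it back out, essentially: req->ls_code = LA_ELS_ACC; req->flags = htons((ntohs(req->flags) & ~SP_RESP_CODE_MASK) | SP_RESP_CODE_REQ_EXECUTED);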
11266 */ 11267 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11268 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11269 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11270 cmd->cmd_retry_count = 1; 11271 cmd->cmd_ulp_pkt = NULL; 11272 11273 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11274 cmd->cmd_job = job; 11275 11276 pkt = &cmd->cmd_pkt; 11277 11278 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11279 FC_TYPE_EXTENDED_LS); 11280 11281 /* The code is overwritten for the copy. */ 11282 req->ls_code = LA_ELS_ACC; 11283 /* Response code is set. */ 11284 flags = ntohs(req->flags); 11285 flags &= ~SP_RESP_CODE_MASK; 11286 flags |= SP_RESP_CODE_REQ_EXECUTED; 11287 req->flags = htons(flags); 11288 11289 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)req, 11290 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11291 } 11292 return (cmd); 11293 } 11294 11295 /* 11296 * Prepare an ACC response to an ELS request 11297 */ 11298 static void 11299 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11300 job_request_t *job) 11301 { 11302 fc_packet_t *pkt; 11303 ls_code_t payload; 11304 11305 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11306 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11307 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11308 cmd->cmd_retry_count = 1; 11309 cmd->cmd_ulp_pkt = NULL; 11310 11311 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11312 cmd->cmd_job = job; 11313 11314 pkt = &cmd->cmd_pkt; 11315 11316 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11317 11318 payload.ls_code = LA_ELS_ACC; 11319 payload.mbz = 0; 11320 11321 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 11322 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11323 } 11324 11325 /* 11326 * Unsolicited PRLO handler 11327 * 11328 * A Process Logout should be handled by the ULP that established it. However, 11329 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11330 * when a device implicitly logs out an initiator (for whatever reason) and 11331 * tries to get that initiator to re-establish the connection (PLOGI and PRLI). 11332 * The logical thing to do for the device would be to send a LOGO in response 11333 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11334 * a PRLO instead. 11335 * 11336 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11337 * think that the Port Login has been lost. If we follow the Fibre Channel 11338 * protocol to the letter, a PRLI should be sent after accepting the PRLO. If 11339 * the Port Login has also been lost, the remote port will reject the PRLI 11340 * indicating that we must PLOGI first. The initiator will then turn around and 11341 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11342 * is defined don't allow this scenario to be followed easily. If FCP were to 11343 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11344 * needed would be received by FCP. FCP would have, then, to tell the transport 11345 * (fp) to PLOGI. The problem is, the transport would still think the Port 11346 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11347 * if you think it's not necessary". To work around that difficulty, the PRLO 11348 * is treated by the transport as a LOGO. The downside is that a Port Login 11349 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11350 * has nothing to do with the PRLO) may be impacted.
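 * (Concretely, fp_handle_unsol_prlo() below mirrors fp_handle_unsol_logo(): it ACCepts the request, marks the pd PD_LOGGED_OUT and reports the resulting device state to the ULPs through fp_ulp_devc_cb().)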
However, this is a 11351 * scenario very unlikely to happen. As of today the only ULP in Leadville 11352 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11353 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11354 * unlikely). 11355 */ 11356 static void 11357 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11358 fc_remote_port_t *pd, job_request_t *job) 11359 { 11360 int busy; 11361 int rval; 11362 int retain; 11363 fp_cmd_t *cmd; 11364 fc_portmap_t *listptr; 11365 boolean_t tolerance; 11366 la_els_prlo_t *req; 11367 11368 req = (la_els_prlo_t *)buf->ub_buffer; 11369 11370 if ((ntohs(req->payload_length) != 11371 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11372 (req->page_length != sizeof (service_parameter_page_t))) { 11373 /* 11374 * We are being very restrictive: only one page per 11375 * payload. If that is not the case we reject the ELS, although 11376 * we should really reply indicating that we handle only a single 11377 * page per PRLO. 11378 */ 11379 goto fp_reject_prlo; 11380 } 11381 11382 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11383 /* 11384 * This is in case the payload advertises a size bigger than 11385 * what it really is. 11386 */ 11387 goto fp_reject_prlo; 11388 } 11389 11390 mutex_enter(&port->fp_mutex); 11391 busy = port->fp_statec_busy; 11392 mutex_exit(&port->fp_mutex); 11393 11394 mutex_enter(&pd->pd_mutex); 11395 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11396 if (!busy) { 11397 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11398 pd->pd_state == PORT_DEVICE_INVALID || 11399 pd->pd_flags == PD_ELS_IN_PROGRESS || 11400 pd->pd_type == PORT_DEVICE_OLD) { 11401 busy++; 11402 } 11403 } 11404 11405 if (busy) { 11406 mutex_exit(&pd->pd_mutex); 11407 11408 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11409 "pd=%p - busy", 11410 pd->pd_port_id.port_id, pd); 11411 11412 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11413 goto fp_reject_prlo; 11414 } 11415 } else { 11416 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ?
1 : 0; 11417 11418 if (tolerance) { 11419 fctl_tc_reset(&pd->pd_logo_tc); 11420 retain = 0; 11421 pd->pd_state = PORT_DEVICE_INVALID; 11422 } 11423 11424 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11425 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11426 tolerance, retain); 11427 11428 pd->pd_aux_flags |= PD_LOGGED_OUT; 11429 mutex_exit(&pd->pd_mutex); 11430 11431 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11432 if (cmd == NULL) { 11433 return; 11434 } 11435 11436 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11437 if (rval != FC_SUCCESS) { 11438 fp_free_pkt(cmd); 11439 return; 11440 } 11441 11442 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11443 11444 if (retain) { 11445 fp_unregister_login(pd); 11446 fctl_copy_portmap(listptr, pd); 11447 } else { 11448 uint32_t d_id; 11449 char ww_name[17]; 11450 11451 mutex_enter(&pd->pd_mutex); 11452 d_id = pd->pd_port_id.port_id; 11453 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11454 mutex_exit(&pd->pd_mutex); 11455 11456 FP_TRACE(FP_NHEAD2(9, 0), 11457 "N_x Port with D_ID=%x, PWWN=%s logged out" 11458 " %d times in %d us; Giving up", d_id, ww_name, 11459 FC_LOGO_TOLERANCE_LIMIT, 11460 FC_LOGO_TOLERANCE_TIME_LIMIT); 11461 11462 fp_fillout_old_map(listptr, pd, 0); 11463 listptr->map_type = PORT_DEVICE_OLD; 11464 } 11465 11466 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11467 return; 11468 } 11469 11470 fp_reject_prlo: 11471 11472 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11473 if (cmd != NULL) { 11474 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11475 FC_REASON_INVALID_LINK_CTRL, job); 11476 11477 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11478 fp_free_pkt(cmd); 11479 } 11480 } 11481 } 11482 11483 /* 11484 * Unsolicited LOGO handler 11485 */ 11486 static void 11487 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11488 fc_remote_port_t *pd, job_request_t *job) 11489 { 11490 int busy; 11491 int rval; 11492 int retain; 11493 fp_cmd_t *cmd; 11494 fc_portmap_t *listptr; 11495 boolean_t tolerance; 11496 11497 mutex_enter(&port->fp_mutex); 11498 busy = port->fp_statec_busy; 11499 mutex_exit(&port->fp_mutex); 11500 11501 mutex_enter(&pd->pd_mutex); 11502 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11503 if (!busy) { 11504 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11505 pd->pd_state == PORT_DEVICE_INVALID || 11506 pd->pd_flags == PD_ELS_IN_PROGRESS || 11507 pd->pd_type == PORT_DEVICE_OLD) { 11508 busy++; 11509 } 11510 } 11511 11512 if (busy) { 11513 mutex_exit(&pd->pd_mutex); 11514 11515 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11516 "pd=%p - busy", 11517 pd->pd_port_id.port_id, pd); 11518 11519 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11520 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11521 0, KM_SLEEP, pd); 11522 if (cmd != NULL) { 11523 fp_els_rjt_init(port, cmd, buf, 11524 FC_ACTION_NON_RETRYABLE, 11525 FC_REASON_INVALID_LINK_CTRL, job); 11526 11527 if (fp_sendcmd(port, cmd, 11528 port->fp_fca_handle) != FC_SUCCESS) { 11529 fp_free_pkt(cmd); 11530 } 11531 } 11532 } 11533 } else { 11534 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11535 11536 if (tolerance) { 11537 fctl_tc_reset(&pd->pd_logo_tc); 11538 retain = 0; 11539 pd->pd_state = PORT_DEVICE_INVALID; 11540 } 11541 11542 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11543 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11544 tolerance, retain); 11545 11546 pd->pd_aux_flags |= PD_LOGGED_OUT; 11547 mutex_exit(&pd->pd_mutex); 11548 11549 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11550 KM_SLEEP, pd); 11551 if (cmd == NULL) { 11552 return; 11553 } 11554 11555 fp_els_acc_init(port, cmd, buf, job); 11556 11557 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11558 if (rval != FC_SUCCESS) { 11559 fp_free_pkt(cmd); 11560 return; 11561 } 11562 11563 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11564 11565 if (retain) { 11566 job_request_t *job; 11567 fctl_ns_req_t *ns_cmd; 11568 11569 /* 11570 * when get LOGO, first try to get PID from nameserver 11571 * if failed, then we do not need 11572 * send PLOGI to that remote port 11573 */ 11574 job = fctl_alloc_job( 11575 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11576 11577 if (job != NULL) { 11578 ns_cmd = fctl_alloc_ns_cmd( 11579 sizeof (ns_req_gid_pn_t), 11580 sizeof (ns_resp_gid_pn_t), 11581 sizeof (ns_resp_gid_pn_t), 11582 0, KM_SLEEP); 11583 if (ns_cmd != NULL) { 11584 int ret; 11585 job->job_result = FC_SUCCESS; 11586 ns_cmd->ns_cmd_code = NS_GID_PN; 11587 ((ns_req_gid_pn_t *) 11588 (ns_cmd->ns_cmd_buf))->pwwn = 11589 pd->pd_port_name; 11590 ret = fp_ns_query( 11591 port, ns_cmd, job, 1, KM_SLEEP); 11592 if ((ret != FC_SUCCESS) || 11593 (job->job_result != FC_SUCCESS)) { 11594 fctl_free_ns_cmd(ns_cmd); 11595 fctl_dealloc_job(job); 11596 FP_TRACE(FP_NHEAD2(9, 0), 11597 "NS query failed,", 11598 " delete pd"); 11599 goto delete_pd; 11600 } 11601 fctl_free_ns_cmd(ns_cmd); 11602 } 11603 fctl_dealloc_job(job); 11604 } 11605 fp_unregister_login(pd); 11606 fctl_copy_portmap(listptr, pd); 11607 } else { 11608 uint32_t d_id; 11609 char ww_name[17]; 11610 11611 delete_pd: 11612 mutex_enter(&pd->pd_mutex); 11613 d_id = pd->pd_port_id.port_id; 11614 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11615 mutex_exit(&pd->pd_mutex); 11616 11617 FP_TRACE(FP_NHEAD2(9, 0), 11618 "N_x Port with D_ID=%x, PWWN=%s logged out" 11619 " %d times in %d us; Giving up", d_id, ww_name, 11620 FC_LOGO_TOLERANCE_LIMIT, 11621 FC_LOGO_TOLERANCE_TIME_LIMIT); 11622 11623 fp_fillout_old_map(listptr, pd, 0); 11624 listptr->map_type = PORT_DEVICE_OLD; 11625 } 11626 11627 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11628 } 11629 } 11630 11631 11632 /* 11633 * Perform general purpose preparation of a response to an unsolicited request 11634 */ 11635 static void 11636 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11637 uchar_t r_ctl, uchar_t type) 11638 { 11639 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11640 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11641 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11642 pkt->pkt_cmd_fhdr.type = type; 11643 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11644 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11645 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11646 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11647 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11648 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11649 pkt->pkt_cmd_fhdr.ro = 0; 11650 pkt->pkt_cmd_fhdr.rsvd = 0; 11651 pkt->pkt_comp = fp_unsol_intr; 11652 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11653 pkt->pkt_ub_resp_token = (opaque_t)buf; 11654 } 11655 11656 /* 11657 * 
Immediate handling of unsolicited FLOGI and PLOGI requests. In the 11658 * early development days of public loop soc+ firmware, numerous problems 11659 * were encountered (the details are undocumented and history now) which 11660 * led to the birth of this function. 11661 * 11662 * If a pre-allocated unsolicited response packet is free, send out an 11663 * immediate response, otherwise submit the request to the port thread 11664 * to do the deferred processing. 11665 */ 11666 static void 11667 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11668 { 11669 int sent; 11670 int f_port; 11671 int do_acc; 11672 fp_cmd_t *cmd; 11673 la_els_logi_t *payload; 11674 fc_remote_port_t *pd; 11675 char dww_name[17]; 11676 11677 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11678 11679 cmd = port->fp_els_resp_pkt; 11680 11681 mutex_enter(&port->fp_mutex); 11682 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11683 mutex_exit(&port->fp_mutex); 11684 11685 switch (buf->ub_buffer[0]) { 11686 case LA_ELS_PLOGI: { 11687 int small; 11688 11689 payload = (la_els_logi_t *)buf->ub_buffer; 11690 11691 f_port = FP_IS_F_PORT(payload-> 11692 common_service.cmn_features) ? 1 : 0; 11693 11694 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11695 &payload->nport_ww_name); 11696 pd = fctl_get_remote_port_by_pwwn(port, 11697 &payload->nport_ww_name); 11698 if (pd) { 11699 mutex_enter(&pd->pd_mutex); 11700 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11701 /* 11702 * Most likely this means a cross login is in 11703 * progress or a device about to be yanked out. 11704 * Only accept the plogi if my wwn is smaller. 11705 */ 11706 if (pd->pd_type == PORT_DEVICE_OLD) { 11707 sent = 1; 11708 } 11709 /* 11710 * Stop plogi request (if any) 11711 * attempt from local side to speedup 11712 * the discovery progress. 11713 * Mark the pd as PD_PLOGI_RECEPIENT. 11714 */ 11715 if (f_port == 0 && small < 0) { 11716 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11717 } 11718 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11719 11720 mutex_exit(&pd->pd_mutex); 11721 11722 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11723 "Unsol PLOGI received. PD still exists in the " 11724 "PWWN list. pd=%p PWWN=%s, sent=%x", 11725 pd, dww_name, sent); 11726 11727 if (f_port == 0 && small < 0) { 11728 FP_TRACE(FP_NHEAD1(3, 0), 11729 "fp_i_handle_unsol_els: Mark the pd" 11730 " as plogi recipient, pd=%p, PWWN=%s" 11731 ", sent=%x", 11732 pd, dww_name, sent); 11733 } 11734 } else { 11735 sent = 0; 11736 } 11737 11738 /* 11739 * To avoid Login collisions, accept only if my WWN 11740 * is smaller than the requester (A curious side note 11741 * would be that this rule may not satisfy the PLOGIs 11742 * initiated by the switch from not-so-well known 11743 * ports such as 0xFFFC41) 11744 */ 11745 if ((f_port == 0 && small < 0) || 11746 (((small > 0 && do_acc) || 11747 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11748 if (fp_is_class_supported(port->fp_cos, 11749 buf->ub_class) == FC_FAILURE) { 11750 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11751 cmd->cmd_pkt.pkt_cmdlen = 11752 sizeof (la_els_rjt_t); 11753 cmd->cmd_pkt.pkt_rsplen = 0; 11754 fp_els_rjt_init(port, cmd, buf, 11755 FC_ACTION_NON_RETRYABLE, 11756 FC_REASON_CLASS_NOT_SUPP, NULL); 11757 FP_TRACE(FP_NHEAD1(3, 0), 11758 "fp_i_handle_unsol_els: " 11759 "Unsupported class. 
" 11760 "Rejecting PLOGI"); 11761 11762 } else { 11763 mutex_enter(&port->fp_mutex); 11764 port->fp_els_resp_pkt_busy = 0; 11765 mutex_exit(&port->fp_mutex); 11766 return; 11767 } 11768 } else { 11769 cmd->cmd_pkt.pkt_cmdlen = 11770 sizeof (la_els_logi_t); 11771 cmd->cmd_pkt.pkt_rsplen = 0; 11772 11773 /* 11774 * If fp_port_id is zero and topology is 11775 * Point-to-Point, get the local port id from 11776 * the d_id in the PLOGI request. 11777 * If the outgoing FLOGI hasn't been accepted, 11778 * the topology will be unknown here. But it's 11779 * still safe to save the d_id to fp_port_id, 11780 * just because it will be overwritten later 11781 * if the topology is not Point-to-Point. 11782 */ 11783 mutex_enter(&port->fp_mutex); 11784 if ((port->fp_port_id.port_id == 0) && 11785 (port->fp_topology == FC_TOP_PT_PT || 11786 port->fp_topology == FC_TOP_UNKNOWN)) { 11787 port->fp_port_id.port_id = 11788 buf->ub_frame.d_id; 11789 } 11790 mutex_exit(&port->fp_mutex); 11791 11792 /* 11793 * Sometime later, we should validate 11794 * the service parameters instead of 11795 * just accepting it. 11796 */ 11797 fp_login_acc_init(port, cmd, buf, NULL, 11798 KM_NOSLEEP); 11799 FP_TRACE(FP_NHEAD1(3, 0), 11800 "fp_i_handle_unsol_els: Accepting PLOGI," 11801 " f_port=%d, small=%d, do_acc=%d," 11802 " sent=%d.", f_port, small, do_acc, 11803 sent); 11804 } 11805 } else { 11806 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11807 port->fp_options & FP_SEND_RJT) { 11808 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11809 cmd->cmd_pkt.pkt_rsplen = 0; 11810 fp_els_rjt_init(port, cmd, buf, 11811 FC_ACTION_NON_RETRYABLE, 11812 FC_REASON_LOGICAL_BSY, NULL); 11813 FP_TRACE(FP_NHEAD1(3, 0), 11814 "fp_i_handle_unsol_els: " 11815 "Rejecting PLOGI with Logical Busy." 11816 "Possible Login collision."); 11817 } else { 11818 mutex_enter(&port->fp_mutex); 11819 port->fp_els_resp_pkt_busy = 0; 11820 mutex_exit(&port->fp_mutex); 11821 return; 11822 } 11823 } 11824 break; 11825 } 11826 11827 case LA_ELS_FLOGI: 11828 if (fp_is_class_supported(port->fp_cos, 11829 buf->ub_class) == FC_FAILURE) { 11830 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11831 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11832 cmd->cmd_pkt.pkt_rsplen = 0; 11833 fp_els_rjt_init(port, cmd, buf, 11834 FC_ACTION_NON_RETRYABLE, 11835 FC_REASON_CLASS_NOT_SUPP, NULL); 11836 FP_TRACE(FP_NHEAD1(3, 0), 11837 "fp_i_handle_unsol_els: " 11838 "Unsupported Class. Rejecting FLOGI."); 11839 } else { 11840 mutex_enter(&port->fp_mutex); 11841 port->fp_els_resp_pkt_busy = 0; 11842 mutex_exit(&port->fp_mutex); 11843 return; 11844 } 11845 } else { 11846 mutex_enter(&port->fp_mutex); 11847 if (FC_PORT_STATE_MASK(port->fp_state) != 11848 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11849 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11850 mutex_exit(&port->fp_mutex); 11851 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11852 cmd->cmd_pkt.pkt_cmdlen = 11853 sizeof (la_els_rjt_t); 11854 cmd->cmd_pkt.pkt_rsplen = 0; 11855 fp_els_rjt_init(port, cmd, buf, 11856 FC_ACTION_NON_RETRYABLE, 11857 FC_REASON_INVALID_LINK_CTRL, 11858 NULL); 11859 FP_TRACE(FP_NHEAD1(3, 0), 11860 "fp_i_handle_unsol_els: " 11861 "Invalid Link Ctrl. 
" 11862 "Rejecting FLOGI."); 11863 } else { 11864 mutex_enter(&port->fp_mutex); 11865 port->fp_els_resp_pkt_busy = 0; 11866 mutex_exit(&port->fp_mutex); 11867 return; 11868 } 11869 } else { 11870 mutex_exit(&port->fp_mutex); 11871 cmd->cmd_pkt.pkt_cmdlen = 11872 sizeof (la_els_logi_t); 11873 cmd->cmd_pkt.pkt_rsplen = 0; 11874 /* 11875 * Let's not aggressively validate the N_Port's 11876 * service parameters until PLOGI. Suffice it 11877 * to give a hint that we are an N_Port and we 11878 * are game to some serious stuff here. 11879 */ 11880 fp_login_acc_init(port, cmd, buf, 11881 NULL, KM_NOSLEEP); 11882 FP_TRACE(FP_NHEAD1(3, 0), 11883 "fp_i_handle_unsol_els: " 11884 "Accepting FLOGI."); 11885 } 11886 } 11887 break; 11888 11889 default: 11890 return; 11891 } 11892 11893 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11894 mutex_enter(&port->fp_mutex); 11895 port->fp_els_resp_pkt_busy = 0; 11896 mutex_exit(&port->fp_mutex); 11897 } 11898 } 11899 11900 11901 /* 11902 * Handle unsolicited PLOGI request 11903 */ 11904 static void 11905 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11906 job_request_t *job, int sleep) 11907 { 11908 int sent; 11909 int small; 11910 int f_port; 11911 int do_acc; 11912 fp_cmd_t *cmd; 11913 la_wwn_t *swwn; 11914 la_wwn_t *dwwn; 11915 la_els_logi_t *payload; 11916 fc_remote_port_t *pd; 11917 char dww_name[17]; 11918 11919 payload = (la_els_logi_t *)buf->ub_buffer; 11920 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11921 11922 mutex_enter(&port->fp_mutex); 11923 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11924 mutex_exit(&port->fp_mutex); 11925 11926 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11927 "type=%x, f_ctl=%x" 11928 " seq_id=%x, ox_id=%x, rx_id=%x" 11929 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11930 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11931 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11932 11933 swwn = &port->fp_service_params.nport_ww_name; 11934 dwwn = &payload->nport_ww_name; 11935 small = fctl_wwn_cmp(swwn, dwwn); 11936 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11937 if (pd) { 11938 mutex_enter(&pd->pd_mutex); 11939 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11940 /* 11941 * Most likely this means a cross login is in 11942 * progress or a device about to be yanked out. 11943 * Only accept the plogi if my wwn is smaller. 11944 */ 11945 11946 if (pd->pd_type == PORT_DEVICE_OLD) { 11947 sent = 1; 11948 } 11949 /* 11950 * Stop plogi request (if any) 11951 * attempt from local side to speedup 11952 * the discovery progress. 11953 * Mark the pd as PD_PLOGI_RECEPIENT. 11954 */ 11955 if (f_port == 0 && small < 0) { 11956 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11957 } 11958 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11959 11960 mutex_exit(&pd->pd_mutex); 11961 11962 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11963 " received. PD still exists in the PWWN list. pd=%p " 11964 "PWWN=%s, sent=%x", pd, dww_name, sent); 11965 11966 if (f_port == 0 && small < 0) { 11967 FP_TRACE(FP_NHEAD1(3, 0), 11968 "fp_handle_unsol_plogi: Mark the pd" 11969 " as plogi recipient, pd=%p, PWWN=%s" 11970 ", sent=%x", 11971 pd, dww_name, sent); 11972 } 11973 } else { 11974 sent = 0; 11975 } 11976 11977 /* 11978 * Avoid Login collisions by accepting only if my WWN is smaller. 
11979 * 11980 * A side note: There is no need to start a PLOGI from this end in 11981 * this context if login isn't going to be accepted for the 11982 * above reason as either a LIP (in private loop), RSCN (in 11983 * fabric topology), or an FLOGI (in point to point - Huh ? 11984 * check FC-PH) would normally drive the PLOGI from this end. 11985 * At this point of time there is no need for an inbound PLOGI 11986 * to kick an outbound PLOGI when it is going to be rejected 11987 * for the reason of WWN being smaller. However it isn't hard 11988 * to do that either (when such a need arises, start a timer 11989 * for a duration that extends beyond a normal device discovery 11990 * time and check if an outbound PLOGI did go before that, if 11991 * none fire one) 11992 * 11993 * Unfortunately, as it turned out, during booting, it is possible 11994 * to miss another initiator in the same loop as port driver 11995 * instances are serially attached. While preserving the above 11996 * comments for belly laughs, please kick an outbound PLOGI in 11997 * a non-switch environment (which is a pt pt between N_Ports or 11998 * a private loop) 11999 * 12000 * While preserving the above comments for amusement, send an 12001 * ACC if the PLOGI is going to be rejected for WWN being smaller 12002 * when no discovery is in progress at this end. Turn around 12003 * and make the port device as the PLOGI initiator, so that 12004 * during subsequent link/loop initialization, this end drives 12005 * the PLOGI (In fact both ends do in this particular case, but 12006 * only one wins) 12007 * 12008 * Make sure the PLOGIs initiated by the switch from not-so-well-known 12009 * ports (such as 0xFFFC41) are accepted too. 12010 */ 12011 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 12012 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 12013 if (fp_is_class_supported(port->fp_cos, 12014 buf->ub_class) == FC_FAILURE) { 12015 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12016 cmd = fp_alloc_pkt(port, 12017 sizeof (la_els_logi_t), 0, sleep, pd); 12018 if (cmd == NULL) { 12019 return; 12020 } 12021 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12022 cmd->cmd_pkt.pkt_rsplen = 0; 12023 fp_els_rjt_init(port, cmd, buf, 12024 FC_ACTION_NON_RETRYABLE, 12025 FC_REASON_CLASS_NOT_SUPP, job); 12026 FP_TRACE(FP_NHEAD1(3, 0), 12027 "fp_handle_unsol_plogi: " 12028 "Unsupported class. rejecting PLOGI"); 12029 } 12030 } else { 12031 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12032 0, sleep, pd); 12033 if (cmd == NULL) { 12034 return; 12035 } 12036 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 12037 cmd->cmd_pkt.pkt_rsplen = 0; 12038 12039 /* 12040 * Sometime later, we should validate the service 12041 * parameters instead of just accepting it. 12042 */ 12043 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12044 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12045 "Accepting PLOGI, f_port=%d, small=%d, " 12046 "do_acc=%d, sent=%d.", f_port, small, do_acc, 12047 sent); 12048 12049 /* 12050 * If fp_port_id is zero and topology is 12051 * Point-to-Point, get the local port id from 12052 * the d_id in the PLOGI request. 12053 * If the outgoing FLOGI hasn't been accepted, 12054 * the topology will be unknown here. But it's 12055 * still safe to save the d_id to fp_port_id, 12056 * just because it will be overwritten later 12057 * if the topology is not Point-to-Point. 
12058 */ 12059 mutex_enter(&port->fp_mutex); 12060 if ((port->fp_port_id.port_id == 0) && 12061 (port->fp_topology == FC_TOP_PT_PT || 12062 port->fp_topology == FC_TOP_UNKNOWN)) { 12063 port->fp_port_id.port_id = 12064 buf->ub_frame.d_id; 12065 } 12066 mutex_exit(&port->fp_mutex); 12067 } 12068 } else { 12069 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 12070 port->fp_options & FP_SEND_RJT) { 12071 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12072 0, sleep, pd); 12073 if (cmd == NULL) { 12074 return; 12075 } 12076 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 12077 cmd->cmd_pkt.pkt_rsplen = 0; 12078 /* 12079 * Send out Logical busy to indicate 12080 * the detection of PLOGI collision 12081 */ 12082 fp_els_rjt_init(port, cmd, buf, 12083 FC_ACTION_NON_RETRYABLE, 12084 FC_REASON_LOGICAL_BSY, job); 12085 12086 fc_wwn_to_str(dwwn, dww_name); 12087 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 12088 "Rejecting Unsol PLOGI with Logical Busy." 12089 "possible PLOGI collision. PWWN=%s, sent=%x", 12090 dww_name, sent); 12091 } else { 12092 return; 12093 } 12094 } 12095 12096 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12097 fp_free_pkt(cmd); 12098 } 12099 } 12100 12101 12102 /* 12103 * Handle mischievous turning over of our own FLOGI requests back to 12104 * us by the SOC+ microcode. In other words, look at the class of such 12105 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 12106 * on the floor 12107 */ 12108 static void 12109 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 12110 job_request_t *job, int sleep) 12111 { 12112 uint32_t state; 12113 uint32_t s_id; 12114 fp_cmd_t *cmd; 12115 12116 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 12117 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12118 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12119 0, sleep, NULL); 12120 if (cmd == NULL) { 12121 return; 12122 } 12123 fp_els_rjt_init(port, cmd, buf, 12124 FC_ACTION_NON_RETRYABLE, 12125 FC_REASON_CLASS_NOT_SUPP, job); 12126 } else { 12127 return; 12128 } 12129 } else { 12130 12131 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12132 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12133 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12134 buf->ub_frame.s_id, buf->ub_frame.d_id, 12135 buf->ub_frame.type, buf->ub_frame.f_ctl, 12136 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12137 buf->ub_frame.rx_id, buf->ub_frame.ro); 12138 12139 mutex_enter(&port->fp_mutex); 12140 state = FC_PORT_STATE_MASK(port->fp_state); 12141 s_id = port->fp_port_id.port_id; 12142 mutex_exit(&port->fp_mutex); 12143 12144 if (state != FC_STATE_ONLINE || 12145 (s_id && buf->ub_frame.s_id == s_id)) { 12146 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12147 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12148 0, sleep, NULL); 12149 if (cmd == NULL) { 12150 return; 12151 } 12152 fp_els_rjt_init(port, cmd, buf, 12153 FC_ACTION_NON_RETRYABLE, 12154 FC_REASON_INVALID_LINK_CTRL, job); 12155 FP_TRACE(FP_NHEAD1(3, 0), 12156 "fp_handle_unsol_flogi: " 12157 "Rejecting PLOGI. Invalid Link CTRL"); 12158 } else { 12159 return; 12160 } 12161 } else { 12162 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12163 0, sleep, NULL); 12164 if (cmd == NULL) { 12165 return; 12166 } 12167 /* 12168 * Let's not aggressively validate the N_Port's 12169 * service parameters until PLOGI. Suffice it 12170 * to give a hint that we are an N_Port and we 12171 * are game to some serious stuff here. 
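 * (The ACC sent by fp_login_acc_init() is simply this port's own fp_service_params with the ls_code rewritten to LA_ELS_ACC.)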
12172 */ 12173 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12174 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12175 "Accepting PLOGI"); 12176 } 12177 } 12178 12179 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12180 fp_free_pkt(cmd); 12181 } 12182 } 12183 12184 12185 /* 12186 * Perform PLOGI accept 12187 */ 12188 static void 12189 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12190 job_request_t *job, int sleep) 12191 { 12192 fc_packet_t *pkt; 12193 fc_portmap_t *listptr; 12194 la_els_logi_t payload; 12195 12196 ASSERT(buf != NULL); 12197 12198 /* 12199 * If we are sending ACC to PLOGI and we haven't already 12200 * create port and node device handles, let's create them 12201 * here. 12202 */ 12203 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12204 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12205 int small; 12206 int do_acc; 12207 fc_remote_port_t *pd; 12208 la_els_logi_t *req; 12209 12210 req = (la_els_logi_t *)buf->ub_buffer; 12211 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12212 &req->nport_ww_name); 12213 12214 mutex_enter(&port->fp_mutex); 12215 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12216 mutex_exit(&port->fp_mutex); 12217 12218 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x", 12219 port->fp_port_id.port_id, buf->ub_frame.s_id); 12220 pd = fctl_create_remote_port(port, &req->node_ww_name, 12221 &req->nport_ww_name, buf->ub_frame.s_id, 12222 PD_PLOGI_RECEPIENT, sleep); 12223 if (pd == NULL) { 12224 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12225 "Couldn't create port device for d_id:0x%x", 12226 buf->ub_frame.s_id); 12227 12228 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12229 "couldn't create port device d_id=%x", 12230 buf->ub_frame.s_id); 12231 } else { 12232 /* 12233 * usoc currently returns PLOGIs inline and 12234 * the maximum buffer size is 60 bytes or so. 
12235 * So attempt not to look beyond what is in 12236 * the unsolicited buffer 12237 * 12238 * JNI also traverses this path sometimes 12239 */ 12240 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12241 fp_register_login(NULL, pd, req, buf->ub_class); 12242 } else { 12243 mutex_enter(&pd->pd_mutex); 12244 if (pd->pd_login_count == 0) { 12245 pd->pd_login_count++; 12246 } 12247 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12248 pd->pd_login_class = buf->ub_class; 12249 mutex_exit(&pd->pd_mutex); 12250 } 12251 12252 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12253 if (listptr != NULL) { 12254 fctl_copy_portmap(listptr, pd); 12255 (void) fp_ulp_devc_cb(port, listptr, 12256 1, 1, sleep, 0); 12257 } 12258 12259 if (small > 0 && do_acc) { 12260 mutex_enter(&pd->pd_mutex); 12261 pd->pd_recepient = PD_PLOGI_INITIATOR; 12262 mutex_exit(&pd->pd_mutex); 12263 } 12264 } 12265 } 12266 12267 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12268 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12269 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12270 cmd->cmd_retry_count = 1; 12271 cmd->cmd_ulp_pkt = NULL; 12272 12273 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12274 cmd->cmd_job = job; 12275 12276 pkt = &cmd->cmd_pkt; 12277 12278 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12279 12280 payload = port->fp_service_params; 12281 payload.ls_code.ls_code = LA_ELS_ACC; 12282 12283 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12284 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12285 12286 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12287 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12288 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12289 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id, 12290 buf->ub_bufsize, sizeof (la_els_logi_t), 12291 port->fp_service_params.nport_ww_name.w.naa_id, 12292 port->fp_service_params.nport_ww_name.w.nport_id, 12293 port->fp_service_params.nport_ww_name.w.wwn_hi, 12294 port->fp_service_params.nport_ww_name.w.wwn_lo, 12295 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12296 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12297 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12298 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12299 port->fp_statec_busy); 12300 } 12301 12302 12303 #define RSCN_EVENT_NAME_LEN 256 12304 12305 /* 12306 * Handle RSCNs 12307 */ 12308 static void 12309 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12310 job_request_t *job, int sleep) 12311 { 12312 uint32_t mask; 12313 fp_cmd_t *cmd; 12314 uint32_t count; 12315 int listindex; 12316 int16_t len; 12317 fc_rscn_t *payload; 12318 fc_portmap_t *listptr; 12319 fctl_ns_req_t *ns_cmd; 12320 fc_affected_id_t *page; 12321 caddr_t nvname; 12322 nvlist_t *attr_list = NULL; 12323 12324 mutex_enter(&port->fp_mutex); 12325 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12326 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12327 --port->fp_rscn_count; 12328 } 12329 mutex_exit(&port->fp_mutex); 12330 return; 12331 } 12332 mutex_exit(&port->fp_mutex); 12333 12334 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12335 if (cmd != NULL) { 12336 fp_els_acc_init(port, cmd, buf, job); 12337 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12338 fp_free_pkt(cmd); 12339 } 12340 } 12341 12342 payload = (fc_rscn_t *)buf->ub_buffer; 12343 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12344 ASSERT(payload->rscn_len == 
FP_PORT_IDENTIFIER_LEN); 12345 12346 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12347 12348 if (len <= 0) { 12349 mutex_enter(&port->fp_mutex); 12350 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12351 --port->fp_rscn_count; 12352 } 12353 mutex_exit(&port->fp_mutex); 12354 12355 return; 12356 } 12357 12358 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12359 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12360 12361 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12362 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12363 12364 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12365 12366 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12367 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12368 0, sleep); 12369 if (ns_cmd == NULL) { 12370 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12371 12372 mutex_enter(&port->fp_mutex); 12373 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12374 --port->fp_rscn_count; 12375 } 12376 mutex_exit(&port->fp_mutex); 12377 12378 return; 12379 } 12380 12381 ns_cmd->ns_cmd_code = NS_GPN_ID; 12382 12383 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12384 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12385 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12386 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12387 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12388 12389 /* Only proceed if we can allocate nvname and the nvlist */ 12390 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12391 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12392 KM_NOSLEEP) == DDI_SUCCESS) { 12393 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12394 port->fp_instance) == DDI_SUCCESS && 12395 nvlist_add_byte_array(attr_list, "port-wwn", 12396 port->fp_service_params.nport_ww_name.raw_wwn, 12397 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12398 nvlist_free(attr_list); 12399 attr_list = NULL; 12400 } 12401 } 12402 12403 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12404 /* Add affected page to the event payload */ 12405 if (attr_list != NULL) { 12406 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12407 "affected_page_%d", listindex); 12408 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12409 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12410 /* We don't send a partial event, so dump it */ 12411 nvlist_free(attr_list); 12412 attr_list = NULL; 12413 } 12414 } 12415 /* 12416 * Query the NS to get the Port WWN for this 12417 * affected D_ID. 12418 */ 12419 mask = 0; 12420 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12421 case FC_RSCN_PORT_ADDRESS: 12422 fp_validate_rscn_page(port, page, job, ns_cmd, 12423 listptr, &listindex, sleep); 12424 12425 if (listindex == 0) { 12426 /* 12427 * We essentially did not process this RSCN. 
So, 12428 * ULPs are not going to be called and so we 12429 * decrement the rscn_count 12430 */ 12431 mutex_enter(&port->fp_mutex); 12432 if (--port->fp_rscn_count == 12433 FC_INVALID_RSCN_COUNT) { 12434 --port->fp_rscn_count; 12435 } 12436 mutex_exit(&port->fp_mutex); 12437 } 12438 break; 12439 12440 case FC_RSCN_AREA_ADDRESS: 12441 mask = 0xFFFF00; 12442 /* FALLTHROUGH */ 12443 12444 case FC_RSCN_DOMAIN_ADDRESS: 12445 if (!mask) { 12446 mask = 0xFF0000; 12447 } 12448 fp_validate_area_domain(port, page->aff_d_id, mask, 12449 job, sleep); 12450 break; 12451 12452 case FC_RSCN_FABRIC_ADDRESS: 12453 /* 12454 * We need to discover all the devices on this 12455 * port. 12456 */ 12457 fp_validate_area_domain(port, 0, 0, job, sleep); 12458 break; 12459 12460 default: 12461 break; 12462 } 12463 } 12464 if (attr_list != NULL) { 12465 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12466 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12467 NULL, DDI_SLEEP); 12468 nvlist_free(attr_list); 12469 } else { 12470 FP_TRACE(FP_NHEAD1(9, 0), 12471 "RSCN handled, but event not sent to userland"); 12472 } 12473 if (nvname != NULL) { 12474 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12475 } 12476 12477 if (ns_cmd) { 12478 fctl_free_ns_cmd(ns_cmd); 12479 } 12480 12481 if (listindex) { 12482 #ifdef DEBUG 12483 page = (fc_affected_id_t *)(buf->ub_buffer + 12484 sizeof (fc_rscn_t)); 12485 12486 if (listptr->map_did.port_id != page->aff_d_id) { 12487 FP_TRACE(FP_NHEAD1(9, 0), 12488 "PORT RSCN: processed=%x, reporting=%x", 12489 listptr->map_did.port_id, page->aff_d_id); 12490 } 12491 #endif 12492 12493 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12494 sleep, 0); 12495 } else { 12496 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12497 } 12498 } 12499 12500 12501 /* 12502 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12503 */ 12504 static void 12505 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12506 { 12507 int is_switch; 12508 int initiator; 12509 fc_local_port_t *port; 12510 12511 port = pd->pd_port; 12512 12513 /* This function has the following bunch of assumptions */ 12514 ASSERT(port != NULL); 12515 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12516 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12517 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12518 12519 pd->pd_state = PORT_DEVICE_INVALID; 12520 pd->pd_type = PORT_DEVICE_OLD; 12521 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12522 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12523 12524 fctl_delist_did_table(port, pd); 12525 fctl_delist_pwwn_table(port, pd); 12526 12527 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12528 " removed the PD=%p from DID and PWWN tables", 12529 port, pd->pd_port_id.port_id, pd); 12530 12531 if ((!flag) && port && initiator && is_switch) { 12532 (void) fctl_add_orphan_held(port, pd); 12533 } 12534 fctl_copy_portmap_held(map, pd); 12535 map->map_pd = pd; 12536 } 12537 12538 /* 12539 * Fill out old map for ULPs 12540 */ 12541 static void 12542 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12543 { 12544 int is_switch; 12545 int initiator; 12546 fc_local_port_t *port; 12547 12548 mutex_enter(&pd->pd_mutex); 12549 port = pd->pd_port; 12550 mutex_exit(&pd->pd_mutex); 12551 12552 mutex_enter(&port->fp_mutex); 12553 mutex_enter(&pd->pd_mutex); 12554 12555 pd->pd_state = PORT_DEVICE_INVALID; 12556 pd->pd_type = PORT_DEVICE_OLD; 12557 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 12558 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12559 12560 fctl_delist_did_table(port, pd); 12561 fctl_delist_pwwn_table(port, pd); 12562 12563 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12564 " removed the PD=%p from DID and PWWN tables", 12565 port, pd->pd_port_id.port_id, pd); 12566 12567 mutex_exit(&pd->pd_mutex); 12568 mutex_exit(&port->fp_mutex); 12569 12570 ASSERT(port != NULL); 12571 if ((!flag) && port && initiator && is_switch) { 12572 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12573 } 12574 fctl_copy_portmap(map, pd); 12575 map->map_pd = pd; 12576 } 12577 12578 12579 /* 12580 * Fillout Changed Map for ULPs 12581 */ 12582 static void 12583 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12584 uint32_t *new_did, la_wwn_t *new_pwwn) 12585 { 12586 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12587 12588 pd->pd_type = PORT_DEVICE_CHANGED; 12589 if (new_did) { 12590 pd->pd_port_id.port_id = *new_did; 12591 } 12592 if (new_pwwn) { 12593 pd->pd_port_name = *new_pwwn; 12594 } 12595 mutex_exit(&pd->pd_mutex); 12596 12597 fctl_copy_portmap(map, pd); 12598 12599 mutex_enter(&pd->pd_mutex); 12600 pd->pd_type = PORT_DEVICE_NOCHANGE; 12601 } 12602 12603 12604 /* 12605 * Fillout New Name Server map 12606 */ 12607 static void 12608 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12609 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12610 { 12611 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12612 12613 if (handle) { 12614 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_pwwn, 12615 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12616 DDI_DEV_AUTOINCR); 12617 FC_GET_RSP(port, *handle, (uint8_t *)&port_map->map_nwwn, 12618 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12619 DDI_DEV_AUTOINCR); 12620 FC_GET_RSP(port, *handle, (uint8_t *)port_map->map_fc4_types, 12621 (uint8_t *)gan_resp->gan_fc4types, 12622 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12623 } else { 12624 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12625 sizeof (gan_resp->gan_pwwn)); 12626 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12627 sizeof (gan_resp->gan_nwwn)); 12628 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12629 sizeof (gan_resp->gan_fc4types)); 12630 } 12631 port_map->map_did.port_id = d_id; 12632 port_map->map_did.priv_lilp_posit = 0; 12633 port_map->map_hard_addr.hard_addr = 0; 12634 port_map->map_hard_addr.rsvd = 0; 12635 port_map->map_state = PORT_DEVICE_INVALID; 12636 port_map->map_type = PORT_DEVICE_NEW; 12637 port_map->map_flags = 0; 12638 port_map->map_pd = NULL; 12639 12640 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12641 12642 ASSERT(port != NULL); 12643 } 12644 12645 12646 /* 12647 * Perform LINIT ELS 12648 */ 12649 static int 12650 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12651 job_request_t *job) 12652 { 12653 int rval; 12654 uint32_t d_id; 12655 uint32_t s_id; 12656 uint32_t lfa; 12657 uchar_t class; 12658 uint32_t ret; 12659 fp_cmd_t *cmd; 12660 fc_porttype_t ptype; 12661 fc_packet_t *pkt; 12662 fc_linit_req_t payload; 12663 fc_remote_port_t *pd; 12664 12665 rval = 0; 12666 12667 ASSERT(job != NULL); 12668 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12669 12670 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12671 if (pd == NULL) { 12672 fctl_ns_req_t *ns_cmd; 12673 12674 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12675 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12676 0, sleep); 12677 12678 if (ns_cmd == NULL) { 
12679 return (FC_NOMEM); 12680 } 12681 job->job_result = FC_SUCCESS; 12682 ns_cmd->ns_cmd_code = NS_GID_PN; 12683 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12684 12685 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12686 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12687 fctl_free_ns_cmd(ns_cmd); 12688 return (FC_FAILURE); 12689 } 12690 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12691 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12692 12693 fctl_free_ns_cmd(ns_cmd); 12694 lfa = d_id & 0xFFFF00; 12695 12696 /* 12697 * Given this D_ID, get the port type to see if 12698 * we can do LINIT on the LFA 12699 */ 12700 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12701 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12702 0, sleep); 12703 12704 if (ns_cmd == NULL) { 12705 return (FC_NOMEM); 12706 } 12707 12708 job->job_result = FC_SUCCESS; 12709 ns_cmd->ns_cmd_code = NS_GPT_ID; 12710 12711 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12712 ((ns_req_gpt_id_t *) 12713 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12714 12715 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12716 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12717 fctl_free_ns_cmd(ns_cmd); 12718 return (FC_FAILURE); 12719 } 12720 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12721 12722 fctl_free_ns_cmd(ns_cmd); 12723 12724 switch (ptype.port_type) { 12725 case FC_NS_PORT_NL: 12726 case FC_NS_PORT_F_NL: 12727 case FC_NS_PORT_FL: 12728 break; 12729 12730 default: 12731 return (FC_FAILURE); 12732 } 12733 } else { 12734 mutex_enter(&pd->pd_mutex); 12735 ptype = pd->pd_porttype; 12736 12737 switch (pd->pd_porttype.port_type) { 12738 case FC_NS_PORT_NL: 12739 case FC_NS_PORT_F_NL: 12740 case FC_NS_PORT_FL: 12741 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12742 break; 12743 12744 default: 12745 mutex_exit(&pd->pd_mutex); 12746 return (FC_FAILURE); 12747 } 12748 mutex_exit(&pd->pd_mutex); 12749 } 12750 12751 mutex_enter(&port->fp_mutex); 12752 s_id = port->fp_port_id.port_id; 12753 class = port->fp_ns_login_class; 12754 mutex_exit(&port->fp_mutex); 12755 12756 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12757 sizeof (fc_linit_resp_t), sleep, pd); 12758 if (cmd == NULL) { 12759 return (FC_NOMEM); 12760 } 12761 12762 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12763 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12764 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12765 cmd->cmd_retry_count = fp_retry_count; 12766 cmd->cmd_ulp_pkt = NULL; 12767 12768 pkt = &cmd->cmd_pkt; 12769 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12770 12771 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12772 12773 /* 12774 * How does LIP work by the way ? 12775 * If the L_Port receives three consecutive identical ordered 12776 * sets whose first two characters (fully decoded) are equal to 12777 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12778 * recognize a Loop Initialization Primitive sequence. 
The 12779 * character 3 determines the type of lip: 12780 * LIP(F7) Normal LIP 12781 * LIP(F8) Loop Failure LIP 12782 * 12783 * The possible combination for the 3rd and 4th bytes are: 12784 * F7, F7 Normal Lip - No valid AL_PA 12785 * F8, F8 Loop Failure - No valid AL_PA 12786 * F7, AL_PS Normal Lip - Valid source AL_PA 12787 * F8, AL_PS Loop Failure - Valid source AL_PA 12788 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12789 * And Normal Lip for all other loop members 12790 * 0xFF AL_PS Vendor specific reset of all loop members 12791 * 12792 * Now, it may not always be that we, at the source, may have an 12793 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12794 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12795 * payload we are going to set: 12796 * lip_b3 = 0xF7; Normal LIP 12797 * lip_b4 = 0xF7; No valid source AL_PA 12798 */ 12799 payload.ls_code.ls_code = LA_ELS_LINIT; 12800 payload.ls_code.mbz = 0; 12801 payload.rsvd = 0; 12802 payload.func = 0; /* Let Fabric determine the best way */ 12803 payload.lip_b3 = 0xF7; /* Normal LIP */ 12804 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12805 12806 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 12807 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12808 12809 job->job_counter = 1; 12810 12811 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12812 if (ret == FC_SUCCESS) { 12813 fp_jobwait(job); 12814 rval = job->job_result; 12815 } else { 12816 rval = FC_FAILURE; 12817 fp_free_pkt(cmd); 12818 } 12819 12820 return (rval); 12821 } 12822 12823 12824 /* 12825 * Fill out the device handles with GAN response 12826 */ 12827 static void 12828 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12829 ns_resp_gan_t *gan_resp) 12830 { 12831 fc_remote_node_t *node; 12832 fc_porttype_t type; 12833 fc_local_port_t *port; 12834 12835 ASSERT(pd != NULL); 12836 ASSERT(handle != NULL); 12837 12838 port = pd->pd_port; 12839 12840 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12841 " port_id=%x, sym_len=%d fc4-type=%x", 12842 pd, gan_resp->gan_type_id.rsvd, 12843 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12844 12845 mutex_enter(&pd->pd_mutex); 12846 12847 FC_GET_RSP(port, *handle, (uint8_t *)&type, 12848 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12849 12850 pd->pd_porttype.port_type = type.port_type; 12851 pd->pd_porttype.rsvd = 0; 12852 12853 pd->pd_spn_len = gan_resp->gan_spnlen; 12854 if (pd->pd_spn_len) { 12855 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_spn, 12856 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12857 DDI_DEV_AUTOINCR); 12858 } 12859 12860 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_ip_addr, 12861 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12862 DDI_DEV_AUTOINCR); 12863 FC_GET_RSP(port, *handle, (uint8_t *)&pd->pd_cos, 12864 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12865 DDI_DEV_AUTOINCR); 12866 FC_GET_RSP(port, *handle, (uint8_t *)pd->pd_fc4types, 12867 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12868 DDI_DEV_AUTOINCR); 12869 12870 node = pd->pd_remote_nodep; 12871 mutex_exit(&pd->pd_mutex); 12872 12873 mutex_enter(&node->fd_mutex); 12874 12875 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_ipa, 12876 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12877 DDI_DEV_AUTOINCR); 12878 12879 node->fd_snn_len = gan_resp->gan_snnlen; 12880 if (node->fd_snn_len) { 12881 FC_GET_RSP(port, *handle, (uint8_t *)node->fd_snn, 12882 (uint8_t *)gan_resp->gan_snname, 
node->fd_snn_len, 12883 DDI_DEV_AUTOINCR); 12884 } 12885 12886 mutex_exit(&node->fd_mutex); 12887 } 12888 12889 12890 /* 12891 * Handles all NS Queries (also means that this function 12892 * doesn't handle NS object registration) 12893 */ 12894 static int 12895 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12896 int polled, int sleep) 12897 { 12898 int rval; 12899 fp_cmd_t *cmd; 12900 12901 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12902 12903 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 12904 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x", 12905 port->fp_port_id.port_id, ns_cmd->ns_gan_sid); 12906 } 12907 12908 if (ns_cmd->ns_cmd_size == 0) { 12909 return (FC_FAILURE); 12910 } 12911 12912 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12913 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12914 ns_cmd->ns_resp_size, sleep, NULL); 12915 if (cmd == NULL) { 12916 return (FC_NOMEM); 12917 } 12918 12919 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12920 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12921 12922 if (polled) { 12923 job->job_counter = 1; 12924 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12925 } 12926 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12927 if (rval != FC_SUCCESS) { 12928 job->job_result = rval; 12929 fp_iodone(cmd); 12930 if (polled == 0) { 12931 /* 12932 * Return FC_SUCCESS to indicate that 12933 * fp_iodone is performed already. 12934 */ 12935 rval = FC_SUCCESS; 12936 } 12937 } 12938 12939 if (polled) { 12940 fp_jobwait(job); 12941 rval = job->job_result; 12942 } 12943 12944 return (rval); 12945 } 12946 12947 12948 /* 12949 * Initialize Common Transport request 12950 */ 12951 static void 12952 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12953 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12954 uint16_t resp_len, job_request_t *job) 12955 { 12956 uint32_t s_id; 12957 uchar_t class; 12958 fc_packet_t *pkt; 12959 fc_ct_header_t ct; 12960 12961 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12962 12963 mutex_enter(&port->fp_mutex); 12964 s_id = port->fp_port_id.port_id; 12965 class = port->fp_ns_login_class; 12966 mutex_exit(&port->fp_mutex); 12967 12968 cmd->cmd_job = job; 12969 cmd->cmd_private = ns_cmd; 12970 pkt = &cmd->cmd_pkt; 12971 12972 ct.ct_rev = CT_REV; 12973 ct.ct_inid = 0; 12974 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12975 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12976 ct.ct_options = 0; 12977 ct.ct_reserved1 = 0; 12978 ct.ct_cmdrsp = cmd_code; 12979 ct.ct_aiusize = resp_len >> 2; 12980 ct.ct_reserved2 = 0; 12981 ct.ct_reason = 0; 12982 ct.ct_expln = 0; 12983 ct.ct_vendor = 0; 12984 12985 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&ct, 12986 (uint8_t *)pkt->pkt_cmd, sizeof (ct), DDI_DEV_AUTOINCR); 12987 12988 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12989 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12990 pkt->pkt_cmd_fhdr.s_id = s_id; 12991 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12992 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12993 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12994 pkt->pkt_cmd_fhdr.seq_id = 0; 12995 pkt->pkt_cmd_fhdr.df_ctl = 0; 12996 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12997 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12998 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12999 pkt->pkt_cmd_fhdr.ro = 0; 13000 pkt->pkt_cmd_fhdr.rsvd = 0; 13001 13002 pkt->pkt_comp = fp_ns_intr; 13003 pkt->pkt_ulp_private = (opaque_t)cmd; 13004 pkt->pkt_timeout = FP_NS_TIMEOUT; 13005 13006 if (cmd_buf) { 13007 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 13008 (uint8_t 
*)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13009 cmd_len, DDI_DEV_AUTOINCR); 13010 } 13011 13012 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 13013 13014 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 13015 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13016 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 13017 cmd->cmd_retry_count = fp_retry_count; 13018 cmd->cmd_ulp_pkt = NULL; 13019 } 13020 13021 13022 /* 13023 * Name Server request interrupt routine 13024 */ 13025 static void 13026 fp_ns_intr(fc_packet_t *pkt) 13027 { 13028 fp_cmd_t *cmd; 13029 fc_local_port_t *port; 13030 fc_ct_header_t resp_hdr; 13031 fc_ct_header_t cmd_hdr; 13032 fctl_ns_req_t *ns_cmd; 13033 13034 cmd = pkt->pkt_ulp_private; 13035 port = cmd->cmd_port; 13036 13037 mutex_enter(&port->fp_mutex); 13038 port->fp_out_fpcmds--; 13039 mutex_exit(&port->fp_mutex); 13040 13041 FC_GET_RSP(port, pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 13042 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 13043 ns_cmd = (fctl_ns_req_t *) 13044 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 13045 if (!FP_IS_PKT_ERROR(pkt)) { 13046 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 13047 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 13048 DDI_DEV_AUTOINCR); 13049 13050 /* 13051 * On x86 architectures, make sure the resp_hdr is big endian. 13052 * This macro is a NOP on sparc architectures mainly because 13053 * we don't want to end up wasting time since the end result 13054 * is going to be the same. 13055 */ 13056 MAKE_BE_32(&resp_hdr); 13057 13058 if (ns_cmd) { 13059 /* 13060 * Always copy out the response CT_HDR 13061 */ 13062 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 13063 sizeof (resp_hdr)); 13064 } 13065 13066 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 13067 pkt->pkt_state = FC_PKT_FS_RJT; 13068 pkt->pkt_reason = resp_hdr.ct_reason; 13069 pkt->pkt_expln = resp_hdr.ct_expln; 13070 } 13071 } 13072 13073 if (FP_IS_PKT_ERROR(pkt)) { 13074 if (ns_cmd) { 13075 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13076 ASSERT(ns_cmd->ns_pd != NULL); 13077 13078 /* Mark it OLD if not already done */ 13079 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13080 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 13081 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13082 } 13083 13084 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13085 fctl_free_ns_cmd(ns_cmd); 13086 ((fp_cmd_t *) 13087 (pkt->pkt_ulp_private))->cmd_private = NULL; 13088 } 13089 13090 } 13091 13092 FP_TRACE(FP_NHEAD2(9, 0), "%x NS failure pkt state=%x" 13093 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X", 13094 port->fp_port_id.port_id, pkt->pkt_state, 13095 pkt->pkt_reason, pkt->pkt_expln, 13096 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp); 13097 13098 (void) fp_common_intr(pkt, 1); 13099 13100 return; 13101 } 13102 13103 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 13104 uint32_t d_id; 13105 fc_local_port_t *port; 13106 fp_cmd_t *cmd; 13107 13108 d_id = pkt->pkt_cmd_fhdr.d_id; 13109 cmd = pkt->pkt_ulp_private; 13110 port = cmd->cmd_port; 13111 FP_TRACE(FP_NHEAD2(9, 0), 13112 "Bogus NS response received for D_ID=%x", d_id); 13113 } 13114 13115 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 13116 fp_gan_handler(pkt, ns_cmd); 13117 return; 13118 } 13119 13120 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 13121 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 13122 if (ns_cmd) { 13123 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 13124 fp_ns_query_handler(pkt, ns_cmd); 13125 return; 13126 } 13127 } 13128 } 13129 13130 fp_iodone(pkt->pkt_ulp_private); 13131 } 13132 13133 13134 /* 13135 * Process NS_GAN response 13136 */ 13137 static 
void 13138 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13139 { 13140 int my_did; 13141 fc_portid_t d_id; 13142 fp_cmd_t *cmd; 13143 fc_local_port_t *port; 13144 fc_remote_port_t *pd; 13145 ns_req_gan_t gan_req; 13146 ns_resp_gan_t *gan_resp; 13147 13148 ASSERT(ns_cmd != NULL); 13149 13150 cmd = pkt->pkt_ulp_private; 13151 port = cmd->cmd_port; 13152 13153 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13154 13155 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&d_id, 13156 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13157 13158 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13159 13160 /* 13161 * In this case the priv_lilp_posit field in reality 13162 * is actually represents the relative position on a private loop. 13163 * So zero it while dealing with Port Identifiers. 13164 */ 13165 d_id.priv_lilp_posit = 0; 13166 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13167 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13168 /* 13169 * We've come a full circle; time to get out. 13170 */ 13171 fp_iodone(cmd); 13172 return; 13173 } 13174 13175 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13176 ns_cmd->ns_gan_sid = d_id.port_id; 13177 } 13178 13179 mutex_enter(&port->fp_mutex); 13180 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0; 13181 mutex_exit(&port->fp_mutex); 13182 13183 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port, 13184 port->fp_port_id.port_id, d_id.port_id); 13185 if (my_did == 0) { 13186 la_wwn_t pwwn; 13187 la_wwn_t nwwn; 13188 13189 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13190 "port=%p, d_id=%x, type_id=%x, " 13191 "pwwn=%x %x %x %x %x %x %x %x, " 13192 "nwwn=%x %x %x %x %x %x %x %x", 13193 port, d_id.port_id, gan_resp->gan_type_id, 13194 13195 gan_resp->gan_pwwn.raw_wwn[0], 13196 gan_resp->gan_pwwn.raw_wwn[1], 13197 gan_resp->gan_pwwn.raw_wwn[2], 13198 gan_resp->gan_pwwn.raw_wwn[3], 13199 gan_resp->gan_pwwn.raw_wwn[4], 13200 gan_resp->gan_pwwn.raw_wwn[5], 13201 gan_resp->gan_pwwn.raw_wwn[6], 13202 gan_resp->gan_pwwn.raw_wwn[7], 13203 13204 gan_resp->gan_nwwn.raw_wwn[0], 13205 gan_resp->gan_nwwn.raw_wwn[1], 13206 gan_resp->gan_nwwn.raw_wwn[2], 13207 gan_resp->gan_nwwn.raw_wwn[3], 13208 gan_resp->gan_nwwn.raw_wwn[4], 13209 gan_resp->gan_nwwn.raw_wwn[5], 13210 gan_resp->gan_nwwn.raw_wwn[6], 13211 gan_resp->gan_nwwn.raw_wwn[7]); 13212 13213 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13214 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13215 DDI_DEV_AUTOINCR); 13216 13217 FC_GET_RSP(port, pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13218 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13219 DDI_DEV_AUTOINCR); 13220 13221 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13222 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_hander create" 13223 "pd %x", port->fp_port_id.port_id, d_id.port_id); 13224 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13225 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13226 } 13227 if (pd != NULL) { 13228 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13229 pd, gan_resp); 13230 } 13231 13232 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13233 *((int *)ns_cmd->ns_data_buf) += 1; 13234 } 13235 13236 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13237 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13238 13239 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13240 fc_port_dev_t *userbuf; 13241 13242 userbuf = ((fc_port_dev_t *) 13243 ns_cmd->ns_data_buf) + 13244 ns_cmd->ns_gan_index++; 13245 13246 userbuf->dev_did = d_id; 13247 13248 
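				/*
				 * Copy the FC-4 type bitmap from the GA_NXT
				 * response into the caller's fc_port_dev_t;
				 * the node/port WWNs and login state are
				 * filled in just below.
				 */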
FC_GET_RSP(port, pkt->pkt_resp_acc, 13249 (uint8_t *)userbuf->dev_type, 13250 (uint8_t *)gan_resp->gan_fc4types, 13251 sizeof (userbuf->dev_type), 13252 DDI_DEV_AUTOINCR); 13253 13254 userbuf->dev_nwwn = nwwn; 13255 userbuf->dev_pwwn = pwwn; 13256 13257 if (pd != NULL) { 13258 mutex_enter(&pd->pd_mutex); 13259 userbuf->dev_state = pd->pd_state; 13260 userbuf->dev_hard_addr = 13261 pd->pd_hard_addr; 13262 mutex_exit(&pd->pd_mutex); 13263 } else { 13264 userbuf->dev_state = 13265 PORT_DEVICE_INVALID; 13266 } 13267 } else if (ns_cmd->ns_flags & 13268 FCTL_NS_BUF_IS_FC_PORTMAP) { 13269 fc_portmap_t *map; 13270 13271 map = ((fc_portmap_t *) 13272 ns_cmd->ns_data_buf) + 13273 ns_cmd->ns_gan_index++; 13274 13275 /* 13276 * First fill it like any new map 13277 * and update the port device info 13278 * below. 13279 */ 13280 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13281 map, gan_resp, d_id.port_id); 13282 if (pd != NULL) { 13283 fctl_copy_portmap(map, pd); 13284 } else { 13285 map->map_state = PORT_DEVICE_INVALID; 13286 map->map_type = PORT_DEVICE_NOCHANGE; 13287 } 13288 } else { 13289 caddr_t dst_ptr; 13290 13291 dst_ptr = ns_cmd->ns_data_buf + 13292 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13293 13294 FC_GET_RSP(port, pkt->pkt_resp_acc, 13295 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13296 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13297 } 13298 } else { 13299 ns_cmd->ns_gan_index++; 13300 } 13301 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13302 fp_iodone(cmd); 13303 return; 13304 } 13305 } 13306 13307 gan_req.pid = d_id; 13308 13309 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13310 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13311 sizeof (gan_req), DDI_DEV_AUTOINCR); 13312 13313 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13314 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13315 fp_iodone(cmd); 13316 } else { 13317 mutex_enter(&port->fp_mutex); 13318 port->fp_out_fpcmds++; 13319 mutex_exit(&port->fp_mutex); 13320 } 13321 } 13322 13323 13324 /* 13325 * Handle NS Query interrupt 13326 */ 13327 static void 13328 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13329 { 13330 fp_cmd_t *cmd; 13331 fc_local_port_t *port; 13332 caddr_t src_ptr; 13333 uint32_t xfer_len; 13334 13335 cmd = pkt->pkt_ulp_private; 13336 port = cmd->cmd_port; 13337 13338 xfer_len = ns_cmd->ns_resp_size; 13339 13340 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13341 ns_cmd->ns_cmd_code, xfer_len); 13342 13343 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13344 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13345 13346 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13347 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13348 } 13349 13350 if (xfer_len <= ns_cmd->ns_data_len) { 13351 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13352 FC_GET_RSP(port, pkt->pkt_resp_acc, 13353 (uint8_t *)ns_cmd->ns_data_buf, 13354 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13355 } 13356 13357 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13358 ASSERT(ns_cmd->ns_pd != NULL); 13359 13360 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13361 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13362 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13363 } 13364 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13365 } 13366 13367 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13368 fctl_free_ns_cmd(ns_cmd); 13369 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13370 } 13371 fp_iodone(cmd); 13372 } 13373 13374 13375 /* 13376 * Handle 
unsolicited ADISC ELS request 13377 */ 13378 static void 13379 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13380 fc_remote_port_t *pd, job_request_t *job) 13381 { 13382 int rval; 13383 fp_cmd_t *cmd; 13384 13385 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13386 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13387 mutex_enter(&pd->pd_mutex); 13388 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13389 mutex_exit(&pd->pd_mutex); 13390 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13391 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13392 0, KM_SLEEP, pd); 13393 if (cmd != NULL) { 13394 fp_els_rjt_init(port, cmd, buf, 13395 FC_ACTION_NON_RETRYABLE, 13396 FC_REASON_INVALID_LINK_CTRL, job); 13397 13398 if (fp_sendcmd(port, cmd, 13399 port->fp_fca_handle) != FC_SUCCESS) { 13400 fp_free_pkt(cmd); 13401 } 13402 } 13403 } 13404 } else { 13405 mutex_exit(&pd->pd_mutex); 13406 /* 13407 * Yes, yes, we don't have a hard address. But we 13408 * we should still respond. Huh ? Visit 21.19.2 13409 * of FC-PH-2 which essentially says that if an 13410 * NL_Port doesn't have a hard address, or if a port 13411 * does not have FC-AL capability, it shall report 13412 * zeroes in this field. 13413 */ 13414 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13415 0, KM_SLEEP, pd); 13416 if (cmd == NULL) { 13417 return; 13418 } 13419 fp_adisc_acc_init(port, cmd, buf, job); 13420 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13421 if (rval != FC_SUCCESS) { 13422 fp_free_pkt(cmd); 13423 } 13424 } 13425 } 13426 13427 13428 /* 13429 * Initialize ADISC response. 13430 */ 13431 static void 13432 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13433 job_request_t *job) 13434 { 13435 fc_packet_t *pkt; 13436 la_els_adisc_t payload; 13437 13438 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13439 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13440 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13441 cmd->cmd_retry_count = 1; 13442 cmd->cmd_ulp_pkt = NULL; 13443 13444 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13445 cmd->cmd_job = job; 13446 13447 pkt = &cmd->cmd_pkt; 13448 13449 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13450 13451 payload.ls_code.ls_code = LA_ELS_ACC; 13452 payload.ls_code.mbz = 0; 13453 13454 mutex_enter(&port->fp_mutex); 13455 payload.nport_id = port->fp_port_id; 13456 payload.hard_addr = port->fp_hard_addr; 13457 mutex_exit(&port->fp_mutex); 13458 13459 payload.port_wwn = port->fp_service_params.nport_ww_name; 13460 payload.node_wwn = port->fp_service_params.node_ww_name; 13461 13462 FC_SET_CMD(port, pkt->pkt_cmd_acc, (uint8_t *)&payload, 13463 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13464 } 13465 13466 13467 /* 13468 * Hold and Install the requested ULP drivers 13469 */ 13470 static void 13471 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13472 { 13473 int len; 13474 int count; 13475 int data_len; 13476 major_t ulp_major; 13477 caddr_t ulp_name; 13478 caddr_t data_ptr; 13479 caddr_t data_buf; 13480 13481 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13482 13483 data_buf = NULL; 13484 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13485 DDI_PROP_DONTPASS, "load-ulp-list", 13486 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13487 return; 13488 } 13489 13490 len = strlen(data_buf); 13491 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13492 13493 data_ptr = data_buf + len + 1; 13494 for (count = 0; count < port->fp_ulp_nload; count++) { 13495 len = strlen(data_ptr) + 1; 13496 
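		/*
		 * Each entry in the "load-ulp-list" property is a NULL
		 * terminated driver name; copy it out, resolve its major
		 * number and modload() it if the name is known.
		 */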
ulp_name = kmem_zalloc(len, KM_SLEEP); 13497 bcopy(data_ptr, ulp_name, len); 13498 13499 ulp_major = ddi_name_to_major(ulp_name); 13500 13501 if (ulp_major != (major_t)-1) { 13502 if (modload("drv", ulp_name) < 0) { 13503 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13504 0, NULL, "failed to load %s", 13505 ulp_name); 13506 } 13507 } else { 13508 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13509 "%s isn't a valid driver", ulp_name); 13510 } 13511 13512 kmem_free(ulp_name, len); 13513 data_ptr += len; /* Skip to next field */ 13514 } 13515 13516 /* 13517 * Free the memory allocated by DDI 13518 */ 13519 if (data_buf != NULL) { 13520 kmem_free(data_buf, data_len); 13521 } 13522 } 13523 13524 13525 /* 13526 * Perform LOGO operation 13527 */ 13528 static int 13529 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13530 { 13531 int rval; 13532 fp_cmd_t *cmd; 13533 13534 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13535 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13536 13537 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13538 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13539 13540 mutex_enter(&port->fp_mutex); 13541 mutex_enter(&pd->pd_mutex); 13542 13543 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13544 ASSERT(pd->pd_login_count == 1); 13545 13546 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13547 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13548 cmd->cmd_flags = 0; 13549 cmd->cmd_retry_count = 1; 13550 cmd->cmd_ulp_pkt = NULL; 13551 13552 fp_logo_init(pd, cmd, job); 13553 13554 mutex_exit(&pd->pd_mutex); 13555 mutex_exit(&port->fp_mutex); 13556 13557 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13558 if (rval != FC_SUCCESS) { 13559 fp_iodone(cmd); 13560 } 13561 13562 return (rval); 13563 } 13564 13565 13566 /* 13567 * Perform Port attach callbacks to registered ULPs 13568 */ 13569 static void 13570 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13571 { 13572 fp_soft_attach_t *att; 13573 13574 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13575 att->att_cmd = cmd; 13576 att->att_port = port; 13577 13578 /* 13579 * We need to remember whether or not fctl_busy_port 13580 * succeeded so we know whether or not to call 13581 * fctl_idle_port when the task is complete. 13582 */ 13583 13584 if (fctl_busy_port(port) == 0) { 13585 att->att_need_pm_idle = B_TRUE; 13586 } else { 13587 att->att_need_pm_idle = B_FALSE; 13588 } 13589 13590 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13591 att, KM_SLEEP); 13592 } 13593 13594 13595 /* 13596 * Forward state change notifications on to interested ULPs. 13597 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13598 * real work. 
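 *
 * The fc_port_clist_t handed to the taskq carries only the new port
 * state and the current topology; no per-device portmap is attached
 * for these notifications (clist_len, clist_size and clist_map are
 * left zeroed).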
13599 */ 13600 static int 13601 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13602 { 13603 fc_port_clist_t *clist; 13604 13605 clist = kmem_zalloc(sizeof (*clist), sleep); 13606 if (clist == NULL) { 13607 return (FC_NOMEM); 13608 } 13609 13610 clist->clist_state = statec; 13611 13612 mutex_enter(&port->fp_mutex); 13613 clist->clist_flags = port->fp_topology; 13614 mutex_exit(&port->fp_mutex); 13615 13616 clist->clist_port = (opaque_t)port; 13617 clist->clist_len = 0; 13618 clist->clist_size = 0; 13619 clist->clist_map = NULL; 13620 13621 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13622 clist, KM_SLEEP); 13623 13624 return (FC_SUCCESS); 13625 } 13626 13627 13628 /* 13629 * Get name server map 13630 */ 13631 static int 13632 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13633 uint32_t *len, uint32_t sid) 13634 { 13635 int ret; 13636 fctl_ns_req_t *ns_cmd; 13637 13638 /* 13639 * Don't let the allocator do anything for response; 13640 * we have have buffer ready to fillout. 13641 */ 13642 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13643 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13644 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13645 13646 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13647 ns_cmd->ns_data_buf = (caddr_t)*map; 13648 13649 ASSERT(ns_cmd != NULL); 13650 13651 ns_cmd->ns_gan_index = 0; 13652 ns_cmd->ns_gan_sid = sid; 13653 ns_cmd->ns_cmd_code = NS_GA_NXT; 13654 ns_cmd->ns_gan_max = *len; 13655 13656 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13657 13658 if (ns_cmd->ns_gan_index != *len) { 13659 *len = ns_cmd->ns_gan_index; 13660 } 13661 ns_cmd->ns_data_len = 0; 13662 ns_cmd->ns_data_buf = NULL; 13663 fctl_free_ns_cmd(ns_cmd); 13664 13665 return (ret); 13666 } 13667 13668 13669 /* 13670 * Create a remote port in Fabric topology by using NS services 13671 */ 13672 static fc_remote_port_t * 13673 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13674 { 13675 int rval; 13676 job_request_t *job; 13677 fctl_ns_req_t *ns_cmd; 13678 fc_remote_port_t *pd; 13679 13680 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13681 13682 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13683 port, d_id); 13684 13685 #ifdef DEBUG 13686 mutex_enter(&port->fp_mutex); 13687 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13688 mutex_exit(&port->fp_mutex); 13689 #endif 13690 13691 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13692 if (job == NULL) { 13693 return (NULL); 13694 } 13695 13696 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13697 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13698 FCTL_NS_NO_DATA_BUF), sleep); 13699 if (ns_cmd == NULL) { 13700 return (NULL); 13701 } 13702 13703 job->job_result = FC_SUCCESS; 13704 ns_cmd->ns_gan_max = 1; 13705 ns_cmd->ns_cmd_code = NS_GA_NXT; 13706 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13707 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13708 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13709 13710 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13711 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13712 fctl_free_ns_cmd(ns_cmd); 13713 13714 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13715 fctl_dealloc_job(job); 13716 return (NULL); 13717 } 13718 fctl_dealloc_job(job); 13719 13720 pd = fctl_get_remote_port_by_did(port, d_id); 13721 13722 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13723 port, d_id, pd); 13724 13725 return (pd); 
13726 } 13727 13728 13729 /* 13730 * Check for the permissions on an ioctl command. If it is required to have an 13731 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13732 * the ioctl command isn't in one of the list built, shut the door on that too. 13733 * 13734 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13735 * to be made sure that users open the port for an exclusive access while 13736 * performing those operations. 13737 * 13738 * This can prevent a casual user from inflicting damage on the port by 13739 * sending these ioctls from multiple processes/threads (there is no good 13740 * reason why one would need to do that) without actually realizing how 13741 * expensive such commands could turn out to be. 13742 * 13743 * It is also important to note that, even with an exclusive access, 13744 * multiple threads can share the same file descriptor and fire down 13745 * commands in parallel. To prevent that the driver needs to make sure 13746 * that such commands aren't in progress already. This is taken care of 13747 * in the FP_EXCL_BUSY bit of fp_flag. 13748 */ 13749 static int 13750 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13751 { 13752 int ret = FC_FAILURE; 13753 int count; 13754 13755 for (count = 0; 13756 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13757 count++) { 13758 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13759 if (fp_perm_list[count].fp_open_flag & open_flag) { 13760 ret = FC_SUCCESS; 13761 } 13762 break; 13763 } 13764 } 13765 13766 return (ret); 13767 } 13768 13769 13770 /* 13771 * Bind Port driver's unsolicited, state change callbacks 13772 */ 13773 static int 13774 fp_bind_callbacks(fc_local_port_t *port) 13775 { 13776 fc_fca_bind_info_t bind_info = {0}; 13777 fc_fca_port_info_t *port_info; 13778 int rval = DDI_SUCCESS; 13779 uint16_t class; 13780 int node_namelen, port_namelen; 13781 char *nname = NULL, *pname = NULL; 13782 13783 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13784 13785 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13786 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13787 "node-name", &nname) != DDI_PROP_SUCCESS) { 13788 FP_TRACE(FP_NHEAD1(1, 0), 13789 "fp_bind_callback fail to get node-name"); 13790 } 13791 if (nname) { 13792 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13793 } 13794 13795 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13796 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13797 "port-name", &pname) != DDI_PROP_SUCCESS) { 13798 FP_TRACE(FP_NHEAD1(1, 0), 13799 "fp_bind_callback fail to get port-name"); 13800 } 13801 if (pname) { 13802 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13803 } 13804 13805 if (port->fp_npiv_type == FC_NPIV_PORT) { 13806 bind_info.port_npiv = 1; 13807 } 13808 13809 /* 13810 * fca_bind_port returns the FCA driver's handle for the local 13811 * port instance. If the port number isn't supported it returns NULL. 13812 * It also sets up callback in the FCA for various 13813 * things like state change, ELS etc.. 
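	 *
	 * On success the FCA also fills in the fc_fca_port_info_t passed
	 * down here: the port state, login (service) parameters, hard
	 * address and HBA attributes it reports are copied into the
	 * fc_local_port_t below.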
13814 */ 13815 bind_info.port_statec_cb = fp_statec_cb; 13816 bind_info.port_unsol_cb = fp_unsol_cb; 13817 bind_info.port_num = port->fp_port_num; 13818 bind_info.port_handle = (opaque_t)port; 13819 13820 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13821 13822 /* 13823 * Hold the port driver mutex as the callbacks are bound until the 13824 * service parameters are properly filled in (in order to be able to 13825 * properly respond to unsolicited ELS requests) 13826 */ 13827 mutex_enter(&port->fp_mutex); 13828 13829 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13830 port->fp_fca_dip, port_info, &bind_info); 13831 13832 if (port->fp_fca_handle == NULL) { 13833 rval = DDI_FAILURE; 13834 goto exit; 13835 } 13836 13837 /* 13838 * Only fcoei will set this bit 13839 */ 13840 if (port_info->pi_port_state & FC_STATE_FCA_IS_NODMA) { 13841 port->fp_soft_state |= FP_SOFT_FCA_IS_NODMA; 13842 port_info->pi_port_state &= ~(FC_STATE_FCA_IS_NODMA); 13843 } 13844 13845 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13846 port->fp_service_params = port_info->pi_login_params; 13847 port->fp_hard_addr = port_info->pi_hard_addr; 13848 13849 /* Copy from the FCA structure to the FP structure */ 13850 port->fp_hba_port_attrs = port_info->pi_attrs; 13851 13852 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13853 port->fp_rnid_init = 1; 13854 bcopy(&port_info->pi_rnid_params.params, 13855 &port->fp_rnid_params, 13856 sizeof (port->fp_rnid_params)); 13857 } else { 13858 port->fp_rnid_init = 0; 13859 } 13860 13861 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13862 if (node_namelen) { 13863 bcopy(&port_info->pi_attrs.sym_node_name, 13864 &port->fp_sym_node_name, 13865 node_namelen); 13866 port->fp_sym_node_namelen = node_namelen; 13867 } 13868 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13869 if (port_namelen) { 13870 bcopy(&port_info->pi_attrs.sym_port_name, 13871 &port->fp_sym_port_name, 13872 port_namelen); 13873 port->fp_sym_port_namelen = port_namelen; 13874 } 13875 13876 /* zero out the normally unused fields right away */ 13877 port->fp_service_params.ls_code.mbz = 0; 13878 port->fp_service_params.ls_code.ls_code = 0; 13879 bzero(&port->fp_service_params.reserved, 13880 sizeof (port->fp_service_params.reserved)); 13881 13882 class = port_info->pi_login_params.class_1.class_opt; 13883 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13884 13885 class = port_info->pi_login_params.class_2.class_opt; 13886 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13887 13888 class = port_info->pi_login_params.class_3.class_opt; 13889 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13890 13891 exit: 13892 if (nname) { 13893 ddi_prop_free(nname); 13894 } 13895 if (pname) { 13896 ddi_prop_free(pname); 13897 } 13898 mutex_exit(&port->fp_mutex); 13899 kmem_free(port_info, sizeof (*port_info)); 13900 13901 return (rval); 13902 } 13903 13904 13905 /* 13906 * Retrieve FCA capabilities 13907 */ 13908 static void 13909 fp_retrieve_caps(fc_local_port_t *port) 13910 { 13911 int rval; 13912 int ub_count; 13913 fc_fcp_dma_t fcp_dma; 13914 fc_reset_action_t action; 13915 fc_dma_behavior_t dma_behavior; 13916 13917 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13918 13919 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13920 FC_CAP_UNSOL_BUF, &ub_count); 13921 13922 switch (rval) { 13923 case FC_CAP_FOUND: 13924 case FC_CAP_SETTABLE: 13925 switch (ub_count) { 13926 case 0: 13927 break; 13928 13929 case -1: 13930 ub_count = fp_unsol_buf_count; 13931 break; 13932 13933 default: 13934 /* 1/4th of total buffers is my share */ 13935 ub_count = 13936 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13937 break; 13938 } 13939 break; 13940 13941 default: 13942 ub_count = 0; 13943 break; 13944 } 13945 13946 mutex_enter(&port->fp_mutex); 13947 port->fp_ub_count = ub_count; 13948 mutex_exit(&port->fp_mutex); 13949 13950 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13951 FC_CAP_POST_RESET_BEHAVIOR, &action); 13952 13953 switch (rval) { 13954 case FC_CAP_FOUND: 13955 case FC_CAP_SETTABLE: 13956 switch (action) { 13957 case FC_RESET_RETURN_NONE: 13958 case FC_RESET_RETURN_ALL: 13959 case FC_RESET_RETURN_OUTSTANDING: 13960 break; 13961 13962 default: 13963 action = FC_RESET_RETURN_NONE; 13964 break; 13965 } 13966 break; 13967 13968 default: 13969 action = FC_RESET_RETURN_NONE; 13970 break; 13971 } 13972 mutex_enter(&port->fp_mutex); 13973 port->fp_reset_action = action; 13974 mutex_exit(&port->fp_mutex); 13975 13976 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13977 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13978 13979 switch (rval) { 13980 case FC_CAP_FOUND: 13981 switch (dma_behavior) { 13982 case FC_ALLOW_STREAMING: 13983 /* FALLTHROUGH */ 13984 case FC_NO_STREAMING: 13985 break; 13986 13987 default: 13988 /* 13989 * If capability was found and the value 13990 * was incorrect assume the worst 13991 */ 13992 dma_behavior = FC_NO_STREAMING; 13993 break; 13994 } 13995 break; 13996 13997 default: 13998 /* 13999 * If capability was not defined - allow streaming; existing 14000 * FCAs should not be affected. 14001 */ 14002 dma_behavior = FC_ALLOW_STREAMING; 14003 break; 14004 } 14005 mutex_enter(&port->fp_mutex); 14006 port->fp_dma_behavior = dma_behavior; 14007 mutex_exit(&port->fp_mutex); 14008 14009 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 14010 FC_CAP_FCP_DMA, &fcp_dma); 14011 14012 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 14013 fcp_dma != FC_DVMA_SPACE)) { 14014 fcp_dma = FC_DVMA_SPACE; 14015 } 14016 14017 mutex_enter(&port->fp_mutex); 14018 port->fp_fcp_dma = fcp_dma; 14019 mutex_exit(&port->fp_mutex); 14020 } 14021 14022 14023 /* 14024 * Handle Domain, Area changes in the Fabric. 
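 *
 * The (id, mask) pair is derived from the affected D_ID in the RSCN
 * page (typically 0xFFFF00 for an area address and 0xFF0000 for a
 * domain address); every cached remote port whose (D_ID & mask)
 * matches id is revalidated against the name server below.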
14025 */ 14026 static void 14027 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 14028 job_request_t *job, int sleep) 14029 { 14030 #ifdef DEBUG 14031 uint32_t dcnt; 14032 #endif 14033 int rval; 14034 int send; 14035 int index; 14036 int listindex; 14037 int login; 14038 int job_flags; 14039 char ww_name[17]; 14040 uint32_t d_id; 14041 uint32_t count; 14042 fctl_ns_req_t *ns_cmd; 14043 fc_portmap_t *list; 14044 fc_orphan_t *orp; 14045 fc_orphan_t *norp; 14046 fc_orphan_t *prev; 14047 fc_remote_port_t *pd; 14048 fc_remote_port_t *npd; 14049 struct pwwn_hash *head; 14050 14051 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 14052 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 14053 0, sleep); 14054 if (ns_cmd == NULL) { 14055 mutex_enter(&port->fp_mutex); 14056 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14057 --port->fp_rscn_count; 14058 } 14059 mutex_exit(&port->fp_mutex); 14060 14061 return; 14062 } 14063 ns_cmd->ns_cmd_code = NS_GID_PN; 14064 14065 /* 14066 * We need to get a new count of devices from the 14067 * name server, which will also create any new devices 14068 * as needed. 14069 */ 14070 14071 (void) fp_ns_get_devcount(port, job, 1, sleep); 14072 14073 FP_TRACE(FP_NHEAD1(3, 0), 14074 "fp_validate_area_domain: get_devcount found %d devices", 14075 port->fp_total_devices); 14076 14077 mutex_enter(&port->fp_mutex); 14078 14079 for (count = index = 0; index < pwwn_table_size; index++) { 14080 head = &port->fp_pwwn_table[index]; 14081 pd = head->pwwn_head; 14082 while (pd != NULL) { 14083 mutex_enter(&pd->pd_mutex); 14084 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14085 if ((pd->pd_port_id.port_id & mask) == id && 14086 pd->pd_recepient == PD_PLOGI_INITIATOR) { 14087 count++; 14088 pd->pd_type = PORT_DEVICE_OLD; 14089 pd->pd_flags = PD_ELS_MARK; 14090 } 14091 } 14092 mutex_exit(&pd->pd_mutex); 14093 pd = pd->pd_wwn_hnext; 14094 } 14095 } 14096 14097 #ifdef DEBUG 14098 dcnt = count; 14099 #endif /* DEBUG */ 14100 14101 /* 14102 * Since port->fp_orphan_count is declared an 'int' it is 14103 * theoretically possible that the count could go negative. 14104 * 14105 * This would be bad and if that happens we really do want 14106 * to know. 14107 */ 14108 14109 ASSERT(port->fp_orphan_count >= 0); 14110 14111 count += port->fp_orphan_count; 14112 14113 /* 14114 * We add the port->fp_total_devices value to the count 14115 * in the case where our port is newly attached. This is 14116 * because we haven't done any discovery and we don't have 14117 * any orphans in the port's orphan list. If we do not do 14118 * this addition to count then we won't alloc enough kmem 14119 * to do discovery with. 
14120 */ 14121 14122 if (count == 0) { 14123 count += port->fp_total_devices; 14124 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 14125 "0x%x orphans found, using 0x%x", 14126 port->fp_orphan_count, count); 14127 } 14128 14129 mutex_exit(&port->fp_mutex); 14130 14131 /* 14132 * Allocate the change list 14133 */ 14134 14135 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 14136 if (list == NULL) { 14137 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 14138 " Not enough memory to service RSCNs" 14139 " for %d ports, continuing...", count); 14140 14141 fctl_free_ns_cmd(ns_cmd); 14142 14143 mutex_enter(&port->fp_mutex); 14144 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14145 --port->fp_rscn_count; 14146 } 14147 mutex_exit(&port->fp_mutex); 14148 14149 return; 14150 } 14151 14152 /* 14153 * Attempt to validate or invalidate the devices that were 14154 * already in the pwwn hash table. 14155 */ 14156 14157 mutex_enter(&port->fp_mutex); 14158 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14159 head = &port->fp_pwwn_table[index]; 14160 npd = head->pwwn_head; 14161 14162 while ((pd = npd) != NULL) { 14163 npd = pd->pd_wwn_hnext; 14164 14165 mutex_enter(&pd->pd_mutex); 14166 if ((pd->pd_port_id.port_id & mask) == id && 14167 pd->pd_flags == PD_ELS_MARK) { 14168 la_wwn_t *pwwn; 14169 14170 job->job_result = FC_SUCCESS; 14171 14172 ((ns_req_gid_pn_t *) 14173 (ns_cmd->ns_cmd_buf))->pwwn = 14174 pd->pd_port_name; 14175 14176 pwwn = &pd->pd_port_name; 14177 d_id = pd->pd_port_id.port_id; 14178 14179 mutex_exit(&pd->pd_mutex); 14180 mutex_exit(&port->fp_mutex); 14181 14182 rval = fp_ns_query(port, ns_cmd, job, 1, 14183 sleep); 14184 if (rval != FC_SUCCESS) { 14185 fc_wwn_to_str(pwwn, ww_name); 14186 14187 FP_TRACE(FP_NHEAD1(3, 0), 14188 "AREA RSCN: PD disappeared; " 14189 "d_id=%x, PWWN=%s", d_id, ww_name); 14190 14191 FP_TRACE(FP_NHEAD2(9, 0), 14192 "N_x Port with D_ID=%x," 14193 " PWWN=%s disappeared from fabric", 14194 d_id, ww_name); 14195 14196 fp_fillout_old_map(list + listindex++, 14197 pd, 1); 14198 } else { 14199 fctl_copy_portmap(list + listindex++, 14200 pd); 14201 14202 mutex_enter(&pd->pd_mutex); 14203 pd->pd_flags = PD_ELS_IN_PROGRESS; 14204 mutex_exit(&pd->pd_mutex); 14205 } 14206 14207 mutex_enter(&port->fp_mutex); 14208 } else { 14209 mutex_exit(&pd->pd_mutex); 14210 } 14211 } 14212 } 14213 14214 mutex_exit(&port->fp_mutex); 14215 14216 ASSERT(listindex == dcnt); 14217 14218 job->job_counter = listindex; 14219 job_flags = job->job_flags; 14220 job->job_flags |= JOB_TYPE_FP_ASYNC; 14221 14222 /* 14223 * Login (if we were the initiator) or validate devices in the 14224 * port map. 14225 */ 14226 14227 for (index = 0; index < listindex; index++) { 14228 pd = list[index].map_pd; 14229 14230 mutex_enter(&pd->pd_mutex); 14231 ASSERT((pd->pd_port_id.port_id & mask) == id); 14232 14233 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14234 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14235 mutex_exit(&pd->pd_mutex); 14236 fp_jobdone(job); 14237 continue; 14238 } 14239 14240 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14241 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14242 d_id = pd->pd_port_id.port_id; 14243 mutex_exit(&pd->pd_mutex); 14244 14245 if ((d_id & mask) == id && send) { 14246 if (login) { 14247 FP_TRACE(FP_NHEAD1(6, 0), 14248 "RSCN and PLOGI request;" 14249 " pd=%p, job=%p d_id=%x, index=%d", pd, 14250 job, d_id, index); 14251 14252 rval = fp_port_login(port, d_id, job, 14253 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14254 if (rval != FC_SUCCESS) { 14255 mutex_enter(&pd->pd_mutex); 14256 pd->pd_flags = PD_IDLE; 14257 mutex_exit(&pd->pd_mutex); 14258 14259 job->job_result = rval; 14260 fp_jobdone(job); 14261 } 14262 FP_TRACE(FP_NHEAD1(1, 0), 14263 "PLOGI succeeded:no skip(1) for " 14264 "D_ID %x", d_id); 14265 list[index].map_flags |= 14266 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14267 } else { 14268 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14269 " pd=%p, job=%p d_id=%x, index=%d", pd, 14270 job, d_id, index); 14271 14272 rval = fp_ns_validate_device(port, pd, job, 14273 0, sleep); 14274 if (rval != FC_SUCCESS) { 14275 fp_jobdone(job); 14276 } 14277 mutex_enter(&pd->pd_mutex); 14278 pd->pd_flags = PD_IDLE; 14279 mutex_exit(&pd->pd_mutex); 14280 } 14281 } else { 14282 FP_TRACE(FP_NHEAD1(6, 0), 14283 "RSCN and NO request sent; pd=%p," 14284 " d_id=%x, index=%d", pd, d_id, index); 14285 14286 mutex_enter(&pd->pd_mutex); 14287 pd->pd_flags = PD_IDLE; 14288 mutex_exit(&pd->pd_mutex); 14289 14290 fp_jobdone(job); 14291 } 14292 } 14293 14294 if (listindex) { 14295 fctl_jobwait(job); 14296 } 14297 job->job_flags = job_flags; 14298 14299 /* 14300 * Orphan list validation. 14301 */ 14302 mutex_enter(&port->fp_mutex); 14303 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14304 orp != NULL; orp = norp) { 14305 norp = orp->orp_next; 14306 mutex_exit(&port->fp_mutex); 14307 14308 job->job_counter = 1; 14309 job->job_result = FC_SUCCESS; 14310 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14311 14312 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14313 14314 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14315 ((ns_resp_gid_pn_t *) 14316 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14317 14318 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14319 if (rval == FC_SUCCESS) { 14320 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14321 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14322 if (pd != NULL) { 14323 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14324 14325 FP_TRACE(FP_NHEAD1(6, 0), 14326 "RSCN and ORPHAN list " 14327 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14328 14329 FP_TRACE(FP_NHEAD2(6, 0), 14330 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14331 " in fabric", d_id, ww_name); 14332 14333 mutex_enter(&port->fp_mutex); 14334 if (prev) { 14335 prev->orp_next = orp->orp_next; 14336 } else { 14337 ASSERT(orp == port->fp_orphan_list); 14338 port->fp_orphan_list = orp->orp_next; 14339 } 14340 port->fp_orphan_count--; 14341 mutex_exit(&port->fp_mutex); 14342 14343 kmem_free(orp, sizeof (*orp)); 14344 fctl_copy_portmap(list + listindex++, pd); 14345 } else { 14346 prev = orp; 14347 } 14348 } else { 14349 prev = orp; 14350 } 14351 mutex_enter(&port->fp_mutex); 14352 } 14353 mutex_exit(&port->fp_mutex); 14354 14355 /* 14356 * One more pass through the list to delist old devices from 14357 * the d_id and pwwn tables and possibly add to the orphan list. 
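	 *
	 * Anything still marked PORT_DEVICE_OLD at this point is
	 * invalidated and, if this port was the PLOGI initiator, parked
	 * on the orphan list so that a later RSCN can bring it back.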
14358 */ 14359 14360 for (index = 0; index < listindex; index++) { 14361 pd = list[index].map_pd; 14362 ASSERT(pd != NULL); 14363 14364 /* 14365 * Update PLOGI results; For NS validation 14366 * of orphan list, it is redundant 14367 * 14368 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14369 * appropriate as fctl_copy_portmap() will clear map_flags. 14370 */ 14371 if (list[index].map_flags & 14372 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14373 fctl_copy_portmap(list + index, pd); 14374 list[index].map_flags |= 14375 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14376 } else { 14377 fctl_copy_portmap(list + index, pd); 14378 } 14379 14380 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14381 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14382 pd, pd->pd_port_id.port_id, 14383 pd->pd_port_name.raw_wwn[0], 14384 pd->pd_port_name.raw_wwn[1], 14385 pd->pd_port_name.raw_wwn[2], 14386 pd->pd_port_name.raw_wwn[3], 14387 pd->pd_port_name.raw_wwn[4], 14388 pd->pd_port_name.raw_wwn[5], 14389 pd->pd_port_name.raw_wwn[6], 14390 pd->pd_port_name.raw_wwn[7]); 14391 14392 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14393 "results continued, pd=%p type=%x, flags=%x, state=%x", 14394 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14395 14396 mutex_enter(&pd->pd_mutex); 14397 if (pd->pd_type == PORT_DEVICE_OLD) { 14398 int initiator; 14399 14400 pd->pd_flags = PD_IDLE; 14401 initiator = (pd->pd_recepient == 14402 PD_PLOGI_INITIATOR) ? 1 : 0; 14403 14404 mutex_exit(&pd->pd_mutex); 14405 14406 mutex_enter(&port->fp_mutex); 14407 mutex_enter(&pd->pd_mutex); 14408 14409 pd->pd_state = PORT_DEVICE_INVALID; 14410 fctl_delist_did_table(port, pd); 14411 fctl_delist_pwwn_table(port, pd); 14412 14413 mutex_exit(&pd->pd_mutex); 14414 mutex_exit(&port->fp_mutex); 14415 14416 if (initiator) { 14417 (void) fctl_add_orphan(port, pd, sleep); 14418 } 14419 list[index].map_pd = pd; 14420 } else { 14421 ASSERT(pd->pd_flags == PD_IDLE); 14422 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14423 /* 14424 * Reset LOGO tolerance to zero 14425 */ 14426 fctl_tc_reset(&pd->pd_logo_tc); 14427 } 14428 mutex_exit(&pd->pd_mutex); 14429 } 14430 } 14431 14432 if (ns_cmd) { 14433 fctl_free_ns_cmd(ns_cmd); 14434 } 14435 if (listindex) { 14436 (void) fp_ulp_devc_cb(port, list, listindex, count, 14437 sleep, 0); 14438 } else { 14439 kmem_free(list, sizeof (*list) * count); 14440 14441 mutex_enter(&port->fp_mutex); 14442 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14443 --port->fp_rscn_count; 14444 } 14445 mutex_exit(&port->fp_mutex); 14446 } 14447 } 14448 14449 14450 /* 14451 * Work hard to make sense out of an RSCN page. 
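 *
 * The GPN_ID result for the affected D_ID is compared with what is
 * already cached: the cases handled below are the same port device
 * found by both D_ID and PWWN, neither known, a known PWWN arriving
 * with a new D_ID, a known D_ID arriving with a new PWWN, and the odd
 * case where both exist but refer to different port devices.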
14452 */ 14453 static void 14454 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14455 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14456 int *listindex, int sleep) 14457 { 14458 int rval; 14459 char ww_name[17]; 14460 la_wwn_t *pwwn; 14461 fc_remote_port_t *pwwn_pd; 14462 fc_remote_port_t *did_pd; 14463 14464 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14465 14466 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14467 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14468 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14469 14470 if (did_pd != NULL) { 14471 mutex_enter(&did_pd->pd_mutex); 14472 if (did_pd->pd_flags != PD_IDLE) { 14473 mutex_exit(&did_pd->pd_mutex); 14474 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14475 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14476 port, page->aff_d_id, did_pd); 14477 return; 14478 } 14479 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14480 mutex_exit(&did_pd->pd_mutex); 14481 } 14482 14483 job->job_counter = 1; 14484 14485 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14486 14487 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14488 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14489 14490 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14491 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14492 14493 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14494 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14495 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14496 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14497 ns_cmd->ns_resp_hdr.ct_expln); 14498 14499 job->job_counter = 1; 14500 14501 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14502 /* 14503 * What this means is that the D_ID 14504 * disappeared from the Fabric. 14505 */ 14506 if (did_pd == NULL) { 14507 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14508 " NULL PD disappeared, rval=%x", rval); 14509 return; 14510 } 14511 14512 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14513 14514 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14515 (uint32_t)(uintptr_t)job->job_cb_arg; 14516 14517 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14518 14519 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14520 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14521 14522 FP_TRACE(FP_NHEAD2(9, 0), 14523 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14524 14525 FP_TRACE(FP_NHEAD2(9, 0), 14526 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14527 " fabric", page->aff_d_id, ww_name); 14528 14529 mutex_enter(&did_pd->pd_mutex); 14530 did_pd->pd_flags = PD_IDLE; 14531 mutex_exit(&did_pd->pd_mutex); 14532 14533 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14534 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14535 14536 return; 14537 } 14538 14539 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14540 14541 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14542 /* 14543 * There is no change. Do PLOGI again and add it to 14544 * ULP portmap baggage and return. Note: When RSCNs 14545 * arrive with per page states, the need for PLOGI 14546 * can be determined correctly. 
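		 *
		 * Until then the remote port is conservatively logged in
		 * again whenever it is (or was recently) logged in, so
		 * ULPs see a consistent login state after the RSCN.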
14547 */ 14548 mutex_enter(&pwwn_pd->pd_mutex); 14549 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14550 mutex_exit(&pwwn_pd->pd_mutex); 14551 14552 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14553 (uint32_t)(uintptr_t)job->job_cb_arg; 14554 14555 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14556 14557 mutex_enter(&pwwn_pd->pd_mutex); 14558 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14559 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14560 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14561 mutex_exit(&pwwn_pd->pd_mutex); 14562 14563 rval = fp_port_login(port, page->aff_d_id, job, 14564 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14565 if (rval == FC_SUCCESS) { 14566 fp_jobwait(job); 14567 rval = job->job_result; 14568 14569 /* 14570 * Reset LOGO tolerance to zero 14571 * Also we are the PLOGI initiator now. 14572 */ 14573 mutex_enter(&pwwn_pd->pd_mutex); 14574 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14575 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14576 mutex_exit(&pwwn_pd->pd_mutex); 14577 } 14578 14579 if (rval == FC_SUCCESS) { 14580 struct fc_portmap *map = 14581 listptr + *listindex - 1; 14582 14583 FP_TRACE(FP_NHEAD1(1, 0), 14584 "PLOGI succeeded: no skip(2)" 14585 " for D_ID %x", page->aff_d_id); 14586 map->map_flags |= 14587 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14588 } else { 14589 FP_TRACE(FP_NHEAD2(9, rval), 14590 "PLOGI to D_ID=%x failed", page->aff_d_id); 14591 14592 FP_TRACE(FP_NHEAD2(9, 0), 14593 "N_x Port with D_ID=%x, PWWN=%s" 14594 " disappeared from fabric", 14595 page->aff_d_id, ww_name); 14596 14597 fp_fillout_old_map(listptr + 14598 *listindex - 1, pwwn_pd, 0); 14599 } 14600 } else { 14601 mutex_exit(&pwwn_pd->pd_mutex); 14602 } 14603 14604 mutex_enter(&did_pd->pd_mutex); 14605 did_pd->pd_flags = PD_IDLE; 14606 mutex_exit(&did_pd->pd_mutex); 14607 14608 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14609 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14610 job->job_result, pwwn_pd); 14611 14612 return; 14613 } 14614 14615 if (did_pd == NULL && pwwn_pd == NULL) { 14616 14617 fc_orphan_t *orp = NULL; 14618 fc_orphan_t *norp = NULL; 14619 fc_orphan_t *prev = NULL; 14620 14621 /* 14622 * Hunt down the orphan list before giving up. 
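		 *
		 * A matching entry is unlinked from the orphan list here
		 * and freed only after the name server confirms that the
		 * port device can be recreated.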
14623 */ 14624 14625 mutex_enter(&port->fp_mutex); 14626 if (port->fp_orphan_count) { 14627 14628 for (orp = port->fp_orphan_list; orp; orp = norp) { 14629 norp = orp->orp_next; 14630 14631 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14632 prev = orp; 14633 continue; 14634 } 14635 14636 if (prev) { 14637 prev->orp_next = orp->orp_next; 14638 } else { 14639 ASSERT(orp == 14640 port->fp_orphan_list); 14641 port->fp_orphan_list = 14642 orp->orp_next; 14643 } 14644 port->fp_orphan_count--; 14645 break; 14646 } 14647 } 14648 14649 mutex_exit(&port->fp_mutex); 14650 pwwn_pd = fp_create_remote_port_by_ns(port, 14651 page->aff_d_id, sleep); 14652 14653 if (pwwn_pd != NULL) { 14654 14655 if (orp) { 14656 fc_wwn_to_str(&orp->orp_pwwn, 14657 ww_name); 14658 14659 FP_TRACE(FP_NHEAD2(9, 0), 14660 "N_x Port with D_ID=%x," 14661 " PWWN=%s reappeared in fabric", 14662 page->aff_d_id, ww_name); 14663 14664 kmem_free(orp, sizeof (*orp)); 14665 } 14666 14667 (listptr + *listindex)-> 14668 map_rscn_info.ulp_rscn_count = 14669 (uint32_t)(uintptr_t)job->job_cb_arg; 14670 14671 fctl_copy_portmap(listptr + 14672 (*listindex)++, pwwn_pd); 14673 } 14674 14675 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14676 "Case TWO", page->aff_d_id); 14677 14678 return; 14679 } 14680 14681 if (pwwn_pd != NULL && did_pd == NULL) { 14682 uint32_t old_d_id; 14683 uint32_t d_id = page->aff_d_id; 14684 14685 /* 14686 * What this means is there is a new D_ID for this 14687 * Port WWN. Take out the port device off D_ID 14688 * list and put it back with a new D_ID. Perform 14689 * PLOGI if already logged in. 14690 */ 14691 mutex_enter(&port->fp_mutex); 14692 mutex_enter(&pwwn_pd->pd_mutex); 14693 14694 old_d_id = pwwn_pd->pd_port_id.port_id; 14695 14696 fctl_delist_did_table(port, pwwn_pd); 14697 14698 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14699 (uint32_t)(uintptr_t)job->job_cb_arg; 14700 14701 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14702 &d_id, NULL); 14703 fctl_enlist_did_table(port, pwwn_pd); 14704 14705 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14706 " Case THREE, pd=%p," 14707 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14708 14709 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14710 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14711 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14712 14713 mutex_exit(&pwwn_pd->pd_mutex); 14714 mutex_exit(&port->fp_mutex); 14715 14716 FP_TRACE(FP_NHEAD2(9, 0), 14717 "N_x Port with D_ID=%x, PWWN=%s has a new" 14718 " D_ID=%x now", old_d_id, ww_name, d_id); 14719 14720 rval = fp_port_login(port, page->aff_d_id, job, 14721 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14722 if (rval == FC_SUCCESS) { 14723 fp_jobwait(job); 14724 rval = job->job_result; 14725 } 14726 14727 if (rval != FC_SUCCESS) { 14728 fp_fillout_old_map(listptr + 14729 *listindex - 1, pwwn_pd, 0); 14730 } 14731 } else { 14732 mutex_exit(&pwwn_pd->pd_mutex); 14733 mutex_exit(&port->fp_mutex); 14734 } 14735 14736 return; 14737 } 14738 14739 if (pwwn_pd == NULL && did_pd != NULL) { 14740 fc_portmap_t *ptr; 14741 uint32_t len = 1; 14742 char old_ww_name[17]; 14743 14744 mutex_enter(&did_pd->pd_mutex); 14745 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14746 mutex_exit(&did_pd->pd_mutex); 14747 14748 fc_wwn_to_str(pwwn, ww_name); 14749 14750 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14751 (uint32_t)(uintptr_t)job->job_cb_arg; 14752 14753 /* 14754 * What this means is that there is a new Port WWN for 14755 * this D_ID; Mark the Port device as old and provide 
14756 * the new PWWN and D_ID combination as new. 14757 */ 14758 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14759 14760 FP_TRACE(FP_NHEAD2(9, 0), 14761 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now", 14762 page->aff_d_id, old_ww_name, ww_name); 14763 14764 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14765 (uint32_t)(uintptr_t)job->job_cb_arg; 14766 14767 ptr = listptr + (*listindex)++; 14768 14769 job->job_counter = 1; 14770 14771 if (fp_ns_getmap(port, job, &ptr, &len, 14772 page->aff_d_id - 1) != FC_SUCCESS) { 14773 (*listindex)--; 14774 } 14775 14776 mutex_enter(&did_pd->pd_mutex); 14777 did_pd->pd_flags = PD_IDLE; 14778 mutex_exit(&did_pd->pd_mutex); 14779 14780 return; 14781 } 14782 14783 /* 14784 * A weird case of Port WWN and D_ID existence but not matching up 14785 * between them. Trust your instincts - Take the port device handle 14786 * off Port WWN list, fix it with new Port WWN and put it back, In 14787 * the mean time mark the port device corresponding to the old port 14788 * WWN as OLD. 14789 */ 14790 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p," 14791 " did_pd=%p", pwwn_pd, did_pd); 14792 14793 mutex_enter(&port->fp_mutex); 14794 mutex_enter(&pwwn_pd->pd_mutex); 14795 14796 pwwn_pd->pd_type = PORT_DEVICE_OLD; 14797 pwwn_pd->pd_state = PORT_DEVICE_INVALID; 14798 fctl_delist_did_table(port, pwwn_pd); 14799 fctl_delist_pwwn_table(port, pwwn_pd); 14800 14801 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14802 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x", 14803 pwwn_pd->pd_port_id.port_id, 14804 14805 pwwn_pd->pd_port_name.raw_wwn[0], 14806 pwwn_pd->pd_port_name.raw_wwn[1], 14807 pwwn_pd->pd_port_name.raw_wwn[2], 14808 pwwn_pd->pd_port_name.raw_wwn[3], 14809 pwwn_pd->pd_port_name.raw_wwn[4], 14810 pwwn_pd->pd_port_name.raw_wwn[5], 14811 pwwn_pd->pd_port_name.raw_wwn[6], 14812 pwwn_pd->pd_port_name.raw_wwn[7]); 14813 14814 mutex_exit(&pwwn_pd->pd_mutex); 14815 mutex_exit(&port->fp_mutex); 14816 14817 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14818 (uint32_t)(uintptr_t)job->job_cb_arg; 14819 14820 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14821 14822 mutex_enter(&port->fp_mutex); 14823 mutex_enter(&did_pd->pd_mutex); 14824 14825 fctl_delist_pwwn_table(port, did_pd); 14826 14827 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14828 (uint32_t)(uintptr_t)job->job_cb_arg; 14829 14830 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn); 14831 fctl_enlist_pwwn_table(port, did_pd); 14832 14833 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14834 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x", 14835 did_pd->pd_port_id.port_id, did_pd->pd_state, 14836 14837 did_pd->pd_port_name.raw_wwn[0], 14838 did_pd->pd_port_name.raw_wwn[1], 14839 did_pd->pd_port_name.raw_wwn[2], 14840 did_pd->pd_port_name.raw_wwn[3], 14841 did_pd->pd_port_name.raw_wwn[4], 14842 did_pd->pd_port_name.raw_wwn[5], 14843 did_pd->pd_port_name.raw_wwn[6], 14844 did_pd->pd_port_name.raw_wwn[7]); 14845 14846 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14847 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14848 mutex_exit(&did_pd->pd_mutex); 14849 mutex_exit(&port->fp_mutex); 14850 14851 rval = fp_port_login(port, page->aff_d_id, job, 14852 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL); 14853 if (rval == FC_SUCCESS) { 14854 fp_jobwait(job); 14855 if (job->job_result != FC_SUCCESS) { 14856 fp_fillout_old_map(listptr + 14857 *listindex - 1, did_pd, 0); 14858 } 14859 } 
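
/*
 * Illustrative sketch (not part of the original driver): how a GID_PN
 * query built the same way as in fp_ns_validate_device() above turns a
 * Port WWN into the D_ID the name server currently has on record.  The
 * helper name and its arguments (example_port, example_job, example_pwwn)
 * are hypothetical; the extraction of the D_ID from ns_data_buf mirrors
 * what fp_fcio_login() does further below.
 */
#if 0
static uint32_t
fp_example_pwwn_to_did(fc_local_port_t *example_port,
    job_request_t *example_job, la_wwn_t example_pwwn, int sleep)
{
	fctl_ns_req_t	*ns_cmd;
	uint32_t	d_id = 0;

	/* No FCTL_NS_* flags in this simplified, polled sketch */
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 0, sleep);
	if (ns_cmd == NULL) {
		return (0);
	}

	ns_cmd->ns_cmd_code = NS_GID_PN;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = example_pwwn;

	example_job->job_counter = 1;
	example_job->job_result = FC_SUCCESS;

	if (fp_ns_query(example_port, ns_cmd, example_job, 1, sleep) ==
	    FC_SUCCESS && example_job->job_result == FC_SUCCESS) {
		/* The response buffer starts with the assigned port ID */
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
	}

	fctl_free_ns_cmd(ns_cmd);

	return (d_id);
}
#endif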

/*
 * Sanity check the LILP map returned by FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int	count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check if the AL_PA is a valid address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int	count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}


/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
	fp_unsol_spec_t	*ub_spec = (fp_unsol_spec_t *)arg;

	fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
	    ub_spec->buf->ub_frame.type);
	kmem_free(ub_spec, sizeof (*ub_spec));
}
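
/*
 * Illustrative sketch (not part of the original driver): exercising the
 * two sanity checks above.  A loop map holding only addresses from
 * fp_valid_alpas[] (0x01 here) passes, while one containing an unassigned
 * AL_PA such as 0x03 is rejected.  The fields used (lilp_length,
 * lilp_alpalist) are the same ones fp_validate_lilp_map() reads; the
 * helper name is hypothetical.
 */
#if 0
static void
fp_example_check_lilp_map(void)
{
	fc_lilpmap_t	map;

	bzero(&map, sizeof (map));
	map.lilp_length = 1;

	map.lilp_alpalist[0] = 0x01;	/* listed in fp_valid_alpas[] */
	ASSERT(fp_validate_lilp_map(&map) == FC_SUCCESS);

	map.lilp_alpalist[0] = 0x03;	/* not a valid AL_PA */
	ASSERT(fp_validate_lilp_map(&map) == FC_FAILURE);
}
#endif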

/*
 * Perform message reporting in a consistent manner. Unless there is
 * a strong reason NOT to use this function (which is very, very rare),
 * all message reporting should go through this.
 */
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
	caddr_t		buf;
	va_list		ap;

	switch (level) {
	case CE_NOTE:
		if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
			return;
		}
		break;

	case CE_WARN:
		if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
			return;
		}
		break;
	}

	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	(void) sprintf(buf, "fp(%d): ", port->fp_instance);

	va_start(ap, fmt);
	(void) vsprintf(buf + strlen(buf), fmt, ap);
	va_end(ap);

	if (fc_errno) {
		char *errmsg;

		(void) fc_ulp_error(fc_errno, &errmsg);
		(void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
	} else {
		if (pkt) {
			caddr_t	state, reason, action, expln;

			(void) fc_ulp_pkt_error(pkt, &state, &reason,
			    &action, &expln);

			(void) sprintf(buf + strlen(buf),
			    " state=%s, reason=%s", state, reason);

			if (pkt->pkt_resp_resid) {
				(void) sprintf(buf + strlen(buf),
				    " resp resid=%x\n", pkt->pkt_resp_resid);
			}
		}
	}

	switch (dest) {
	case FP_CONSOLE_ONLY:
		cmn_err(level, "^%s", buf);
		break;

	case FP_LOG_ONLY:
		cmn_err(level, "!%s", buf);
		break;

	default:
		cmn_err(level, "%s", buf);
		break;
	}

	kmem_free(buf, 256);
}
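
/*
 * Illustrative usage sketch for fp_printf() (the variables shown are
 * hypothetical):
 *
 *	fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
 *	    "PLOGI to D_ID=%x failed", d_id);
 *
 * CE_NOTE messages are emitted only when FP_WARNING_MESSAGES is set in
 * fp_verbose, and FP_LOG_ONLY routes the text to the system log (the "!"
 * prefix to cmn_err) rather than the console.  The packet argument may be
 * NULL when there is no fc_packet_t to decode.
 */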

static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int			ret;
	uint32_t		d_id;
	la_wwn_t		pwwn;
	fc_remote_port_t	*pd = NULL;
	fc_remote_port_t	*held_pd = NULL;
	fctl_ns_req_t		*ns_cmd;
	fc_portmap_t		*changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}
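
/*
 * Illustrative note (an addition, not from the original source): when the
 * remote port is already logged in, fp_fcio_login() above only increments
 * pd_login_count and no PLOGI goes on the wire.  Callers are therefore
 * expected to balance each successful login with fp_fcio_logout() below,
 * which simply drops one reference while pd_login_count is greater than
 * one and issues the LOGO ELS only on behalf of the last outstanding
 * login.
 */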

static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t		pwwn;
	fp_cmd_t		*cmd;
	fc_portmap_t		*changelist;
	fc_remote_port_t	*pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		fctl_release_remote_port(pd);

		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Send a sysevent for adapter port-level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}


static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}
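
/*
 * Illustrative caller sketch (the subclass constant is an assumption, not
 * taken from this file): the two helpers above publish EC_SUNFC sysevents
 * whose attribute list always carries the HBA port instance and WWN, plus
 * the target WWN and port ID for target-level events.  A call might look
 * like:
 *
 *	fp_log_target_event(port, ESC_SUNFC_TARGET_ADD,
 *	    pd->pd_port_name, pd->pd_port_id.port_id);
 */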

static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}
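
/*
 * Illustrative note (an addition, not from the original source): the
 * translation above is deliberately coarse -- only a remote port that is
 * currently logged in is reported as FC_HBA_PORTSTATE_ONLINE; every other
 * fctl state maps to FC_HBA_PORTSTATE_UNKNOWN for FC-HBA style consumers,
 * for example:
 *
 *	attrs->PortState = fp_map_remote_port_state(pd->pd_state);
 *
 * where "attrs" and its PortState member are hypothetical names here.
 */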