1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * NOT a DDI compliant Sun Fibre Channel port driver(fp) 26 * 27 */ 28 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/param.h> 32 #include <sys/errno.h> 33 #include <sys/uio.h> 34 #include <sys/buf.h> 35 #include <sys/modctl.h> 36 #include <sys/open.h> 37 #include <sys/file.h> 38 #include <sys/kmem.h> 39 #include <sys/poll.h> 40 #include <sys/conf.h> 41 #include <sys/thread.h> 42 #include <sys/var.h> 43 #include <sys/cmn_err.h> 44 #include <sys/stat.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/promif.h> 48 #include <sys/nvpair.h> 49 #include <sys/byteorder.h> 50 #include <sys/scsi/scsi.h> 51 #include <sys/fibre-channel/fc.h> 52 #include <sys/fibre-channel/impl/fc_ulpif.h> 53 #include <sys/fibre-channel/impl/fc_fcaif.h> 54 #include <sys/fibre-channel/impl/fctl_private.h> 55 #include <sys/fibre-channel/impl/fc_portif.h> 56 #include <sys/fibre-channel/impl/fp.h> 57 58 /* These are defined in fctl.c! 
*/ 59 extern int did_table_size; 60 extern int pwwn_table_size; 61 62 static struct cb_ops fp_cb_ops = { 63 fp_open, /* open */ 64 fp_close, /* close */ 65 nodev, /* strategy */ 66 nodev, /* print */ 67 nodev, /* dump */ 68 nodev, /* read */ 69 nodev, /* write */ 70 fp_ioctl, /* ioctl */ 71 nodev, /* devmap */ 72 nodev, /* mmap */ 73 nodev, /* segmap */ 74 nochpoll, /* chpoll */ 75 ddi_prop_op, /* cb_prop_op */ 76 0, /* streamtab */ 77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 78 CB_REV, /* rev */ 79 nodev, /* aread */ 80 nodev /* awrite */ 81 }; 82 83 static struct dev_ops fp_ops = { 84 DEVO_REV, /* build revision */ 85 0, /* reference count */ 86 fp_getinfo, /* getinfo */ 87 nulldev, /* identify - Obsoleted */ 88 nulldev, /* probe */ 89 fp_attach, /* attach */ 90 fp_detach, /* detach */ 91 nodev, /* reset */ 92 &fp_cb_ops, /* cb_ops */ 93 NULL, /* bus_ops */ 94 fp_power, /* power */ 95 ddi_quiesce_not_needed /* quiesce */ 96 }; 97 98 #define FP_VERSION "1.99" 99 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION 100 101 char *fp_version = FP_NAME_VERSION; 102 103 static struct modldrv modldrv = { 104 &mod_driverops, /* Type of Module */ 105 FP_NAME_VERSION, /* Name/Version of fp */ 106 &fp_ops /* driver ops */ 107 }; 108 109 static struct modlinkage modlinkage = { 110 MODREV_1, /* Rev of the loadable modules system */ 111 &modldrv, /* NULL terminated list of */ 112 NULL /* Linkage structures */ 113 }; 114 115 116 117 static uint16_t ns_reg_cmds[] = { 118 NS_RPN_ID, 119 NS_RNN_ID, 120 NS_RCS_ID, 121 NS_RFT_ID, 122 NS_RPT_ID, 123 NS_RSPN_ID, 124 NS_RSNN_NN 125 }; 126 127 struct fp_xlat { 128 uchar_t xlat_state; 129 int xlat_rval; 130 } fp_xlat [] = { 131 { FC_PKT_SUCCESS, FC_SUCCESS }, 132 { FC_PKT_REMOTE_STOP, FC_FAILURE }, 133 { FC_PKT_LOCAL_RJT, FC_FAILURE }, 134 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT }, 135 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT }, 136 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY }, 137 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY }, 138 { FC_PKT_NPORT_BSY, FC_PBUSY }, 139 { FC_PKT_FABRIC_BSY, FC_FBUSY }, 140 { FC_PKT_LS_RJT, FC_FAILURE }, 141 { FC_PKT_BA_RJT, FC_FAILURE }, 142 { FC_PKT_TIMEOUT, FC_FAILURE }, 143 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR }, 144 { FC_PKT_FAILURE, FC_FAILURE }, 145 { FC_PKT_PORT_OFFLINE, FC_OFFLINE } 146 }; 147 148 static uchar_t fp_valid_alpas[] = { 149 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 150 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 151 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 152 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 153 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 154 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, 155 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73, 156 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82, 157 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 158 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, 159 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 160 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 161 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 162 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF 163 }; 164 165 static struct fp_perms { 166 uint16_t fp_ioctl_cmd; 167 uchar_t fp_open_flag; 168 } fp_perm_list [] = { 169 { FCIO_GET_NUM_DEVS, FP_OPEN }, 170 { FCIO_GET_DEV_LIST, FP_OPEN }, 171 { FCIO_GET_SYM_PNAME, FP_OPEN }, 172 { FCIO_GET_SYM_NNAME, FP_OPEN }, 173 { FCIO_SET_SYM_PNAME, FP_EXCL }, 174 { FCIO_SET_SYM_NNAME, FP_EXCL }, 175 { FCIO_GET_LOGI_PARAMS, FP_OPEN }, 176 { FCIO_DEV_LOGIN, FP_EXCL }, 177 { FCIO_DEV_LOGOUT, 
FP_EXCL }, 178 { FCIO_GET_STATE, FP_OPEN }, 179 { FCIO_DEV_REMOVE, FP_EXCL }, 180 { FCIO_GET_FCODE_REV, FP_OPEN }, 181 { FCIO_GET_FW_REV, FP_OPEN }, 182 { FCIO_GET_DUMP_SIZE, FP_OPEN }, 183 { FCIO_FORCE_DUMP, FP_EXCL }, 184 { FCIO_GET_DUMP, FP_OPEN }, 185 { FCIO_GET_TOPOLOGY, FP_OPEN }, 186 { FCIO_RESET_LINK, FP_EXCL }, 187 { FCIO_RESET_HARD, FP_EXCL }, 188 { FCIO_RESET_HARD_CORE, FP_EXCL }, 189 { FCIO_DIAG, FP_OPEN }, 190 { FCIO_NS, FP_EXCL }, 191 { FCIO_DOWNLOAD_FW, FP_EXCL }, 192 { FCIO_DOWNLOAD_FCODE, FP_EXCL }, 193 { FCIO_LINK_STATUS, FP_OPEN }, 194 { FCIO_GET_HOST_PARAMS, FP_OPEN }, 195 { FCIO_GET_NODE_ID, FP_OPEN }, 196 { FCIO_SET_NODE_ID, FP_EXCL }, 197 { FCIO_SEND_NODE_ID, FP_OPEN }, 198 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN }, 199 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN }, 200 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN }, 201 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN }, 202 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN }, 203 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN }, 204 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN }, 205 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN }, 206 { FCIO_DELETE_NPIV_PORT, FP_OPEN }, 207 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN }, 208 { FCIO_CREATE_NPIV_PORT, FP_OPEN }, 209 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN } 210 }; 211 212 static char *fp_pm_comps[] = { 213 "NAME=FC Port", 214 "0=Port Down", 215 "1=Port Up" 216 }; 217 218 219 #ifdef _LITTLE_ENDIAN 220 #define MAKE_BE_32(x) { \ 221 uint32_t *ptr1, i; \ 222 ptr1 = (uint32_t *)(x); \ 223 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \ 224 *ptr1 = BE_32(*ptr1); \ 225 ptr1++; \ 226 } \ 227 } 228 #else 229 #define MAKE_BE_32(x) 230 #endif 231 232 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES); 233 static uint32_t fp_options = 0; 234 235 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY; 236 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */ 237 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */ 238 unsigned int fp_offline_ticker; /* seconds */ 239 240 /* 241 * Driver global variable to anchor the list of soft state structs for 242 * all fp driver instances. Used with the Solaris DDI soft state functions. 
243 */ 244 static void *fp_driver_softstate; 245 246 static clock_t fp_retry_ticks; 247 static clock_t fp_offline_ticks; 248 249 static int fp_retry_ticker; 250 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 251 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 252 253 static int fp_log_size = FP_LOG_SIZE; 254 static int fp_trace = FP_TRACE_DEFAULT; 255 static fc_trace_logq_t *fp_logq = NULL; 256 257 int fp_get_adapter_paths(char *pathList, int count); 258 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 259 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 260 la_wwn_t tgt_pwwn, uint32_t port_id); 261 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 262 static void fp_init_symbolic_names(fc_local_port_t *port); 263 264 265 /* 266 * Perform global initialization 267 */ 268 int 269 _init(void) 270 { 271 int ret; 272 273 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 274 sizeof (struct fc_local_port), 8)) != 0) { 275 return (ret); 276 } 277 278 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 279 ddi_soft_state_fini(&fp_driver_softstate); 280 return (ret); 281 } 282 283 fp_logq = fc_trace_alloc_logq(fp_log_size); 284 285 if ((ret = mod_install(&modlinkage)) != 0) { 286 fc_trace_free_logq(fp_logq); 287 ddi_soft_state_fini(&fp_driver_softstate); 288 scsi_hba_fini(&modlinkage); 289 } 290 291 return (ret); 292 } 293 294 295 /* 296 * Prepare for driver unload 297 */ 298 int 299 _fini(void) 300 { 301 int ret; 302 303 if ((ret = mod_remove(&modlinkage)) == 0) { 304 fc_trace_free_logq(fp_logq); 305 ddi_soft_state_fini(&fp_driver_softstate); 306 scsi_hba_fini(&modlinkage); 307 } 308 309 return (ret); 310 } 311 312 313 /* 314 * Request mod_info() to handle all cases 315 */ 316 int 317 _info(struct modinfo *modinfo) 318 { 319 return (mod_info(&modlinkage, modinfo)); 320 } 321 322 323 /* 324 * fp_attach: 325 * 326 * The respective cmd handlers take care of performing 327 * ULP related invocations 328 */ 329 static int 330 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 331 { 332 int rval; 333 334 /* 335 * We check the value of fp_offline_ticker at this 336 * point. The variable is global for the driver and 337 * not specific to an instance. 338 * 339 * If there is no user-defined value found in /etc/system 340 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 341 * The minimum setting for this offline timeout according 342 * to the FC-FS2 standard (Fibre Channel Framing and 343 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 344 * 345 * We do not recommend setting the value to less than 10 346 * seconds (RA_TOV) or more than 90 seconds. If this 347 * variable is greater than 90 seconds then drivers above 348 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 349 */ 350 351 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 352 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 353 FP_OFFLINE_TICKER); 354 355 if ((fp_offline_ticker < 10) || 356 (fp_offline_ticker > 90)) { 357 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 358 "%d second(s). This is outside the " 359 "recommended range of 10..90 seconds", 360 fp_offline_ticker); 361 } 362 363 /* 364 * Tick every second when there are commands to retry. 
365 * It should tick at the least granular value of pkt_timeout 366 * (which is one second) 367 */ 368 fp_retry_ticker = 1; 369 370 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000); 371 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000); 372 373 switch (cmd) { 374 case DDI_ATTACH: 375 rval = fp_attach_handler(dip); 376 break; 377 378 case DDI_RESUME: 379 rval = fp_resume_handler(dip); 380 break; 381 382 default: 383 rval = DDI_FAILURE; 384 break; 385 } 386 return (rval); 387 } 388 389 390 /* 391 * fp_detach: 392 * 393 * If a ULP fails to handle cmd request converse of 394 * cmd is invoked for ULPs that previously succeeded 395 * cmd request. 396 */ 397 static int 398 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 399 { 400 int rval = DDI_FAILURE; 401 fc_local_port_t *port; 402 fc_attach_cmd_t converse; 403 uint8_t cnt; 404 405 if ((port = ddi_get_soft_state(fp_driver_softstate, 406 ddi_get_instance(dip))) == NULL) { 407 return (DDI_FAILURE); 408 } 409 410 mutex_enter(&port->fp_mutex); 411 412 if (port->fp_ulp_attach) { 413 mutex_exit(&port->fp_mutex); 414 return (DDI_FAILURE); 415 } 416 417 switch (cmd) { 418 case DDI_DETACH: 419 if (port->fp_task != FP_TASK_IDLE) { 420 mutex_exit(&port->fp_mutex); 421 return (DDI_FAILURE); 422 } 423 424 /* Let's attempt to quit the job handler gracefully */ 425 port->fp_soft_state |= FP_DETACH_INPROGRESS; 426 427 mutex_exit(&port->fp_mutex); 428 converse = FC_CMD_ATTACH; 429 if (fctl_detach_ulps(port, FC_CMD_DETACH, 430 &modlinkage) != FC_SUCCESS) { 431 mutex_enter(&port->fp_mutex); 432 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 433 mutex_exit(&port->fp_mutex); 434 rval = DDI_FAILURE; 435 break; 436 } 437 438 mutex_enter(&port->fp_mutex); 439 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt); 440 cnt++) { 441 mutex_exit(&port->fp_mutex); 442 delay(drv_usectohz(1000000)); 443 mutex_enter(&port->fp_mutex); 444 } 445 446 if (port->fp_job_head) { 447 mutex_exit(&port->fp_mutex); 448 rval = DDI_FAILURE; 449 break; 450 } 451 mutex_exit(&port->fp_mutex); 452 453 rval = fp_detach_handler(port); 454 break; 455 456 case DDI_SUSPEND: 457 mutex_exit(&port->fp_mutex); 458 converse = FC_CMD_RESUME; 459 if (fctl_detach_ulps(port, FC_CMD_SUSPEND, 460 &modlinkage) != FC_SUCCESS) { 461 rval = DDI_FAILURE; 462 break; 463 } 464 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) { 465 (void) callb_generic_cpr(&port->fp_cpr_info, 466 CB_CODE_CPR_RESUME); 467 } 468 break; 469 470 default: 471 mutex_exit(&port->fp_mutex); 472 break; 473 } 474 475 /* 476 * Use softint to perform reattach. Mark fp_ulp_attach so we 477 * don't attempt to do this repeatedly on behalf of some persistent 478 * caller. 479 */ 480 if (rval != DDI_SUCCESS) { 481 mutex_enter(&port->fp_mutex); 482 port->fp_ulp_attach = 1; 483 484 /* 485 * If the port is in the low power mode then there is 486 * possibility that fca too could be in low power mode. 487 * Try to raise the power before calling attach ulps. 
	 */

		if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
		    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
			mutex_exit(&port->fp_mutex);
			(void) pm_raise_power(port->fp_port_dip,
			    FP_PM_COMPONENT, FP_PM_PORT_UP);
		} else {
			mutex_exit(&port->fp_mutex);
		}

		fp_attach_ulps(port, converse);

		mutex_enter(&port->fp_mutex);
		while (port->fp_ulp_attach) {
			cv_wait(&port->fp_attach_cv, &port->fp_mutex);
		}

		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;

		/*
		 * Mark state as detach failed so asynchronous ULP attach
		 * events (downstream, not the ones we're initiating with
		 * the call to fp_attach_ulps) are not honored. We're
		 * really still in pending detach.
		 */
		port->fp_soft_state |= FP_DETACH_FAILED;

		mutex_exit(&port->fp_mutex);
	}

	return (rval);
}


/*
 * fp_getinfo:
 *	Given the device number, return either the
 *	dev_info_t pointer or the instance number.
 */

/* ARGSUSED */
static int
fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int rval;
	minor_t instance;
	fc_local_port_t *port;

	rval = DDI_SUCCESS;
	instance = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((port = ddi_get_soft_state(fp_driver_softstate,
		    instance)) == NULL) {
			rval = DDI_FAILURE;
			break;
		}
		*result = (void *)port->fp_port_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	return (rval);
}


/*
 * Entry point for power up and power down requests from the kernel
 */
static int
fp_power(dev_info_t *dip, int comp, int level)
{
	int rval = DDI_FAILURE;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));
	if (port == NULL || comp != FP_PM_COMPONENT) {
		return (rval);
	}

	switch (level) {
	case FP_PM_PORT_UP:
		rval = DDI_SUCCESS;

		/*
		 * If the port is DDI_SUSPENDed, let the DDI_RESUME
		 * code complete the rediscovery.
		 */
		mutex_enter(&port->fp_mutex);
		if (port->fp_soft_state & FP_SOFT_SUSPEND) {
			port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
			fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage);
			break;
		}

		if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
			ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

			port->fp_pm_level = FP_PM_PORT_UP;
			rval = fp_power_up(port);
			if (rval != DDI_SUCCESS) {
				port->fp_pm_level = FP_PM_PORT_DOWN;
			}
		} else {
			port->fp_pm_level = FP_PM_PORT_UP;
		}
		mutex_exit(&port->fp_mutex);
		break;

	case FP_PM_PORT_DOWN:
		mutex_enter(&port->fp_mutex);

		ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP));
		if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) {
			/*
			 * The PM framework goofed up. We don't have
			 * any PM components. Let's never go down.
617 */ 618 mutex_exit(&port->fp_mutex); 619 break; 620 621 } 622 623 if (port->fp_ulp_attach) { 624 /* We shouldn't let the power go down */ 625 mutex_exit(&port->fp_mutex); 626 break; 627 } 628 629 /* 630 * Not a whole lot to do if we are detaching 631 */ 632 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 633 port->fp_pm_level = FP_PM_PORT_DOWN; 634 mutex_exit(&port->fp_mutex); 635 rval = DDI_SUCCESS; 636 break; 637 } 638 639 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 640 port->fp_pm_level = FP_PM_PORT_DOWN; 641 642 rval = fp_power_down(port); 643 if (rval != DDI_SUCCESS) { 644 port->fp_pm_level = FP_PM_PORT_UP; 645 ASSERT(!(port->fp_soft_state & 646 FP_SOFT_POWER_DOWN)); 647 } else { 648 ASSERT(port->fp_soft_state & 649 FP_SOFT_POWER_DOWN); 650 } 651 } 652 mutex_exit(&port->fp_mutex); 653 break; 654 655 default: 656 break; 657 } 658 659 return (rval); 660 } 661 662 663 /* 664 * Open FC port devctl node 665 */ 666 static int 667 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 668 { 669 int instance; 670 fc_local_port_t *port; 671 672 if (otype != OTYP_CHR) { 673 return (EINVAL); 674 } 675 676 /* 677 * This is not a toy to play with. Allow only powerful 678 * users (hopefully knowledgeable) to access the port 679 * (A hacker potentially could download a sick binary 680 * file into FCA) 681 */ 682 if (drv_priv(credp)) { 683 return (EPERM); 684 } 685 686 instance = (int)getminor(*devp); 687 688 port = ddi_get_soft_state(fp_driver_softstate, instance); 689 if (port == NULL) { 690 return (ENXIO); 691 } 692 693 mutex_enter(&port->fp_mutex); 694 if (port->fp_flag & FP_EXCL) { 695 /* 696 * It is already open for exclusive access. 697 * So shut the door on this caller. 698 */ 699 mutex_exit(&port->fp_mutex); 700 return (EBUSY); 701 } 702 703 if (flag & FEXCL) { 704 if (port->fp_flag & FP_OPEN) { 705 /* 706 * Exclusive operation not possible 707 * as it is already opened 708 */ 709 mutex_exit(&port->fp_mutex); 710 return (EBUSY); 711 } 712 port->fp_flag |= FP_EXCL; 713 } 714 port->fp_flag |= FP_OPEN; 715 mutex_exit(&port->fp_mutex); 716 717 return (0); 718 } 719 720 721 /* 722 * The driver close entry point is called on the last close() 723 * of a device. So it is perfectly alright to just clobber the 724 * open flag and reset it to idle (instead of having to reset 725 * each flag bits). For any confusion, check out close(9E). 
726 */ 727 728 /* ARGSUSED */ 729 static int 730 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 731 { 732 int instance; 733 fc_local_port_t *port; 734 735 if (otype != OTYP_CHR) { 736 return (EINVAL); 737 } 738 739 instance = (int)getminor(dev); 740 741 port = ddi_get_soft_state(fp_driver_softstate, instance); 742 if (port == NULL) { 743 return (ENXIO); 744 } 745 746 mutex_enter(&port->fp_mutex); 747 if ((port->fp_flag & FP_OPEN) == 0) { 748 mutex_exit(&port->fp_mutex); 749 return (ENODEV); 750 } 751 port->fp_flag = FP_IDLE; 752 mutex_exit(&port->fp_mutex); 753 754 return (0); 755 } 756 757 /* 758 * Handle IOCTL requests 759 */ 760 761 /* ARGSUSED */ 762 static int 763 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 764 { 765 int instance; 766 int ret = 0; 767 fcio_t fcio; 768 fc_local_port_t *port; 769 770 instance = (int)getminor(dev); 771 772 port = ddi_get_soft_state(fp_driver_softstate, instance); 773 if (port == NULL) { 774 return (ENXIO); 775 } 776 777 mutex_enter(&port->fp_mutex); 778 if ((port->fp_flag & FP_OPEN) == 0) { 779 mutex_exit(&port->fp_mutex); 780 return (ENXIO); 781 } 782 783 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 784 mutex_exit(&port->fp_mutex); 785 return (ENXIO); 786 } 787 788 mutex_exit(&port->fp_mutex); 789 790 /* this will raise power if necessary */ 791 ret = fctl_busy_port(port); 792 if (ret != 0) { 793 return (ret); 794 } 795 796 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 797 798 799 switch (cmd) { 800 case FCIO_CMD: { 801 #ifdef _MULTI_DATAMODEL 802 switch (ddi_model_convert_from(mode & FMODELS)) { 803 case DDI_MODEL_ILP32: { 804 struct fcio32 fcio32; 805 806 if (ddi_copyin((void *)data, (void *)&fcio32, 807 sizeof (struct fcio32), mode)) { 808 ret = EFAULT; 809 break; 810 } 811 fcio.fcio_xfer = fcio32.fcio_xfer; 812 fcio.fcio_cmd = fcio32.fcio_cmd; 813 fcio.fcio_flags = fcio32.fcio_flags; 814 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 815 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 816 fcio.fcio_ibuf = 817 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 818 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 819 fcio.fcio_obuf = 820 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 821 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 822 fcio.fcio_abuf = 823 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 824 fcio.fcio_errno = fcio32.fcio_errno; 825 break; 826 } 827 828 case DDI_MODEL_NONE: 829 if (ddi_copyin((void *)data, (void *)&fcio, 830 sizeof (fcio_t), mode)) { 831 ret = EFAULT; 832 } 833 break; 834 } 835 #else /* _MULTI_DATAMODEL */ 836 if (ddi_copyin((void *)data, (void *)&fcio, 837 sizeof (fcio_t), mode)) { 838 ret = EFAULT; 839 break; 840 } 841 #endif /* _MULTI_DATAMODEL */ 842 if (!ret) { 843 ret = fp_fciocmd(port, data, mode, &fcio); 844 } 845 break; 846 } 847 848 default: 849 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 850 mode, credp, rval); 851 } 852 853 fctl_idle_port(port); 854 855 return (ret); 856 } 857 858 859 /* 860 * Init Symbolic Port Name and Node Name 861 * LV will try to get symbolic names from FCA driver 862 * and register these to name server, 863 * if LV fails to get these, 864 * LV will register its default symbolic names to name server. 
865 * The Default symbolic node name format is : 866 * <hostname>:<hba driver name>(instance) 867 * The Default symbolic port name format is : 868 * <fp path name> 869 */ 870 static void 871 fp_init_symbolic_names(fc_local_port_t *port) 872 { 873 const char *vendorname = ddi_driver_name(port->fp_fca_dip); 874 char *sym_name; 875 char fcaname[50] = {0}; 876 int hostnlen, fcanlen; 877 878 if (port->fp_sym_node_namelen == 0) { 879 hostnlen = strlen(utsname.nodename); 880 (void) snprintf(fcaname, sizeof (fcaname), 881 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip)); 882 fcanlen = strlen(fcaname); 883 884 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP); 885 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname); 886 port->fp_sym_node_namelen = strlen(sym_name); 887 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) { 888 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN; 889 } 890 (void) strncpy(port->fp_sym_node_name, sym_name, 891 port->fp_sym_node_namelen); 892 kmem_free(sym_name, hostnlen + fcanlen + 2); 893 } 894 895 if (port->fp_sym_port_namelen == 0) { 896 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 897 898 (void) ddi_pathname(port->fp_port_dip, pathname); 899 port->fp_sym_port_namelen = strlen(pathname); 900 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) { 901 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN; 902 } 903 (void) strncpy(port->fp_sym_port_name, pathname, 904 port->fp_sym_port_namelen); 905 kmem_free(pathname, MAXPATHLEN); 906 } 907 } 908 909 910 /* 911 * Perform port attach 912 */ 913 static int 914 fp_attach_handler(dev_info_t *dip) 915 { 916 int rval; 917 int instance; 918 int port_num; 919 int port_len; 920 char name[30]; 921 char i_pwwn[17]; 922 fp_cmd_t *pkt; 923 uint32_t ub_count; 924 fc_local_port_t *port; 925 job_request_t *job; 926 fc_local_port_t *phyport = NULL; 927 int portpro1; 928 char pwwn[17], nwwn[17]; 929 930 instance = ddi_get_instance(dip); 931 932 port_len = sizeof (port_num); 933 934 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 935 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 936 (caddr_t)&port_num, &port_len); 937 938 if (rval != DDI_SUCCESS) { 939 cmn_err(CE_WARN, "fp(%d): No port property in devinfo", 940 instance); 941 return (DDI_FAILURE); 942 } 943 944 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance, 945 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 946 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node", 947 instance); 948 return (DDI_FAILURE); 949 } 950 951 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance, 952 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 953 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment" 954 " point minor node", instance); 955 ddi_remove_minor_node(dip, NULL); 956 return (DDI_FAILURE); 957 } 958 959 if (ddi_soft_state_zalloc(fp_driver_softstate, instance) 960 != DDI_SUCCESS) { 961 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state", 962 instance); 963 ddi_remove_minor_node(dip, NULL); 964 return (DDI_FAILURE); 965 } 966 port = ddi_get_soft_state(fp_driver_softstate, instance); 967 968 (void) sprintf(port->fp_ibuf, "fp(%d)", instance); 969 970 port->fp_instance = instance; 971 port->fp_ulp_attach = 1; 972 port->fp_port_num = port_num; 973 port->fp_verbose = fp_verbosity; 974 port->fp_options = fp_options; 975 976 port->fp_fca_dip = ddi_get_parent(dip); 977 port->fp_port_dip = dip; 978 port->fp_fca_tran = (fc_fca_tran_t *) 979 ddi_get_driver_private(port->fp_fca_dip); 980 981 port->fp_task = port->fp_last_task = FP_TASK_IDLE; 982 983 /* 984 * 
Init the starting value of fp_rscn_count. Note that if 985 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the 986 * actual # of RSCNs will be (fp_rscn_count - 1) 987 */ 988 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1; 989 990 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL); 991 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL); 992 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL); 993 994 (void) sprintf(name, "fp%d_cache", instance); 995 996 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY, 997 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 998 "phyport-instance", -1)) != -1) { 999 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1); 1000 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn); 1001 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn); 1002 port->fp_npiv_type = FC_NPIV_PORT; 1003 } 1004 1005 /* 1006 * Allocate the pool of fc_packet_t structs to be used with 1007 * this fp instance. 1008 */ 1009 port->fp_pkt_cache = kmem_cache_create(name, 1010 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8, 1011 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port, 1012 NULL, 0); 1013 port->fp_out_fpcmds = 0; 1014 if (port->fp_pkt_cache == NULL) { 1015 goto cache_alloc_failed; 1016 } 1017 1018 1019 /* 1020 * Allocate the d_id and pwwn hash tables for all remote ports 1021 * connected to this local port. 1022 */ 1023 port->fp_did_table = kmem_zalloc(did_table_size * 1024 sizeof (struct d_id_hash), KM_SLEEP); 1025 1026 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size * 1027 sizeof (struct pwwn_hash), KM_SLEEP); 1028 1029 port->fp_taskq = taskq_create("fp_ulp_callback", 1, 1030 MINCLSYSPRI, 1, 16, 0); 1031 1032 /* Indicate that don't have the pm components yet */ 1033 port->fp_soft_state |= FP_SOFT_NO_PMCOMP; 1034 1035 /* 1036 * Bind the callbacks with the FCA driver. This will open the gate 1037 * for asynchronous callbacks, so after this call the fp_mutex 1038 * must be held when updating the fc_local_port_t struct. 1039 * 1040 * This is done _before_ setting up the job thread so we can avoid 1041 * cleaning up after the thread_create() in the error path. This 1042 * also means fp will be operating with fp_els_resp_pkt set to NULL. 
1043 */ 1044 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1045 goto bind_callbacks_failed; 1046 } 1047 1048 if (phyport) { 1049 mutex_enter(&phyport->fp_mutex); 1050 if (phyport->fp_port_next) { 1051 phyport->fp_port_next->fp_port_prev = port; 1052 port->fp_port_next = phyport->fp_port_next; 1053 phyport->fp_port_next = port; 1054 port->fp_port_prev = phyport; 1055 } else { 1056 phyport->fp_port_next = port; 1057 phyport->fp_port_prev = port; 1058 port->fp_port_next = phyport; 1059 port->fp_port_prev = phyport; 1060 } 1061 mutex_exit(&phyport->fp_mutex); 1062 } 1063 1064 /* 1065 * Init Symbolic Names 1066 */ 1067 fp_init_symbolic_names(port); 1068 1069 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t), 1070 KM_SLEEP, NULL); 1071 1072 if (pkt == NULL) { 1073 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet", 1074 instance); 1075 goto alloc_els_packet_failed; 1076 } 1077 1078 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN, 1079 v.v_maxsyspri - 2); 1080 1081 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn); 1082 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port", 1083 i_pwwn) != DDI_PROP_SUCCESS) { 1084 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1085 "fp(%d): Updating 'initiator-port' property" 1086 " on fp dev_info node failed", instance); 1087 } 1088 1089 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn); 1090 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node", 1091 i_pwwn) != DDI_PROP_SUCCESS) { 1092 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1093 "fp(%d): Updating 'initiator-node' property" 1094 " on fp dev_info node failed", instance); 1095 } 1096 1097 mutex_enter(&port->fp_mutex); 1098 port->fp_els_resp_pkt = pkt; 1099 mutex_exit(&port->fp_mutex); 1100 1101 /* 1102 * Determine the count of unsolicited buffers this FCA can support 1103 */ 1104 fp_retrieve_caps(port); 1105 1106 /* 1107 * Allocate unsolicited buffer tokens 1108 */ 1109 if (port->fp_ub_count) { 1110 ub_count = port->fp_ub_count; 1111 port->fp_ub_tokens = kmem_zalloc(ub_count * 1112 sizeof (*port->fp_ub_tokens), KM_SLEEP); 1113 /* 1114 * Do not fail the attach if unsolicited buffer allocation 1115 * fails; Just try to get along with whatever the FCA can do. 1116 */ 1117 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size, 1118 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) != 1119 FC_SUCCESS || ub_count != port->fp_ub_count) { 1120 cmn_err(CE_WARN, "fp(%d): failed to allocate " 1121 " Unsolicited buffers. proceeding with attach...", 1122 instance); 1123 kmem_free(port->fp_ub_tokens, 1124 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1125 port->fp_ub_tokens = NULL; 1126 } 1127 } 1128 1129 fp_load_ulp_modules(dip, port); 1130 1131 /* 1132 * Enable DDI_SUSPEND and DDI_RESUME for this instance. 1133 */ 1134 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1135 "pm-hardware-state", "needs-suspend-resume", 1136 strlen("needs-suspend-resume") + 1); 1137 1138 /* 1139 * fctl maintains a list of all port handles, so 1140 * help fctl add this one to its list now. 
	 */
	mutex_enter(&port->fp_mutex);
	fctl_add_port(port);

	/*
	 * If a state change is already in progress, set the bind state to
	 * OFFLINE as well, so further state change callbacks into ULPs
	 * will pass the appropriate states.
	 */
	if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE ||
	    port->fp_statec_busy) {
		port->fp_bind_state = FC_STATE_OFFLINE;
		mutex_exit(&port->fp_mutex);

		fp_startup_done((opaque_t)port, FC_PKT_SUCCESS);
	} else {
		/*
		 * Without dropping the mutex, ensure that the port
		 * startup happens ahead of state change callback
		 * processing.
		 */
		ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL);

		port->fp_last_task = port->fp_task;
		port->fp_task = FP_TASK_PORT_STARTUP;

		job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC,
		    fp_startup_done, (opaque_t)port, KM_SLEEP);

		port->fp_job_head = port->fp_job_tail = job;

		cv_signal(&port->fp_cv);

		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&port->fp_mutex);
	while (port->fp_ulp_attach) {
		cv_wait(&port->fp_attach_cv, &port->fp_mutex);
	}
	mutex_exit(&port->fp_mutex);

	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "pm-components", fp_pm_comps,
	    sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) !=
	    DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM"
		    " components property, PM disabled on this port.");
		mutex_enter(&port->fp_mutex);
		port->fp_pm_level = FP_PM_PORT_UP;
		mutex_exit(&port->fp_mutex);
	} else {
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise"
			    " power level");
			mutex_enter(&port->fp_mutex);
			port->fp_pm_level = FP_PM_PORT_UP;
			mutex_exit(&port->fp_mutex);
		}

		/*
		 * Don't unset the FP_SOFT_NO_PMCOMP flag until after
		 * the call to pm_raise_power. The PM framework can't
		 * handle multiple threads calling into it during attach.
		 */

		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP;
		mutex_exit(&port->fp_mutex);
	}

	ddi_report_dev(dip);

	fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH);

	return (DDI_SUCCESS);

	/*
	 * Unwind any/all preceding allocations in the event of an error.
	 */

alloc_els_packet_failed:

	if (port->fp_fca_handle != NULL) {
		port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
		port->fp_fca_handle = NULL;
	}

	if (port->fp_ub_tokens != NULL) {
		(void) fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens);
		kmem_free(port->fp_ub_tokens,
		    port->fp_ub_count * sizeof (*port->fp_ub_tokens));
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_els_resp_pkt != NULL) {
		fp_free_pkt(port->fp_els_resp_pkt);
		port->fp_els_resp_pkt = NULL;
	}

bind_callbacks_failed:

	if (port->fp_taskq != NULL) {
		taskq_destroy(port->fp_taskq);
	}

	if (port->fp_pwwn_table != NULL) {
		kmem_free(port->fp_pwwn_table,
		    pwwn_table_size * sizeof (struct pwwn_hash));
		port->fp_pwwn_table = NULL;
	}

	if (port->fp_did_table != NULL) {
		kmem_free(port->fp_did_table,
		    did_table_size * sizeof (struct d_id_hash));
		port->fp_did_table = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
		port->fp_pkt_cache = NULL;
	}

cache_alloc_failed:

	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	mutex_destroy(&port->fp_mutex);
	ddi_remove_minor_node(port->fp_port_dip, NULL);
	ddi_soft_state_free(fp_driver_softstate, instance);
	ddi_prop_remove_all(dip);

	return (DDI_FAILURE);
}


/*
 * Handle DDI_RESUME request
 */
static int
fp_resume_handler(dev_info_t *dip)
{
	int rval;
	fc_local_port_t *port;

	port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip));

	ASSERT(port != NULL);

#ifdef DEBUG
	mutex_enter(&port->fp_mutex);
	ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND);
	mutex_exit(&port->fp_mutex);
#endif

	/*
	 * If the port was power suspended, raise the power level.
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) &&
	    (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) {
		ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN);

		mutex_exit(&port->fp_mutex);
		if (pm_raise_power(dip, FP_PM_COMPONENT,
		    FP_PM_PORT_UP) != DDI_SUCCESS) {
			FP_TRACE(FP_NHEAD2(9, 0),
			    "Failed to raise the power level");
			return (DDI_FAILURE);
		}
		mutex_enter(&port->fp_mutex);
	}
	port->fp_soft_state &= ~FP_SOFT_SUSPEND;
	mutex_exit(&port->fp_mutex);

	/*
	 * All discovery is initiated and handled by the per-port thread,
	 * and it is normally done in callback mode (not polled mode). In
	 * a specific case such as this, however, the discovery is required
	 * to happen in polled mode. The easiest way out is to bail out of
	 * the port thread and get started. Come back and fix this to do
	 * on-demand discovery initiated by ULPs. ULPs such as FCP will do
	 * on-demand discovery during pre-power-up busctl handling, which
	 * will only be possible when SCSA provides a new HBA vector for
	 * sending down the PM busctl requests.
1328 */ 1329 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME); 1330 1331 rval = fp_resume_all(port, FC_CMD_RESUME); 1332 if (rval != DDI_SUCCESS) { 1333 mutex_enter(&port->fp_mutex); 1334 port->fp_soft_state |= FP_SOFT_SUSPEND; 1335 mutex_exit(&port->fp_mutex); 1336 (void) callb_generic_cpr(&port->fp_cpr_info, 1337 CB_CODE_CPR_CHKPT); 1338 } 1339 1340 return (rval); 1341 } 1342 1343 /* 1344 * Perform FC Port power on initialization 1345 */ 1346 static int 1347 fp_power_up(fc_local_port_t *port) 1348 { 1349 int rval; 1350 1351 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1352 1353 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0); 1354 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN); 1355 1356 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1357 1358 mutex_exit(&port->fp_mutex); 1359 1360 rval = fp_resume_all(port, FC_CMD_POWER_UP); 1361 if (rval != DDI_SUCCESS) { 1362 mutex_enter(&port->fp_mutex); 1363 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1364 } else { 1365 mutex_enter(&port->fp_mutex); 1366 } 1367 1368 return (rval); 1369 } 1370 1371 1372 /* 1373 * It is important to note that the power may possibly be removed between 1374 * SUSPEND and the ensuing RESUME operation. In such a context the underlying 1375 * FC port hardware would have gone through an OFFLINE to ONLINE transition 1376 * (hardware state). In this case, the port driver may need to rediscover the 1377 * topology, perform LOGINs, register with the name server again and perform 1378 * any such port initialization procedures. To perform LOGINs, the driver could 1379 * use the port device handle to see if a LOGIN needs to be performed and use 1380 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured 1381 * or removed) which will be reflected in the map the ULPs will see. 1382 */ 1383 static int 1384 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd) 1385 { 1386 1387 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1388 1389 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1390 return (DDI_FAILURE); 1391 } 1392 1393 mutex_enter(&port->fp_mutex); 1394 1395 /* 1396 * If there are commands queued for delayed retry, instead of 1397 * working the hard way to figure out which ones are good for 1398 * restart and which ones not (ELSs are definitely not good 1399 * as the port will have to go through a new spin of rediscovery 1400 * now), so just flush them out. 1401 */ 1402 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) { 1403 fp_cmd_t *cmd; 1404 1405 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT; 1406 1407 mutex_exit(&port->fp_mutex); 1408 while ((cmd = fp_deque_cmd(port)) != NULL) { 1409 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 1410 fp_iodone(cmd); 1411 } 1412 mutex_enter(&port->fp_mutex); 1413 } 1414 1415 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) { 1416 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) || 1417 port->fp_dev_count) { 1418 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1419 port->fp_offline_tid = timeout(fp_offline_timeout, 1420 (caddr_t)port, fp_offline_ticks); 1421 } 1422 if (port->fp_job_head) { 1423 cv_signal(&port->fp_cv); 1424 } 1425 mutex_exit(&port->fp_mutex); 1426 fctl_attach_ulps(port, cmd, &modlinkage); 1427 } else { 1428 struct job_request *job; 1429 1430 /* 1431 * If an OFFLINE timer was running at the time of 1432 * suspending, there is no need to restart it as 1433 * the port is ONLINE now. 
	 */
		port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT;
		if (port->fp_statec_busy == 0) {
			port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
		}
		port->fp_statec_busy++;
		mutex_exit(&port->fp_mutex);

		job = fctl_alloc_job(JOB_PORT_ONLINE,
		    JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP);
		fctl_enque_job(port, job);

		fctl_jobwait(job);
		fctl_remove_oldies(port);

		fctl_attach_ulps(port, cmd, &modlinkage);
		fctl_dealloc_job(job);
	}

	return (DDI_SUCCESS);
}


/*
 * At this time, there shouldn't be any I/O requests on this port.
 * But the unsolicited callbacks from the underlying FCA port need
 * to be handled very carefully. The steps followed to handle the
 * DDI_DETACH are:
 *	+ Grab the port driver mutex, check if an unsolicited
 *	  callback is currently being processed. If true, fail
 *	  the DDI_DETACH request by printing a message; if false,
 *	  mark the DDI_DETACH as in progress, so that any
 *	  further unsolicited callbacks get bounced.
 *	+ Perform PRLO/LOGO if necessary, cleanup all the data
 *	  structures.
 *	+ Get the job_handler thread to gracefully exit.
 *	+ Unregister callbacks with the FCA port.
 *	+ Now that some peace is found, notify all the ULPs of the
 *	  DDI_DETACH request (using the ulp_port_detach entry point).
 *	+ Free all mutexes, semaphores, condition variables.
 *	+ Free the soft state, return success.
 *
 * Important considerations:
 *	The port driver de-registers state change and unsolicited
 *	callbacks before taking up the task of notifying ULPs
 *	and performing PRLO and LOGOs.
 *
 *	A port may go offline at the time PRLO/LOGO is being
 *	requested. It is expected of all FCA drivers to fail
 *	such requests either immediately with a FC_OFFLINE
 *	return code to fc_fca_transport() or return the packet
 *	asynchronously with pkt state set to FC_PKT_PORT_OFFLINE
 */
static int
fp_detach_handler(fc_local_port_t *port)
{
	job_request_t *job;
	uint32_t delay_count;
	fc_orphan_t *orp, *tmporp;

	/*
	 * In a Fabric topology with many host ports connected to
	 * a switch, another detaching instance of fp might have
	 * triggered a LOGO (which is an unsolicited request to
	 * this instance). So in order to be able to successfully
	 * detach in such cases, a delay of about 30 seconds is
	 * introduced.
	 */
	delay_count = 0;
	mutex_enter(&port->fp_mutex);
	if (port->fp_out_fpcmds != 0) {
		/*
		 * At this time we can only check fp internal commands,
		 * because sd/ssd/scsi_vhci should have finished all their
		 * commands, and fcp/fcip/fcsm should have finished all
		 * their commands.
		 *
		 * It seems that all fp internal commands are asynchronous
		 * now.
		 */
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): %d fp_cmd(s) is/are in progress"
		    " Failing detach", port->fp_instance, port->fp_out_fpcmds);
		return (DDI_FAILURE);
	}

	while ((port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) &&
	    (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state &
	    (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) {
		port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
		mutex_exit(&port->fp_mutex);

		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing detach", port->fp_instance);
		return (DDI_FAILURE);
	}

	port->fp_soft_state |= FP_SOFT_IN_DETACH;
	port->fp_soft_state &= ~FP_DETACH_INPROGRESS;
	mutex_exit(&port->fp_mutex);

	/*
	 * If we're powered down, we need to raise power prior to submitting
	 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never
	 * process the shutdown job.
	 */
	if (fctl_busy_port(port) != 0) {
		cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed",
		    port->fp_instance);
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_IN_DETACH;
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * This will deallocate data structs and cause the "job" thread
	 * to exit, in preparation for DDI_DETACH on the instance.
	 * This can sleep for an arbitrary duration, since it waits for
	 * commands over the wire, timeout(9F) callbacks, etc.
	 *
	 * CAUTION: There is still a race here, where the "job" thread
	 * can still be executing code even though the fctl_jobwait() call
	 * below has returned to us. In theory the fp driver could even be
	 * modunloaded while the job thread is still executing; there is no
	 * easy way to close that window without creating another race
	 * condition.
	 */
	job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL,
	    (opaque_t)port, KM_SLEEP);
	fctl_enque_job(port, job);
	fctl_jobwait(job);
	fctl_dealloc_job(job);

	(void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT,
	    FP_PM_PORT_DOWN);

	if (port->fp_taskq) {
		taskq_destroy(port->fp_taskq);
	}

	ddi_prop_remove_all(port->fp_port_dip);

	ddi_remove_minor_node(port->fp_port_dip, NULL);

	fctl_remove_port(port);

	fp_free_pkt(port->fp_els_resp_pkt);

	if (port->fp_ub_tokens) {
		if (fc_ulp_ubfree(port, port->fp_ub_count,
		    port->fp_ub_tokens) != FC_SUCCESS) {
			cmn_err(CE_WARN, "fp(%d): couldn't free "
			    " unsolicited buffers", port->fp_instance);
		}
		kmem_free(port->fp_ub_tokens,
		    sizeof (*port->fp_ub_tokens) * port->fp_ub_count);
		port->fp_ub_tokens = NULL;
	}

	if (port->fp_pkt_cache != NULL) {
		kmem_cache_destroy(port->fp_pkt_cache);
	}

	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);

	mutex_enter(&port->fp_mutex);
	if (port->fp_did_table) {
		kmem_free(port->fp_did_table, did_table_size *
		    sizeof (struct d_id_hash));
	}

	if (port->fp_pwwn_table) {
		kmem_free(port->fp_pwwn_table, pwwn_table_size *
		    sizeof (struct pwwn_hash));
	}
	orp = port->fp_orphan_list;
	while (orp) {
		tmporp = orp;
		orp = orp->orp_next;
		kmem_free(tmporp, sizeof (*orp));
	}

	mutex_exit(&port->fp_mutex);

	fp_log_port_event(port, ESC_SUNFC_PORT_DETACH);

	mutex_destroy(&port->fp_mutex);
	cv_destroy(&port->fp_attach_cv);
	cv_destroy(&port->fp_cv);
	ddi_soft_state_free(fp_driver_softstate, port->fp_instance);

	return (DDI_SUCCESS);
}


/*
 * Steps to perform the DDI_SUSPEND operation on an FC port
 *
 *	- If already suspended, return DDI_FAILURE
 *	- If already power-suspended, return DDI_SUCCESS
 *	- If an unsolicited callback or state change handling is
 *	  in progress, throw a warning message, return DDI_FAILURE
 *	- Cancel timeouts
 *	- SUSPEND the job_handler thread (means do nothing as it is
 *	  taken care of by the CPR framework)
 */
static int
fp_suspend_handler(fc_local_port_t *port)
{
	uint32_t delay_count;

	mutex_enter(&port->fp_mutex);

	/*
	 * The following should never happen, but
	 * let the driver be more defensive here.
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		mutex_exit(&port->fp_mutex);
		return (DDI_FAILURE);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS,
	 * but mark the SUSPEND bit in the soft state
	 * before leaving.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		port->fp_soft_state |= FP_SOFT_SUSPEND;
		mutex_exit(&port->fp_mutex);
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling is
	 * in progress. If true, fail the suspend operation; also throw
	 * a warning message notifying the failure. Note that the Sun PCI
	 * hotplug spec recommends messages in cases of failure (but
	 * not flooding the console).
	 *
	 * Busy waiting for a short interval (500 millisecond ?) to see
	 * if the callback processing completes may be another idea.
	 * Since most of the callback processing involves a lot of work,
	 * it is safe to just fail the SUSPEND operation. It is definitely
	 * not bad to fail the SUSPEND operation if the driver is busy.
	 */
	delay_count = 0;
	while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) {
		mutex_exit(&port->fp_mutex);
		delay_count++;
		delay(drv_usectohz(1000000));
		mutex_enter(&port->fp_mutex);
	}

	if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB |
	    FP_SOFT_IN_UNSOL_CB)) {
		mutex_exit(&port->fp_mutex);
		cmn_err(CE_WARN, "fp(%d): FCA callback in progress: "
		    " Failing suspend", port->fp_instance);
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy.
	 */
	if (port->fp_job_head) {
		mutex_exit(&port->fp_mutex);
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing suspend");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_SUSPEND;

	fp_suspend_all(port);
	mutex_exit(&port->fp_mutex);

	return (DDI_SUCCESS);
}


/*
 * Prepare for graceful power down of an FC port
 */
static int
fp_power_down(fc_local_port_t *port)
{
	ASSERT(MUTEX_HELD(&port->fp_mutex));

	/*
	 * A power down request followed by a DDI_SUSPEND should
	 * never happen; if it does, return DDI_SUCCESS.
	 */
	if (port->fp_soft_state & FP_SOFT_SUSPEND) {
		port->fp_soft_state |= FP_SOFT_POWER_DOWN;
		return (DDI_SUCCESS);
	}

	/*
	 * If the port is already power suspended, there
	 * is nothing else to do, so return DDI_SUCCESS.
	 */
	if (port->fp_soft_state & FP_SOFT_POWER_DOWN) {
		return (DDI_SUCCESS);
	}

	/*
	 * Check if an unsolicited callback or state change handling
	 * is in progress. If true, fail the PM suspend operation.
	 * But don't print a message unless the verbosity of the
	 * driver desires otherwise.
	 */
	if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) ||
	    (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "Unsolicited callback in progress: Failing power down");
		return (DDI_FAILURE);
	}

	/*
	 * Check if the FC port thread is busy.
	 */
	if (port->fp_job_head) {
		FP_TRACE(FP_NHEAD2(9, 0),
		    "FC port thread is busy: Failing power down");
		return (DDI_FAILURE);
	}
	port->fp_soft_state |= FP_SOFT_POWER_DOWN;

	/*
	 * Check if the ULPs are ready for power down.
	 */
	mutex_exit(&port->fp_mutex);
	if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN,
	    &modlinkage) != FC_SUCCESS) {
		mutex_enter(&port->fp_mutex);
		port->fp_soft_state &= ~FP_SOFT_POWER_DOWN;
		mutex_exit(&port->fp_mutex);

		/*
		 * Power back up the obedient ULPs that went down.
		 */
		fp_attach_ulps(port, FC_CMD_POWER_UP);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "ULP(s) busy, detach_ulps failed. Failing power down");
		mutex_enter(&port->fp_mutex);
		return (DDI_FAILURE);
	}
	mutex_enter(&port->fp_mutex);

	fp_suspend_all(port);

	return (DDI_SUCCESS);
}


/*
 * Suspend the entire FC port
 */
static void
fp_suspend_all(fc_local_port_t *port)
{
	int index;
	struct pwwn_hash *head;
	fc_remote_port_t *pd;

	ASSERT(MUTEX_HELD(&port->fp_mutex));

	if (port->fp_wait_tid != 0) {
		timeout_id_t tid;

		tid = port->fp_wait_tid;
		port->fp_wait_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT;
	}

	if (port->fp_offline_tid) {
		timeout_id_t tid;

		tid = port->fp_offline_tid;
		port->fp_offline_tid = (timeout_id_t)NULL;
		mutex_exit(&port->fp_mutex);
		(void) untimeout(tid);
		mutex_enter(&port->fp_mutex);
		port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT;
	}
	mutex_exit(&port->fp_mutex);
	port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle);
	mutex_enter(&port->fp_mutex);

	/*
	 * Mark all devices as OLD, and reset the LOGIN state as well
	 * (this will force the ULPs to perform a LOGIN after calling
	 * fc_portgetmap() during RESUME/PM_RESUME).
	 */
	for (index = 0; index < pwwn_table_size; index++) {
		head = &port->fp_pwwn_table[index];
		pd = head->pwwn_head;
		while (pd != NULL) {
			mutex_enter(&pd->pd_mutex);
			fp_remote_port_offline(pd);
			fctl_delist_did_table(port, pd);
			pd->pd_state = PORT_DEVICE_VALID;
			pd->pd_login_count = 0;
			mutex_exit(&pd->pd_mutex);
			pd = pd->pd_wwn_hnext;
		}
	}
}


/*
 * fp_cache_constructor: Constructor function for kmem_cache_create(9F).
 * Performs initializations for fc_packet_t structs.
 * Returns 0 for success or -1 for failure.
 *
 * This function allocates DMA handles for both the command and the
 * response. Most of the ELSs used have both a command and a response,
 * so it is strongly desirable to set them up in the cache constructor
 * routine.
 *
 * Context: Can sleep iff called with KM_SLEEP flag.
 */
static int
fp_cache_constructor(void *buf, void *cdarg, int kmflags)
{
	int (*cb) (caddr_t);
	fc_packet_t *pkt;
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_local_port_t *port = (fc_local_port_t *)cdarg;

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd->cmd_next = NULL;
	cmd->cmd_flags = 0;
	cmd->cmd_dflags = 0;
	cmd->cmd_job = NULL;
	cmd->cmd_port = port;
	pkt = &cmd->cmd_pkt;

	if (ddi_dma_alloc_handle(port->fp_fca_dip,
	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
	    &pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (-1);
	}

	if (ddi_dma_alloc_handle(port->fp_fca_dip,
	    port->fp_fca_tran->fca_dma_attr, cb, NULL,
	    &pkt->pkt_resp_dma) != DDI_SUCCESS) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
		return (-1);
	}

	pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL;
	pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt =
	    pkt->pkt_data_cookie_cnt = 0;
	pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie =
	    pkt->pkt_data_cookie = NULL;
	pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t);

	return (0);
}


/*
 * fp_cache_destructor: Destructor function for kmem_cache_create().
 * Performs un-initializations for fc_packet_t structs.
 */
/* ARGSUSED */
static void
fp_cache_destructor(void *buf, void *cdarg)
{
	fp_cmd_t *cmd = (fp_cmd_t *)buf;
	fc_packet_t *pkt;

	pkt = &cmd->cmd_pkt;
	if (pkt->pkt_cmd_dma) {
		ddi_dma_free_handle(&pkt->pkt_cmd_dma);
	}

	if (pkt->pkt_resp_dma) {
		ddi_dma_free_handle(&pkt->pkt_resp_dma);
	}
}


/*
 * Packet allocation for ELS and any other port driver commands
 *
 * Some ELSs like FLOGI and PLOGI are critical for topology and
 * device discovery, and a system's inability to allocate memory
 * or DVMA resources while performing one of these critical ELSs
 * causes a lot of problems. While memory allocation failures are
 * rare, DVMA resource failures are common as the applications
 * are becoming more and more powerful on huge servers. It is
 * therefore desirable to have framework support for reserving a
 * fragment of DVMA. Until this is fixed the correct way, the
 * suffering is huge whenever a LIP happens while DVMA resources
 * are completely drained, so an attempt is made to KM_SLEEP while
 * requesting these resources, hoping that the requests won't hang
 * forever.
 *
 * The fc_remote_port_t argument is stored into the pkt_pd field in the
 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This
 * ensures that the pd_ref_count for the fc_remote_port_t is valid.
 * If there is no fc_remote_port_t associated with the fc_packet_t, then
 * fp_alloc_pkt() must be called with pd set to NULL.
 */

static fp_cmd_t *
fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags,
    fc_remote_port_t *pd)
{
	int rval;
	ulong_t real_len;
	fp_cmd_t *cmd;
	fc_packet_t *pkt;
	int (*cb) (caddr_t);
	ddi_dma_cookie_t pkt_cookie;
	ddi_dma_cookie_t *cp;
	uint32_t cnt;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	cb = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags);
	if (cmd == NULL) {
		return (cmd);
	}

	cmd->cmd_ulp_pkt = NULL;
	cmd->cmd_flags = 0;
	pkt = &cmd->cmd_pkt;
	ASSERT(cmd->cmd_dflags == 0);

	pkt->pkt_datalen = 0;
	pkt->pkt_data = NULL;
	pkt->pkt_state = 0;
	pkt->pkt_action = 0;
	pkt->pkt_reason = 0;
	pkt->pkt_expln = 0;

	/*
	 * Init pkt_pd with the given pointer; this must be done _before_
	 * the call to fc_ulp_init_packet().
1993 */ 1994 pkt->pkt_pd = pd; 1995 1996 /* Now call the FCA driver to init its private, per-packet fields */ 1997 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1998 goto alloc_pkt_failed; 1999 } 2000 2001 if (cmd_len) { 2002 ASSERT(pkt->pkt_cmd_dma != NULL); 2003 2004 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 2005 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 2006 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 2007 &pkt->pkt_cmd_acc); 2008 2009 if (rval != DDI_SUCCESS) { 2010 goto alloc_pkt_failed; 2011 } 2012 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 2013 2014 if (real_len < cmd_len) { 2015 goto alloc_pkt_failed; 2016 } 2017 2018 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2019 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2020 DDI_DMA_CONSISTENT, cb, NULL, 2021 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2022 2023 if (rval != DDI_DMA_MAPPED) { 2024 goto alloc_pkt_failed; 2025 } 2026 2027 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2028 2029 if (pkt->pkt_cmd_cookie_cnt > 2030 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2031 goto alloc_pkt_failed; 2032 } 2033 2034 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2035 2036 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2037 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2038 KM_NOSLEEP); 2039 2040 if (cp == NULL) { 2041 goto alloc_pkt_failed; 2042 } 2043 2044 *cp = pkt_cookie; 2045 cp++; 2046 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2047 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2048 *cp = pkt_cookie; 2049 } 2050 } 2051 2052 if (resp_len) { 2053 ASSERT(pkt->pkt_resp_dma != NULL); 2054 2055 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2056 port->fp_fca_tran->fca_acc_attr, 2057 DDI_DMA_CONSISTENT, cb, NULL, 2058 (caddr_t *)&pkt->pkt_resp, &real_len, 2059 &pkt->pkt_resp_acc); 2060 2061 if (rval != DDI_SUCCESS) { 2062 goto alloc_pkt_failed; 2063 } 2064 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2065 2066 if (real_len < resp_len) { 2067 goto alloc_pkt_failed; 2068 } 2069 2070 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2071 pkt->pkt_resp, real_len, DDI_DMA_READ | 2072 DDI_DMA_CONSISTENT, cb, NULL, 2073 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2074 2075 if (rval != DDI_DMA_MAPPED) { 2076 goto alloc_pkt_failed; 2077 } 2078 2079 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2080 2081 if (pkt->pkt_resp_cookie_cnt > 2082 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2083 goto alloc_pkt_failed; 2084 } 2085 2086 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2087 2088 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2089 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2090 KM_NOSLEEP); 2091 2092 if (cp == NULL) { 2093 goto alloc_pkt_failed; 2094 } 2095 2096 *cp = pkt_cookie; 2097 cp++; 2098 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2099 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2100 *cp = pkt_cookie; 2101 } 2102 } 2103 2104 pkt->pkt_cmdlen = cmd_len; 2105 pkt->pkt_rsplen = resp_len; 2106 pkt->pkt_ulp_private = cmd; 2107 2108 return (cmd); 2109 2110 alloc_pkt_failed: 2111 2112 fp_free_dma(cmd); 2113 2114 if (pkt->pkt_cmd_cookie != NULL) { 2115 kmem_free(pkt->pkt_cmd_cookie, 2116 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2117 pkt->pkt_cmd_cookie = NULL; 2118 } 2119 2120 if (pkt->pkt_resp_cookie != NULL) { 2121 kmem_free(pkt->pkt_resp_cookie, 2122 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2123 pkt->pkt_resp_cookie = NULL; 2124 } 2125 2126 kmem_cache_free(port->fp_pkt_cache, cmd); 2127 2128 return 
(NULL); 2129 } 2130 2131 2132 /* 2133 * Free FC packet 2134 */ 2135 static void 2136 fp_free_pkt(fp_cmd_t *cmd) 2137 { 2138 fc_local_port_t *port; 2139 fc_packet_t *pkt; 2140 2141 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2142 2143 cmd->cmd_next = NULL; 2144 cmd->cmd_job = NULL; 2145 pkt = &cmd->cmd_pkt; 2146 pkt->pkt_ulp_private = 0; 2147 pkt->pkt_tran_flags = 0; 2148 pkt->pkt_tran_type = 0; 2149 port = cmd->cmd_port; 2150 2151 if (pkt->pkt_cmd_cookie != NULL) { 2152 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2153 sizeof (ddi_dma_cookie_t)); 2154 pkt->pkt_cmd_cookie = NULL; 2155 } 2156 2157 if (pkt->pkt_resp_cookie != NULL) { 2158 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2159 sizeof (ddi_dma_cookie_t)); 2160 pkt->pkt_resp_cookie = NULL; 2161 } 2162 2163 fp_free_dma(cmd); 2164 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2165 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2166 } 2167 2168 2169 /* 2170 * Release DVMA resources 2171 */ 2172 static void 2173 fp_free_dma(fp_cmd_t *cmd) 2174 { 2175 fc_packet_t *pkt = &cmd->cmd_pkt; 2176 2177 pkt->pkt_cmdlen = 0; 2178 pkt->pkt_rsplen = 0; 2179 pkt->pkt_tran_type = 0; 2180 pkt->pkt_tran_flags = 0; 2181 2182 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2183 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2184 } 2185 2186 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2187 if (pkt->pkt_cmd_acc) { 2188 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2189 } 2190 } 2191 2192 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2193 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2194 } 2195 2196 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2197 if (pkt->pkt_resp_acc) { 2198 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2199 } 2200 } 2201 cmd->cmd_dflags = 0; 2202 } 2203 2204 2205 /* 2206 * Dedicated thread to perform various activities. One thread for 2207 * each fc_local_port_t (driver soft state) instance. 2208 * Note, this effectively works out to one thread for each local 2209 * port, but there are also some Solaris taskq threads in use on a per-local 2210 * port basis; these also need to be taken into consideration. 2211 */ 2212 static void 2213 fp_job_handler(fc_local_port_t *port) 2214 { 2215 int rval; 2216 uint32_t *d_id; 2217 fc_remote_port_t *pd; 2218 job_request_t *job; 2219 2220 #ifndef __lock_lint 2221 /* 2222 * Solaris-internal stuff for proper operation of kernel threads 2223 * with Solaris CPR. 2224 */ 2225 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2226 callb_generic_cpr, "fp_job_handler"); 2227 #endif 2228 2229 2230 /* Loop forever waiting for work to do */ 2231 for (;;) { 2232 2233 mutex_enter(&port->fp_mutex); 2234 2235 /* 2236 * Sleep if no work to do right now, or if we want 2237 * to suspend or power-down. 2238 */ 2239 while (port->fp_job_head == NULL || 2240 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2241 FP_SOFT_SUSPEND))) { 2242 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2243 cv_wait(&port->fp_cv, &port->fp_mutex); 2244 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2245 } 2246 2247 /* 2248 * OK, we've just been woken up, so retrieve the next entry 2249 * from the head of the job queue for this local port. 2250 */ 2251 job = fctl_deque_job(port); 2252 2253 /* 2254 * Handle all the fp driver's supported job codes here 2255 * in this big honkin' switch. 2256 */ 2257 switch (job->job_code) { 2258 case JOB_PORT_SHUTDOWN: 2259 /* 2260 * fp_port_shutdown() is only called from here. This 2261 * will prepare the local port instance (softstate) 2262 * for detaching. 
This cancels timeout callbacks, 2263 * executes LOGOs with remote ports, cleans up tables, 2264 * and deallocates data structs. 2265 */ 2266 fp_port_shutdown(port, job); 2267 2268 /* 2269 * This will exit the job thread. 2270 */ 2271 #ifndef __lock_lint 2272 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2273 #else 2274 mutex_exit(&port->fp_mutex); 2275 #endif 2276 fctl_jobdone(job); 2277 thread_exit(); 2278 2279 /* NOTREACHED */ 2280 2281 case JOB_ATTACH_ULP: { 2282 /* 2283 * This job is spawned in response to a ULP calling 2284 * fc_ulp_add(). 2285 */ 2286 2287 boolean_t do_attach_ulps = B_TRUE; 2288 2289 /* 2290 * If fp is detaching, we don't want to call 2291 * fp_startup_done as this asynchronous 2292 * notification may interfere with the re-attach. 2293 */ 2294 2295 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2296 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2297 do_attach_ulps = B_FALSE; 2298 } else { 2299 /* 2300 * We are going to force the transport 2301 * to attach to the ULPs, so set 2302 * fp_ulp_attach. This will keep any 2303 * potential detach from occurring until 2304 * we are done. 2305 */ 2306 port->fp_ulp_attach = 1; 2307 } 2308 2309 mutex_exit(&port->fp_mutex); 2310 2311 /* 2312 * NOTE: Since we just dropped the mutex, there is now 2313 * a race window where the fp_soft_state check above 2314 * could change here. This race is covered because an 2315 * additional check was added in the functions hidden 2316 * under fp_startup_done(). 2317 */ 2318 if (do_attach_ulps == B_TRUE) { 2319 /* 2320 * This goes thru a bit of a convoluted call 2321 * chain before spawning off a DDI taskq 2322 * request to perform the actual attach 2323 * operations. Blocking can occur at a number 2324 * of points. 2325 */ 2326 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2327 } 2328 job->job_result = FC_SUCCESS; 2329 fctl_jobdone(job); 2330 break; 2331 } 2332 2333 case JOB_ULP_NOTIFY: { 2334 /* 2335 * Pass state change notifications up to any/all 2336 * registered ULPs. 2337 */ 2338 uint32_t statec; 2339 2340 statec = job->job_ulp_listlen; 2341 if (statec == FC_STATE_RESET_REQUESTED) { 2342 port->fp_last_task = port->fp_task; 2343 port->fp_task = FP_TASK_OFFLINE; 2344 fp_port_offline(port, 0); 2345 port->fp_task = port->fp_last_task; 2346 port->fp_last_task = FP_TASK_IDLE; 2347 } 2348 2349 if (--port->fp_statec_busy == 0) { 2350 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2351 } 2352 2353 mutex_exit(&port->fp_mutex); 2354 2355 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2356 fctl_jobdone(job); 2357 break; 2358 } 2359 2360 case JOB_PLOGI_ONE: 2361 /* 2362 * Issue a PLOGI to a single remote port. Multiple 2363 * PLOGIs to different remote ports may occur in 2364 * parallel. 2365 * This can create the fc_remote_port_t if it does not 2366 * already exist. 
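 * (When the local port sits on a switched fabric and no fc_remote_port_t
 * exists for the given D_ID, one is created via
 * fp_create_remote_port_by_ns() before the login is sent; otherwise the
 * PLOGI is attempted with pd == NULL and the structure is created once
 * the PLOGI succeeds.)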
2367 */ 2368 2369 mutex_exit(&port->fp_mutex); 2370 d_id = (uint32_t *)job->job_private; 2371 pd = fctl_get_remote_port_by_did(port, *d_id); 2372 2373 if (pd) { 2374 mutex_enter(&pd->pd_mutex); 2375 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2376 pd->pd_login_count++; 2377 mutex_exit(&pd->pd_mutex); 2378 job->job_result = FC_SUCCESS; 2379 fctl_jobdone(job); 2380 break; 2381 } 2382 mutex_exit(&pd->pd_mutex); 2383 } else { 2384 mutex_enter(&port->fp_mutex); 2385 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2386 mutex_exit(&port->fp_mutex); 2387 pd = fp_create_remote_port_by_ns(port, 2388 *d_id, KM_SLEEP); 2389 if (pd == NULL) { 2390 job->job_result = FC_FAILURE; 2391 fctl_jobdone(job); 2392 break; 2393 } 2394 } else { 2395 mutex_exit(&port->fp_mutex); 2396 } 2397 } 2398 2399 job->job_flags |= JOB_TYPE_FP_ASYNC; 2400 job->job_counter = 1; 2401 2402 rval = fp_port_login(port, *d_id, job, 2403 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2404 2405 if (rval != FC_SUCCESS) { 2406 job->job_result = rval; 2407 fctl_jobdone(job); 2408 } 2409 break; 2410 2411 case JOB_LOGO_ONE: { 2412 /* 2413 * Issue a PLOGO to a single remote port. Multiple 2414 * PLOGOs to different remote ports may occur in 2415 * parallel. 2416 */ 2417 fc_remote_port_t *pd; 2418 2419 #ifndef __lock_lint 2420 ASSERT(job->job_counter > 0); 2421 #endif 2422 2423 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2424 2425 mutex_enter(&pd->pd_mutex); 2426 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2427 mutex_exit(&pd->pd_mutex); 2428 job->job_result = FC_LOGINREQ; 2429 mutex_exit(&port->fp_mutex); 2430 fctl_jobdone(job); 2431 break; 2432 } 2433 if (pd->pd_login_count > 1) { 2434 pd->pd_login_count--; 2435 mutex_exit(&pd->pd_mutex); 2436 job->job_result = FC_SUCCESS; 2437 mutex_exit(&port->fp_mutex); 2438 fctl_jobdone(job); 2439 break; 2440 } 2441 mutex_exit(&pd->pd_mutex); 2442 mutex_exit(&port->fp_mutex); 2443 job->job_flags |= JOB_TYPE_FP_ASYNC; 2444 (void) fp_logout(port, pd, job); 2445 break; 2446 } 2447 2448 case JOB_FCIO_LOGIN: 2449 /* 2450 * PLOGI initiated at ioctl request. 2451 */ 2452 mutex_exit(&port->fp_mutex); 2453 job->job_result = 2454 fp_fcio_login(port, job->job_private, job); 2455 fctl_jobdone(job); 2456 break; 2457 2458 case JOB_FCIO_LOGOUT: 2459 /* 2460 * PLOGO initiated at ioctl request. 
2461 */ 2462 mutex_exit(&port->fp_mutex); 2463 job->job_result = 2464 fp_fcio_logout(port, job->job_private, job); 2465 fctl_jobdone(job); 2466 break; 2467 2468 case JOB_PORT_GETMAP: 2469 case JOB_PORT_GETMAP_PLOGI_ALL: { 2470 port->fp_last_task = port->fp_task; 2471 port->fp_task = FP_TASK_GETMAP; 2472 2473 switch (port->fp_topology) { 2474 case FC_TOP_PRIVATE_LOOP: 2475 job->job_counter = 1; 2476 2477 fp_get_loopmap(port, job); 2478 mutex_exit(&port->fp_mutex); 2479 fp_jobwait(job); 2480 fctl_fillout_map(port, 2481 (fc_portmap_t **)job->job_private, 2482 (uint32_t *)job->job_arg, 1, 0, 0); 2483 fctl_jobdone(job); 2484 mutex_enter(&port->fp_mutex); 2485 break; 2486 2487 case FC_TOP_PUBLIC_LOOP: 2488 case FC_TOP_FABRIC: 2489 mutex_exit(&port->fp_mutex); 2490 job->job_counter = 1; 2491 2492 job->job_result = fp_ns_getmap(port, 2493 job, (fc_portmap_t **)job->job_private, 2494 (uint32_t *)job->job_arg, 2495 FCTL_GAN_START_ID); 2496 fctl_jobdone(job); 2497 mutex_enter(&port->fp_mutex); 2498 break; 2499 2500 case FC_TOP_PT_PT: 2501 mutex_exit(&port->fp_mutex); 2502 fctl_fillout_map(port, 2503 (fc_portmap_t **)job->job_private, 2504 (uint32_t *)job->job_arg, 1, 0, 0); 2505 fctl_jobdone(job); 2506 mutex_enter(&port->fp_mutex); 2507 break; 2508 2509 default: 2510 mutex_exit(&port->fp_mutex); 2511 fctl_jobdone(job); 2512 mutex_enter(&port->fp_mutex); 2513 break; 2514 } 2515 port->fp_task = port->fp_last_task; 2516 port->fp_last_task = FP_TASK_IDLE; 2517 mutex_exit(&port->fp_mutex); 2518 break; 2519 } 2520 2521 case JOB_PORT_OFFLINE: { 2522 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2523 2524 port->fp_last_task = port->fp_task; 2525 port->fp_task = FP_TASK_OFFLINE; 2526 2527 if (port->fp_statec_busy > 2) { 2528 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2529 fp_port_offline(port, 0); 2530 if (--port->fp_statec_busy == 0) { 2531 port->fp_soft_state &= 2532 ~FP_SOFT_IN_STATEC_CB; 2533 } 2534 } else { 2535 fp_port_offline(port, 1); 2536 } 2537 2538 port->fp_task = port->fp_last_task; 2539 port->fp_last_task = FP_TASK_IDLE; 2540 2541 mutex_exit(&port->fp_mutex); 2542 2543 fctl_jobdone(job); 2544 break; 2545 } 2546 2547 case JOB_PORT_STARTUP: { 2548 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2549 if (port->fp_statec_busy > 1) { 2550 mutex_exit(&port->fp_mutex); 2551 break; 2552 } 2553 mutex_exit(&port->fp_mutex); 2554 2555 FP_TRACE(FP_NHEAD2(9, rval), 2556 "Topology discovery failed"); 2557 break; 2558 } 2559 2560 /* 2561 * Attempt building device handles in case 2562 * of private Loop. 
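 * (i.e. walk the LILP map via fp_get_loopmap() and log in to any AL_PA
 * for which no fc_remote_port_t exists yet)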
2563 */ 2564 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2565 job->job_counter = 1; 2566 2567 fp_get_loopmap(port, job); 2568 mutex_exit(&port->fp_mutex); 2569 fp_jobwait(job); 2570 mutex_enter(&port->fp_mutex); 2571 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2572 ASSERT(port->fp_total_devices == 0); 2573 port->fp_total_devices = 2574 port->fp_dev_count; 2575 } 2576 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2577 /* 2578 * Hack to avoid state changes going up early 2579 */ 2580 port->fp_statec_busy++; 2581 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2582 2583 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2584 fp_fabric_online(port, job); 2585 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2586 } 2587 mutex_exit(&port->fp_mutex); 2588 fctl_jobdone(job); 2589 break; 2590 } 2591 2592 case JOB_PORT_ONLINE: { 2593 char *newtop; 2594 char *oldtop; 2595 uint32_t old_top; 2596 2597 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2598 2599 /* 2600 * Bail out early if there are a lot of 2601 * state changes in the pipeline 2602 */ 2603 if (port->fp_statec_busy > 1) { 2604 --port->fp_statec_busy; 2605 mutex_exit(&port->fp_mutex); 2606 fctl_jobdone(job); 2607 break; 2608 } 2609 2610 switch (old_top = port->fp_topology) { 2611 case FC_TOP_PRIVATE_LOOP: 2612 oldtop = "Private Loop"; 2613 break; 2614 2615 case FC_TOP_PUBLIC_LOOP: 2616 oldtop = "Public Loop"; 2617 break; 2618 2619 case FC_TOP_PT_PT: 2620 oldtop = "Point to Point"; 2621 break; 2622 2623 case FC_TOP_FABRIC: 2624 oldtop = "Fabric"; 2625 break; 2626 2627 default: 2628 oldtop = NULL; 2629 break; 2630 } 2631 2632 port->fp_last_task = port->fp_task; 2633 port->fp_task = FP_TASK_ONLINE; 2634 2635 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2636 2637 port->fp_task = port->fp_last_task; 2638 port->fp_last_task = FP_TASK_IDLE; 2639 2640 if (port->fp_statec_busy > 1) { 2641 --port->fp_statec_busy; 2642 mutex_exit(&port->fp_mutex); 2643 break; 2644 } 2645 2646 port->fp_state = FC_STATE_OFFLINE; 2647 2648 FP_TRACE(FP_NHEAD2(9, rval), 2649 "Topology discovery failed"); 2650 2651 if (--port->fp_statec_busy == 0) { 2652 port->fp_soft_state &= 2653 ~FP_SOFT_IN_STATEC_CB; 2654 } 2655 2656 if (port->fp_offline_tid == NULL) { 2657 port->fp_offline_tid = 2658 timeout(fp_offline_timeout, 2659 (caddr_t)port, fp_offline_ticks); 2660 } 2661 2662 mutex_exit(&port->fp_mutex); 2663 break; 2664 } 2665 2666 switch (port->fp_topology) { 2667 case FC_TOP_PRIVATE_LOOP: 2668 newtop = "Private Loop"; 2669 break; 2670 2671 case FC_TOP_PUBLIC_LOOP: 2672 newtop = "Public Loop"; 2673 break; 2674 2675 case FC_TOP_PT_PT: 2676 newtop = "Point to Point"; 2677 break; 2678 2679 case FC_TOP_FABRIC: 2680 newtop = "Fabric"; 2681 break; 2682 2683 default: 2684 newtop = NULL; 2685 break; 2686 } 2687 2688 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2689 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2690 "Change in FC Topology old = %s new = %s", 2691 oldtop, newtop); 2692 } 2693 2694 switch (port->fp_topology) { 2695 case FC_TOP_PRIVATE_LOOP: { 2696 int orphan = (old_top == FC_TOP_FABRIC || 2697 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2698 2699 mutex_exit(&port->fp_mutex); 2700 fp_loop_online(port, job, orphan); 2701 break; 2702 } 2703 2704 case FC_TOP_PUBLIC_LOOP: 2705 /* FALLTHROUGH */ 2706 case FC_TOP_FABRIC: 2707 fp_fabric_online(port, job); 2708 mutex_exit(&port->fp_mutex); 2709 break; 2710 2711 case FC_TOP_PT_PT: 2712 fp_p2p_online(port, job); 2713 mutex_exit(&port->fp_mutex); 2714 break; 2715 2716 default: 2717 if (--port->fp_statec_busy != 0) { 2718 /* 2719 * Watch curiously at what the next 2720 * state transition can do. 2721 */ 2722 mutex_exit(&port->fp_mutex); 2723 break; 2724 } 2725 2726 FP_TRACE(FP_NHEAD2(9, 0), 2727 "Topology Unknown, Offlining the port.."); 2728 2729 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2730 port->fp_state = FC_STATE_OFFLINE; 2731 2732 if (port->fp_offline_tid == NULL) { 2733 port->fp_offline_tid = 2734 timeout(fp_offline_timeout, 2735 (caddr_t)port, fp_offline_ticks); 2736 } 2737 mutex_exit(&port->fp_mutex); 2738 break; 2739 } 2740 2741 mutex_enter(&port->fp_mutex); 2742 2743 port->fp_task = port->fp_last_task; 2744 port->fp_last_task = FP_TASK_IDLE; 2745 2746 mutex_exit(&port->fp_mutex); 2747 2748 fctl_jobdone(job); 2749 break; 2750 } 2751 2752 case JOB_PLOGI_GROUP: { 2753 mutex_exit(&port->fp_mutex); 2754 fp_plogi_group(port, job); 2755 break; 2756 } 2757 2758 case JOB_UNSOL_REQUEST: { 2759 mutex_exit(&port->fp_mutex); 2760 fp_handle_unsol_buf(port, 2761 (fc_unsol_buf_t *)job->job_private, job); 2762 fctl_dealloc_job(job); 2763 break; 2764 } 2765 2766 case JOB_NS_CMD: { 2767 fctl_ns_req_t *ns_cmd; 2768 2769 mutex_exit(&port->fp_mutex); 2770 2771 job->job_flags |= JOB_TYPE_FP_ASYNC; 2772 ns_cmd = (fctl_ns_req_t *)job->job_private; 2773 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2774 ns_cmd->ns_cmd_code > NS_DA_ID) { 2775 job->job_result = FC_BADCMD; 2776 fctl_jobdone(job); 2777 break; 2778 } 2779 2780 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2781 if (ns_cmd->ns_pd != NULL) { 2782 job->job_result = FC_BADOBJECT; 2783 fctl_jobdone(job); 2784 break; 2785 } 2786 2787 job->job_counter = 1; 2788 2789 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2790 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2791 2792 if (rval != FC_SUCCESS) { 2793 job->job_result = rval; 2794 fctl_jobdone(job); 2795 } 2796 break; 2797 } 2798 job->job_result = FC_SUCCESS; 2799 job->job_counter = 1; 2800 2801 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2802 if (rval != FC_SUCCESS) { 2803 fctl_jobdone(job); 2804 } 2805 break; 2806 } 2807 2808 case JOB_LINK_RESET: { 2809 la_wwn_t *pwwn; 2810 uint32_t topology; 2811 2812 pwwn = (la_wwn_t *)job->job_private; 2813 ASSERT(pwwn != NULL); 2814 2815 topology = port->fp_topology; 2816 mutex_exit(&port->fp_mutex); 2817 2818 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2819 topology == FC_TOP_PRIVATE_LOOP) { 2820 job->job_flags |= JOB_TYPE_FP_ASYNC; 2821 rval = port->fp_fca_tran->fca_reset( 2822 port->fp_fca_handle, FC_FCA_LINK_RESET); 2823 job->job_result = rval; 2824 fp_jobdone(job); 2825 } else { 2826 ASSERT((job->job_flags & 2827 JOB_TYPE_FP_ASYNC) == 0); 2828 2829 if (FC_IS_TOP_SWITCH(topology)) { 2830 rval = fp_remote_lip(port, pwwn, 2831 KM_SLEEP, job); 2832 } else { 2833 rval = FC_FAILURE; 2834 } 2835 if (rval != FC_SUCCESS) { 2836 job->job_result = rval; 2837 } 2838 fctl_jobdone(job); 2839 } 2840 break; 2841 } 2842 2843 default: 2844 mutex_exit(&port->fp_mutex); 2845 job->job_result = FC_BADCMD; 2846 fctl_jobdone(job); 2847 break; 2848 } 2849 } 2850 /* NOTREACHED */ 2851 } 2852 2853 2854 /* 2855 * Perform FC port bring up initialization 2856 */ 2857 static int 2858 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2859 { 2860 int rval; 2861 uint32_t state; 2862 uint32_t src_id; 2863 fc_lilpmap_t *lilp_map; 2864 2865 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2866 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2867 2868 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2869 " port=%p, job=%p", port, job); 2870 2871 port->fp_topology = FC_TOP_UNKNOWN; 2872 port->fp_port_id.port_id = 0; 2873 state = FC_PORT_STATE_MASK(port->fp_state); 2874 2875 if (state == FC_STATE_OFFLINE) { 2876 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2877 job->job_result = FC_OFFLINE; 2878 mutex_exit(&port->fp_mutex); 2879 fctl_jobdone(job); 2880 mutex_enter(&port->fp_mutex); 2881 return (FC_OFFLINE); 2882 } 2883 2884 if (state == FC_STATE_LOOP) { 2885 port->fp_port_type.port_type = FC_NS_PORT_NL; 2886 mutex_exit(&port->fp_mutex); 2887 2888 lilp_map = &port->fp_lilp_map; 2889 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2890 job->job_result = FC_FAILURE; 2891 fctl_jobdone(job); 2892 2893 FP_TRACE(FP_NHEAD1(9, rval), 2894 "LILP map Invalid or not present"); 2895 mutex_enter(&port->fp_mutex); 2896 return (FC_FAILURE); 2897 } 2898 2899 if (lilp_map->lilp_length == 0) { 2900 job->job_result = FC_NO_MAP; 2901 fctl_jobdone(job); 2902 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2903 "LILP map length zero"); 2904 mutex_enter(&port->fp_mutex); 2905 return (FC_NO_MAP); 2906 } 2907 src_id = lilp_map->lilp_myalpa & 0xFF; 2908 } else { 2909 fc_remote_port_t *pd; 2910 fc_fca_pm_t pm; 2911 fc_fca_p2p_info_t p2p_info; 2912 int pd_recepient; 2913 2914 /* 2915 * Get P2P remote port info if possible 2916 */ 2917 bzero((caddr_t)&pm, sizeof (pm)); 2918 2919 pm.pm_cmd_flags = FC_FCA_PM_READ; 2920 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2921 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2922 pm.pm_data_buf = (caddr_t)&p2p_info; 2923 2924 rval = port->fp_fca_tran->fca_port_manage( 2925 port->fp_fca_handle, &pm); 2926 2927 if (rval == FC_SUCCESS) { 2928 port->fp_port_id.port_id = p2p_info.fca_d_id; 2929 port->fp_port_type.port_type = FC_NS_PORT_N; 2930 port->fp_topology = FC_TOP_PT_PT; 2931 port->fp_total_devices = 1; 2932 pd_recepient = fctl_wwn_cmp( 2933 &port->fp_service_params.nport_ww_name, 2934 &p2p_info.pwwn) < 0 ? 
2935 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR; 2936 mutex_exit(&port->fp_mutex); 2937 pd = fctl_create_remote_port(port, 2938 &p2p_info.nwwn, 2939 &p2p_info.pwwn, 2940 p2p_info.d_id, 2941 pd_recepient, KM_NOSLEEP); 2942 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;" 2943 " P2P port=%p pd=%p fp %x pd %x", port, pd, 2944 port->fp_port_id.port_id, p2p_info.d_id); 2945 mutex_enter(&port->fp_mutex); 2946 return (FC_SUCCESS); 2947 } 2948 port->fp_port_type.port_type = FC_NS_PORT_N; 2949 mutex_exit(&port->fp_mutex); 2950 src_id = 0; 2951 } 2952 2953 job->job_counter = 1; 2954 job->job_result = FC_SUCCESS; 2955 2956 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE, 2957 KM_SLEEP)) != FC_SUCCESS) { 2958 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2959 job->job_result = FC_FAILURE; 2960 fctl_jobdone(job); 2961 2962 mutex_enter(&port->fp_mutex); 2963 if (port->fp_statec_busy <= 1) { 2964 mutex_exit(&port->fp_mutex); 2965 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL, 2966 "Couldn't transport FLOGI"); 2967 mutex_enter(&port->fp_mutex); 2968 } 2969 return (FC_FAILURE); 2970 } 2971 2972 fp_jobwait(job); 2973 2974 mutex_enter(&port->fp_mutex); 2975 if (job->job_result == FC_SUCCESS) { 2976 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2977 mutex_exit(&port->fp_mutex); 2978 fp_ns_init(port, job, KM_SLEEP); 2979 mutex_enter(&port->fp_mutex); 2980 } 2981 } else { 2982 if (state == FC_STATE_LOOP) { 2983 port->fp_topology = FC_TOP_PRIVATE_LOOP; 2984 port->fp_port_id.port_id = 2985 port->fp_lilp_map.lilp_myalpa & 0xFF; 2986 } 2987 } 2988 2989 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p", 2990 port, job); 2991 2992 return (FC_SUCCESS); 2993 } 2994 2995 2996 /* 2997 * Perform ULP invocations following FC port startup 2998 */ 2999 /* ARGSUSED */ 3000 static void 3001 fp_startup_done(opaque_t arg, uchar_t result) 3002 { 3003 fc_local_port_t *port = arg; 3004 3005 fp_attach_ulps(port, FC_CMD_ATTACH); 3006 3007 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port); 3008 } 3009 3010 3011 /* 3012 * Perform ULP port attach 3013 */ 3014 static void 3015 fp_ulp_port_attach(void *arg) 3016 { 3017 fp_soft_attach_t *att = (fp_soft_attach_t *)arg; 3018 fc_local_port_t *port = att->att_port; 3019 3020 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3021 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd); 3022 3023 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage); 3024 3025 if (att->att_need_pm_idle == B_TRUE) { 3026 fctl_idle_port(port); 3027 } 3028 3029 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of" 3030 " ULPs end; port=%p, cmd=%x", port, att->att_cmd); 3031 3032 mutex_enter(&att->att_port->fp_mutex); 3033 att->att_port->fp_ulp_attach = 0; 3034 3035 port->fp_task = port->fp_last_task; 3036 port->fp_last_task = FP_TASK_IDLE; 3037 3038 cv_signal(&att->att_port->fp_attach_cv); 3039 3040 mutex_exit(&att->att_port->fp_mutex); 3041 3042 kmem_free(att, sizeof (fp_soft_attach_t)); 3043 } 3044 3045 /* 3046 * Entry point to funnel all requests down to FCAs 3047 */ 3048 static int 3049 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle) 3050 { 3051 int rval; 3052 3053 mutex_enter(&port->fp_mutex); 3054 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL && 3055 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) == 3056 FC_STATE_OFFLINE))) { 3057 /* 3058 * This means there is more than one state change 3059 * at this point of time - Since they are processed 3060 * serially, any processing of the current one should 3061 * be 
failed, failed and move up in processing the next 3062 */ 3063 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS; 3064 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3065 if (cmd->cmd_job) { 3066 /* 3067 * A state change that is going to be invalidated 3068 * by another one already in the port driver's queue 3069 * need not go up to all ULPs. This will minimize 3070 * needless processing and ripples in ULP modules 3071 */ 3072 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3073 } 3074 mutex_exit(&port->fp_mutex); 3075 return (FC_STATEC_BUSY); 3076 } 3077 3078 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3079 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE; 3080 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE; 3081 mutex_exit(&port->fp_mutex); 3082 3083 return (FC_OFFLINE); 3084 } 3085 mutex_exit(&port->fp_mutex); 3086 3087 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt); 3088 if (rval != FC_SUCCESS) { 3089 if (rval == FC_TRAN_BUSY) { 3090 cmd->cmd_retry_interval = fp_retry_delay; 3091 rval = fp_retry_cmd(&cmd->cmd_pkt); 3092 if (rval == FC_FAILURE) { 3093 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY; 3094 } 3095 } 3096 } else { 3097 mutex_enter(&port->fp_mutex); 3098 port->fp_out_fpcmds++; 3099 mutex_exit(&port->fp_mutex); 3100 } 3101 3102 return (rval); 3103 } 3104 3105 3106 /* 3107 * Each time a timeout kicks in, walk the wait queue, decrement the 3108 * the retry_interval, when the retry_interval becomes less than 3109 * or equal to zero, re-transport the command: If the re-transport 3110 * fails with BUSY, enqueue the command in the wait queue. 3111 * 3112 * In order to prevent looping forever because of commands enqueued 3113 * from within this function itself, save the current tail pointer 3114 * (in cur_tail) and exit the loop after serving this command. 
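 *
 * In outline, a sketch of the loop below (not extra logic):
 *
 *	cur_tail = port->fp_wait_tail;		(snapshot the tail first)
 *	while ((cmd = fp_deque_cmd(port)) != NULL) {
 *		retry, complete or re-enqueue cmd;
 *		if (cmd == cur_tail)
 *			break;	(anything re-enqueued during this pass
 *				 is left for the next timeout tick)
 *	}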
3115 */ 3116 static void 3117 fp_resendcmd(void *port_handle) 3118 { 3119 int rval; 3120 fc_local_port_t *port; 3121 fp_cmd_t *cmd; 3122 fp_cmd_t *cur_tail; 3123 3124 port = port_handle; 3125 mutex_enter(&port->fp_mutex); 3126 cur_tail = port->fp_wait_tail; 3127 mutex_exit(&port->fp_mutex); 3128 3129 while ((cmd = fp_deque_cmd(port)) != NULL) { 3130 cmd->cmd_retry_interval -= fp_retry_ticker; 3131 /* Check if we are detaching */ 3132 if (port->fp_soft_state & 3133 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) { 3134 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 3135 cmd->cmd_pkt.pkt_reason = 0; 3136 fp_iodone(cmd); 3137 } else if (cmd->cmd_retry_interval <= 0) { 3138 rval = cmd->cmd_transport(port->fp_fca_handle, 3139 &cmd->cmd_pkt); 3140 3141 if (rval != FC_SUCCESS) { 3142 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) { 3143 if (--cmd->cmd_retry_count) { 3144 fp_enque_cmd(port, cmd); 3145 if (cmd == cur_tail) { 3146 break; 3147 } 3148 continue; 3149 } 3150 cmd->cmd_pkt.pkt_state = 3151 FC_PKT_TRAN_BSY; 3152 } else { 3153 cmd->cmd_pkt.pkt_state = 3154 FC_PKT_TRAN_ERROR; 3155 } 3156 cmd->cmd_pkt.pkt_reason = 0; 3157 fp_iodone(cmd); 3158 } else { 3159 mutex_enter(&port->fp_mutex); 3160 port->fp_out_fpcmds++; 3161 mutex_exit(&port->fp_mutex); 3162 } 3163 } else { 3164 fp_enque_cmd(port, cmd); 3165 } 3166 3167 if (cmd == cur_tail) { 3168 break; 3169 } 3170 } 3171 3172 mutex_enter(&port->fp_mutex); 3173 if (port->fp_wait_head) { 3174 timeout_id_t tid; 3175 3176 mutex_exit(&port->fp_mutex); 3177 tid = timeout(fp_resendcmd, (caddr_t)port, 3178 fp_retry_ticks); 3179 mutex_enter(&port->fp_mutex); 3180 port->fp_wait_tid = tid; 3181 } else { 3182 port->fp_wait_tid = NULL; 3183 } 3184 mutex_exit(&port->fp_mutex); 3185 } 3186 3187 3188 /* 3189 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here. 3190 * 3191 * Yes, as you can see below, cmd_retry_count is used here too. That means 3192 * the retries for BUSY are less if there were transport failures (transport 3193 * failure means fca_transport failure). 
The goal is not to exceed overall 3194 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3195 * 3196 * Return Values: 3197 * FC_SUCCESS 3198 * FC_FAILURE 3199 */ 3200 static int 3201 fp_retry_cmd(fc_packet_t *pkt) 3202 { 3203 fp_cmd_t *cmd; 3204 3205 cmd = pkt->pkt_ulp_private; 3206 3207 if (--cmd->cmd_retry_count) { 3208 fp_enque_cmd(cmd->cmd_port, cmd); 3209 return (FC_SUCCESS); 3210 } else { 3211 return (FC_FAILURE); 3212 } 3213 } 3214 3215 3216 /* 3217 * Queue up FC packet for deferred retry 3218 */ 3219 static void 3220 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3221 { 3222 timeout_id_t tid; 3223 3224 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3225 3226 #ifdef DEBUG 3227 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3228 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3229 #endif 3230 3231 mutex_enter(&port->fp_mutex); 3232 if (port->fp_wait_tail) { 3233 port->fp_wait_tail->cmd_next = cmd; 3234 port->fp_wait_tail = cmd; 3235 } else { 3236 ASSERT(port->fp_wait_head == NULL); 3237 port->fp_wait_head = port->fp_wait_tail = cmd; 3238 if (port->fp_wait_tid == NULL) { 3239 mutex_exit(&port->fp_mutex); 3240 tid = timeout(fp_resendcmd, (caddr_t)port, 3241 fp_retry_ticks); 3242 mutex_enter(&port->fp_mutex); 3243 port->fp_wait_tid = tid; 3244 } 3245 } 3246 mutex_exit(&port->fp_mutex); 3247 } 3248 3249 3250 /* 3251 * Handle all RJT codes 3252 */ 3253 static int 3254 fp_handle_reject(fc_packet_t *pkt) 3255 { 3256 int rval = FC_FAILURE; 3257 uchar_t next_class; 3258 fp_cmd_t *cmd; 3259 fc_local_port_t *port; 3260 3261 cmd = pkt->pkt_ulp_private; 3262 port = cmd->cmd_port; 3263 3264 switch (pkt->pkt_state) { 3265 case FC_PKT_FABRIC_RJT: 3266 case FC_PKT_NPORT_RJT: 3267 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3268 next_class = fp_get_nextclass(cmd->cmd_port, 3269 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3270 3271 if (next_class == FC_TRAN_CLASS_INVALID) { 3272 return (rval); 3273 } 3274 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3275 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3276 3277 rval = fp_sendcmd(cmd->cmd_port, cmd, 3278 cmd->cmd_port->fp_fca_handle); 3279 3280 if (rval != FC_SUCCESS) { 3281 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3282 } 3283 } 3284 break; 3285 3286 case FC_PKT_LS_RJT: 3287 case FC_PKT_BA_RJT: 3288 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3289 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3290 cmd->cmd_retry_interval = fp_retry_delay; 3291 rval = fp_retry_cmd(pkt); 3292 } 3293 break; 3294 3295 case FC_PKT_FS_RJT: 3296 if ((pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) || 3297 ((pkt->pkt_reason == FC_REASON_FS_CMD_UNABLE) && 3298 (pkt->pkt_expln == 0x00))) { 3299 cmd->cmd_retry_interval = fp_retry_delay; 3300 rval = fp_retry_cmd(pkt); 3301 } 3302 break; 3303 3304 case FC_PKT_LOCAL_RJT: 3305 if (pkt->pkt_reason == FC_REASON_QFULL) { 3306 cmd->cmd_retry_interval = fp_retry_delay; 3307 rval = fp_retry_cmd(pkt); 3308 } 3309 break; 3310 3311 default: 3312 FP_TRACE(FP_NHEAD1(1, 0), 3313 "fp_handle_reject(): Invalid pkt_state"); 3314 break; 3315 } 3316 3317 return (rval); 3318 } 3319 3320 3321 /* 3322 * Return the next class of service supported by the FCA 3323 */ 3324 static uchar_t 3325 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3326 { 3327 uchar_t next_class; 3328 3329 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3330 3331 switch (cur_class) { 3332 case FC_TRAN_CLASS_INVALID: 3333 if (port->fp_cos & FC_NS_CLASS1) { 3334 next_class = FC_TRAN_CLASS1; 3335 break; 3336 } 3337 /* FALLTHROUGH */ 3338 3339 case 
FC_TRAN_CLASS1: 3340 if (port->fp_cos & FC_NS_CLASS2) { 3341 next_class = FC_TRAN_CLASS2; 3342 break; 3343 } 3344 /* FALLTHROUGH */ 3345 3346 case FC_TRAN_CLASS2: 3347 if (port->fp_cos & FC_NS_CLASS3) { 3348 next_class = FC_TRAN_CLASS3; 3349 break; 3350 } 3351 /* FALLTHROUGH */ 3352 3353 case FC_TRAN_CLASS3: 3354 default: 3355 next_class = FC_TRAN_CLASS_INVALID; 3356 break; 3357 } 3358 3359 return (next_class); 3360 } 3361 3362 3363 /* 3364 * Determine if a class of service is supported by the FCA 3365 */ 3366 static int 3367 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3368 { 3369 int rval; 3370 3371 switch (tran_class) { 3372 case FC_TRAN_CLASS1: 3373 if (cos & FC_NS_CLASS1) { 3374 rval = FC_SUCCESS; 3375 } else { 3376 rval = FC_FAILURE; 3377 } 3378 break; 3379 3380 case FC_TRAN_CLASS2: 3381 if (cos & FC_NS_CLASS2) { 3382 rval = FC_SUCCESS; 3383 } else { 3384 rval = FC_FAILURE; 3385 } 3386 break; 3387 3388 case FC_TRAN_CLASS3: 3389 if (cos & FC_NS_CLASS3) { 3390 rval = FC_SUCCESS; 3391 } else { 3392 rval = FC_FAILURE; 3393 } 3394 break; 3395 3396 default: 3397 rval = FC_FAILURE; 3398 break; 3399 } 3400 3401 return (rval); 3402 } 3403 3404 3405 /* 3406 * Dequeue FC packet for retry 3407 */ 3408 static fp_cmd_t * 3409 fp_deque_cmd(fc_local_port_t *port) 3410 { 3411 fp_cmd_t *cmd; 3412 3413 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3414 3415 mutex_enter(&port->fp_mutex); 3416 3417 if (port->fp_wait_head == NULL) { 3418 /* 3419 * To avoid races, NULL the fp_wait_tid as 3420 * we are about to exit the timeout thread. 3421 */ 3422 port->fp_wait_tid = NULL; 3423 mutex_exit(&port->fp_mutex); 3424 return (NULL); 3425 } 3426 3427 cmd = port->fp_wait_head; 3428 port->fp_wait_head = cmd->cmd_next; 3429 cmd->cmd_next = NULL; 3430 3431 if (port->fp_wait_head == NULL) { 3432 port->fp_wait_tail = NULL; 3433 } 3434 mutex_exit(&port->fp_mutex); 3435 3436 return (cmd); 3437 } 3438 3439 3440 /* 3441 * Wait for job completion 3442 */ 3443 static void 3444 fp_jobwait(job_request_t *job) 3445 { 3446 sema_p(&job->job_port_sema); 3447 } 3448 3449 3450 /* 3451 * Convert FC packet state to FC errno 3452 */ 3453 int 3454 fp_state_to_rval(uchar_t state) 3455 { 3456 int count; 3457 3458 for (count = 0; count < sizeof (fp_xlat) / 3459 sizeof (fp_xlat[0]); count++) { 3460 if (fp_xlat[count].xlat_state == state) { 3461 return (fp_xlat[count].xlat_rval); 3462 } 3463 } 3464 3465 return (FC_FAILURE); 3466 } 3467 3468 3469 /* 3470 * For Synchronous I/O requests, the caller is 3471 * expected to do fctl_jobdone(if necessary) 3472 * 3473 * We want to preserve at least one failure in the 3474 * job_result if it happens. 
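 * (job_result is only overwritten while it still reads FC_SUCCESS, so the
 * first failure reported by any of the job's packets is the one the
 * waiter eventually sees)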
3475 * 3476 */ 3477 static void 3478 fp_iodone(fp_cmd_t *cmd) 3479 { 3480 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt; 3481 job_request_t *job = cmd->cmd_job; 3482 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd; 3483 3484 ASSERT(job != NULL); 3485 ASSERT(cmd->cmd_port != NULL); 3486 ASSERT(&cmd->cmd_pkt != NULL); 3487 3488 mutex_enter(&job->job_mutex); 3489 if (job->job_result == FC_SUCCESS) { 3490 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state); 3491 } 3492 mutex_exit(&job->job_mutex); 3493 3494 if (pd) { 3495 mutex_enter(&pd->pd_mutex); 3496 pd->pd_flags = PD_IDLE; 3497 mutex_exit(&pd->pd_mutex); 3498 } 3499 3500 if (ulp_pkt) { 3501 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR && 3502 FP_IS_PKT_ERROR(ulp_pkt)) { 3503 fc_local_port_t *port; 3504 fc_remote_node_t *node; 3505 3506 port = cmd->cmd_port; 3507 3508 mutex_enter(&pd->pd_mutex); 3509 pd->pd_state = PORT_DEVICE_INVALID; 3510 pd->pd_ref_count--; 3511 node = pd->pd_remote_nodep; 3512 mutex_exit(&pd->pd_mutex); 3513 3514 ASSERT(node != NULL); 3515 ASSERT(port != NULL); 3516 3517 if (fctl_destroy_remote_port(port, pd) == 0) { 3518 fctl_destroy_remote_node(node); 3519 } 3520 3521 ulp_pkt->pkt_pd = NULL; 3522 } 3523 3524 ulp_pkt->pkt_comp(ulp_pkt); 3525 } 3526 3527 fp_free_pkt(cmd); 3528 fp_jobdone(job); 3529 } 3530 3531 3532 /* 3533 * Job completion handler 3534 */ 3535 static void 3536 fp_jobdone(job_request_t *job) 3537 { 3538 mutex_enter(&job->job_mutex); 3539 ASSERT(job->job_counter > 0); 3540 3541 if (--job->job_counter != 0) { 3542 mutex_exit(&job->job_mutex); 3543 return; 3544 } 3545 3546 if (job->job_ulp_pkts) { 3547 ASSERT(job->job_ulp_listlen > 0); 3548 kmem_free(job->job_ulp_pkts, 3549 sizeof (fc_packet_t *) * job->job_ulp_listlen); 3550 } 3551 3552 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3553 mutex_exit(&job->job_mutex); 3554 fctl_jobdone(job); 3555 } else { 3556 mutex_exit(&job->job_mutex); 3557 sema_v(&job->job_port_sema); 3558 } 3559 } 3560 3561 3562 /* 3563 * Try to perform shutdown of a port during a detach. No return 3564 * value since the detach should not fail because the port shutdown 3565 * failed. 3566 */ 3567 static void 3568 fp_port_shutdown(fc_local_port_t *port, job_request_t *job) 3569 { 3570 int index; 3571 int count; 3572 int flags; 3573 fp_cmd_t *cmd; 3574 struct pwwn_hash *head; 3575 fc_remote_port_t *pd; 3576 3577 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3578 3579 job->job_result = FC_SUCCESS; 3580 3581 if (port->fp_taskq) { 3582 /* 3583 * We must release the mutex here to ensure that other 3584 * potential jobs can complete their processing. Many 3585 * also need this mutex. 3586 */ 3587 mutex_exit(&port->fp_mutex); 3588 taskq_wait(port->fp_taskq); 3589 mutex_enter(&port->fp_mutex); 3590 } 3591 3592 if (port->fp_offline_tid) { 3593 timeout_id_t tid; 3594 3595 tid = port->fp_offline_tid; 3596 port->fp_offline_tid = NULL; 3597 mutex_exit(&port->fp_mutex); 3598 (void) untimeout(tid); 3599 mutex_enter(&port->fp_mutex); 3600 } 3601 3602 if (port->fp_wait_tid) { 3603 timeout_id_t tid; 3604 3605 tid = port->fp_wait_tid; 3606 port->fp_wait_tid = NULL; 3607 mutex_exit(&port->fp_mutex); 3608 (void) untimeout(tid); 3609 } else { 3610 mutex_exit(&port->fp_mutex); 3611 } 3612 3613 /* 3614 * While we cancel the timeout, let's also return the 3615 * the outstanding requests back to the callers. 
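 * Every command still sitting on the wait queue is completed with
 * FC_OFFLINE through fp_iodone().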
3616 */ 3617 while ((cmd = fp_deque_cmd(port)) != NULL) { 3618 ASSERT(cmd->cmd_job != NULL); 3619 cmd->cmd_job->job_result = FC_OFFLINE; 3620 fp_iodone(cmd); 3621 } 3622 3623 /* 3624 * Gracefully LOGO with all the devices logged in. 3625 */ 3626 mutex_enter(&port->fp_mutex); 3627 3628 for (count = index = 0; index < pwwn_table_size; index++) { 3629 head = &port->fp_pwwn_table[index]; 3630 pd = head->pwwn_head; 3631 while (pd != NULL) { 3632 mutex_enter(&pd->pd_mutex); 3633 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3634 count++; 3635 } 3636 mutex_exit(&pd->pd_mutex); 3637 pd = pd->pd_wwn_hnext; 3638 } 3639 } 3640 3641 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3642 flags = job->job_flags; 3643 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3644 } else { 3645 flags = 0; 3646 } 3647 if (count) { 3648 job->job_counter = count; 3649 3650 for (index = 0; index < pwwn_table_size; index++) { 3651 head = &port->fp_pwwn_table[index]; 3652 pd = head->pwwn_head; 3653 while (pd != NULL) { 3654 mutex_enter(&pd->pd_mutex); 3655 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3656 ASSERT(pd->pd_login_count > 0); 3657 /* 3658 * Force the counter to ONE in order 3659 * for us to really send LOGO els. 3660 */ 3661 pd->pd_login_count = 1; 3662 mutex_exit(&pd->pd_mutex); 3663 mutex_exit(&port->fp_mutex); 3664 (void) fp_logout(port, pd, job); 3665 mutex_enter(&port->fp_mutex); 3666 } else { 3667 mutex_exit(&pd->pd_mutex); 3668 } 3669 pd = pd->pd_wwn_hnext; 3670 } 3671 } 3672 mutex_exit(&port->fp_mutex); 3673 fp_jobwait(job); 3674 } else { 3675 mutex_exit(&port->fp_mutex); 3676 } 3677 3678 if (job->job_result != FC_SUCCESS) { 3679 FP_TRACE(FP_NHEAD1(9, 0), 3680 "Can't logout all devices. Proceeding with" 3681 " port shutdown"); 3682 job->job_result = FC_SUCCESS; 3683 } 3684 3685 fctl_destroy_all_remote_ports(port); 3686 3687 mutex_enter(&port->fp_mutex); 3688 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3689 mutex_exit(&port->fp_mutex); 3690 fp_ns_fini(port, job); 3691 } else { 3692 mutex_exit(&port->fp_mutex); 3693 } 3694 3695 if (flags) { 3696 job->job_flags = flags; 3697 } 3698 3699 mutex_enter(&port->fp_mutex); 3700 3701 } 3702 3703 3704 /* 3705 * Build the port driver's data structures based on the AL_PA list 3706 */ 3707 static void 3708 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3709 { 3710 int rval; 3711 int flag; 3712 int count; 3713 uint32_t d_id; 3714 fc_remote_port_t *pd; 3715 fc_lilpmap_t *lilp_map; 3716 3717 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3718 3719 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3720 job->job_result = FC_OFFLINE; 3721 mutex_exit(&port->fp_mutex); 3722 fp_jobdone(job); 3723 mutex_enter(&port->fp_mutex); 3724 return; 3725 } 3726 3727 if (port->fp_lilp_map.lilp_length == 0) { 3728 mutex_exit(&port->fp_mutex); 3729 job->job_result = FC_NO_MAP; 3730 fp_jobdone(job); 3731 mutex_enter(&port->fp_mutex); 3732 return; 3733 } 3734 mutex_exit(&port->fp_mutex); 3735 3736 lilp_map = &port->fp_lilp_map; 3737 job->job_counter = lilp_map->lilp_length; 3738 3739 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3740 flag = FP_CMD_PLOGI_RETAIN; 3741 } else { 3742 flag = FP_CMD_PLOGI_DONT_CARE; 3743 } 3744 3745 for (count = 0; count < lilp_map->lilp_length; count++) { 3746 d_id = lilp_map->lilp_alpalist[count]; 3747 3748 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3749 fp_jobdone(job); 3750 continue; 3751 } 3752 3753 pd = fctl_get_remote_port_by_did(port, d_id); 3754 if (pd) { 3755 mutex_enter(&pd->pd_mutex); 3756 if (flag == FP_CMD_PLOGI_DONT_CARE || 3757 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3758 mutex_exit(&pd->pd_mutex); 3759 fp_jobdone(job); 3760 continue; 3761 } 3762 mutex_exit(&pd->pd_mutex); 3763 } 3764 3765 rval = fp_port_login(port, d_id, job, flag, 3766 KM_SLEEP, pd, NULL); 3767 if (rval != FC_SUCCESS) { 3768 fp_jobdone(job); 3769 } 3770 } 3771 3772 mutex_enter(&port->fp_mutex); 3773 } 3774 3775 3776 /* 3777 * Perform loop ONLINE processing 3778 */ 3779 static void 3780 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3781 { 3782 int count; 3783 int rval; 3784 uint32_t d_id; 3785 uint32_t listlen; 3786 fc_lilpmap_t *lilp_map; 3787 fc_remote_port_t *pd; 3788 fc_portmap_t *changelist; 3789 3790 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3791 3792 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3793 port, job); 3794 3795 lilp_map = &port->fp_lilp_map; 3796 3797 if (lilp_map->lilp_length) { 3798 mutex_enter(&port->fp_mutex); 3799 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3800 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3801 mutex_exit(&port->fp_mutex); 3802 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3803 } else { 3804 mutex_exit(&port->fp_mutex); 3805 } 3806 3807 job->job_counter = lilp_map->lilp_length; 3808 3809 for (count = 0; count < lilp_map->lilp_length; count++) { 3810 d_id = lilp_map->lilp_alpalist[count]; 3811 3812 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3813 fp_jobdone(job); 3814 continue; 3815 } 3816 3817 pd = fctl_get_remote_port_by_did(port, d_id); 3818 if (pd != NULL) { 3819 #ifdef DEBUG 3820 mutex_enter(&pd->pd_mutex); 3821 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3822 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3823 } 3824 mutex_exit(&pd->pd_mutex); 3825 #endif 3826 fp_jobdone(job); 3827 continue; 3828 } 3829 3830 rval = fp_port_login(port, d_id, job, 3831 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3832 3833 if (rval != FC_SUCCESS) { 3834 fp_jobdone(job); 3835 } 3836 } 3837 fp_jobwait(job); 3838 } 3839 listlen = 0; 3840 changelist = NULL; 3841 3842 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3843 mutex_enter(&port->fp_mutex); 3844 ASSERT(port->fp_statec_busy > 0); 3845 if (port->fp_statec_busy == 1) { 3846 mutex_exit(&port->fp_mutex); 3847 fctl_fillout_map(port, &changelist, &listlen, 3848 1, 0, orphan); 3849 3850 mutex_enter(&port->fp_mutex); 3851 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3852 ASSERT(port->fp_total_devices == 0); 3853 port->fp_total_devices = port->fp_dev_count; 3854 } 3855 } else { 3856 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3857 } 3858 mutex_exit(&port->fp_mutex); 3859 } 3860 3861 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3862 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3863 listlen, listlen, KM_SLEEP); 3864 } else { 3865 mutex_enter(&port->fp_mutex); 3866 if (--port->fp_statec_busy == 0) { 3867 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3868 } 3869 ASSERT(changelist == NULL && listlen == 0); 3870 mutex_exit(&port->fp_mutex); 3871 } 3872 3873 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3874 port, job); 3875 } 3876 3877 3878 /* 3879 * Get an Arbitrated Loop map from the underlying FCA 3880 */ 3881 static int 3882 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3883 { 3884 int rval; 3885 3886 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3887 port, lilp_map); 3888 3889 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3890 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3891 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3892 3893 if (rval != FC_SUCCESS) { 3894 rval = FC_NO_MAP; 3895 } else if (lilp_map->lilp_length == 0 && 3896 (lilp_map->lilp_magic >= MAGIC_LISM && 3897 lilp_map->lilp_magic < MAGIC_LIRP)) { 3898 uchar_t lilp_length; 3899 3900 /* 3901 * Since the map length is zero, provide all 3902 * the valid AL_PAs for NL_ports discovery. 3903 */ 3904 lilp_length = sizeof (fp_valid_alpas) / 3905 sizeof (fp_valid_alpas[0]); 3906 lilp_map->lilp_length = lilp_length; 3907 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3908 lilp_length); 3909 } else { 3910 rval = fp_validate_lilp_map(lilp_map); 3911 3912 if (rval == FC_SUCCESS) { 3913 mutex_enter(&port->fp_mutex); 3914 port->fp_total_devices = lilp_map->lilp_length - 1; 3915 mutex_exit(&port->fp_mutex); 3916 } 3917 } 3918 3919 mutex_enter(&port->fp_mutex); 3920 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3921 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3922 mutex_exit(&port->fp_mutex); 3923 3924 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3925 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3926 FP_TRACE(FP_NHEAD1(9, 0), 3927 "FCA reset failed after LILP map was found" 3928 " to be invalid"); 3929 } 3930 } else if (rval == FC_SUCCESS) { 3931 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3932 mutex_exit(&port->fp_mutex); 3933 } else { 3934 mutex_exit(&port->fp_mutex); 3935 } 3936 3937 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3938 lilp_map); 3939 3940 return (rval); 3941 } 3942 3943 3944 /* 3945 * Perform Fabric Login: 3946 * 3947 * Return Values: 3948 * FC_SUCCESS 3949 * FC_FAILURE 3950 * FC_NOMEM 3951 * FC_TRANSPORT_ERROR 3952 * and a lot others defined in fc_error.h 3953 */ 3954 static int 3955 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3956 int flag, int sleep) 3957 { 3958 int rval; 3959 fp_cmd_t *cmd; 3960 uchar_t class; 3961 3962 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3963 3964 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3965 port, job); 3966 3967 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3968 if (class == FC_TRAN_CLASS_INVALID) { 3969 return (FC_ELS_BAD); 3970 } 3971 3972 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3973 sizeof (la_els_logi_t), sleep, NULL); 3974 if (cmd == NULL) { 3975 return (FC_NOMEM); 3976 } 3977 3978 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3979 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3980 cmd->cmd_flags = flag; 3981 cmd->cmd_retry_count = fp_retry_count; 3982 cmd->cmd_ulp_pkt = NULL; 3983 3984 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3985 job, LA_ELS_FLOGI); 3986 3987 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3988 if (rval != FC_SUCCESS) { 3989 fp_free_pkt(cmd); 3990 } 3991 3992 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3993 port, job); 3994 3995 return (rval); 3996 } 3997 3998 3999 /* 4000 * In some scenarios such as private loop device discovery period 4001 * the fc_remote_port_t data structure isn't allocated. The allocation 4002 * is done when the PLOGI is successful. 
In some other scenarios 4003 * such as Fabric topology, the fc_remote_port_t is already created 4004 * and initialized with appropriate values (as the NS provides 4005 * them) 4006 */ 4007 static int 4008 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 4009 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 4010 { 4011 uchar_t class; 4012 fp_cmd_t *cmd; 4013 uint32_t src_id; 4014 fc_remote_port_t *tmp_pd; 4015 int relogin; 4016 int found = 0; 4017 4018 #ifdef DEBUG 4019 if (pd == NULL) { 4020 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 4021 } 4022 #endif 4023 ASSERT(job->job_counter > 0); 4024 4025 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 4026 if (class == FC_TRAN_CLASS_INVALID) { 4027 return (FC_ELS_BAD); 4028 } 4029 4030 mutex_enter(&port->fp_mutex); 4031 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4032 mutex_exit(&port->fp_mutex); 4033 4034 relogin = 1; 4035 if (tmp_pd) { 4036 mutex_enter(&tmp_pd->pd_mutex); 4037 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4038 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4039 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4040 relogin = 0; 4041 } 4042 mutex_exit(&tmp_pd->pd_mutex); 4043 } 4044 4045 if (!relogin) { 4046 mutex_enter(&tmp_pd->pd_mutex); 4047 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4048 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4049 } 4050 mutex_exit(&tmp_pd->pd_mutex); 4051 4052 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4053 sizeof (la_els_adisc_t), sleep, tmp_pd); 4054 if (cmd == NULL) { 4055 return (FC_NOMEM); 4056 } 4057 4058 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4059 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4060 cmd->cmd_flags = cmd_flag; 4061 cmd->cmd_retry_count = fp_retry_count; 4062 cmd->cmd_ulp_pkt = ulp_pkt; 4063 4064 mutex_enter(&port->fp_mutex); 4065 mutex_enter(&tmp_pd->pd_mutex); 4066 fp_adisc_init(cmd, job); 4067 mutex_exit(&tmp_pd->pd_mutex); 4068 mutex_exit(&port->fp_mutex); 4069 4070 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4071 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4072 4073 } else { 4074 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4075 sizeof (la_els_logi_t), sleep, pd); 4076 if (cmd == NULL) { 4077 return (FC_NOMEM); 4078 } 4079 4080 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4081 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4082 cmd->cmd_flags = cmd_flag; 4083 cmd->cmd_retry_count = fp_retry_count; 4084 cmd->cmd_ulp_pkt = ulp_pkt; 4085 4086 mutex_enter(&port->fp_mutex); 4087 src_id = port->fp_port_id.port_id; 4088 mutex_exit(&port->fp_mutex); 4089 4090 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4091 job, LA_ELS_PLOGI); 4092 } 4093 4094 if (pd) { 4095 mutex_enter(&pd->pd_mutex); 4096 pd->pd_flags = PD_ELS_IN_PROGRESS; 4097 mutex_exit(&pd->pd_mutex); 4098 } 4099 4100 /* npiv check to make sure we don't log into ourself */ 4101 if (relogin && 4102 ((port->fp_npiv_type == FC_NPIV_PORT) || 4103 (port->fp_npiv_flag == FC_NPIV_ENABLE))) { 4104 if ((d_id & 0xffff00) == 4105 (port->fp_port_id.port_id & 0xffff00)) { 4106 found = 1; 4107 } 4108 } 4109 4110 if (found || 4111 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4112 if (found) { 4113 fc_packet_t *pkt = &cmd->cmd_pkt; 4114 pkt->pkt_state = FC_PKT_NPORT_RJT; 4115 } 4116 if (pd) { 4117 mutex_enter(&pd->pd_mutex); 4118 pd->pd_flags = PD_IDLE; 4119 mutex_exit(&pd->pd_mutex); 4120 } 4121 4122 if (ulp_pkt) { 4123 fc_packet_t *pkt = &cmd->cmd_pkt; 4124 4125 ulp_pkt->pkt_state = pkt->pkt_state; 4126 ulp_pkt->pkt_reason = 
pkt->pkt_reason; 4127 ulp_pkt->pkt_action = pkt->pkt_action; 4128 ulp_pkt->pkt_expln = pkt->pkt_expln; 4129 } 4130 4131 fp_iodone(cmd); 4132 } 4133 4134 return (FC_SUCCESS); 4135 } 4136 4137 4138 /* 4139 * Register the LOGIN parameters with a port device 4140 */ 4141 static void 4142 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4143 la_els_logi_t *acc, uchar_t class) 4144 { 4145 fc_remote_node_t *node; 4146 4147 ASSERT(pd != NULL); 4148 4149 mutex_enter(&pd->pd_mutex); 4150 node = pd->pd_remote_nodep; 4151 if (pd->pd_login_count == 0) { 4152 pd->pd_login_count++; 4153 } 4154 4155 if (handle) { 4156 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4157 (uint8_t *)&acc->common_service, 4158 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4159 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4160 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4161 DDI_DEV_AUTOINCR); 4162 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4163 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4164 DDI_DEV_AUTOINCR); 4165 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4166 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4167 DDI_DEV_AUTOINCR); 4168 } else { 4169 pd->pd_csp = acc->common_service; 4170 pd->pd_clsp1 = acc->class_1; 4171 pd->pd_clsp2 = acc->class_2; 4172 pd->pd_clsp3 = acc->class_3; 4173 } 4174 4175 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4176 pd->pd_login_class = class; 4177 mutex_exit(&pd->pd_mutex); 4178 4179 #ifndef __lock_lint 4180 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4181 pd->pd_port_id.port_id) == pd); 4182 #endif 4183 4184 mutex_enter(&node->fd_mutex); 4185 if (handle) { 4186 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4187 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4188 DDI_DEV_AUTOINCR); 4189 } else { 4190 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4191 } 4192 mutex_exit(&node->fd_mutex); 4193 } 4194 4195 4196 /* 4197 * Mark the remote port as OFFLINE 4198 */ 4199 static void 4200 fp_remote_port_offline(fc_remote_port_t *pd) 4201 { 4202 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4203 if (pd->pd_login_count && 4204 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4205 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4206 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4207 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4208 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4209 pd->pd_login_class = 0; 4210 } 4211 pd->pd_type = PORT_DEVICE_OLD; 4212 pd->pd_flags = PD_IDLE; 4213 fctl_tc_reset(&pd->pd_logo_tc); 4214 } 4215 4216 4217 /* 4218 * Deregistration of a port device 4219 */ 4220 static void 4221 fp_unregister_login(fc_remote_port_t *pd) 4222 { 4223 fc_remote_node_t *node; 4224 4225 ASSERT(pd != NULL); 4226 4227 mutex_enter(&pd->pd_mutex); 4228 pd->pd_login_count = 0; 4229 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4230 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4231 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4232 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4233 4234 pd->pd_state = PORT_DEVICE_VALID; 4235 pd->pd_login_class = 0; 4236 node = pd->pd_remote_nodep; 4237 mutex_exit(&pd->pd_mutex); 4238 4239 mutex_enter(&node->fd_mutex); 4240 bzero(node->fd_vv, sizeof (node->fd_vv)); 4241 mutex_exit(&node->fd_mutex); 4242 } 4243 4244 4245 /* 4246 * Handle OFFLINE state of an FCA port 4247 */ 4248 static void 4249 fp_port_offline(fc_local_port_t *port, int notify) 4250 { 4251 int index; 4252 int statec; 4253 
timeout_id_t tid; 4254 struct pwwn_hash *head; 4255 fc_remote_port_t *pd; 4256 4257 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4258 4259 for (index = 0; index < pwwn_table_size; index++) { 4260 head = &port->fp_pwwn_table[index]; 4261 pd = head->pwwn_head; 4262 while (pd != NULL) { 4263 mutex_enter(&pd->pd_mutex); 4264 fp_remote_port_offline(pd); 4265 fctl_delist_did_table(port, pd); 4266 mutex_exit(&pd->pd_mutex); 4267 pd = pd->pd_wwn_hnext; 4268 } 4269 } 4270 port->fp_total_devices = 0; 4271 4272 statec = 0; 4273 if (notify) { 4274 /* 4275 * Decrement the statec busy counter as we 4276 * are almost done with handling the state 4277 * change 4278 */ 4279 ASSERT(port->fp_statec_busy > 0); 4280 if (--port->fp_statec_busy == 0) { 4281 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4282 } 4283 mutex_exit(&port->fp_mutex); 4284 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4285 0, 0, KM_SLEEP); 4286 mutex_enter(&port->fp_mutex); 4287 4288 if (port->fp_statec_busy) { 4289 statec++; 4290 } 4291 } else if (port->fp_statec_busy > 1) { 4292 statec++; 4293 } 4294 4295 if ((tid = port->fp_offline_tid) != NULL) { 4296 mutex_exit(&port->fp_mutex); 4297 (void) untimeout(tid); 4298 mutex_enter(&port->fp_mutex); 4299 } 4300 4301 if (!statec) { 4302 port->fp_offline_tid = timeout(fp_offline_timeout, 4303 (caddr_t)port, fp_offline_ticks); 4304 } 4305 } 4306 4307 4308 /* 4309 * Offline devices and send up a state change notification to ULPs 4310 */ 4311 static void 4312 fp_offline_timeout(void *port_handle) 4313 { 4314 int ret; 4315 fc_local_port_t *port = port_handle; 4316 uint32_t listlen = 0; 4317 fc_portmap_t *changelist = NULL; 4318 4319 mutex_enter(&port->fp_mutex); 4320 4321 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4322 (port->fp_soft_state & 4323 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4324 port->fp_dev_count == 0 || port->fp_statec_busy) { 4325 port->fp_offline_tid = NULL; 4326 mutex_exit(&port->fp_mutex); 4327 return; 4328 } 4329 4330 mutex_exit(&port->fp_mutex); 4331 4332 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4333 4334 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4335 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4336 FC_FCA_CORE)) != FC_SUCCESS) { 4337 FP_TRACE(FP_NHEAD1(9, ret), 4338 "Failed to force adapter dump"); 4339 } else { 4340 FP_TRACE(FP_NHEAD1(9, 0), 4341 "Forced adapter dump successfully"); 4342 } 4343 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4344 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4345 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4346 FP_TRACE(FP_NHEAD1(9, ret), 4347 "Failed to force adapter dump and reset"); 4348 } else { 4349 FP_TRACE(FP_NHEAD1(9, 0), 4350 "Forced adapter dump and reset successfully"); 4351 } 4352 } 4353 4354 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4355 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4356 listlen, listlen, KM_SLEEP); 4357 4358 mutex_enter(&port->fp_mutex); 4359 port->fp_offline_tid = NULL; 4360 mutex_exit(&port->fp_mutex); 4361 } 4362 4363 4364 /* 4365 * Perform general purpose ELS request initialization 4366 */ 4367 static void 4368 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4369 void (*comp) (), job_request_t *job) 4370 { 4371 fc_packet_t *pkt; 4372 4373 pkt = &cmd->cmd_pkt; 4374 cmd->cmd_job = job; 4375 4376 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4377 pkt->pkt_cmd_fhdr.d_id = d_id; 4378 pkt->pkt_cmd_fhdr.s_id = s_id; 4379 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4380 
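/*
 * The remaining frame header fields mark this as the first sequence of a
 * new exchange with sequence initiative held by this port; OX_ID and
 * RX_ID are initialized to the unassigned value 0xffff.
 */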
pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ; 4381 pkt->pkt_cmd_fhdr.seq_id = 0; 4382 pkt->pkt_cmd_fhdr.df_ctl = 0; 4383 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4384 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4385 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4386 pkt->pkt_cmd_fhdr.ro = 0; 4387 pkt->pkt_cmd_fhdr.rsvd = 0; 4388 pkt->pkt_comp = comp; 4389 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4390 } 4391 4392 4393 /* 4394 * Initialize PLOGI/FLOGI ELS request 4395 */ 4396 static void 4397 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4398 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4399 { 4400 ls_code_t payload; 4401 4402 fp_els_init(cmd, s_id, d_id, intr, job); 4403 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4404 4405 payload.ls_code = ls_code; 4406 payload.mbz = 0; 4407 4408 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4409 (uint8_t *)&port->fp_service_params, 4410 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4411 DDI_DEV_AUTOINCR); 4412 4413 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4414 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4415 DDI_DEV_AUTOINCR); 4416 } 4417 4418 4419 /* 4420 * Initialize LOGO ELS request 4421 */ 4422 static void 4423 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4424 { 4425 fc_local_port_t *port; 4426 fc_packet_t *pkt; 4427 la_els_logo_t payload; 4428 4429 port = pd->pd_port; 4430 pkt = &cmd->cmd_pkt; 4431 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4432 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4433 4434 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4435 fp_logo_intr, job); 4436 4437 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4438 4439 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4440 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4441 4442 payload.ls_code.ls_code = LA_ELS_LOGO; 4443 payload.ls_code.mbz = 0; 4444 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4445 payload.nport_id = port->fp_port_id; 4446 4447 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4448 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4449 } 4450 4451 /* 4452 * Initialize RNID ELS request 4453 */ 4454 static void 4455 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4456 { 4457 fc_local_port_t *port; 4458 fc_packet_t *pkt; 4459 la_els_rnid_t payload; 4460 fc_remote_port_t *pd; 4461 4462 pkt = &cmd->cmd_pkt; 4463 pd = pkt->pkt_pd; 4464 port = pd->pd_port; 4465 4466 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4467 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4468 4469 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4470 fp_rnid_intr, job); 4471 4472 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4473 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4474 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4475 4476 payload.ls_code.ls_code = LA_ELS_RNID; 4477 payload.ls_code.mbz = 0; 4478 payload.data_format = flag; 4479 4480 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4481 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4482 } 4483 4484 /* 4485 * Initialize RLS ELS request 4486 */ 4487 static void 4488 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4489 { 4490 fc_local_port_t *port; 4491 fc_packet_t *pkt; 4492 la_els_rls_t payload; 4493 fc_remote_port_t *pd; 4494 4495 pkt = &cmd->cmd_pkt; 4496 pd = pkt->pkt_pd; 4497 port = pd->pd_port; 4498 4499 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4500 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4501 4502 fp_els_init(cmd, port->fp_port_id.port_id, 
pd->pd_port_id.port_id, 4503 fp_rls_intr, job); 4504 4505 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4506 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4507 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4508 4509 payload.ls_code.ls_code = LA_ELS_RLS; 4510 payload.ls_code.mbz = 0; 4511 payload.rls_portid = port->fp_port_id; 4512 4513 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4514 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4515 } 4516 4517 4518 /* 4519 * Initialize an ADISC ELS request 4520 */ 4521 static void 4522 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4523 { 4524 fc_local_port_t *port; 4525 fc_packet_t *pkt; 4526 la_els_adisc_t payload; 4527 fc_remote_port_t *pd; 4528 4529 pkt = &cmd->cmd_pkt; 4530 pd = pkt->pkt_pd; 4531 port = pd->pd_port; 4532 4533 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4534 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4535 4536 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4537 fp_adisc_intr, job); 4538 4539 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4540 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4541 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4542 4543 payload.ls_code.ls_code = LA_ELS_ADISC; 4544 payload.ls_code.mbz = 0; 4545 payload.nport_id = port->fp_port_id; 4546 payload.port_wwn = port->fp_service_params.nport_ww_name; 4547 payload.node_wwn = port->fp_service_params.node_ww_name; 4548 payload.hard_addr = port->fp_hard_addr; 4549 4550 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4551 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4552 } 4553 4554 4555 /* 4556 * Send up a state change notification to ULPs. 4557 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4558 */ 4559 static int 4560 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4561 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4562 { 4563 fc_port_clist_t *clist; 4564 fc_remote_port_t *pd; 4565 int count; 4566 4567 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4568 4569 clist = kmem_zalloc(sizeof (*clist), sleep); 4570 if (clist == NULL) { 4571 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4572 return (FC_NOMEM); 4573 } 4574 4575 clist->clist_state = state; 4576 4577 mutex_enter(&port->fp_mutex); 4578 clist->clist_flags = port->fp_topology; 4579 mutex_exit(&port->fp_mutex); 4580 4581 clist->clist_port = (opaque_t)port; 4582 clist->clist_len = listlen; 4583 clist->clist_size = alloc_len; 4584 clist->clist_map = changelist; 4585 4586 /* 4587 * Bump the reference count of each fc_remote_port_t in this changelist. 4588 * This is necessary since these devices will be sitting in a taskq 4589 * and referenced later. When the state change notification is 4590 * complete, the reference counts will be decremented. 
4591 */ 4592 for (count = 0; count < clist->clist_len; count++) { 4593 pd = clist->clist_map[count].map_pd; 4594 4595 if (pd != NULL) { 4596 mutex_enter(&pd->pd_mutex); 4597 ASSERT((pd->pd_ref_count >= 0) || 4598 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4599 pd->pd_ref_count++; 4600 4601 if (clist->clist_map[count].map_state != 4602 PORT_DEVICE_INVALID) { 4603 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4604 } 4605 4606 mutex_exit(&pd->pd_mutex); 4607 } 4608 } 4609 4610 #ifdef DEBUG 4611 /* 4612 * Sanity check for presence of OLD devices in the hash lists 4613 */ 4614 if (clist->clist_size) { 4615 ASSERT(clist->clist_map != NULL); 4616 for (count = 0; count < clist->clist_len; count++) { 4617 if (clist->clist_map[count].map_state == 4618 PORT_DEVICE_INVALID) { 4619 la_wwn_t pwwn; 4620 fc_portid_t d_id; 4621 4622 pd = clist->clist_map[count].map_pd; 4623 ASSERT(pd != NULL); 4624 4625 mutex_enter(&pd->pd_mutex); 4626 pwwn = pd->pd_port_name; 4627 d_id = pd->pd_port_id; 4628 mutex_exit(&pd->pd_mutex); 4629 4630 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4631 ASSERT(pd != clist->clist_map[count].map_pd); 4632 4633 pd = fctl_get_remote_port_by_did(port, 4634 d_id.port_id); 4635 ASSERT(pd != clist->clist_map[count].map_pd); 4636 } 4637 } 4638 } 4639 #endif 4640 4641 mutex_enter(&port->fp_mutex); 4642 4643 if (state == FC_STATE_ONLINE) { 4644 if (--port->fp_statec_busy == 0) { 4645 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4646 } 4647 } 4648 mutex_exit(&port->fp_mutex); 4649 4650 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4651 clist, KM_SLEEP); 4652 4653 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4654 "state=%x, len=%d", port, state, listlen); 4655 4656 return (FC_SUCCESS); 4657 } 4658 4659 4660 /* 4661 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4662 */ 4663 static int 4664 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4665 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4666 { 4667 int ret; 4668 fc_port_clist_t *clist; 4669 4670 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4671 4672 clist = kmem_zalloc(sizeof (*clist), sleep); 4673 if (clist == NULL) { 4674 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4675 return (FC_NOMEM); 4676 } 4677 4678 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4679 4680 mutex_enter(&port->fp_mutex); 4681 clist->clist_flags = port->fp_topology; 4682 mutex_exit(&port->fp_mutex); 4683 4684 clist->clist_port = (opaque_t)port; 4685 clist->clist_len = listlen; 4686 clist->clist_size = alloc_len; 4687 clist->clist_map = changelist; 4688 4689 /* Send sysevents for target state changes */ 4690 4691 if (clist->clist_size) { 4692 int count; 4693 fc_remote_port_t *pd; 4694 4695 ASSERT(clist->clist_map != NULL); 4696 for (count = 0; count < clist->clist_len; count++) { 4697 pd = clist->clist_map[count].map_pd; 4698 4699 /* 4700 * Bump reference counts on all fc_remote_port_t 4701 * structs in this list. We don't know when the task 4702 * will fire, and we don't need these fc_remote_port_t 4703 * structs going away behind our back. 
4704 */ 4705 if (pd) { 4706 mutex_enter(&pd->pd_mutex); 4707 ASSERT((pd->pd_ref_count >= 0) || 4708 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4709 pd->pd_ref_count++; 4710 mutex_exit(&pd->pd_mutex); 4711 } 4712 4713 if (clist->clist_map[count].map_state == 4714 PORT_DEVICE_VALID) { 4715 if (clist->clist_map[count].map_type == 4716 PORT_DEVICE_NEW) { 4717 /* Update our state change counter */ 4718 mutex_enter(&port->fp_mutex); 4719 port->fp_last_change++; 4720 mutex_exit(&port->fp_mutex); 4721 4722 /* Additions */ 4723 fp_log_target_event(port, 4724 ESC_SUNFC_TARGET_ADD, 4725 clist->clist_map[count].map_pwwn, 4726 clist->clist_map[count].map_did. 4727 port_id); 4728 } 4729 4730 } else if ((clist->clist_map[count].map_type == 4731 PORT_DEVICE_OLD) && 4732 (clist->clist_map[count].map_state == 4733 PORT_DEVICE_INVALID)) { 4734 /* Update our state change counter */ 4735 mutex_enter(&port->fp_mutex); 4736 port->fp_last_change++; 4737 mutex_exit(&port->fp_mutex); 4738 4739 /* 4740 * For removals, we don't decrement 4741 * pd_ref_count until after the ULP's 4742 * state change callback function has 4743 * completed. 4744 */ 4745 4746 /* Removals */ 4747 fp_log_target_event(port, 4748 ESC_SUNFC_TARGET_REMOVE, 4749 clist->clist_map[count].map_pwwn, 4750 clist->clist_map[count].map_did.port_id); 4751 } 4752 4753 if (clist->clist_map[count].map_state != 4754 PORT_DEVICE_INVALID) { 4755 /* 4756 * Indicate that the ULPs are now aware of 4757 * this device. 4758 */ 4759 4760 mutex_enter(&pd->pd_mutex); 4761 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4762 mutex_exit(&pd->pd_mutex); 4763 } 4764 4765 #ifdef DEBUG 4766 /* 4767 * Sanity check for OLD devices in the hash lists 4768 */ 4769 if (pd && clist->clist_map[count].map_state == 4770 PORT_DEVICE_INVALID) { 4771 la_wwn_t pwwn; 4772 fc_portid_t d_id; 4773 4774 mutex_enter(&pd->pd_mutex); 4775 pwwn = pd->pd_port_name; 4776 d_id = pd->pd_port_id; 4777 mutex_exit(&pd->pd_mutex); 4778 4779 /* 4780 * This overwrites the 'pd' local variable. 4781 * Beware of this if 'pd' ever gets 4782 * referenced below this block. 
4783 */ 4784 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4785 ASSERT(pd != clist->clist_map[count].map_pd); 4786 4787 pd = fctl_get_remote_port_by_did(port, 4788 d_id.port_id); 4789 ASSERT(pd != clist->clist_map[count].map_pd); 4790 } 4791 #endif 4792 } 4793 } 4794 4795 if (sync) { 4796 clist->clist_wait = 1; 4797 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4798 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4799 } 4800 4801 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4802 if (sync && ret) { 4803 mutex_enter(&clist->clist_mutex); 4804 while (clist->clist_wait) { 4805 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4806 } 4807 mutex_exit(&clist->clist_mutex); 4808 4809 mutex_destroy(&clist->clist_mutex); 4810 cv_destroy(&clist->clist_cv); 4811 kmem_free(clist, sizeof (*clist)); 4812 } 4813 4814 if (!ret) { 4815 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4816 "port=%p", port); 4817 kmem_free(clist->clist_map, 4818 sizeof (*(clist->clist_map)) * clist->clist_size); 4819 kmem_free(clist, sizeof (*clist)); 4820 } else { 4821 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4822 port, listlen); 4823 } 4824 4825 return (FC_SUCCESS); 4826 } 4827 4828 4829 /* 4830 * Perform PLOGI to the group of devices for ULPs 4831 */ 4832 static void 4833 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4834 { 4835 int offline; 4836 int count; 4837 int rval; 4838 uint32_t listlen; 4839 uint32_t done; 4840 uint32_t d_id; 4841 fc_remote_node_t *node; 4842 fc_remote_port_t *pd; 4843 fc_remote_port_t *tmp_pd; 4844 fc_packet_t *ulp_pkt; 4845 la_els_logi_t *els_data; 4846 ls_code_t ls_code; 4847 4848 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4849 port, job); 4850 4851 done = 0; 4852 listlen = job->job_ulp_listlen; 4853 job->job_counter = job->job_ulp_listlen; 4854 4855 mutex_enter(&port->fp_mutex); 4856 offline = (port->fp_statec_busy || 4857 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4858 mutex_exit(&port->fp_mutex); 4859 4860 for (count = 0; count < listlen; count++) { 4861 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4862 sizeof (la_els_logi_t)); 4863 4864 ulp_pkt = job->job_ulp_pkts[count]; 4865 pd = ulp_pkt->pkt_pd; 4866 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4867 4868 if (offline) { 4869 done++; 4870 4871 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4872 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4873 ulp_pkt->pkt_pd = NULL; 4874 ulp_pkt->pkt_comp(ulp_pkt); 4875 4876 job->job_ulp_pkts[count] = NULL; 4877 4878 fp_jobdone(job); 4879 continue; 4880 } 4881 4882 if (pd == NULL) { 4883 pd = fctl_get_remote_port_by_did(port, d_id); 4884 if (pd == NULL) { 4885 /* reset later */ 4886 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4887 continue; 4888 } 4889 mutex_enter(&pd->pd_mutex); 4890 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4891 mutex_exit(&pd->pd_mutex); 4892 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4893 done++; 4894 ulp_pkt->pkt_comp(ulp_pkt); 4895 job->job_ulp_pkts[count] = NULL; 4896 fp_jobdone(job); 4897 } else { 4898 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4899 mutex_exit(&pd->pd_mutex); 4900 } 4901 continue; 4902 } 4903 4904 switch (ulp_pkt->pkt_state) { 4905 case FC_PKT_ELS_IN_PROGRESS: 4906 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4907 /* FALLTHRU */ 4908 case FC_PKT_LOCAL_RJT: 4909 done++; 4910 ulp_pkt->pkt_comp(ulp_pkt); 4911 job->job_ulp_pkts[count] = NULL; 4912 fp_jobdone(job); 4913 continue; 4914 default: 4915 break; 4916 } 4917 4918 /* 4919 * Validate the pd corresponding to the d_id passed 4920 * by the ULPs 4921 */ 4922 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4923 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4924 done++; 4925 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4926 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4927 ulp_pkt->pkt_pd = NULL; 4928 ulp_pkt->pkt_comp(ulp_pkt); 4929 job->job_ulp_pkts[count] = NULL; 4930 fp_jobdone(job); 4931 continue; 4932 } 4933 4934 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4935 "port=%p, pd=%p", port, pd); 4936 4937 mutex_enter(&pd->pd_mutex); 4938 4939 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4940 done++; 4941 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4942 4943 ls_code.ls_code = LA_ELS_ACC; 4944 ls_code.mbz = 0; 4945 4946 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4947 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4948 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4949 4950 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4951 (uint8_t *)&pd->pd_csp, 4952 (uint8_t *)&els_data->common_service, 4953 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4954 4955 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4956 (uint8_t *)&pd->pd_port_name, 4957 (uint8_t *)&els_data->nport_ww_name, 4958 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4959 4960 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4961 (uint8_t *)&pd->pd_clsp1, 4962 (uint8_t *)&els_data->class_1, 4963 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4964 4965 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4966 (uint8_t *)&pd->pd_clsp2, 4967 (uint8_t *)&els_data->class_2, 4968 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4969 4970 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4971 (uint8_t *)&pd->pd_clsp3, 4972 (uint8_t *)&els_data->class_3, 4973 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4974 4975 node = pd->pd_remote_nodep; 4976 pd->pd_login_count++; 4977 pd->pd_flags = PD_IDLE; 4978 ulp_pkt->pkt_pd = pd; 4979 mutex_exit(&pd->pd_mutex); 4980 4981 mutex_enter(&node->fd_mutex); 4982 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4983 (uint8_t *)&node->fd_node_name, 4984 (uint8_t *)(&els_data->node_ww_name), 4985 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4986 4987 4988 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4989 (uint8_t *)&node->fd_vv, 4990 (uint8_t *)(&els_data->vendor_version), 4991 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4992 4993 mutex_exit(&node->fd_mutex); 4994 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4995 } else { 4996 4997 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4998 mutex_exit(&pd->pd_mutex); 4999 } 5000 5001 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 5002 ulp_pkt->pkt_comp(ulp_pkt); 5003 job->job_ulp_pkts[count] = NULL; 5004 fp_jobdone(job); 5005 } 5006 } 5007 5008 if (done == listlen) { 5009 fp_jobwait(job); 5010 fctl_jobdone(job); 5011 return; 5012 } 5013 5014 job->job_counter = listlen - done; 5015 5016 for (count = 0; count < listlen; count++) { 5017 int cmd_flags; 5018 5019 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 5020 continue; 5021 } 5022 5023 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 5024 5025 cmd_flags = FP_CMD_PLOGI_RETAIN; 5026 5027 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5028 ASSERT(d_id != 0); 5029 5030 pd = fctl_get_remote_port_by_did(port, d_id); 5031 5032 /* 5033 * We need to properly adjust the port device 5034 * reference counter before we assign the pd 5035 * to the ULP packets port device pointer. 5036 */ 5037 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5038 mutex_enter(&pd->pd_mutex); 5039 pd->pd_ref_count++; 5040 mutex_exit(&pd->pd_mutex); 5041 FP_TRACE(FP_NHEAD1(3, 0), 5042 "fp_plogi_group: DID = 0x%x using new pd %p \ 5043 old pd NULL\n", d_id, pd); 5044 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5045 ulp_pkt->pkt_pd != pd) { 5046 mutex_enter(&pd->pd_mutex); 5047 pd->pd_ref_count++; 5048 mutex_exit(&pd->pd_mutex); 5049 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5050 ulp_pkt->pkt_pd->pd_ref_count--; 5051 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5052 FP_TRACE(FP_NHEAD1(3, 0), 5053 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5054 d_id, ulp_pkt->pkt_pd, pd); 5055 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5056 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5057 ulp_pkt->pkt_pd->pd_ref_count--; 5058 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5059 FP_TRACE(FP_NHEAD1(3, 0), 5060 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5061 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5062 } 5063 5064 ulp_pkt->pkt_pd = pd; 5065 5066 if (pd != NULL) { 5067 mutex_enter(&pd->pd_mutex); 5068 d_id = pd->pd_port_id.port_id; 5069 pd->pd_flags = PD_ELS_IN_PROGRESS; 5070 mutex_exit(&pd->pd_mutex); 5071 } else { 5072 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5073 #ifdef DEBUG 5074 pd = fctl_get_remote_port_by_did(port, d_id); 5075 ASSERT(pd == NULL); 5076 #endif 5077 /* 5078 * In the Fabric topology, use NS to create 5079 * port device, and if that fails still try 5080 * with PLOGI - which will make yet another 5081 * attempt to create after successful PLOGI 5082 */ 5083 mutex_enter(&port->fp_mutex); 5084 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5085 mutex_exit(&port->fp_mutex); 5086 pd = fp_create_remote_port_by_ns(port, 5087 d_id, KM_SLEEP); 5088 if (pd) { 5089 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5090 5091 mutex_enter(&pd->pd_mutex); 5092 pd->pd_flags = PD_ELS_IN_PROGRESS; 5093 mutex_exit(&pd->pd_mutex); 5094 5095 FP_TRACE(FP_NHEAD1(3, 0), 5096 "fp_plogi_group;" 5097 " NS created PD port=%p, job=%p," 5098 " pd=%p", port, job, pd); 5099 } 5100 } else { 5101 mutex_exit(&port->fp_mutex); 5102 } 5103 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5104 FP_TRACE(FP_NHEAD1(3, 0), 5105 "fp_plogi_group;" 5106 "ulp_pkt's pd is NULL, get a pd %p", 5107 pd); 5108 mutex_enter(&pd->pd_mutex); 5109 
pd->pd_ref_count++; 5110 mutex_exit(&pd->pd_mutex); 5111 } 5112 ulp_pkt->pkt_pd = pd; 5113 } 5114 5115 rval = fp_port_login(port, d_id, job, cmd_flags, 5116 KM_SLEEP, pd, ulp_pkt); 5117 5118 if (rval == FC_SUCCESS) { 5119 continue; 5120 } 5121 5122 if (rval == FC_STATEC_BUSY) { 5123 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5124 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5125 } else { 5126 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5127 } 5128 5129 if (pd) { 5130 mutex_enter(&pd->pd_mutex); 5131 pd->pd_flags = PD_IDLE; 5132 mutex_exit(&pd->pd_mutex); 5133 } 5134 5135 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5136 ASSERT(pd != NULL); 5137 5138 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5139 " PD removed; port=%p, job=%p", port, job); 5140 5141 mutex_enter(&pd->pd_mutex); 5142 pd->pd_ref_count--; 5143 node = pd->pd_remote_nodep; 5144 mutex_exit(&pd->pd_mutex); 5145 5146 ASSERT(node != NULL); 5147 5148 if (fctl_destroy_remote_port(port, pd) == 0) { 5149 fctl_destroy_remote_node(node); 5150 } 5151 ulp_pkt->pkt_pd = NULL; 5152 } 5153 ulp_pkt->pkt_comp(ulp_pkt); 5154 fp_jobdone(job); 5155 } 5156 5157 fp_jobwait(job); 5158 fctl_jobdone(job); 5159 5160 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5161 port, job); 5162 } 5163 5164 5165 /* 5166 * Name server request initialization 5167 */ 5168 static void 5169 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5170 { 5171 int rval; 5172 int count; 5173 int size; 5174 5175 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5176 5177 job->job_counter = 1; 5178 job->job_result = FC_SUCCESS; 5179 5180 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5181 KM_SLEEP, NULL, NULL); 5182 5183 if (rval != FC_SUCCESS) { 5184 mutex_enter(&port->fp_mutex); 5185 port->fp_topology = FC_TOP_NO_NS; 5186 mutex_exit(&port->fp_mutex); 5187 return; 5188 } 5189 5190 fp_jobwait(job); 5191 5192 if (job->job_result != FC_SUCCESS) { 5193 mutex_enter(&port->fp_mutex); 5194 port->fp_topology = FC_TOP_NO_NS; 5195 mutex_exit(&port->fp_mutex); 5196 return; 5197 } 5198 5199 /* 5200 * At this time, we'll do NS registration for objects in the 5201 * ns_reg_cmds (see top of this file) array. 5202 * 5203 * Each time a ULP module registers with the transport, the 5204 * appropriate bit is set in the fc4 types and registered with 5205 * the NS to indicate this support. Also, ULPs and FC admin utilities 5206 * may do registration for objects like IP address, symbolic 5207 * port/node name, and Initial Process Associator at run time.
5208 */ 5209 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5210 job->job_counter = size; 5211 job->job_result = FC_SUCCESS; 5212 5213 for (count = 0; count < size; count++) { 5214 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5215 job, 0, sleep) != FC_SUCCESS) { 5216 fp_jobdone(job); 5217 } 5218 } 5219 if (size) { 5220 fp_jobwait(job); 5221 } 5222 5223 job->job_result = FC_SUCCESS; 5224 5225 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5226 5227 if (port->fp_dev_count < FP_MAX_DEVICES) { 5228 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5229 } 5230 5231 job->job_counter = 1; 5232 5233 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5234 sleep) == FC_SUCCESS) { 5235 fp_jobwait(job); 5236 } 5237 } 5238 5239 5240 /* 5241 * Name server finish: 5242 * Unregister for RSCNs 5243 * Unregister all the host port objects in the Name Server 5244 * Perform LOGO with the NS; 5245 */ 5246 static void 5247 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5248 { 5249 fp_cmd_t *cmd; 5250 uchar_t class; 5251 uint32_t s_id; 5252 fc_packet_t *pkt; 5253 la_els_logo_t payload; 5254 5255 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5256 5257 job->job_counter = 1; 5258 5259 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5260 FC_SUCCESS) { 5261 fp_jobdone(job); 5262 } 5263 fp_jobwait(job); 5264 5265 job->job_counter = 1; 5266 5267 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5268 fp_jobdone(job); 5269 } 5270 fp_jobwait(job); 5271 5272 job->job_counter = 1; 5273 5274 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5275 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5276 pkt = &cmd->cmd_pkt; 5277 5278 mutex_enter(&port->fp_mutex); 5279 class = port->fp_ns_login_class; 5280 s_id = port->fp_port_id.port_id; 5281 payload.nport_id = port->fp_port_id; 5282 mutex_exit(&port->fp_mutex); 5283 5284 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5285 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5286 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5287 cmd->cmd_retry_count = 1; 5288 cmd->cmd_ulp_pkt = NULL; 5289 5290 if (port->fp_npiv_type == FC_NPIV_PORT) { 5291 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5292 } else { 5293 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5294 } 5295 5296 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5297 5298 payload.ls_code.ls_code = LA_ELS_LOGO; 5299 payload.ls_code.mbz = 0; 5300 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5301 5302 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5303 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5304 5305 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5306 fp_iodone(cmd); 5307 } 5308 fp_jobwait(job); 5309 } 5310 5311 5312 /* 5313 * NS Registration function. 5314 * 5315 * It should be seriously noted that FC-GS-2 currently doesn't support 5316 * an Object Registration by a D_ID other than the owner of the object. 5317 * What we are aiming at currently is to at least allow Symbolic Node/Port 5318 * Name registration for any N_Port Identifier by the host software. 5319 * 5320 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5321 * function treats the request as Host NS Object. 
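 *
 * The registration payload is built below according to cmd_code and sent
 * with fp_sendcmd(); when 'polled' is set, the routine waits for the job
 * to complete before returning.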
5322 */ 5323 static int 5324 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5325 job_request_t *job, int polled, int sleep) 5326 { 5327 int rval; 5328 fc_portid_t s_id; 5329 fc_packet_t *pkt; 5330 fp_cmd_t *cmd; 5331 5332 if (pd == NULL) { 5333 mutex_enter(&port->fp_mutex); 5334 s_id = port->fp_port_id; 5335 mutex_exit(&port->fp_mutex); 5336 } else { 5337 mutex_enter(&pd->pd_mutex); 5338 s_id = pd->pd_port_id; 5339 mutex_exit(&pd->pd_mutex); 5340 } 5341 5342 if (polled) { 5343 job->job_counter = 1; 5344 } 5345 5346 switch (cmd_code) { 5347 case NS_RPN_ID: 5348 case NS_RNN_ID: { 5349 ns_rxn_req_t rxn; 5350 5351 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5352 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5353 if (cmd == NULL) { 5354 return (FC_NOMEM); 5355 } 5356 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5357 pkt = &cmd->cmd_pkt; 5358 5359 if (pd == NULL) { 5360 rxn.rxn_xname = ((cmd_code == NS_RPN_ID) ? 5361 (port->fp_service_params.nport_ww_name) : 5362 (port->fp_service_params.node_ww_name)); 5363 } else { 5364 if (cmd_code == NS_RPN_ID) { 5365 mutex_enter(&pd->pd_mutex); 5366 rxn.rxn_xname = pd->pd_port_name; 5367 mutex_exit(&pd->pd_mutex); 5368 } else { 5369 fc_remote_node_t *node; 5370 5371 mutex_enter(&pd->pd_mutex); 5372 node = pd->pd_remote_nodep; 5373 mutex_exit(&pd->pd_mutex); 5374 5375 mutex_enter(&node->fd_mutex); 5376 rxn.rxn_xname = node->fd_node_name; 5377 mutex_exit(&node->fd_mutex); 5378 } 5379 } 5380 rxn.rxn_port_id = s_id; 5381 5382 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5383 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5384 sizeof (rxn), DDI_DEV_AUTOINCR); 5385 5386 break; 5387 } 5388 5389 case NS_RCS_ID: { 5390 ns_rcos_t rcos; 5391 5392 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5393 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5394 if (cmd == NULL) { 5395 return (FC_NOMEM); 5396 } 5397 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5398 pkt = &cmd->cmd_pkt; 5399 5400 if (pd == NULL) { 5401 rcos.rcos_cos = port->fp_cos; 5402 } else { 5403 mutex_enter(&pd->pd_mutex); 5404 rcos.rcos_cos = pd->pd_cos; 5405 mutex_exit(&pd->pd_mutex); 5406 } 5407 rcos.rcos_port_id = s_id; 5408 5409 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5410 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5411 sizeof (rcos), DDI_DEV_AUTOINCR); 5412 5413 break; 5414 } 5415 5416 case NS_RFT_ID: { 5417 ns_rfc_type_t rfc; 5418 5419 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5420 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5421 NULL); 5422 if (cmd == NULL) { 5423 return (FC_NOMEM); 5424 } 5425 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5426 pkt = &cmd->cmd_pkt; 5427 5428 if (pd == NULL) { 5429 mutex_enter(&port->fp_mutex); 5430 bcopy(port->fp_fc4_types, rfc.rfc_types, 5431 sizeof (port->fp_fc4_types)); 5432 mutex_exit(&port->fp_mutex); 5433 } else { 5434 mutex_enter(&pd->pd_mutex); 5435 bcopy(pd->pd_fc4types, rfc.rfc_types, 5436 sizeof (pd->pd_fc4types)); 5437 mutex_exit(&pd->pd_mutex); 5438 } 5439 rfc.rfc_port_id = s_id; 5440 5441 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5442 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5443 sizeof (rfc), DDI_DEV_AUTOINCR); 5444 5445 break; 5446 } 5447 5448 case NS_RSPN_ID: { 5449 uchar_t name_len; 5450 int pl_size; 5451 fc_portid_t spn; 5452 5453 if (pd == NULL) { 5454 mutex_enter(&port->fp_mutex); 5455 name_len = port->fp_sym_port_namelen; 5456 mutex_exit(&port->fp_mutex); 5457 } else { 5458 
mutex_enter(&pd->pd_mutex); 5459 name_len = pd->pd_spn_len; 5460 mutex_exit(&pd->pd_mutex); 5461 } 5462 5463 pl_size = sizeof (fc_portid_t) + name_len + 1; 5464 5465 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5466 sizeof (fc_reg_resp_t), sleep, NULL); 5467 if (cmd == NULL) { 5468 return (FC_NOMEM); 5469 } 5470 5471 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5472 5473 pkt = &cmd->cmd_pkt; 5474 5475 spn = s_id; 5476 5477 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5478 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5479 DDI_DEV_AUTOINCR); 5480 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5481 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5482 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5483 5484 if (pd == NULL) { 5485 mutex_enter(&port->fp_mutex); 5486 ddi_rep_put8(pkt->pkt_cmd_acc, 5487 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5488 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5489 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5490 mutex_exit(&port->fp_mutex); 5491 } else { 5492 mutex_enter(&pd->pd_mutex); 5493 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5494 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5495 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5496 mutex_exit(&pd->pd_mutex); 5497 } 5498 break; 5499 } 5500 5501 case NS_RPT_ID: { 5502 ns_rpt_t rpt; 5503 5504 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5505 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5506 if (cmd == NULL) { 5507 return (FC_NOMEM); 5508 } 5509 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5510 pkt = &cmd->cmd_pkt; 5511 5512 if (pd == NULL) { 5513 rpt.rpt_type = port->fp_port_type; 5514 } else { 5515 mutex_enter(&pd->pd_mutex); 5516 rpt.rpt_type = pd->pd_porttype; 5517 mutex_exit(&pd->pd_mutex); 5518 } 5519 rpt.rpt_port_id = s_id; 5520 5521 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5522 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5523 sizeof (rpt), DDI_DEV_AUTOINCR); 5524 5525 break; 5526 } 5527 5528 case NS_RIP_NN: { 5529 ns_rip_t rip; 5530 5531 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5532 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5533 if (cmd == NULL) { 5534 return (FC_NOMEM); 5535 } 5536 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5537 pkt = &cmd->cmd_pkt; 5538 5539 if (pd == NULL) { 5540 rip.rip_node_name = 5541 port->fp_service_params.node_ww_name; 5542 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5543 sizeof (port->fp_ip_addr)); 5544 } else { 5545 fc_remote_node_t *node; 5546 5547 /* 5548 * The most correct implementation should have the IP 5549 * address in the fc_remote_node_t structure; I believe 5550 * Node WWN and IP address should have one to one 5551 * correlation (but guess what this is changing in 5552 * FC-GS-2 latest draft) 5553 */ 5554 mutex_enter(&pd->pd_mutex); 5555 node = pd->pd_remote_nodep; 5556 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5557 sizeof (pd->pd_ip_addr)); 5558 mutex_exit(&pd->pd_mutex); 5559 5560 mutex_enter(&node->fd_mutex); 5561 rip.rip_node_name = node->fd_node_name; 5562 mutex_exit(&node->fd_mutex); 5563 } 5564 5565 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5566 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5567 sizeof (rip), DDI_DEV_AUTOINCR); 5568 5569 break; 5570 } 5571 5572 case NS_RIPA_NN: { 5573 ns_ipa_t ipa; 5574 5575 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5576 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5577 if (cmd == NULL) { 5578 return (FC_NOMEM); 5579 } 5580 
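/* Build the CT header, then fill in the node name and IPA value for this registration. */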
fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5581 pkt = &cmd->cmd_pkt; 5582 5583 if (pd == NULL) { 5584 ipa.ipa_node_name = 5585 port->fp_service_params.node_ww_name; 5586 bcopy(port->fp_ipa, ipa.ipa_value, 5587 sizeof (port->fp_ipa)); 5588 } else { 5589 fc_remote_node_t *node; 5590 5591 mutex_enter(&pd->pd_mutex); 5592 node = pd->pd_remote_nodep; 5593 mutex_exit(&pd->pd_mutex); 5594 5595 mutex_enter(&node->fd_mutex); 5596 ipa.ipa_node_name = node->fd_node_name; 5597 bcopy(node->fd_ipa, ipa.ipa_value, 5598 sizeof (node->fd_ipa)); 5599 mutex_exit(&node->fd_mutex); 5600 } 5601 5602 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5603 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5604 sizeof (ipa), DDI_DEV_AUTOINCR); 5605 5606 break; 5607 } 5608 5609 case NS_RSNN_NN: { 5610 uchar_t name_len; 5611 int pl_size; 5612 la_wwn_t snn; 5613 fc_remote_node_t *node = NULL; 5614 5615 if (pd == NULL) { 5616 mutex_enter(&port->fp_mutex); 5617 name_len = port->fp_sym_node_namelen; 5618 mutex_exit(&port->fp_mutex); 5619 } else { 5620 mutex_enter(&pd->pd_mutex); 5621 node = pd->pd_remote_nodep; 5622 mutex_exit(&pd->pd_mutex); 5623 5624 mutex_enter(&node->fd_mutex); 5625 name_len = node->fd_snn_len; 5626 mutex_exit(&node->fd_mutex); 5627 } 5628 5629 pl_size = sizeof (la_wwn_t) + name_len + 1; 5630 5631 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5632 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5633 if (cmd == NULL) { 5634 return (FC_NOMEM); 5635 } 5636 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5637 5638 pkt = &cmd->cmd_pkt; 5639 5640 bcopy(&port->fp_service_params.node_ww_name, 5641 &snn, sizeof (la_wwn_t)); 5642 5643 if (pd == NULL) { 5644 mutex_enter(&port->fp_mutex); 5645 ddi_rep_put8(pkt->pkt_cmd_acc, 5646 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5647 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5648 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5649 mutex_exit(&port->fp_mutex); 5650 } else { 5651 ASSERT(node != NULL); 5652 mutex_enter(&node->fd_mutex); 5653 ddi_rep_put8(pkt->pkt_cmd_acc, 5654 (uint8_t *)node->fd_snn, 5655 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5656 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5657 mutex_exit(&node->fd_mutex); 5658 } 5659 5660 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5661 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5662 sizeof (snn), DDI_DEV_AUTOINCR); 5663 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5664 (uint8_t *)(pkt->pkt_cmd 5665 + sizeof (fc_ct_header_t) + sizeof (snn)), 5666 1, DDI_DEV_AUTOINCR); 5667 5668 break; 5669 } 5670 5671 case NS_DA_ID: { 5672 ns_remall_t rall; 5673 char tmp[4] = {0}; 5674 char *ptr; 5675 5676 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5677 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5678 5679 if (cmd == NULL) { 5680 return (FC_NOMEM); 5681 } 5682 5683 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5684 pkt = &cmd->cmd_pkt; 5685 5686 ptr = (char *)(&s_id); 5687 tmp[3] = *ptr++; 5688 tmp[2] = *ptr++; 5689 tmp[1] = *ptr++; 5690 tmp[0] = *ptr; 5691 #if defined(_BIT_FIELDS_LTOH) 5692 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5693 #else 5694 rall.rem_port_id = s_id; 5695 #endif 5696 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5697 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5698 sizeof (rall), DDI_DEV_AUTOINCR); 5699 5700 break; 5701 } 5702 5703 default: 5704 return (FC_FAILURE); 5705 } 5706 5707 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5708 5709 if (rval != FC_SUCCESS) { 5710 
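/* The send failed: record the error in the job and complete the command now. */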
job->job_result = rval; 5711 fp_iodone(cmd); 5712 } 5713 5714 if (polled) { 5715 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5716 fp_jobwait(job); 5717 } else { 5718 rval = FC_SUCCESS; 5719 } 5720 5721 return (rval); 5722 } 5723 5724 5725 /* 5726 * Common interrupt handler 5727 */ 5728 static int 5729 fp_common_intr(fc_packet_t *pkt, int iodone) 5730 { 5731 int rval = FC_FAILURE; 5732 fp_cmd_t *cmd; 5733 fc_local_port_t *port; 5734 5735 cmd = pkt->pkt_ulp_private; 5736 port = cmd->cmd_port; 5737 5738 /* 5739 * Fail fast the upper layer requests if 5740 * a state change has occurred amidst. 5741 */ 5742 mutex_enter(&port->fp_mutex); 5743 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5744 mutex_exit(&port->fp_mutex); 5745 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5746 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5747 } else if (!(port->fp_soft_state & 5748 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5749 mutex_exit(&port->fp_mutex); 5750 5751 switch (pkt->pkt_state) { 5752 case FC_PKT_LOCAL_BSY: 5753 case FC_PKT_FABRIC_BSY: 5754 case FC_PKT_NPORT_BSY: 5755 case FC_PKT_TIMEOUT: 5756 cmd->cmd_retry_interval = (pkt->pkt_state == 5757 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5758 rval = fp_retry_cmd(pkt); 5759 break; 5760 5761 case FC_PKT_FABRIC_RJT: 5762 case FC_PKT_NPORT_RJT: 5763 case FC_PKT_LOCAL_RJT: 5764 case FC_PKT_LS_RJT: 5765 case FC_PKT_FS_RJT: 5766 case FC_PKT_BA_RJT: 5767 rval = fp_handle_reject(pkt); 5768 break; 5769 5770 default: 5771 if (pkt->pkt_resp_resid) { 5772 cmd->cmd_retry_interval = 0; 5773 rval = fp_retry_cmd(pkt); 5774 } 5775 break; 5776 } 5777 } else { 5778 mutex_exit(&port->fp_mutex); 5779 } 5780 5781 if (rval != FC_SUCCESS && iodone) { 5782 fp_iodone(cmd); 5783 rval = FC_SUCCESS; 5784 } 5785 5786 return (rval); 5787 } 5788 5789 5790 /* 5791 * Some not so long winding theory on point to point topology: 5792 * 5793 * In the ACC payload, if the D_ID is ZERO and the common service 5794 * parameters indicate N_Port, then the topology is POINT TO POINT. 5795 * 5796 * In a point to point topology with an N_Port, during Fabric Login, 5797 * the destination N_Port will check with our WWN and decide if it 5798 * needs to issue PLOGI or not. That means, FLOGI could potentially 5799 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5800 * PLOGI creates the device handles. 5801 * 5802 * Assuming that the host port WWN is greater than the other N_Port 5803 * WWN, then we become the master (be aware that this isn't the word 5804 * used in the FC standards) and initiate the PLOGI. 5805 * 5806 */ 5807 static void 5808 fp_flogi_intr(fc_packet_t *pkt) 5809 { 5810 int state; 5811 int f_port; 5812 uint32_t s_id; 5813 uint32_t d_id; 5814 fp_cmd_t *cmd; 5815 fc_local_port_t *port; 5816 la_wwn_t *swwn; 5817 la_wwn_t dwwn; 5818 la_wwn_t nwwn; 5819 fc_remote_port_t *pd; 5820 la_els_logi_t *acc; 5821 com_svc_t csp; 5822 ls_code_t resp; 5823 5824 cmd = pkt->pkt_ulp_private; 5825 port = cmd->cmd_port; 5826 5827 mutex_enter(&port->fp_mutex); 5828 port->fp_out_fpcmds--; 5829 mutex_exit(&port->fp_mutex); 5830 5831 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5832 port, pkt, pkt->pkt_state); 5833 5834 if (FP_IS_PKT_ERROR(pkt)) { 5835 (void) fp_common_intr(pkt, 1); 5836 return; 5837 } 5838 5839 /* 5840 * Currently, we don't need to swap bytes here because qlc is faking the 5841 * response for us and so endianness is getting taken care of. 
But we 5842 * have to fix this and generalize this at some point 5843 */ 5844 acc = (la_els_logi_t *)pkt->pkt_resp; 5845 5846 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5847 sizeof (resp), DDI_DEV_AUTOINCR); 5848 5849 ASSERT(resp.ls_code == LA_ELS_ACC); 5850 if (resp.ls_code != LA_ELS_ACC) { 5851 (void) fp_common_intr(pkt, 1); 5852 return; 5853 } 5854 5855 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5856 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5857 5858 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5859 5860 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5861 5862 mutex_enter(&port->fp_mutex); 5863 state = FC_PORT_STATE_MASK(port->fp_state); 5864 mutex_exit(&port->fp_mutex); 5865 5866 if (pkt->pkt_resp_fhdr.d_id == 0) { 5867 if (f_port == 0 && state != FC_STATE_LOOP) { 5868 swwn = &port->fp_service_params.nport_ww_name; 5869 5870 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5871 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5872 DDI_DEV_AUTOINCR); 5873 5874 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5875 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5876 DDI_DEV_AUTOINCR); 5877 5878 mutex_enter(&port->fp_mutex); 5879 5880 port->fp_topology = FC_TOP_PT_PT; 5881 port->fp_total_devices = 1; 5882 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5883 port->fp_ptpt_master = 1; 5884 /* 5885 * Let us choose 'X' as S_ID and 'Y' 5886 * as D_ID and that'll work; hopefully 5887 * If not, it will get changed. 5888 */ 5889 s_id = port->fp_instance + FP_DEFAULT_SID; 5890 d_id = port->fp_instance + FP_DEFAULT_DID; 5891 port->fp_port_id.port_id = s_id; 5892 mutex_exit(&port->fp_mutex); 5893 5894 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr: fp %x" 5895 "pd %x", port->fp_port_id.port_id, d_id); 5896 pd = fctl_create_remote_port(port, 5897 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5898 KM_NOSLEEP); 5899 if (pd == NULL) { 5900 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5901 0, NULL, "couldn't create device" 5902 " d_id=%X", d_id); 5903 fp_iodone(cmd); 5904 return; 5905 } 5906 5907 cmd->cmd_pkt.pkt_tran_flags = 5908 pkt->pkt_tran_flags; 5909 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5910 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5911 cmd->cmd_retry_count = fp_retry_count; 5912 5913 fp_xlogi_init(port, cmd, s_id, d_id, 5914 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5915 5916 (&cmd->cmd_pkt)->pkt_pd = pd; 5917 5918 /* 5919 * We've just created this fc_remote_port_t, and 5920 * we're about to use it to send a PLOGI, so 5921 * bump the reference count right now. When 5922 * the packet is freed, the reference count will 5923 * be decremented. The ULP may also start using 5924 * it, so mark it as given away as well. 
5925 */ 5926 pd->pd_ref_count++; 5927 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5928 5929 if (fp_sendcmd(port, cmd, 5930 port->fp_fca_handle) == FC_SUCCESS) { 5931 return; 5932 } 5933 } else { 5934 /* 5935 * The device handles will be created when the 5936 * unsolicited PLOGI is completed successfully 5937 */ 5938 port->fp_ptpt_master = 0; 5939 mutex_exit(&port->fp_mutex); 5940 } 5941 } 5942 pkt->pkt_state = FC_PKT_FAILURE; 5943 } else { 5944 if (f_port) { 5945 mutex_enter(&port->fp_mutex); 5946 if (state == FC_STATE_LOOP) { 5947 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5948 } else { 5949 port->fp_topology = FC_TOP_FABRIC; 5950 5951 ddi_rep_get8(pkt->pkt_resp_acc, 5952 (uint8_t *)&port->fp_fabric_name, 5953 (uint8_t *)&acc->node_ww_name, 5954 sizeof (la_wwn_t), 5955 DDI_DEV_AUTOINCR); 5956 } 5957 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5958 mutex_exit(&port->fp_mutex); 5959 } else { 5960 pkt->pkt_state = FC_PKT_FAILURE; 5961 } 5962 } 5963 fp_iodone(cmd); 5964 } 5965 5966 5967 /* 5968 * Handle solicited PLOGI response 5969 */ 5970 static void 5971 fp_plogi_intr(fc_packet_t *pkt) 5972 { 5973 int nl_port; 5974 int bailout; 5975 uint32_t d_id; 5976 fp_cmd_t *cmd; 5977 la_els_logi_t *acc; 5978 fc_local_port_t *port; 5979 fc_remote_port_t *pd; 5980 la_wwn_t nwwn; 5981 la_wwn_t pwwn; 5982 ls_code_t resp; 5983 5984 nl_port = 0; 5985 cmd = pkt->pkt_ulp_private; 5986 port = cmd->cmd_port; 5987 d_id = pkt->pkt_cmd_fhdr.d_id; 5988 5989 #ifndef __lock_lint 5990 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5991 #endif 5992 5993 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5994 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5995 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5996 5997 /* 5998 * Bail out early on ULP initiated requests if the 5999 * state change has occurred 6000 */ 6001 mutex_enter(&port->fp_mutex); 6002 port->fp_out_fpcmds--; 6003 bailout = ((port->fp_statec_busy || 6004 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6005 cmd->cmd_ulp_pkt) ? 1 : 0; 6006 mutex_exit(&port->fp_mutex); 6007 6008 if (FP_IS_PKT_ERROR(pkt) || bailout) { 6009 int skip_msg = 0; 6010 int giveup = 0; 6011 6012 if (cmd->cmd_ulp_pkt) { 6013 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6014 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 6015 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6016 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6017 } 6018 6019 /* 6020 * If an unsolicited cross login already created 6021 * a device speed up the discovery by not retrying 6022 * the command mindlessly. 6023 */ 6024 if (pkt->pkt_pd == NULL && 6025 fctl_get_remote_port_by_did(port, d_id) != NULL) { 6026 fp_iodone(cmd); 6027 return; 6028 } 6029 6030 if (pkt->pkt_pd != NULL) { 6031 giveup = (pkt->pkt_pd->pd_recepient == 6032 PD_PLOGI_RECEPIENT) ? 
1 : 0; 6033 if (giveup) { 6034 /* 6035 * This pd is marked as plogi 6036 * recipient; stop retrying 6037 */ 6038 FP_TRACE(FP_NHEAD1(3, 0), 6039 "fp_plogi_intr: stop retry as" 6040 " a cross login was accepted" 6041 " from d_id=%x, port=%p.", 6042 d_id, port); 6043 fp_iodone(cmd); 6044 return; 6045 } 6046 } 6047 6048 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6049 return; 6050 } 6051 6052 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6053 mutex_enter(&pd->pd_mutex); 6054 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6055 skip_msg++; 6056 } 6057 mutex_exit(&pd->pd_mutex); 6058 } 6059 6060 mutex_enter(&port->fp_mutex); 6061 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6062 port->fp_statec_busy <= 1 && 6063 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6064 mutex_exit(&port->fp_mutex); 6065 /* 6066 * In case of Login Collisions, JNI HBAs return the 6067 * FC pkt back to the Initiator with the state set to 6068 * FC_PKT_LS_RJT and the reason set to FC_REASON_LOGICAL_ERROR. 6069 * QLC HBAs handle such cases in the FW and do not 6070 * return the LS_RJT with Logical error when 6071 * a login collision happens. 6072 */ 6073 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6074 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6075 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6076 "PLOGI to %x failed", d_id); 6077 } 6078 FP_TRACE(FP_NHEAD2(9, 0), 6079 "PLOGI to %x failed. state=%x reason=%x.", 6080 d_id, pkt->pkt_state, pkt->pkt_reason); 6081 } else { 6082 mutex_exit(&port->fp_mutex); 6083 } 6084 6085 fp_iodone(cmd); 6086 return; 6087 } 6088 6089 acc = (la_els_logi_t *)pkt->pkt_resp; 6090 6091 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6092 sizeof (resp), DDI_DEV_AUTOINCR); 6093 6094 ASSERT(resp.ls_code == LA_ELS_ACC); 6095 if (resp.ls_code != LA_ELS_ACC) { 6096 (void) fp_common_intr(pkt, 1); 6097 return; 6098 } 6099 6100 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6101 mutex_enter(&port->fp_mutex); 6102 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6103 mutex_exit(&port->fp_mutex); 6104 fp_iodone(cmd); 6105 return; 6106 } 6107 6108 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6109 6110 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6111 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6112 DDI_DEV_AUTOINCR); 6113 6114 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6115 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6116 DDI_DEV_AUTOINCR); 6117 6118 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6119 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6120 6121 if ((pd = pkt->pkt_pd) == NULL) { 6122 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6123 if (pd == NULL) { 6124 FP_TRACE(FP_NHEAD2(9, 0), "fp_plogi_intr: fp %x pd %x", 6125 port->fp_port_id.port_id, d_id); 6126 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6127 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6128 if (pd == NULL) { 6129 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6130 "couldn't create port device handles" 6131 " d_id=%x", d_id); 6132 fp_iodone(cmd); 6133 return; 6134 } 6135 } else { 6136 fc_remote_port_t *tmp_pd; 6137 6138 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6139 if (tmp_pd != NULL) { 6140 fp_iodone(cmd); 6141 return; 6142 } 6143 6144 mutex_enter(&port->fp_mutex); 6145 mutex_enter(&pd->pd_mutex); 6146 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6147 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6148 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6149 } 6150 6151 if (pd->pd_type == PORT_DEVICE_OLD) { 6152 if (pd->pd_port_id.port_id != d_id) { 6153
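/*
 * The D_ID associated with this PWWN has changed: take the pd out of
 * the D_ID table, mark it as changed and record the new D_ID.
 */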
fctl_delist_did_table(port, pd); 6154 pd->pd_type = PORT_DEVICE_CHANGED; 6155 pd->pd_port_id.port_id = d_id; 6156 } else { 6157 pd->pd_type = PORT_DEVICE_NOCHANGE; 6158 } 6159 } 6160 6161 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6162 char ww_name[17]; 6163 6164 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6165 6166 mutex_exit(&pd->pd_mutex); 6167 mutex_exit(&port->fp_mutex); 6168 FP_TRACE(FP_NHEAD2(9, 0), 6169 "Possible Duplicate name or address" 6170 " identifiers in the PLOGI response" 6171 " D_ID=%x, PWWN=%s: Please check the" 6172 " configuration", d_id, ww_name); 6173 fp_iodone(cmd); 6174 return; 6175 } 6176 fctl_enlist_did_table(port, pd); 6177 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6178 mutex_exit(&pd->pd_mutex); 6179 mutex_exit(&port->fp_mutex); 6180 } 6181 } else { 6182 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6183 6184 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6185 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6186 6187 mutex_enter(&port->fp_mutex); 6188 mutex_enter(&pd->pd_mutex); 6189 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6190 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6191 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6192 pd->pd_type); 6193 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6194 pd->pd_type == PORT_DEVICE_OLD) || 6195 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6196 pd->pd_type = PORT_DEVICE_NOCHANGE; 6197 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6198 pd->pd_type = PORT_DEVICE_NEW; 6199 } 6200 } else { 6201 char old_name[17]; 6202 char new_name[17]; 6203 6204 fc_wwn_to_str(&pd->pd_port_name, old_name); 6205 fc_wwn_to_str(&pwwn, new_name); 6206 6207 FP_TRACE(FP_NHEAD1(9, 0), 6208 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6209 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6210 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6211 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6212 cmd->cmd_ulp_pkt, bailout); 6213 6214 FP_TRACE(FP_NHEAD2(9, 0), 6215 "PWWN of a device with D_ID=%x changed." 6216 " New PWWN = %s, OLD PWWN = %s", d_id, 6217 new_name, old_name); 6218 6219 if (cmd->cmd_ulp_pkt && !bailout) { 6220 fc_remote_node_t *rnodep; 6221 fc_portmap_t *changelist; 6222 fc_portmap_t *listptr; 6223 int len = 1; 6224 /* # entries in changelist */ 6225 6226 fctl_delist_pwwn_table(port, pd); 6227 6228 /* 6229 * Lets now check if there already is a pd with 6230 * this new WWN in the table. If so, we'll mark 6231 * it as invalid 6232 */ 6233 6234 if (new_wwn_pd) { 6235 /* 6236 * There is another pd with in the pwwn 6237 * table with the same WWN that we got 6238 * in the PLOGI payload. We have to get 6239 * it out of the pwwn table, update the 6240 * pd's state (fp_fillout_old_map does 6241 * this for us) and add it to the 6242 * changelist that goes up to ULPs. 6243 * 6244 * len is length of changelist and so 6245 * increment it. 6246 */ 6247 len++; 6248 6249 if (tmp_pd != pd) { 6250 /* 6251 * Odd case where pwwn and did 6252 * tables are out of sync but 6253 * we will handle that too. See 6254 * more comments below. 6255 * 6256 * One more device that ULPs 6257 * should know about and so len 6258 * gets incremented again. 6259 */ 6260 len++; 6261 } 6262 6263 listptr = changelist = kmem_zalloc(len * 6264 sizeof (*changelist), KM_SLEEP); 6265 6266 mutex_enter(&new_wwn_pd->pd_mutex); 6267 rnodep = new_wwn_pd->pd_remote_nodep; 6268 mutex_exit(&new_wwn_pd->pd_mutex); 6269 6270 /* 6271 * Hold the fd_mutex since 6272 * fctl_copy_portmap_held expects it. 
6273 * Preserve lock hierarchy by grabbing 6274 * fd_mutex before pd_mutex 6275 */ 6276 if (rnodep) { 6277 mutex_enter(&rnodep->fd_mutex); 6278 } 6279 mutex_enter(&new_wwn_pd->pd_mutex); 6280 fp_fillout_old_map_held(listptr++, 6281 new_wwn_pd, 0); 6282 mutex_exit(&new_wwn_pd->pd_mutex); 6283 if (rnodep) { 6284 mutex_exit(&rnodep->fd_mutex); 6285 } 6286 6287 /* 6288 * Safety check : 6289 * Lets ensure that the pwwn and did 6290 * tables are in sync. Ideally, we 6291 * should not find that these two pd's 6292 * are different. 6293 */ 6294 if (tmp_pd != pd) { 6295 mutex_enter(&tmp_pd->pd_mutex); 6296 rnodep = 6297 tmp_pd->pd_remote_nodep; 6298 mutex_exit(&tmp_pd->pd_mutex); 6299 6300 /* As above grab fd_mutex */ 6301 if (rnodep) { 6302 mutex_enter(&rnodep-> 6303 fd_mutex); 6304 } 6305 mutex_enter(&tmp_pd->pd_mutex); 6306 6307 fp_fillout_old_map_held( 6308 listptr++, tmp_pd, 0); 6309 6310 mutex_exit(&tmp_pd->pd_mutex); 6311 if (rnodep) { 6312 mutex_exit(&rnodep-> 6313 fd_mutex); 6314 } 6315 6316 /* 6317 * Now add "pd" (not tmp_pd) 6318 * to fp_did_table to sync it up 6319 * with fp_pwwn_table 6320 * 6321 * pd->pd_mutex is already held 6322 * at this point 6323 */ 6324 fctl_enlist_did_table(port, pd); 6325 } 6326 } else { 6327 listptr = changelist = kmem_zalloc( 6328 sizeof (*changelist), KM_SLEEP); 6329 } 6330 6331 ASSERT(changelist != NULL); 6332 6333 fp_fillout_changed_map(listptr, pd, &d_id, 6334 &pwwn); 6335 fctl_enlist_pwwn_table(port, pd); 6336 6337 mutex_exit(&pd->pd_mutex); 6338 mutex_exit(&port->fp_mutex); 6339 6340 fp_iodone(cmd); 6341 6342 (void) fp_ulp_devc_cb(port, changelist, len, 6343 len, KM_NOSLEEP, 0); 6344 6345 return; 6346 } 6347 } 6348 6349 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6350 nl_port = 1; 6351 } 6352 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6353 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6354 } 6355 6356 mutex_exit(&pd->pd_mutex); 6357 mutex_exit(&port->fp_mutex); 6358 6359 if (tmp_pd == NULL) { 6360 mutex_enter(&port->fp_mutex); 6361 mutex_enter(&pd->pd_mutex); 6362 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6363 char ww_name[17]; 6364 6365 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6366 mutex_exit(&pd->pd_mutex); 6367 mutex_exit(&port->fp_mutex); 6368 FP_TRACE(FP_NHEAD2(9, 0), 6369 "Possible Duplicate name or address" 6370 " identifiers in the PLOGI response" 6371 " D_ID=%x, PWWN=%s: Please check the" 6372 " configuration", d_id, ww_name); 6373 fp_iodone(cmd); 6374 return; 6375 } 6376 fctl_enlist_did_table(port, pd); 6377 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6378 mutex_exit(&pd->pd_mutex); 6379 mutex_exit(&port->fp_mutex); 6380 } 6381 } 6382 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6383 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6384 6385 if (cmd->cmd_ulp_pkt) { 6386 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6387 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6388 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6389 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6390 if (pd != NULL) { 6391 FP_TRACE(FP_NHEAD1(9, 0), 6392 "fp_plogi_intr;" 6393 "ulp_pkt's pd is NULL, get a pd %p", 6394 pd); 6395 mutex_enter(&pd->pd_mutex); 6396 pd->pd_ref_count++; 6397 mutex_exit(&pd->pd_mutex); 6398 } 6399 cmd->cmd_ulp_pkt->pkt_pd = pd; 6400 } 6401 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6402 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6403 sizeof (fc_frame_hdr_t)); 6404 bcopy((caddr_t)pkt->pkt_resp, 6405 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6406 sizeof (la_els_logi_t)); 6407 } 6408 6409 mutex_enter(&port->fp_mutex); 6410 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6411 
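/*
 * On a private loop, or when the remote port is an NL_Port, reuse this
 * command to send an ADISC to the device that just completed PLOGI.
 */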
mutex_enter(&pd->pd_mutex); 6412 6413 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6414 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6415 cmd->cmd_retry_count = fp_retry_count; 6416 6417 /* 6418 * If the fc_remote_port_t pointer is not set in the given 6419 * fc_packet_t, then this fc_remote_port_t must have just 6420 * been created. Save the pointer and also increment the 6421 * fc_remote_port_t reference count. 6422 */ 6423 if (pkt->pkt_pd == NULL) { 6424 pkt->pkt_pd = pd; 6425 pd->pd_ref_count++; /* It's in use! */ 6426 } 6427 6428 fp_adisc_init(cmd, cmd->cmd_job); 6429 6430 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6431 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6432 6433 mutex_exit(&pd->pd_mutex); 6434 mutex_exit(&port->fp_mutex); 6435 6436 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6437 return; 6438 } 6439 } else { 6440 mutex_exit(&port->fp_mutex); 6441 } 6442 6443 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6444 mutex_enter(&port->fp_mutex); 6445 mutex_enter(&pd->pd_mutex); 6446 6447 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6448 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6449 cmd->cmd_retry_count = fp_retry_count; 6450 6451 fp_logo_init(pd, cmd, cmd->cmd_job); 6452 6453 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6454 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6455 6456 mutex_exit(&pd->pd_mutex); 6457 mutex_exit(&port->fp_mutex); 6458 6459 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6460 return; 6461 } 6462 6463 } 6464 fp_iodone(cmd); 6465 } 6466 6467 6468 /* 6469 * Handle solicited ADISC response 6470 */ 6471 static void 6472 fp_adisc_intr(fc_packet_t *pkt) 6473 { 6474 int rval; 6475 int bailout; 6476 fp_cmd_t *cmd; 6477 fc_local_port_t *port; 6478 fc_remote_port_t *pd; 6479 la_els_adisc_t *acc; 6480 ls_code_t resp; 6481 fc_hardaddr_t ha; 6482 fc_portmap_t *changelist; 6483 int initiator, adiscfail = 0; 6484 6485 pd = pkt->pkt_pd; 6486 cmd = pkt->pkt_ulp_private; 6487 port = cmd->cmd_port; 6488 6489 #ifndef __lock_lint 6490 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6491 #endif 6492 6493 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6494 6495 mutex_enter(&port->fp_mutex); 6496 port->fp_out_fpcmds--; 6497 bailout = ((port->fp_statec_busy || 6498 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6499 cmd->cmd_ulp_pkt) ? 1 : 0; 6500 mutex_exit(&port->fp_mutex); 6501 6502 if (bailout) { 6503 fp_iodone(cmd); 6504 return; 6505 } 6506 6507 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6508 acc = (la_els_adisc_t *)pkt->pkt_resp; 6509 6510 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6511 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6512 6513 if (resp.ls_code == LA_ELS_ACC) { 6514 int is_private; 6515 6516 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6517 (uint8_t *)&acc->hard_addr, sizeof (ha), 6518 DDI_DEV_AUTOINCR); 6519 6520 mutex_enter(&port->fp_mutex); 6521 6522 is_private = 6523 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6524 6525 mutex_enter(&pd->pd_mutex); 6526 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6527 fctl_enlist_did_table(port, pd); 6528 } 6529 mutex_exit(&pd->pd_mutex); 6530 6531 mutex_exit(&port->fp_mutex); 6532 6533 mutex_enter(&pd->pd_mutex); 6534 if (pd->pd_type != PORT_DEVICE_NEW) { 6535 if (is_private && (pd->pd_hard_addr.hard_addr != 6536 ha.hard_addr)) { 6537 pd->pd_type = PORT_DEVICE_CHANGED; 6538 } else { 6539 pd->pd_type = PORT_DEVICE_NOCHANGE; 6540 } 6541 } 6542 6543 if (is_private && (ha.hard_addr && 6544 pd->pd_port_id.port_id != ha.hard_addr)) { 6545 char ww_name[17]; 6546 6547 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6548 6549 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6550 "NL_Port Identifier %x doesn't match" 6551 " with Hard Address %x, Will use Port" 6552 " WWN %s", pd->pd_port_id.port_id, 6553 ha.hard_addr, ww_name); 6554 6555 pd->pd_hard_addr.hard_addr = 0; 6556 } else { 6557 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6558 } 6559 mutex_exit(&pd->pd_mutex); 6560 } else { 6561 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6562 return; 6563 } 6564 } 6565 } else { 6566 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6567 return; 6568 } 6569 6570 mutex_enter(&port->fp_mutex); 6571 if (port->fp_statec_busy <= 1) { 6572 mutex_exit(&port->fp_mutex); 6573 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6574 "ADISC to %x failed, cmd_flags=%x", 6575 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6576 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6577 adiscfail = 1; 6578 } else { 6579 mutex_exit(&port->fp_mutex); 6580 } 6581 } 6582 6583 if (cmd->cmd_ulp_pkt) { 6584 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6585 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6586 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6587 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6588 cmd->cmd_ulp_pkt->pkt_pd = pd; 6589 FP_TRACE(FP_NHEAD1(9, 0), 6590 "fp_adisc__intr;" 6591 "ulp_pkt's pd is NULL, get a pd %p", 6592 pd); 6593 6594 } 6595 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6596 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6597 sizeof (fc_frame_hdr_t)); 6598 bcopy((caddr_t)pkt->pkt_resp, 6599 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6600 sizeof (la_els_adisc_t)); 6601 } 6602 6603 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6604 FP_TRACE(FP_NHEAD1(9, 0), 6605 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6606 "fp_retry_count=%x, ulp_pkt=%p", 6607 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6608 6609 mutex_enter(&port->fp_mutex); 6610 mutex_enter(&pd->pd_mutex); 6611 6612 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6613 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6614 cmd->cmd_retry_count = fp_retry_count; 6615 6616 fp_logo_init(pd, cmd, cmd->cmd_job); 6617 6618 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6619 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6620 6621 mutex_exit(&pd->pd_mutex); 6622 mutex_exit(&port->fp_mutex); 6623 6624 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6625 if (adiscfail) { 6626 mutex_enter(&pd->pd_mutex); 6627 initiator = 6628 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 6629 pd->pd_state = PORT_DEVICE_VALID; 6630 pd->pd_aux_flags |= PD_LOGGED_OUT; 6631 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) { 6632 pd->pd_type = PORT_DEVICE_NEW; 6633 } else { 6634 pd->pd_type = PORT_DEVICE_NOCHANGE; 6635 } 6636 mutex_exit(&pd->pd_mutex); 6637 6638 changelist = 6639 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6640 6641 if (initiator) { 6642 fp_unregister_login(pd); 6643 fctl_copy_portmap(changelist, pd); 6644 } else { 6645 fp_fillout_old_map(changelist, pd, 0); 6646 } 6647 6648 FP_TRACE(FP_NHEAD1(9, 0), 6649 "fp_adisc_intr: Dev change notification " 6650 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6651 "map_flags=%x initiator=%d", port, pd, 6652 changelist->map_type, changelist->map_state, 6653 changelist->map_flags, initiator); 6654 6655 (void) fp_ulp_devc_cb(port, changelist, 6656 1, 1, KM_SLEEP, 0); 6657 } 6658 if (rval == FC_SUCCESS) { 6659 return; 6660 } 6661 } 6662 fp_iodone(cmd); 6663 } 6664 6665 6666 /* 6667 * Handle solicited LOGO response 6668 */ 6669 static void 6670 fp_logo_intr(fc_packet_t *pkt) 6671 { 6672 ls_code_t resp; 6673 6674 mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6675 ((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--; 6676 mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex); 6677 6678 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6679 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6680 6681 if (FP_IS_PKT_ERROR(pkt)) { 6682 (void) fp_common_intr(pkt, 1); 6683 return; 6684 } 6685 6686 ASSERT(resp.ls_code == LA_ELS_ACC); 6687 if (resp.ls_code != LA_ELS_ACC) { 6688 (void) fp_common_intr(pkt, 1); 6689 return; 6690 } 6691 6692 if (pkt->pkt_pd != NULL) { 6693 fp_unregister_login(pkt->pkt_pd); 6694 } 6695 6696 fp_iodone(pkt->pkt_ulp_private); 6697 } 6698 6699 6700 /* 6701 * Handle solicited RNID response 6702 */ 6703 static void 6704 fp_rnid_intr(fc_packet_t *pkt) 6705 { 6706 ls_code_t resp; 6707 job_request_t *job; 6708 fp_cmd_t *cmd; 6709 la_els_rnid_acc_t *acc; 6710 6711 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6712 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6713 cmd = pkt->pkt_ulp_private; 6714 6715 mutex_enter(&cmd->cmd_port->fp_mutex); 6716 cmd->cmd_port->fp_out_fpcmds--; 6717 mutex_exit(&cmd->cmd_port->fp_mutex); 6718 6719 job = cmd->cmd_job; 6720 ASSERT(job->job_private != NULL); 6721 6722 /* If failure or LS_RJT then retry the packet, if needed */ 6723 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6724 (void) fp_common_intr(pkt, 1); 6725 return; 6726 } 6727 6728 /* Save node_id memory allocated in ioctl code */ 6729 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6730 6731 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6732 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6733 6734 /* wakeup the ioctl thread and free the pkt */ 6735 fp_iodone(cmd); 6736 } 6737 6738 6739 /* 6740 * Handle solicited RLS response 6741 */ 6742 static void 6743 fp_rls_intr(fc_packet_t *pkt) 6744 { 6745 ls_code_t resp; 6746 job_request_t *job; 6747 fp_cmd_t *cmd; 6748 la_els_rls_acc_t *acc; 6749 6750 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6751 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6752 cmd = pkt->pkt_ulp_private; 6753 6754 mutex_enter(&cmd->cmd_port->fp_mutex); 6755 cmd->cmd_port->fp_out_fpcmds--; 6756 mutex_exit(&cmd->cmd_port->fp_mutex); 6757 6758 job = cmd->cmd_job; 6759 ASSERT(job->job_private != NULL); 6760 6761 /* If failure or LS_RJT then retry the packet, if needed */ 
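	/*
	 * fp_common_intr() takes care of retrying the packet where
	 * appropriate and completing the command otherwise, so the error
	 * path simply returns here.
	 */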
	if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) {
		(void) fp_common_intr(pkt, 1);
		return;
	}

	/* Save link error status block in memory allocated in ioctl code */
	acc = (la_els_rls_acc_t *)pkt->pkt_resp;

	ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private,
	    (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t),
	    DDI_DEV_AUTOINCR);

	/* wakeup the ioctl thread and free the pkt */
	fp_iodone(cmd);
}


/*
 * A solicited command completion interrupt (mostly for commands
 * that require almost no post processing such as SCR ELS)
 */
static void
fp_intr(fc_packet_t *pkt)
{
	mutex_enter(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);
	((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_out_fpcmds--;
	mutex_exit(&((fp_cmd_t *)pkt->pkt_ulp_private)->cmd_port->fp_mutex);

	if (FP_IS_PKT_ERROR(pkt)) {
		(void) fp_common_intr(pkt, 1);
		return;
	}
	fp_iodone(pkt->pkt_ulp_private);
}


/*
 * Handle the underlying port's state change
 */
static void
fp_statec_cb(opaque_t port_handle, uint32_t state)
{
	fc_local_port_t *port = port_handle;
	job_request_t *job;

	/*
	 * If it is not possible to process the callbacks
	 * just drop the callback on the floor; Don't bother
	 * to do something that isn't safe at this time
	 */
	mutex_enter(&port->fp_mutex);
	if ((port->fp_soft_state &
	    (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
	    (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) {
		mutex_exit(&port->fp_mutex);
		return;
	}

	if (port->fp_statec_busy == 0) {
		port->fp_soft_state |= FP_SOFT_IN_STATEC_CB;
#ifdef DEBUG
	} else {
		ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB);
#endif
	}

	port->fp_statec_busy++;

	/*
	 * For now, force the trusted method of device authentication (by
	 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition.
6833 */ 6834 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6835 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6836 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6837 fp_port_offline(port, 0); 6838 } 6839 mutex_exit(&port->fp_mutex); 6840 6841 switch (FC_PORT_STATE_MASK(state)) { 6842 case FC_STATE_OFFLINE: 6843 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6844 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6845 if (job == NULL) { 6846 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6847 " fp_statec_cb() couldn't submit a job " 6848 " to the thread: failing.."); 6849 mutex_enter(&port->fp_mutex); 6850 if (--port->fp_statec_busy == 0) { 6851 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6852 } 6853 mutex_exit(&port->fp_mutex); 6854 return; 6855 } 6856 mutex_enter(&port->fp_mutex); 6857 /* 6858 * Zero out this field so that we do not retain 6859 * the fabric name as its no longer valid 6860 */ 6861 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6862 port->fp_state = state; 6863 mutex_exit(&port->fp_mutex); 6864 6865 fctl_enque_job(port, job); 6866 break; 6867 6868 case FC_STATE_ONLINE: 6869 case FC_STATE_LOOP: 6870 mutex_enter(&port->fp_mutex); 6871 port->fp_state = state; 6872 6873 if (port->fp_offline_tid) { 6874 timeout_id_t tid; 6875 6876 tid = port->fp_offline_tid; 6877 port->fp_offline_tid = NULL; 6878 mutex_exit(&port->fp_mutex); 6879 (void) untimeout(tid); 6880 } else { 6881 mutex_exit(&port->fp_mutex); 6882 } 6883 6884 job = fctl_alloc_job(JOB_PORT_ONLINE, 6885 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6886 if (job == NULL) { 6887 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6888 "fp_statec_cb() couldn't submit a job " 6889 "to the thread: failing.."); 6890 6891 mutex_enter(&port->fp_mutex); 6892 if (--port->fp_statec_busy == 0) { 6893 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6894 } 6895 mutex_exit(&port->fp_mutex); 6896 return; 6897 } 6898 fctl_enque_job(port, job); 6899 break; 6900 6901 case FC_STATE_RESET_REQUESTED: 6902 mutex_enter(&port->fp_mutex); 6903 port->fp_state = FC_STATE_OFFLINE; 6904 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6905 mutex_exit(&port->fp_mutex); 6906 /* FALLTHROUGH */ 6907 6908 case FC_STATE_RESET: 6909 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6910 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6911 if (job == NULL) { 6912 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6913 "fp_statec_cb() couldn't submit a job" 6914 " to the thread: failing.."); 6915 6916 mutex_enter(&port->fp_mutex); 6917 if (--port->fp_statec_busy == 0) { 6918 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6919 } 6920 mutex_exit(&port->fp_mutex); 6921 return; 6922 } 6923 6924 /* squeeze into some field in the job structure */ 6925 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6926 fctl_enque_job(port, job); 6927 break; 6928 6929 case FC_STATE_TARGET_PORT_RESET: 6930 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6931 /* FALLTHROUGH */ 6932 6933 case FC_STATE_NAMESERVICE: 6934 /* FALLTHROUGH */ 6935 6936 default: 6937 mutex_enter(&port->fp_mutex); 6938 if (--port->fp_statec_busy == 0) { 6939 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6940 } 6941 mutex_exit(&port->fp_mutex); 6942 break; 6943 } 6944 } 6945 6946 6947 /* 6948 * Register with the Name Server for RSCNs 6949 */ 6950 static int 6951 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6952 int sleep) 6953 { 6954 uint32_t s_id; 6955 uchar_t class; 6956 fc_scr_req_t payload; 6957 fp_cmd_t *cmd; 6958 fc_packet_t *pkt; 6959 6960 mutex_enter(&port->fp_mutex); 6961 s_id = 
port->fp_port_id.port_id; 6962 class = port->fp_ns_login_class; 6963 mutex_exit(&port->fp_mutex); 6964 6965 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6966 sizeof (fc_scr_resp_t), sleep, NULL); 6967 if (cmd == NULL) { 6968 return (FC_NOMEM); 6969 } 6970 6971 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6972 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6973 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6974 cmd->cmd_retry_count = fp_retry_count; 6975 cmd->cmd_ulp_pkt = NULL; 6976 6977 pkt = &cmd->cmd_pkt; 6978 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6979 6980 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6981 6982 payload.ls_code.ls_code = LA_ELS_SCR; 6983 payload.ls_code.mbz = 0; 6984 payload.scr_rsvd = 0; 6985 payload.scr_func = scr_func; 6986 6987 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6988 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6989 6990 job->job_counter = 1; 6991 6992 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6993 fp_iodone(cmd); 6994 } 6995 6996 return (FC_SUCCESS); 6997 } 6998 6999 7000 /* 7001 * There are basically two methods to determine the total number of 7002 * devices out in the NS database; Reading the details of the two 7003 * methods described below, it shouldn't be hard to identify which 7004 * of the two methods is better. 7005 * 7006 * Method 1. 7007 * Iteratively issue GANs until all ports identifiers are walked 7008 * 7009 * Method 2. 7010 * Issue GID_PT (get port Identifiers) with Maximum residual 7011 * field in the request CT HEADER set to accommodate only the 7012 * CT HEADER in the response frame. And if FC-GS2 has been 7013 * carefully read, the NS here has a chance to FS_ACC the 7014 * request and indicate the residual size in the FS_ACC. 7015 * 7016 * Method 2 is wonderful, although it's not mandatory for the NS 7017 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 7018 * (note with particular care the use of the auxiliary verb 'may') 7019 * 7020 */ 7021 static int 7022 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 7023 int sleep) 7024 { 7025 int flags; 7026 int rval; 7027 uint32_t src_id; 7028 fctl_ns_req_t *ns_cmd; 7029 7030 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 7031 7032 mutex_enter(&port->fp_mutex); 7033 src_id = port->fp_port_id.port_id; 7034 mutex_exit(&port->fp_mutex); 7035 7036 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7037 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 7038 sizeof (ns_resp_gid_pt_t), 0, 7039 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 7040 7041 if (ns_cmd == NULL) { 7042 return (FC_NOMEM); 7043 } 7044 7045 ns_cmd->ns_cmd_code = NS_GID_PT; 7046 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 7047 = FC_NS_PORT_NX; /* All port types */ 7048 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 7049 7050 } else { 7051 uint32_t ns_flags; 7052 7053 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 7054 if (create) { 7055 ns_flags |= FCTL_NS_CREATE_DEVICE; 7056 } 7057 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 7058 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 7059 7060 if (ns_cmd == NULL) { 7061 return (FC_NOMEM); 7062 } 7063 ns_cmd->ns_gan_index = 0; 7064 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7065 ns_cmd->ns_cmd_code = NS_GA_NXT; 7066 ns_cmd->ns_gan_max = 0xFFFF; 7067 7068 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7069 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7070 } 7071 7072 flags = 
job->job_flags; 7073 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7074 job->job_counter = 1; 7075 7076 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7077 job->job_flags = flags; 7078 7079 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7080 uint16_t max_resid; 7081 7082 /* 7083 * Revert to scanning the NS if NS_GID_PT isn't 7084 * helping us figure out total number of devices. 7085 */ 7086 if (job->job_result != FC_SUCCESS || 7087 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7088 mutex_enter(&port->fp_mutex); 7089 port->fp_options &= ~FP_NS_SMART_COUNT; 7090 mutex_exit(&port->fp_mutex); 7091 7092 fctl_free_ns_cmd(ns_cmd); 7093 return (fp_ns_get_devcount(port, job, create, sleep)); 7094 } 7095 7096 mutex_enter(&port->fp_mutex); 7097 port->fp_total_devices = 1; 7098 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7099 if (max_resid) { 7100 /* 7101 * Since port identifier is 4 bytes and max_resid 7102 * is also in WORDS, max_resid simply indicates 7103 * the total number of port identifiers not 7104 * transferred 7105 */ 7106 port->fp_total_devices += max_resid; 7107 } 7108 mutex_exit(&port->fp_mutex); 7109 } 7110 mutex_enter(&port->fp_mutex); 7111 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7112 mutex_exit(&port->fp_mutex); 7113 fctl_free_ns_cmd(ns_cmd); 7114 7115 return (rval); 7116 } 7117 7118 /* 7119 * One heck of a function to serve userland. 7120 */ 7121 static int 7122 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7123 { 7124 int rval = 0; 7125 int jcode; 7126 uint32_t ret; 7127 uchar_t open_flag; 7128 fcio_t *kfcio; 7129 job_request_t *job; 7130 boolean_t use32 = B_FALSE; 7131 7132 #ifdef _MULTI_DATAMODEL 7133 switch (ddi_model_convert_from(mode & FMODELS)) { 7134 case DDI_MODEL_ILP32: 7135 use32 = B_TRUE; 7136 break; 7137 7138 case DDI_MODEL_NONE: 7139 default: 7140 break; 7141 } 7142 #endif 7143 7144 mutex_enter(&port->fp_mutex); 7145 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7146 FP_SOFT_IN_UNSOL_CB)) { 7147 fcio->fcio_errno = FC_STATEC_BUSY; 7148 mutex_exit(&port->fp_mutex); 7149 rval = EAGAIN; 7150 if (fp_fcio_copyout(fcio, data, mode)) { 7151 rval = EFAULT; 7152 } 7153 return (rval); 7154 } 7155 open_flag = port->fp_flag; 7156 mutex_exit(&port->fp_mutex); 7157 7158 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7159 fcio->fcio_errno = FC_FAILURE; 7160 rval = EACCES; 7161 if (fp_fcio_copyout(fcio, data, mode)) { 7162 rval = EFAULT; 7163 } 7164 return (rval); 7165 } 7166 7167 /* 7168 * If an exclusive open was demanded during open, don't let 7169 * either innocuous or devil threads to share the file 7170 * descriptor and fire down exclusive access commands 7171 */ 7172 mutex_enter(&port->fp_mutex); 7173 if (port->fp_flag & FP_EXCL) { 7174 if (port->fp_flag & FP_EXCL_BUSY) { 7175 mutex_exit(&port->fp_mutex); 7176 fcio->fcio_errno = FC_FAILURE; 7177 return (EBUSY); 7178 } 7179 port->fp_flag |= FP_EXCL_BUSY; 7180 } 7181 mutex_exit(&port->fp_mutex); 7182 7183 switch (fcio->fcio_cmd) { 7184 case FCIO_GET_HOST_PARAMS: { 7185 fc_port_dev_t *val; 7186 fc_port_dev32_t *val32; 7187 int index; 7188 int lilp_device_count; 7189 fc_lilpmap_t *lilp_map; 7190 uchar_t *alpa_list; 7191 7192 if (use32 == B_TRUE) { 7193 if (fcio->fcio_olen != sizeof (*val32) || 7194 fcio->fcio_xfer != FCIO_XFER_READ) { 7195 rval = EINVAL; 7196 break; 7197 } 7198 } else { 7199 if (fcio->fcio_olen != sizeof (*val) || 7200 fcio->fcio_xfer != FCIO_XFER_READ) { 7201 rval = EINVAL; 7202 break; 7203 } 7204 } 7205 7206 val = kmem_zalloc(sizeof (*val), 
KM_SLEEP); 7207 7208 mutex_enter(&port->fp_mutex); 7209 val->dev_did = port->fp_port_id; 7210 val->dev_hard_addr = port->fp_hard_addr; 7211 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7212 val->dev_nwwn = port->fp_service_params.node_ww_name; 7213 val->dev_state = port->fp_state; 7214 7215 lilp_map = &port->fp_lilp_map; 7216 alpa_list = &lilp_map->lilp_alpalist[0]; 7217 lilp_device_count = lilp_map->lilp_length; 7218 for (index = 0; index < lilp_device_count; index++) { 7219 uint32_t d_id; 7220 7221 d_id = alpa_list[index]; 7222 if (d_id == port->fp_port_id.port_id) { 7223 break; 7224 } 7225 } 7226 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7227 7228 bcopy(port->fp_fc4_types, val->dev_type, 7229 sizeof (port->fp_fc4_types)); 7230 mutex_exit(&port->fp_mutex); 7231 7232 if (use32 == B_TRUE) { 7233 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7234 7235 val32->dev_did = val->dev_did; 7236 val32->dev_hard_addr = val->dev_hard_addr; 7237 val32->dev_pwwn = val->dev_pwwn; 7238 val32->dev_nwwn = val->dev_nwwn; 7239 val32->dev_state = val->dev_state; 7240 val32->dev_did.priv_lilp_posit = 7241 val->dev_did.priv_lilp_posit; 7242 7243 bcopy(val->dev_type, val32->dev_type, 7244 sizeof (port->fp_fc4_types)); 7245 7246 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7247 fcio->fcio_olen, mode) == 0) { 7248 if (fp_fcio_copyout(fcio, data, mode)) { 7249 rval = EFAULT; 7250 } 7251 } else { 7252 rval = EFAULT; 7253 } 7254 7255 kmem_free(val32, sizeof (*val32)); 7256 } else { 7257 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7258 fcio->fcio_olen, mode) == 0) { 7259 if (fp_fcio_copyout(fcio, data, mode)) { 7260 rval = EFAULT; 7261 } 7262 } else { 7263 rval = EFAULT; 7264 } 7265 } 7266 7267 /* need to free "val" here */ 7268 kmem_free(val, sizeof (*val)); 7269 break; 7270 } 7271 7272 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7273 uint32_t index; 7274 char *tmpPath; 7275 fc_local_port_t *tmpPort; 7276 7277 if (fcio->fcio_olen < MAXPATHLEN || 7278 fcio->fcio_ilen != sizeof (uint32_t)) { 7279 rval = EINVAL; 7280 break; 7281 } 7282 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7283 rval = EFAULT; 7284 break; 7285 } 7286 7287 tmpPort = fctl_get_adapter_port_by_index(port, index); 7288 if (tmpPort == NULL) { 7289 FP_TRACE(FP_NHEAD1(9, 0), 7290 "User supplied index out of range"); 7291 fcio->fcio_errno = FC_BADPORT; 7292 rval = EFAULT; 7293 if (fp_fcio_copyout(fcio, data, mode)) { 7294 rval = EFAULT; 7295 } 7296 break; 7297 } 7298 7299 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7300 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7301 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7302 MAXPATHLEN, mode) == 0) { 7303 if (fp_fcio_copyout(fcio, data, mode)) { 7304 rval = EFAULT; 7305 } 7306 } else { 7307 rval = EFAULT; 7308 } 7309 kmem_free(tmpPath, MAXPATHLEN); 7310 break; 7311 } 7312 7313 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7314 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7315 fc_hba_adapter_attributes_t *val; 7316 fc_hba_adapter_attributes32_t *val32; 7317 7318 if (use32 == B_TRUE) { 7319 if (fcio->fcio_olen < sizeof (*val32) || 7320 fcio->fcio_xfer != FCIO_XFER_READ) { 7321 rval = EINVAL; 7322 break; 7323 } 7324 } else { 7325 if (fcio->fcio_olen < sizeof (*val) || 7326 fcio->fcio_xfer != FCIO_XFER_READ) { 7327 rval = EINVAL; 7328 break; 7329 } 7330 } 7331 7332 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7333 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7334 mutex_enter(&port->fp_mutex); 7335 bcopy(port->fp_hba_port_attrs.manufacturer, 7336 
val->Manufacturer, 7337 sizeof (val->Manufacturer)); 7338 bcopy(port->fp_hba_port_attrs.serial_number, 7339 val->SerialNumber, 7340 sizeof (val->SerialNumber)); 7341 bcopy(port->fp_hba_port_attrs.model, 7342 val->Model, 7343 sizeof (val->Model)); 7344 bcopy(port->fp_hba_port_attrs.model_description, 7345 val->ModelDescription, 7346 sizeof (val->ModelDescription)); 7347 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7348 port->fp_sym_node_namelen); 7349 bcopy(port->fp_hba_port_attrs.hardware_version, 7350 val->HardwareVersion, 7351 sizeof (val->HardwareVersion)); 7352 bcopy(port->fp_hba_port_attrs.option_rom_version, 7353 val->OptionROMVersion, 7354 sizeof (val->OptionROMVersion)); 7355 bcopy(port->fp_hba_port_attrs.firmware_version, 7356 val->FirmwareVersion, 7357 sizeof (val->FirmwareVersion)); 7358 val->VendorSpecificID = 7359 port->fp_hba_port_attrs.vendor_specific_id; 7360 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7361 &val->NodeWWN.raw_wwn, 7362 sizeof (val->NodeWWN.raw_wwn)); 7363 7364 7365 bcopy(port->fp_hba_port_attrs.driver_name, 7366 val->DriverName, 7367 sizeof (val->DriverName)); 7368 bcopy(port->fp_hba_port_attrs.driver_version, 7369 val->DriverVersion, 7370 sizeof (val->DriverVersion)); 7371 mutex_exit(&port->fp_mutex); 7372 7373 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7374 val->NumberOfPorts = fctl_count_fru_ports(port, 0); 7375 } else { 7376 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7377 } 7378 7379 if (use32 == B_TRUE) { 7380 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7381 val32->version = val->version; 7382 bcopy(val->Manufacturer, val32->Manufacturer, 7383 sizeof (val->Manufacturer)); 7384 bcopy(val->SerialNumber, val32->SerialNumber, 7385 sizeof (val->SerialNumber)); 7386 bcopy(val->Model, val32->Model, 7387 sizeof (val->Model)); 7388 bcopy(val->ModelDescription, val32->ModelDescription, 7389 sizeof (val->ModelDescription)); 7390 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7391 sizeof (val->NodeSymbolicName)); 7392 bcopy(val->HardwareVersion, val32->HardwareVersion, 7393 sizeof (val->HardwareVersion)); 7394 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7395 sizeof (val->OptionROMVersion)); 7396 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7397 sizeof (val->FirmwareVersion)); 7398 val32->VendorSpecificID = val->VendorSpecificID; 7399 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7400 sizeof (val->NodeWWN.raw_wwn)); 7401 bcopy(val->DriverName, val32->DriverName, 7402 sizeof (val->DriverName)); 7403 bcopy(val->DriverVersion, val32->DriverVersion, 7404 sizeof (val->DriverVersion)); 7405 7406 val32->NumberOfPorts = val->NumberOfPorts; 7407 7408 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7409 fcio->fcio_olen, mode) == 0) { 7410 if (fp_fcio_copyout(fcio, data, mode)) { 7411 rval = EFAULT; 7412 } 7413 } else { 7414 rval = EFAULT; 7415 } 7416 7417 kmem_free(val32, sizeof (*val32)); 7418 } else { 7419 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7420 fcio->fcio_olen, mode) == 0) { 7421 if (fp_fcio_copyout(fcio, data, mode)) { 7422 rval = EFAULT; 7423 } 7424 } else { 7425 rval = EFAULT; 7426 } 7427 } 7428 7429 kmem_free(val, sizeof (*val)); 7430 break; 7431 } 7432 7433 case FCIO_GET_NPIV_ATTRIBUTES: { 7434 fc_hba_npiv_attributes_t *attrs; 7435 7436 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7437 mutex_enter(&port->fp_mutex); 7438 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7439 &attrs->NodeWWN.raw_wwn, 7440 sizeof (attrs->NodeWWN.raw_wwn)); 7441 
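		/*
		 * The physical port's own port WWN is reported next; both
		 * WWNs are copied while fp_mutex is held so the snapshot is
		 * consistent.
		 */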
bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7442 &attrs->PortWWN.raw_wwn, 7443 sizeof (attrs->PortWWN.raw_wwn)); 7444 mutex_exit(&port->fp_mutex); 7445 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7446 fcio->fcio_olen, mode) == 0) { 7447 if (fp_fcio_copyout(fcio, data, mode)) { 7448 rval = EFAULT; 7449 } 7450 } else { 7451 rval = EFAULT; 7452 } 7453 kmem_free(attrs, sizeof (*attrs)); 7454 break; 7455 } 7456 7457 case FCIO_DELETE_NPIV_PORT: { 7458 fc_local_port_t *tmpport; 7459 char ww_pname[17]; 7460 la_wwn_t vwwn[1]; 7461 7462 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7463 if (ddi_copyin(fcio->fcio_ibuf, 7464 &vwwn, sizeof (la_wwn_t), mode)) { 7465 rval = EFAULT; 7466 break; 7467 } 7468 7469 fc_wwn_to_str(&vwwn[0], ww_pname); 7470 FP_TRACE(FP_NHEAD1(3, 0), 7471 "Delete NPIV Port %s", ww_pname); 7472 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7473 if (tmpport == NULL) { 7474 FP_TRACE(FP_NHEAD1(3, 0), 7475 "Delete NPIV Port : no found"); 7476 rval = EFAULT; 7477 } else { 7478 fc_local_port_t *nextport = tmpport->fp_port_next; 7479 fc_local_port_t *prevport = tmpport->fp_port_prev; 7480 int portlen, portindex, ret; 7481 7482 portlen = sizeof (portindex); 7483 ret = ddi_prop_op(DDI_DEV_T_ANY, 7484 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7485 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7486 (caddr_t)&portindex, &portlen); 7487 if (ret != DDI_SUCCESS) { 7488 rval = EFAULT; 7489 break; 7490 } 7491 if (ndi_devi_offline(tmpport->fp_port_dip, 7492 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7493 FP_TRACE(FP_NHEAD1(1, 0), 7494 "Delete NPIV Port failed"); 7495 mutex_enter(&port->fp_mutex); 7496 tmpport->fp_npiv_state = 0; 7497 mutex_exit(&port->fp_mutex); 7498 rval = EFAULT; 7499 } else { 7500 mutex_enter(&port->fp_mutex); 7501 nextport->fp_port_prev = prevport; 7502 prevport->fp_port_next = nextport; 7503 if (port == port->fp_port_next) { 7504 port->fp_port_next = 7505 port->fp_port_prev = NULL; 7506 } 7507 port->fp_npiv_portnum--; 7508 FP_TRACE(FP_NHEAD1(3, 0), 7509 "Delete NPIV Port %d", portindex); 7510 port->fp_npiv_portindex[portindex-1] = 0; 7511 mutex_exit(&port->fp_mutex); 7512 } 7513 } 7514 break; 7515 } 7516 7517 case FCIO_CREATE_NPIV_PORT: { 7518 char ww_nname[17], ww_pname[17]; 7519 la_npiv_create_entry_t entrybuf; 7520 uint32_t vportindex = 0; 7521 int npiv_ret = 0; 7522 char *portname, *fcaname; 7523 7524 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7525 (void) ddi_pathname(port->fp_port_dip, portname); 7526 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7527 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7528 FP_TRACE(FP_NHEAD1(1, 0), 7529 "Create NPIV port %s %s %s", portname, fcaname, 7530 ddi_driver_name(port->fp_fca_dip)); 7531 kmem_free(portname, MAXPATHLEN); 7532 kmem_free(fcaname, MAXPATHLEN); 7533 if (ddi_copyin(fcio->fcio_ibuf, 7534 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7535 rval = EFAULT; 7536 break; 7537 } 7538 7539 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7540 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7541 vportindex = entrybuf.vindex; 7542 FP_TRACE(FP_NHEAD1(3, 0), 7543 "Create NPIV Port %s %s %d", 7544 ww_nname, ww_pname, vportindex); 7545 7546 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7547 rval = EFAULT; 7548 break; 7549 } 7550 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7551 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7552 if (npiv_ret == NDI_SUCCESS) { 7553 mutex_enter(&port->fp_mutex); 7554 port->fp_npiv_portnum++; 7555 mutex_exit(&port->fp_mutex); 7556 if (fp_copyout((void *)&vportindex, 
7557 (void *)fcio->fcio_obuf, 7558 fcio->fcio_olen, mode) == 0) { 7559 if (fp_fcio_copyout(fcio, data, mode)) { 7560 rval = EFAULT; 7561 } 7562 } else { 7563 rval = EFAULT; 7564 } 7565 } else { 7566 rval = EFAULT; 7567 } 7568 FP_TRACE(FP_NHEAD1(3, 0), 7569 "Create NPIV Port %d %d", npiv_ret, vportindex); 7570 break; 7571 } 7572 7573 case FCIO_GET_NPIV_PORT_LIST: { 7574 fc_hba_npiv_port_list_t *list; 7575 int count; 7576 7577 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7578 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7579 rval = EINVAL; 7580 break; 7581 } 7582 7583 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7584 list->version = FC_HBA_LIST_VERSION; 7585 /* build npiv port list */ 7586 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7587 if (count < 0) { 7588 rval = ENXIO; 7589 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7590 kmem_free(list, fcio->fcio_olen); 7591 break; 7592 } 7593 list->numAdapters = count; 7594 7595 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7596 fcio->fcio_olen, mode) == 0) { 7597 if (fp_fcio_copyout(fcio, data, mode)) { 7598 FP_TRACE(FP_NHEAD1(1, 0), 7599 "Copy NPIV Port data error"); 7600 rval = EFAULT; 7601 } 7602 } else { 7603 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7604 rval = EFAULT; 7605 } 7606 kmem_free(list, fcio->fcio_olen); 7607 break; 7608 } 7609 7610 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: { 7611 fc_hba_port_npiv_attributes_t *val; 7612 7613 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7614 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7615 7616 mutex_enter(&port->fp_mutex); 7617 val->npivflag = port->fp_npiv_flag; 7618 val->lastChange = port->fp_last_change; 7619 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7620 &val->PortWWN.raw_wwn, 7621 sizeof (val->PortWWN.raw_wwn)); 7622 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7623 &val->NodeWWN.raw_wwn, 7624 sizeof (val->NodeWWN.raw_wwn)); 7625 mutex_exit(&port->fp_mutex); 7626 7627 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7628 if (port->fp_npiv_type != FC_NPIV_PORT) { 7629 val->MaxNumberOfNPIVPorts = 7630 port->fp_fca_tran->fca_num_npivports; 7631 } else { 7632 val->MaxNumberOfNPIVPorts = 0; 7633 } 7634 7635 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7636 fcio->fcio_olen, mode) == 0) { 7637 if (fp_fcio_copyout(fcio, data, mode)) { 7638 rval = EFAULT; 7639 } 7640 } else { 7641 rval = EFAULT; 7642 } 7643 kmem_free(val, sizeof (*val)); 7644 break; 7645 } 7646 7647 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7648 fc_hba_port_attributes_t *val; 7649 fc_hba_port_attributes32_t *val32; 7650 7651 if (use32 == B_TRUE) { 7652 if (fcio->fcio_olen < sizeof (*val32) || 7653 fcio->fcio_xfer != FCIO_XFER_READ) { 7654 rval = EINVAL; 7655 break; 7656 } 7657 } else { 7658 if (fcio->fcio_olen < sizeof (*val) || 7659 fcio->fcio_xfer != FCIO_XFER_READ) { 7660 rval = EINVAL; 7661 break; 7662 } 7663 } 7664 7665 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7666 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7667 mutex_enter(&port->fp_mutex); 7668 val->lastChange = port->fp_last_change; 7669 val->fp_minor = port->fp_instance; 7670 7671 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7672 &val->PortWWN.raw_wwn, 7673 sizeof (val->PortWWN.raw_wwn)); 7674 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7675 &val->NodeWWN.raw_wwn, 7676 sizeof (val->NodeWWN.raw_wwn)); 7677 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7678 sizeof (val->FabricName.raw_wwn)); 7679 7680 val->PortFcId = port->fp_port_id.port_id; 
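		/*
		 * Translate the current link state and the registered port
		 * type into the corresponding FC-HBA API constants; the
		 * topology check further below can still override PortType.
		 */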

		switch (FC_PORT_STATE_MASK(port->fp_state)) {
		case FC_STATE_OFFLINE:
			val->PortState = FC_HBA_PORTSTATE_OFFLINE;
			break;
		case FC_STATE_ONLINE:
		case FC_STATE_LOOP:
		case FC_STATE_NAMESERVICE:
			val->PortState = FC_HBA_PORTSTATE_ONLINE;
			break;
		default:
			val->PortState = FC_HBA_PORTSTATE_UNKNOWN;
			break;
		}

		/* Translate from LV to FC-HBA port type codes */
		switch (port->fp_port_type.port_type) {
		case FC_NS_PORT_N:
			val->PortType = FC_HBA_PORTTYPE_NPORT;
			break;
		case FC_NS_PORT_NL:
			/* Actually means loop for us */
			val->PortType = FC_HBA_PORTTYPE_LPORT;
			break;
		case FC_NS_PORT_F:
			val->PortType = FC_HBA_PORTTYPE_FPORT;
			break;
		case FC_NS_PORT_FL:
			val->PortType = FC_HBA_PORTTYPE_FLPORT;
			break;
		case FC_NS_PORT_E:
			val->PortType = FC_HBA_PORTTYPE_EPORT;
			break;
		default:
			val->PortType = FC_HBA_PORTTYPE_OTHER;
			break;
		}

		/*
		 * If fp has decided that the topology is public loop,
		 * we will indicate that using the appropriate
		 * FC HBA API constant.
		 */
		switch (port->fp_topology) {
		case FC_TOP_PUBLIC_LOOP:
			val->PortType = FC_HBA_PORTTYPE_NLPORT;
			break;

		case FC_TOP_PT_PT:
			val->PortType = FC_HBA_PORTTYPE_PTP;
			break;

		case FC_TOP_UNKNOWN:
			/*
			 * This should cover the case where nothing is connected
			 * to the port. Crystal+ is probably an exception here.
			 * For Crystal+, port 0 will come up as private loop
			 * (i.e. fp_bind_state will be FC_STATE_LOOP) even when
			 * nothing is connected to it.
			 * Current plan is to let userland handle this.
			 */
			if (port->fp_bind_state == FC_STATE_OFFLINE) {
				val->PortType = FC_HBA_PORTTYPE_UNKNOWN;
			}
			break;

		default:
			/*
			 * Do Nothing.
7751 * Unused: 7752 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7753 */ 7754 break; 7755 } 7756 7757 val->PortSupportedClassofService = 7758 port->fp_hba_port_attrs.supported_cos; 7759 val->PortSupportedFc4Types[0] = 0; 7760 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7761 sizeof (val->PortActiveFc4Types)); 7762 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7763 port->fp_sym_port_namelen); 7764 val->PortSupportedSpeed = 7765 port->fp_hba_port_attrs.supported_speed; 7766 7767 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7768 case FC_STATE_1GBIT_SPEED: 7769 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7770 break; 7771 case FC_STATE_2GBIT_SPEED: 7772 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7773 break; 7774 case FC_STATE_4GBIT_SPEED: 7775 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7776 break; 7777 case FC_STATE_8GBIT_SPEED: 7778 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7779 break; 7780 case FC_STATE_10GBIT_SPEED: 7781 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7782 break; 7783 case FC_STATE_16GBIT_SPEED: 7784 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7785 break; 7786 default: 7787 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7788 break; 7789 } 7790 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7791 val->NumberofDiscoveredPorts = port->fp_dev_count; 7792 mutex_exit(&port->fp_mutex); 7793 7794 if (use32 == B_TRUE) { 7795 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7796 val32->version = val->version; 7797 val32->lastChange = val->lastChange; 7798 val32->fp_minor = val->fp_minor; 7799 7800 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7801 sizeof (val->PortWWN.raw_wwn)); 7802 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7803 sizeof (val->NodeWWN.raw_wwn)); 7804 val32->PortFcId = val->PortFcId; 7805 val32->PortState = val->PortState; 7806 val32->PortType = val->PortType; 7807 7808 val32->PortSupportedClassofService = 7809 val->PortSupportedClassofService; 7810 bcopy(val->PortActiveFc4Types, 7811 val32->PortActiveFc4Types, 7812 sizeof (val->PortActiveFc4Types)); 7813 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7814 sizeof (val->PortSymbolicName)); 7815 bcopy(&val->FabricName, &val32->FabricName, 7816 sizeof (val->FabricName.raw_wwn)); 7817 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7818 val32->PortSpeed = val->PortSpeed; 7819 7820 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7821 val32->NumberofDiscoveredPorts = 7822 val->NumberofDiscoveredPorts; 7823 7824 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7825 fcio->fcio_olen, mode) == 0) { 7826 if (fp_fcio_copyout(fcio, data, mode)) { 7827 rval = EFAULT; 7828 } 7829 } else { 7830 rval = EFAULT; 7831 } 7832 7833 kmem_free(val32, sizeof (*val32)); 7834 } else { 7835 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7836 fcio->fcio_olen, mode) == 0) { 7837 if (fp_fcio_copyout(fcio, data, mode)) { 7838 rval = EFAULT; 7839 } 7840 } else { 7841 rval = EFAULT; 7842 } 7843 } 7844 7845 kmem_free(val, sizeof (*val)); 7846 break; 7847 } 7848 7849 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7850 fc_hba_port_attributes_t *val; 7851 fc_hba_port_attributes32_t *val32; 7852 uint32_t index = 0; 7853 fc_remote_port_t *tmp_pd; 7854 7855 if (use32 == B_TRUE) { 7856 if (fcio->fcio_olen < sizeof (*val32) || 7857 fcio->fcio_xfer != FCIO_XFER_READ) { 7858 rval = EINVAL; 7859 break; 7860 } 7861 } else { 7862 if (fcio->fcio_olen < sizeof (*val) || 7863 fcio->fcio_xfer != FCIO_XFER_READ) { 7864 rval = EINVAL; 7865 break; 7866 } 7867 } 7868 7869 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), 
mode)) { 7870 rval = EFAULT; 7871 break; 7872 } 7873 7874 if (index >= port->fp_dev_count) { 7875 FP_TRACE(FP_NHEAD1(9, 0), 7876 "User supplied index out of range"); 7877 fcio->fcio_errno = FC_OUTOFBOUNDS; 7878 rval = EINVAL; 7879 if (fp_fcio_copyout(fcio, data, mode)) { 7880 rval = EFAULT; 7881 } 7882 break; 7883 } 7884 7885 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7886 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7887 7888 mutex_enter(&port->fp_mutex); 7889 tmp_pd = fctl_lookup_pd_by_index(port, index); 7890 7891 if (tmp_pd == NULL) { 7892 fcio->fcio_errno = FC_BADPORT; 7893 rval = EINVAL; 7894 } else { 7895 val->lastChange = port->fp_last_change; 7896 val->fp_minor = port->fp_instance; 7897 7898 mutex_enter(&tmp_pd->pd_mutex); 7899 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7900 &val->PortWWN.raw_wwn, 7901 sizeof (val->PortWWN.raw_wwn)); 7902 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7903 &val->NodeWWN.raw_wwn, 7904 sizeof (val->NodeWWN.raw_wwn)); 7905 val->PortFcId = tmp_pd->pd_port_id.port_id; 7906 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7907 tmp_pd->pd_spn_len); 7908 val->PortSupportedClassofService = tmp_pd->pd_cos; 7909 /* 7910 * we will assume the sizeof these pd_fc4types and 7911 * portActiveFc4Types will remain the same. we could 7912 * add in a check for it, but we decided it was unneeded 7913 */ 7914 bcopy((caddr_t)tmp_pd->pd_fc4types, 7915 val->PortActiveFc4Types, 7916 sizeof (tmp_pd->pd_fc4types)); 7917 val->PortState = 7918 fp_map_remote_port_state(tmp_pd->pd_state); 7919 mutex_exit(&tmp_pd->pd_mutex); 7920 7921 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7922 val->PortSupportedFc4Types[0] = 0; 7923 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7924 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7925 val->PortMaxFrameSize = 0; 7926 val->NumberofDiscoveredPorts = 0; 7927 7928 if (use32 == B_TRUE) { 7929 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7930 val32->version = val->version; 7931 val32->lastChange = val->lastChange; 7932 val32->fp_minor = val->fp_minor; 7933 7934 bcopy(&val->PortWWN.raw_wwn, 7935 &val32->PortWWN.raw_wwn, 7936 sizeof (val->PortWWN.raw_wwn)); 7937 bcopy(&val->NodeWWN.raw_wwn, 7938 &val32->NodeWWN.raw_wwn, 7939 sizeof (val->NodeWWN.raw_wwn)); 7940 val32->PortFcId = val->PortFcId; 7941 bcopy(val->PortSymbolicName, 7942 val32->PortSymbolicName, 7943 sizeof (val->PortSymbolicName)); 7944 val32->PortSupportedClassofService = 7945 val->PortSupportedClassofService; 7946 bcopy(val->PortActiveFc4Types, 7947 val32->PortActiveFc4Types, 7948 sizeof (tmp_pd->pd_fc4types)); 7949 7950 val32->PortType = val->PortType; 7951 val32->PortState = val->PortState; 7952 val32->PortSupportedFc4Types[0] = 7953 val->PortSupportedFc4Types[0]; 7954 val32->PortSupportedSpeed = 7955 val->PortSupportedSpeed; 7956 val32->PortSpeed = val->PortSpeed; 7957 val32->PortMaxFrameSize = 7958 val->PortMaxFrameSize; 7959 val32->NumberofDiscoveredPorts = 7960 val->NumberofDiscoveredPorts; 7961 7962 if (fp_copyout((void *)val32, 7963 (void *)fcio->fcio_obuf, 7964 fcio->fcio_olen, mode) == 0) { 7965 if (fp_fcio_copyout(fcio, 7966 data, mode)) { 7967 rval = EFAULT; 7968 } 7969 } else { 7970 rval = EFAULT; 7971 } 7972 7973 kmem_free(val32, sizeof (*val32)); 7974 } else { 7975 if (fp_copyout((void *)val, 7976 (void *)fcio->fcio_obuf, 7977 fcio->fcio_olen, mode) == 0) { 7978 if (fp_fcio_copyout(fcio, data, mode)) { 7979 rval = EFAULT; 7980 } 7981 } else { 7982 rval = EFAULT; 7983 } 7984 } 7985 } 7986 7987 mutex_exit(&port->fp_mutex); 7988 kmem_free(val, sizeof (*val)); 7989 break; 
7990 } 7991 7992 case FCIO_GET_PORT_ATTRIBUTES: { 7993 fc_hba_port_attributes_t *val; 7994 fc_hba_port_attributes32_t *val32; 7995 la_wwn_t wwn; 7996 fc_remote_port_t *tmp_pd; 7997 7998 if (use32 == B_TRUE) { 7999 if (fcio->fcio_olen < sizeof (*val32) || 8000 fcio->fcio_xfer != FCIO_XFER_READ) { 8001 rval = EINVAL; 8002 break; 8003 } 8004 } else { 8005 if (fcio->fcio_olen < sizeof (*val) || 8006 fcio->fcio_xfer != FCIO_XFER_READ) { 8007 rval = EINVAL; 8008 break; 8009 } 8010 } 8011 8012 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 8013 rval = EFAULT; 8014 break; 8015 } 8016 8017 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 8018 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 8019 8020 mutex_enter(&port->fp_mutex); 8021 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 8022 val->lastChange = port->fp_last_change; 8023 val->fp_minor = port->fp_instance; 8024 mutex_exit(&port->fp_mutex); 8025 8026 if (tmp_pd == NULL) { 8027 fcio->fcio_errno = FC_BADWWN; 8028 rval = EINVAL; 8029 } else { 8030 mutex_enter(&tmp_pd->pd_mutex); 8031 bcopy(&tmp_pd->pd_port_name.raw_wwn, 8032 &val->PortWWN.raw_wwn, 8033 sizeof (val->PortWWN.raw_wwn)); 8034 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 8035 &val->NodeWWN.raw_wwn, 8036 sizeof (val->NodeWWN.raw_wwn)); 8037 val->PortFcId = tmp_pd->pd_port_id.port_id; 8038 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 8039 tmp_pd->pd_spn_len); 8040 val->PortSupportedClassofService = tmp_pd->pd_cos; 8041 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 8042 val->PortState = 8043 fp_map_remote_port_state(tmp_pd->pd_state); 8044 val->PortSupportedFc4Types[0] = 0; 8045 /* 8046 * we will assume the sizeof these pd_fc4types and 8047 * portActiveFc4Types will remain the same. we could 8048 * add in a check for it, but we decided it was unneeded 8049 */ 8050 bcopy((caddr_t)tmp_pd->pd_fc4types, 8051 val->PortActiveFc4Types, 8052 sizeof (tmp_pd->pd_fc4types)); 8053 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8054 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 8055 val->PortMaxFrameSize = 0; 8056 val->NumberofDiscoveredPorts = 0; 8057 mutex_exit(&tmp_pd->pd_mutex); 8058 8059 if (use32 == B_TRUE) { 8060 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 8061 val32->version = val->version; 8062 val32->lastChange = val->lastChange; 8063 val32->fp_minor = val->fp_minor; 8064 bcopy(&val->PortWWN.raw_wwn, 8065 &val32->PortWWN.raw_wwn, 8066 sizeof (val->PortWWN.raw_wwn)); 8067 bcopy(&val->NodeWWN.raw_wwn, 8068 &val32->NodeWWN.raw_wwn, 8069 sizeof (val->NodeWWN.raw_wwn)); 8070 val32->PortFcId = val->PortFcId; 8071 bcopy(val->PortSymbolicName, 8072 val32->PortSymbolicName, 8073 sizeof (val->PortSymbolicName)); 8074 val32->PortSupportedClassofService = 8075 val->PortSupportedClassofService; 8076 val32->PortType = val->PortType; 8077 val32->PortState = val->PortState; 8078 val32->PortSupportedFc4Types[0] = 8079 val->PortSupportedFc4Types[0]; 8080 bcopy(val->PortActiveFc4Types, 8081 val32->PortActiveFc4Types, 8082 sizeof (tmp_pd->pd_fc4types)); 8083 val32->PortSupportedSpeed = 8084 val->PortSupportedSpeed; 8085 val32->PortSpeed = val->PortSpeed; 8086 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8087 val32->NumberofDiscoveredPorts = 8088 val->NumberofDiscoveredPorts; 8089 8090 if (fp_copyout((void *)val32, 8091 (void *)fcio->fcio_obuf, 8092 fcio->fcio_olen, mode) == 0) { 8093 if (fp_fcio_copyout(fcio, data, mode)) { 8094 rval = EFAULT; 8095 } 8096 } else { 8097 rval = EFAULT; 8098 } 8099 8100 kmem_free(val32, sizeof (*val32)); 8101 } else { 8102 if (fp_copyout((void *)val, 8103 (void 
*)fcio->fcio_obuf, 8104 fcio->fcio_olen, mode) == 0) { 8105 if (fp_fcio_copyout(fcio, data, mode)) { 8106 rval = EFAULT; 8107 } 8108 } else { 8109 rval = EFAULT; 8110 } 8111 } 8112 } 8113 kmem_free(val, sizeof (*val)); 8114 break; 8115 } 8116 8117 case FCIO_GET_NUM_DEVS: { 8118 int num_devices; 8119 8120 if (fcio->fcio_olen != sizeof (num_devices) || 8121 fcio->fcio_xfer != FCIO_XFER_READ) { 8122 rval = EINVAL; 8123 break; 8124 } 8125 8126 mutex_enter(&port->fp_mutex); 8127 switch (port->fp_topology) { 8128 case FC_TOP_PRIVATE_LOOP: 8129 case FC_TOP_PT_PT: 8130 num_devices = port->fp_total_devices; 8131 fcio->fcio_errno = FC_SUCCESS; 8132 break; 8133 8134 case FC_TOP_PUBLIC_LOOP: 8135 case FC_TOP_FABRIC: 8136 mutex_exit(&port->fp_mutex); 8137 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8138 NULL, KM_SLEEP); 8139 ASSERT(job != NULL); 8140 8141 /* 8142 * In FC-GS-2 the Name Server doesn't send out 8143 * RSCNs for any Name Server Database updates 8144 * When it is finally fixed there is no need 8145 * to probe as below and should be removed. 8146 */ 8147 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8148 fctl_dealloc_job(job); 8149 8150 mutex_enter(&port->fp_mutex); 8151 num_devices = port->fp_total_devices; 8152 fcio->fcio_errno = FC_SUCCESS; 8153 break; 8154 8155 case FC_TOP_NO_NS: 8156 /* FALLTHROUGH */ 8157 case FC_TOP_UNKNOWN: 8158 /* FALLTHROUGH */ 8159 default: 8160 num_devices = 0; 8161 fcio->fcio_errno = FC_SUCCESS; 8162 break; 8163 } 8164 mutex_exit(&port->fp_mutex); 8165 8166 if (fp_copyout((void *)&num_devices, 8167 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8168 mode) == 0) { 8169 if (fp_fcio_copyout(fcio, data, mode)) { 8170 rval = EFAULT; 8171 } 8172 } else { 8173 rval = EFAULT; 8174 } 8175 break; 8176 } 8177 8178 case FCIO_GET_DEV_LIST: { 8179 int num_devices; 8180 int new_count; 8181 int map_size; 8182 8183 if (fcio->fcio_xfer != FCIO_XFER_READ || 8184 fcio->fcio_alen != sizeof (new_count)) { 8185 rval = EINVAL; 8186 break; 8187 } 8188 8189 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8190 8191 mutex_enter(&port->fp_mutex); 8192 if (num_devices < port->fp_total_devices) { 8193 fcio->fcio_errno = FC_TOOMANY; 8194 new_count = port->fp_total_devices; 8195 mutex_exit(&port->fp_mutex); 8196 8197 if (fp_copyout((void *)&new_count, 8198 (void *)fcio->fcio_abuf, 8199 sizeof (new_count), mode)) { 8200 rval = EFAULT; 8201 break; 8202 } 8203 8204 if (fp_fcio_copyout(fcio, data, mode)) { 8205 rval = EFAULT; 8206 break; 8207 } 8208 rval = EINVAL; 8209 break; 8210 } 8211 8212 if (port->fp_total_devices <= 0) { 8213 fcio->fcio_errno = FC_NO_MAP; 8214 new_count = port->fp_total_devices; 8215 mutex_exit(&port->fp_mutex); 8216 8217 if (fp_copyout((void *)&new_count, 8218 (void *)fcio->fcio_abuf, 8219 sizeof (new_count), mode)) { 8220 rval = EFAULT; 8221 break; 8222 } 8223 8224 if (fp_fcio_copyout(fcio, data, mode)) { 8225 rval = EFAULT; 8226 break; 8227 } 8228 rval = EINVAL; 8229 break; 8230 } 8231 8232 switch (port->fp_topology) { 8233 case FC_TOP_PRIVATE_LOOP: 8234 if (fp_fillout_loopmap(port, fcio, 8235 mode) != FC_SUCCESS) { 8236 rval = EFAULT; 8237 break; 8238 } 8239 if (fp_fcio_copyout(fcio, data, mode)) { 8240 rval = EFAULT; 8241 } 8242 break; 8243 8244 case FC_TOP_PT_PT: 8245 if (fp_fillout_p2pmap(port, fcio, 8246 mode) != FC_SUCCESS) { 8247 rval = EFAULT; 8248 break; 8249 } 8250 if (fp_fcio_copyout(fcio, data, mode)) { 8251 rval = EFAULT; 8252 } 8253 break; 8254 8255 case FC_TOP_PUBLIC_LOOP: 8256 case FC_TOP_FABRIC: { 8257 fctl_ns_req_t *ns_cmd; 8258 8259 map_size = 8260 
sizeof (fc_port_dev_t) * port->fp_total_devices; 8261 8262 mutex_exit(&port->fp_mutex); 8263 8264 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8265 sizeof (ns_resp_gan_t), map_size, 8266 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8267 KM_SLEEP); 8268 ASSERT(ns_cmd != NULL); 8269 8270 ns_cmd->ns_gan_index = 0; 8271 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8272 ns_cmd->ns_cmd_code = NS_GA_NXT; 8273 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8274 8275 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8276 NULL, KM_SLEEP); 8277 ASSERT(job != NULL); 8278 8279 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8280 8281 if (ret != FC_SUCCESS || 8282 job->job_result != FC_SUCCESS) { 8283 fctl_free_ns_cmd(ns_cmd); 8284 8285 fcio->fcio_errno = job->job_result; 8286 new_count = 0; 8287 if (fp_copyout((void *)&new_count, 8288 (void *)fcio->fcio_abuf, 8289 sizeof (new_count), mode)) { 8290 fctl_dealloc_job(job); 8291 mutex_enter(&port->fp_mutex); 8292 rval = EFAULT; 8293 break; 8294 } 8295 8296 if (fp_fcio_copyout(fcio, data, mode)) { 8297 fctl_dealloc_job(job); 8298 mutex_enter(&port->fp_mutex); 8299 rval = EFAULT; 8300 break; 8301 } 8302 rval = EIO; 8303 mutex_enter(&port->fp_mutex); 8304 break; 8305 } 8306 fctl_dealloc_job(job); 8307 8308 new_count = ns_cmd->ns_gan_index; 8309 if (fp_copyout((void *)&new_count, 8310 (void *)fcio->fcio_abuf, sizeof (new_count), 8311 mode)) { 8312 rval = EFAULT; 8313 fctl_free_ns_cmd(ns_cmd); 8314 mutex_enter(&port->fp_mutex); 8315 break; 8316 } 8317 8318 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8319 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8320 ns_cmd->ns_gan_index, mode)) { 8321 rval = EFAULT; 8322 fctl_free_ns_cmd(ns_cmd); 8323 mutex_enter(&port->fp_mutex); 8324 break; 8325 } 8326 fctl_free_ns_cmd(ns_cmd); 8327 8328 if (fp_fcio_copyout(fcio, data, mode)) { 8329 rval = EFAULT; 8330 } 8331 mutex_enter(&port->fp_mutex); 8332 break; 8333 } 8334 8335 case FC_TOP_NO_NS: 8336 /* FALLTHROUGH */ 8337 case FC_TOP_UNKNOWN: 8338 /* FALLTHROUGH */ 8339 default: 8340 fcio->fcio_errno = FC_NO_MAP; 8341 num_devices = port->fp_total_devices; 8342 8343 if (fp_copyout((void *)&new_count, 8344 (void *)fcio->fcio_abuf, 8345 sizeof (new_count), mode)) { 8346 rval = EFAULT; 8347 break; 8348 } 8349 8350 if (fp_fcio_copyout(fcio, data, mode)) { 8351 rval = EFAULT; 8352 break; 8353 } 8354 rval = EINVAL; 8355 break; 8356 } 8357 mutex_exit(&port->fp_mutex); 8358 break; 8359 } 8360 8361 case FCIO_GET_SYM_PNAME: { 8362 rval = ENOTSUP; 8363 break; 8364 } 8365 8366 case FCIO_GET_SYM_NNAME: { 8367 rval = ENOTSUP; 8368 break; 8369 } 8370 8371 case FCIO_SET_SYM_PNAME: { 8372 rval = ENOTSUP; 8373 break; 8374 } 8375 8376 case FCIO_SET_SYM_NNAME: { 8377 rval = ENOTSUP; 8378 break; 8379 } 8380 8381 case FCIO_GET_LOGI_PARAMS: { 8382 la_wwn_t pwwn; 8383 la_wwn_t *my_pwwn; 8384 la_els_logi_t *params; 8385 la_els_logi32_t *params32; 8386 fc_remote_node_t *node; 8387 fc_remote_port_t *pd; 8388 8389 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8390 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8391 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8392 rval = EINVAL; 8393 break; 8394 } 8395 8396 if (use32 == B_TRUE) { 8397 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8398 rval = EINVAL; 8399 break; 8400 } 8401 } else { 8402 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8403 rval = EINVAL; 8404 break; 8405 } 8406 } 8407 8408 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8409 rval = EFAULT; 8410 break; 8411 } 8412 8413 pd = fctl_hold_remote_port_by_pwwn(port, 
&pwwn); 8414 if (pd == NULL) { 8415 mutex_enter(&port->fp_mutex); 8416 my_pwwn = &port->fp_service_params.nport_ww_name; 8417 mutex_exit(&port->fp_mutex); 8418 8419 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8420 rval = ENXIO; 8421 break; 8422 } 8423 8424 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8425 mutex_enter(&port->fp_mutex); 8426 *params = port->fp_service_params; 8427 mutex_exit(&port->fp_mutex); 8428 } else { 8429 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8430 8431 mutex_enter(&pd->pd_mutex); 8432 params->ls_code.mbz = params->ls_code.ls_code = 0; 8433 params->common_service = pd->pd_csp; 8434 params->nport_ww_name = pd->pd_port_name; 8435 params->class_1 = pd->pd_clsp1; 8436 params->class_2 = pd->pd_clsp2; 8437 params->class_3 = pd->pd_clsp3; 8438 node = pd->pd_remote_nodep; 8439 mutex_exit(&pd->pd_mutex); 8440 8441 bzero(params->reserved, sizeof (params->reserved)); 8442 8443 mutex_enter(&node->fd_mutex); 8444 bcopy(node->fd_vv, params->vendor_version, 8445 sizeof (node->fd_vv)); 8446 params->node_ww_name = node->fd_node_name; 8447 mutex_exit(&node->fd_mutex); 8448 8449 fctl_release_remote_port(pd); 8450 } 8451 8452 if (use32 == B_TRUE) { 8453 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8454 8455 params32->ls_code.mbz = params->ls_code.mbz; 8456 params32->common_service = params->common_service; 8457 params32->nport_ww_name = params->nport_ww_name; 8458 params32->class_1 = params->class_1; 8459 params32->class_2 = params->class_2; 8460 params32->class_3 = params->class_3; 8461 bzero(params32->reserved, sizeof (params32->reserved)); 8462 bcopy(params->vendor_version, params32->vendor_version, 8463 sizeof (node->fd_vv)); 8464 params32->node_ww_name = params->node_ww_name; 8465 8466 if (ddi_copyout((void *)params32, 8467 (void *)fcio->fcio_obuf, 8468 sizeof (*params32), mode)) { 8469 rval = EFAULT; 8470 } 8471 8472 kmem_free(params32, sizeof (*params32)); 8473 } else { 8474 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8475 sizeof (*params), mode)) { 8476 rval = EFAULT; 8477 } 8478 } 8479 8480 kmem_free(params, sizeof (*params)); 8481 if (fp_fcio_copyout(fcio, data, mode)) { 8482 rval = EFAULT; 8483 } 8484 break; 8485 } 8486 8487 case FCIO_DEV_LOGOUT: 8488 case FCIO_DEV_LOGIN: 8489 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8490 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8491 rval = EINVAL; 8492 8493 if (fp_fcio_copyout(fcio, data, mode)) { 8494 rval = EFAULT; 8495 } 8496 break; 8497 } 8498 8499 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8500 jcode = JOB_FCIO_LOGIN; 8501 } else { 8502 jcode = JOB_FCIO_LOGOUT; 8503 } 8504 8505 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8506 bcopy(fcio, kfcio, sizeof (*fcio)); 8507 8508 if (kfcio->fcio_ilen) { 8509 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8510 KM_SLEEP); 8511 8512 if (ddi_copyin((void *)fcio->fcio_ibuf, 8513 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8514 mode)) { 8515 rval = EFAULT; 8516 8517 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8518 kmem_free(kfcio, sizeof (*kfcio)); 8519 fcio->fcio_errno = job->job_result; 8520 if (fp_fcio_copyout(fcio, data, mode)) { 8521 rval = EFAULT; 8522 } 8523 break; 8524 } 8525 } 8526 8527 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8528 job->job_private = kfcio; 8529 8530 fctl_enque_job(port, job); 8531 fctl_jobwait(job); 8532 8533 rval = job->job_result; 8534 8535 fcio->fcio_errno = kfcio->fcio_errno; 8536 if (fp_fcio_copyout(fcio, data, mode)) { 8537 rval = EFAULT; 8538 } 8539 8540 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8541 
kmem_free(kfcio, sizeof (*kfcio)); 8542 fctl_dealloc_job(job); 8543 break; 8544 8545 case FCIO_GET_STATE: { 8546 la_wwn_t pwwn; 8547 uint32_t state; 8548 fc_remote_port_t *pd; 8549 fctl_ns_req_t *ns_cmd; 8550 8551 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8552 fcio->fcio_olen != sizeof (state) || 8553 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8554 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8555 rval = EINVAL; 8556 break; 8557 } 8558 8559 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8560 rval = EFAULT; 8561 break; 8562 } 8563 fcio->fcio_errno = 0; 8564 8565 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8566 if (pd == NULL) { 8567 mutex_enter(&port->fp_mutex); 8568 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8569 mutex_exit(&port->fp_mutex); 8570 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8571 NULL, NULL, KM_SLEEP); 8572 8573 job->job_counter = 1; 8574 job->job_result = FC_SUCCESS; 8575 8576 ns_cmd = fctl_alloc_ns_cmd( 8577 sizeof (ns_req_gid_pn_t), 8578 sizeof (ns_resp_gid_pn_t), 8579 sizeof (ns_resp_gid_pn_t), 8580 FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8581 ASSERT(ns_cmd != NULL); 8582 8583 ns_cmd->ns_cmd_code = NS_GID_PN; 8584 ((ns_req_gid_pn_t *) 8585 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8586 8587 ret = fp_ns_query(port, ns_cmd, job, 8588 1, KM_SLEEP); 8589 8590 if (ret != FC_SUCCESS || job->job_result != 8591 FC_SUCCESS) { 8592 if (ret != FC_SUCCESS) { 8593 fcio->fcio_errno = ret; 8594 } else { 8595 fcio->fcio_errno = 8596 job->job_result; 8597 } 8598 rval = EIO; 8599 } else { 8600 state = PORT_DEVICE_INVALID; 8601 } 8602 fctl_free_ns_cmd(ns_cmd); 8603 fctl_dealloc_job(job); 8604 } else { 8605 mutex_exit(&port->fp_mutex); 8606 fcio->fcio_errno = FC_BADWWN; 8607 rval = ENXIO; 8608 } 8609 } else { 8610 mutex_enter(&pd->pd_mutex); 8611 state = pd->pd_state; 8612 mutex_exit(&pd->pd_mutex); 8613 8614 fctl_release_remote_port(pd); 8615 } 8616 8617 if (!rval) { 8618 if (ddi_copyout((void *)&state, 8619 (void *)fcio->fcio_obuf, sizeof (state), 8620 mode)) { 8621 rval = EFAULT; 8622 } 8623 } 8624 if (fp_fcio_copyout(fcio, data, mode)) { 8625 rval = EFAULT; 8626 } 8627 break; 8628 } 8629 8630 case FCIO_DEV_REMOVE: { 8631 la_wwn_t pwwn; 8632 fc_portmap_t *changelist; 8633 fc_remote_port_t *pd; 8634 8635 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8636 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8637 rval = EINVAL; 8638 break; 8639 } 8640 8641 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8642 rval = EFAULT; 8643 break; 8644 } 8645 8646 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8647 if (pd == NULL) { 8648 rval = ENXIO; 8649 fcio->fcio_errno = FC_BADWWN; 8650 if (fp_fcio_copyout(fcio, data, mode)) { 8651 rval = EFAULT; 8652 } 8653 break; 8654 } 8655 8656 mutex_enter(&pd->pd_mutex); 8657 if (pd->pd_ref_count > 1) { 8658 mutex_exit(&pd->pd_mutex); 8659 8660 rval = EBUSY; 8661 fcio->fcio_errno = FC_FAILURE; 8662 fctl_release_remote_port(pd); 8663 8664 if (fp_fcio_copyout(fcio, data, mode)) { 8665 rval = EFAULT; 8666 } 8667 break; 8668 } 8669 mutex_exit(&pd->pd_mutex); 8670 8671 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8672 8673 fctl_copy_portmap(changelist, pd); 8674 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8675 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8676 8677 fctl_release_remote_port(pd); 8678 break; 8679 } 8680 8681 case FCIO_GET_FCODE_REV: { 8682 caddr_t fcode_rev; 8683 fc_fca_pm_t pm; 8684 8685 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8686 fcio->fcio_xfer != FCIO_XFER_READ) { 8687 rval = EINVAL; 8688 break; 8689 } 
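
		/*
		 * Read the FCode revision through the FCA port-management
		 * entry point into a kernel buffer and then copy it out to
		 * the caller's buffer.
		 */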
8690 bzero((caddr_t)&pm, sizeof (pm)); 8691 8692 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8693 8694 pm.pm_cmd_flags = FC_FCA_PM_READ; 8695 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8696 pm.pm_data_len = fcio->fcio_olen; 8697 pm.pm_data_buf = fcode_rev; 8698 8699 ret = port->fp_fca_tran->fca_port_manage( 8700 port->fp_fca_handle, &pm); 8701 8702 if (ret == FC_SUCCESS) { 8703 if (ddi_copyout((void *)fcode_rev, 8704 (void *)fcio->fcio_obuf, 8705 fcio->fcio_olen, mode) == 0) { 8706 if (fp_fcio_copyout(fcio, data, mode)) { 8707 rval = EFAULT; 8708 } 8709 } else { 8710 rval = EFAULT; 8711 } 8712 } else { 8713 /* 8714 * check if buffer was not large enough to obtain 8715 * FCODE version. 8716 */ 8717 if (pm.pm_data_len > fcio->fcio_olen) { 8718 rval = ENOMEM; 8719 } else { 8720 rval = EIO; 8721 } 8722 fcio->fcio_errno = ret; 8723 if (fp_fcio_copyout(fcio, data, mode)) { 8724 rval = EFAULT; 8725 } 8726 } 8727 kmem_free(fcode_rev, fcio->fcio_olen); 8728 break; 8729 } 8730 8731 case FCIO_GET_FW_REV: { 8732 caddr_t fw_rev; 8733 fc_fca_pm_t pm; 8734 8735 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8736 fcio->fcio_xfer != FCIO_XFER_READ) { 8737 rval = EINVAL; 8738 break; 8739 } 8740 bzero((caddr_t)&pm, sizeof (pm)); 8741 8742 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8743 8744 pm.pm_cmd_flags = FC_FCA_PM_READ; 8745 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8746 pm.pm_data_len = fcio->fcio_olen; 8747 pm.pm_data_buf = fw_rev; 8748 8749 ret = port->fp_fca_tran->fca_port_manage( 8750 port->fp_fca_handle, &pm); 8751 8752 if (ret == FC_SUCCESS) { 8753 if (ddi_copyout((void *)fw_rev, 8754 (void *)fcio->fcio_obuf, 8755 fcio->fcio_olen, mode) == 0) { 8756 if (fp_fcio_copyout(fcio, data, mode)) { 8757 rval = EFAULT; 8758 } 8759 } else { 8760 rval = EFAULT; 8761 } 8762 } else { 8763 if (fp_fcio_copyout(fcio, data, mode)) { 8764 rval = EFAULT; 8765 } 8766 rval = EIO; 8767 } 8768 kmem_free(fw_rev, fcio->fcio_olen); 8769 break; 8770 } 8771 8772 case FCIO_GET_DUMP_SIZE: { 8773 uint32_t dump_size; 8774 fc_fca_pm_t pm; 8775 8776 if (fcio->fcio_olen != sizeof (dump_size) || 8777 fcio->fcio_xfer != FCIO_XFER_READ) { 8778 rval = EINVAL; 8779 break; 8780 } 8781 bzero((caddr_t)&pm, sizeof (pm)); 8782 pm.pm_cmd_flags = FC_FCA_PM_READ; 8783 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8784 pm.pm_data_len = sizeof (dump_size); 8785 pm.pm_data_buf = (caddr_t)&dump_size; 8786 8787 ret = port->fp_fca_tran->fca_port_manage( 8788 port->fp_fca_handle, &pm); 8789 8790 if (ret == FC_SUCCESS) { 8791 if (ddi_copyout((void *)&dump_size, 8792 (void *)fcio->fcio_obuf, sizeof (dump_size), 8793 mode) == 0) { 8794 if (fp_fcio_copyout(fcio, data, mode)) { 8795 rval = EFAULT; 8796 } 8797 } else { 8798 rval = EFAULT; 8799 } 8800 } else { 8801 fcio->fcio_errno = ret; 8802 rval = EIO; 8803 if (fp_fcio_copyout(fcio, data, mode)) { 8804 rval = EFAULT; 8805 } 8806 } 8807 break; 8808 } 8809 8810 case FCIO_DOWNLOAD_FW: { 8811 caddr_t firmware; 8812 fc_fca_pm_t pm; 8813 8814 if (fcio->fcio_ilen <= 0 || 8815 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8816 rval = EINVAL; 8817 break; 8818 } 8819 8820 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8821 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8822 fcio->fcio_ilen, mode)) { 8823 rval = EFAULT; 8824 kmem_free(firmware, fcio->fcio_ilen); 8825 break; 8826 } 8827 8828 bzero((caddr_t)&pm, sizeof (pm)); 8829 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8830 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8831 pm.pm_data_len = fcio->fcio_ilen; 8832 pm.pm_data_buf = firmware; 8833 8834 ret = port->fp_fca_tran->fca_port_manage( 8835 
port->fp_fca_handle, &pm); 8836 8837 kmem_free(firmware, fcio->fcio_ilen); 8838 8839 if (ret != FC_SUCCESS) { 8840 fcio->fcio_errno = ret; 8841 rval = EIO; 8842 if (fp_fcio_copyout(fcio, data, mode)) { 8843 rval = EFAULT; 8844 } 8845 } 8846 break; 8847 } 8848 8849 case FCIO_DOWNLOAD_FCODE: { 8850 caddr_t fcode; 8851 fc_fca_pm_t pm; 8852 8853 if (fcio->fcio_ilen <= 0 || 8854 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8855 rval = EINVAL; 8856 break; 8857 } 8858 8859 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8860 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8861 fcio->fcio_ilen, mode)) { 8862 rval = EFAULT; 8863 kmem_free(fcode, fcio->fcio_ilen); 8864 break; 8865 } 8866 8867 bzero((caddr_t)&pm, sizeof (pm)); 8868 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8869 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8870 pm.pm_data_len = fcio->fcio_ilen; 8871 pm.pm_data_buf = fcode; 8872 8873 ret = port->fp_fca_tran->fca_port_manage( 8874 port->fp_fca_handle, &pm); 8875 8876 kmem_free(fcode, fcio->fcio_ilen); 8877 8878 if (ret != FC_SUCCESS) { 8879 fcio->fcio_errno = ret; 8880 rval = EIO; 8881 if (fp_fcio_copyout(fcio, data, mode)) { 8882 rval = EFAULT; 8883 } 8884 } 8885 break; 8886 } 8887 8888 case FCIO_FORCE_DUMP: 8889 ret = port->fp_fca_tran->fca_reset( 8890 port->fp_fca_handle, FC_FCA_CORE); 8891 8892 if (ret != FC_SUCCESS) { 8893 fcio->fcio_errno = ret; 8894 rval = EIO; 8895 if (fp_fcio_copyout(fcio, data, mode)) { 8896 rval = EFAULT; 8897 } 8898 } 8899 break; 8900 8901 case FCIO_GET_DUMP: { 8902 caddr_t dump; 8903 uint32_t dump_size; 8904 fc_fca_pm_t pm; 8905 8906 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8907 rval = EINVAL; 8908 break; 8909 } 8910 bzero((caddr_t)&pm, sizeof (pm)); 8911 8912 pm.pm_cmd_flags = FC_FCA_PM_READ; 8913 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8914 pm.pm_data_len = sizeof (dump_size); 8915 pm.pm_data_buf = (caddr_t)&dump_size; 8916 8917 ret = port->fp_fca_tran->fca_port_manage( 8918 port->fp_fca_handle, &pm); 8919 8920 if (ret != FC_SUCCESS) { 8921 fcio->fcio_errno = ret; 8922 rval = EIO; 8923 if (fp_fcio_copyout(fcio, data, mode)) { 8924 rval = EFAULT; 8925 } 8926 break; 8927 } 8928 if (fcio->fcio_olen != dump_size) { 8929 fcio->fcio_errno = FC_NOMEM; 8930 rval = EINVAL; 8931 if (fp_fcio_copyout(fcio, data, mode)) { 8932 rval = EFAULT; 8933 } 8934 break; 8935 } 8936 8937 dump = kmem_zalloc(dump_size, KM_SLEEP); 8938 8939 bzero((caddr_t)&pm, sizeof (pm)); 8940 pm.pm_cmd_flags = FC_FCA_PM_READ; 8941 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8942 pm.pm_data_len = dump_size; 8943 pm.pm_data_buf = dump; 8944 8945 ret = port->fp_fca_tran->fca_port_manage( 8946 port->fp_fca_handle, &pm); 8947 8948 if (ret == FC_SUCCESS) { 8949 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8950 dump_size, mode) == 0) { 8951 if (fp_fcio_copyout(fcio, data, mode)) { 8952 rval = EFAULT; 8953 } 8954 } else { 8955 rval = EFAULT; 8956 } 8957 } else { 8958 fcio->fcio_errno = ret; 8959 rval = EIO; 8960 if (fp_fcio_copyout(fcio, data, mode)) { 8961 rval = EFAULT; 8962 } 8963 } 8964 kmem_free(dump, dump_size); 8965 break; 8966 } 8967 8968 case FCIO_GET_TOPOLOGY: { 8969 uint32_t user_topology; 8970 8971 if (fcio->fcio_xfer != FCIO_XFER_READ || 8972 fcio->fcio_olen != sizeof (user_topology)) { 8973 rval = EINVAL; 8974 break; 8975 } 8976 8977 mutex_enter(&port->fp_mutex); 8978 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8979 user_topology = FC_TOP_UNKNOWN; 8980 } else { 8981 user_topology = port->fp_topology; 8982 } 8983 mutex_exit(&port->fp_mutex); 8984 8985 if (ddi_copyout((void *)&user_topology, 8986 
(void *)fcio->fcio_obuf, sizeof (user_topology), 8987 mode)) { 8988 rval = EFAULT; 8989 } 8990 break; 8991 } 8992 8993 case FCIO_RESET_LINK: { 8994 la_wwn_t pwwn; 8995 8996 /* 8997 * Look at the output buffer field; if this field has zero 8998 * bytes then attempt to reset the local link/loop. If the 8999 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 9000 * and if yes, determine the LFA and reset the remote LIP 9001 * by LINIT ELS. 9002 */ 9003 9004 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 9005 fcio->fcio_ilen != sizeof (pwwn)) { 9006 rval = EINVAL; 9007 break; 9008 } 9009 9010 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9011 sizeof (pwwn), mode)) { 9012 rval = EFAULT; 9013 break; 9014 } 9015 9016 mutex_enter(&port->fp_mutex); 9017 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 9018 mutex_exit(&port->fp_mutex); 9019 break; 9020 } 9021 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 9022 mutex_exit(&port->fp_mutex); 9023 9024 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 9025 if (job == NULL) { 9026 rval = ENOMEM; 9027 break; 9028 } 9029 job->job_counter = 1; 9030 job->job_private = (void *)&pwwn; 9031 9032 fctl_enque_job(port, job); 9033 fctl_jobwait(job); 9034 9035 mutex_enter(&port->fp_mutex); 9036 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 9037 mutex_exit(&port->fp_mutex); 9038 9039 if (job->job_result != FC_SUCCESS) { 9040 fcio->fcio_errno = job->job_result; 9041 rval = EIO; 9042 if (fp_fcio_copyout(fcio, data, mode)) { 9043 rval = EFAULT; 9044 } 9045 } 9046 fctl_dealloc_job(job); 9047 break; 9048 } 9049 9050 case FCIO_RESET_HARD: 9051 ret = port->fp_fca_tran->fca_reset( 9052 port->fp_fca_handle, FC_FCA_RESET); 9053 if (ret != FC_SUCCESS) { 9054 fcio->fcio_errno = ret; 9055 rval = EIO; 9056 if (fp_fcio_copyout(fcio, data, mode)) { 9057 rval = EFAULT; 9058 } 9059 } 9060 break; 9061 9062 case FCIO_RESET_HARD_CORE: 9063 ret = port->fp_fca_tran->fca_reset( 9064 port->fp_fca_handle, FC_FCA_RESET_CORE); 9065 if (ret != FC_SUCCESS) { 9066 rval = EIO; 9067 fcio->fcio_errno = ret; 9068 if (fp_fcio_copyout(fcio, data, mode)) { 9069 rval = EFAULT; 9070 } 9071 } 9072 break; 9073 9074 case FCIO_DIAG: { 9075 fc_fca_pm_t pm; 9076 9077 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9078 9079 /* Validate user buffer from ioctl call. 
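		 * Each of the three (length, buffer) pairs must be
		 * consistent: a positive length requires a buffer pointer
		 * and a zero or negative length requires a NULL pointer, so
		 * the copyin/copyout calls below only ever see matched pairs.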
		 */
		if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) ||
		    ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) ||
		    ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) ||
		    ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) ||
		    ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) ||
		    ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) {
			rval = EFAULT;
			break;
		}

		if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) {
			pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP);
			if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf,
			    fcio->fcio_ilen, mode)) {
				rval = EFAULT;
				goto fp_fcio_diag_cleanup;
			}
		}

		if ((pm.pm_data_len = fcio->fcio_alen) > 0) {
			pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP);
			if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf,
			    fcio->fcio_alen, mode)) {
				rval = EFAULT;
				goto fp_fcio_diag_cleanup;
			}
		}

		if ((pm.pm_stat_len = fcio->fcio_olen) > 0) {
			pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP);
		}

		pm.pm_cmd_code = FC_PORT_DIAG;
		pm.pm_cmd_flags = fcio->fcio_cmd_flags;

		ret = port->fp_fca_tran->fca_port_manage(
		    port->fp_fca_handle, &pm);

		if (ret != FC_SUCCESS) {
			if (ret == FC_INVALID_REQUEST) {
				rval = ENOTTY;
			} else {
				rval = EIO;
			}

			fcio->fcio_errno = ret;
			if (fp_fcio_copyout(fcio, data, mode)) {
				rval = EFAULT;
			}
			goto fp_fcio_diag_cleanup;
		}

		/*
		 * pm_stat_len will contain the number of status bytes
		 * an FCA driver requires to return the complete status
		 * of the requested diag operation. If the user buffer
		 * is not large enough to hold the entire status, we
		 * copy only the portion of data that fits in the buffer and
		 * return ENOMEM to the user application.
9139 */ 9140 if (pm.pm_stat_len > fcio->fcio_olen) { 9141 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9142 "fp:FCIO_DIAG:status buffer too small\n"); 9143 9144 rval = ENOMEM; 9145 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9146 fcio->fcio_olen, mode)) { 9147 rval = EFAULT; 9148 goto fp_fcio_diag_cleanup; 9149 } 9150 } else { 9151 /* 9152 * Copy only data pm_stat_len bytes of data 9153 */ 9154 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9155 pm.pm_stat_len, mode)) { 9156 rval = EFAULT; 9157 goto fp_fcio_diag_cleanup; 9158 } 9159 } 9160 9161 if (fp_fcio_copyout(fcio, data, mode)) { 9162 rval = EFAULT; 9163 } 9164 9165 fp_fcio_diag_cleanup: 9166 if (pm.pm_cmd_buf != NULL) { 9167 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9168 } 9169 if (pm.pm_data_buf != NULL) { 9170 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9171 } 9172 if (pm.pm_stat_buf != NULL) { 9173 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9174 } 9175 9176 break; 9177 } 9178 9179 case FCIO_GET_NODE_ID: { 9180 /* validate parameters */ 9181 if (fcio->fcio_xfer != FCIO_XFER_READ || 9182 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9183 rval = EINVAL; 9184 break; 9185 } 9186 9187 rval = fp_get_rnid(port, data, mode, fcio); 9188 9189 /* ioctl handling is over */ 9190 break; 9191 } 9192 9193 case FCIO_SEND_NODE_ID: { 9194 la_wwn_t pwwn; 9195 9196 /* validate parameters */ 9197 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9198 fcio->fcio_xfer != FCIO_XFER_READ) { 9199 rval = EINVAL; 9200 break; 9201 } 9202 9203 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9204 sizeof (la_wwn_t), mode)) { 9205 rval = EFAULT; 9206 break; 9207 } 9208 9209 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9210 9211 /* ioctl handling is over */ 9212 break; 9213 } 9214 9215 case FCIO_SET_NODE_ID: { 9216 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9217 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9218 rval = EINVAL; 9219 break; 9220 } 9221 9222 rval = fp_set_rnid(port, data, mode, fcio); 9223 break; 9224 } 9225 9226 case FCIO_LINK_STATUS: { 9227 fc_portid_t rls_req; 9228 fc_rls_acc_t *rls_acc; 9229 fc_fca_pm_t pm; 9230 uint32_t dest, src_id; 9231 fp_cmd_t *cmd; 9232 fc_remote_port_t *pd; 9233 uchar_t pd_flags; 9234 9235 /* validate parameters */ 9236 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9237 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9238 fcio->fcio_xfer != FCIO_XFER_RW) { 9239 rval = EINVAL; 9240 break; 9241 } 9242 9243 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9244 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9245 rval = EINVAL; 9246 break; 9247 } 9248 9249 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9250 sizeof (fc_portid_t), mode)) { 9251 rval = EFAULT; 9252 break; 9253 } 9254 9255 9256 /* Determine the destination of the RLS frame */ 9257 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9258 dest = FS_FABRIC_F_PORT; 9259 } else { 9260 dest = rls_req.port_id; 9261 } 9262 9263 mutex_enter(&port->fp_mutex); 9264 src_id = port->fp_port_id.port_id; 9265 mutex_exit(&port->fp_mutex); 9266 9267 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9268 if (dest == 0 || dest == src_id) { 9269 9270 /* Allocate memory for link error status block */ 9271 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9272 ASSERT(rls_acc != NULL); 9273 9274 /* Prepare the port management structure */ 9275 bzero((caddr_t)&pm, sizeof (pm)); 9276 9277 pm.pm_cmd_flags = FC_FCA_PM_READ; 9278 pm.pm_cmd_code = FC_PORT_RLS; 9279 pm.pm_data_len = sizeof (*rls_acc); 9280 pm.pm_data_buf = (caddr_t)rls_acc; 9281 9282 /* 
Get the adapter's link error status block */
			ret = port->fp_fca_tran->fca_port_manage(
			    port->fp_fca_handle, &pm);

			if (ret == FC_SUCCESS) {
				/* xfer link status block to userland */
				if (ddi_copyout((void *)rls_acc,
				    (void *)fcio->fcio_obuf,
				    sizeof (*rls_acc), mode) == 0) {
					if (fp_fcio_copyout(fcio, data,
					    mode)) {
						rval = EFAULT;
					}
				} else {
					rval = EFAULT;
				}
			} else {
				rval = EIO;
				fcio->fcio_errno = ret;
				if (fp_fcio_copyout(fcio, data, mode)) {
					rval = EFAULT;
				}
			}

			kmem_free(rls_acc, sizeof (*rls_acc));

			/* ioctl handling is over */
			break;
		}

		/*
		 * Send RLS to the destination port.
		 * Having the RLS frame destination be the F_port is not yet
		 * supported and will be implemented in the future, if needed.
		 * The following call to get "pd" will fail if dest is the
		 * F_port.
		 */
		pd = fctl_hold_remote_port_by_did(port, dest);
		if (pd == NULL) {
			fcio->fcio_errno = FC_BADOBJECT;
			rval = ENXIO;
			if (fp_fcio_copyout(fcio, data, mode)) {
				rval = EFAULT;
			}
			break;
		}

		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
			mutex_exit(&pd->pd_mutex);
			fctl_release_remote_port(pd);

			fcio->fcio_errno = FC_LOGINREQ;
			rval = EINVAL;
			if (fp_fcio_copyout(fcio, data, mode)) {
				rval = EFAULT;
			}
			break;
		}
		ASSERT(pd->pd_login_count >= 1);
		mutex_exit(&pd->pd_mutex);

		/*
		 * Allocate a job structure and set job_code as DUMMY,
		 * because we will not go through the job thread.
		 * Instead fp_sendcmd() is called directly here.
		 */
		job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
		    NULL, NULL, KM_SLEEP);
		ASSERT(job != NULL);

		job->job_counter = 1;

		cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t),
		    sizeof (la_els_rls_acc_t), KM_SLEEP, pd);
		if (cmd == NULL) {
			fcio->fcio_errno = FC_NOMEM;
			rval = ENOMEM;

			fctl_release_remote_port(pd);

			fctl_dealloc_job(job);
			if (fp_fcio_copyout(fcio, data, mode)) {
				rval = EFAULT;
			}
			break;
		}

		/* Allocate memory for link error status block */
		rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);

		mutex_enter(&port->fp_mutex);
		mutex_enter(&pd->pd_mutex);

		cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
		cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
		cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
		cmd->cmd_retry_count = 1;
		cmd->cmd_ulp_pkt = NULL;

		fp_rls_init(cmd, job);

		job->job_private = (void *)rls_acc;

		pd_flags = pd->pd_flags;
		pd->pd_flags = PD_ELS_IN_PROGRESS;

		mutex_exit(&pd->pd_mutex);
		mutex_exit(&port->fp_mutex);

		if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
			fctl_jobwait(job);

			fcio->fcio_errno = job->job_result;
			if (job->job_result == FC_SUCCESS) {
				ASSERT(pd != NULL);
				/*
				 * link error status block is now available.
9399 * Copy it to userland 9400 */ 9401 ASSERT(job->job_private == (void *)rls_acc); 9402 if (ddi_copyout((void *)rls_acc, 9403 (void *)fcio->fcio_obuf, 9404 sizeof (*rls_acc), mode) == 0) { 9405 if (fp_fcio_copyout(fcio, data, 9406 mode)) { 9407 rval = EFAULT; 9408 } 9409 } else { 9410 rval = EFAULT; 9411 } 9412 } else { 9413 rval = EIO; 9414 } 9415 } else { 9416 rval = EIO; 9417 fp_free_pkt(cmd); 9418 } 9419 9420 if (rval) { 9421 mutex_enter(&port->fp_mutex); 9422 mutex_enter(&pd->pd_mutex); 9423 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9424 pd->pd_flags = pd_flags; 9425 } 9426 mutex_exit(&pd->pd_mutex); 9427 mutex_exit(&port->fp_mutex); 9428 } 9429 9430 fctl_release_remote_port(pd); 9431 fctl_dealloc_job(job); 9432 kmem_free(rls_acc, sizeof (*rls_acc)); 9433 9434 if (fp_fcio_copyout(fcio, data, mode)) { 9435 rval = EFAULT; 9436 } 9437 break; 9438 } 9439 9440 case FCIO_NS: { 9441 fc_ns_cmd_t *ns_req; 9442 fc_ns_cmd32_t *ns_req32; 9443 fctl_ns_req_t *ns_cmd; 9444 9445 if (use32 == B_TRUE) { 9446 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9447 rval = EINVAL; 9448 break; 9449 } 9450 9451 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9452 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9453 9454 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9455 sizeof (*ns_req32), mode)) { 9456 rval = EFAULT; 9457 kmem_free(ns_req, sizeof (*ns_req)); 9458 kmem_free(ns_req32, sizeof (*ns_req32)); 9459 break; 9460 } 9461 9462 ns_req->ns_flags = ns_req32->ns_flags; 9463 ns_req->ns_cmd = ns_req32->ns_cmd; 9464 ns_req->ns_req_len = ns_req32->ns_req_len; 9465 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9466 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9467 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9468 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9469 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9470 9471 kmem_free(ns_req32, sizeof (*ns_req32)); 9472 } else { 9473 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9474 rval = EINVAL; 9475 break; 9476 } 9477 9478 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9479 9480 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9481 sizeof (fc_ns_cmd_t), mode)) { 9482 rval = EFAULT; 9483 kmem_free(ns_req, sizeof (*ns_req)); 9484 break; 9485 } 9486 } 9487 9488 if (ns_req->ns_req_len <= 0) { 9489 rval = EINVAL; 9490 kmem_free(ns_req, sizeof (*ns_req)); 9491 break; 9492 } 9493 9494 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9495 ASSERT(job != NULL); 9496 9497 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9498 ns_req->ns_resp_len, ns_req->ns_resp_len, 9499 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9500 ASSERT(ns_cmd != NULL); 9501 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9502 9503 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9504 ns_cmd->ns_gan_max = 1; 9505 ns_cmd->ns_gan_index = 0; 9506 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9507 } 9508 9509 if (ddi_copyin(ns_req->ns_req_payload, 9510 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9511 rval = EFAULT; 9512 fctl_free_ns_cmd(ns_cmd); 9513 fctl_dealloc_job(job); 9514 kmem_free(ns_req, sizeof (*ns_req)); 9515 break; 9516 } 9517 9518 job->job_private = (void *)ns_cmd; 9519 fctl_enque_job(port, job); 9520 fctl_jobwait(job); 9521 rval = job->job_result; 9522 9523 if (rval == FC_SUCCESS) { 9524 if (ns_req->ns_resp_len) { 9525 if (ddi_copyout(ns_cmd->ns_data_buf, 9526 ns_req->ns_resp_payload, 9527 ns_cmd->ns_data_len, mode)) { 9528 rval = EFAULT; 9529 fctl_free_ns_cmd(ns_cmd); 9530 fctl_dealloc_job(job); 9531 kmem_free(ns_req, sizeof (*ns_req)); 9532 break; 9533 } 9534 } 9535 } else { 9536 rval = EIO; 9537 } 9538 
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9539 fctl_free_ns_cmd(ns_cmd); 9540 fctl_dealloc_job(job); 9541 kmem_free(ns_req, sizeof (*ns_req)); 9542 9543 if (fp_fcio_copyout(fcio, data, mode)) { 9544 rval = EFAULT; 9545 } 9546 break; 9547 } 9548 9549 default: 9550 rval = ENOTTY; 9551 break; 9552 } 9553 9554 /* 9555 * If set, reset the EXCL busy bit to 9556 * receive other exclusive access commands 9557 */ 9558 mutex_enter(&port->fp_mutex); 9559 if (port->fp_flag & FP_EXCL_BUSY) { 9560 port->fp_flag &= ~FP_EXCL_BUSY; 9561 } 9562 mutex_exit(&port->fp_mutex); 9563 9564 return (rval); 9565 } 9566 9567 9568 /* 9569 * This function assumes that the response length 9570 * is same regardless of data model (LP32 or LP64) 9571 * which is true for all the ioctls currently 9572 * supported. 9573 */ 9574 static int 9575 fp_copyout(void *from, void *to, size_t len, int mode) 9576 { 9577 return (ddi_copyout(from, to, len, mode)); 9578 } 9579 9580 /* 9581 * This function does the set rnid 9582 */ 9583 static int 9584 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9585 { 9586 int rval = 0; 9587 fc_rnid_t *rnid; 9588 fc_fca_pm_t pm; 9589 9590 /* Allocate memory for node id block */ 9591 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9592 9593 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9594 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9595 kmem_free(rnid, sizeof (fc_rnid_t)); 9596 return (EFAULT); 9597 } 9598 9599 /* Prepare the port management structure */ 9600 bzero((caddr_t)&pm, sizeof (pm)); 9601 9602 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9603 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9604 pm.pm_data_len = sizeof (*rnid); 9605 pm.pm_data_buf = (caddr_t)rnid; 9606 9607 /* Get the adapter's node data */ 9608 rval = port->fp_fca_tran->fca_port_manage( 9609 port->fp_fca_handle, &pm); 9610 9611 if (rval != FC_SUCCESS) { 9612 fcio->fcio_errno = rval; 9613 rval = EIO; 9614 if (fp_fcio_copyout(fcio, data, mode)) { 9615 rval = EFAULT; 9616 } 9617 } else { 9618 mutex_enter(&port->fp_mutex); 9619 /* copy to the port structure */ 9620 bcopy(rnid, &port->fp_rnid_params, 9621 sizeof (port->fp_rnid_params)); 9622 mutex_exit(&port->fp_mutex); 9623 } 9624 9625 kmem_free(rnid, sizeof (fc_rnid_t)); 9626 9627 if (rval != FC_SUCCESS) { 9628 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9629 } 9630 9631 return (rval); 9632 } 9633 9634 /* 9635 * This function does the local pwwn get rnid 9636 */ 9637 static int 9638 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9639 { 9640 fc_rnid_t *rnid; 9641 fc_fca_pm_t pm; 9642 int rval = 0; 9643 uint32_t ret; 9644 9645 /* Allocate memory for rnid data block */ 9646 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9647 9648 mutex_enter(&port->fp_mutex); 9649 if (port->fp_rnid_init == 1) { 9650 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9651 mutex_exit(&port->fp_mutex); 9652 /* xfer node info to userland */ 9653 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9654 sizeof (*rnid), mode) == 0) { 9655 if (fp_fcio_copyout(fcio, data, mode)) { 9656 rval = EFAULT; 9657 } 9658 } else { 9659 rval = EFAULT; 9660 } 9661 9662 kmem_free(rnid, sizeof (fc_rnid_t)); 9663 9664 if (rval != FC_SUCCESS) { 9665 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9666 rval); 9667 } 9668 9669 return (rval); 9670 } 9671 mutex_exit(&port->fp_mutex); 9672 9673 /* Prepare the port management structure */ 9674 bzero((caddr_t)&pm, sizeof (pm)); 9675 9676 pm.pm_cmd_flags = FC_FCA_PM_READ; 
	pm.pm_cmd_code = FC_PORT_GET_NODE_ID;
	pm.pm_data_len = sizeof (fc_rnid_t);
	pm.pm_data_buf = (caddr_t)rnid;

	/* Get the adapter's node data */
	ret = port->fp_fca_tran->fca_port_manage(
	    port->fp_fca_handle,
	    &pm);

	if (ret == FC_SUCCESS) {
		/* initialize in the port_info */
		mutex_enter(&port->fp_mutex);
		port->fp_rnid_init = 1;
		bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid));
		mutex_exit(&port->fp_mutex);

		/* xfer node info to userland */
		if (ddi_copyout((void *)rnid,
		    (void *)fcio->fcio_obuf,
		    sizeof (*rnid), mode) == 0) {
			if (fp_fcio_copyout(fcio, data,
			    mode)) {
				rval = EFAULT;
			}
		} else {
			rval = EFAULT;
		}
	} else {
		rval = EIO;
		fcio->fcio_errno = ret;
		if (fp_fcio_copyout(fcio, data, mode)) {
			rval = EFAULT;
		}
	}

	kmem_free(rnid, sizeof (fc_rnid_t));

	if (rval != FC_SUCCESS) {
		FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval);
	}

	return (rval);
}

static int
fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio,
    la_wwn_t *pwwn)
{
	int			rval = 0;
	fc_remote_port_t	*pd;
	fp_cmd_t		*cmd;
	job_request_t		*job;
	la_els_rnid_acc_t	*rnid_acc;

	pd = fctl_get_remote_port_by_pwwn(port, pwwn);
	if (pd == NULL) {
		/*
		 * We can safely assume that the destination port
		 * is logged in. Either userland will explicitly
		 * log in before issuing the RNID ioctl or the device
		 * would have been configured, meaning already logged in.
		 */

		FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO);

		return (ENXIO);
	}
	/*
	 * Allocate a job structure and set job_code as DUMMY,
	 * because we will not go through the job thread.
	 * Instead fp_sendcmd() is called directly here.
	 */
	job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC,
	    NULL, NULL, KM_SLEEP);

	ASSERT(job != NULL);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t),
	    sizeof (la_els_rnid_acc_t), KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		rval = ENOMEM;

		fctl_dealloc_job(job);
		if (fp_fcio_copyout(fcio, data, mode)) {
			rval = EFAULT;
		}

		FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval);

		return (rval);
	}

	/* Allocate memory for node id accept block */
	rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_rnid_init(cmd, fcio->fcio_cmd_flags, job);

	job->job_private = (void *)rnid_acc;

	pd->pd_flags = PD_ELS_IN_PROGRESS;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) {
		fctl_jobwait(job);
		fcio->fcio_errno = job->job_result;
		if (job->job_result == FC_SUCCESS) {
			int	rnid_cnt;
			ASSERT(pd != NULL);
			/*
			 * node id block is now available.
9801 * Copy it to userland 9802 */ 9803 ASSERT(job->job_private == (void *)rnid_acc); 9804 9805 /* get the response length */ 9806 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9807 rnid_acc->hdr.cmn_len + 9808 rnid_acc->hdr.specific_len; 9809 9810 if (fcio->fcio_olen < rnid_cnt) { 9811 rval = EINVAL; 9812 } else if (ddi_copyout((void *)rnid_acc, 9813 (void *)fcio->fcio_obuf, 9814 rnid_cnt, mode) == 0) { 9815 if (fp_fcio_copyout(fcio, data, 9816 mode)) { 9817 rval = EFAULT; 9818 } 9819 } else { 9820 rval = EFAULT; 9821 } 9822 } else { 9823 rval = EIO; 9824 } 9825 } else { 9826 rval = EIO; 9827 if (pd) { 9828 mutex_enter(&pd->pd_mutex); 9829 pd->pd_flags = PD_IDLE; 9830 mutex_exit(&pd->pd_mutex); 9831 } 9832 fp_free_pkt(cmd); 9833 } 9834 9835 fctl_dealloc_job(job); 9836 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9837 9838 if (fp_fcio_copyout(fcio, data, mode)) { 9839 rval = EFAULT; 9840 } 9841 9842 if (rval != FC_SUCCESS) { 9843 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9844 } 9845 9846 return (rval); 9847 } 9848 9849 /* 9850 * Copy out to userland 9851 */ 9852 static int 9853 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9854 { 9855 int rval; 9856 9857 #ifdef _MULTI_DATAMODEL 9858 switch (ddi_model_convert_from(mode & FMODELS)) { 9859 case DDI_MODEL_ILP32: { 9860 struct fcio32 fcio32; 9861 9862 fcio32.fcio_xfer = fcio->fcio_xfer; 9863 fcio32.fcio_cmd = fcio->fcio_cmd; 9864 fcio32.fcio_flags = fcio->fcio_flags; 9865 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9866 fcio32.fcio_ilen = fcio->fcio_ilen; 9867 fcio32.fcio_ibuf = 9868 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9869 fcio32.fcio_olen = fcio->fcio_olen; 9870 fcio32.fcio_obuf = 9871 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9872 fcio32.fcio_alen = fcio->fcio_alen; 9873 fcio32.fcio_abuf = 9874 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9875 fcio32.fcio_errno = fcio->fcio_errno; 9876 9877 rval = ddi_copyout((void *)&fcio32, (void *)data, 9878 sizeof (struct fcio32), mode); 9879 break; 9880 } 9881 case DDI_MODEL_NONE: 9882 rval = ddi_copyout((void *)fcio, (void *)data, 9883 sizeof (fcio_t), mode); 9884 break; 9885 } 9886 #else 9887 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9888 #endif 9889 9890 return (rval); 9891 } 9892 9893 9894 static void 9895 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9896 { 9897 uint32_t listlen; 9898 fc_portmap_t *changelist; 9899 9900 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9901 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9902 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9903 9904 listlen = 0; 9905 changelist = NULL; 9906 9907 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9908 if (port->fp_statec_busy > 1) { 9909 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9910 } 9911 } 9912 mutex_exit(&port->fp_mutex); 9913 9914 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9915 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9916 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9917 listlen, listlen, KM_SLEEP); 9918 9919 mutex_enter(&port->fp_mutex); 9920 } else { 9921 ASSERT(changelist == NULL && listlen == 0); 9922 mutex_enter(&port->fp_mutex); 9923 if (--port->fp_statec_busy == 0) { 9924 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9925 } 9926 } 9927 } 9928 9929 static int 9930 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9931 { 9932 int rval; 9933 int count; 9934 int index; 9935 int num_devices; 9936 fc_remote_node_t *node; 9937 fc_port_dev_t *devlist; 9938 struct pwwn_hash 
*head; 9939 fc_remote_port_t *pd; 9940 9941 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9942 9943 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9944 9945 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9946 9947 for (count = index = 0; index < pwwn_table_size; index++) { 9948 head = &port->fp_pwwn_table[index]; 9949 pd = head->pwwn_head; 9950 while (pd != NULL) { 9951 mutex_enter(&pd->pd_mutex); 9952 if (pd->pd_state == PORT_DEVICE_INVALID) { 9953 mutex_exit(&pd->pd_mutex); 9954 pd = pd->pd_wwn_hnext; 9955 continue; 9956 } 9957 9958 devlist[count].dev_state = pd->pd_state; 9959 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9960 devlist[count].dev_did = pd->pd_port_id; 9961 devlist[count].dev_did.priv_lilp_posit = 9962 (uint8_t)(index & 0xff); 9963 bcopy((caddr_t)pd->pd_fc4types, 9964 (caddr_t)devlist[count].dev_type, 9965 sizeof (pd->pd_fc4types)); 9966 9967 bcopy((caddr_t)&pd->pd_port_name, 9968 (caddr_t)&devlist[count].dev_pwwn, 9969 sizeof (la_wwn_t)); 9970 9971 node = pd->pd_remote_nodep; 9972 mutex_exit(&pd->pd_mutex); 9973 9974 if (node) { 9975 mutex_enter(&node->fd_mutex); 9976 bcopy((caddr_t)&node->fd_node_name, 9977 (caddr_t)&devlist[count].dev_nwwn, 9978 sizeof (la_wwn_t)); 9979 mutex_exit(&node->fd_mutex); 9980 } 9981 count++; 9982 if (count >= num_devices) { 9983 goto found; 9984 } 9985 } 9986 } 9987 found: 9988 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9989 sizeof (count), mode)) { 9990 rval = FC_FAILURE; 9991 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9992 sizeof (fc_port_dev_t) * num_devices, mode)) { 9993 rval = FC_FAILURE; 9994 } else { 9995 rval = FC_SUCCESS; 9996 } 9997 9998 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9999 10000 return (rval); 10001 } 10002 10003 10004 /* 10005 * Handle Fabric ONLINE 10006 */ 10007 static void 10008 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 10009 { 10010 int index; 10011 int rval; 10012 int dbg_count; 10013 int count = 0; 10014 char ww_name[17]; 10015 uint32_t d_id; 10016 uint32_t listlen; 10017 fctl_ns_req_t *ns_cmd; 10018 struct pwwn_hash *head; 10019 fc_remote_port_t *pd; 10020 fc_remote_port_t *npd; 10021 fc_portmap_t *changelist; 10022 10023 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10024 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 10025 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 10026 10027 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 10028 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 10029 0, KM_SLEEP); 10030 10031 ASSERT(ns_cmd != NULL); 10032 10033 ns_cmd->ns_cmd_code = NS_GID_PN; 10034 10035 /* 10036 * Check if orphans are showing up now 10037 */ 10038 if (port->fp_orphan_count) { 10039 fc_orphan_t *orp; 10040 fc_orphan_t *norp = NULL; 10041 fc_orphan_t *prev = NULL; 10042 10043 for (orp = port->fp_orphan_list; orp; orp = norp) { 10044 norp = orp->orp_next; 10045 mutex_exit(&port->fp_mutex); 10046 orp->orp_nscan++; 10047 10048 job->job_counter = 1; 10049 job->job_result = FC_SUCCESS; 10050 10051 ((ns_req_gid_pn_t *) 10052 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 10053 ((ns_resp_gid_pn_t *) 10054 ns_cmd->ns_data_buf)->pid.port_id = 0; 10055 ((ns_resp_gid_pn_t *) 10056 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 10057 10058 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10059 if (rval == FC_SUCCESS) { 10060 d_id = 10061 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10062 pd = fp_create_remote_port_by_ns(port, 10063 d_id, KM_SLEEP); 10064 10065 if (pd != NULL) { 10066 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 
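
					/*
					 * The orphan has shown up in the
					 * name server again: log it, take it
					 * off the orphan list, and mark the
					 * new remote port for the PLOGI pass
					 * further below.
					 */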
10067 10068 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10069 0, NULL, "N_x Port with D_ID=%x," 10070 " PWWN=%s reappeared in fabric", 10071 d_id, ww_name); 10072 10073 mutex_enter(&port->fp_mutex); 10074 if (prev) { 10075 prev->orp_next = orp->orp_next; 10076 } else { 10077 ASSERT(orp == 10078 port->fp_orphan_list); 10079 port->fp_orphan_list = 10080 orp->orp_next; 10081 } 10082 port->fp_orphan_count--; 10083 mutex_exit(&port->fp_mutex); 10084 kmem_free(orp, sizeof (*orp)); 10085 count++; 10086 10087 mutex_enter(&pd->pd_mutex); 10088 pd->pd_flags = PD_ELS_MARK; 10089 10090 mutex_exit(&pd->pd_mutex); 10091 } else { 10092 prev = orp; 10093 } 10094 } else { 10095 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10096 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10097 10098 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10099 NULL, 10100 " Port WWN %s removed from orphan" 10101 " list after %d scans", ww_name, 10102 orp->orp_nscan); 10103 10104 mutex_enter(&port->fp_mutex); 10105 if (prev) { 10106 prev->orp_next = orp->orp_next; 10107 } else { 10108 ASSERT(orp == 10109 port->fp_orphan_list); 10110 port->fp_orphan_list = 10111 orp->orp_next; 10112 } 10113 port->fp_orphan_count--; 10114 mutex_exit(&port->fp_mutex); 10115 10116 kmem_free(orp, sizeof (*orp)); 10117 } else { 10118 prev = orp; 10119 } 10120 } 10121 mutex_enter(&port->fp_mutex); 10122 } 10123 } 10124 10125 /* 10126 * Walk the Port WWN hash table, reestablish LOGIN 10127 * if a LOGIN is already performed on a particular 10128 * device; Any failure to LOGIN should mark the 10129 * port device OLD. 10130 */ 10131 for (index = 0; index < pwwn_table_size; index++) { 10132 head = &port->fp_pwwn_table[index]; 10133 npd = head->pwwn_head; 10134 10135 while ((pd = npd) != NULL) { 10136 la_wwn_t *pwwn; 10137 10138 npd = pd->pd_wwn_hnext; 10139 10140 /* 10141 * Don't count in the port devices that are new 10142 * unless the total number of devices visible 10143 * through this port is less than FP_MAX_DEVICES 10144 */ 10145 mutex_enter(&pd->pd_mutex); 10146 if (port->fp_dev_count >= FP_MAX_DEVICES || 10147 (port->fp_options & FP_TARGET_MODE)) { 10148 if (pd->pd_type == PORT_DEVICE_NEW || 10149 pd->pd_flags == PD_ELS_MARK || 10150 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10151 mutex_exit(&pd->pd_mutex); 10152 continue; 10153 } 10154 } else { 10155 if (pd->pd_flags == PD_ELS_MARK || 10156 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10157 mutex_exit(&pd->pd_mutex); 10158 continue; 10159 } 10160 pd->pd_type = PORT_DEVICE_OLD; 10161 } 10162 count++; 10163 10164 /* 10165 * Consult with the name server about D_ID changes 10166 */ 10167 job->job_counter = 1; 10168 job->job_result = FC_SUCCESS; 10169 10170 ((ns_req_gid_pn_t *) 10171 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10172 ((ns_resp_gid_pn_t *) 10173 ns_cmd->ns_data_buf)->pid.port_id = 0; 10174 10175 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10176 pid.priv_lilp_posit = 0; 10177 10178 pwwn = &pd->pd_port_name; 10179 pd->pd_flags = PD_ELS_MARK; 10180 10181 mutex_exit(&pd->pd_mutex); 10182 mutex_exit(&port->fp_mutex); 10183 10184 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10185 if (rval != FC_SUCCESS) { 10186 fc_wwn_to_str(pwwn, ww_name); 10187 10188 mutex_enter(&pd->pd_mutex); 10189 d_id = pd->pd_port_id.port_id; 10190 pd->pd_type = PORT_DEVICE_DELETE; 10191 mutex_exit(&pd->pd_mutex); 10192 10193 FP_TRACE(FP_NHEAD1(3, 0), 10194 "fp_fabric_online: PD " 10195 "disappeared; d_id=%x, PWWN=%s", 10196 d_id, ww_name); 10197 10198 FP_TRACE(FP_NHEAD2(9, 0), 10199 "N_x Port with D_ID=%x, PWWN=%s" 10200 " 
disappeared from fabric", d_id, 10201 ww_name); 10202 10203 mutex_enter(&port->fp_mutex); 10204 continue; 10205 } 10206 10207 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10208 10209 mutex_enter(&port->fp_mutex); 10210 mutex_enter(&pd->pd_mutex); 10211 if (d_id != pd->pd_port_id.port_id) { 10212 fctl_delist_did_table(port, pd); 10213 fc_wwn_to_str(pwwn, ww_name); 10214 10215 FP_TRACE(FP_NHEAD2(9, 0), 10216 "D_ID of a device with PWWN %s changed." 10217 " New D_ID = %x, OLD D_ID = %x", ww_name, 10218 d_id, pd->pd_port_id.port_id); 10219 10220 pd->pd_port_id.port_id = BE_32(d_id); 10221 pd->pd_type = PORT_DEVICE_CHANGED; 10222 fctl_enlist_did_table(port, pd); 10223 } 10224 mutex_exit(&pd->pd_mutex); 10225 10226 } 10227 } 10228 10229 if (ns_cmd) { 10230 fctl_free_ns_cmd(ns_cmd); 10231 } 10232 10233 listlen = 0; 10234 changelist = NULL; 10235 if (count) { 10236 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10237 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10238 mutex_exit(&port->fp_mutex); 10239 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10240 mutex_enter(&port->fp_mutex); 10241 } 10242 10243 dbg_count = 0; 10244 10245 job->job_counter = count; 10246 10247 for (index = 0; index < pwwn_table_size; index++) { 10248 head = &port->fp_pwwn_table[index]; 10249 npd = head->pwwn_head; 10250 10251 while ((pd = npd) != NULL) { 10252 npd = pd->pd_wwn_hnext; 10253 10254 mutex_enter(&pd->pd_mutex); 10255 if (pd->pd_flags != PD_ELS_MARK) { 10256 mutex_exit(&pd->pd_mutex); 10257 continue; 10258 } 10259 10260 dbg_count++; 10261 10262 /* 10263 * If it is already marked deletion, nothing 10264 * else to do. 10265 */ 10266 if (pd->pd_type == PORT_DEVICE_DELETE) { 10267 pd->pd_type = PORT_DEVICE_OLD; 10268 10269 mutex_exit(&pd->pd_mutex); 10270 mutex_exit(&port->fp_mutex); 10271 fp_jobdone(job); 10272 mutex_enter(&port->fp_mutex); 10273 10274 continue; 10275 } 10276 10277 /* 10278 * If it is freshly discovered out of 10279 * the orphan list, nothing else to do 10280 */ 10281 if (pd->pd_type == PORT_DEVICE_NEW) { 10282 pd->pd_flags = PD_IDLE; 10283 10284 mutex_exit(&pd->pd_mutex); 10285 mutex_exit(&port->fp_mutex); 10286 fp_jobdone(job); 10287 mutex_enter(&port->fp_mutex); 10288 10289 continue; 10290 } 10291 10292 pd->pd_flags = PD_IDLE; 10293 d_id = pd->pd_port_id.port_id; 10294 10295 /* 10296 * Explicitly mark all devices OLD; successful 10297 * PLOGI should reset this to either NO_CHANGE 10298 * or CHANGED. 
10299 */ 10300 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10301 pd->pd_type = PORT_DEVICE_OLD; 10302 } 10303 10304 mutex_exit(&pd->pd_mutex); 10305 mutex_exit(&port->fp_mutex); 10306 10307 rval = fp_port_login(port, d_id, job, 10308 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10309 10310 if (rval != FC_SUCCESS) { 10311 fp_jobdone(job); 10312 } 10313 mutex_enter(&port->fp_mutex); 10314 } 10315 } 10316 mutex_exit(&port->fp_mutex); 10317 10318 ASSERT(dbg_count == count); 10319 fp_jobwait(job); 10320 10321 mutex_enter(&port->fp_mutex); 10322 10323 ASSERT(port->fp_statec_busy > 0); 10324 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10325 if (port->fp_statec_busy > 1) { 10326 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10327 } 10328 } 10329 mutex_exit(&port->fp_mutex); 10330 } else { 10331 ASSERT(port->fp_statec_busy > 0); 10332 if (port->fp_statec_busy > 1) { 10333 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10334 } 10335 mutex_exit(&port->fp_mutex); 10336 } 10337 10338 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10339 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10340 10341 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10342 listlen, listlen, KM_SLEEP); 10343 10344 mutex_enter(&port->fp_mutex); 10345 } else { 10346 ASSERT(changelist == NULL && listlen == 0); 10347 mutex_enter(&port->fp_mutex); 10348 if (--port->fp_statec_busy == 0) { 10349 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10350 } 10351 } 10352 } 10353 10354 10355 /* 10356 * Fill out device list for userland ioctl in private loop 10357 */ 10358 static int 10359 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10360 { 10361 int rval; 10362 int count; 10363 int index; 10364 int num_devices; 10365 fc_remote_node_t *node; 10366 fc_port_dev_t *devlist; 10367 int lilp_device_count; 10368 fc_lilpmap_t *lilp_map; 10369 uchar_t *alpa_list; 10370 10371 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10372 10373 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10374 if (port->fp_total_devices > port->fp_dev_count && 10375 num_devices >= port->fp_total_devices) { 10376 job_request_t *job; 10377 10378 mutex_exit(&port->fp_mutex); 10379 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10380 job->job_counter = 1; 10381 10382 mutex_enter(&port->fp_mutex); 10383 fp_get_loopmap(port, job); 10384 mutex_exit(&port->fp_mutex); 10385 10386 fp_jobwait(job); 10387 fctl_dealloc_job(job); 10388 } else { 10389 mutex_exit(&port->fp_mutex); 10390 } 10391 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10392 10393 mutex_enter(&port->fp_mutex); 10394 10395 /* 10396 * Applications are accustomed to getting the device list in 10397 * LILP map order. The HBA firmware usually returns the device 10398 * map in the LILP map order and diagnostic applications would 10399 * prefer to receive in the device list in that order too 10400 */ 10401 lilp_map = &port->fp_lilp_map; 10402 alpa_list = &lilp_map->lilp_alpalist[0]; 10403 10404 /* 10405 * the length field corresponds to the offset in the LILP frame 10406 * which begins with 1. The thing to note here is that the 10407 * lilp_device_count is 1 more than fp->fp_total_devices since 10408 * the host adapter's alpa also shows up in the lilp map. We 10409 * don't however return details of the host adapter since 10410 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10411 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10412 * ioctl to obtain details about the host adapter port. 
10413 */ 10414 lilp_device_count = lilp_map->lilp_length; 10415 10416 for (count = index = 0; index < lilp_device_count && 10417 count < num_devices; index++) { 10418 uint32_t d_id; 10419 fc_remote_port_t *pd; 10420 10421 d_id = alpa_list[index]; 10422 10423 mutex_exit(&port->fp_mutex); 10424 pd = fctl_get_remote_port_by_did(port, d_id); 10425 mutex_enter(&port->fp_mutex); 10426 10427 if (pd != NULL) { 10428 mutex_enter(&pd->pd_mutex); 10429 10430 if (pd->pd_state == PORT_DEVICE_INVALID) { 10431 mutex_exit(&pd->pd_mutex); 10432 continue; 10433 } 10434 10435 devlist[count].dev_state = pd->pd_state; 10436 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10437 devlist[count].dev_did = pd->pd_port_id; 10438 devlist[count].dev_did.priv_lilp_posit = 10439 (uint8_t)(index & 0xff); 10440 bcopy((caddr_t)pd->pd_fc4types, 10441 (caddr_t)devlist[count].dev_type, 10442 sizeof (pd->pd_fc4types)); 10443 10444 bcopy((caddr_t)&pd->pd_port_name, 10445 (caddr_t)&devlist[count].dev_pwwn, 10446 sizeof (la_wwn_t)); 10447 10448 node = pd->pd_remote_nodep; 10449 mutex_exit(&pd->pd_mutex); 10450 10451 if (node) { 10452 mutex_enter(&node->fd_mutex); 10453 bcopy((caddr_t)&node->fd_node_name, 10454 (caddr_t)&devlist[count].dev_nwwn, 10455 sizeof (la_wwn_t)); 10456 mutex_exit(&node->fd_mutex); 10457 } 10458 count++; 10459 } 10460 } 10461 10462 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10463 sizeof (count), mode)) { 10464 rval = FC_FAILURE; 10465 } 10466 10467 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10468 sizeof (fc_port_dev_t) * num_devices, mode)) { 10469 rval = FC_FAILURE; 10470 } else { 10471 rval = FC_SUCCESS; 10472 } 10473 10474 kmem_free(devlist, sizeof (*devlist) * num_devices); 10475 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10476 10477 return (rval); 10478 } 10479 10480 10481 /* 10482 * Completion function for responses to unsolicited commands 10483 */ 10484 static void 10485 fp_unsol_intr(fc_packet_t *pkt) 10486 { 10487 fp_cmd_t *cmd; 10488 fc_local_port_t *port; 10489 10490 cmd = pkt->pkt_ulp_private; 10491 port = cmd->cmd_port; 10492 10493 mutex_enter(&port->fp_mutex); 10494 port->fp_out_fpcmds--; 10495 mutex_exit(&port->fp_mutex); 10496 10497 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10498 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10499 "couldn't post response to unsolicited request;" 10500 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10501 pkt->pkt_resp_fhdr.rx_id); 10502 } 10503 10504 if (cmd == port->fp_els_resp_pkt) { 10505 mutex_enter(&port->fp_mutex); 10506 port->fp_els_resp_pkt_busy = 0; 10507 mutex_exit(&port->fp_mutex); 10508 return; 10509 } 10510 10511 fp_free_pkt(cmd); 10512 } 10513 10514 10515 /* 10516 * solicited LINIT ELS completion function 10517 */ 10518 static void 10519 fp_linit_intr(fc_packet_t *pkt) 10520 { 10521 fp_cmd_t *cmd; 10522 job_request_t *job; 10523 fc_linit_resp_t acc; 10524 10525 cmd = (fp_cmd_t *)pkt->pkt_ulp_private; 10526 10527 mutex_enter(&cmd->cmd_port->fp_mutex); 10528 cmd->cmd_port->fp_out_fpcmds--; 10529 mutex_exit(&cmd->cmd_port->fp_mutex); 10530 10531 if (FP_IS_PKT_ERROR(pkt)) { 10532 (void) fp_common_intr(pkt, 1); 10533 return; 10534 } 10535 10536 job = cmd->cmd_job; 10537 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10538 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10539 if (acc.status != FC_LINIT_SUCCESS) { 10540 job->job_result = FC_FAILURE; 10541 } else { 10542 job->job_result = FC_SUCCESS; 10543 } 10544 10545 fp_iodone(cmd); 10546 } 10547 10548 10549 /* 10550 * Decode the unsolicited request; For FC-4 Device 
and Link data frames
 * notify the registered ULP of this FC-4 type right here. For unsolicited
 * ELS requests, submit a request to the job_handler thread to work on it.
 * The intent is to act quickly on the FC-4 unsolicited link and data frames
 * and to save much of the interrupt-time processing of unsolicited ELS
 * requests by handing them off to the job_handler thread.
 */
static void
fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type)
{
	uchar_t			r_ctl;
	uchar_t			ls_code;
	uint32_t		s_id;
	uint32_t		rscn_count = FC_INVALID_RSCN_COUNT;
	uint32_t		cb_arg;
	fp_cmd_t		*cmd;
	fc_local_port_t		*port;
	job_request_t		*job;
	fc_remote_port_t	*pd;

	port = port_handle;

	FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x,"
	    " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x"
	    " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x"
	    " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id,
	    buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl,
	    buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt,
	    buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro,
	    buf->ub_buffer[0]);

	if (type & 0x80000000) {
		/*
		 * Huh? Nothing much can be done without
		 * a valid buffer. So just exit.
		 */
		return;
	}
	/*
	 * If unsolicited interrupts arrive while it isn't
	 * safe to handle unsolicited callbacks, drop them, yes,
	 * drop them on the floor.
	 */
	mutex_enter(&port->fp_mutex);
	port->fp_active_ubs++;
	if ((port->fp_soft_state &
	    (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) ||
	    FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {

		FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is "
s_id=%x, d_id=%x, type=%x, " 10601 "seq_id=%x, ox_id=%x, rx_id=%x" 10602 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10603 buf->ub_frame.type, buf->ub_frame.seq_id, 10604 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10605 10606 ASSERT(port->fp_active_ubs > 0); 10607 if (--(port->fp_active_ubs) == 0) { 10608 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10609 } 10610 10611 mutex_exit(&port->fp_mutex); 10612 10613 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10614 1, &buf->ub_token); 10615 10616 return; 10617 } 10618 10619 r_ctl = buf->ub_frame.r_ctl; 10620 s_id = buf->ub_frame.s_id; 10621 if (port->fp_active_ubs == 1) { 10622 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10623 } 10624 10625 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10626 port->fp_statec_busy) { 10627 mutex_exit(&port->fp_mutex); 10628 pd = fctl_get_remote_port_by_did(port, s_id); 10629 if (pd) { 10630 mutex_enter(&pd->pd_mutex); 10631 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10632 FP_TRACE(FP_NHEAD1(3, 0), 10633 "LOGO for LOGGED IN D_ID %x", 10634 buf->ub_frame.s_id); 10635 pd->pd_state = PORT_DEVICE_VALID; 10636 } 10637 mutex_exit(&pd->pd_mutex); 10638 } 10639 10640 mutex_enter(&port->fp_mutex); 10641 ASSERT(port->fp_active_ubs > 0); 10642 if (--(port->fp_active_ubs) == 0) { 10643 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10644 } 10645 mutex_exit(&port->fp_mutex); 10646 10647 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10648 1, &buf->ub_token); 10649 10650 FP_TRACE(FP_NHEAD1(3, 0), 10651 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10652 buf->ub_frame.s_id); 10653 return; 10654 } 10655 10656 if (port->fp_els_resp_pkt_busy == 0) { 10657 if (r_ctl == R_CTL_ELS_REQ) { 10658 ls_code = buf->ub_buffer[0]; 10659 10660 switch (ls_code) { 10661 case LA_ELS_PLOGI: 10662 case LA_ELS_FLOGI: 10663 port->fp_els_resp_pkt_busy = 1; 10664 mutex_exit(&port->fp_mutex); 10665 fp_i_handle_unsol_els(port, buf); 10666 10667 mutex_enter(&port->fp_mutex); 10668 ASSERT(port->fp_active_ubs > 0); 10669 if (--(port->fp_active_ubs) == 0) { 10670 port->fp_soft_state &= 10671 ~FP_SOFT_IN_UNSOL_CB; 10672 } 10673 mutex_exit(&port->fp_mutex); 10674 port->fp_fca_tran->fca_ub_release( 10675 port->fp_fca_handle, 1, &buf->ub_token); 10676 10677 return; 10678 case LA_ELS_RSCN: 10679 if (++(port)->fp_rscn_count == 10680 FC_INVALID_RSCN_COUNT) { 10681 ++(port)->fp_rscn_count; 10682 } 10683 rscn_count = port->fp_rscn_count; 10684 break; 10685 10686 default: 10687 break; 10688 } 10689 } 10690 } else if ((r_ctl == R_CTL_ELS_REQ) && 10691 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10692 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10693 ++port->fp_rscn_count; 10694 } 10695 rscn_count = port->fp_rscn_count; 10696 } 10697 10698 mutex_exit(&port->fp_mutex); 10699 10700 switch (r_ctl & R_CTL_ROUTING) { 10701 case R_CTL_DEVICE_DATA: 10702 /* 10703 * If the unsolicited buffer is a CT IU, 10704 * have the job_handler thread work on it. 10705 */ 10706 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10707 break; 10708 } 10709 /* FALLTHROUGH */ 10710 10711 case R_CTL_FC4_SVC: { 10712 int sendup = 0; 10713 10714 /* 10715 * If a LOGIN isn't performed before this request 10716 * shut the door on this port with a reply that a 10717 * LOGIN is required. We make an exception however 10718 * for IP broadcast packets and pass them through 10719 * to the IP ULP(s) to handle broadcast requests. 
10720 * This is not a problem for private loop devices 10721 * but for fabric topologies we don't log into the 10722 * remote ports during port initialization and 10723 * the ULPs need to log into requesting ports on 10724 * demand. 10725 */ 10726 pd = fctl_get_remote_port_by_did(port, s_id); 10727 if (pd) { 10728 mutex_enter(&pd->pd_mutex); 10729 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10730 sendup++; 10731 } 10732 mutex_exit(&pd->pd_mutex); 10733 } else if ((pd == NULL) && 10734 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10735 (buf->ub_frame.d_id == 0xffffff || 10736 buf->ub_frame.d_id == 0x00)) { 10737 /* broadcast IP frame - so sendup via job thread */ 10738 break; 10739 } 10740 10741 /* 10742 * Send all FC4 services via job thread too 10743 */ 10744 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10745 break; 10746 } 10747 10748 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10749 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10750 return; 10751 } 10752 10753 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10754 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10755 0, KM_NOSLEEP, pd); 10756 if (cmd != NULL) { 10757 fp_els_rjt_init(port, cmd, buf, 10758 FC_ACTION_NON_RETRYABLE, 10759 FC_REASON_LOGIN_REQUIRED, NULL); 10760 10761 if (fp_sendcmd(port, cmd, 10762 port->fp_fca_handle) != FC_SUCCESS) { 10763 fp_free_pkt(cmd); 10764 } 10765 } 10766 } 10767 10768 mutex_enter(&port->fp_mutex); 10769 ASSERT(port->fp_active_ubs > 0); 10770 if (--(port->fp_active_ubs) == 0) { 10771 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10772 } 10773 mutex_exit(&port->fp_mutex); 10774 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10775 1, &buf->ub_token); 10776 10777 return; 10778 } 10779 10780 default: 10781 break; 10782 } 10783 10784 /* 10785 * Submit a Request to the job_handler thread to work 10786 * on the unsolicited request. The potential side effect 10787 * of this is that the unsolicited buffer takes a little 10788 * longer to get released but we save interrupt time in 10789 * the bargain. 10790 */ 10791 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10792 10793 /* 10794 * One way that the rscn_count will get used is described below: 10795 * 10796 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10797 * 2. Before mutex is released, a copy of it is stored in rscn_count. 10798 * 3. The count is passed to job thread as JOB_UNSOL_REQUEST (below) 10799 * by overloading the job_cb_arg to pass the rscn_count 10800 * 4. When one of the routines processing the RSCN picks it up (ex: 10801 * fp_validate_rscn_page()), it passes this count in the map 10802 * structure (as part of the map_rscn_info structure member) to the 10803 * ULPs. 10804 * 5. When ULPs make calls back to the transport (example interfaces for 10805 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10806 * can now pass back this count as part of the fc_packet's 10807 * pkt_ulp_rscn_count member. fcp does this currently. 10808 * 6. When transport gets a call to transport a command on the wire, it 10809 * will check to see if there is a valid pkt_ulp_rsvd1 field in the 10810 * fc_packet. If there is, it will match that info with the current 10811 * rscn_count on that instance of the port. If they don't match up 10812 * then there was a newer RSCN. The ULP gets back an error code which 10813 * informs it about it - FC_DEVICE_BUSY_NEW_RSCN. 10814 * 7. At this point the ULP is free to make up its own mind as to how to 10815 * handle this.
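 *    As a sketch only (the names follow the description above, not
 *    necessarily the exact fcp code), steps 5 and 6 from the ULP side
 *    look roughly like:
 *
 *	pkt->pkt_ulp_rscn_count = count_from_map;    (saved in step 4/5)
 *	if (fc_ulp_transport(ulp_handle, pkt) == FC_DEVICE_BUSY_NEW_RSCN) {
 *		a newer RSCN was posted; retry after the next
 *		state change callback arrives
 *	}
 *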
Currently, fcp will reset its retry counters and keep 10816 * retrying the operation it was doing in anticipation of getting a 10817 * new state change call back for the new RSCN. 10818 */ 10819 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10820 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10821 if (job == NULL) { 10822 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10823 "couldn't submit a job to the thread, failing.."); 10824 10825 mutex_enter(&port->fp_mutex); 10826 10827 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10828 --port->fp_rscn_count; 10829 } 10830 10831 ASSERT(port->fp_active_ubs > 0); 10832 if (--(port->fp_active_ubs) == 0) { 10833 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10834 } 10835 10836 mutex_exit(&port->fp_mutex); 10837 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10838 1, &buf->ub_token); 10839 10840 return; 10841 } 10842 job->job_private = (void *)buf; 10843 fctl_enque_job(port, job); 10844 } 10845 10846 10847 /* 10848 * Handle unsolicited requests 10849 */ 10850 static void 10851 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10852 job_request_t *job) 10853 { 10854 uchar_t r_ctl; 10855 uchar_t ls_code; 10856 uint32_t s_id; 10857 fp_cmd_t *cmd; 10858 fc_remote_port_t *pd; 10859 fp_unsol_spec_t *ub_spec; 10860 10861 r_ctl = buf->ub_frame.r_ctl; 10862 s_id = buf->ub_frame.s_id; 10863 10864 switch (r_ctl & R_CTL_ROUTING) { 10865 case R_CTL_EXTENDED_SVC: 10866 if (r_ctl != R_CTL_ELS_REQ) { 10867 break; 10868 } 10869 10870 ls_code = buf->ub_buffer[0]; 10871 switch (ls_code) { 10872 case LA_ELS_LOGO: 10873 case LA_ELS_ADISC: 10874 case LA_ELS_PRLO: 10875 pd = fctl_get_remote_port_by_did(port, s_id); 10876 if (pd == NULL) { 10877 if (!FC_IS_REAL_DEVICE(s_id)) { 10878 break; 10879 } 10880 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10881 break; 10882 } 10883 if ((cmd = fp_alloc_pkt(port, 10884 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10885 NULL)) == NULL) { 10886 /* 10887 * Can this actually fail when 10888 * given KM_SLEEP? (Could be used 10889 * this way in a number of places.) 10890 */ 10891 break; 10892 } 10893 10894 fp_els_rjt_init(port, cmd, buf, 10895 FC_ACTION_NON_RETRYABLE, 10896 FC_REASON_INVALID_LINK_CTRL, job); 10897 10898 if (fp_sendcmd(port, cmd, 10899 port->fp_fca_handle) != FC_SUCCESS) { 10900 fp_free_pkt(cmd); 10901 } 10902 10903 break; 10904 } 10905 if (ls_code == LA_ELS_LOGO) { 10906 fp_handle_unsol_logo(port, buf, pd, job); 10907 } else if (ls_code == LA_ELS_ADISC) { 10908 fp_handle_unsol_adisc(port, buf, pd, job); 10909 } else { 10910 fp_handle_unsol_prlo(port, buf, pd, job); 10911 } 10912 break; 10913 10914 case LA_ELS_PLOGI: 10915 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10916 break; 10917 10918 case LA_ELS_FLOGI: 10919 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10920 break; 10921 10922 case LA_ELS_RSCN: 10923 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10924 break; 10925 10926 default: 10927 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10928 ub_spec->port = port; 10929 ub_spec->buf = buf; 10930 10931 (void) taskq_dispatch(port->fp_taskq, 10932 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10933 return; 10934 } 10935 break; 10936 10937 case R_CTL_BASIC_SVC: 10938 /* 10939 * The unsolicited basic link services could be ABTS 10940 * and RMC (Or even a NOP). Just BA_RJT them until 10941 * such time there arises a need to handle them more 10942 * carefully. 
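 * (For reference, the BA_RJT built by fp_ba_rjt_init() below simply
 * carries FC_REASON_CMD_UNSUPPORTED with FC_EXPLN_NONE.)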
10943 */ 10944 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10945 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10946 0, KM_SLEEP, NULL); 10947 if (cmd != NULL) { 10948 fp_ba_rjt_init(port, cmd, buf, job); 10949 if (fp_sendcmd(port, cmd, 10950 port->fp_fca_handle) != FC_SUCCESS) { 10951 fp_free_pkt(cmd); 10952 } 10953 } 10954 } 10955 break; 10956 10957 case R_CTL_DEVICE_DATA: 10958 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10959 /* 10960 * Mostly this is of type FC_TYPE_FC_SERVICES. 10961 * As we don't like any Unsolicited FC services 10962 * requests, we would do well to RJT them as 10963 * well. 10964 */ 10965 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10966 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10967 0, KM_SLEEP, NULL); 10968 if (cmd != NULL) { 10969 fp_els_rjt_init(port, cmd, buf, 10970 FC_ACTION_NON_RETRYABLE, 10971 FC_REASON_INVALID_LINK_CTRL, job); 10972 10973 if (fp_sendcmd(port, cmd, 10974 port->fp_fca_handle) != 10975 FC_SUCCESS) { 10976 fp_free_pkt(cmd); 10977 } 10978 } 10979 } 10980 break; 10981 } 10982 /* FALLTHROUGH */ 10983 10984 case R_CTL_FC4_SVC: 10985 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10986 ub_spec->port = port; 10987 ub_spec->buf = buf; 10988 10989 (void) taskq_dispatch(port->fp_taskq, 10990 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10991 return; 10992 10993 case R_CTL_LINK_CTL: 10994 /* 10995 * Turn deaf ear on unsolicited link control frames. 10996 * Typical unsolicited link control Frame is an LCR 10997 * (to reset End to End credit to the default login 10998 * value and abort current sequences for all classes) 10999 * An intelligent microcode/firmware should handle 11000 * this transparently at its level and not pass all 11001 * the way up here. 11002 * 11003 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 11004 * or F_BSY. P_RJT is chosen to be the most appropriate 11005 * at this time. 11006 */ 11007 /* FALLTHROUGH */ 11008 11009 default: 11010 /* 11011 * Just reject everything else as an invalid request. 11012 */ 11013 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11014 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11015 0, KM_SLEEP, NULL); 11016 if (cmd != NULL) { 11017 fp_els_rjt_init(port, cmd, buf, 11018 FC_ACTION_NON_RETRYABLE, 11019 FC_REASON_INVALID_LINK_CTRL, job); 11020 11021 if (fp_sendcmd(port, cmd, 11022 port->fp_fca_handle) != FC_SUCCESS) { 11023 fp_free_pkt(cmd); 11024 } 11025 } 11026 } 11027 break; 11028 } 11029 11030 mutex_enter(&port->fp_mutex); 11031 ASSERT(port->fp_active_ubs > 0); 11032 if (--(port->fp_active_ubs) == 0) { 11033 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 11034 } 11035 mutex_exit(&port->fp_mutex); 11036 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 11037 1, &buf->ub_token); 11038 } 11039 11040 11041 /* 11042 * Prepare a BA_RJT and send it over. 
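 * (Strictly speaking, this routine only initializes the packet and its
 * payload; the caller issues it with fp_sendcmd() and frees it if the
 * send fails.)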
11043 */ 11044 static void 11045 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11046 job_request_t *job) 11047 { 11048 fc_packet_t *pkt; 11049 la_ba_rjt_t payload; 11050 11051 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11052 11053 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11054 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11055 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11056 cmd->cmd_retry_count = 1; 11057 cmd->cmd_ulp_pkt = NULL; 11058 11059 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11060 cmd->cmd_job = job; 11061 11062 pkt = &cmd->cmd_pkt; 11063 11064 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 11065 11066 payload.reserved = 0; 11067 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 11068 payload.explanation = FC_EXPLN_NONE; 11069 payload.vendor = 0; 11070 11071 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11072 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11073 } 11074 11075 11076 /* 11077 * Prepare an LS_RJT and send it over 11078 */ 11079 static void 11080 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11081 uchar_t action, uchar_t reason, job_request_t *job) 11082 { 11083 fc_packet_t *pkt; 11084 la_els_rjt_t payload; 11085 11086 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11087 11088 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11089 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11090 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11091 cmd->cmd_retry_count = 1; 11092 cmd->cmd_ulp_pkt = NULL; 11093 11094 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11095 cmd->cmd_job = job; 11096 11097 pkt = &cmd->cmd_pkt; 11098 11099 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11100 11101 payload.ls_code.ls_code = LA_ELS_RJT; 11102 payload.ls_code.mbz = 0; 11103 payload.action = action; 11104 payload.reason = reason; 11105 payload.reserved = 0; 11106 payload.vu = 0; 11107 11108 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11109 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11110 } 11111 11112 /* 11113 * Function: fp_prlo_acc_init 11114 * 11115 * Description: Initializes a Link Service Accept for a PRLO. 11116 * 11117 * Arguments: *port Local port through which the PRLO was 11118 * received. 11119 * cmd Command that will carry the accept. 11120 * *buf Unsolicited buffer containing the PRLO 11121 * request. 11122 * job Job request. 11123 * sleep Allocation mode. 11124 * 11125 * Return Value: *cmd Command containing the response. 11126 * 11127 * Context: Depends on the parameter sleep. 11128 */ 11129 fp_cmd_t * 11130 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11131 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11132 { 11133 fp_cmd_t *cmd; 11134 fc_packet_t *pkt; 11135 la_els_prlo_t *req; 11136 size_t len; 11137 uint16_t flags; 11138 11139 req = (la_els_prlo_t *)buf->ub_buffer; 11140 len = (size_t)ntohs(req->payload_length); 11141 11142 /* 11143 * The payload of the accept to a PRLO has to be the exact match of 11144 * the payload of the request (with the exception of the code). 11145 */ 11146 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11147 11148 if (cmd) { 11149 /* 11150 * The fp command was successfully allocated.
11151 */ 11152 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11153 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11154 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11155 cmd->cmd_retry_count = 1; 11156 cmd->cmd_ulp_pkt = NULL; 11157 11158 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11159 cmd->cmd_job = job; 11160 11161 pkt = &cmd->cmd_pkt; 11162 11163 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11164 FC_TYPE_EXTENDED_LS); 11165 11166 /* The code is overwritten for the copy. */ 11167 req->ls_code = LA_ELS_ACC; 11168 /* Response code is set. */ 11169 flags = ntohs(req->flags); 11170 flags &= ~SP_RESP_CODE_MASK; 11171 flags |= SP_RESP_CODE_REQ_EXECUTED; 11172 req->flags = htons(flags); 11173 11174 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11175 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11176 } 11177 return (cmd); 11178 } 11179 11180 /* 11181 * Prepare an ACC response to an ELS request 11182 */ 11183 static void 11184 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11185 job_request_t *job) 11186 { 11187 fc_packet_t *pkt; 11188 ls_code_t payload; 11189 11190 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11191 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11192 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11193 cmd->cmd_retry_count = 1; 11194 cmd->cmd_ulp_pkt = NULL; 11195 11196 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11197 cmd->cmd_job = job; 11198 11199 pkt = &cmd->cmd_pkt; 11200 11201 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11202 11203 payload.ls_code = LA_ELS_ACC; 11204 payload.mbz = 0; 11205 11206 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11207 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11208 } 11209 11210 /* 11211 * Unsolicited PRLO handler 11212 * 11213 * A Process Logout should be handled by the ULP that established it. However, 11214 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11215 * when a device implicitly logs out an initiator (for whatever reason) and 11216 * tries to get that initiator to re-establish the connection (PLOGI and PRLI). 11217 * The logical thing to do for the device would be to send a LOGO in response 11218 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11219 * a PRLO instead. 11220 * 11221 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11222 * think that the Port Login has been lost. If we follow the Fibre Channel 11223 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11224 * the Port Login has also been lost, the remote port will reject the PRLI 11225 * indicating that we must PLOGI first. The initiator will then turn around and 11226 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11227 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11228 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11229 * needed would be received by FCP. FCP would have, then, to tell the transport 11230 * (fp) to PLOGI. The problem is, the transport would still think the Port 11231 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11232 * if you think it's not necessary". To work around that difficulty, the PRLO 11233 * is treated by the transport as a LOGO. The downside to it is a Port Login 11234 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11235 * has nothing to do with the PRLO) may be impacted.
However, this is a 11236 * scenario very unlikely to happen. As of today the only ULP in Leadville 11237 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11238 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11239 * unlikely). 11240 */ 11241 static void 11242 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11243 fc_remote_port_t *pd, job_request_t *job) 11244 { 11245 int busy; 11246 int rval; 11247 int retain; 11248 fp_cmd_t *cmd; 11249 fc_portmap_t *listptr; 11250 boolean_t tolerance; 11251 la_els_prlo_t *req; 11252 11253 req = (la_els_prlo_t *)buf->ub_buffer; 11254 11255 if ((ntohs(req->payload_length) != 11256 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11257 (req->page_length != sizeof (service_parameter_page_t))) { 11258 /* 11259 * We are being very restrictive. Only one page per 11260 * payload. If that is not the case we reject the ELS, although 11261 * we should reply indicating we handle only a single page 11262 * per PRLO. 11263 */ 11264 goto fp_reject_prlo; 11265 } 11266 11267 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11268 /* 11269 * This is in case the payload advertises a size bigger than 11270 * what it really is. 11271 */ 11272 goto fp_reject_prlo; 11273 } 11274 11275 mutex_enter(&port->fp_mutex); 11276 busy = port->fp_statec_busy; 11277 mutex_exit(&port->fp_mutex); 11278 11279 mutex_enter(&pd->pd_mutex); 11280 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11281 if (!busy) { 11282 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11283 pd->pd_state == PORT_DEVICE_INVALID || 11284 pd->pd_flags == PD_ELS_IN_PROGRESS || 11285 pd->pd_type == PORT_DEVICE_OLD) { 11286 busy++; 11287 } 11288 } 11289 11290 if (busy) { 11291 mutex_exit(&pd->pd_mutex); 11292 11293 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11294 "pd=%p - busy", 11295 pd->pd_port_id.port_id, pd); 11296 11297 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11298 goto fp_reject_prlo; 11299 } 11300 } else { 11301 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ?
1 : 0; 11302 11303 if (tolerance) { 11304 fctl_tc_reset(&pd->pd_logo_tc); 11305 retain = 0; 11306 pd->pd_state = PORT_DEVICE_INVALID; 11307 } 11308 11309 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11310 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11311 tolerance, retain); 11312 11313 pd->pd_aux_flags |= PD_LOGGED_OUT; 11314 mutex_exit(&pd->pd_mutex); 11315 11316 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11317 if (cmd == NULL) { 11318 return; 11319 } 11320 11321 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11322 if (rval != FC_SUCCESS) { 11323 fp_free_pkt(cmd); 11324 return; 11325 } 11326 11327 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11328 11329 if (retain) { 11330 fp_unregister_login(pd); 11331 fctl_copy_portmap(listptr, pd); 11332 } else { 11333 uint32_t d_id; 11334 char ww_name[17]; 11335 11336 mutex_enter(&pd->pd_mutex); 11337 d_id = pd->pd_port_id.port_id; 11338 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11339 mutex_exit(&pd->pd_mutex); 11340 11341 FP_TRACE(FP_NHEAD2(9, 0), 11342 "N_x Port with D_ID=%x, PWWN=%s logged out" 11343 " %d times in %d us; Giving up", d_id, ww_name, 11344 FC_LOGO_TOLERANCE_LIMIT, 11345 FC_LOGO_TOLERANCE_TIME_LIMIT); 11346 11347 fp_fillout_old_map(listptr, pd, 0); 11348 listptr->map_type = PORT_DEVICE_OLD; 11349 } 11350 11351 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11352 return; 11353 } 11354 11355 fp_reject_prlo: 11356 11357 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11358 if (cmd != NULL) { 11359 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11360 FC_REASON_INVALID_LINK_CTRL, job); 11361 11362 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11363 fp_free_pkt(cmd); 11364 } 11365 } 11366 } 11367 11368 /* 11369 * Unsolicited LOGO handler 11370 */ 11371 static void 11372 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11373 fc_remote_port_t *pd, job_request_t *job) 11374 { 11375 int busy; 11376 int rval; 11377 int retain; 11378 fp_cmd_t *cmd; 11379 fc_portmap_t *listptr; 11380 boolean_t tolerance; 11381 11382 mutex_enter(&port->fp_mutex); 11383 busy = port->fp_statec_busy; 11384 mutex_exit(&port->fp_mutex); 11385 11386 mutex_enter(&pd->pd_mutex); 11387 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11388 if (!busy) { 11389 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11390 pd->pd_state == PORT_DEVICE_INVALID || 11391 pd->pd_flags == PD_ELS_IN_PROGRESS || 11392 pd->pd_type == PORT_DEVICE_OLD) { 11393 busy++; 11394 } 11395 } 11396 11397 if (busy) { 11398 mutex_exit(&pd->pd_mutex); 11399 11400 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11401 "pd=%p - busy", 11402 pd->pd_port_id.port_id, pd); 11403 11404 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11405 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11406 0, KM_SLEEP, pd); 11407 if (cmd != NULL) { 11408 fp_els_rjt_init(port, cmd, buf, 11409 FC_ACTION_NON_RETRYABLE, 11410 FC_REASON_INVALID_LINK_CTRL, job); 11411 11412 if (fp_sendcmd(port, cmd, 11413 port->fp_fca_handle) != FC_SUCCESS) { 11414 fp_free_pkt(cmd); 11415 } 11416 } 11417 } 11418 } else { 11419 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11420 11421 if (tolerance) { 11422 fctl_tc_reset(&pd->pd_logo_tc); 11423 retain = 0; 11424 pd->pd_state = PORT_DEVICE_INVALID; 11425 } 11426 11427 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11428 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11429 tolerance, retain); 11430 11431 pd->pd_aux_flags |= PD_LOGGED_OUT; 11432 mutex_exit(&pd->pd_mutex); 11433 11434 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11435 KM_SLEEP, pd); 11436 if (cmd == NULL) { 11437 return; 11438 } 11439 11440 fp_els_acc_init(port, cmd, buf, job); 11441 11442 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11443 if (rval != FC_SUCCESS) { 11444 fp_free_pkt(cmd); 11445 return; 11446 } 11447 11448 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11449 11450 if (retain) { 11451 job_request_t *job; 11452 fctl_ns_req_t *ns_cmd; 11453 11454 /* 11455 * When we get a LOGO, first try to get the PID from the 11456 * name server; if that fails, there is no need to 11457 * send a PLOGI to that remote port 11458 */ 11459 job = fctl_alloc_job( 11460 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11461 11462 if (job != NULL) { 11463 ns_cmd = fctl_alloc_ns_cmd( 11464 sizeof (ns_req_gid_pn_t), 11465 sizeof (ns_resp_gid_pn_t), 11466 sizeof (ns_resp_gid_pn_t), 11467 0, KM_SLEEP); 11468 if (ns_cmd != NULL) { 11469 int ret; 11470 job->job_result = FC_SUCCESS; 11471 ns_cmd->ns_cmd_code = NS_GID_PN; 11472 ((ns_req_gid_pn_t *) 11473 (ns_cmd->ns_cmd_buf))->pwwn = 11474 pd->pd_port_name; 11475 ret = fp_ns_query( 11476 port, ns_cmd, job, 1, KM_SLEEP); 11477 if ((ret != FC_SUCCESS) || 11478 (job->job_result != FC_SUCCESS)) { 11479 fctl_free_ns_cmd(ns_cmd); 11480 fctl_dealloc_job(job); 11481 FP_TRACE(FP_NHEAD2(9, 0), 11482 "NS query failed," 11483 " delete pd"); 11484 goto delete_pd; 11485 } 11486 fctl_free_ns_cmd(ns_cmd); 11487 } 11488 fctl_dealloc_job(job); 11489 } 11490 fp_unregister_login(pd); 11491 fctl_copy_portmap(listptr, pd); 11492 } else { 11493 uint32_t d_id; 11494 char ww_name[17]; 11495 11496 delete_pd: 11497 mutex_enter(&pd->pd_mutex); 11498 d_id = pd->pd_port_id.port_id; 11499 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11500 mutex_exit(&pd->pd_mutex); 11501 11502 FP_TRACE(FP_NHEAD2(9, 0), 11503 "N_x Port with D_ID=%x, PWWN=%s logged out" 11504 " %d times in %d us; Giving up", d_id, ww_name, 11505 FC_LOGO_TOLERANCE_LIMIT, 11506 FC_LOGO_TOLERANCE_TIME_LIMIT); 11507 11508 fp_fillout_old_map(listptr, pd, 0); 11509 listptr->map_type = PORT_DEVICE_OLD; 11510 } 11511 11512 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11513 } 11514 } 11515 11516 11517 /* 11518 * Perform general purpose preparation of a response to an unsolicited request 11519 */ 11520 static void 11521 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11522 uchar_t r_ctl, uchar_t type) 11523 { 11524 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11525 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11526 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11527 pkt->pkt_cmd_fhdr.type = type; 11528 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11529 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11530 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11531 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11532 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11533 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11534 pkt->pkt_cmd_fhdr.ro = 0; 11535 pkt->pkt_cmd_fhdr.rsvd = 0; 11536 pkt->pkt_comp = fp_unsol_intr; 11537 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11538 } 11539 11540 /* 11541 * Immediate handling of unsolicited FLOGI and PLOGI
requests. In the 11542 * early development days of public loop soc+ firmware, numerous problems 11543 * were encountered (the details are undocumented and history now) which 11544 * led to the birth of this function. 11545 * 11546 * If a pre-allocated unsolicited response packet is free, send out an 11547 * immediate response, otherwise submit the request to the port thread 11548 * to do the deferred processing. 11549 */ 11550 static void 11551 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11552 { 11553 int sent; 11554 int f_port; 11555 int do_acc; 11556 fp_cmd_t *cmd; 11557 la_els_logi_t *payload; 11558 fc_remote_port_t *pd; 11559 char dww_name[17]; 11560 11561 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11562 11563 cmd = port->fp_els_resp_pkt; 11564 11565 mutex_enter(&port->fp_mutex); 11566 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11567 mutex_exit(&port->fp_mutex); 11568 11569 switch (buf->ub_buffer[0]) { 11570 case LA_ELS_PLOGI: { 11571 int small; 11572 11573 payload = (la_els_logi_t *)buf->ub_buffer; 11574 11575 f_port = FP_IS_F_PORT(payload-> 11576 common_service.cmn_features) ? 1 : 0; 11577 11578 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11579 &payload->nport_ww_name); 11580 pd = fctl_get_remote_port_by_pwwn(port, 11581 &payload->nport_ww_name); 11582 if (pd) { 11583 mutex_enter(&pd->pd_mutex); 11584 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11585 /* 11586 * Most likely this means a cross login is in 11587 * progress or a device about to be yanked out. 11588 * Only accept the plogi if my wwn is smaller. 11589 */ 11590 if (pd->pd_type == PORT_DEVICE_OLD) { 11591 sent = 1; 11592 } 11593 /* 11594 * Stop plogi request (if any) 11595 * attempt from local side to speedup 11596 * the discovery progress. 11597 * Mark the pd as PD_PLOGI_RECEPIENT. 11598 */ 11599 if (f_port == 0 && small < 0) { 11600 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11601 } 11602 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11603 11604 mutex_exit(&pd->pd_mutex); 11605 11606 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11607 "Unsol PLOGI received. PD still exists in the " 11608 "PWWN list. pd=%p PWWN=%s, sent=%x", 11609 pd, dww_name, sent); 11610 11611 if (f_port == 0 && small < 0) { 11612 FP_TRACE(FP_NHEAD1(3, 0), 11613 "fp_i_handle_unsol_els: Mark the pd" 11614 " as plogi recipient, pd=%p, PWWN=%s" 11615 ", sent=%x", 11616 pd, dww_name, sent); 11617 } 11618 } else { 11619 sent = 0; 11620 } 11621 11622 /* 11623 * To avoid Login collisions, accept only if my WWN 11624 * is smaller than the requester (A curious side note 11625 * would be that this rule may not satisfy the PLOGIs 11626 * initiated by the switch from not-so-well known 11627 * ports such as 0xFFFC41) 11628 */ 11629 if ((f_port == 0 && small < 0) || 11630 (((small > 0 && do_acc) || 11631 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11632 if (fp_is_class_supported(port->fp_cos, 11633 buf->ub_class) == FC_FAILURE) { 11634 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11635 cmd->cmd_pkt.pkt_cmdlen = 11636 sizeof (la_els_rjt_t); 11637 cmd->cmd_pkt.pkt_rsplen = 0; 11638 fp_els_rjt_init(port, cmd, buf, 11639 FC_ACTION_NON_RETRYABLE, 11640 FC_REASON_CLASS_NOT_SUPP, NULL); 11641 FP_TRACE(FP_NHEAD1(3, 0), 11642 "fp_i_handle_unsol_els: " 11643 "Unsupported class. 
" 11644 "Rejecting PLOGI"); 11645 11646 } else { 11647 mutex_enter(&port->fp_mutex); 11648 port->fp_els_resp_pkt_busy = 0; 11649 mutex_exit(&port->fp_mutex); 11650 return; 11651 } 11652 } else { 11653 cmd->cmd_pkt.pkt_cmdlen = 11654 sizeof (la_els_logi_t); 11655 cmd->cmd_pkt.pkt_rsplen = 0; 11656 11657 /* 11658 * Sometime later, we should validate 11659 * the service parameters instead of 11660 * just accepting it. 11661 */ 11662 fp_login_acc_init(port, cmd, buf, NULL, 11663 KM_NOSLEEP); 11664 FP_TRACE(FP_NHEAD1(3, 0), 11665 "fp_i_handle_unsol_els: Accepting PLOGI," 11666 " f_port=%d, small=%d, do_acc=%d," 11667 " sent=%d.", f_port, small, do_acc, 11668 sent); 11669 /* 11670 * If fp_port_id is zero and topology is 11671 * Point-to-Point, get the local port id from 11672 * the d_id in the PLOGI request. 11673 * If the outgoing FLOGI hasn't been accepted, 11674 * the topology will be unknown here. But it's 11675 * still safe to save the d_id to fp_port_id, 11676 * just because it will be overwritten later 11677 * if the topology is not Point-to-Point. 11678 */ 11679 mutex_enter(&port->fp_mutex); 11680 if ((port->fp_port_id.port_id == 0) && 11681 (port->fp_topology == FC_TOP_PT_PT || 11682 port->fp_topology == FC_TOP_UNKNOWN)) { 11683 port->fp_port_id.port_id = 11684 buf->ub_frame.d_id; 11685 } 11686 mutex_exit(&port->fp_mutex); 11687 } 11688 } else { 11689 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11690 port->fp_options & FP_SEND_RJT) { 11691 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11692 cmd->cmd_pkt.pkt_rsplen = 0; 11693 fp_els_rjt_init(port, cmd, buf, 11694 FC_ACTION_NON_RETRYABLE, 11695 FC_REASON_LOGICAL_BSY, NULL); 11696 FP_TRACE(FP_NHEAD1(3, 0), 11697 "fp_i_handle_unsol_els: " 11698 "Rejecting PLOGI with Logical Busy." 11699 "Possible Login collision."); 11700 } else { 11701 mutex_enter(&port->fp_mutex); 11702 port->fp_els_resp_pkt_busy = 0; 11703 mutex_exit(&port->fp_mutex); 11704 return; 11705 } 11706 } 11707 break; 11708 } 11709 11710 case LA_ELS_FLOGI: 11711 if (fp_is_class_supported(port->fp_cos, 11712 buf->ub_class) == FC_FAILURE) { 11713 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11714 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11715 cmd->cmd_pkt.pkt_rsplen = 0; 11716 fp_els_rjt_init(port, cmd, buf, 11717 FC_ACTION_NON_RETRYABLE, 11718 FC_REASON_CLASS_NOT_SUPP, NULL); 11719 FP_TRACE(FP_NHEAD1(3, 0), 11720 "fp_i_handle_unsol_els: " 11721 "Unsupported Class. Rejecting FLOGI."); 11722 } else { 11723 mutex_enter(&port->fp_mutex); 11724 port->fp_els_resp_pkt_busy = 0; 11725 mutex_exit(&port->fp_mutex); 11726 return; 11727 } 11728 } else { 11729 mutex_enter(&port->fp_mutex); 11730 if (FC_PORT_STATE_MASK(port->fp_state) != 11731 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11732 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11733 mutex_exit(&port->fp_mutex); 11734 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11735 cmd->cmd_pkt.pkt_cmdlen = 11736 sizeof (la_els_rjt_t); 11737 cmd->cmd_pkt.pkt_rsplen = 0; 11738 fp_els_rjt_init(port, cmd, buf, 11739 FC_ACTION_NON_RETRYABLE, 11740 FC_REASON_INVALID_LINK_CTRL, 11741 NULL); 11742 FP_TRACE(FP_NHEAD1(3, 0), 11743 "fp_i_handle_unsol_els: " 11744 "Invalid Link Ctrl. 
" 11745 "Rejecting FLOGI."); 11746 } else { 11747 mutex_enter(&port->fp_mutex); 11748 port->fp_els_resp_pkt_busy = 0; 11749 mutex_exit(&port->fp_mutex); 11750 return; 11751 } 11752 } else { 11753 mutex_exit(&port->fp_mutex); 11754 cmd->cmd_pkt.pkt_cmdlen = 11755 sizeof (la_els_logi_t); 11756 cmd->cmd_pkt.pkt_rsplen = 0; 11757 /* 11758 * Let's not aggressively validate the N_Port's 11759 * service parameters until PLOGI. Suffice it 11760 * to give a hint that we are an N_Port and we 11761 * are game to some serious stuff here. 11762 */ 11763 fp_login_acc_init(port, cmd, buf, 11764 NULL, KM_NOSLEEP); 11765 FP_TRACE(FP_NHEAD1(3, 0), 11766 "fp_i_handle_unsol_els: " 11767 "Accepting FLOGI."); 11768 } 11769 } 11770 break; 11771 11772 default: 11773 return; 11774 } 11775 11776 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11777 mutex_enter(&port->fp_mutex); 11778 port->fp_els_resp_pkt_busy = 0; 11779 mutex_exit(&port->fp_mutex); 11780 } 11781 } 11782 11783 11784 /* 11785 * Handle unsolicited PLOGI request 11786 */ 11787 static void 11788 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11789 job_request_t *job, int sleep) 11790 { 11791 int sent; 11792 int small; 11793 int f_port; 11794 int do_acc; 11795 fp_cmd_t *cmd; 11796 la_wwn_t *swwn; 11797 la_wwn_t *dwwn; 11798 la_els_logi_t *payload; 11799 fc_remote_port_t *pd; 11800 char dww_name[17]; 11801 11802 payload = (la_els_logi_t *)buf->ub_buffer; 11803 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11804 11805 mutex_enter(&port->fp_mutex); 11806 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11807 mutex_exit(&port->fp_mutex); 11808 11809 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11810 "type=%x, f_ctl=%x" 11811 " seq_id=%x, ox_id=%x, rx_id=%x" 11812 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11813 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11814 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11815 11816 swwn = &port->fp_service_params.nport_ww_name; 11817 dwwn = &payload->nport_ww_name; 11818 small = fctl_wwn_cmp(swwn, dwwn); 11819 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11820 if (pd) { 11821 mutex_enter(&pd->pd_mutex); 11822 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11823 /* 11824 * Most likely this means a cross login is in 11825 * progress or a device about to be yanked out. 11826 * Only accept the plogi if my wwn is smaller. 11827 */ 11828 11829 if (pd->pd_type == PORT_DEVICE_OLD) { 11830 sent = 1; 11831 } 11832 /* 11833 * Stop plogi request (if any) 11834 * attempt from local side to speedup 11835 * the discovery progress. 11836 * Mark the pd as PD_PLOGI_RECEPIENT. 11837 */ 11838 if (f_port == 0 && small < 0) { 11839 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11840 } 11841 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11842 11843 mutex_exit(&pd->pd_mutex); 11844 11845 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11846 " received. PD still exists in the PWWN list. pd=%p " 11847 "PWWN=%s, sent=%x", pd, dww_name, sent); 11848 11849 if (f_port == 0 && small < 0) { 11850 FP_TRACE(FP_NHEAD1(3, 0), 11851 "fp_handle_unsol_plogi: Mark the pd" 11852 " as plogi recipient, pd=%p, PWWN=%s" 11853 ", sent=%x", 11854 pd, dww_name, sent); 11855 } 11856 } else { 11857 sent = 0; 11858 } 11859 11860 /* 11861 * Avoid Login collisions by accepting only if my WWN is smaller. 
11862 * 11863 * A side note: There is no need to start a PLOGI from this end in 11864 * this context if login isn't going to be accepted for the 11865 * above reason as either a LIP (in private loop), RSCN (in 11866 * fabric topology), or an FLOGI (in point to point - Huh ? 11867 * check FC-PH) would normally drive the PLOGI from this end. 11868 * At this point of time there is no need for an inbound PLOGI 11869 * to kick an outbound PLOGI when it is going to be rejected 11870 * for the reason of WWN being smaller. However it isn't hard 11871 * to do that either (when such a need arises, start a timer 11872 * for a duration that extends beyond a normal device discovery 11873 * time and check if an outbound PLOGI did go before that, if 11874 * none fire one) 11875 * 11876 * Unfortunately, as it turned out, during booting, it is possible 11877 * to miss another initiator in the same loop as port driver 11878 * instances are serially attached. While preserving the above 11879 * comments for belly laughs, please kick an outbound PLOGI in 11880 * a non-switch environment (which is a pt pt between N_Ports or 11881 * a private loop) 11882 * 11883 * While preserving the above comments for amusement, send an 11884 * ACC if the PLOGI is going to be rejected for WWN being smaller 11885 * when no discovery is in progress at this end. Turn around 11886 * and make the port device as the PLOGI initiator, so that 11887 * during subsequent link/loop initialization, this end drives 11888 * the PLOGI (In fact both ends do in this particular case, but 11889 * only one wins) 11890 * 11891 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11892 * ports (such as 0xFFFC41) are accepted too. 11893 */ 11894 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11895 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11896 if (fp_is_class_supported(port->fp_cos, 11897 buf->ub_class) == FC_FAILURE) { 11898 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11899 cmd = fp_alloc_pkt(port, 11900 sizeof (la_els_logi_t), 0, sleep, pd); 11901 if (cmd == NULL) { 11902 return; 11903 } 11904 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11905 cmd->cmd_pkt.pkt_rsplen = 0; 11906 fp_els_rjt_init(port, cmd, buf, 11907 FC_ACTION_NON_RETRYABLE, 11908 FC_REASON_CLASS_NOT_SUPP, job); 11909 FP_TRACE(FP_NHEAD1(3, 0), 11910 "fp_handle_unsol_plogi: " 11911 "Unsupported class. rejecting PLOGI"); 11912 } 11913 } else { 11914 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11915 0, sleep, pd); 11916 if (cmd == NULL) { 11917 return; 11918 } 11919 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11920 cmd->cmd_pkt.pkt_rsplen = 0; 11921 11922 /* 11923 * Sometime later, we should validate the service 11924 * parameters instead of just accepting it. 11925 */ 11926 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11927 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11928 "Accepting PLOGI, f_port=%d, small=%d, " 11929 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11930 sent); 11931 11932 /* 11933 * If fp_port_id is zero and topology is 11934 * Point-to-Point, get the local port id from 11935 * the d_id in the PLOGI request. 11936 * If the outgoing FLOGI hasn't been accepted, 11937 * the topology will be unknown here. But it's 11938 * still safe to save the d_id to fp_port_id, 11939 * just because it will be overwritten later 11940 * if the topology is not Point-to-Point. 
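 * (fp_handle_unsol_flogi() later relies on this same fp_port_id value
 * when it screens out FLOGIs that carry our own S_ID back at us.)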
11941 */ 11942 mutex_enter(&port->fp_mutex); 11943 if ((port->fp_port_id.port_id == 0) && 11944 (port->fp_topology == FC_TOP_PT_PT || 11945 port->fp_topology == FC_TOP_UNKNOWN)) { 11946 port->fp_port_id.port_id = 11947 buf->ub_frame.d_id; 11948 } 11949 mutex_exit(&port->fp_mutex); 11950 } 11951 } else { 11952 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11953 port->fp_options & FP_SEND_RJT) { 11954 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11955 0, sleep, pd); 11956 if (cmd == NULL) { 11957 return; 11958 } 11959 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11960 cmd->cmd_pkt.pkt_rsplen = 0; 11961 /* 11962 * Send out Logical busy to indicate 11963 * the detection of PLOGI collision 11964 */ 11965 fp_els_rjt_init(port, cmd, buf, 11966 FC_ACTION_NON_RETRYABLE, 11967 FC_REASON_LOGICAL_BSY, job); 11968 11969 fc_wwn_to_str(dwwn, dww_name); 11970 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11971 "Rejecting Unsol PLOGI with Logical Busy." 11972 "possible PLOGI collision. PWWN=%s, sent=%x", 11973 dww_name, sent); 11974 } else { 11975 return; 11976 } 11977 } 11978 11979 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11980 fp_free_pkt(cmd); 11981 } 11982 } 11983 11984 11985 /* 11986 * Handle mischievous turning over of our own FLOGI requests back to 11987 * us by the SOC+ microcode. In other words, look at the class of such 11988 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11989 * on the floor 11990 */ 11991 static void 11992 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11993 job_request_t *job, int sleep) 11994 { 11995 uint32_t state; 11996 uint32_t s_id; 11997 fp_cmd_t *cmd; 11998 11999 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 12000 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12001 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12002 0, sleep, NULL); 12003 if (cmd == NULL) { 12004 return; 12005 } 12006 fp_els_rjt_init(port, cmd, buf, 12007 FC_ACTION_NON_RETRYABLE, 12008 FC_REASON_CLASS_NOT_SUPP, job); 12009 } else { 12010 return; 12011 } 12012 } else { 12013 12014 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 12015 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 12016 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 12017 buf->ub_frame.s_id, buf->ub_frame.d_id, 12018 buf->ub_frame.type, buf->ub_frame.f_ctl, 12019 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 12020 buf->ub_frame.rx_id, buf->ub_frame.ro); 12021 12022 mutex_enter(&port->fp_mutex); 12023 state = FC_PORT_STATE_MASK(port->fp_state); 12024 s_id = port->fp_port_id.port_id; 12025 mutex_exit(&port->fp_mutex); 12026 12027 if (state != FC_STATE_ONLINE || 12028 (s_id && buf->ub_frame.s_id == s_id)) { 12029 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 12030 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 12031 0, sleep, NULL); 12032 if (cmd == NULL) { 12033 return; 12034 } 12035 fp_els_rjt_init(port, cmd, buf, 12036 FC_ACTION_NON_RETRYABLE, 12037 FC_REASON_INVALID_LINK_CTRL, job); 12038 FP_TRACE(FP_NHEAD1(3, 0), 12039 "fp_handle_unsol_flogi: " 12040 "Rejecting PLOGI. Invalid Link CTRL"); 12041 } else { 12042 return; 12043 } 12044 } else { 12045 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 12046 0, sleep, NULL); 12047 if (cmd == NULL) { 12048 return; 12049 } 12050 /* 12051 * Let's not aggressively validate the N_Port's 12052 * service parameters until PLOGI. Suffice it 12053 * to give a hint that we are an N_Port and we 12054 * are game to some serious stuff here. 
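 * (The ACC that fp_login_acc_init() builds is simply our own
 * fp_service_params with the ls_code rewritten to LA_ELS_ACC.)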
12055 */ 12056 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 12057 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 12058 "Accepting PLOGI"); 12059 } 12060 } 12061 12062 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12063 fp_free_pkt(cmd); 12064 } 12065 } 12066 12067 12068 /* 12069 * Perform PLOGI accept 12070 */ 12071 static void 12072 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12073 job_request_t *job, int sleep) 12074 { 12075 fc_packet_t *pkt; 12076 fc_portmap_t *listptr; 12077 la_els_logi_t payload; 12078 12079 ASSERT(buf != NULL); 12080 12081 /* 12082 * If we are sending ACC to PLOGI and we haven't already 12083 * create port and node device handles, let's create them 12084 * here. 12085 */ 12086 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12087 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12088 int small; 12089 int do_acc; 12090 fc_remote_port_t *pd; 12091 la_els_logi_t *req; 12092 12093 req = (la_els_logi_t *)buf->ub_buffer; 12094 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12095 &req->nport_ww_name); 12096 12097 mutex_enter(&port->fp_mutex); 12098 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12099 mutex_exit(&port->fp_mutex); 12100 12101 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_acc_init fp %x, pd %x", 12102 port->fp_port_id.port_id, buf->ub_frame.s_id); 12103 pd = fctl_create_remote_port(port, &req->node_ww_name, 12104 &req->nport_ww_name, buf->ub_frame.s_id, 12105 PD_PLOGI_RECEPIENT, sleep); 12106 if (pd == NULL) { 12107 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12108 "Couldn't create port device for d_id:0x%x", 12109 buf->ub_frame.s_id); 12110 12111 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12112 "couldn't create port device d_id=%x", 12113 buf->ub_frame.s_id); 12114 } else { 12115 /* 12116 * usoc currently returns PLOGIs inline and 12117 * the maximum buffer size is 60 bytes or so. 
12118 * So attempt not to look beyond what is in 12119 * the unsolicited buffer 12120 * 12121 * JNI also traverses this path sometimes 12122 */ 12123 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12124 fp_register_login(NULL, pd, req, buf->ub_class); 12125 } else { 12126 mutex_enter(&pd->pd_mutex); 12127 if (pd->pd_login_count == 0) { 12128 pd->pd_login_count++; 12129 } 12130 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12131 pd->pd_login_class = buf->ub_class; 12132 mutex_exit(&pd->pd_mutex); 12133 } 12134 12135 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12136 if (listptr != NULL) { 12137 fctl_copy_portmap(listptr, pd); 12138 (void) fp_ulp_devc_cb(port, listptr, 12139 1, 1, sleep, 0); 12140 } 12141 12142 if (small > 0 && do_acc) { 12143 mutex_enter(&pd->pd_mutex); 12144 pd->pd_recepient = PD_PLOGI_INITIATOR; 12145 mutex_exit(&pd->pd_mutex); 12146 } 12147 } 12148 } 12149 12150 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12151 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12152 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12153 cmd->cmd_retry_count = 1; 12154 cmd->cmd_ulp_pkt = NULL; 12155 12156 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12157 cmd->cmd_job = job; 12158 12159 pkt = &cmd->cmd_pkt; 12160 12161 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12162 12163 payload = port->fp_service_params; 12164 payload.ls_code.ls_code = LA_ELS_ACC; 12165 12166 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12167 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12168 12169 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12170 "bufsize:0x%x sizeof (la_els_logi):0x%x " 12171 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12172 "statec_busy:0x%x", buf->ub_buffer[0], buf->ub_frame.s_id, 12173 buf->ub_bufsize, sizeof (la_els_logi_t), 12174 port->fp_service_params.nport_ww_name.w.naa_id, 12175 port->fp_service_params.nport_ww_name.w.nport_id, 12176 port->fp_service_params.nport_ww_name.w.wwn_hi, 12177 port->fp_service_params.nport_ww_name.w.wwn_lo, 12178 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12179 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12180 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12181 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12182 port->fp_statec_busy); 12183 } 12184 12185 12186 #define RSCN_EVENT_NAME_LEN 256 12187 12188 /* 12189 * Handle RSCNs 12190 */ 12191 static void 12192 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12193 job_request_t *job, int sleep) 12194 { 12195 uint32_t mask; 12196 fp_cmd_t *cmd; 12197 uint32_t count; 12198 int listindex; 12199 int16_t len; 12200 fc_rscn_t *payload; 12201 fc_portmap_t *listptr; 12202 fctl_ns_req_t *ns_cmd; 12203 fc_affected_id_t *page; 12204 caddr_t nvname; 12205 nvlist_t *attr_list = NULL; 12206 12207 mutex_enter(&port->fp_mutex); 12208 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12209 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12210 --port->fp_rscn_count; 12211 } 12212 mutex_exit(&port->fp_mutex); 12213 return; 12214 } 12215 mutex_exit(&port->fp_mutex); 12216 12217 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12218 if (cmd != NULL) { 12219 fp_els_acc_init(port, cmd, buf, job); 12220 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12221 fp_free_pkt(cmd); 12222 } 12223 } 12224 12225 payload = (fc_rscn_t *)buf->ub_buffer; 12226 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12227 ASSERT(payload->rscn_len == 
FP_PORT_IDENTIFIER_LEN); 12228 12229 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12230 12231 if (len <= 0) { 12232 mutex_enter(&port->fp_mutex); 12233 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12234 --port->fp_rscn_count; 12235 } 12236 mutex_exit(&port->fp_mutex); 12237 12238 return; 12239 } 12240 12241 ASSERT((len & 0x3) == 0); /* Must be a multiple of 4 */ 12242 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12243 12244 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12245 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12246 12247 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12248 12249 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12250 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12251 0, sleep); 12252 if (ns_cmd == NULL) { 12253 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12254 12255 mutex_enter(&port->fp_mutex); 12256 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12257 --port->fp_rscn_count; 12258 } 12259 mutex_exit(&port->fp_mutex); 12260 12261 return; 12262 } 12263 12264 ns_cmd->ns_cmd_code = NS_GPN_ID; 12265 12266 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12267 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12268 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12269 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12270 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12271 12272 /* Only proceed if we can allocate nvname and the nvlist */ 12273 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12274 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12275 KM_NOSLEEP) == DDI_SUCCESS) { 12276 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12277 port->fp_instance) == DDI_SUCCESS && 12278 nvlist_add_byte_array(attr_list, "port-wwn", 12279 port->fp_service_params.nport_ww_name.raw_wwn, 12280 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12281 nvlist_free(attr_list); 12282 attr_list = NULL; 12283 } 12284 } 12285 12286 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12287 /* Add affected page to the event payload */ 12288 if (attr_list != NULL) { 12289 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12290 "affected_page_%d", listindex); 12291 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12292 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12293 /* We don't send a partial event, so dump it */ 12294 nvlist_free(attr_list); 12295 attr_list = NULL; 12296 } 12297 } 12298 /* 12299 * Query the NS to get the Port WWN for this 12300 * affected D_ID. 12301 */ 12302 mask = 0; 12303 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12304 case FC_RSCN_PORT_ADDRESS: 12305 fp_validate_rscn_page(port, page, job, ns_cmd, 12306 listptr, &listindex, sleep); 12307 12308 if (listindex == 0) { 12309 /* 12310 * We essentially did not process this RSCN.
So, 12311 * ULPs are not going to be called and so we 12312 * decrement the rscn_count 12313 */ 12314 mutex_enter(&port->fp_mutex); 12315 if (--port->fp_rscn_count == 12316 FC_INVALID_RSCN_COUNT) { 12317 --port->fp_rscn_count; 12318 } 12319 mutex_exit(&port->fp_mutex); 12320 } 12321 break; 12322 12323 case FC_RSCN_AREA_ADDRESS: 12324 mask = 0xFFFF00; 12325 /* FALLTHROUGH */ 12326 12327 case FC_RSCN_DOMAIN_ADDRESS: 12328 if (!mask) { 12329 mask = 0xFF0000; 12330 } 12331 fp_validate_area_domain(port, page->aff_d_id, mask, 12332 job, sleep); 12333 break; 12334 12335 case FC_RSCN_FABRIC_ADDRESS: 12336 /* 12337 * We need to discover all the devices on this 12338 * port. 12339 */ 12340 fp_validate_area_domain(port, 0, 0, job, sleep); 12341 break; 12342 12343 default: 12344 break; 12345 } 12346 } 12347 if (attr_list != NULL) { 12348 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12349 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12350 NULL, DDI_SLEEP); 12351 nvlist_free(attr_list); 12352 } else { 12353 FP_TRACE(FP_NHEAD1(9, 0), 12354 "RSCN handled, but event not sent to userland"); 12355 } 12356 if (nvname != NULL) { 12357 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12358 } 12359 12360 if (ns_cmd) { 12361 fctl_free_ns_cmd(ns_cmd); 12362 } 12363 12364 if (listindex) { 12365 #ifdef DEBUG 12366 page = (fc_affected_id_t *)(buf->ub_buffer + 12367 sizeof (fc_rscn_t)); 12368 12369 if (listptr->map_did.port_id != page->aff_d_id) { 12370 FP_TRACE(FP_NHEAD1(9, 0), 12371 "PORT RSCN: processed=%x, reporting=%x", 12372 listptr->map_did.port_id, page->aff_d_id); 12373 } 12374 #endif 12375 12376 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12377 sleep, 0); 12378 } else { 12379 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12380 } 12381 } 12382 12383 12384 /* 12385 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12386 */ 12387 static void 12388 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12389 { 12390 int is_switch; 12391 int initiator; 12392 fc_local_port_t *port; 12393 12394 port = pd->pd_port; 12395 12396 /* This function has the following bunch of assumptions */ 12397 ASSERT(port != NULL); 12398 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12399 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12400 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12401 12402 pd->pd_state = PORT_DEVICE_INVALID; 12403 pd->pd_type = PORT_DEVICE_OLD; 12404 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12405 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12406 12407 fctl_delist_did_table(port, pd); 12408 fctl_delist_pwwn_table(port, pd); 12409 12410 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12411 " removed the PD=%p from DID and PWWN tables", 12412 port, pd->pd_port_id.port_id, pd); 12413 12414 if ((!flag) && port && initiator && is_switch) { 12415 (void) fctl_add_orphan_held(port, pd); 12416 } 12417 fctl_copy_portmap_held(map, pd); 12418 map->map_pd = pd; 12419 } 12420 12421 /* 12422 * Fill out old map for ULPs 12423 */ 12424 static void 12425 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12426 { 12427 int is_switch; 12428 int initiator; 12429 fc_local_port_t *port; 12430 12431 mutex_enter(&pd->pd_mutex); 12432 port = pd->pd_port; 12433 mutex_exit(&pd->pd_mutex); 12434 12435 mutex_enter(&port->fp_mutex); 12436 mutex_enter(&pd->pd_mutex); 12437 12438 pd->pd_state = PORT_DEVICE_INVALID; 12439 pd->pd_type = PORT_DEVICE_OLD; 12440 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 12441 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12442 12443 fctl_delist_did_table(port, pd); 12444 fctl_delist_pwwn_table(port, pd); 12445 12446 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12447 " removed the PD=%p from DID and PWWN tables", 12448 port, pd->pd_port_id.port_id, pd); 12449 12450 mutex_exit(&pd->pd_mutex); 12451 mutex_exit(&port->fp_mutex); 12452 12453 ASSERT(port != NULL); 12454 if ((!flag) && port && initiator && is_switch) { 12455 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12456 } 12457 fctl_copy_portmap(map, pd); 12458 map->map_pd = pd; 12459 } 12460 12461 12462 /* 12463 * Fillout Changed Map for ULPs 12464 */ 12465 static void 12466 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12467 uint32_t *new_did, la_wwn_t *new_pwwn) 12468 { 12469 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12470 12471 pd->pd_type = PORT_DEVICE_CHANGED; 12472 if (new_did) { 12473 pd->pd_port_id.port_id = *new_did; 12474 } 12475 if (new_pwwn) { 12476 pd->pd_port_name = *new_pwwn; 12477 } 12478 mutex_exit(&pd->pd_mutex); 12479 12480 fctl_copy_portmap(map, pd); 12481 12482 mutex_enter(&pd->pd_mutex); 12483 pd->pd_type = PORT_DEVICE_NOCHANGE; 12484 } 12485 12486 12487 /* 12488 * Fillout New Name Server map 12489 */ 12490 static void 12491 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12492 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12493 { 12494 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12495 12496 if (handle) { 12497 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12498 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12499 DDI_DEV_AUTOINCR); 12500 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12501 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12502 DDI_DEV_AUTOINCR); 12503 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12504 (uint8_t *)gan_resp->gan_fc4types, 12505 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12506 } else { 12507 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12508 sizeof (gan_resp->gan_pwwn)); 12509 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12510 sizeof (gan_resp->gan_nwwn)); 12511 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12512 sizeof (gan_resp->gan_fc4types)); 12513 } 12514 port_map->map_did.port_id = d_id; 12515 port_map->map_did.priv_lilp_posit = 0; 12516 port_map->map_hard_addr.hard_addr = 0; 12517 port_map->map_hard_addr.rsvd = 0; 12518 port_map->map_state = PORT_DEVICE_INVALID; 12519 port_map->map_type = PORT_DEVICE_NEW; 12520 port_map->map_flags = 0; 12521 port_map->map_pd = NULL; 12522 12523 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12524 12525 ASSERT(port != NULL); 12526 } 12527 12528 12529 /* 12530 * Perform LINIT ELS 12531 */ 12532 static int 12533 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12534 job_request_t *job) 12535 { 12536 int rval; 12537 uint32_t d_id; 12538 uint32_t s_id; 12539 uint32_t lfa; 12540 uchar_t class; 12541 uint32_t ret; 12542 fp_cmd_t *cmd; 12543 fc_porttype_t ptype; 12544 fc_packet_t *pkt; 12545 fc_linit_req_t payload; 12546 fc_remote_port_t *pd; 12547 12548 rval = 0; 12549 12550 ASSERT(job != NULL); 12551 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12552 12553 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12554 if (pd == NULL) { 12555 fctl_ns_req_t *ns_cmd; 12556 12557 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12558 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12559 0, sleep); 12560 12561 if (ns_cmd == NULL) { 12562 
return (FC_NOMEM); 12563 } 12564 job->job_result = FC_SUCCESS; 12565 ns_cmd->ns_cmd_code = NS_GID_PN; 12566 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12567 12568 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12569 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12570 fctl_free_ns_cmd(ns_cmd); 12571 return (FC_FAILURE); 12572 } 12573 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12574 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12575 12576 fctl_free_ns_cmd(ns_cmd); 12577 lfa = d_id & 0xFFFF00; 12578 12579 /* 12580 * Given this D_ID, get the port type to see if 12581 * we can do LINIT on the LFA 12582 */ 12583 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12584 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12585 0, sleep); 12586 12587 if (ns_cmd == NULL) { 12588 return (FC_NOMEM); 12589 } 12590 12591 job->job_result = FC_SUCCESS; 12592 ns_cmd->ns_cmd_code = NS_GPT_ID; 12593 12594 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12595 ((ns_req_gpt_id_t *) 12596 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12597 12598 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12599 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12600 fctl_free_ns_cmd(ns_cmd); 12601 return (FC_FAILURE); 12602 } 12603 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12604 12605 fctl_free_ns_cmd(ns_cmd); 12606 12607 switch (ptype.port_type) { 12608 case FC_NS_PORT_NL: 12609 case FC_NS_PORT_F_NL: 12610 case FC_NS_PORT_FL: 12611 break; 12612 12613 default: 12614 return (FC_FAILURE); 12615 } 12616 } else { 12617 mutex_enter(&pd->pd_mutex); 12618 ptype = pd->pd_porttype; 12619 12620 switch (pd->pd_porttype.port_type) { 12621 case FC_NS_PORT_NL: 12622 case FC_NS_PORT_F_NL: 12623 case FC_NS_PORT_FL: 12624 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12625 break; 12626 12627 default: 12628 mutex_exit(&pd->pd_mutex); 12629 return (FC_FAILURE); 12630 } 12631 mutex_exit(&pd->pd_mutex); 12632 } 12633 12634 mutex_enter(&port->fp_mutex); 12635 s_id = port->fp_port_id.port_id; 12636 class = port->fp_ns_login_class; 12637 mutex_exit(&port->fp_mutex); 12638 12639 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12640 sizeof (fc_linit_resp_t), sleep, pd); 12641 if (cmd == NULL) { 12642 return (FC_NOMEM); 12643 } 12644 12645 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12646 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12647 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12648 cmd->cmd_retry_count = fp_retry_count; 12649 cmd->cmd_ulp_pkt = NULL; 12650 12651 pkt = &cmd->cmd_pkt; 12652 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12653 12654 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12655 12656 /* 12657 * How does LIP work by the way ? 12658 * If the L_Port receives three consecutive identical ordered 12659 * sets whose first two characters (fully decoded) are equal to 12660 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12661 * recognize a Loop Initialization Primitive sequence. 
The 12662 * character 3 determines the type of lip: 12663 * LIP(F7) Normal LIP 12664 * LIP(F8) Loop Failure LIP 12665 * 12666 * The possible combination for the 3rd and 4th bytes are: 12667 * F7, F7 Normal Lip - No valid AL_PA 12668 * F8, F8 Loop Failure - No valid AL_PA 12669 * F7, AL_PS Normal Lip - Valid source AL_PA 12670 * F8, AL_PS Loop Failure - Valid source AL_PA 12671 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12672 * And Normal Lip for all other loop members 12673 * 0xFF AL_PS Vendor specific reset of all loop members 12674 * 12675 * Now, it may not always be that we, at the source, may have an 12676 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12677 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12678 * payload we are going to set: 12679 * lip_b3 = 0xF7; Normal LIP 12680 * lip_b4 = 0xF7; No valid source AL_PA 12681 */ 12682 payload.ls_code.ls_code = LA_ELS_LINIT; 12683 payload.ls_code.mbz = 0; 12684 payload.rsvd = 0; 12685 payload.func = 0; /* Let Fabric determine the best way */ 12686 payload.lip_b3 = 0xF7; /* Normal LIP */ 12687 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12688 12689 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12690 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12691 12692 job->job_counter = 1; 12693 12694 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12695 if (ret == FC_SUCCESS) { 12696 fp_jobwait(job); 12697 rval = job->job_result; 12698 } else { 12699 rval = FC_FAILURE; 12700 fp_free_pkt(cmd); 12701 } 12702 12703 return (rval); 12704 } 12705 12706 12707 /* 12708 * Fill out the device handles with GAN response 12709 */ 12710 static void 12711 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12712 ns_resp_gan_t *gan_resp) 12713 { 12714 fc_remote_node_t *node; 12715 fc_porttype_t type; 12716 fc_local_port_t *port; 12717 12718 ASSERT(pd != NULL); 12719 ASSERT(handle != NULL); 12720 12721 port = pd->pd_port; 12722 12723 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12724 " port_id=%x, sym_len=%d fc4-type=%x", 12725 pd, gan_resp->gan_type_id.rsvd, 12726 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12727 12728 mutex_enter(&pd->pd_mutex); 12729 12730 ddi_rep_get8(*handle, (uint8_t *)&type, 12731 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12732 12733 pd->pd_porttype.port_type = type.port_type; 12734 pd->pd_porttype.rsvd = 0; 12735 12736 pd->pd_spn_len = gan_resp->gan_spnlen; 12737 if (pd->pd_spn_len) { 12738 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12739 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12740 DDI_DEV_AUTOINCR); 12741 } 12742 12743 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12744 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12745 DDI_DEV_AUTOINCR); 12746 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12747 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12748 DDI_DEV_AUTOINCR); 12749 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12750 (uint8_t *)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12751 DDI_DEV_AUTOINCR); 12752 12753 node = pd->pd_remote_nodep; 12754 mutex_exit(&pd->pd_mutex); 12755 12756 mutex_enter(&node->fd_mutex); 12757 12758 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12759 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12760 DDI_DEV_AUTOINCR); 12761 12762 node->fd_snn_len = gan_resp->gan_snnlen; 12763 if (node->fd_snn_len) { 12764 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12765 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12766 DDI_DEV_AUTOINCR); 
12767 } 12768 12769 mutex_exit(&node->fd_mutex); 12770 } 12771 12772 12773 /* 12774 * Handles all NS Queries (also means that this function 12775 * doesn't handle NS object registration) 12776 */ 12777 static int 12778 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12779 int polled, int sleep) 12780 { 12781 int rval; 12782 fp_cmd_t *cmd; 12783 12784 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12785 12786 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 12787 FP_TRACE(FP_NHEAD1(1, 0), "fp_ns_query GA_NXT fp %x pd %x", 12788 port->fp_port_id.port_id, ns_cmd->ns_gan_sid); 12789 } 12790 12791 if (ns_cmd->ns_cmd_size == 0) { 12792 return (FC_FAILURE); 12793 } 12794 12795 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12796 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12797 ns_cmd->ns_resp_size, sleep, NULL); 12798 if (cmd == NULL) { 12799 return (FC_NOMEM); 12800 } 12801 12802 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12803 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12804 12805 if (polled) { 12806 job->job_counter = 1; 12807 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12808 } 12809 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12810 if (rval != FC_SUCCESS) { 12811 job->job_result = rval; 12812 fp_iodone(cmd); 12813 if (polled == 0) { 12814 /* 12815 * Return FC_SUCCESS to indicate that 12816 * fp_iodone is performed already. 12817 */ 12818 rval = FC_SUCCESS; 12819 } 12820 } 12821 12822 if (polled) { 12823 fp_jobwait(job); 12824 rval = job->job_result; 12825 } 12826 12827 return (rval); 12828 } 12829 12830 12831 /* 12832 * Initialize Common Transport request 12833 */ 12834 static void 12835 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12836 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12837 uint16_t resp_len, job_request_t *job) 12838 { 12839 uint32_t s_id; 12840 uchar_t class; 12841 fc_packet_t *pkt; 12842 fc_ct_header_t ct; 12843 12844 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12845 12846 mutex_enter(&port->fp_mutex); 12847 s_id = port->fp_port_id.port_id; 12848 class = port->fp_ns_login_class; 12849 mutex_exit(&port->fp_mutex); 12850 12851 cmd->cmd_job = job; 12852 cmd->cmd_private = ns_cmd; 12853 pkt = &cmd->cmd_pkt; 12854 12855 ct.ct_rev = CT_REV; 12856 ct.ct_inid = 0; 12857 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12858 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12859 ct.ct_options = 0; 12860 ct.ct_reserved1 = 0; 12861 ct.ct_cmdrsp = cmd_code; 12862 ct.ct_aiusize = resp_len >> 2; 12863 ct.ct_reserved2 = 0; 12864 ct.ct_reason = 0; 12865 ct.ct_expln = 0; 12866 ct.ct_vendor = 0; 12867 12868 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12869 sizeof (ct), DDI_DEV_AUTOINCR); 12870 12871 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12872 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12873 pkt->pkt_cmd_fhdr.s_id = s_id; 12874 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12875 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12876 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12877 pkt->pkt_cmd_fhdr.seq_id = 0; 12878 pkt->pkt_cmd_fhdr.df_ctl = 0; 12879 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12880 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12881 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12882 pkt->pkt_cmd_fhdr.ro = 0; 12883 pkt->pkt_cmd_fhdr.rsvd = 0; 12884 12885 pkt->pkt_comp = fp_ns_intr; 12886 pkt->pkt_ulp_private = (opaque_t)cmd; 12887 pkt->pkt_timeout = FP_NS_TIMEOUT; 12888 12889 if (cmd_buf) { 12890 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12891 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12892 
cmd_len, DDI_DEV_AUTOINCR); 12893 } 12894 12895 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12896 12897 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12898 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12899 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12900 cmd->cmd_retry_count = fp_retry_count; 12901 cmd->cmd_ulp_pkt = NULL; 12902 } 12903 12904 12905 /* 12906 * Name Server request interrupt routine 12907 */ 12908 static void 12909 fp_ns_intr(fc_packet_t *pkt) 12910 { 12911 fp_cmd_t *cmd; 12912 fc_local_port_t *port; 12913 fc_ct_header_t resp_hdr; 12914 fc_ct_header_t cmd_hdr; 12915 fctl_ns_req_t *ns_cmd; 12916 12917 cmd = pkt->pkt_ulp_private; 12918 port = cmd->cmd_port; 12919 12920 mutex_enter(&port->fp_mutex); 12921 port->fp_out_fpcmds--; 12922 mutex_exit(&port->fp_mutex); 12923 12924 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12925 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12926 ns_cmd = (fctl_ns_req_t *) 12927 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12928 if (!FP_IS_PKT_ERROR(pkt)) { 12929 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12930 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12931 DDI_DEV_AUTOINCR); 12932 12933 /* 12934 * On x86 architectures, make sure the resp_hdr is big endian. 12935 * This macro is a NOP on sparc architectures mainly because 12936 * we don't want to end up wasting time since the end result 12937 * is going to be the same. 12938 */ 12939 MAKE_BE_32(&resp_hdr); 12940 12941 if (ns_cmd) { 12942 /* 12943 * Always copy out the response CT_HDR 12944 */ 12945 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12946 sizeof (resp_hdr)); 12947 } 12948 12949 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12950 pkt->pkt_state = FC_PKT_FS_RJT; 12951 pkt->pkt_reason = resp_hdr.ct_reason; 12952 pkt->pkt_expln = resp_hdr.ct_expln; 12953 } 12954 } 12955 12956 if (FP_IS_PKT_ERROR(pkt)) { 12957 if (ns_cmd) { 12958 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12959 ASSERT(ns_cmd->ns_pd != NULL); 12960 12961 /* Mark it OLD if not already done */ 12962 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12963 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12964 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12965 } 12966 12967 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12968 fctl_free_ns_cmd(ns_cmd); 12969 ((fp_cmd_t *) 12970 (pkt->pkt_ulp_private))->cmd_private = NULL; 12971 } 12972 12973 } 12974 12975 FP_TRACE(FP_NHEAD2(9, 0), "%x NS failure pkt state=%x" 12976 "reason=%x, expln=%x, NSCMD=%04X, NSRSP=%04X", 12977 port->fp_port_id.port_id, pkt->pkt_state, 12978 pkt->pkt_reason, pkt->pkt_expln, 12979 cmd_hdr.ct_cmdrsp, resp_hdr.ct_cmdrsp); 12980 12981 (void) fp_common_intr(pkt, 1); 12982 12983 return; 12984 } 12985 12986 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12987 uint32_t d_id; 12988 fc_local_port_t *port; 12989 fp_cmd_t *cmd; 12990 12991 d_id = pkt->pkt_cmd_fhdr.d_id; 12992 cmd = pkt->pkt_ulp_private; 12993 port = cmd->cmd_port; 12994 FP_TRACE(FP_NHEAD2(9, 0), 12995 "Bogus NS response received for D_ID=%x", d_id); 12996 } 12997 12998 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12999 fp_gan_handler(pkt, ns_cmd); 13000 return; 13001 } 13002 13003 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 13004 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 13005 if (ns_cmd) { 13006 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 13007 fp_ns_query_handler(pkt, ns_cmd); 13008 return; 13009 } 13010 } 13011 } 13012 13013 fp_iodone(pkt->pkt_ulp_private); 13014 } 13015 13016 13017 /* 13018 * Process NS_GAN response 13019 */ 13020 static void 13021 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t 
*ns_cmd) 13022 { 13023 int my_did; 13024 fc_portid_t d_id; 13025 fp_cmd_t *cmd; 13026 fc_local_port_t *port; 13027 fc_remote_port_t *pd; 13028 ns_req_gan_t gan_req; 13029 ns_resp_gan_t *gan_resp; 13030 13031 ASSERT(ns_cmd != NULL); 13032 13033 cmd = pkt->pkt_ulp_private; 13034 port = cmd->cmd_port; 13035 13036 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 13037 13038 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 13039 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 13040 13041 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 13042 13043 /* 13044 * In this case the priv_lilp_posit field in reality 13045 * is actually represents the relative position on a private loop. 13046 * So zero it while dealing with Port Identifiers. 13047 */ 13048 d_id.priv_lilp_posit = 0; 13049 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 13050 if (ns_cmd->ns_gan_sid == d_id.port_id) { 13051 /* 13052 * We've come a full circle; time to get out. 13053 */ 13054 fp_iodone(cmd); 13055 return; 13056 } 13057 13058 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 13059 ns_cmd->ns_gan_sid = d_id.port_id; 13060 } 13061 13062 mutex_enter(&port->fp_mutex); 13063 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 1 : 0; 13064 mutex_exit(&port->fp_mutex); 13065 13066 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, fp %x pd %x", port, 13067 port->fp_port_id.port_id, d_id.port_id); 13068 if (my_did == 0) { 13069 la_wwn_t pwwn; 13070 la_wwn_t nwwn; 13071 13072 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 13073 "port=%p, d_id=%x, type_id=%x, " 13074 "pwwn=%x %x %x %x %x %x %x %x, " 13075 "nwwn=%x %x %x %x %x %x %x %x", 13076 port, d_id.port_id, gan_resp->gan_type_id, 13077 13078 gan_resp->gan_pwwn.raw_wwn[0], 13079 gan_resp->gan_pwwn.raw_wwn[1], 13080 gan_resp->gan_pwwn.raw_wwn[2], 13081 gan_resp->gan_pwwn.raw_wwn[3], 13082 gan_resp->gan_pwwn.raw_wwn[4], 13083 gan_resp->gan_pwwn.raw_wwn[5], 13084 gan_resp->gan_pwwn.raw_wwn[6], 13085 gan_resp->gan_pwwn.raw_wwn[7], 13086 13087 gan_resp->gan_nwwn.raw_wwn[0], 13088 gan_resp->gan_nwwn.raw_wwn[1], 13089 gan_resp->gan_nwwn.raw_wwn[2], 13090 gan_resp->gan_nwwn.raw_wwn[3], 13091 gan_resp->gan_nwwn.raw_wwn[4], 13092 gan_resp->gan_nwwn.raw_wwn[5], 13093 gan_resp->gan_nwwn.raw_wwn[6], 13094 gan_resp->gan_nwwn.raw_wwn[7]); 13095 13096 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13097 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13098 DDI_DEV_AUTOINCR); 13099 13100 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13101 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13102 DDI_DEV_AUTOINCR); 13103 13104 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13105 FP_TRACE(FP_NHEAD1(1, 0), "fp %x gan_hander create" 13106 "pd %x", port->fp_port_id.port_id, d_id.port_id); 13107 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13108 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13109 } 13110 if (pd != NULL) { 13111 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13112 pd, gan_resp); 13113 } 13114 13115 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13116 *((int *)ns_cmd->ns_data_buf) += 1; 13117 } 13118 13119 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13120 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13121 13122 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13123 fc_port_dev_t *userbuf; 13124 13125 userbuf = ((fc_port_dev_t *) 13126 ns_cmd->ns_data_buf) + 13127 ns_cmd->ns_gan_index++; 13128 13129 userbuf->dev_did = d_id; 13130 13131 ddi_rep_get8(pkt->pkt_resp_acc, 13132 (uint8_t *)userbuf->dev_type, 
13133 (uint8_t *)gan_resp->gan_fc4types, 13134 sizeof (userbuf->dev_type), 13135 DDI_DEV_AUTOINCR); 13136 13137 userbuf->dev_nwwn = nwwn; 13138 userbuf->dev_pwwn = pwwn; 13139 13140 if (pd != NULL) { 13141 mutex_enter(&pd->pd_mutex); 13142 userbuf->dev_state = pd->pd_state; 13143 userbuf->dev_hard_addr = 13144 pd->pd_hard_addr; 13145 mutex_exit(&pd->pd_mutex); 13146 } else { 13147 userbuf->dev_state = 13148 PORT_DEVICE_INVALID; 13149 } 13150 } else if (ns_cmd->ns_flags & 13151 FCTL_NS_BUF_IS_FC_PORTMAP) { 13152 fc_portmap_t *map; 13153 13154 map = ((fc_portmap_t *) 13155 ns_cmd->ns_data_buf) + 13156 ns_cmd->ns_gan_index++; 13157 13158 /* 13159 * First fill it like any new map 13160 * and update the port device info 13161 * below. 13162 */ 13163 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13164 map, gan_resp, d_id.port_id); 13165 if (pd != NULL) { 13166 fctl_copy_portmap(map, pd); 13167 } else { 13168 map->map_state = PORT_DEVICE_INVALID; 13169 map->map_type = PORT_DEVICE_NOCHANGE; 13170 } 13171 } else { 13172 caddr_t dst_ptr; 13173 13174 dst_ptr = ns_cmd->ns_data_buf + 13175 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13176 13177 ddi_rep_get8(pkt->pkt_resp_acc, 13178 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13179 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13180 } 13181 } else { 13182 ns_cmd->ns_gan_index++; 13183 } 13184 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13185 fp_iodone(cmd); 13186 return; 13187 } 13188 } 13189 13190 gan_req.pid = d_id; 13191 13192 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13193 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13194 sizeof (gan_req), DDI_DEV_AUTOINCR); 13195 13196 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13197 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13198 fp_iodone(cmd); 13199 } else { 13200 mutex_enter(&port->fp_mutex); 13201 port->fp_out_fpcmds++; 13202 mutex_exit(&port->fp_mutex); 13203 } 13204 } 13205 13206 13207 /* 13208 * Handle NS Query interrupt 13209 */ 13210 static void 13211 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13212 { 13213 fp_cmd_t *cmd; 13214 fc_local_port_t *port; 13215 caddr_t src_ptr; 13216 uint32_t xfer_len; 13217 13218 cmd = pkt->pkt_ulp_private; 13219 port = cmd->cmd_port; 13220 13221 xfer_len = ns_cmd->ns_resp_size; 13222 13223 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13224 ns_cmd->ns_cmd_code, xfer_len); 13225 13226 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13227 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13228 13229 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13230 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13231 } 13232 13233 if (xfer_len <= ns_cmd->ns_data_len) { 13234 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13235 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13236 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13237 } 13238 13239 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13240 ASSERT(ns_cmd->ns_pd != NULL); 13241 13242 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13243 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13244 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13245 } 13246 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13247 } 13248 13249 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13250 fctl_free_ns_cmd(ns_cmd); 13251 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13252 } 13253 fp_iodone(cmd); 13254 } 13255 13256 13257 /* 13258 * Handle unsolicited ADISC ELS request 13259 */ 13260 static void 13261 
fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf,
    fc_remote_port_t *pd, job_request_t *job)
{
	int		rval;
	fp_cmd_t	*cmd;

	FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p",
	    port, pd->pd_port_id.port_id, pd->pd_state, pd);
	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		mutex_exit(&pd->pd_mutex);
		if (FP_IS_CLASS_1_OR_2(buf->ub_class)) {
			cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t),
			    0, KM_SLEEP, pd);
			if (cmd != NULL) {
				fp_els_rjt_init(port, cmd, buf,
				    FC_ACTION_NON_RETRYABLE,
				    FC_REASON_INVALID_LINK_CTRL, job);

				if (fp_sendcmd(port, cmd,
				    port->fp_fca_handle) != FC_SUCCESS) {
					fp_free_pkt(cmd);
				}
			}
		}
	} else {
		mutex_exit(&pd->pd_mutex);
		/*
		 * Yes, yes, we don't have a hard address. But we
		 * should still respond. Huh? See 21.19.2 of FC-PH-2,
		 * which essentially says that if an NL_Port doesn't
		 * have a hard address, or if a port does not have
		 * FC-AL capability, it shall report zeroes in this
		 * field.
		 */
		cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t),
		    0, KM_SLEEP, pd);
		if (cmd == NULL) {
			return;
		}
		fp_adisc_acc_init(port, cmd, buf, job);
		rval = fp_sendcmd(port, cmd, port->fp_fca_handle);
		if (rval != FC_SUCCESS) {
			fp_free_pkt(cmd);
		}
	}
}


/*
 * Initialize ADISC response.
 */
static void
fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf,
    job_request_t *job)
{
	fc_packet_t	*pkt;
	la_els_adisc_t	payload;

	cmd->cmd_pkt.pkt_tran_flags = buf->ub_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND;
	cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	cmd->cmd_transport = port->fp_fca_tran->fca_els_send;
	cmd->cmd_job = job;

	pkt = &cmd->cmd_pkt;

	fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);

	payload.ls_code.ls_code = LA_ELS_ACC;
	payload.ls_code.mbz = 0;

	mutex_enter(&port->fp_mutex);
	payload.nport_id = port->fp_port_id;
	payload.hard_addr = port->fp_hard_addr;
	mutex_exit(&port->fp_mutex);

	payload.port_wwn = port->fp_service_params.nport_ww_name;
	payload.node_wwn = port->fp_service_params.node_ww_name;

	ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload,
	    (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR);
}


/*
 * Hold and install the requested ULP drivers
 */
static void
fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port)
{
	int		len;
	int		count;
	int		data_len;
	major_t		ulp_major;
	caddr_t		ulp_name;
	caddr_t		data_ptr;
	caddr_t		data_buf;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	data_buf = NULL;
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "load-ulp-list",
	    (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) {
		return;
	}

	len = strlen(data_buf);
	port->fp_ulp_nload = fctl_atoi(data_buf, 10);

	data_ptr = data_buf + len + 1;
	for (count = 0; count < port->fp_ulp_nload; count++) {
		len = strlen(data_ptr) + 1;
		ulp_name = kmem_zalloc(len, KM_SLEEP);
		bcopy(data_ptr, ulp_name,
len); 13380 13381 ulp_major = ddi_name_to_major(ulp_name); 13382 13383 if (ulp_major != (major_t)-1) { 13384 if (modload("drv", ulp_name) < 0) { 13385 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13386 0, NULL, "failed to load %s", 13387 ulp_name); 13388 } 13389 } else { 13390 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13391 "%s isn't a valid driver", ulp_name); 13392 } 13393 13394 kmem_free(ulp_name, len); 13395 data_ptr += len; /* Skip to next field */ 13396 } 13397 13398 /* 13399 * Free the memory allocated by DDI 13400 */ 13401 if (data_buf != NULL) { 13402 kmem_free(data_buf, data_len); 13403 } 13404 } 13405 13406 13407 /* 13408 * Perform LOGO operation 13409 */ 13410 static int 13411 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13412 { 13413 int rval; 13414 fp_cmd_t *cmd; 13415 13416 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13417 ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13418 13419 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13420 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13421 13422 mutex_enter(&port->fp_mutex); 13423 mutex_enter(&pd->pd_mutex); 13424 13425 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13426 ASSERT(pd->pd_login_count == 1); 13427 13428 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13429 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13430 cmd->cmd_flags = 0; 13431 cmd->cmd_retry_count = 1; 13432 cmd->cmd_ulp_pkt = NULL; 13433 13434 fp_logo_init(pd, cmd, job); 13435 13436 mutex_exit(&pd->pd_mutex); 13437 mutex_exit(&port->fp_mutex); 13438 13439 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13440 if (rval != FC_SUCCESS) { 13441 fp_iodone(cmd); 13442 } 13443 13444 return (rval); 13445 } 13446 13447 13448 /* 13449 * Perform Port attach callbacks to registered ULPs 13450 */ 13451 static void 13452 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13453 { 13454 fp_soft_attach_t *att; 13455 13456 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13457 att->att_cmd = cmd; 13458 att->att_port = port; 13459 13460 /* 13461 * We need to remember whether or not fctl_busy_port 13462 * succeeded so we know whether or not to call 13463 * fctl_idle_port when the task is complete. 13464 */ 13465 13466 if (fctl_busy_port(port) == 0) { 13467 att->att_need_pm_idle = B_TRUE; 13468 } else { 13469 att->att_need_pm_idle = B_FALSE; 13470 } 13471 13472 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13473 att, KM_SLEEP); 13474 } 13475 13476 13477 /* 13478 * Forward state change notifications on to interested ULPs. 13479 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13480 * real work. 
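 * The fc_port_clist_t handed off to the taskq carries only the new port
 * state and the current topology (in clist_flags); clist_len, clist_size
 * and clist_map are left zeroed, so this path never reports per-device
 * changes to the ULPs.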
13481 */ 13482 static int 13483 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13484 { 13485 fc_port_clist_t *clist; 13486 13487 clist = kmem_zalloc(sizeof (*clist), sleep); 13488 if (clist == NULL) { 13489 return (FC_NOMEM); 13490 } 13491 13492 clist->clist_state = statec; 13493 13494 mutex_enter(&port->fp_mutex); 13495 clist->clist_flags = port->fp_topology; 13496 mutex_exit(&port->fp_mutex); 13497 13498 clist->clist_port = (opaque_t)port; 13499 clist->clist_len = 0; 13500 clist->clist_size = 0; 13501 clist->clist_map = NULL; 13502 13503 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13504 clist, KM_SLEEP); 13505 13506 return (FC_SUCCESS); 13507 } 13508 13509 13510 /* 13511 * Get name server map 13512 */ 13513 static int 13514 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13515 uint32_t *len, uint32_t sid) 13516 { 13517 int ret; 13518 fctl_ns_req_t *ns_cmd; 13519 13520 /* 13521 * Don't let the allocator do anything for response; 13522 * we have have buffer ready to fillout. 13523 */ 13524 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13525 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13526 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13527 13528 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13529 ns_cmd->ns_data_buf = (caddr_t)*map; 13530 13531 ASSERT(ns_cmd != NULL); 13532 13533 ns_cmd->ns_gan_index = 0; 13534 ns_cmd->ns_gan_sid = sid; 13535 ns_cmd->ns_cmd_code = NS_GA_NXT; 13536 ns_cmd->ns_gan_max = *len; 13537 13538 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13539 13540 if (ns_cmd->ns_gan_index != *len) { 13541 *len = ns_cmd->ns_gan_index; 13542 } 13543 ns_cmd->ns_data_len = 0; 13544 ns_cmd->ns_data_buf = NULL; 13545 fctl_free_ns_cmd(ns_cmd); 13546 13547 return (ret); 13548 } 13549 13550 13551 /* 13552 * Create a remote port in Fabric topology by using NS services 13553 */ 13554 static fc_remote_port_t * 13555 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13556 { 13557 int rval; 13558 job_request_t *job; 13559 fctl_ns_req_t *ns_cmd; 13560 fc_remote_port_t *pd; 13561 13562 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13563 13564 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13565 port, d_id); 13566 13567 #ifdef DEBUG 13568 mutex_enter(&port->fp_mutex); 13569 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13570 mutex_exit(&port->fp_mutex); 13571 #endif 13572 13573 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13574 if (job == NULL) { 13575 return (NULL); 13576 } 13577 13578 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13579 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13580 FCTL_NS_NO_DATA_BUF), sleep); 13581 if (ns_cmd == NULL) { 13582 return (NULL); 13583 } 13584 13585 job->job_result = FC_SUCCESS; 13586 ns_cmd->ns_gan_max = 1; 13587 ns_cmd->ns_cmd_code = NS_GA_NXT; 13588 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13589 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13590 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13591 13592 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13593 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13594 fctl_free_ns_cmd(ns_cmd); 13595 13596 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13597 fctl_dealloc_job(job); 13598 return (NULL); 13599 } 13600 fctl_dealloc_job(job); 13601 13602 pd = fctl_get_remote_port_by_did(port, d_id); 13603 13604 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13605 port, d_id, pd); 13606 13607 return (pd); 
}


/*
 * Check the permissions on an ioctl command. If the command requires an
 * EXCLUSIVE open and the port wasn't opened that way, return FAILURE to
 * just shut the door on it. If the ioctl command isn't in the
 * fp_perm_list table at all, shut the door on that too.
 *
 * Certain ioctls perform hardware accesses in FCA drivers, so users must
 * open the port for exclusive access while performing those operations.
 *
 * This prevents a casual user from inflicting damage on the port by
 * sending these ioctls from multiple processes/threads (there is no good
 * reason why one would need to do that) without realizing how expensive
 * such commands could turn out to be.
 *
 * It is also important to note that, even with an exclusive open,
 * multiple threads can share the same file descriptor and fire down
 * commands in parallel. To prevent that, the driver needs to make sure
 * that such a command isn't already in progress; the FP_EXCL_BUSY bit
 * of fp_flag takes care of this.
 */
static int
fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd)
{
	int ret = FC_FAILURE;
	int count;

	for (count = 0;
	    count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]);
	    count++) {
		if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) {
			if (fp_perm_list[count].fp_open_flag & open_flag) {
				ret = FC_SUCCESS;
			}
			break;
		}
	}

	return (ret);
}


/*
 * Bind the port driver's unsolicited and state change callbacks
 */
static int
fp_bind_callbacks(fc_local_port_t *port)
{
	fc_fca_bind_info_t	bind_info = {0};
	fc_fca_port_info_t	*port_info;
	int			rval = DDI_SUCCESS;
	uint16_t		class;
	int			node_namelen, port_namelen;
	char			*nname = NULL, *pname = NULL;

	ASSERT(!MUTEX_HELD(&port->fp_mutex));

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    "node-name", &nname) != DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD1(1, 0),
		    "fp_bind_callback fail to get node-name");
	}
	if (nname) {
		fc_str_to_wwn(nname, &(bind_info.port_nwwn));
	}

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
	    "port-name", &pname) != DDI_PROP_SUCCESS) {
		FP_TRACE(FP_NHEAD1(1, 0),
		    "fp_bind_callback fail to get port-name");
	}
	if (pname) {
		fc_str_to_wwn(pname, &(bind_info.port_pwwn));
	}

	if (port->fp_npiv_type == FC_NPIV_PORT) {
		bind_info.port_npiv = 1;
	}

	/*
	 * fca_bind_port returns the FCA driver's handle for the local
	 * port instance. If the port number isn't supported it returns
	 * NULL. It also sets up callbacks in the FCA for various things
	 * like state changes, unsolicited ELS, etc.
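	 * On a successful bind the returned pi_port_state, login
	 * parameters, hard address and symbolic node/port names are
	 * copied into the fc_local_port_t below; a NULL handle makes
	 * this routine return DDI_FAILURE.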
13696 */ 13697 bind_info.port_statec_cb = fp_statec_cb; 13698 bind_info.port_unsol_cb = fp_unsol_cb; 13699 bind_info.port_num = port->fp_port_num; 13700 bind_info.port_handle = (opaque_t)port; 13701 13702 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13703 13704 /* 13705 * Hold the port driver mutex as the callbacks are bound until the 13706 * service parameters are properly filled in (in order to be able to 13707 * properly respond to unsolicited ELS requests) 13708 */ 13709 mutex_enter(&port->fp_mutex); 13710 13711 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13712 port->fp_fca_dip, port_info, &bind_info); 13713 13714 if (port->fp_fca_handle == NULL) { 13715 rval = DDI_FAILURE; 13716 goto exit; 13717 } 13718 13719 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13720 port->fp_service_params = port_info->pi_login_params; 13721 port->fp_hard_addr = port_info->pi_hard_addr; 13722 13723 /* Copy from the FCA structure to the FP structure */ 13724 port->fp_hba_port_attrs = port_info->pi_attrs; 13725 13726 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13727 port->fp_rnid_init = 1; 13728 bcopy(&port_info->pi_rnid_params.params, 13729 &port->fp_rnid_params, 13730 sizeof (port->fp_rnid_params)); 13731 } else { 13732 port->fp_rnid_init = 0; 13733 } 13734 13735 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13736 if (node_namelen) { 13737 bcopy(&port_info->pi_attrs.sym_node_name, 13738 &port->fp_sym_node_name, 13739 node_namelen); 13740 port->fp_sym_node_namelen = node_namelen; 13741 } 13742 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13743 if (port_namelen) { 13744 bcopy(&port_info->pi_attrs.sym_port_name, 13745 &port->fp_sym_port_name, 13746 port_namelen); 13747 port->fp_sym_port_namelen = port_namelen; 13748 } 13749 13750 /* zero out the normally unused fields right away */ 13751 port->fp_service_params.ls_code.mbz = 0; 13752 port->fp_service_params.ls_code.ls_code = 0; 13753 bzero(&port->fp_service_params.reserved, 13754 sizeof (port->fp_service_params.reserved)); 13755 13756 class = port_info->pi_login_params.class_1.class_opt; 13757 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13758 13759 class = port_info->pi_login_params.class_2.class_opt; 13760 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13761 13762 class = port_info->pi_login_params.class_3.class_opt; 13763 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13764 13765 exit: 13766 if (nname) { 13767 ddi_prop_free(nname); 13768 } 13769 if (pname) { 13770 ddi_prop_free(pname); 13771 } 13772 mutex_exit(&port->fp_mutex); 13773 kmem_free(port_info, sizeof (*port_info)); 13774 13775 return (rval); 13776 } 13777 13778 13779 /* 13780 * Retrieve FCA capabilities 13781 */ 13782 static void 13783 fp_retrieve_caps(fc_local_port_t *port) 13784 { 13785 int rval; 13786 int ub_count; 13787 fc_fcp_dma_t fcp_dma; 13788 fc_reset_action_t action; 13789 fc_dma_behavior_t dma_behavior; 13790 13791 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13792 13793 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13794 FC_CAP_UNSOL_BUF, &ub_count); 13795 13796 switch (rval) { 13797 case FC_CAP_FOUND: 13798 case FC_CAP_SETTABLE: 13799 switch (ub_count) { 13800 case 0: 13801 break; 13802 13803 case -1: 13804 ub_count = fp_unsol_buf_count; 13805 break; 13806 13807 default: 13808 /* 1/4th of total buffers is my share */ 13809 ub_count = 13810 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13811 break; 13812 } 13813 break; 13814 13815 default: 13816 ub_count = 0; 13817 break; 13818 } 13819 13820 mutex_enter(&port->fp_mutex); 13821 port->fp_ub_count = ub_count; 13822 mutex_exit(&port->fp_mutex); 13823 13824 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13825 FC_CAP_POST_RESET_BEHAVIOR, &action); 13826 13827 switch (rval) { 13828 case FC_CAP_FOUND: 13829 case FC_CAP_SETTABLE: 13830 switch (action) { 13831 case FC_RESET_RETURN_NONE: 13832 case FC_RESET_RETURN_ALL: 13833 case FC_RESET_RETURN_OUTSTANDING: 13834 break; 13835 13836 default: 13837 action = FC_RESET_RETURN_NONE; 13838 break; 13839 } 13840 break; 13841 13842 default: 13843 action = FC_RESET_RETURN_NONE; 13844 break; 13845 } 13846 mutex_enter(&port->fp_mutex); 13847 port->fp_reset_action = action; 13848 mutex_exit(&port->fp_mutex); 13849 13850 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13851 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13852 13853 switch (rval) { 13854 case FC_CAP_FOUND: 13855 switch (dma_behavior) { 13856 case FC_ALLOW_STREAMING: 13857 /* FALLTHROUGH */ 13858 case FC_NO_STREAMING: 13859 break; 13860 13861 default: 13862 /* 13863 * If capability was found and the value 13864 * was incorrect assume the worst 13865 */ 13866 dma_behavior = FC_NO_STREAMING; 13867 break; 13868 } 13869 break; 13870 13871 default: 13872 /* 13873 * If capability was not defined - allow streaming; existing 13874 * FCAs should not be affected. 13875 */ 13876 dma_behavior = FC_ALLOW_STREAMING; 13877 break; 13878 } 13879 mutex_enter(&port->fp_mutex); 13880 port->fp_dma_behavior = dma_behavior; 13881 mutex_exit(&port->fp_mutex); 13882 13883 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13884 FC_CAP_FCP_DMA, &fcp_dma); 13885 13886 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13887 fcp_dma != FC_DVMA_SPACE)) { 13888 fcp_dma = FC_DVMA_SPACE; 13889 } 13890 13891 mutex_enter(&port->fp_mutex); 13892 port->fp_fcp_dma = fcp_dma; 13893 mutex_exit(&port->fp_mutex); 13894 } 13895 13896 13897 /* 13898 * Handle Domain, Area changes in the Fabric. 
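 * The caller passes the affected address and a mask that selects how much
 * of it is significant: 0xFF0000 for a domain RSCN, 0xFFFF00 for an area
 * RSCN, and id == mask == 0 for a fabric address RSCN (which matches every
 * port). A remote port is considered affected when
 * (pd_port_id.port_id & mask) == id.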
13899 */ 13900 static void 13901 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13902 job_request_t *job, int sleep) 13903 { 13904 #ifdef DEBUG 13905 uint32_t dcnt; 13906 #endif 13907 int rval; 13908 int send; 13909 int index; 13910 int listindex; 13911 int login; 13912 int job_flags; 13913 char ww_name[17]; 13914 uint32_t d_id; 13915 uint32_t count; 13916 fctl_ns_req_t *ns_cmd; 13917 fc_portmap_t *list; 13918 fc_orphan_t *orp; 13919 fc_orphan_t *norp; 13920 fc_orphan_t *prev; 13921 fc_remote_port_t *pd; 13922 fc_remote_port_t *npd; 13923 struct pwwn_hash *head; 13924 13925 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13926 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13927 0, sleep); 13928 if (ns_cmd == NULL) { 13929 mutex_enter(&port->fp_mutex); 13930 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13931 --port->fp_rscn_count; 13932 } 13933 mutex_exit(&port->fp_mutex); 13934 13935 return; 13936 } 13937 ns_cmd->ns_cmd_code = NS_GID_PN; 13938 13939 /* 13940 * We need to get a new count of devices from the 13941 * name server, which will also create any new devices 13942 * as needed. 13943 */ 13944 13945 (void) fp_ns_get_devcount(port, job, 1, sleep); 13946 13947 FP_TRACE(FP_NHEAD1(3, 0), 13948 "fp_validate_area_domain: get_devcount found %d devices", 13949 port->fp_total_devices); 13950 13951 mutex_enter(&port->fp_mutex); 13952 13953 for (count = index = 0; index < pwwn_table_size; index++) { 13954 head = &port->fp_pwwn_table[index]; 13955 pd = head->pwwn_head; 13956 while (pd != NULL) { 13957 mutex_enter(&pd->pd_mutex); 13958 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13959 if ((pd->pd_port_id.port_id & mask) == id && 13960 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13961 count++; 13962 pd->pd_type = PORT_DEVICE_OLD; 13963 pd->pd_flags = PD_ELS_MARK; 13964 } 13965 } 13966 mutex_exit(&pd->pd_mutex); 13967 pd = pd->pd_wwn_hnext; 13968 } 13969 } 13970 13971 #ifdef DEBUG 13972 dcnt = count; 13973 #endif /* DEBUG */ 13974 13975 /* 13976 * Since port->fp_orphan_count is declared an 'int' it is 13977 * theoretically possible that the count could go negative. 13978 * 13979 * This would be bad and if that happens we really do want 13980 * to know. 13981 */ 13982 13983 ASSERT(port->fp_orphan_count >= 0); 13984 13985 count += port->fp_orphan_count; 13986 13987 /* 13988 * We add the port->fp_total_devices value to the count 13989 * in the case where our port is newly attached. This is 13990 * because we haven't done any discovery and we don't have 13991 * any orphans in the port's orphan list. If we do not do 13992 * this addition to count then we won't alloc enough kmem 13993 * to do discovery with. 
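	 * Either way, the resulting count sizes the fc_portmap_t change
	 * list allocated below, which is eventually handed to
	 * fp_ulp_devc_cb() or freed if nothing ends up on it.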
13994 */ 13995 13996 if (count == 0) { 13997 count += port->fp_total_devices; 13998 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13999 "0x%x orphans found, using 0x%x", 14000 port->fp_orphan_count, count); 14001 } 14002 14003 mutex_exit(&port->fp_mutex); 14004 14005 /* 14006 * Allocate the change list 14007 */ 14008 14009 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 14010 if (list == NULL) { 14011 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 14012 " Not enough memory to service RSCNs" 14013 " for %d ports, continuing...", count); 14014 14015 fctl_free_ns_cmd(ns_cmd); 14016 14017 mutex_enter(&port->fp_mutex); 14018 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14019 --port->fp_rscn_count; 14020 } 14021 mutex_exit(&port->fp_mutex); 14022 14023 return; 14024 } 14025 14026 /* 14027 * Attempt to validate or invalidate the devices that were 14028 * already in the pwwn hash table. 14029 */ 14030 14031 mutex_enter(&port->fp_mutex); 14032 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 14033 head = &port->fp_pwwn_table[index]; 14034 npd = head->pwwn_head; 14035 14036 while ((pd = npd) != NULL) { 14037 npd = pd->pd_wwn_hnext; 14038 14039 mutex_enter(&pd->pd_mutex); 14040 if ((pd->pd_port_id.port_id & mask) == id && 14041 pd->pd_flags == PD_ELS_MARK) { 14042 la_wwn_t *pwwn; 14043 14044 job->job_result = FC_SUCCESS; 14045 14046 ((ns_req_gid_pn_t *) 14047 (ns_cmd->ns_cmd_buf))->pwwn = 14048 pd->pd_port_name; 14049 14050 pwwn = &pd->pd_port_name; 14051 d_id = pd->pd_port_id.port_id; 14052 14053 mutex_exit(&pd->pd_mutex); 14054 mutex_exit(&port->fp_mutex); 14055 14056 rval = fp_ns_query(port, ns_cmd, job, 1, 14057 sleep); 14058 if (rval != FC_SUCCESS) { 14059 fc_wwn_to_str(pwwn, ww_name); 14060 14061 FP_TRACE(FP_NHEAD1(3, 0), 14062 "AREA RSCN: PD disappeared; " 14063 "d_id=%x, PWWN=%s", d_id, ww_name); 14064 14065 FP_TRACE(FP_NHEAD2(9, 0), 14066 "N_x Port with D_ID=%x," 14067 " PWWN=%s disappeared from fabric", 14068 d_id, ww_name); 14069 14070 fp_fillout_old_map(list + listindex++, 14071 pd, 1); 14072 } else { 14073 fctl_copy_portmap(list + listindex++, 14074 pd); 14075 14076 mutex_enter(&pd->pd_mutex); 14077 pd->pd_flags = PD_ELS_IN_PROGRESS; 14078 mutex_exit(&pd->pd_mutex); 14079 } 14080 14081 mutex_enter(&port->fp_mutex); 14082 } else { 14083 mutex_exit(&pd->pd_mutex); 14084 } 14085 } 14086 } 14087 14088 mutex_exit(&port->fp_mutex); 14089 14090 ASSERT(listindex == dcnt); 14091 14092 job->job_counter = listindex; 14093 job_flags = job->job_flags; 14094 job->job_flags |= JOB_TYPE_FP_ASYNC; 14095 14096 /* 14097 * Login (if we were the initiator) or validate devices in the 14098 * port map. 14099 */ 14100 14101 for (index = 0; index < listindex; index++) { 14102 pd = list[index].map_pd; 14103 14104 mutex_enter(&pd->pd_mutex); 14105 ASSERT((pd->pd_port_id.port_id & mask) == id); 14106 14107 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14108 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14109 mutex_exit(&pd->pd_mutex); 14110 fp_jobdone(job); 14111 continue; 14112 } 14113 14114 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14115 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14116 d_id = pd->pd_port_id.port_id; 14117 mutex_exit(&pd->pd_mutex); 14118 14119 if ((d_id & mask) == id && send) { 14120 if (login) { 14121 FP_TRACE(FP_NHEAD1(6, 0), 14122 "RSCN and PLOGI request;" 14123 " pd=%p, job=%p d_id=%x, index=%d", pd, 14124 job, d_id, index); 14125 14126 rval = fp_port_login(port, d_id, job, 14127 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14128 if (rval != FC_SUCCESS) { 14129 mutex_enter(&pd->pd_mutex); 14130 pd->pd_flags = PD_IDLE; 14131 mutex_exit(&pd->pd_mutex); 14132 14133 job->job_result = rval; 14134 fp_jobdone(job); 14135 } 14136 14137 FP_TRACE(FP_NHEAD2(4, 0), 14138 "PLOGI succeeded:no skip(1) for " 14139 "D_ID %x", d_id); 14140 list[index].map_flags |= 14141 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14142 } else { 14143 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14144 " pd=%p, job=%p d_id=%x, index=%d", pd, 14145 job, d_id, index); 14146 14147 rval = fp_ns_validate_device(port, pd, job, 14148 0, sleep); 14149 if (rval != FC_SUCCESS) { 14150 fp_jobdone(job); 14151 } 14152 mutex_enter(&pd->pd_mutex); 14153 pd->pd_flags = PD_IDLE; 14154 mutex_exit(&pd->pd_mutex); 14155 } 14156 } else { 14157 FP_TRACE(FP_NHEAD1(6, 0), 14158 "RSCN and NO request sent; pd=%p," 14159 " d_id=%x, index=%d", pd, d_id, index); 14160 14161 mutex_enter(&pd->pd_mutex); 14162 pd->pd_flags = PD_IDLE; 14163 mutex_exit(&pd->pd_mutex); 14164 14165 fp_jobdone(job); 14166 } 14167 } 14168 14169 if (listindex) { 14170 fctl_jobwait(job); 14171 } 14172 job->job_flags = job_flags; 14173 14174 /* 14175 * Orphan list validation. 14176 */ 14177 mutex_enter(&port->fp_mutex); 14178 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14179 orp != NULL; orp = norp) { 14180 norp = orp->orp_next; 14181 mutex_exit(&port->fp_mutex); 14182 14183 job->job_counter = 1; 14184 job->job_result = FC_SUCCESS; 14185 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14186 14187 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14188 14189 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14190 ((ns_resp_gid_pn_t *) 14191 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14192 14193 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14194 if (rval == FC_SUCCESS) { 14195 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14196 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14197 if (pd != NULL) { 14198 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14199 14200 FP_TRACE(FP_NHEAD1(6, 0), 14201 "RSCN and ORPHAN list " 14202 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14203 14204 FP_TRACE(FP_NHEAD2(6, 0), 14205 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14206 " in fabric", d_id, ww_name); 14207 14208 mutex_enter(&port->fp_mutex); 14209 if (prev) { 14210 prev->orp_next = orp->orp_next; 14211 } else { 14212 ASSERT(orp == port->fp_orphan_list); 14213 port->fp_orphan_list = orp->orp_next; 14214 } 14215 port->fp_orphan_count--; 14216 mutex_exit(&port->fp_mutex); 14217 14218 kmem_free(orp, sizeof (*orp)); 14219 fctl_copy_portmap(list + listindex++, pd); 14220 } else { 14221 prev = orp; 14222 } 14223 } else { 14224 prev = orp; 14225 } 14226 mutex_enter(&port->fp_mutex); 14227 } 14228 mutex_exit(&port->fp_mutex); 14229 14230 /* 14231 * One more pass through the list to delist old devices from 14232 * the d_id and pwwn tables and possibly add to the orphan list. 
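	 * Entries still marked PORT_DEVICE_OLD are invalidated here and,
	 * if we were the PLOGI initiator, put on the orphan list so a
	 * later RSCN can resurrect them; devices that remained logged in
	 * simply get their LOGO tolerance counter reset.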
14233 */ 14234 14235 for (index = 0; index < listindex; index++) { 14236 pd = list[index].map_pd; 14237 ASSERT(pd != NULL); 14238 14239 /* 14240 * Update PLOGI results; For NS validation 14241 * of orphan list, it is redundant 14242 * 14243 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14244 * appropriate as fctl_copy_portmap() will clear map_flags. 14245 */ 14246 if (list[index].map_flags & 14247 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14248 fctl_copy_portmap(list + index, pd); 14249 list[index].map_flags |= 14250 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14251 } else { 14252 fctl_copy_portmap(list + index, pd); 14253 } 14254 14255 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14256 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14257 pd, pd->pd_port_id.port_id, 14258 pd->pd_port_name.raw_wwn[0], 14259 pd->pd_port_name.raw_wwn[1], 14260 pd->pd_port_name.raw_wwn[2], 14261 pd->pd_port_name.raw_wwn[3], 14262 pd->pd_port_name.raw_wwn[4], 14263 pd->pd_port_name.raw_wwn[5], 14264 pd->pd_port_name.raw_wwn[6], 14265 pd->pd_port_name.raw_wwn[7]); 14266 14267 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14268 "results continued, pd=%p type=%x, flags=%x, state=%x", 14269 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14270 14271 mutex_enter(&pd->pd_mutex); 14272 if (pd->pd_type == PORT_DEVICE_OLD) { 14273 int initiator; 14274 14275 pd->pd_flags = PD_IDLE; 14276 initiator = (pd->pd_recepient == 14277 PD_PLOGI_INITIATOR) ? 1 : 0; 14278 14279 mutex_exit(&pd->pd_mutex); 14280 14281 mutex_enter(&port->fp_mutex); 14282 mutex_enter(&pd->pd_mutex); 14283 14284 pd->pd_state = PORT_DEVICE_INVALID; 14285 fctl_delist_did_table(port, pd); 14286 fctl_delist_pwwn_table(port, pd); 14287 14288 mutex_exit(&pd->pd_mutex); 14289 mutex_exit(&port->fp_mutex); 14290 14291 if (initiator) { 14292 (void) fctl_add_orphan(port, pd, sleep); 14293 } 14294 list[index].map_pd = pd; 14295 } else { 14296 ASSERT(pd->pd_flags == PD_IDLE); 14297 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14298 /* 14299 * Reset LOGO tolerance to zero 14300 */ 14301 fctl_tc_reset(&pd->pd_logo_tc); 14302 } 14303 mutex_exit(&pd->pd_mutex); 14304 } 14305 } 14306 14307 if (ns_cmd) { 14308 fctl_free_ns_cmd(ns_cmd); 14309 } 14310 if (listindex) { 14311 (void) fp_ulp_devc_cb(port, list, listindex, count, 14312 sleep, 0); 14313 } else { 14314 kmem_free(list, sizeof (*list) * count); 14315 14316 mutex_enter(&port->fp_mutex); 14317 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14318 --port->fp_rscn_count; 14319 } 14320 mutex_exit(&port->fp_mutex); 14321 } 14322 } 14323 14324 14325 /* 14326 * Work hard to make sense out of an RSCN page. 
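 * The affected D_ID is looked up locally and its current port WWN is
 * fetched from the name server (GPN_ID); the combination of those two
 * lookups decides whether the device disappeared, is unchanged, is brand
 * new (possibly a returning orphan), moved to a new D_ID, or now shows a
 * different PWWN behind the same D_ID.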
14327 */ 14328 static void 14329 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14330 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14331 int *listindex, int sleep) 14332 { 14333 int rval; 14334 char ww_name[17]; 14335 la_wwn_t *pwwn; 14336 fc_remote_port_t *pwwn_pd; 14337 fc_remote_port_t *did_pd; 14338 14339 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14340 14341 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14342 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14343 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14344 14345 if (did_pd != NULL) { 14346 mutex_enter(&did_pd->pd_mutex); 14347 if (did_pd->pd_flags != PD_IDLE) { 14348 mutex_exit(&did_pd->pd_mutex); 14349 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14350 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14351 port, page->aff_d_id, did_pd); 14352 return; 14353 } 14354 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14355 mutex_exit(&did_pd->pd_mutex); 14356 } 14357 14358 job->job_counter = 1; 14359 14360 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14361 14362 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14363 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14364 14365 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14366 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14367 14368 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14369 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14370 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14371 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14372 ns_cmd->ns_resp_hdr.ct_expln); 14373 14374 job->job_counter = 1; 14375 14376 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14377 /* 14378 * What this means is that the D_ID 14379 * disappeared from the Fabric. 14380 */ 14381 if (did_pd == NULL) { 14382 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14383 " NULL PD disappeared, rval=%x", rval); 14384 return; 14385 } 14386 14387 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14388 14389 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14390 (uint32_t)(uintptr_t)job->job_cb_arg; 14391 14392 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14393 14394 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14395 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14396 14397 FP_TRACE(FP_NHEAD2(9, 0), 14398 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14399 14400 FP_TRACE(FP_NHEAD2(9, 0), 14401 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14402 " fabric", page->aff_d_id, ww_name); 14403 14404 mutex_enter(&did_pd->pd_mutex); 14405 did_pd->pd_flags = PD_IDLE; 14406 mutex_exit(&did_pd->pd_mutex); 14407 14408 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14409 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14410 14411 return; 14412 } 14413 14414 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14415 14416 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14417 /* 14418 * There is no change. Do PLOGI again and add it to 14419 * ULP portmap baggage and return. Note: When RSCNs 14420 * arrive with per page states, the need for PLOGI 14421 * can be determined correctly. 
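		 * The PLOGI below is issued with FP_CMD_PLOGI_RETAIN;
		 * on success the portmap entry is tagged with
		 * PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY, and on failure
		 * it is converted into an OLD map entry instead.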
14422 */ 14423 mutex_enter(&pwwn_pd->pd_mutex); 14424 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14425 mutex_exit(&pwwn_pd->pd_mutex); 14426 14427 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14428 (uint32_t)(uintptr_t)job->job_cb_arg; 14429 14430 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14431 14432 mutex_enter(&pwwn_pd->pd_mutex); 14433 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14434 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14435 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14436 mutex_exit(&pwwn_pd->pd_mutex); 14437 14438 rval = fp_port_login(port, page->aff_d_id, job, 14439 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14440 if (rval == FC_SUCCESS) { 14441 fp_jobwait(job); 14442 rval = job->job_result; 14443 14444 /* 14445 * Reset LOGO tolerance to zero 14446 * Also we are the PLOGI initiator now. 14447 */ 14448 mutex_enter(&pwwn_pd->pd_mutex); 14449 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14450 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14451 mutex_exit(&pwwn_pd->pd_mutex); 14452 } 14453 14454 if (rval == FC_SUCCESS) { 14455 struct fc_portmap *map = 14456 listptr + *listindex - 1; 14457 14458 FP_TRACE(FP_NHEAD2(4, 0), 14459 "PLOGI succeeded: no skip(2)" 14460 " for D_ID %x", page->aff_d_id); 14461 map->map_flags |= 14462 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14463 } else { 14464 FP_TRACE(FP_NHEAD2(9, rval), 14465 "PLOGI to D_ID=%x failed", page->aff_d_id); 14466 14467 FP_TRACE(FP_NHEAD2(9, 0), 14468 "N_x Port with D_ID=%x, PWWN=%s" 14469 " disappeared from fabric", 14470 page->aff_d_id, ww_name); 14471 14472 fp_fillout_old_map(listptr + 14473 *listindex - 1, pwwn_pd, 0); 14474 } 14475 } else { 14476 mutex_exit(&pwwn_pd->pd_mutex); 14477 } 14478 14479 mutex_enter(&did_pd->pd_mutex); 14480 did_pd->pd_flags = PD_IDLE; 14481 mutex_exit(&did_pd->pd_mutex); 14482 14483 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14484 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14485 job->job_result, pwwn_pd); 14486 14487 return; 14488 } 14489 14490 if (did_pd == NULL && pwwn_pd == NULL) { 14491 14492 fc_orphan_t *orp = NULL; 14493 fc_orphan_t *norp = NULL; 14494 fc_orphan_t *prev = NULL; 14495 14496 /* 14497 * Hunt down the orphan list before giving up. 
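		 * If the PWWN matches a queued orphan it is unlinked from
		 * fp_orphan_list (and fp_orphan_count decremented) before
		 * the remote port is recreated through the name server.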
14498 */ 14499 14500 mutex_enter(&port->fp_mutex); 14501 if (port->fp_orphan_count) { 14502 14503 for (orp = port->fp_orphan_list; orp; orp = norp) { 14504 norp = orp->orp_next; 14505 14506 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14507 prev = orp; 14508 continue; 14509 } 14510 14511 if (prev) { 14512 prev->orp_next = orp->orp_next; 14513 } else { 14514 ASSERT(orp == 14515 port->fp_orphan_list); 14516 port->fp_orphan_list = 14517 orp->orp_next; 14518 } 14519 port->fp_orphan_count--; 14520 break; 14521 } 14522 } 14523 14524 mutex_exit(&port->fp_mutex); 14525 pwwn_pd = fp_create_remote_port_by_ns(port, 14526 page->aff_d_id, sleep); 14527 14528 if (pwwn_pd != NULL) { 14529 14530 if (orp) { 14531 fc_wwn_to_str(&orp->orp_pwwn, 14532 ww_name); 14533 14534 FP_TRACE(FP_NHEAD2(9, 0), 14535 "N_x Port with D_ID=%x," 14536 " PWWN=%s reappeared in fabric", 14537 page->aff_d_id, ww_name); 14538 14539 kmem_free(orp, sizeof (*orp)); 14540 } 14541 14542 (listptr + *listindex)-> 14543 map_rscn_info.ulp_rscn_count = 14544 (uint32_t)(uintptr_t)job->job_cb_arg; 14545 14546 fctl_copy_portmap(listptr + 14547 (*listindex)++, pwwn_pd); 14548 } 14549 14550 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14551 "Case TWO", page->aff_d_id); 14552 14553 return; 14554 } 14555 14556 if (pwwn_pd != NULL && did_pd == NULL) { 14557 uint32_t old_d_id; 14558 uint32_t d_id = page->aff_d_id; 14559 14560 /* 14561 * What this means is there is a new D_ID for this 14562 * Port WWN. Take out the port device off D_ID 14563 * list and put it back with a new D_ID. Perform 14564 * PLOGI if already logged in. 14565 */ 14566 mutex_enter(&port->fp_mutex); 14567 mutex_enter(&pwwn_pd->pd_mutex); 14568 14569 old_d_id = pwwn_pd->pd_port_id.port_id; 14570 14571 fctl_delist_did_table(port, pwwn_pd); 14572 14573 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14574 (uint32_t)(uintptr_t)job->job_cb_arg; 14575 14576 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14577 &d_id, NULL); 14578 fctl_enlist_did_table(port, pwwn_pd); 14579 14580 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14581 " Case THREE, pd=%p," 14582 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14583 14584 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14585 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14586 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14587 14588 mutex_exit(&pwwn_pd->pd_mutex); 14589 mutex_exit(&port->fp_mutex); 14590 14591 FP_TRACE(FP_NHEAD2(9, 0), 14592 "N_x Port with D_ID=%x, PWWN=%s has a new" 14593 " D_ID=%x now", old_d_id, ww_name, d_id); 14594 14595 rval = fp_port_login(port, page->aff_d_id, job, 14596 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14597 if (rval == FC_SUCCESS) { 14598 fp_jobwait(job); 14599 rval = job->job_result; 14600 } 14601 14602 if (rval != FC_SUCCESS) { 14603 fp_fillout_old_map(listptr + 14604 *listindex - 1, pwwn_pd, 0); 14605 } 14606 } else { 14607 mutex_exit(&pwwn_pd->pd_mutex); 14608 mutex_exit(&port->fp_mutex); 14609 } 14610 14611 return; 14612 } 14613 14614 if (pwwn_pd == NULL && did_pd != NULL) { 14615 fc_portmap_t *ptr; 14616 uint32_t len = 1; 14617 char old_ww_name[17]; 14618 14619 mutex_enter(&did_pd->pd_mutex); 14620 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14621 mutex_exit(&did_pd->pd_mutex); 14622 14623 fc_wwn_to_str(pwwn, ww_name); 14624 14625 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14626 (uint32_t)(uintptr_t)job->job_cb_arg; 14627 14628 /* 14629 * What this means is that there is a new Port WWN for 14630 * this D_ID; Mark the Port device as old and provide 
	if (pwwn_pd == NULL && did_pd != NULL) {
		fc_portmap_t *ptr;
		uint32_t len = 1;
		char old_ww_name[17];

		mutex_enter(&did_pd->pd_mutex);
		fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name);
		mutex_exit(&did_pd->pd_mutex);

		fc_wwn_to_str(pwwn, ww_name);

		(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
		    (uint32_t)(uintptr_t)job->job_cb_arg;

		/*
		 * There is a new Port WWN for this D_ID; mark the port
		 * device as old and report the new PWWN and D_ID
		 * combination as a new entry.
		 */
		fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0);

		FP_TRACE(FP_NHEAD2(9, 0),
		    "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now",
		    page->aff_d_id, old_ww_name, ww_name);

		(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
		    (uint32_t)(uintptr_t)job->job_cb_arg;

		ptr = listptr + (*listindex)++;

		job->job_counter = 1;

		if (fp_ns_getmap(port, job, &ptr, &len,
		    page->aff_d_id - 1) != FC_SUCCESS) {
			(*listindex)--;
		}

		mutex_enter(&did_pd->pd_mutex);
		did_pd->pd_flags = PD_IDLE;
		mutex_exit(&did_pd->pd_mutex);

		return;
	}

	/*
	 * A weird case: both the Port WWN and the D_ID exist, but they do
	 * not match up with each other.  Take the port device handle off
	 * the Port WWN table, fix it up with the new Port WWN, and put it
	 * back; in the meantime mark the port device corresponding to the
	 * old Port WWN as OLD.
	 */
	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p,"
	    " did_pd=%p", pwwn_pd, did_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pwwn_pd->pd_mutex);

	pwwn_pd->pd_type = PORT_DEVICE_OLD;
	pwwn_pd->pd_state = PORT_DEVICE_INVALID;
	fctl_delist_did_table(port, pwwn_pd);
	fctl_delist_pwwn_table(port, pwwn_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x",
	    pwwn_pd->pd_port_id.port_id,
	    pwwn_pd->pd_port_name.raw_wwn[0],
	    pwwn_pd->pd_port_name.raw_wwn[1],
	    pwwn_pd->pd_port_name.raw_wwn[2],
	    pwwn_pd->pd_port_name.raw_wwn[3],
	    pwwn_pd->pd_port_name.raw_wwn[4],
	    pwwn_pd->pd_port_name.raw_wwn[5],
	    pwwn_pd->pd_port_name.raw_wwn[6],
	    pwwn_pd->pd_port_name.raw_wwn[7]);

	mutex_exit(&pwwn_pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd);

	mutex_enter(&port->fp_mutex);
	mutex_enter(&did_pd->pd_mutex);

	fctl_delist_pwwn_table(port, did_pd);

	(listptr + *listindex)->map_rscn_info.ulp_rscn_count =
	    (uint32_t)(uintptr_t)job->job_cb_arg;

	fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn);
	fctl_enlist_pwwn_table(port, did_pd);

	FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued,"
	    " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x",
	    did_pd->pd_port_id.port_id, did_pd->pd_state,
	    did_pd->pd_port_name.raw_wwn[0],
	    did_pd->pd_port_name.raw_wwn[1],
	    did_pd->pd_port_name.raw_wwn[2],
	    did_pd->pd_port_name.raw_wwn[3],
	    did_pd->pd_port_name.raw_wwn[4],
	    did_pd->pd_port_name.raw_wwn[5],
	    did_pd->pd_port_name.raw_wwn[6],
	    did_pd->pd_port_name.raw_wwn[7]);

	if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) ||
	    (did_pd->pd_aux_flags & PD_LOGGED_OUT)) {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);

		rval = fp_port_login(port, page->aff_d_id, job,
		    FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL);
		if (rval == FC_SUCCESS) {
			fp_jobwait(job);
			if (job->job_result != FC_SUCCESS) {
				fp_fillout_old_map(listptr +
				    *listindex - 1, did_pd, 0);
			}
		} else {
			fp_fillout_old_map(listptr + *listindex - 1, did_pd, 0);
		}
	} else {
		mutex_exit(&did_pd->pd_mutex);
		mutex_exit(&port->fp_mutex);
	}

	mutex_enter(&did_pd->pd_mutex);
	did_pd->pd_flags = PD_IDLE;
	mutex_exit(&did_pd->pd_mutex);
}


/*
 * Check with the name server for the presence of this port WWN
 * (a GID_PN query: given the port WWN, return the D_ID currently
 * registered for it).
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
	la_wwn_t pwwn;
	uint32_t flags;
	fctl_ns_req_t *ns_cmd;

	flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
	    flags, sleep);
	if (ns_cmd == NULL) {
		return (FC_NOMEM);
	}

	mutex_enter(&pd->pd_mutex);
	pwwn = pd->pd_port_name;
	mutex_exit(&pd->pd_mutex);

	ns_cmd->ns_cmd_code = NS_GID_PN;
	ns_cmd->ns_pd = pd;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

	return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by the FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check that the AL_PA is a valid arbitrated loop address;
 * zero is accepted in addition to the entries in fp_valid_alpas[].
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}


/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
	fp_unsol_spec_t *ub_spec = (fp_unsol_spec_t *)arg;

	fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
	    ub_spec->buf->ub_frame.type);
	kmem_free(ub_spec, sizeof (*ub_spec));
}

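/*
 * An illustrative fp_printf() call (the argument values are made up for
 * the example and do not appear elsewhere in this file):
 *
 *	fp_printf(port, CE_WARN, FP_LOG_ONLY, rval, pkt,
 *	    "unable to retrieve fabric name");
 *
 * Either a zero fc_errno or a NULL pkt is acceptable; packet state and
 * reason decoding is attempted only when fc_errno is zero and pkt is
 * non-NULL.
 */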
/*
 * Perform message reporting in a consistent manner. Unless there is
 * a strong reason NOT to use this function (which is very rare),
 * all message reporting should go through this.
 */
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
	caddr_t buf;
	va_list ap;

	switch (level) {
	case CE_NOTE:
		if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
			return;
		}
		break;

	case CE_WARN:
		if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
			return;
		}
		break;
	}

	/* Fixed-size message buffer; callers must keep messages short. */
	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	(void) sprintf(buf, "fp(%d): ", port->fp_instance);

	va_start(ap, fmt);
	(void) vsprintf(buf + strlen(buf), fmt, ap);
	va_end(ap);

	if (fc_errno) {
		char *errmsg;

		(void) fc_ulp_error(fc_errno, &errmsg);
		(void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
	} else {
		if (pkt) {
			caddr_t state, reason, action, expln;

			(void) fc_ulp_pkt_error(pkt, &state, &reason,
			    &action, &expln);

			(void) sprintf(buf + strlen(buf),
			    " state=%s, reason=%s", state, reason);

			if (pkt->pkt_resp_resid) {
				(void) sprintf(buf + strlen(buf),
				    " resp resid=%x\n", pkt->pkt_resp_resid);
			}
		}
	}

	switch (dest) {
	case FP_CONSOLE_ONLY:
		cmn_err(level, "^%s", buf);
		break;

	case FP_LOG_ONLY:
		cmn_err(level, "!%s", buf);
		break;

	default:
		cmn_err(level, "%s", buf);
		break;
	}

	kmem_free(buf, 256);
}

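/*
 * Login to a remote port on behalf of a user request (presumably the
 * FCIO_DEV_LOGIN ioctl path).  The remote port WWN is passed in
 * fcio_ibuf.  On switch topologies the WWN is resolved to a D_ID with a
 * name server GID_PN query; otherwise the existing remote port entry is
 * looked up by WWN.  The login itself is a retained PLOGI issued through
 * fp_port_login(), and ULPs are notified with a PORT_DEVICE_USER_LOGIN
 * change map entry.
 */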
static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int ret;
	uint32_t d_id;
	la_wwn_t pwwn;
	fc_remote_port_t *pd = NULL;
	fc_remote_port_t *held_pd = NULL;
	fctl_ns_req_t *ns_cmd;
	fc_portmap_t *changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}

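/*
 * Logout from a remote port on behalf of a user request (presumably the
 * FCIO_DEV_LOGOUT ioctl path).  Logins are reference counted: while
 * pd_login_count is greater than one only the count is dropped and ULPs
 * are told about a PORT_DEVICE_USER_LOGOUT; the last logout sends an
 * explicit LOGO ELS and delists the remote port from the D_ID and PWWN
 * tables.
 */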
static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t pwwn;
	fp_cmd_t *cmd;
	fc_portmap_t *changelist;
	fc_remote_port_t *pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		fctl_release_remote_port(pd);

		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}


/*
 * Send a syslog event for adapter port level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

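/*
 * Send a syslog event for target level events.  The nvlist carries the
 * same "instance" and "port-wwn" attributes as the port level events,
 * plus the remote port's "target-port-wwn" and "target-port-id".
 */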
static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t *attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}

/*
 * Map a remote port (pd) state to an FC-HBA port state.
 */
static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}
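/*
 * Usage note (illustrative only; the FC-HBA attribute callers live
 * elsewhere in this file): only a logged-in remote port is reported as
 * online, e.g.
 *
 *	port_attrs->PortState = fp_map_remote_port_state(pd->pd_state);
 *
 * Every other pd_state value maps to FC_HBA_PORTSTATE_UNKNOWN.
 */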