1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * NOT a DDI compliant Sun Fibre Channel port driver(fp) 26 * 27 */ 28 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/param.h> 32 #include <sys/errno.h> 33 #include <sys/uio.h> 34 #include <sys/buf.h> 35 #include <sys/modctl.h> 36 #include <sys/open.h> 37 #include <sys/file.h> 38 #include <sys/kmem.h> 39 #include <sys/poll.h> 40 #include <sys/conf.h> 41 #include <sys/thread.h> 42 #include <sys/var.h> 43 #include <sys/cmn_err.h> 44 #include <sys/stat.h> 45 #include <sys/ddi.h> 46 #include <sys/sunddi.h> 47 #include <sys/promif.h> 48 #include <sys/nvpair.h> 49 #include <sys/byteorder.h> 50 #include <sys/scsi/scsi.h> 51 #include <sys/fibre-channel/fc.h> 52 #include <sys/fibre-channel/impl/fc_ulpif.h> 53 #include <sys/fibre-channel/impl/fc_fcaif.h> 54 #include <sys/fibre-channel/impl/fctl_private.h> 55 #include <sys/fibre-channel/impl/fc_portif.h> 56 #include <sys/fibre-channel/impl/fp.h> 57 58 /* These are defined in fctl.c! 
*/ 59 extern int did_table_size; 60 extern int pwwn_table_size; 61 62 static struct cb_ops fp_cb_ops = { 63 fp_open, /* open */ 64 fp_close, /* close */ 65 nodev, /* strategy */ 66 nodev, /* print */ 67 nodev, /* dump */ 68 nodev, /* read */ 69 nodev, /* write */ 70 fp_ioctl, /* ioctl */ 71 nodev, /* devmap */ 72 nodev, /* mmap */ 73 nodev, /* segmap */ 74 nochpoll, /* chpoll */ 75 ddi_prop_op, /* cb_prop_op */ 76 0, /* streamtab */ 77 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */ 78 CB_REV, /* rev */ 79 nodev, /* aread */ 80 nodev /* awrite */ 81 }; 82 83 static struct dev_ops fp_ops = { 84 DEVO_REV, /* build revision */ 85 0, /* reference count */ 86 fp_getinfo, /* getinfo */ 87 nulldev, /* identify - Obsoleted */ 88 nulldev, /* probe */ 89 fp_attach, /* attach */ 90 fp_detach, /* detach */ 91 nodev, /* reset */ 92 &fp_cb_ops, /* cb_ops */ 93 NULL, /* bus_ops */ 94 fp_power /* power */ 95 }; 96 97 #define FP_VERSION "1.96" 98 #define FP_NAME_VERSION "SunFC Port v" FP_VERSION 99 100 char *fp_version = FP_NAME_VERSION; 101 102 static struct modldrv modldrv = { 103 &mod_driverops, /* Type of Module */ 104 FP_NAME_VERSION, /* Name/Version of fp */ 105 &fp_ops /* driver ops */ 106 }; 107 108 static struct modlinkage modlinkage = { 109 MODREV_1, /* Rev of the loadable modules system */ 110 &modldrv, /* NULL terminated list of */ 111 NULL /* Linkage structures */ 112 }; 113 114 115 116 static uint16_t ns_reg_cmds[] = { 117 NS_RPN_ID, 118 NS_RNN_ID, 119 NS_RCS_ID, 120 NS_RFT_ID, 121 NS_RPT_ID, 122 NS_RSPN_ID, 123 NS_RSNN_NN 124 }; 125 126 struct fp_xlat { 127 uchar_t xlat_state; 128 int xlat_rval; 129 } fp_xlat [] = { 130 { FC_PKT_SUCCESS, FC_SUCCESS }, 131 { FC_PKT_REMOTE_STOP, FC_FAILURE }, 132 { FC_PKT_LOCAL_RJT, FC_FAILURE }, 133 { FC_PKT_NPORT_RJT, FC_ELS_PREJECT }, 134 { FC_PKT_FABRIC_RJT, FC_ELS_FREJECT }, 135 { FC_PKT_LOCAL_BSY, FC_TRAN_BUSY }, 136 { FC_PKT_TRAN_BSY, FC_TRAN_BUSY }, 137 { FC_PKT_NPORT_BSY, FC_PBUSY }, 138 { FC_PKT_FABRIC_BSY, FC_FBUSY }, 139 { FC_PKT_LS_RJT, FC_FAILURE }, 140 { FC_PKT_BA_RJT, FC_FAILURE }, 141 { FC_PKT_TIMEOUT, FC_FAILURE }, 142 { FC_PKT_TRAN_ERROR, FC_TRANSPORT_ERROR }, 143 { FC_PKT_FAILURE, FC_FAILURE }, 144 { FC_PKT_PORT_OFFLINE, FC_OFFLINE } 145 }; 146 147 static uchar_t fp_valid_alpas[] = { 148 0x01, 0x02, 0x04, 0x08, 0x0F, 0x10, 0x17, 0x18, 0x1B, 149 0x1D, 0x1E, 0x1F, 0x23, 0x25, 0x26, 0x27, 0x29, 0x2A, 150 0x2B, 0x2C, 0x2D, 0x2E, 0x31, 0x32, 0x33, 0x34, 0x35, 151 0x36, 0x39, 0x3A, 0x3C, 0x43, 0x45, 0x46, 0x47, 0x49, 152 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x51, 0x52, 0x53, 0x54, 153 0x55, 0x56, 0x59, 0x5A, 0x5C, 0x63, 0x65, 0x66, 0x67, 154 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x71, 0x72, 0x73, 155 0x74, 0x75, 0x76, 0x79, 0x7A, 0x7C, 0x80, 0x81, 0x82, 156 0x84, 0x88, 0x8F, 0x90, 0x97, 0x98, 0x9B, 0x9D, 0x9E, 157 0x9F, 0xA3, 0xA5, 0xA6, 0xA7, 0xA9, 0xAA, 0xAB, 0xAC, 158 0xAD, 0xAE, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB9, 159 0xBA, 0xBC, 0xC3, 0xC5, 0xC6, 0xC7, 0xC9, 0xCA, 0xCB, 160 0xCC, 0xCD, 0xCE, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 161 0xD9, 0xDA, 0xDC, 0xE0, 0xE1, 0xE2, 0xE4, 0xE8, 0xEF 162 }; 163 164 static struct fp_perms { 165 uint16_t fp_ioctl_cmd; 166 uchar_t fp_open_flag; 167 } fp_perm_list [] = { 168 { FCIO_GET_NUM_DEVS, FP_OPEN }, 169 { FCIO_GET_DEV_LIST, FP_OPEN }, 170 { FCIO_GET_SYM_PNAME, FP_OPEN }, 171 { FCIO_GET_SYM_NNAME, FP_OPEN }, 172 { FCIO_SET_SYM_PNAME, FP_EXCL }, 173 { FCIO_SET_SYM_NNAME, FP_EXCL }, 174 { FCIO_GET_LOGI_PARAMS, FP_OPEN }, 175 { FCIO_DEV_LOGIN, FP_EXCL }, 176 { FCIO_DEV_LOGOUT, FP_EXCL }, 177 { FCIO_GET_STATE, FP_OPEN }, 
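	/*
	 * FP_OPEN/FP_EXCL in this table refer to the fp_flag bits managed by
	 * fp_open(): FP_EXCL is set only when the node is opened with FEXCL,
	 * so entries tagged FP_EXCL are intended to be allowed only through
	 * an exclusive open.
	 */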
178 { FCIO_DEV_REMOVE, FP_EXCL }, 179 { FCIO_GET_FCODE_REV, FP_OPEN }, 180 { FCIO_GET_FW_REV, FP_OPEN }, 181 { FCIO_GET_DUMP_SIZE, FP_OPEN }, 182 { FCIO_FORCE_DUMP, FP_EXCL }, 183 { FCIO_GET_DUMP, FP_OPEN }, 184 { FCIO_GET_TOPOLOGY, FP_OPEN }, 185 { FCIO_RESET_LINK, FP_EXCL }, 186 { FCIO_RESET_HARD, FP_EXCL }, 187 { FCIO_RESET_HARD_CORE, FP_EXCL }, 188 { FCIO_DIAG, FP_OPEN }, 189 { FCIO_NS, FP_EXCL }, 190 { FCIO_DOWNLOAD_FW, FP_EXCL }, 191 { FCIO_DOWNLOAD_FCODE, FP_EXCL }, 192 { FCIO_LINK_STATUS, FP_OPEN }, 193 { FCIO_GET_HOST_PARAMS, FP_OPEN }, 194 { FCIO_GET_NODE_ID, FP_OPEN }, 195 { FCIO_SET_NODE_ID, FP_EXCL }, 196 { FCIO_SEND_NODE_ID, FP_OPEN }, 197 { FCIO_GET_ADAPTER_ATTRIBUTES, FP_OPEN }, 198 { FCIO_GET_OTHER_ADAPTER_PORTS, FP_OPEN }, 199 { FCIO_GET_ADAPTER_PORT_ATTRIBUTES, FP_OPEN }, 200 { FCIO_GET_DISCOVERED_PORT_ATTRIBUTES, FP_OPEN }, 201 { FCIO_GET_PORT_ATTRIBUTES, FP_OPEN }, 202 { FCIO_GET_ADAPTER_PORT_STATS, FP_OPEN }, 203 { FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES, FP_OPEN }, 204 { FCIO_GET_NPIV_PORT_LIST, FP_OPEN }, 205 { FCIO_DELETE_NPIV_PORT, FP_OPEN }, 206 { FCIO_GET_NPIV_ATTRIBUTES, FP_OPEN }, 207 { FCIO_CREATE_NPIV_PORT, FP_OPEN }, 208 { FCIO_NPIV_GET_ADAPTER_ATTRIBUTES, FP_OPEN } 209 }; 210 211 static char *fp_pm_comps[] = { 212 "NAME=FC Port", 213 "0=Port Down", 214 "1=Port Up" 215 }; 216 217 218 #ifdef _LITTLE_ENDIAN 219 #define MAKE_BE_32(x) { \ 220 uint32_t *ptr1, i; \ 221 ptr1 = (uint32_t *)(x); \ 222 for (i = 0; i < sizeof (*(x)) / sizeof (uint32_t); i++) { \ 223 *ptr1 = BE_32(*ptr1); \ 224 ptr1++; \ 225 } \ 226 } 227 #else 228 #define MAKE_BE_32(x) 229 #endif 230 231 static uchar_t fp_verbosity = (FP_WARNING_MESSAGES | FP_FATAL_MESSAGES); 232 static uint32_t fp_options = 0; 233 234 static int fp_cmd_wait_cnt = FP_CMDWAIT_DELAY; 235 static int fp_retry_delay = FP_RETRY_DELAY; /* retry after this delay */ 236 static int fp_retry_count = FP_RETRY_COUNT; /* number of retries */ 237 unsigned int fp_offline_ticker; /* seconds */ 238 239 /* 240 * Driver global variable to anchor the list of soft state structs for 241 * all fp driver instances. Used with the Solaris DDI soft state functions. 
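 * Each instance's entry is allocated in fp_attach_handler() with
 * ddi_soft_state_zalloc(9F) and later retrieved by instance number with
 * ddi_get_soft_state(9F) in the open/close/ioctl, power and getinfo
 * entry points.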
242 */ 243 static void *fp_driver_softstate; 244 245 static clock_t fp_retry_ticks; 246 static clock_t fp_offline_ticks; 247 248 static int fp_retry_ticker; 249 static uint32_t fp_unsol_buf_count = FP_UNSOL_BUF_COUNT; 250 static uint32_t fp_unsol_buf_size = FP_UNSOL_BUF_SIZE; 251 252 static int fp_log_size = FP_LOG_SIZE; 253 static int fp_trace = FP_TRACE_DEFAULT; 254 static fc_trace_logq_t *fp_logq = NULL; 255 256 int fp_get_adapter_paths(char *pathList, int count); 257 static void fp_log_port_event(fc_local_port_t *port, char *subclass); 258 static void fp_log_target_event(fc_local_port_t *port, char *subclass, 259 la_wwn_t tgt_pwwn, uint32_t port_id); 260 static uint32_t fp_map_remote_port_state(uint32_t rm_state); 261 static void fp_init_symbolic_names(fc_local_port_t *port); 262 263 264 /* 265 * Perform global initialization 266 */ 267 int 268 _init(void) 269 { 270 int ret; 271 272 if ((ret = ddi_soft_state_init(&fp_driver_softstate, 273 sizeof (struct fc_local_port), 8)) != 0) { 274 return (ret); 275 } 276 277 if ((ret = scsi_hba_init(&modlinkage)) != 0) { 278 ddi_soft_state_fini(&fp_driver_softstate); 279 return (ret); 280 } 281 282 fp_logq = fc_trace_alloc_logq(fp_log_size); 283 284 if ((ret = mod_install(&modlinkage)) != 0) { 285 fc_trace_free_logq(fp_logq); 286 ddi_soft_state_fini(&fp_driver_softstate); 287 scsi_hba_fini(&modlinkage); 288 } 289 290 return (ret); 291 } 292 293 294 /* 295 * Prepare for driver unload 296 */ 297 int 298 _fini(void) 299 { 300 int ret; 301 302 if ((ret = mod_remove(&modlinkage)) == 0) { 303 fc_trace_free_logq(fp_logq); 304 ddi_soft_state_fini(&fp_driver_softstate); 305 scsi_hba_fini(&modlinkage); 306 } 307 308 return (ret); 309 } 310 311 312 /* 313 * Request mod_info() to handle all cases 314 */ 315 int 316 _info(struct modinfo *modinfo) 317 { 318 return (mod_info(&modlinkage, modinfo)); 319 } 320 321 322 /* 323 * fp_attach: 324 * 325 * The respective cmd handlers take care of performing 326 * ULP related invocations 327 */ 328 static int 329 fp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 330 { 331 int rval; 332 333 /* 334 * We check the value of fp_offline_ticker at this 335 * point. The variable is global for the driver and 336 * not specific to an instance. 337 * 338 * If there is no user-defined value found in /etc/system 339 * or fp.conf, then we use 90 seconds (FP_OFFLINE_TICKER). 340 * The minimum setting for this offline timeout according 341 * to the FC-FS2 standard (Fibre Channel Framing and 342 * Signalling-2, see www.t11.org) is R_T_TOV == 100msec. 343 * 344 * We do not recommend setting the value to less than 10 345 * seconds (RA_TOV) or more than 90 seconds. If this 346 * variable is greater than 90 seconds then drivers above 347 * fp (fcp, sd, scsi_vhci, vxdmp et al) might complain. 348 */ 349 350 fp_offline_ticker = ddi_prop_get_int(DDI_DEV_T_ANY, 351 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, "fp_offline_ticker", 352 FP_OFFLINE_TICKER); 353 354 if ((fp_offline_ticker < 10) || 355 (fp_offline_ticker > 90)) { 356 cmn_err(CE_WARN, "Setting fp_offline_ticker to " 357 "%d second(s). This is outside the " 358 "recommended range of 10..90 seconds", 359 fp_offline_ticker); 360 } 361 362 /* 363 * Tick every second when there are commands to retry. 
364 * It should tick at the least granular value of pkt_timeout 365 * (which is one second) 366 */ 367 fp_retry_ticker = 1; 368 369 fp_retry_ticks = drv_usectohz(fp_retry_ticker * 1000 * 1000); 370 fp_offline_ticks = drv_usectohz(fp_offline_ticker * 1000 * 1000); 371 372 switch (cmd) { 373 case DDI_ATTACH: 374 rval = fp_attach_handler(dip); 375 break; 376 377 case DDI_RESUME: 378 rval = fp_resume_handler(dip); 379 break; 380 381 default: 382 rval = DDI_FAILURE; 383 break; 384 } 385 return (rval); 386 } 387 388 389 /* 390 * fp_detach: 391 * 392 * If a ULP fails to handle cmd request converse of 393 * cmd is invoked for ULPs that previously succeeded 394 * cmd request. 395 */ 396 static int 397 fp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 398 { 399 int rval = DDI_FAILURE; 400 fc_local_port_t *port; 401 fc_attach_cmd_t converse; 402 uint8_t cnt; 403 404 if ((port = ddi_get_soft_state(fp_driver_softstate, 405 ddi_get_instance(dip))) == NULL) { 406 return (DDI_FAILURE); 407 } 408 409 mutex_enter(&port->fp_mutex); 410 411 if (port->fp_ulp_attach) { 412 mutex_exit(&port->fp_mutex); 413 return (DDI_FAILURE); 414 } 415 416 switch (cmd) { 417 case DDI_DETACH: 418 if (port->fp_task != FP_TASK_IDLE) { 419 mutex_exit(&port->fp_mutex); 420 return (DDI_FAILURE); 421 } 422 423 /* Let's attempt to quit the job handler gracefully */ 424 port->fp_soft_state |= FP_DETACH_INPROGRESS; 425 426 mutex_exit(&port->fp_mutex); 427 converse = FC_CMD_ATTACH; 428 if (fctl_detach_ulps(port, FC_CMD_DETACH, 429 &modlinkage) != FC_SUCCESS) { 430 mutex_enter(&port->fp_mutex); 431 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 432 mutex_exit(&port->fp_mutex); 433 rval = DDI_FAILURE; 434 break; 435 } 436 437 mutex_enter(&port->fp_mutex); 438 for (cnt = 0; (port->fp_job_head) && (cnt < fp_cmd_wait_cnt); 439 cnt++) { 440 mutex_exit(&port->fp_mutex); 441 delay(drv_usectohz(1000000)); 442 mutex_enter(&port->fp_mutex); 443 } 444 445 if (port->fp_job_head) { 446 mutex_exit(&port->fp_mutex); 447 rval = DDI_FAILURE; 448 break; 449 } 450 mutex_exit(&port->fp_mutex); 451 452 rval = fp_detach_handler(port); 453 break; 454 455 case DDI_SUSPEND: 456 mutex_exit(&port->fp_mutex); 457 converse = FC_CMD_RESUME; 458 if (fctl_detach_ulps(port, FC_CMD_SUSPEND, 459 &modlinkage) != FC_SUCCESS) { 460 rval = DDI_FAILURE; 461 break; 462 } 463 if ((rval = fp_suspend_handler(port)) != DDI_SUCCESS) { 464 (void) callb_generic_cpr(&port->fp_cpr_info, 465 CB_CODE_CPR_RESUME); 466 } 467 break; 468 469 default: 470 mutex_exit(&port->fp_mutex); 471 break; 472 } 473 474 /* 475 * Use softint to perform reattach. Mark fp_ulp_attach so we 476 * don't attempt to do this repeatedly on behalf of some persistent 477 * caller. 478 */ 479 if (rval != DDI_SUCCESS) { 480 mutex_enter(&port->fp_mutex); 481 port->fp_ulp_attach = 1; 482 483 /* 484 * If the port is in the low power mode then there is 485 * possibility that fca too could be in low power mode. 486 * Try to raise the power before calling attach ulps. 
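 * (pm_raise_power(9F) may block until the component actually reaches
 * FP_PM_PORT_UP, which is why fp_mutex is dropped before making the call.)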
487 */ 488 489 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 490 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 491 mutex_exit(&port->fp_mutex); 492 (void) pm_raise_power(port->fp_port_dip, 493 FP_PM_COMPONENT, FP_PM_PORT_UP); 494 } else { 495 mutex_exit(&port->fp_mutex); 496 } 497 498 499 fp_attach_ulps(port, converse); 500 501 mutex_enter(&port->fp_mutex); 502 while (port->fp_ulp_attach) { 503 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 504 } 505 506 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 507 508 /* 509 * Mark state as detach failed so asynchronous ULP attach 510 * events (downstream, not the ones we're initiating with 511 * the call to fp_attach_ulps) are not honored. We're 512 * really still in pending detach. 513 */ 514 port->fp_soft_state |= FP_DETACH_FAILED; 515 516 mutex_exit(&port->fp_mutex); 517 } 518 519 return (rval); 520 } 521 522 523 /* 524 * fp_getinfo: 525 * Given the device number, return either the 526 * dev_info_t pointer or the instance number. 527 */ 528 529 /* ARGSUSED */ 530 static int 531 fp_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result) 532 { 533 int rval; 534 minor_t instance; 535 fc_local_port_t *port; 536 537 rval = DDI_SUCCESS; 538 instance = getminor((dev_t)arg); 539 540 switch (cmd) { 541 case DDI_INFO_DEVT2DEVINFO: 542 if ((port = ddi_get_soft_state(fp_driver_softstate, 543 instance)) == NULL) { 544 rval = DDI_FAILURE; 545 break; 546 } 547 *result = (void *)port->fp_port_dip; 548 break; 549 550 case DDI_INFO_DEVT2INSTANCE: 551 *result = (void *)(uintptr_t)instance; 552 break; 553 554 default: 555 rval = DDI_FAILURE; 556 break; 557 } 558 559 return (rval); 560 } 561 562 563 /* 564 * Entry point for power up and power down request from kernel 565 */ 566 static int 567 fp_power(dev_info_t *dip, int comp, int level) 568 { 569 int rval = DDI_FAILURE; 570 fc_local_port_t *port; 571 572 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 573 if (port == NULL || comp != FP_PM_COMPONENT) { 574 return (rval); 575 } 576 577 switch (level) { 578 case FP_PM_PORT_UP: 579 rval = DDI_SUCCESS; 580 581 /* 582 * If the port is DDI_SUSPENDed, let the DDI_RESUME 583 * code complete the rediscovery. 584 */ 585 mutex_enter(&port->fp_mutex); 586 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 587 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 588 port->fp_pm_level = FP_PM_PORT_UP; 589 mutex_exit(&port->fp_mutex); 590 fctl_attach_ulps(port, FC_CMD_POWER_UP, &modlinkage); 591 break; 592 } 593 594 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 595 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 596 597 port->fp_pm_level = FP_PM_PORT_UP; 598 rval = fp_power_up(port); 599 if (rval != DDI_SUCCESS) { 600 port->fp_pm_level = FP_PM_PORT_DOWN; 601 } 602 } else { 603 port->fp_pm_level = FP_PM_PORT_UP; 604 } 605 mutex_exit(&port->fp_mutex); 606 break; 607 608 case FP_PM_PORT_DOWN: 609 mutex_enter(&port->fp_mutex); 610 611 ASSERT(!(port->fp_soft_state & FP_SOFT_NO_PMCOMP)); 612 if (port->fp_soft_state & FP_SOFT_NO_PMCOMP) { 613 /* 614 * PM framework goofed up. We have don't 615 * have any PM components. Let's never go down. 
616 */ 617 mutex_exit(&port->fp_mutex); 618 break; 619 620 } 621 622 if (port->fp_ulp_attach) { 623 /* We shouldn't let the power go down */ 624 mutex_exit(&port->fp_mutex); 625 break; 626 } 627 628 /* 629 * Not a whole lot to do if we are detaching 630 */ 631 if (port->fp_soft_state & FP_SOFT_IN_DETACH) { 632 port->fp_pm_level = FP_PM_PORT_DOWN; 633 mutex_exit(&port->fp_mutex); 634 rval = DDI_SUCCESS; 635 break; 636 } 637 638 if (!port->fp_pm_busy && !port->fp_pm_busy_nocomp) { 639 port->fp_pm_level = FP_PM_PORT_DOWN; 640 641 rval = fp_power_down(port); 642 if (rval != DDI_SUCCESS) { 643 port->fp_pm_level = FP_PM_PORT_UP; 644 ASSERT(!(port->fp_soft_state & 645 FP_SOFT_POWER_DOWN)); 646 } else { 647 ASSERT(port->fp_soft_state & 648 FP_SOFT_POWER_DOWN); 649 } 650 } 651 mutex_exit(&port->fp_mutex); 652 break; 653 654 default: 655 break; 656 } 657 658 return (rval); 659 } 660 661 662 /* 663 * Open FC port devctl node 664 */ 665 static int 666 fp_open(dev_t *devp, int flag, int otype, cred_t *credp) 667 { 668 int instance; 669 fc_local_port_t *port; 670 671 if (otype != OTYP_CHR) { 672 return (EINVAL); 673 } 674 675 /* 676 * This is not a toy to play with. Allow only powerful 677 * users (hopefully knowledgeable) to access the port 678 * (A hacker potentially could download a sick binary 679 * file into FCA) 680 */ 681 if (drv_priv(credp)) { 682 return (EPERM); 683 } 684 685 instance = (int)getminor(*devp); 686 687 port = ddi_get_soft_state(fp_driver_softstate, instance); 688 if (port == NULL) { 689 return (ENXIO); 690 } 691 692 mutex_enter(&port->fp_mutex); 693 if (port->fp_flag & FP_EXCL) { 694 /* 695 * It is already open for exclusive access. 696 * So shut the door on this caller. 697 */ 698 mutex_exit(&port->fp_mutex); 699 return (EBUSY); 700 } 701 702 if (flag & FEXCL) { 703 if (port->fp_flag & FP_OPEN) { 704 /* 705 * Exclusive operation not possible 706 * as it is already opened 707 */ 708 mutex_exit(&port->fp_mutex); 709 return (EBUSY); 710 } 711 port->fp_flag |= FP_EXCL; 712 } 713 port->fp_flag |= FP_OPEN; 714 mutex_exit(&port->fp_mutex); 715 716 return (0); 717 } 718 719 720 /* 721 * The driver close entry point is called on the last close() 722 * of a device. So it is perfectly alright to just clobber the 723 * open flag and reset it to idle (instead of having to reset 724 * each flag bits). For any confusion, check out close(9E). 
725 */ 726 727 /* ARGSUSED */ 728 static int 729 fp_close(dev_t dev, int flag, int otype, cred_t *credp) 730 { 731 int instance; 732 fc_local_port_t *port; 733 734 if (otype != OTYP_CHR) { 735 return (EINVAL); 736 } 737 738 instance = (int)getminor(dev); 739 740 port = ddi_get_soft_state(fp_driver_softstate, instance); 741 if (port == NULL) { 742 return (ENXIO); 743 } 744 745 mutex_enter(&port->fp_mutex); 746 if ((port->fp_flag & FP_OPEN) == 0) { 747 mutex_exit(&port->fp_mutex); 748 return (ENODEV); 749 } 750 port->fp_flag = FP_IDLE; 751 mutex_exit(&port->fp_mutex); 752 753 return (0); 754 } 755 756 /* 757 * Handle IOCTL requests 758 */ 759 760 /* ARGSUSED */ 761 static int 762 fp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval) 763 { 764 int instance; 765 int ret = 0; 766 fcio_t fcio; 767 fc_local_port_t *port; 768 769 instance = (int)getminor(dev); 770 771 port = ddi_get_soft_state(fp_driver_softstate, instance); 772 if (port == NULL) { 773 return (ENXIO); 774 } 775 776 mutex_enter(&port->fp_mutex); 777 if ((port->fp_flag & FP_OPEN) == 0) { 778 mutex_exit(&port->fp_mutex); 779 return (ENXIO); 780 } 781 782 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 783 mutex_exit(&port->fp_mutex); 784 return (ENXIO); 785 } 786 787 mutex_exit(&port->fp_mutex); 788 789 /* this will raise power if necessary */ 790 ret = fctl_busy_port(port); 791 if (ret != 0) { 792 return (ret); 793 } 794 795 ASSERT(port->fp_pm_level == FP_PM_PORT_UP); 796 797 798 switch (cmd) { 799 case FCIO_CMD: { 800 #ifdef _MULTI_DATAMODEL 801 switch (ddi_model_convert_from(mode & FMODELS)) { 802 case DDI_MODEL_ILP32: { 803 struct fcio32 fcio32; 804 805 if (ddi_copyin((void *)data, (void *)&fcio32, 806 sizeof (struct fcio32), mode)) { 807 ret = EFAULT; 808 break; 809 } 810 fcio.fcio_xfer = fcio32.fcio_xfer; 811 fcio.fcio_cmd = fcio32.fcio_cmd; 812 fcio.fcio_flags = fcio32.fcio_flags; 813 fcio.fcio_cmd_flags = fcio32.fcio_cmd_flags; 814 fcio.fcio_ilen = (size_t)fcio32.fcio_ilen; 815 fcio.fcio_ibuf = 816 (caddr_t)(uintptr_t)fcio32.fcio_ibuf; 817 fcio.fcio_olen = (size_t)fcio32.fcio_olen; 818 fcio.fcio_obuf = 819 (caddr_t)(uintptr_t)fcio32.fcio_obuf; 820 fcio.fcio_alen = (size_t)fcio32.fcio_alen; 821 fcio.fcio_abuf = 822 (caddr_t)(uintptr_t)fcio32.fcio_abuf; 823 fcio.fcio_errno = fcio32.fcio_errno; 824 break; 825 } 826 827 case DDI_MODEL_NONE: 828 if (ddi_copyin((void *)data, (void *)&fcio, 829 sizeof (fcio_t), mode)) { 830 ret = EFAULT; 831 } 832 break; 833 } 834 #else /* _MULTI_DATAMODEL */ 835 if (ddi_copyin((void *)data, (void *)&fcio, 836 sizeof (fcio_t), mode)) { 837 ret = EFAULT; 838 break; 839 } 840 #endif /* _MULTI_DATAMODEL */ 841 if (!ret) { 842 ret = fp_fciocmd(port, data, mode, &fcio); 843 } 844 break; 845 } 846 847 default: 848 ret = fctl_ulp_port_ioctl(port, dev, cmd, data, 849 mode, credp, rval); 850 } 851 852 fctl_idle_port(port); 853 854 return (ret); 855 } 856 857 858 /* 859 * Init Symbolic Port Name and Node Name 860 * LV will try to get symbolic names from FCA driver 861 * and register these to name server, 862 * if LV fails to get these, 863 * LV will register its default symbolic names to name server. 
864 * The Default symbolic node name format is : 865 * <hostname>:<hba driver name>(instance) 866 * The Default symbolic port name format is : 867 * <fp path name> 868 */ 869 static void 870 fp_init_symbolic_names(fc_local_port_t *port) 871 { 872 const char *vendorname = ddi_driver_name(port->fp_fca_dip); 873 char *sym_name; 874 char fcaname[50] = {0}; 875 int hostnlen, fcanlen; 876 877 if (port->fp_sym_node_namelen == 0) { 878 hostnlen = strlen(utsname.nodename); 879 (void) snprintf(fcaname, sizeof (fcaname), 880 "%s%d", vendorname, ddi_get_instance(port->fp_fca_dip)); 881 fcanlen = strlen(fcaname); 882 883 sym_name = kmem_zalloc(hostnlen + fcanlen + 2, KM_SLEEP); 884 (void) sprintf(sym_name, "%s:%s", utsname.nodename, fcaname); 885 port->fp_sym_node_namelen = strlen(sym_name); 886 if (port->fp_sym_node_namelen >= FCHBA_SYMB_NAME_LEN) { 887 port->fp_sym_node_namelen = FCHBA_SYMB_NAME_LEN; 888 } 889 (void) strncpy(port->fp_sym_node_name, sym_name, 890 port->fp_sym_node_namelen); 891 kmem_free(sym_name, hostnlen + fcanlen + 2); 892 } 893 894 if (port->fp_sym_port_namelen == 0) { 895 char *pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 896 897 (void) ddi_pathname(port->fp_port_dip, pathname); 898 port->fp_sym_port_namelen = strlen(pathname); 899 if (port->fp_sym_port_namelen >= FCHBA_SYMB_NAME_LEN) { 900 port->fp_sym_port_namelen = FCHBA_SYMB_NAME_LEN; 901 } 902 (void) strncpy(port->fp_sym_port_name, pathname, 903 port->fp_sym_port_namelen); 904 kmem_free(pathname, MAXPATHLEN); 905 } 906 } 907 908 909 /* 910 * Perform port attach 911 */ 912 static int 913 fp_attach_handler(dev_info_t *dip) 914 { 915 int rval; 916 int instance; 917 int port_num; 918 int port_len; 919 char name[30]; 920 char i_pwwn[17]; 921 fp_cmd_t *pkt; 922 uint32_t ub_count; 923 fc_local_port_t *port; 924 job_request_t *job; 925 fc_local_port_t *phyport = NULL; 926 int portpro1; 927 char pwwn[17], nwwn[17]; 928 929 instance = ddi_get_instance(dip); 930 931 port_len = sizeof (port_num); 932 933 rval = ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF, 934 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 935 (caddr_t)&port_num, &port_len); 936 937 if (rval != DDI_SUCCESS) { 938 cmn_err(CE_WARN, "fp(%d): No port property in devinfo", 939 instance); 940 return (DDI_FAILURE); 941 } 942 943 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, instance, 944 DDI_NT_NEXUS, 0) != DDI_SUCCESS) { 945 cmn_err(CE_WARN, "fp(%d): failed to create devctl minor node", 946 instance); 947 return (DDI_FAILURE); 948 } 949 950 if (ddi_create_minor_node(dip, "fc", S_IFCHR, instance, 951 DDI_NT_FC_ATTACHMENT_POINT, 0) != DDI_SUCCESS) { 952 cmn_err(CE_WARN, "fp(%d): failed to create fc attachment" 953 " point minor node", instance); 954 ddi_remove_minor_node(dip, NULL); 955 return (DDI_FAILURE); 956 } 957 958 if (ddi_soft_state_zalloc(fp_driver_softstate, instance) 959 != DDI_SUCCESS) { 960 cmn_err(CE_WARN, "fp(%d): failed to alloc soft state", 961 instance); 962 ddi_remove_minor_node(dip, NULL); 963 return (DDI_FAILURE); 964 } 965 port = ddi_get_soft_state(fp_driver_softstate, instance); 966 967 (void) sprintf(port->fp_ibuf, "fp(%d)", instance); 968 969 port->fp_instance = instance; 970 port->fp_ulp_attach = 1; 971 port->fp_port_num = port_num; 972 port->fp_verbose = fp_verbosity; 973 port->fp_options = fp_options; 974 975 port->fp_fca_dip = ddi_get_parent(dip); 976 port->fp_port_dip = dip; 977 port->fp_fca_tran = (fc_fca_tran_t *) 978 ddi_get_driver_private(port->fp_fca_dip); 979 980 port->fp_task = port->fp_last_task = FP_TASK_IDLE; 981 982 /* 983 * 
Init the starting value of fp_rscn_count. Note that if 984 * FC_INVALID_RSCN_COUNT is 0 (which is what it currently is), the 985 * actual # of RSCNs will be (fp_rscn_count - 1) 986 */ 987 port->fp_rscn_count = FC_INVALID_RSCN_COUNT + 1; 988 989 mutex_init(&port->fp_mutex, NULL, MUTEX_DRIVER, NULL); 990 cv_init(&port->fp_cv, NULL, CV_DRIVER, NULL); 991 cv_init(&port->fp_attach_cv, NULL, CV_DRIVER, NULL); 992 993 (void) sprintf(name, "fp%d_cache", instance); 994 995 if ((portpro1 = ddi_prop_get_int(DDI_DEV_T_ANY, 996 dip, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, 997 "phyport-instance", -1)) != -1) { 998 phyport = ddi_get_soft_state(fp_driver_softstate, portpro1); 999 fc_wwn_to_str(&phyport->fp_service_params.nport_ww_name, pwwn); 1000 fc_wwn_to_str(&phyport->fp_service_params.node_ww_name, nwwn); 1001 port->fp_npiv_type = FC_NPIV_PORT; 1002 } 1003 1004 /* 1005 * Allocate the pool of fc_packet_t structs to be used with 1006 * this fp instance. 1007 */ 1008 port->fp_pkt_cache = kmem_cache_create(name, 1009 (port->fp_fca_tran->fca_pkt_size) + sizeof (fp_cmd_t), 8, 1010 fp_cache_constructor, fp_cache_destructor, NULL, (void *)port, 1011 NULL, 0); 1012 1013 if (port->fp_pkt_cache == NULL) { 1014 goto cache_alloc_failed; 1015 } 1016 1017 1018 /* 1019 * Allocate the d_id and pwwn hash tables for all remote ports 1020 * connected to this local port. 1021 */ 1022 port->fp_did_table = kmem_zalloc(did_table_size * 1023 sizeof (struct d_id_hash), KM_SLEEP); 1024 1025 port->fp_pwwn_table = kmem_zalloc(pwwn_table_size * 1026 sizeof (struct pwwn_hash), KM_SLEEP); 1027 1028 port->fp_taskq = taskq_create("fp_ulp_callback", 1, 1029 MINCLSYSPRI, 1, 16, 0); 1030 1031 /* Indicate that don't have the pm components yet */ 1032 port->fp_soft_state |= FP_SOFT_NO_PMCOMP; 1033 1034 /* 1035 * Bind the callbacks with the FCA driver. This will open the gate 1036 * for asynchronous callbacks, so after this call the fp_mutex 1037 * must be held when updating the fc_local_port_t struct. 1038 * 1039 * This is done _before_ setting up the job thread so we can avoid 1040 * cleaning up after the thread_create() in the error path. This 1041 * also means fp will be operating with fp_els_resp_pkt set to NULL. 
1042 */ 1043 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1044 goto bind_callbacks_failed; 1045 } 1046 1047 if (phyport) { 1048 mutex_enter(&phyport->fp_mutex); 1049 if (phyport->fp_port_next) { 1050 phyport->fp_port_next->fp_port_prev = port; 1051 port->fp_port_next = phyport->fp_port_next; 1052 phyport->fp_port_next = port; 1053 port->fp_port_prev = phyport; 1054 } else { 1055 phyport->fp_port_next = port; 1056 phyport->fp_port_prev = port; 1057 port->fp_port_next = phyport; 1058 port->fp_port_prev = phyport; 1059 } 1060 mutex_exit(&phyport->fp_mutex); 1061 } 1062 1063 /* 1064 * Init Symbolic Names 1065 */ 1066 fp_init_symbolic_names(port); 1067 1068 pkt = fp_alloc_pkt(port, sizeof (la_els_logi_t), sizeof (la_els_logi_t), 1069 KM_SLEEP, NULL); 1070 1071 if (pkt == NULL) { 1072 cmn_err(CE_WARN, "fp(%d): failed to allocate ELS packet", 1073 instance); 1074 goto alloc_els_packet_failed; 1075 } 1076 1077 (void) thread_create(NULL, 0, fp_job_handler, port, 0, &p0, TS_RUN, 1078 v.v_maxsyspri - 2); 1079 1080 fc_wwn_to_str(&port->fp_service_params.nport_ww_name, i_pwwn); 1081 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-port", 1082 i_pwwn) != DDI_PROP_SUCCESS) { 1083 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1084 "fp(%d): Updating 'initiator-port' property" 1085 " on fp dev_info node failed", instance); 1086 } 1087 1088 fc_wwn_to_str(&port->fp_service_params.node_ww_name, i_pwwn); 1089 if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "initiator-node", 1090 i_pwwn) != DDI_PROP_SUCCESS) { 1091 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 1092 "fp(%d): Updating 'initiator-node' property" 1093 " on fp dev_info node failed", instance); 1094 } 1095 1096 mutex_enter(&port->fp_mutex); 1097 port->fp_els_resp_pkt = pkt; 1098 mutex_exit(&port->fp_mutex); 1099 1100 /* 1101 * Determine the count of unsolicited buffers this FCA can support 1102 */ 1103 fp_retrieve_caps(port); 1104 1105 /* 1106 * Allocate unsolicited buffer tokens 1107 */ 1108 if (port->fp_ub_count) { 1109 ub_count = port->fp_ub_count; 1110 port->fp_ub_tokens = kmem_zalloc(ub_count * 1111 sizeof (*port->fp_ub_tokens), KM_SLEEP); 1112 /* 1113 * Do not fail the attach if unsolicited buffer allocation 1114 * fails; Just try to get along with whatever the FCA can do. 1115 */ 1116 if (fc_ulp_uballoc(port, &ub_count, fp_unsol_buf_size, 1117 FC_TYPE_EXTENDED_LS, port->fp_ub_tokens) != 1118 FC_SUCCESS || ub_count != port->fp_ub_count) { 1119 cmn_err(CE_WARN, "fp(%d): failed to allocate " 1120 " Unsolicited buffers. proceeding with attach...", 1121 instance); 1122 kmem_free(port->fp_ub_tokens, 1123 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1124 port->fp_ub_tokens = NULL; 1125 } 1126 } 1127 1128 fp_load_ulp_modules(dip, port); 1129 1130 /* 1131 * Enable DDI_SUSPEND and DDI_RESUME for this instance. 1132 */ 1133 (void) ddi_prop_create(DDI_DEV_T_NONE, dip, DDI_PROP_CANSLEEP, 1134 "pm-hardware-state", "needs-suspend-resume", 1135 strlen("needs-suspend-resume") + 1); 1136 1137 /* 1138 * fctl maintains a list of all port handles, so 1139 * help fctl add this one to its list now. 
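 * (fctl_add_port() below links this port onto that list; the matching
 * fctl_remove_port() call is made from fp_detach_handler().)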
1140 */ 1141 mutex_enter(&port->fp_mutex); 1142 fctl_add_port(port); 1143 1144 /* 1145 * If a state change is already in progress, set the bind state to 1146 * OFFLINE as well, so further state change callbacks into ULPs 1147 * will pass the appropriate states 1148 */ 1149 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE || 1150 port->fp_statec_busy) { 1151 port->fp_bind_state = FC_STATE_OFFLINE; 1152 mutex_exit(&port->fp_mutex); 1153 1154 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 1155 } else { 1156 /* 1157 * Without dropping the mutex, ensure that the port 1158 * startup happens ahead of state change callback 1159 * processing 1160 */ 1161 ASSERT(port->fp_job_tail == NULL && port->fp_job_head == NULL); 1162 1163 port->fp_last_task = port->fp_task; 1164 port->fp_task = FP_TASK_PORT_STARTUP; 1165 1166 job = fctl_alloc_job(JOB_PORT_STARTUP, JOB_TYPE_FCTL_ASYNC, 1167 fp_startup_done, (opaque_t)port, KM_SLEEP); 1168 1169 port->fp_job_head = port->fp_job_tail = job; 1170 1171 cv_signal(&port->fp_cv); 1172 1173 mutex_exit(&port->fp_mutex); 1174 } 1175 1176 mutex_enter(&port->fp_mutex); 1177 while (port->fp_ulp_attach) { 1178 cv_wait(&port->fp_attach_cv, &port->fp_mutex); 1179 } 1180 mutex_exit(&port->fp_mutex); 1181 1182 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 1183 "pm-components", fp_pm_comps, 1184 sizeof (fp_pm_comps) / sizeof (fp_pm_comps[0])) != 1185 DDI_PROP_SUCCESS) { 1186 FP_TRACE(FP_NHEAD2(9, 0), "Failed to create PM" 1187 " components property, PM disabled on this port."); 1188 mutex_enter(&port->fp_mutex); 1189 port->fp_pm_level = FP_PM_PORT_UP; 1190 mutex_exit(&port->fp_mutex); 1191 } else { 1192 if (pm_raise_power(dip, FP_PM_COMPONENT, 1193 FP_PM_PORT_UP) != DDI_SUCCESS) { 1194 FP_TRACE(FP_NHEAD2(9, 0), "Failed to raise" 1195 " power level"); 1196 mutex_enter(&port->fp_mutex); 1197 port->fp_pm_level = FP_PM_PORT_UP; 1198 mutex_exit(&port->fp_mutex); 1199 } 1200 1201 /* 1202 * Don't unset the FP_SOFT_NO_PMCOMP flag until after 1203 * the call to pm_raise_power. The PM framework can't 1204 * handle multiple threads calling into it during attach. 1205 */ 1206 1207 mutex_enter(&port->fp_mutex); 1208 port->fp_soft_state &= ~FP_SOFT_NO_PMCOMP; 1209 mutex_exit(&port->fp_mutex); 1210 } 1211 1212 ddi_report_dev(dip); 1213 1214 fp_log_port_event(port, ESC_SUNFC_PORT_ATTACH); 1215 1216 return (DDI_SUCCESS); 1217 1218 /* 1219 * Unwind any/all preceding allocations in the event of an error.
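 * The labels below are entered via goto at the point of failure and fall
 * through in the reverse order of allocation, releasing (with NULL checks)
 * only what had been set up before the failing step.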
1220 */ 1221 1222 alloc_els_packet_failed: 1223 1224 if (port->fp_fca_handle != NULL) { 1225 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1226 port->fp_fca_handle = NULL; 1227 } 1228 1229 if (port->fp_ub_tokens != NULL) { 1230 (void) fc_ulp_ubfree(port, port->fp_ub_count, 1231 port->fp_ub_tokens); 1232 kmem_free(port->fp_ub_tokens, 1233 port->fp_ub_count * sizeof (*port->fp_ub_tokens)); 1234 port->fp_ub_tokens = NULL; 1235 } 1236 1237 if (port->fp_els_resp_pkt != NULL) { 1238 fp_free_pkt(port->fp_els_resp_pkt); 1239 port->fp_els_resp_pkt = NULL; 1240 } 1241 1242 bind_callbacks_failed: 1243 1244 if (port->fp_taskq != NULL) { 1245 taskq_destroy(port->fp_taskq); 1246 } 1247 1248 if (port->fp_pwwn_table != NULL) { 1249 kmem_free(port->fp_pwwn_table, 1250 pwwn_table_size * sizeof (struct pwwn_hash)); 1251 port->fp_pwwn_table = NULL; 1252 } 1253 1254 if (port->fp_did_table != NULL) { 1255 kmem_free(port->fp_did_table, 1256 did_table_size * sizeof (struct d_id_hash)); 1257 port->fp_did_table = NULL; 1258 } 1259 1260 if (port->fp_pkt_cache != NULL) { 1261 kmem_cache_destroy(port->fp_pkt_cache); 1262 port->fp_pkt_cache = NULL; 1263 } 1264 1265 cache_alloc_failed: 1266 1267 cv_destroy(&port->fp_attach_cv); 1268 cv_destroy(&port->fp_cv); 1269 mutex_destroy(&port->fp_mutex); 1270 ddi_remove_minor_node(port->fp_port_dip, NULL); 1271 ddi_soft_state_free(fp_driver_softstate, instance); 1272 ddi_prop_remove_all(dip); 1273 1274 return (DDI_FAILURE); 1275 } 1276 1277 1278 /* 1279 * Handle DDI_RESUME request 1280 */ 1281 static int 1282 fp_resume_handler(dev_info_t *dip) 1283 { 1284 int rval; 1285 fc_local_port_t *port; 1286 1287 port = ddi_get_soft_state(fp_driver_softstate, ddi_get_instance(dip)); 1288 1289 ASSERT(port != NULL); 1290 1291 #ifdef DEBUG 1292 mutex_enter(&port->fp_mutex); 1293 ASSERT(port->fp_soft_state & FP_SOFT_SUSPEND); 1294 mutex_exit(&port->fp_mutex); 1295 #endif 1296 1297 /* 1298 * If the port was power suspended, raise the power level 1299 */ 1300 mutex_enter(&port->fp_mutex); 1301 if ((port->fp_soft_state & FP_SOFT_POWER_DOWN) && 1302 (!(port->fp_soft_state & FP_SOFT_NO_PMCOMP))) { 1303 ASSERT(port->fp_pm_level == FP_PM_PORT_DOWN); 1304 1305 mutex_exit(&port->fp_mutex); 1306 if (pm_raise_power(dip, FP_PM_COMPONENT, 1307 FP_PM_PORT_UP) != DDI_SUCCESS) { 1308 FP_TRACE(FP_NHEAD2(9, 0), 1309 "Failed to raise the power level"); 1310 return (DDI_FAILURE); 1311 } 1312 mutex_enter(&port->fp_mutex); 1313 } 1314 port->fp_soft_state &= ~FP_SOFT_SUSPEND; 1315 mutex_exit(&port->fp_mutex); 1316 1317 /* 1318 * All the discovery is initiated and handled by per-port thread. 1319 * Further all the discovery is done in handled in callback mode 1320 * (not polled mode); In a specific case such as this, the discovery 1321 * is required to happen in polled mode. The easiest way out is 1322 * to bail out port thread and get started. Come back and fix this 1323 * to do on demand discovery initiated by ULPs. ULPs such as FCP 1324 * will do on-demand discovery during pre-power-up busctl handling 1325 * which will only be possible when SCSA provides a new HBA vector 1326 * for sending down the PM busctl requests. 
1327 */ 1328 (void) callb_generic_cpr(&port->fp_cpr_info, CB_CODE_CPR_RESUME); 1329 1330 rval = fp_resume_all(port, FC_CMD_RESUME); 1331 if (rval != DDI_SUCCESS) { 1332 mutex_enter(&port->fp_mutex); 1333 port->fp_soft_state |= FP_SOFT_SUSPEND; 1334 mutex_exit(&port->fp_mutex); 1335 (void) callb_generic_cpr(&port->fp_cpr_info, 1336 CB_CODE_CPR_CHKPT); 1337 } 1338 1339 return (rval); 1340 } 1341 1342 /* 1343 * Perform FC Port power on initialization 1344 */ 1345 static int 1346 fp_power_up(fc_local_port_t *port) 1347 { 1348 int rval; 1349 1350 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1351 1352 ASSERT((port->fp_soft_state & FP_SOFT_SUSPEND) == 0); 1353 ASSERT(port->fp_soft_state & FP_SOFT_POWER_DOWN); 1354 1355 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1356 1357 mutex_exit(&port->fp_mutex); 1358 1359 rval = fp_resume_all(port, FC_CMD_POWER_UP); 1360 if (rval != DDI_SUCCESS) { 1361 mutex_enter(&port->fp_mutex); 1362 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1363 } else { 1364 mutex_enter(&port->fp_mutex); 1365 } 1366 1367 return (rval); 1368 } 1369 1370 1371 /* 1372 * It is important to note that the power may possibly be removed between 1373 * SUSPEND and the ensuing RESUME operation. In such a context the underlying 1374 * FC port hardware would have gone through an OFFLINE to ONLINE transition 1375 * (hardware state). In this case, the port driver may need to rediscover the 1376 * topology, perform LOGINs, register with the name server again and perform 1377 * any such port initialization procedures. To perform LOGINs, the driver could 1378 * use the port device handle to see if a LOGIN needs to be performed and use 1379 * the D_ID and WWN in it. The LOGINs may fail (if the hardware is reconfigured 1380 * or removed) which will be reflected in the map the ULPs will see. 1381 */ 1382 static int 1383 fp_resume_all(fc_local_port_t *port, fc_attach_cmd_t cmd) 1384 { 1385 1386 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1387 1388 if (fp_bind_callbacks(port) != DDI_SUCCESS) { 1389 return (DDI_FAILURE); 1390 } 1391 1392 mutex_enter(&port->fp_mutex); 1393 1394 /* 1395 * If there are commands queued for delayed retry, instead of 1396 * working the hard way to figure out which ones are good for 1397 * restart and which ones not (ELSs are definitely not good 1398 * as the port will have to go through a new spin of rediscovery 1399 * now), so just flush them out. 1400 */ 1401 if (port->fp_restore & FP_RESTORE_WAIT_TIMEOUT) { 1402 fp_cmd_t *cmd; 1403 1404 port->fp_restore &= ~FP_RESTORE_WAIT_TIMEOUT; 1405 1406 mutex_exit(&port->fp_mutex); 1407 while ((cmd = fp_deque_cmd(port)) != NULL) { 1408 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR; 1409 fp_iodone(cmd); 1410 } 1411 mutex_enter(&port->fp_mutex); 1412 } 1413 1414 if (FC_PORT_STATE_MASK(port->fp_bind_state) == FC_STATE_OFFLINE) { 1415 if ((port->fp_restore & FP_RESTORE_OFFLINE_TIMEOUT) || 1416 port->fp_dev_count) { 1417 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1418 port->fp_offline_tid = timeout(fp_offline_timeout, 1419 (caddr_t)port, fp_offline_ticks); 1420 } 1421 if (port->fp_job_head) { 1422 cv_signal(&port->fp_cv); 1423 } 1424 mutex_exit(&port->fp_mutex); 1425 fctl_attach_ulps(port, cmd, &modlinkage); 1426 } else { 1427 struct job_request *job; 1428 1429 /* 1430 * If an OFFLINE timer was running at the time of 1431 * suspending, there is no need to restart it as 1432 * the port is ONLINE now. 
1433 */ 1434 port->fp_restore &= ~FP_RESTORE_OFFLINE_TIMEOUT; 1435 if (port->fp_statec_busy == 0) { 1436 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 1437 } 1438 port->fp_statec_busy++; 1439 mutex_exit(&port->fp_mutex); 1440 1441 job = fctl_alloc_job(JOB_PORT_ONLINE, 1442 JOB_CANCEL_ULP_NOTIFICATION, NULL, NULL, KM_SLEEP); 1443 fctl_enque_job(port, job); 1444 1445 fctl_jobwait(job); 1446 fctl_remove_oldies(port); 1447 1448 fctl_attach_ulps(port, cmd, &modlinkage); 1449 fctl_dealloc_job(job); 1450 } 1451 1452 return (DDI_SUCCESS); 1453 } 1454 1455 1456 /* 1457 * At this time, there shouldn't be any I/O requests on this port. 1458 * But the unsolicited callbacks from the underlying FCA port need 1459 * to be handled very carefully. The steps followed to handle the 1460 * DDI_DETACH are: 1461 * + Grab the port driver mutex, check if the unsolicited 1462 * callback is currently under processing. If true, fail 1463 * the DDI_DETACH request by printing a message; If false 1464 * mark the DDI_DETACH as under progress, so that any 1465 * further unsolicited callbacks get bounced. 1466 * + Perform PRLO/LOGO if necessary, cleanup all the data 1467 * structures. 1468 * + Get the job_handler thread to gracefully exit. 1469 * + Unregister callbacks with the FCA port. 1470 * + Now that some peace is found, notify all the ULPs of 1471 * DDI_DETACH request (using ulp_port_detach entry point) 1472 * + Free all mutexes, semaphores, conditional variables. 1473 * + Free the soft state, return success. 1474 * 1475 * Important considerations: 1476 * Port driver de-registers state change and unsolicited 1477 * callbacks before taking up the task of notifying ULPs 1478 * and performing PRLO and LOGOs. 1479 * 1480 * A port may go offline at the time PRLO/LOGO is being 1481 * requested. It is expected of all FCA drivers to fail 1482 * such requests either immediately with a FC_OFFLINE 1483 * return code to fc_fca_transport() or return the packet 1484 * asynchronously with pkt state set to FC_PKT_PORT_OFFLINE 1485 */ 1486 static int 1487 fp_detach_handler(fc_local_port_t *port) 1488 { 1489 job_request_t *job; 1490 uint32_t delay_count; 1491 fc_orphan_t *orp, *tmporp; 1492 1493 /* 1494 * In a Fabric topology with many host ports connected to 1495 * a switch, another detaching instance of fp might have 1496 * triggered a LOGO (which is an unsolicited request to 1497 * this instance). So in order to be able to successfully 1498 * detach by taking care of such cases a delay of about 1499 * 30 seconds is introduced. 1500 */ 1501 delay_count = 0; 1502 mutex_enter(&port->fp_mutex); 1503 while ((port->fp_soft_state & 1504 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) && 1505 (delay_count < 30)) { 1506 mutex_exit(&port->fp_mutex); 1507 delay_count++; 1508 delay(drv_usectohz(1000000)); 1509 mutex_enter(&port->fp_mutex); 1510 } 1511 1512 if (port->fp_soft_state & 1513 (FP_SOFT_IN_STATEC_CB | FP_SOFT_IN_UNSOL_CB)) { 1514 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1515 mutex_exit(&port->fp_mutex); 1516 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1517 " Failing detach", port->fp_instance); 1518 return (DDI_FAILURE); 1519 } 1520 1521 port->fp_soft_state |= FP_SOFT_IN_DETACH; 1522 port->fp_soft_state &= ~FP_DETACH_INPROGRESS; 1523 mutex_exit(&port->fp_mutex); 1524 1525 /* 1526 * If we're powered down, we need to raise power prior to submitting 1527 * the JOB_PORT_SHUTDOWN job. Otherwise, the job handler will never 1528 * process the shutdown job. 
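 * (fctl_busy_port() raises the power level if necessary and marks the
 * port busy; fp_ioctl() uses the same call before issuing commands,
 * paired with fctl_idle_port().)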
1529 */ 1530 if (fctl_busy_port(port) != 0) { 1531 cmn_err(CE_WARN, "fp(%d): fctl_busy_port failed", 1532 port->fp_instance); 1533 mutex_enter(&port->fp_mutex); 1534 port->fp_soft_state &= ~FP_SOFT_IN_DETACH; 1535 mutex_exit(&port->fp_mutex); 1536 return (DDI_FAILURE); 1537 } 1538 1539 /* 1540 * This will deallocate data structs and cause the "job" thread 1541 * to exit, in preparation for DDI_DETACH on the instance. 1542 * This can sleep for an arbitrary duration, since it waits for 1543 * commands over the wire, timeout(9F) callbacks, etc. 1544 * 1545 * CAUTION: There is still a race here, where the "job" thread 1546 * can still be executing code even tho the fctl_jobwait() call 1547 * below has returned to us. In theory the fp driver could even be 1548 * modunloaded even tho the job thread isn't done executing. 1549 * without creating the race condition. 1550 */ 1551 job = fctl_alloc_job(JOB_PORT_SHUTDOWN, 0, NULL, 1552 (opaque_t)port, KM_SLEEP); 1553 fctl_enque_job(port, job); 1554 fctl_jobwait(job); 1555 fctl_dealloc_job(job); 1556 1557 1558 (void) pm_lower_power(port->fp_port_dip, FP_PM_COMPONENT, 1559 FP_PM_PORT_DOWN); 1560 1561 if (port->fp_taskq) { 1562 taskq_destroy(port->fp_taskq); 1563 } 1564 1565 ddi_prop_remove_all(port->fp_port_dip); 1566 1567 ddi_remove_minor_node(port->fp_port_dip, NULL); 1568 1569 fctl_remove_port(port); 1570 1571 fp_free_pkt(port->fp_els_resp_pkt); 1572 1573 if (port->fp_ub_tokens) { 1574 if (fc_ulp_ubfree(port, port->fp_ub_count, 1575 port->fp_ub_tokens) != FC_SUCCESS) { 1576 cmn_err(CE_WARN, "fp(%d): couldn't free " 1577 " unsolicited buffers", port->fp_instance); 1578 } 1579 kmem_free(port->fp_ub_tokens, 1580 sizeof (*port->fp_ub_tokens) * port->fp_ub_count); 1581 port->fp_ub_tokens = NULL; 1582 } 1583 1584 if (port->fp_pkt_cache != NULL) { 1585 kmem_cache_destroy(port->fp_pkt_cache); 1586 } 1587 1588 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1589 1590 mutex_enter(&port->fp_mutex); 1591 if (port->fp_did_table) { 1592 kmem_free(port->fp_did_table, did_table_size * 1593 sizeof (struct d_id_hash)); 1594 } 1595 1596 if (port->fp_pwwn_table) { 1597 kmem_free(port->fp_pwwn_table, pwwn_table_size * 1598 sizeof (struct pwwn_hash)); 1599 } 1600 orp = port->fp_orphan_list; 1601 while (orp) { 1602 tmporp = orp; 1603 orp = orp->orp_next; 1604 kmem_free(tmporp, sizeof (*orp)); 1605 } 1606 1607 mutex_exit(&port->fp_mutex); 1608 1609 fp_log_port_event(port, ESC_SUNFC_PORT_DETACH); 1610 1611 mutex_destroy(&port->fp_mutex); 1612 cv_destroy(&port->fp_attach_cv); 1613 cv_destroy(&port->fp_cv); 1614 ddi_soft_state_free(fp_driver_softstate, port->fp_instance); 1615 1616 return (DDI_SUCCESS); 1617 } 1618 1619 1620 /* 1621 * Steps to perform DDI_SUSPEND operation on a FC port 1622 * 1623 * - If already suspended return DDI_FAILURE 1624 * - If already power-suspended return DDI_SUCCESS 1625 * - If an unsolicited callback or state change handling is in 1626 * in progress, throw a warning message, return DDI_FAILURE 1627 * - Cancel timeouts 1628 * - SUSPEND the job_handler thread (means do nothing as it is 1629 * taken care of by the CPR frame work) 1630 */ 1631 static int 1632 fp_suspend_handler(fc_local_port_t *port) 1633 { 1634 uint32_t delay_count; 1635 1636 mutex_enter(&port->fp_mutex); 1637 1638 /* 1639 * The following should never happen, but 1640 * let the driver be more defensive here 1641 */ 1642 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1643 mutex_exit(&port->fp_mutex); 1644 return (DDI_FAILURE); 1645 } 1646 1647 /* 1648 * If the port is already 
power suspended, there 1649 * is nothing else to do, So return DDI_SUCCESS, 1650 * but mark the SUSPEND bit in the soft state 1651 * before leaving. 1652 */ 1653 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1654 port->fp_soft_state |= FP_SOFT_SUSPEND; 1655 mutex_exit(&port->fp_mutex); 1656 return (DDI_SUCCESS); 1657 } 1658 1659 /* 1660 * Check if an unsolicited callback or state change handling is 1661 * in progress. If true, fail the suspend operation; also throw 1662 * a warning message notifying the failure. Note that Sun PCI 1663 * hotplug spec recommends messages in cases of failure (but 1664 * not flooding the console) 1665 * 1666 * Busy waiting for a short interval (500 millisecond ?) to see 1667 * if the callback processing completes may be another idea. Since 1668 * most of the callback processing involves a lot of work, it 1669 * is safe to just fail the SUSPEND operation. It is definitely 1670 * not bad to fail the SUSPEND operation if the driver is busy. 1671 */ 1672 delay_count = 0; 1673 while ((port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1674 FP_SOFT_IN_UNSOL_CB)) && (delay_count < 30)) { 1675 mutex_exit(&port->fp_mutex); 1676 delay_count++; 1677 delay(drv_usectohz(1000000)); 1678 mutex_enter(&port->fp_mutex); 1679 } 1680 1681 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 1682 FP_SOFT_IN_UNSOL_CB)) { 1683 mutex_exit(&port->fp_mutex); 1684 cmn_err(CE_WARN, "fp(%d): FCA callback in progress: " 1685 " Failing suspend", port->fp_instance); 1686 return (DDI_FAILURE); 1687 } 1688 1689 /* 1690 * Check of FC port thread is busy 1691 */ 1692 if (port->fp_job_head) { 1693 mutex_exit(&port->fp_mutex); 1694 FP_TRACE(FP_NHEAD2(9, 0), 1695 "FC port thread is busy: Failing suspend"); 1696 return (DDI_FAILURE); 1697 } 1698 port->fp_soft_state |= FP_SOFT_SUSPEND; 1699 1700 fp_suspend_all(port); 1701 mutex_exit(&port->fp_mutex); 1702 1703 return (DDI_SUCCESS); 1704 } 1705 1706 1707 /* 1708 * Prepare for graceful power down of a FC port 1709 */ 1710 static int 1711 fp_power_down(fc_local_port_t *port) 1712 { 1713 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1714 1715 /* 1716 * Power down request followed by a DDI_SUSPEND should 1717 * never happen; If it does return DDI_SUCCESS 1718 */ 1719 if (port->fp_soft_state & FP_SOFT_SUSPEND) { 1720 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1721 return (DDI_SUCCESS); 1722 } 1723 1724 /* 1725 * If the port is already power suspended, there 1726 * is nothing else to do, So return DDI_SUCCESS, 1727 */ 1728 if (port->fp_soft_state & FP_SOFT_POWER_DOWN) { 1729 return (DDI_SUCCESS); 1730 } 1731 1732 /* 1733 * Check if an unsolicited callback or state change handling 1734 * is in progress. If true, fail the PM suspend operation. 1735 * But don't print a message unless the verbosity of the 1736 * driver desires otherwise. 
1737 */ 1738 if ((port->fp_soft_state & FP_SOFT_IN_STATEC_CB) || 1739 (port->fp_soft_state & FP_SOFT_IN_UNSOL_CB)) { 1740 FP_TRACE(FP_NHEAD2(9, 0), 1741 "Unsolicited callback in progress: Failing power down"); 1742 return (DDI_FAILURE); 1743 } 1744 1745 /* 1746 * Check of FC port thread is busy 1747 */ 1748 if (port->fp_job_head) { 1749 FP_TRACE(FP_NHEAD2(9, 0), 1750 "FC port thread is busy: Failing power down"); 1751 return (DDI_FAILURE); 1752 } 1753 port->fp_soft_state |= FP_SOFT_POWER_DOWN; 1754 1755 /* 1756 * check if the ULPs are ready for power down 1757 */ 1758 mutex_exit(&port->fp_mutex); 1759 if (fctl_detach_ulps(port, FC_CMD_POWER_DOWN, 1760 &modlinkage) != FC_SUCCESS) { 1761 mutex_enter(&port->fp_mutex); 1762 port->fp_soft_state &= ~FP_SOFT_POWER_DOWN; 1763 mutex_exit(&port->fp_mutex); 1764 1765 /* 1766 * Power back up the obedient ULPs that went down 1767 */ 1768 fp_attach_ulps(port, FC_CMD_POWER_UP); 1769 1770 FP_TRACE(FP_NHEAD2(9, 0), 1771 "ULP(s) busy, detach_ulps failed. Failing power down"); 1772 mutex_enter(&port->fp_mutex); 1773 return (DDI_FAILURE); 1774 } 1775 mutex_enter(&port->fp_mutex); 1776 1777 fp_suspend_all(port); 1778 1779 return (DDI_SUCCESS); 1780 } 1781 1782 1783 /* 1784 * Suspend the entire FC port 1785 */ 1786 static void 1787 fp_suspend_all(fc_local_port_t *port) 1788 { 1789 int index; 1790 struct pwwn_hash *head; 1791 fc_remote_port_t *pd; 1792 1793 ASSERT(MUTEX_HELD(&port->fp_mutex)); 1794 1795 if (port->fp_wait_tid != 0) { 1796 timeout_id_t tid; 1797 1798 tid = port->fp_wait_tid; 1799 port->fp_wait_tid = (timeout_id_t)NULL; 1800 mutex_exit(&port->fp_mutex); 1801 (void) untimeout(tid); 1802 mutex_enter(&port->fp_mutex); 1803 port->fp_restore |= FP_RESTORE_WAIT_TIMEOUT; 1804 } 1805 1806 if (port->fp_offline_tid) { 1807 timeout_id_t tid; 1808 1809 tid = port->fp_offline_tid; 1810 port->fp_offline_tid = (timeout_id_t)NULL; 1811 mutex_exit(&port->fp_mutex); 1812 (void) untimeout(tid); 1813 mutex_enter(&port->fp_mutex); 1814 port->fp_restore |= FP_RESTORE_OFFLINE_TIMEOUT; 1815 } 1816 mutex_exit(&port->fp_mutex); 1817 port->fp_fca_tran->fca_unbind_port(port->fp_fca_handle); 1818 mutex_enter(&port->fp_mutex); 1819 1820 /* 1821 * Mark all devices as OLD, and reset the LOGIN state as well 1822 * (this will force the ULPs to perform a LOGIN after calling 1823 * fc_portgetmap() during RESUME/PM_RESUME) 1824 */ 1825 for (index = 0; index < pwwn_table_size; index++) { 1826 head = &port->fp_pwwn_table[index]; 1827 pd = head->pwwn_head; 1828 while (pd != NULL) { 1829 mutex_enter(&pd->pd_mutex); 1830 fp_remote_port_offline(pd); 1831 fctl_delist_did_table(port, pd); 1832 pd->pd_state = PORT_DEVICE_VALID; 1833 pd->pd_login_count = 0; 1834 mutex_exit(&pd->pd_mutex); 1835 pd = pd->pd_wwn_hnext; 1836 } 1837 } 1838 } 1839 1840 1841 /* 1842 * fp_cache_constructor: Constructor function for kmem_cache_create(9F). 1843 * Performs intializations for fc_packet_t structs. 1844 * Returns 0 for success or -1 for failure. 1845 * 1846 * This function allocates DMA handles for both command and responses. 1847 * Most of the ELSs used have both command and responses so it is strongly 1848 * desired to move them to cache constructor routine. 1849 * 1850 * Context: Can sleep iff called with KM_SLEEP flag. 1851 */ 1852 static int 1853 fp_cache_constructor(void *buf, void *cdarg, int kmflags) 1854 { 1855 int (*cb) (caddr_t); 1856 fc_packet_t *pkt; 1857 fp_cmd_t *cmd = (fp_cmd_t *)buf; 1858 fc_local_port_t *port = (fc_local_port_t *)cdarg; 1859 1860 cb = (kmflags == KM_SLEEP) ? 
DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1861 1862 cmd->cmd_next = NULL; 1863 cmd->cmd_flags = 0; 1864 cmd->cmd_dflags = 0; 1865 cmd->cmd_job = NULL; 1866 cmd->cmd_port = port; 1867 pkt = &cmd->cmd_pkt; 1868 1869 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1870 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1871 &pkt->pkt_cmd_dma) != DDI_SUCCESS) { 1872 return (-1); 1873 } 1874 1875 if (ddi_dma_alloc_handle(port->fp_fca_dip, 1876 port->fp_fca_tran->fca_dma_attr, cb, NULL, 1877 &pkt->pkt_resp_dma) != DDI_SUCCESS) { 1878 ddi_dma_free_handle(&pkt->pkt_cmd_dma); 1879 return (-1); 1880 } 1881 1882 pkt->pkt_cmd_acc = pkt->pkt_resp_acc = NULL; 1883 pkt->pkt_cmd_cookie_cnt = pkt->pkt_resp_cookie_cnt = 1884 pkt->pkt_data_cookie_cnt = 0; 1885 pkt->pkt_cmd_cookie = pkt->pkt_resp_cookie = 1886 pkt->pkt_data_cookie = NULL; 1887 pkt->pkt_fca_private = (caddr_t)buf + sizeof (fp_cmd_t); 1888 1889 return (0); 1890 } 1891 1892 1893 /* 1894 * fp_cache_destructor: Destructor function for kmem_cache_create(). 1895 * Performs un-initializations for fc_packet_t structs. 1896 */ 1897 /* ARGSUSED */ 1898 static void 1899 fp_cache_destructor(void *buf, void *cdarg) 1900 { 1901 fp_cmd_t *cmd = (fp_cmd_t *)buf; 1902 fc_packet_t *pkt; 1903 1904 pkt = &cmd->cmd_pkt; 1905 if (pkt->pkt_cmd_dma) { 1906 ddi_dma_free_handle(&pkt->pkt_cmd_dma); 1907 } 1908 1909 if (pkt->pkt_resp_dma) { 1910 ddi_dma_free_handle(&pkt->pkt_resp_dma); 1911 } 1912 } 1913 1914 1915 /* 1916 * Packet allocation for ELS and any other port driver commands 1917 * 1918 * Some ELSs like FLOGI and PLOGI are critical for topology and 1919 * device discovery and a system's inability to allocate memory 1920 * or DVMA resources while performing some of these critical ELSs 1921 * causes a lot of problems. While memory allocation failures are 1922 * rare, DVMA resource failures are common as the applications 1923 * are becoming more and more powerful on huge servers. So it 1924 * is desirable to have framework support to reserve a fragment 1925 * of DVMA. So until this is fixed the correct way, the suffering 1926 * is huge whenever a LIP happens at a time DVMA resources are 1927 * drained out completely - So an attempt needs to be made to 1928 * KM_SLEEP while requesting these resources, hoping that 1929 * the requests won't hang forever. 1930 * 1931 * The fc_remote_port_t argument is stored into the pkt_pd field in the 1932 * fc_packet_t struct prior to the fc_ulp_init_packet() call. This 1933 * ensures that the pd_ref_count for the fc_remote_port_t is valid. 1934 * If there is no fc_remote_port_t associated with the fc_packet_t, then 1935 * fp_alloc_pkt() must be called with pd set to NULL. 1936 */ 1937 1938 static fp_cmd_t * 1939 fp_alloc_pkt(fc_local_port_t *port, int cmd_len, int resp_len, int kmflags, 1940 fc_remote_port_t *pd) 1941 { 1942 int rval; 1943 ulong_t real_len; 1944 fp_cmd_t *cmd; 1945 fc_packet_t *pkt; 1946 int (*cb) (caddr_t); 1947 ddi_dma_cookie_t pkt_cookie; 1948 ddi_dma_cookie_t *cp; 1949 uint32_t cnt; 1950 1951 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 1952 1953 cb = (kmflags == KM_SLEEP) ?
DDI_DMA_SLEEP : DDI_DMA_DONTWAIT; 1954 1955 cmd = (fp_cmd_t *)kmem_cache_alloc(port->fp_pkt_cache, kmflags); 1956 if (cmd == NULL) { 1957 return (cmd); 1958 } 1959 1960 cmd->cmd_ulp_pkt = NULL; 1961 cmd->cmd_flags = 0; 1962 pkt = &cmd->cmd_pkt; 1963 ASSERT(cmd->cmd_dflags == 0); 1964 1965 pkt->pkt_datalen = 0; 1966 pkt->pkt_data = NULL; 1967 pkt->pkt_state = 0; 1968 pkt->pkt_action = 0; 1969 pkt->pkt_reason = 0; 1970 pkt->pkt_expln = 0; 1971 1972 /* 1973 * Init pkt_pd with the given pointer; this must be done _before_ 1974 * the call to fc_ulp_init_packet(). 1975 */ 1976 pkt->pkt_pd = pd; 1977 1978 /* Now call the FCA driver to init its private, per-packet fields */ 1979 if (fc_ulp_init_packet((opaque_t)port, pkt, kmflags) != FC_SUCCESS) { 1980 goto alloc_pkt_failed; 1981 } 1982 1983 if (cmd_len) { 1984 ASSERT(pkt->pkt_cmd_dma != NULL); 1985 1986 rval = ddi_dma_mem_alloc(pkt->pkt_cmd_dma, cmd_len, 1987 port->fp_fca_tran->fca_acc_attr, DDI_DMA_CONSISTENT, 1988 cb, NULL, (caddr_t *)&pkt->pkt_cmd, &real_len, 1989 &pkt->pkt_cmd_acc); 1990 1991 if (rval != DDI_SUCCESS) { 1992 goto alloc_pkt_failed; 1993 } 1994 cmd->cmd_dflags |= FP_CMD_VALID_DMA_MEM; 1995 1996 if (real_len < cmd_len) { 1997 goto alloc_pkt_failed; 1998 } 1999 2000 rval = ddi_dma_addr_bind_handle(pkt->pkt_cmd_dma, NULL, 2001 pkt->pkt_cmd, real_len, DDI_DMA_WRITE | 2002 DDI_DMA_CONSISTENT, cb, NULL, 2003 &pkt_cookie, &pkt->pkt_cmd_cookie_cnt); 2004 2005 if (rval != DDI_DMA_MAPPED) { 2006 goto alloc_pkt_failed; 2007 } 2008 2009 cmd->cmd_dflags |= FP_CMD_VALID_DMA_BIND; 2010 2011 if (pkt->pkt_cmd_cookie_cnt > 2012 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2013 goto alloc_pkt_failed; 2014 } 2015 2016 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2017 2018 cp = pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2019 pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie), 2020 KM_NOSLEEP); 2021 2022 if (cp == NULL) { 2023 goto alloc_pkt_failed; 2024 } 2025 2026 *cp = pkt_cookie; 2027 cp++; 2028 for (cnt = 1; cnt < pkt->pkt_cmd_cookie_cnt; cnt++, cp++) { 2029 ddi_dma_nextcookie(pkt->pkt_cmd_dma, &pkt_cookie); 2030 *cp = pkt_cookie; 2031 } 2032 } 2033 2034 if (resp_len) { 2035 ASSERT(pkt->pkt_resp_dma != NULL); 2036 2037 rval = ddi_dma_mem_alloc(pkt->pkt_resp_dma, resp_len, 2038 port->fp_fca_tran->fca_acc_attr, 2039 DDI_DMA_CONSISTENT, cb, NULL, 2040 (caddr_t *)&pkt->pkt_resp, &real_len, 2041 &pkt->pkt_resp_acc); 2042 2043 if (rval != DDI_SUCCESS) { 2044 goto alloc_pkt_failed; 2045 } 2046 cmd->cmd_dflags |= FP_RESP_VALID_DMA_MEM; 2047 2048 if (real_len < resp_len) { 2049 goto alloc_pkt_failed; 2050 } 2051 2052 rval = ddi_dma_addr_bind_handle(pkt->pkt_resp_dma, NULL, 2053 pkt->pkt_resp, real_len, DDI_DMA_READ | 2054 DDI_DMA_CONSISTENT, cb, NULL, 2055 &pkt_cookie, &pkt->pkt_resp_cookie_cnt); 2056 2057 if (rval != DDI_DMA_MAPPED) { 2058 goto alloc_pkt_failed; 2059 } 2060 2061 cmd->cmd_dflags |= FP_RESP_VALID_DMA_BIND; 2062 2063 if (pkt->pkt_resp_cookie_cnt > 2064 port->fp_fca_tran->fca_dma_attr->dma_attr_sgllen) { 2065 goto alloc_pkt_failed; 2066 } 2067 2068 ASSERT(pkt->pkt_cmd_cookie_cnt != 0); 2069 2070 cp = pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc( 2071 pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie), 2072 KM_NOSLEEP); 2073 2074 if (cp == NULL) { 2075 goto alloc_pkt_failed; 2076 } 2077 2078 *cp = pkt_cookie; 2079 cp++; 2080 for (cnt = 1; cnt < pkt->pkt_resp_cookie_cnt; cnt++, cp++) { 2081 ddi_dma_nextcookie(pkt->pkt_resp_dma, &pkt_cookie); 2082 *cp = pkt_cookie; 2083 } 2084 } 2085 2086 pkt->pkt_cmdlen = cmd_len; 2087 
pkt->pkt_rsplen = resp_len; 2088 pkt->pkt_ulp_private = cmd; 2089 2090 return (cmd); 2091 2092 alloc_pkt_failed: 2093 2094 fp_free_dma(cmd); 2095 2096 if (pkt->pkt_cmd_cookie != NULL) { 2097 kmem_free(pkt->pkt_cmd_cookie, 2098 pkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2099 pkt->pkt_cmd_cookie = NULL; 2100 } 2101 2102 if (pkt->pkt_resp_cookie != NULL) { 2103 kmem_free(pkt->pkt_resp_cookie, 2104 pkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t)); 2105 pkt->pkt_resp_cookie = NULL; 2106 } 2107 2108 kmem_cache_free(port->fp_pkt_cache, cmd); 2109 2110 return (NULL); 2111 } 2112 2113 2114 /* 2115 * Free FC packet 2116 */ 2117 static void 2118 fp_free_pkt(fp_cmd_t *cmd) 2119 { 2120 fc_local_port_t *port; 2121 fc_packet_t *pkt; 2122 2123 ASSERT(!MUTEX_HELD(&cmd->cmd_port->fp_mutex)); 2124 2125 cmd->cmd_next = NULL; 2126 cmd->cmd_job = NULL; 2127 pkt = &cmd->cmd_pkt; 2128 pkt->pkt_ulp_private = 0; 2129 pkt->pkt_tran_flags = 0; 2130 pkt->pkt_tran_type = 0; 2131 port = cmd->cmd_port; 2132 2133 if (pkt->pkt_cmd_cookie != NULL) { 2134 kmem_free(pkt->pkt_cmd_cookie, pkt->pkt_cmd_cookie_cnt * 2135 sizeof (ddi_dma_cookie_t)); 2136 pkt->pkt_cmd_cookie = NULL; 2137 } 2138 2139 if (pkt->pkt_resp_cookie != NULL) { 2140 kmem_free(pkt->pkt_resp_cookie, pkt->pkt_resp_cookie_cnt * 2141 sizeof (ddi_dma_cookie_t)); 2142 pkt->pkt_resp_cookie = NULL; 2143 } 2144 2145 fp_free_dma(cmd); 2146 (void) fc_ulp_uninit_packet((opaque_t)port, pkt); 2147 kmem_cache_free(port->fp_pkt_cache, (void *)cmd); 2148 } 2149 2150 2151 /* 2152 * Release DVMA resources 2153 */ 2154 static void 2155 fp_free_dma(fp_cmd_t *cmd) 2156 { 2157 fc_packet_t *pkt = &cmd->cmd_pkt; 2158 2159 pkt->pkt_cmdlen = 0; 2160 pkt->pkt_rsplen = 0; 2161 pkt->pkt_tran_type = 0; 2162 pkt->pkt_tran_flags = 0; 2163 2164 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_BIND) { 2165 (void) ddi_dma_unbind_handle(pkt->pkt_cmd_dma); 2166 } 2167 2168 if (cmd->cmd_dflags & FP_CMD_VALID_DMA_MEM) { 2169 if (pkt->pkt_cmd_acc) { 2170 ddi_dma_mem_free(&pkt->pkt_cmd_acc); 2171 } 2172 } 2173 2174 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_BIND) { 2175 (void) ddi_dma_unbind_handle(pkt->pkt_resp_dma); 2176 } 2177 2178 if (cmd->cmd_dflags & FP_RESP_VALID_DMA_MEM) { 2179 if (pkt->pkt_resp_acc) { 2180 ddi_dma_mem_free(&pkt->pkt_resp_acc); 2181 } 2182 } 2183 cmd->cmd_dflags = 0; 2184 } 2185 2186 2187 /* 2188 * Dedicated thread to perform various activities. One thread for 2189 * each fc_local_port_t (driver soft state) instance. 2190 * Note, this effectively works out to one thread for each local 2191 * port, but there are also some Solaris taskq threads in use on a per-local 2192 * port basis; these also need to be taken into consideration. 2193 */ 2194 static void 2195 fp_job_handler(fc_local_port_t *port) 2196 { 2197 int rval; 2198 uint32_t *d_id; 2199 fc_remote_port_t *pd; 2200 job_request_t *job; 2201 2202 #ifndef __lock_lint 2203 /* 2204 * Solaris-internal stuff for proper operation of kernel threads 2205 * with Solaris CPR. 2206 */ 2207 CALLB_CPR_INIT(&port->fp_cpr_info, &port->fp_mutex, 2208 callb_generic_cpr, "fp_job_handler"); 2209 #endif 2210 2211 2212 /* Loop forever waiting for work to do */ 2213 for (;;) { 2214 2215 mutex_enter(&port->fp_mutex); 2216 2217 /* 2218 * Sleep if no work to do right now, or if we want 2219 * to suspend or power-down. 
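* The cv_wait() below is bracketed by CALLB_CPR_SAFE_BEGIN/END so this
* thread is marked safe for a checkpoint/resume (CPR) suspend while it
* is blocked; fp_mutex is held again before the job queue is re-checked.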
2220 */ 2221 while (port->fp_job_head == NULL || 2222 (port->fp_soft_state & (FP_SOFT_POWER_DOWN | 2223 FP_SOFT_SUSPEND))) { 2224 CALLB_CPR_SAFE_BEGIN(&port->fp_cpr_info); 2225 cv_wait(&port->fp_cv, &port->fp_mutex); 2226 CALLB_CPR_SAFE_END(&port->fp_cpr_info, &port->fp_mutex); 2227 } 2228 2229 /* 2230 * OK, we've just been woken up, so retrieve the next entry 2231 * from the head of the job queue for this local port. 2232 */ 2233 job = fctl_deque_job(port); 2234 2235 /* 2236 * Handle all the fp driver's supported job codes here 2237 * in this big honkin' switch. 2238 */ 2239 switch (job->job_code) { 2240 case JOB_PORT_SHUTDOWN: 2241 /* 2242 * fp_port_shutdown() is only called from here. This 2243 * will prepare the local port instance (softstate) 2244 * for detaching. This cancels timeout callbacks, 2245 * executes LOGOs with remote ports, cleans up tables, 2246 * and deallocates data structs. 2247 */ 2248 fp_port_shutdown(port, job); 2249 2250 /* 2251 * This will exit the job thread. 2252 */ 2253 #ifndef __lock_lint 2254 CALLB_CPR_EXIT(&(port->fp_cpr_info)); 2255 #else 2256 mutex_exit(&port->fp_mutex); 2257 #endif 2258 fctl_jobdone(job); 2259 thread_exit(); 2260 2261 /* NOTREACHED */ 2262 2263 case JOB_ATTACH_ULP: { 2264 /* 2265 * This job is spawned in response to a ULP calling 2266 * fc_ulp_add(). 2267 */ 2268 2269 boolean_t do_attach_ulps = B_TRUE; 2270 2271 /* 2272 * If fp is detaching, we don't want to call 2273 * fp_startup_done as this asynchronous 2274 * notification may interfere with the re-attach. 2275 */ 2276 2277 if (port->fp_soft_state & (FP_DETACH_INPROGRESS | 2278 FP_SOFT_IN_DETACH | FP_DETACH_FAILED)) { 2279 do_attach_ulps = B_FALSE; 2280 } else { 2281 /* 2282 * We are going to force the transport 2283 * to attach to the ULPs, so set 2284 * fp_ulp_attach. This will keep any 2285 * potential detach from occurring until 2286 * we are done. 2287 */ 2288 port->fp_ulp_attach = 1; 2289 } 2290 2291 mutex_exit(&port->fp_mutex); 2292 2293 /* 2294 * NOTE: Since we just dropped the mutex, there is now 2295 * a race window where the fp_soft_state check above 2296 * could change here. This race is covered because an 2297 * additional check was added in the functions hidden 2298 * under fp_startup_done(). 2299 */ 2300 if (do_attach_ulps == B_TRUE) { 2301 /* 2302 * This goes thru a bit of a convoluted call 2303 * chain before spawning off a DDI taskq 2304 * request to perform the actual attach 2305 * operations. Blocking can occur at a number 2306 * of points. 2307 */ 2308 fp_startup_done((opaque_t)port, FC_PKT_SUCCESS); 2309 } 2310 job->job_result = FC_SUCCESS; 2311 fctl_jobdone(job); 2312 break; 2313 } 2314 2315 case JOB_ULP_NOTIFY: { 2316 /* 2317 * Pass state change notifications up to any/all 2318 * registered ULPs. 2319 */ 2320 uint32_t statec; 2321 2322 statec = job->job_ulp_listlen; 2323 if (statec == FC_STATE_RESET_REQUESTED) { 2324 port->fp_last_task = port->fp_task; 2325 port->fp_task = FP_TASK_OFFLINE; 2326 fp_port_offline(port, 0); 2327 port->fp_task = port->fp_last_task; 2328 port->fp_last_task = FP_TASK_IDLE; 2329 } 2330 2331 if (--port->fp_statec_busy == 0) { 2332 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2333 } 2334 2335 mutex_exit(&port->fp_mutex); 2336 2337 job->job_result = fp_ulp_notify(port, statec, KM_SLEEP); 2338 fctl_jobdone(job); 2339 break; 2340 } 2341 2342 case JOB_PLOGI_ONE: 2343 /* 2344 * Issue a PLOGI to a single remote port. Multiple 2345 * PLOGIs to different remote ports may occur in 2346 * parallel. 
2347 * This can create the fc_remote_port_t if it does not 2348 * already exist. 2349 */ 2350 2351 mutex_exit(&port->fp_mutex); 2352 d_id = (uint32_t *)job->job_private; 2353 pd = fctl_get_remote_port_by_did(port, *d_id); 2354 2355 if (pd) { 2356 mutex_enter(&pd->pd_mutex); 2357 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 2358 pd->pd_login_count++; 2359 mutex_exit(&pd->pd_mutex); 2360 job->job_result = FC_SUCCESS; 2361 fctl_jobdone(job); 2362 break; 2363 } 2364 mutex_exit(&pd->pd_mutex); 2365 } else { 2366 mutex_enter(&port->fp_mutex); 2367 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2368 mutex_exit(&port->fp_mutex); 2369 pd = fp_create_remote_port_by_ns(port, 2370 *d_id, KM_SLEEP); 2371 if (pd == NULL) { 2372 job->job_result = FC_FAILURE; 2373 fctl_jobdone(job); 2374 break; 2375 } 2376 } else { 2377 mutex_exit(&port->fp_mutex); 2378 } 2379 } 2380 2381 job->job_flags |= JOB_TYPE_FP_ASYNC; 2382 job->job_counter = 1; 2383 2384 rval = fp_port_login(port, *d_id, job, 2385 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 2386 2387 if (rval != FC_SUCCESS) { 2388 job->job_result = rval; 2389 fctl_jobdone(job); 2390 } 2391 break; 2392 2393 case JOB_LOGO_ONE: { 2394 /* 2395 * Issue a PLOGO to a single remote port. Multiple 2396 * PLOGOs to different remote ports may occur in 2397 * parallel. 2398 */ 2399 fc_remote_port_t *pd; 2400 2401 #ifndef __lock_lint 2402 ASSERT(job->job_counter > 0); 2403 #endif 2404 2405 pd = (fc_remote_port_t *)job->job_ulp_pkts; 2406 2407 mutex_enter(&pd->pd_mutex); 2408 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 2409 mutex_exit(&pd->pd_mutex); 2410 job->job_result = FC_LOGINREQ; 2411 mutex_exit(&port->fp_mutex); 2412 fctl_jobdone(job); 2413 break; 2414 } 2415 if (pd->pd_login_count > 1) { 2416 pd->pd_login_count--; 2417 mutex_exit(&pd->pd_mutex); 2418 job->job_result = FC_SUCCESS; 2419 mutex_exit(&port->fp_mutex); 2420 fctl_jobdone(job); 2421 break; 2422 } 2423 mutex_exit(&pd->pd_mutex); 2424 mutex_exit(&port->fp_mutex); 2425 job->job_flags |= JOB_TYPE_FP_ASYNC; 2426 (void) fp_logout(port, pd, job); 2427 break; 2428 } 2429 2430 case JOB_FCIO_LOGIN: 2431 /* 2432 * PLOGI initiated at ioctl request. 2433 */ 2434 mutex_exit(&port->fp_mutex); 2435 job->job_result = 2436 fp_fcio_login(port, job->job_private, job); 2437 fctl_jobdone(job); 2438 break; 2439 2440 case JOB_FCIO_LOGOUT: 2441 /* 2442 * PLOGO initiated at ioctl request. 
2443 */ 2444 mutex_exit(&port->fp_mutex); 2445 job->job_result = 2446 fp_fcio_logout(port, job->job_private, job); 2447 fctl_jobdone(job); 2448 break; 2449 2450 case JOB_PORT_GETMAP: 2451 case JOB_PORT_GETMAP_PLOGI_ALL: { 2452 port->fp_last_task = port->fp_task; 2453 port->fp_task = FP_TASK_GETMAP; 2454 2455 switch (port->fp_topology) { 2456 case FC_TOP_PRIVATE_LOOP: 2457 job->job_counter = 1; 2458 2459 fp_get_loopmap(port, job); 2460 mutex_exit(&port->fp_mutex); 2461 fp_jobwait(job); 2462 fctl_fillout_map(port, 2463 (fc_portmap_t **)job->job_private, 2464 (uint32_t *)job->job_arg, 1, 0, 0); 2465 fctl_jobdone(job); 2466 mutex_enter(&port->fp_mutex); 2467 break; 2468 2469 case FC_TOP_PUBLIC_LOOP: 2470 case FC_TOP_FABRIC: 2471 mutex_exit(&port->fp_mutex); 2472 job->job_counter = 1; 2473 2474 job->job_result = fp_ns_getmap(port, 2475 job, (fc_portmap_t **)job->job_private, 2476 (uint32_t *)job->job_arg, 2477 FCTL_GAN_START_ID); 2478 fctl_jobdone(job); 2479 mutex_enter(&port->fp_mutex); 2480 break; 2481 2482 case FC_TOP_PT_PT: 2483 mutex_exit(&port->fp_mutex); 2484 fctl_fillout_map(port, 2485 (fc_portmap_t **)job->job_private, 2486 (uint32_t *)job->job_arg, 1, 0, 0); 2487 fctl_jobdone(job); 2488 mutex_enter(&port->fp_mutex); 2489 break; 2490 2491 default: 2492 mutex_exit(&port->fp_mutex); 2493 fctl_jobdone(job); 2494 mutex_enter(&port->fp_mutex); 2495 break; 2496 } 2497 port->fp_task = port->fp_last_task; 2498 port->fp_last_task = FP_TASK_IDLE; 2499 mutex_exit(&port->fp_mutex); 2500 break; 2501 } 2502 2503 case JOB_PORT_OFFLINE: { 2504 fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE); 2505 2506 port->fp_last_task = port->fp_task; 2507 port->fp_task = FP_TASK_OFFLINE; 2508 2509 if (port->fp_statec_busy > 2) { 2510 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2511 fp_port_offline(port, 0); 2512 if (--port->fp_statec_busy == 0) { 2513 port->fp_soft_state &= 2514 ~FP_SOFT_IN_STATEC_CB; 2515 } 2516 } else { 2517 fp_port_offline(port, 1); 2518 } 2519 2520 port->fp_task = port->fp_last_task; 2521 port->fp_last_task = FP_TASK_IDLE; 2522 2523 mutex_exit(&port->fp_mutex); 2524 2525 fctl_jobdone(job); 2526 break; 2527 } 2528 2529 case JOB_PORT_STARTUP: { 2530 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2531 if (port->fp_statec_busy > 1) { 2532 mutex_exit(&port->fp_mutex); 2533 break; 2534 } 2535 mutex_exit(&port->fp_mutex); 2536 2537 FP_TRACE(FP_NHEAD2(9, rval), 2538 "Topology discovery failed"); 2539 break; 2540 } 2541 2542 /* 2543 * Attempt building device handles in case 2544 * of private Loop. 
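* For a private Loop the LILP map gathered via fp_get_loopmap() drives
* the discovery; for switch topologies fp_fabric_online() is used
* instead, with fp_statec_busy bumped so that intermediate state
* changes are not sent up while the fabric is being brought online.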
2545 */ 2546 if (port->fp_topology == FC_TOP_PRIVATE_LOOP) { 2547 job->job_counter = 1; 2548 2549 fp_get_loopmap(port, job); 2550 mutex_exit(&port->fp_mutex); 2551 fp_jobwait(job); 2552 mutex_enter(&port->fp_mutex); 2553 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 2554 ASSERT(port->fp_total_devices == 0); 2555 port->fp_total_devices = 2556 port->fp_dev_count; 2557 } 2558 } else if (FC_IS_TOP_SWITCH(port->fp_topology)) { 2559 /* 2560 * Hack to avoid state changes going up early 2561 */ 2562 port->fp_statec_busy++; 2563 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 2564 2565 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 2566 fp_fabric_online(port, job); 2567 job->job_flags &= ~JOB_CANCEL_ULP_NOTIFICATION; 2568 } 2569 mutex_exit(&port->fp_mutex); 2570 fctl_jobdone(job); 2571 break; 2572 } 2573 2574 case JOB_PORT_ONLINE: { 2575 char *newtop; 2576 char *oldtop; 2577 uint32_t old_top; 2578 2579 fp_log_port_event(port, ESC_SUNFC_PORT_ONLINE); 2580 2581 /* 2582 * Bail out early if there are a lot of 2583 * state changes in the pipeline 2584 */ 2585 if (port->fp_statec_busy > 1) { 2586 --port->fp_statec_busy; 2587 mutex_exit(&port->fp_mutex); 2588 fctl_jobdone(job); 2589 break; 2590 } 2591 2592 switch (old_top = port->fp_topology) { 2593 case FC_TOP_PRIVATE_LOOP: 2594 oldtop = "Private Loop"; 2595 break; 2596 2597 case FC_TOP_PUBLIC_LOOP: 2598 oldtop = "Public Loop"; 2599 break; 2600 2601 case FC_TOP_PT_PT: 2602 oldtop = "Point to Point"; 2603 break; 2604 2605 case FC_TOP_FABRIC: 2606 oldtop = "Fabric"; 2607 break; 2608 2609 default: 2610 oldtop = NULL; 2611 break; 2612 } 2613 2614 port->fp_last_task = port->fp_task; 2615 port->fp_task = FP_TASK_ONLINE; 2616 2617 if ((rval = fp_port_startup(port, job)) != FC_SUCCESS) { 2618 2619 port->fp_task = port->fp_last_task; 2620 port->fp_last_task = FP_TASK_IDLE; 2621 2622 if (port->fp_statec_busy > 1) { 2623 --port->fp_statec_busy; 2624 mutex_exit(&port->fp_mutex); 2625 break; 2626 } 2627 2628 port->fp_state = FC_STATE_OFFLINE; 2629 2630 FP_TRACE(FP_NHEAD2(9, rval), 2631 "Topology discovery failed"); 2632 2633 if (--port->fp_statec_busy == 0) { 2634 port->fp_soft_state &= 2635 ~FP_SOFT_IN_STATEC_CB; 2636 } 2637 2638 if (port->fp_offline_tid == NULL) { 2639 port->fp_offline_tid = 2640 timeout(fp_offline_timeout, 2641 (caddr_t)port, fp_offline_ticks); 2642 } 2643 2644 mutex_exit(&port->fp_mutex); 2645 break; 2646 } 2647 2648 switch (port->fp_topology) { 2649 case FC_TOP_PRIVATE_LOOP: 2650 newtop = "Private Loop"; 2651 break; 2652 2653 case FC_TOP_PUBLIC_LOOP: 2654 newtop = "Public Loop"; 2655 break; 2656 2657 case FC_TOP_PT_PT: 2658 newtop = "Point to Point"; 2659 break; 2660 2661 case FC_TOP_FABRIC: 2662 newtop = "Fabric"; 2663 break; 2664 2665 default: 2666 newtop = NULL; 2667 break; 2668 } 2669 2670 if (oldtop && newtop && strcmp(oldtop, newtop)) { 2671 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2672 "Change in FC Topology old = %s new = %s", 2673 oldtop, newtop); 2674 } 2675 2676 switch (port->fp_topology) { 2677 case FC_TOP_PRIVATE_LOOP: { 2678 int orphan = (old_top == FC_TOP_FABRIC || 2679 old_top == FC_TOP_PUBLIC_LOOP) ? 
1 : 0; 2680 2681 mutex_exit(&port->fp_mutex); 2682 fp_loop_online(port, job, orphan); 2683 break; 2684 } 2685 2686 case FC_TOP_PUBLIC_LOOP: 2687 /* FALLTHROUGH */ 2688 case FC_TOP_FABRIC: 2689 fp_fabric_online(port, job); 2690 mutex_exit(&port->fp_mutex); 2691 break; 2692 2693 case FC_TOP_PT_PT: 2694 fp_p2p_online(port, job); 2695 mutex_exit(&port->fp_mutex); 2696 break; 2697 2698 default: 2699 if (--port->fp_statec_busy != 0) { 2700 /* 2701 * Watch curiously at what the next 2702 * state transition can do. 2703 */ 2704 mutex_exit(&port->fp_mutex); 2705 break; 2706 } 2707 2708 FP_TRACE(FP_NHEAD2(9, 0), 2709 "Topology Unknown, Offlining the port.."); 2710 2711 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 2712 port->fp_state = FC_STATE_OFFLINE; 2713 2714 if (port->fp_offline_tid == NULL) { 2715 port->fp_offline_tid = 2716 timeout(fp_offline_timeout, 2717 (caddr_t)port, fp_offline_ticks); 2718 } 2719 mutex_exit(&port->fp_mutex); 2720 break; 2721 } 2722 2723 mutex_enter(&port->fp_mutex); 2724 2725 port->fp_task = port->fp_last_task; 2726 port->fp_last_task = FP_TASK_IDLE; 2727 2728 mutex_exit(&port->fp_mutex); 2729 2730 fctl_jobdone(job); 2731 break; 2732 } 2733 2734 case JOB_PLOGI_GROUP: { 2735 mutex_exit(&port->fp_mutex); 2736 fp_plogi_group(port, job); 2737 break; 2738 } 2739 2740 case JOB_UNSOL_REQUEST: { 2741 mutex_exit(&port->fp_mutex); 2742 fp_handle_unsol_buf(port, 2743 (fc_unsol_buf_t *)job->job_private, job); 2744 fctl_dealloc_job(job); 2745 break; 2746 } 2747 2748 case JOB_NS_CMD: { 2749 fctl_ns_req_t *ns_cmd; 2750 2751 mutex_exit(&port->fp_mutex); 2752 2753 job->job_flags |= JOB_TYPE_FP_ASYNC; 2754 ns_cmd = (fctl_ns_req_t *)job->job_private; 2755 if (ns_cmd->ns_cmd_code < NS_GA_NXT || 2756 ns_cmd->ns_cmd_code > NS_DA_ID) { 2757 job->job_result = FC_BADCMD; 2758 fctl_jobdone(job); 2759 break; 2760 } 2761 2762 if (FC_IS_CMD_A_REG(ns_cmd->ns_cmd_code)) { 2763 if (ns_cmd->ns_pd != NULL) { 2764 job->job_result = FC_BADOBJECT; 2765 fctl_jobdone(job); 2766 break; 2767 } 2768 2769 job->job_counter = 1; 2770 2771 rval = fp_ns_reg(port, ns_cmd->ns_pd, 2772 ns_cmd->ns_cmd_code, job, 0, KM_SLEEP); 2773 2774 if (rval != FC_SUCCESS) { 2775 job->job_result = rval; 2776 fctl_jobdone(job); 2777 } 2778 break; 2779 } 2780 job->job_result = FC_SUCCESS; 2781 job->job_counter = 1; 2782 2783 rval = fp_ns_query(port, ns_cmd, job, 0, KM_SLEEP); 2784 if (rval != FC_SUCCESS) { 2785 fctl_jobdone(job); 2786 } 2787 break; 2788 } 2789 2790 case JOB_LINK_RESET: { 2791 la_wwn_t *pwwn; 2792 uint32_t topology; 2793 2794 pwwn = (la_wwn_t *)job->job_private; 2795 ASSERT(pwwn != NULL); 2796 2797 topology = port->fp_topology; 2798 mutex_exit(&port->fp_mutex); 2799 2800 if (fctl_is_wwn_zero(pwwn) == FC_SUCCESS || 2801 topology == FC_TOP_PRIVATE_LOOP) { 2802 job->job_flags |= JOB_TYPE_FP_ASYNC; 2803 rval = port->fp_fca_tran->fca_reset( 2804 port->fp_fca_handle, FC_FCA_LINK_RESET); 2805 job->job_result = rval; 2806 fp_jobdone(job); 2807 } else { 2808 ASSERT((job->job_flags & 2809 JOB_TYPE_FP_ASYNC) == 0); 2810 2811 if (FC_IS_TOP_SWITCH(topology)) { 2812 rval = fp_remote_lip(port, pwwn, 2813 KM_SLEEP, job); 2814 } else { 2815 rval = FC_FAILURE; 2816 } 2817 if (rval != FC_SUCCESS) { 2818 job->job_result = rval; 2819 } 2820 fctl_jobdone(job); 2821 } 2822 break; 2823 } 2824 2825 default: 2826 mutex_exit(&port->fp_mutex); 2827 job->job_result = FC_BADCMD; 2828 fctl_jobdone(job); 2829 break; 2830 } 2831 } 2832 /* NOTREACHED */ 2833 } 2834 2835 2836 /* 2837 * Perform FC port bring up initialization 2838 */ 2839 static int 2840 
fp_port_startup(fc_local_port_t *port, job_request_t *job) 2841 { 2842 int rval; 2843 uint32_t state; 2844 uint32_t src_id; 2845 fc_lilpmap_t *lilp_map; 2846 2847 ASSERT(MUTEX_HELD(&port->fp_mutex)); 2848 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 2849 2850 FP_DTRACE(FP_NHEAD1(2, 0), "Entering fp_port_startup;" 2851 " port=%p, job=%p", port, job); 2852 2853 port->fp_topology = FC_TOP_UNKNOWN; 2854 port->fp_port_id.port_id = 0; 2855 state = FC_PORT_STATE_MASK(port->fp_state); 2856 2857 if (state == FC_STATE_OFFLINE) { 2858 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN; 2859 job->job_result = FC_OFFLINE; 2860 mutex_exit(&port->fp_mutex); 2861 fctl_jobdone(job); 2862 mutex_enter(&port->fp_mutex); 2863 return (FC_OFFLINE); 2864 } 2865 2866 if (state == FC_STATE_LOOP) { 2867 port->fp_port_type.port_type = FC_NS_PORT_NL; 2868 mutex_exit(&port->fp_mutex); 2869 2870 lilp_map = &port->fp_lilp_map; 2871 if ((rval = fp_get_lilpmap(port, lilp_map)) != FC_SUCCESS) { 2872 job->job_result = FC_FAILURE; 2873 fctl_jobdone(job); 2874 2875 FP_TRACE(FP_NHEAD1(9, rval), 2876 "LILP map Invalid or not present"); 2877 mutex_enter(&port->fp_mutex); 2878 return (FC_FAILURE); 2879 } 2880 2881 if (lilp_map->lilp_length == 0) { 2882 job->job_result = FC_NO_MAP; 2883 fctl_jobdone(job); 2884 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 2885 "LILP map length zero"); 2886 mutex_enter(&port->fp_mutex); 2887 return (FC_NO_MAP); 2888 } 2889 src_id = lilp_map->lilp_myalpa & 0xFF; 2890 } else { 2891 fc_remote_port_t *pd; 2892 fc_fca_pm_t pm; 2893 fc_fca_p2p_info_t p2p_info; 2894 int pd_recepient; 2895 2896 /* 2897 * Get P2P remote port info if possible 2898 */ 2899 bzero((caddr_t)&pm, sizeof (pm)); 2900 2901 pm.pm_cmd_flags = FC_FCA_PM_READ; 2902 pm.pm_cmd_code = FC_PORT_GET_P2P_INFO; 2903 pm.pm_data_len = sizeof (fc_fca_p2p_info_t); 2904 pm.pm_data_buf = (caddr_t)&p2p_info; 2905 2906 rval = port->fp_fca_tran->fca_port_manage( 2907 port->fp_fca_handle, &pm); 2908 2909 if (rval == FC_SUCCESS) { 2910 port->fp_port_id.port_id = p2p_info.fca_d_id; 2911 port->fp_port_type.port_type = FC_NS_PORT_N; 2912 port->fp_topology = FC_TOP_PT_PT; 2913 port->fp_total_devices = 1; 2914 pd_recepient = fctl_wwn_cmp( 2915 &port->fp_service_params.nport_ww_name, 2916 &p2p_info.pwwn) < 0 ? 
2917 PD_PLOGI_RECEPIENT : PD_PLOGI_INITIATOR;
2918 mutex_exit(&port->fp_mutex);
2919 pd = fctl_create_remote_port(port,
2920 &p2p_info.nwwn,
2921 &p2p_info.pwwn,
2922 p2p_info.d_id,
2923 pd_recepient, KM_NOSLEEP);
2924 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup;"
2925 " P2P port=%p pd=%p", port, pd);
2926 mutex_enter(&port->fp_mutex);
2927 return (FC_SUCCESS);
2928 }
2929 port->fp_port_type.port_type = FC_NS_PORT_N;
2930 mutex_exit(&port->fp_mutex);
2931 src_id = 0;
2932 }
2933
2934 job->job_counter = 1;
2935 job->job_result = FC_SUCCESS;
2936
2937 if ((rval = fp_fabric_login(port, src_id, job, FP_CMD_PLOGI_DONT_CARE,
2938 KM_SLEEP)) != FC_SUCCESS) {
2939 port->fp_port_type.port_type = FC_NS_PORT_UNKNOWN;
2940 job->job_result = FC_FAILURE;
2941 fctl_jobdone(job);
2942
2943 mutex_enter(&port->fp_mutex);
2944 if (port->fp_statec_busy <= 1) {
2945 mutex_exit(&port->fp_mutex);
2946 fp_printf(port, CE_NOTE, FP_LOG_ONLY, rval, NULL,
2947 "Couldn't transport FLOGI");
2948 mutex_enter(&port->fp_mutex);
2949 }
2950 return (FC_FAILURE);
2951 }
2952
2953 fp_jobwait(job);
2954
2955 mutex_enter(&port->fp_mutex);
2956 if (job->job_result == FC_SUCCESS) {
2957 if (FC_IS_TOP_SWITCH(port->fp_topology)) {
2958 mutex_exit(&port->fp_mutex);
2959 fp_ns_init(port, job, KM_SLEEP);
2960 mutex_enter(&port->fp_mutex);
2961 }
2962 } else {
2963 if (state == FC_STATE_LOOP) {
2964 port->fp_topology = FC_TOP_PRIVATE_LOOP;
2965 port->fp_port_id.port_id =
2966 port->fp_lilp_map.lilp_myalpa & 0xFF;
2967 }
2968 }
2969
2970 FP_DTRACE(FP_NHEAD1(2, 0), "Exiting fp_port_startup; port=%p, job=%p",
2971 port, job);
2972
2973 return (FC_SUCCESS);
2974 }
2975
2976
2977 /*
2978 * Perform ULP invocations following FC port startup
2979 */
2980 /* ARGSUSED */
2981 static void
2982 fp_startup_done(opaque_t arg, uchar_t result)
2983 {
2984 fc_local_port_t *port = arg;
2985
2986 fp_attach_ulps(port, FC_CMD_ATTACH);
2987
2988 FP_DTRACE(FP_NHEAD1(2, 0), "fp_startup almost complete; port=%p", port);
2989 }
2990
2991
2992 /*
2993 * Perform ULP port attach
2994 */
2995 static void
2996 fp_ulp_port_attach(void *arg)
2997 {
2998 fp_soft_attach_t *att = (fp_soft_attach_t *)arg;
2999 fc_local_port_t *port = att->att_port;
3000
3001 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3002 " ULPs begin; port=%p, cmd=%x", port, att->att_cmd);
3003
3004 fctl_attach_ulps(att->att_port, att->att_cmd, &modlinkage);
3005
3006 if (att->att_need_pm_idle == B_TRUE) {
3007 fctl_idle_port(port);
3008 }
3009
3010 FP_DTRACE(FP_NHEAD1(1, 0), "port attach of"
3011 " ULPs end; port=%p, cmd=%x", port, att->att_cmd);
3012
3013 mutex_enter(&att->att_port->fp_mutex);
3014 att->att_port->fp_ulp_attach = 0;
3015
3016 port->fp_task = port->fp_last_task;
3017 port->fp_last_task = FP_TASK_IDLE;
3018
3019 cv_signal(&att->att_port->fp_attach_cv);
3020
3021 mutex_exit(&att->att_port->fp_mutex);
3022
3023 kmem_free(att, sizeof (fp_soft_attach_t));
3024 }
3025
3026 /*
3027 * Entry point to funnel all requests down to FCAs
3028 */
3029 static int
3030 fp_sendcmd(fc_local_port_t *port, fp_cmd_t *cmd, opaque_t fca_handle)
3031 {
3032 int rval;
3033
3034 mutex_enter(&port->fp_mutex);
3035 if (port->fp_statec_busy > 1 || (cmd->cmd_ulp_pkt != NULL &&
3036 (port->fp_statec_busy || FC_PORT_STATE_MASK(port->fp_state) ==
3037 FC_STATE_OFFLINE))) {
3038 /*
3039 * There is more than one state change pending at this
3040 * point. Since state changes are processed serially,
3041 * processing of the current one should be failed so
3042 * that we can move on to the next one.
3043 */
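/*
 * The command is not handed to the FCA; its packet is simply marked
 * with FC_PKT_ELS_IN_PROGRESS/FC_REASON_OFFLINE and FC_STATEC_BUSY
 * is returned so the caller can fail it appropriately.
 */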
3044 cmd->cmd_pkt.pkt_state = FC_PKT_ELS_IN_PROGRESS;
3045 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3046 if (cmd->cmd_job) {
3047 /*
3048 * A state change that is going to be invalidated
3049 * by another one already in the port driver's queue
3050 * need not go up to all ULPs. This will minimize
3051 * needless processing and ripples in ULP modules
3052 */
3053 cmd->cmd_job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION;
3054 }
3055 mutex_exit(&port->fp_mutex);
3056 return (FC_STATEC_BUSY);
3057 }
3058
3059 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) {
3060 cmd->cmd_pkt.pkt_state = FC_PKT_PORT_OFFLINE;
3061 cmd->cmd_pkt.pkt_reason = FC_REASON_OFFLINE;
3062 mutex_exit(&port->fp_mutex);
3063
3064 return (FC_OFFLINE);
3065 }
3066 mutex_exit(&port->fp_mutex);
3067
3068 rval = cmd->cmd_transport(fca_handle, &cmd->cmd_pkt);
3069 if (rval != FC_SUCCESS) {
3070 if (rval == FC_TRAN_BUSY) {
3071 cmd->cmd_retry_interval = fp_retry_delay;
3072 rval = fp_retry_cmd(&cmd->cmd_pkt);
3073 if (rval == FC_FAILURE) {
3074 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_BSY;
3075 }
3076 }
3077 }
3078
3079 return (rval);
3080 }
3081
3082
3083 /*
3084 * Each time the timeout fires, walk the wait queue and decrement
3085 * the retry_interval; when the retry_interval drops to zero or
3086 * below, re-transport the command. If the re-transport fails
3087 * with BUSY, enqueue the command back on the wait queue.
3088 *
3089 * In order to prevent looping forever because of commands enqueued
3090 * from within this function itself, save the current tail pointer
3091 * (in cur_tail) and exit the loop after serving this command.
3092 */
3093 static void
3094 fp_resendcmd(void *port_handle)
3095 {
3096 int rval;
3097 fc_local_port_t *port;
3098 fp_cmd_t *cmd;
3099 fp_cmd_t *cur_tail;
3100
3101 port = port_handle;
3102 mutex_enter(&port->fp_mutex);
3103 cur_tail = port->fp_wait_tail;
3104 mutex_exit(&port->fp_mutex);
3105
3106 while ((cmd = fp_deque_cmd(port)) != NULL) {
3107 cmd->cmd_retry_interval -= fp_retry_ticker;
3108 /* Check if we are detaching */
3109 if (port->fp_soft_state &
3110 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS)) {
3111 cmd->cmd_pkt.pkt_state = FC_PKT_TRAN_ERROR;
3112 cmd->cmd_pkt.pkt_reason = 0;
3113 fp_iodone(cmd);
3114 } else if (cmd->cmd_retry_interval <= 0) {
3115 rval = cmd->cmd_transport(port->fp_fca_handle,
3116 &cmd->cmd_pkt);
3117
3118 if (rval != FC_SUCCESS) {
3119 if (cmd->cmd_pkt.pkt_state == FC_PKT_TRAN_BSY) {
3120 if (--cmd->cmd_retry_count) {
3121 fp_enque_cmd(port, cmd);
3122 if (cmd == cur_tail) {
3123 break;
3124 }
3125 continue;
3126 }
3127 cmd->cmd_pkt.pkt_state =
3128 FC_PKT_TRAN_BSY;
3129 } else {
3130 cmd->cmd_pkt.pkt_state =
3131 FC_PKT_TRAN_ERROR;
3132 }
3133 cmd->cmd_pkt.pkt_reason = 0;
3134 fp_iodone(cmd);
3135 }
3136 } else {
3137 fp_enque_cmd(port, cmd);
3138 }
3139
3140 if (cmd == cur_tail) {
3141 break;
3142 }
3143 }
3144
3145 mutex_enter(&port->fp_mutex);
3146 if (port->fp_wait_head) {
3147 timeout_id_t tid;
3148
3149 mutex_exit(&port->fp_mutex);
3150 tid = timeout(fp_resendcmd, (caddr_t)port,
3151 fp_retry_ticks);
3152 mutex_enter(&port->fp_mutex);
3153 port->fp_wait_tid = tid;
3154 } else {
3155 port->fp_wait_tid = NULL;
3156 }
3157 mutex_exit(&port->fp_mutex);
3158 }
3159
3160
3161 /*
3162 * Handle Local, Fabric, N_Port, Transport (whatever that means) BUSY here.
3163 *
3164 * Yes, as you can see below, cmd_retry_count is used here too.
That means 3165 * the retries for BUSY are less if there were transport failures (transport 3166 * failure means fca_transport failure). The goal is not to exceed overall 3167 * retries set in the cmd_retry_count (whatever may be the reason for retry) 3168 * 3169 * Return Values: 3170 * FC_SUCCESS 3171 * FC_FAILURE 3172 */ 3173 static int 3174 fp_retry_cmd(fc_packet_t *pkt) 3175 { 3176 fp_cmd_t *cmd; 3177 3178 cmd = pkt->pkt_ulp_private; 3179 3180 if (--cmd->cmd_retry_count) { 3181 fp_enque_cmd(cmd->cmd_port, cmd); 3182 return (FC_SUCCESS); 3183 } else { 3184 return (FC_FAILURE); 3185 } 3186 } 3187 3188 3189 /* 3190 * Queue up FC packet for deferred retry 3191 */ 3192 static void 3193 fp_enque_cmd(fc_local_port_t *port, fp_cmd_t *cmd) 3194 { 3195 timeout_id_t tid; 3196 3197 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3198 3199 #ifdef DEBUG 3200 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, &cmd->cmd_pkt, 3201 "Retrying ELS for %x", cmd->cmd_pkt.pkt_cmd_fhdr.d_id); 3202 #endif 3203 3204 mutex_enter(&port->fp_mutex); 3205 if (port->fp_wait_tail) { 3206 port->fp_wait_tail->cmd_next = cmd; 3207 port->fp_wait_tail = cmd; 3208 } else { 3209 ASSERT(port->fp_wait_head == NULL); 3210 port->fp_wait_head = port->fp_wait_tail = cmd; 3211 if (port->fp_wait_tid == NULL) { 3212 mutex_exit(&port->fp_mutex); 3213 tid = timeout(fp_resendcmd, (caddr_t)port, 3214 fp_retry_ticks); 3215 mutex_enter(&port->fp_mutex); 3216 port->fp_wait_tid = tid; 3217 } 3218 } 3219 mutex_exit(&port->fp_mutex); 3220 } 3221 3222 3223 /* 3224 * Handle all RJT codes 3225 */ 3226 static int 3227 fp_handle_reject(fc_packet_t *pkt) 3228 { 3229 int rval = FC_FAILURE; 3230 uchar_t next_class; 3231 fp_cmd_t *cmd; 3232 fc_local_port_t *port; 3233 3234 cmd = pkt->pkt_ulp_private; 3235 port = cmd->cmd_port; 3236 3237 switch (pkt->pkt_state) { 3238 case FC_PKT_FABRIC_RJT: 3239 case FC_PKT_NPORT_RJT: 3240 if (pkt->pkt_reason == FC_REASON_CLASS_NOT_SUPP) { 3241 next_class = fp_get_nextclass(cmd->cmd_port, 3242 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 3243 3244 if (next_class == FC_TRAN_CLASS_INVALID) { 3245 return (rval); 3246 } 3247 pkt->pkt_tran_flags = FC_TRAN_INTR | next_class; 3248 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 3249 3250 rval = fp_sendcmd(cmd->cmd_port, cmd, 3251 cmd->cmd_port->fp_fca_handle); 3252 3253 if (rval != FC_SUCCESS) { 3254 pkt->pkt_state = FC_PKT_TRAN_ERROR; 3255 } 3256 } 3257 break; 3258 3259 case FC_PKT_LS_RJT: 3260 case FC_PKT_BA_RJT: 3261 if ((pkt->pkt_reason == FC_REASON_LOGICAL_ERROR) || 3262 (pkt->pkt_reason == FC_REASON_LOGICAL_BSY)) { 3263 cmd->cmd_retry_interval = fp_retry_delay; 3264 rval = fp_retry_cmd(pkt); 3265 } 3266 break; 3267 3268 case FC_PKT_FS_RJT: 3269 if (pkt->pkt_reason == FC_REASON_FS_LOGICAL_BUSY) { 3270 cmd->cmd_retry_interval = fp_retry_delay; 3271 rval = fp_retry_cmd(pkt); 3272 } 3273 break; 3274 3275 case FC_PKT_LOCAL_RJT: 3276 if (pkt->pkt_reason == FC_REASON_QFULL) { 3277 cmd->cmd_retry_interval = fp_retry_delay; 3278 rval = fp_retry_cmd(pkt); 3279 } 3280 break; 3281 3282 default: 3283 FP_TRACE(FP_NHEAD1(1, 0), 3284 "fp_handle_reject(): Invalid pkt_state"); 3285 break; 3286 } 3287 3288 return (rval); 3289 } 3290 3291 3292 /* 3293 * Return the next class of service supported by the FCA 3294 */ 3295 static uchar_t 3296 fp_get_nextclass(fc_local_port_t *port, uchar_t cur_class) 3297 { 3298 uchar_t next_class; 3299 3300 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3301 3302 switch (cur_class) { 3303 case FC_TRAN_CLASS_INVALID: 3304 if (port->fp_cos & FC_NS_CLASS1) { 3305 next_class = FC_TRAN_CLASS1; 3306 break; 3307 
} 3308 /* FALLTHROUGH */ 3309 3310 case FC_TRAN_CLASS1: 3311 if (port->fp_cos & FC_NS_CLASS2) { 3312 next_class = FC_TRAN_CLASS2; 3313 break; 3314 } 3315 /* FALLTHROUGH */ 3316 3317 case FC_TRAN_CLASS2: 3318 if (port->fp_cos & FC_NS_CLASS3) { 3319 next_class = FC_TRAN_CLASS3; 3320 break; 3321 } 3322 /* FALLTHROUGH */ 3323 3324 case FC_TRAN_CLASS3: 3325 default: 3326 next_class = FC_TRAN_CLASS_INVALID; 3327 break; 3328 } 3329 3330 return (next_class); 3331 } 3332 3333 3334 /* 3335 * Determine if a class of service is supported by the FCA 3336 */ 3337 static int 3338 fp_is_class_supported(uint32_t cos, uchar_t tran_class) 3339 { 3340 int rval; 3341 3342 switch (tran_class) { 3343 case FC_TRAN_CLASS1: 3344 if (cos & FC_NS_CLASS1) { 3345 rval = FC_SUCCESS; 3346 } else { 3347 rval = FC_FAILURE; 3348 } 3349 break; 3350 3351 case FC_TRAN_CLASS2: 3352 if (cos & FC_NS_CLASS2) { 3353 rval = FC_SUCCESS; 3354 } else { 3355 rval = FC_FAILURE; 3356 } 3357 break; 3358 3359 case FC_TRAN_CLASS3: 3360 if (cos & FC_NS_CLASS3) { 3361 rval = FC_SUCCESS; 3362 } else { 3363 rval = FC_FAILURE; 3364 } 3365 break; 3366 3367 default: 3368 rval = FC_FAILURE; 3369 break; 3370 } 3371 3372 return (rval); 3373 } 3374 3375 3376 /* 3377 * Dequeue FC packet for retry 3378 */ 3379 static fp_cmd_t * 3380 fp_deque_cmd(fc_local_port_t *port) 3381 { 3382 fp_cmd_t *cmd; 3383 3384 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3385 3386 mutex_enter(&port->fp_mutex); 3387 3388 if (port->fp_wait_head == NULL) { 3389 /* 3390 * To avoid races, NULL the fp_wait_tid as 3391 * we are about to exit the timeout thread. 3392 */ 3393 port->fp_wait_tid = NULL; 3394 mutex_exit(&port->fp_mutex); 3395 return (NULL); 3396 } 3397 3398 cmd = port->fp_wait_head; 3399 port->fp_wait_head = cmd->cmd_next; 3400 cmd->cmd_next = NULL; 3401 3402 if (port->fp_wait_head == NULL) { 3403 port->fp_wait_tail = NULL; 3404 } 3405 mutex_exit(&port->fp_mutex); 3406 3407 return (cmd); 3408 } 3409 3410 3411 /* 3412 * Wait for job completion 3413 */ 3414 static void 3415 fp_jobwait(job_request_t *job) 3416 { 3417 sema_p(&job->job_port_sema); 3418 } 3419 3420 3421 /* 3422 * Convert FC packet state to FC errno 3423 */ 3424 int 3425 fp_state_to_rval(uchar_t state) 3426 { 3427 int count; 3428 3429 for (count = 0; count < sizeof (fp_xlat) / 3430 sizeof (fp_xlat[0]); count++) { 3431 if (fp_xlat[count].xlat_state == state) { 3432 return (fp_xlat[count].xlat_rval); 3433 } 3434 } 3435 3436 return (FC_FAILURE); 3437 } 3438 3439 3440 /* 3441 * For Synchronous I/O requests, the caller is 3442 * expected to do fctl_jobdone(if necessary) 3443 * 3444 * We want to preserve at least one failure in the 3445 * job_result if it happens. 
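* On completion the packet state is translated via fp_state_to_rval()
* into the job_result, the remote port (if any) is marked PD_IDLE,
* the ULP packet's completion routine is called, the command is
* freed and the job counter is dropped through fp_jobdone().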
3446 *
3447 */
3448 static void
3449 fp_iodone(fp_cmd_t *cmd)
3450 {
3451 fc_packet_t *ulp_pkt = cmd->cmd_ulp_pkt;
3452 job_request_t *job = cmd->cmd_job;
3453 fc_remote_port_t *pd = cmd->cmd_pkt.pkt_pd;
3454
3455 ASSERT(job != NULL);
3456 ASSERT(cmd->cmd_port != NULL);
3457 ASSERT(&cmd->cmd_pkt != NULL);
3458
3459 mutex_enter(&job->job_mutex);
3460 if (job->job_result == FC_SUCCESS) {
3461 job->job_result = fp_state_to_rval(cmd->cmd_pkt.pkt_state);
3462 }
3463 mutex_exit(&job->job_mutex);
3464
3465 if (pd) {
3466 mutex_enter(&pd->pd_mutex);
3467 pd->pd_flags = PD_IDLE;
3468 mutex_exit(&pd->pd_mutex);
3469 }
3470
3471 if (ulp_pkt) {
3472 if (pd && cmd->cmd_flags & FP_CMD_DELDEV_ON_ERROR &&
3473 FP_IS_PKT_ERROR(ulp_pkt)) {
3474 fc_local_port_t *port;
3475 fc_remote_node_t *node;
3476
3477 port = cmd->cmd_port;
3478
3479 mutex_enter(&pd->pd_mutex);
3480 pd->pd_state = PORT_DEVICE_INVALID;
3481 pd->pd_ref_count--;
3482 node = pd->pd_remote_nodep;
3483 mutex_exit(&pd->pd_mutex);
3484
3485 ASSERT(node != NULL);
3486 ASSERT(port != NULL);
3487
3488 if (fctl_destroy_remote_port(port, pd) == 0) {
3489 fctl_destroy_remote_node(node);
3490 }
3491
3492 ulp_pkt->pkt_pd = NULL;
3493 }
3494
3495 ulp_pkt->pkt_comp(ulp_pkt);
3496 }
3497
3498 fp_free_pkt(cmd);
3499 fp_jobdone(job);
3500 }
3501
3502
3503 /*
3504 * Job completion handler
3505 */
3506 static void
3507 fp_jobdone(job_request_t *job)
3508 {
3509 mutex_enter(&job->job_mutex);
3510 ASSERT(job->job_counter > 0);
3511
3512 if (--job->job_counter != 0) {
3513 mutex_exit(&job->job_mutex);
3514 return;
3515 }
3516
3517 if (job->job_ulp_pkts) {
3518 ASSERT(job->job_ulp_listlen > 0);
3519 kmem_free(job->job_ulp_pkts,
3520 sizeof (fc_packet_t *) * job->job_ulp_listlen);
3521 }
3522
3523 if (job->job_flags & JOB_TYPE_FP_ASYNC) {
3524 mutex_exit(&job->job_mutex);
3525 fctl_jobdone(job);
3526 } else {
3527 mutex_exit(&job->job_mutex);
3528 sema_v(&job->job_port_sema);
3529 }
3530 }
3531
3532
3533 /*
3534 * Try to perform shutdown of a port during a detach. No return
3535 * value since the detach should not fail because the port shutdown
3536 * failed.
3537 */
3538 static void
3539 fp_port_shutdown(fc_local_port_t *port, job_request_t *job)
3540 {
3541 int index;
3542 int count;
3543 int flags;
3544 fp_cmd_t *cmd;
3545 struct pwwn_hash *head;
3546 fc_remote_port_t *pd;
3547
3548 ASSERT(MUTEX_HELD(&port->fp_mutex));
3549
3550 job->job_result = FC_SUCCESS;
3551
3552 if (port->fp_taskq) {
3553 /*
3554 * We must release the mutex here to ensure that other
3555 * potential jobs can complete their processing. Many
3556 * also need this mutex.
3557 */
3558 mutex_exit(&port->fp_mutex);
3559 taskq_wait(port->fp_taskq);
3560 mutex_enter(&port->fp_mutex);
3561 }
3562
3563 if (port->fp_offline_tid) {
3564 timeout_id_t tid;
3565
3566 tid = port->fp_offline_tid;
3567 port->fp_offline_tid = NULL;
3568 mutex_exit(&port->fp_mutex);
3569 (void) untimeout(tid);
3570 mutex_enter(&port->fp_mutex);
3571 }
3572
3573 if (port->fp_wait_tid) {
3574 timeout_id_t tid;
3575
3576 tid = port->fp_wait_tid;
3577 port->fp_wait_tid = NULL;
3578 mutex_exit(&port->fp_mutex);
3579 (void) untimeout(tid);
3580 } else {
3581 mutex_exit(&port->fp_mutex);
3582 }
3583
3584 /*
3585 * While we cancel the timeout, let's also return
3586 * the outstanding requests back to the callers.
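* Each command still sitting on the retry queue is completed with
* FC_OFFLINE so that the job waiting on it is unblocked before the
* rest of the shutdown proceeds.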
3587 */ 3588 while ((cmd = fp_deque_cmd(port)) != NULL) { 3589 ASSERT(cmd->cmd_job != NULL); 3590 cmd->cmd_job->job_result = FC_OFFLINE; 3591 fp_iodone(cmd); 3592 } 3593 3594 /* 3595 * Gracefully LOGO with all the devices logged in. 3596 */ 3597 mutex_enter(&port->fp_mutex); 3598 3599 for (count = index = 0; index < pwwn_table_size; index++) { 3600 head = &port->fp_pwwn_table[index]; 3601 pd = head->pwwn_head; 3602 while (pd != NULL) { 3603 mutex_enter(&pd->pd_mutex); 3604 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3605 count++; 3606 } 3607 mutex_exit(&pd->pd_mutex); 3608 pd = pd->pd_wwn_hnext; 3609 } 3610 } 3611 3612 if (job->job_flags & JOB_TYPE_FP_ASYNC) { 3613 flags = job->job_flags; 3614 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 3615 } else { 3616 flags = 0; 3617 } 3618 if (count) { 3619 job->job_counter = count; 3620 3621 for (index = 0; index < pwwn_table_size; index++) { 3622 head = &port->fp_pwwn_table[index]; 3623 pd = head->pwwn_head; 3624 while (pd != NULL) { 3625 mutex_enter(&pd->pd_mutex); 3626 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 3627 ASSERT(pd->pd_login_count > 0); 3628 /* 3629 * Force the counter to ONE in order 3630 * for us to really send LOGO els. 3631 */ 3632 pd->pd_login_count = 1; 3633 mutex_exit(&pd->pd_mutex); 3634 mutex_exit(&port->fp_mutex); 3635 (void) fp_logout(port, pd, job); 3636 mutex_enter(&port->fp_mutex); 3637 } else { 3638 mutex_exit(&pd->pd_mutex); 3639 } 3640 pd = pd->pd_wwn_hnext; 3641 } 3642 } 3643 mutex_exit(&port->fp_mutex); 3644 fp_jobwait(job); 3645 } else { 3646 mutex_exit(&port->fp_mutex); 3647 } 3648 3649 if (job->job_result != FC_SUCCESS) { 3650 FP_TRACE(FP_NHEAD1(9, 0), 3651 "Can't logout all devices. Proceeding with" 3652 " port shutdown"); 3653 job->job_result = FC_SUCCESS; 3654 } 3655 3656 fctl_destroy_all_remote_ports(port); 3657 3658 mutex_enter(&port->fp_mutex); 3659 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 3660 mutex_exit(&port->fp_mutex); 3661 fp_ns_fini(port, job); 3662 } else { 3663 mutex_exit(&port->fp_mutex); 3664 } 3665 3666 if (flags) { 3667 job->job_flags = flags; 3668 } 3669 3670 mutex_enter(&port->fp_mutex); 3671 3672 } 3673 3674 3675 /* 3676 * Build the port driver's data structures based on the AL_PA list 3677 */ 3678 static void 3679 fp_get_loopmap(fc_local_port_t *port, job_request_t *job) 3680 { 3681 int rval; 3682 int flag; 3683 int count; 3684 uint32_t d_id; 3685 fc_remote_port_t *pd; 3686 fc_lilpmap_t *lilp_map; 3687 3688 ASSERT(MUTEX_HELD(&port->fp_mutex)); 3689 3690 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 3691 job->job_result = FC_OFFLINE; 3692 mutex_exit(&port->fp_mutex); 3693 fp_jobdone(job); 3694 mutex_enter(&port->fp_mutex); 3695 return; 3696 } 3697 3698 if (port->fp_lilp_map.lilp_length == 0) { 3699 mutex_exit(&port->fp_mutex); 3700 job->job_result = FC_NO_MAP; 3701 fp_jobdone(job); 3702 mutex_enter(&port->fp_mutex); 3703 return; 3704 } 3705 mutex_exit(&port->fp_mutex); 3706 3707 lilp_map = &port->fp_lilp_map; 3708 job->job_counter = lilp_map->lilp_length; 3709 3710 if (job->job_code == JOB_PORT_GETMAP_PLOGI_ALL) { 3711 flag = FP_CMD_PLOGI_RETAIN; 3712 } else { 3713 flag = FP_CMD_PLOGI_DONT_CARE; 3714 } 3715 3716 for (count = 0; count < lilp_map->lilp_length; count++) { 3717 d_id = lilp_map->lilp_alpalist[count]; 3718 3719 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3720 fp_jobdone(job); 3721 continue; 3722 } 3723 3724 pd = fctl_get_remote_port_by_did(port, d_id); 3725 if (pd) { 3726 mutex_enter(&pd->pd_mutex); 3727 if (flag == FP_CMD_PLOGI_DONT_CARE || 3728 pd->pd_state == 
PORT_DEVICE_LOGGED_IN) { 3729 mutex_exit(&pd->pd_mutex); 3730 fp_jobdone(job); 3731 continue; 3732 } 3733 mutex_exit(&pd->pd_mutex); 3734 } 3735 3736 rval = fp_port_login(port, d_id, job, flag, 3737 KM_SLEEP, pd, NULL); 3738 if (rval != FC_SUCCESS) { 3739 fp_jobdone(job); 3740 } 3741 } 3742 3743 mutex_enter(&port->fp_mutex); 3744 } 3745 3746 3747 /* 3748 * Perform loop ONLINE processing 3749 */ 3750 static void 3751 fp_loop_online(fc_local_port_t *port, job_request_t *job, int orphan) 3752 { 3753 int count; 3754 int rval; 3755 uint32_t d_id; 3756 uint32_t listlen; 3757 fc_lilpmap_t *lilp_map; 3758 fc_remote_port_t *pd; 3759 fc_portmap_t *changelist; 3760 3761 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3762 3763 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online begin; port=%p, job=%p", 3764 port, job); 3765 3766 lilp_map = &port->fp_lilp_map; 3767 3768 if (lilp_map->lilp_length) { 3769 mutex_enter(&port->fp_mutex); 3770 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 3771 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 3772 mutex_exit(&port->fp_mutex); 3773 delay(drv_usectohz(PLDA_RR_TOV * 1000 * 1000)); 3774 } else { 3775 mutex_exit(&port->fp_mutex); 3776 } 3777 3778 job->job_counter = lilp_map->lilp_length; 3779 3780 for (count = 0; count < lilp_map->lilp_length; count++) { 3781 d_id = lilp_map->lilp_alpalist[count]; 3782 3783 if (d_id == (lilp_map->lilp_myalpa & 0xFF)) { 3784 fp_jobdone(job); 3785 continue; 3786 } 3787 3788 pd = fctl_get_remote_port_by_did(port, d_id); 3789 if (pd != NULL) { 3790 #ifdef DEBUG 3791 mutex_enter(&pd->pd_mutex); 3792 if (pd->pd_recepient == PD_PLOGI_INITIATOR) { 3793 ASSERT(pd->pd_type != PORT_DEVICE_OLD); 3794 } 3795 mutex_exit(&pd->pd_mutex); 3796 #endif 3797 fp_jobdone(job); 3798 continue; 3799 } 3800 3801 rval = fp_port_login(port, d_id, job, 3802 FP_CMD_PLOGI_DONT_CARE, KM_SLEEP, pd, NULL); 3803 3804 if (rval != FC_SUCCESS) { 3805 fp_jobdone(job); 3806 } 3807 } 3808 fp_jobwait(job); 3809 } 3810 listlen = 0; 3811 changelist = NULL; 3812 3813 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3814 mutex_enter(&port->fp_mutex); 3815 ASSERT(port->fp_statec_busy > 0); 3816 if (port->fp_statec_busy == 1) { 3817 mutex_exit(&port->fp_mutex); 3818 fctl_fillout_map(port, &changelist, &listlen, 3819 1, 0, orphan); 3820 3821 mutex_enter(&port->fp_mutex); 3822 if (port->fp_lilp_map.lilp_magic < MAGIC_LIRP) { 3823 ASSERT(port->fp_total_devices == 0); 3824 port->fp_total_devices = port->fp_dev_count; 3825 } 3826 } else { 3827 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 3828 } 3829 mutex_exit(&port->fp_mutex); 3830 } 3831 3832 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 3833 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 3834 listlen, listlen, KM_SLEEP); 3835 } else { 3836 mutex_enter(&port->fp_mutex); 3837 if (--port->fp_statec_busy == 0) { 3838 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 3839 } 3840 ASSERT(changelist == NULL && listlen == 0); 3841 mutex_exit(&port->fp_mutex); 3842 } 3843 3844 FP_TRACE(FP_NHEAD1(1, 0), "fp_loop_online end; port=%p, job=%p", 3845 port, job); 3846 } 3847 3848 3849 /* 3850 * Get an Arbitrated Loop map from the underlying FCA 3851 */ 3852 static int 3853 fp_get_lilpmap(fc_local_port_t *port, fc_lilpmap_t *lilp_map) 3854 { 3855 int rval; 3856 3857 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap Begin; port=%p, map=%p", 3858 port, lilp_map); 3859 3860 bzero((caddr_t)lilp_map, sizeof (fc_lilpmap_t)); 3861 rval = port->fp_fca_tran->fca_getmap(port->fp_fca_handle, lilp_map); 3862 lilp_map->lilp_magic &= 0xFF; /* Ignore 
upper byte */ 3863 3864 if (rval != FC_SUCCESS) { 3865 rval = FC_NO_MAP; 3866 } else if (lilp_map->lilp_length == 0 && 3867 (lilp_map->lilp_magic >= MAGIC_LISM && 3868 lilp_map->lilp_magic < MAGIC_LIRP)) { 3869 uchar_t lilp_length; 3870 3871 /* 3872 * Since the map length is zero, provide all 3873 * the valid AL_PAs for NL_ports discovery. 3874 */ 3875 lilp_length = sizeof (fp_valid_alpas) / 3876 sizeof (fp_valid_alpas[0]); 3877 lilp_map->lilp_length = lilp_length; 3878 bcopy(fp_valid_alpas, lilp_map->lilp_alpalist, 3879 lilp_length); 3880 } else { 3881 rval = fp_validate_lilp_map(lilp_map); 3882 3883 if (rval == FC_SUCCESS) { 3884 mutex_enter(&port->fp_mutex); 3885 port->fp_total_devices = lilp_map->lilp_length - 1; 3886 mutex_exit(&port->fp_mutex); 3887 } 3888 } 3889 3890 mutex_enter(&port->fp_mutex); 3891 if (rval != FC_SUCCESS && !(port->fp_soft_state & FP_SOFT_BAD_LINK)) { 3892 port->fp_soft_state |= FP_SOFT_BAD_LINK; 3893 mutex_exit(&port->fp_mutex); 3894 3895 if (port->fp_fca_tran->fca_reset(port->fp_fca_handle, 3896 FC_FCA_RESET_CORE) != FC_SUCCESS) { 3897 FP_TRACE(FP_NHEAD1(9, 0), 3898 "FCA reset failed after LILP map was found" 3899 " to be invalid"); 3900 } 3901 } else if (rval == FC_SUCCESS) { 3902 port->fp_soft_state &= ~FP_SOFT_BAD_LINK; 3903 mutex_exit(&port->fp_mutex); 3904 } else { 3905 mutex_exit(&port->fp_mutex); 3906 } 3907 3908 FP_TRACE(FP_NHEAD1(1, 0), "fp_get_lilpmap End; port=%p, map=%p", port, 3909 lilp_map); 3910 3911 return (rval); 3912 } 3913 3914 3915 /* 3916 * Perform Fabric Login: 3917 * 3918 * Return Values: 3919 * FC_SUCCESS 3920 * FC_FAILURE 3921 * FC_NOMEM 3922 * FC_TRANSPORT_ERROR 3923 * and a lot others defined in fc_error.h 3924 */ 3925 static int 3926 fp_fabric_login(fc_local_port_t *port, uint32_t s_id, job_request_t *job, 3927 int flag, int sleep) 3928 { 3929 int rval; 3930 fp_cmd_t *cmd; 3931 uchar_t class; 3932 3933 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 3934 3935 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login Begin; port=%p, job=%p", 3936 port, job); 3937 3938 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3939 if (class == FC_TRAN_CLASS_INVALID) { 3940 return (FC_ELS_BAD); 3941 } 3942 3943 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 3944 sizeof (la_els_logi_t), sleep, NULL); 3945 if (cmd == NULL) { 3946 return (FC_NOMEM); 3947 } 3948 3949 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 3950 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 3951 cmd->cmd_flags = flag; 3952 cmd->cmd_retry_count = fp_retry_count; 3953 cmd->cmd_ulp_pkt = NULL; 3954 3955 fp_xlogi_init(port, cmd, s_id, 0xFFFFFE, fp_flogi_intr, 3956 job, LA_ELS_FLOGI); 3957 3958 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 3959 if (rval != FC_SUCCESS) { 3960 fp_free_pkt(cmd); 3961 } 3962 3963 FP_TRACE(FP_NHEAD1(1, 0), "fp_fabric_login End; port=%p, job=%p", 3964 port, job); 3965 3966 return (rval); 3967 } 3968 3969 3970 /* 3971 * In some scenarios such as private loop device discovery period 3972 * the fc_remote_port_t data structure isn't allocated. The allocation 3973 * is done when the PLOGI is successful. 
In some other scenarios 3974 * such as Fabric topology, the fc_remote_port_t is already created 3975 * and initialized with appropriate values (as the NS provides 3976 * them) 3977 */ 3978 static int 3979 fp_port_login(fc_local_port_t *port, uint32_t d_id, job_request_t *job, 3980 int cmd_flag, int sleep, fc_remote_port_t *pd, fc_packet_t *ulp_pkt) 3981 { 3982 uchar_t class; 3983 fp_cmd_t *cmd; 3984 uint32_t src_id; 3985 fc_remote_port_t *tmp_pd; 3986 int relogin; 3987 int found = 0; 3988 3989 #ifdef DEBUG 3990 if (pd == NULL) { 3991 ASSERT(fctl_get_remote_port_by_did(port, d_id) == NULL); 3992 } 3993 #endif 3994 ASSERT(job->job_counter > 0); 3995 3996 class = fp_get_nextclass(port, FC_TRAN_CLASS_INVALID); 3997 if (class == FC_TRAN_CLASS_INVALID) { 3998 return (FC_ELS_BAD); 3999 } 4000 4001 mutex_enter(&port->fp_mutex); 4002 tmp_pd = fctl_lookup_pd_by_did(port, d_id); 4003 mutex_exit(&port->fp_mutex); 4004 4005 relogin = 1; 4006 if (tmp_pd) { 4007 mutex_enter(&tmp_pd->pd_mutex); 4008 if ((tmp_pd->pd_aux_flags & PD_DISABLE_RELOGIN) && 4009 !(tmp_pd->pd_aux_flags & PD_LOGGED_OUT)) { 4010 tmp_pd->pd_state = PORT_DEVICE_LOGGED_IN; 4011 relogin = 0; 4012 } 4013 mutex_exit(&tmp_pd->pd_mutex); 4014 } 4015 4016 if (!relogin) { 4017 mutex_enter(&tmp_pd->pd_mutex); 4018 if (tmp_pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4019 cmd_flag |= FP_CMD_PLOGI_RETAIN; 4020 } 4021 mutex_exit(&tmp_pd->pd_mutex); 4022 4023 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 4024 sizeof (la_els_adisc_t), sleep, tmp_pd); 4025 if (cmd == NULL) { 4026 return (FC_NOMEM); 4027 } 4028 4029 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4030 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4031 cmd->cmd_flags = cmd_flag; 4032 cmd->cmd_retry_count = fp_retry_count; 4033 cmd->cmd_ulp_pkt = ulp_pkt; 4034 4035 mutex_enter(&port->fp_mutex); 4036 mutex_enter(&tmp_pd->pd_mutex); 4037 fp_adisc_init(cmd, job); 4038 mutex_exit(&tmp_pd->pd_mutex); 4039 mutex_exit(&port->fp_mutex); 4040 4041 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_adisc_t); 4042 cmd->cmd_pkt.pkt_rsplen = sizeof (la_els_adisc_t); 4043 4044 } else { 4045 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 4046 sizeof (la_els_logi_t), sleep, pd); 4047 if (cmd == NULL) { 4048 return (FC_NOMEM); 4049 } 4050 4051 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 4052 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 4053 cmd->cmd_flags = cmd_flag; 4054 cmd->cmd_retry_count = fp_retry_count; 4055 cmd->cmd_ulp_pkt = ulp_pkt; 4056 4057 mutex_enter(&port->fp_mutex); 4058 src_id = port->fp_port_id.port_id; 4059 mutex_exit(&port->fp_mutex); 4060 4061 fp_xlogi_init(port, cmd, src_id, d_id, fp_plogi_intr, 4062 job, LA_ELS_PLOGI); 4063 } 4064 4065 if (pd) { 4066 mutex_enter(&pd->pd_mutex); 4067 pd->pd_flags = PD_ELS_IN_PROGRESS; 4068 mutex_exit(&pd->pd_mutex); 4069 } 4070 4071 /* npiv check to make sure we don't log into ourself */ 4072 if (relogin && (port->fp_topology == FC_TOP_FABRIC)) { 4073 if ((d_id & 0xffff00) == 4074 (port->fp_port_id.port_id & 0xffff00)) { 4075 found = 1; 4076 } 4077 } 4078 4079 if (found || 4080 (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS)) { 4081 if (found) { 4082 fc_packet_t *pkt = &cmd->cmd_pkt; 4083 pkt->pkt_state = FC_PKT_NPORT_RJT; 4084 } 4085 if (pd) { 4086 mutex_enter(&pd->pd_mutex); 4087 pd->pd_flags = PD_IDLE; 4088 mutex_exit(&pd->pd_mutex); 4089 } 4090 4091 if (ulp_pkt) { 4092 fc_packet_t *pkt = &cmd->cmd_pkt; 4093 4094 ulp_pkt->pkt_state = pkt->pkt_state; 4095 ulp_pkt->pkt_reason = pkt->pkt_reason; 4096 ulp_pkt->pkt_action = 
pkt->pkt_action; 4097 ulp_pkt->pkt_expln = pkt->pkt_expln; 4098 } 4099 4100 fp_iodone(cmd); 4101 } 4102 4103 return (FC_SUCCESS); 4104 } 4105 4106 4107 /* 4108 * Register the LOGIN parameters with a port device 4109 */ 4110 static void 4111 fp_register_login(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 4112 la_els_logi_t *acc, uchar_t class) 4113 { 4114 fc_remote_node_t *node; 4115 4116 ASSERT(pd != NULL); 4117 4118 mutex_enter(&pd->pd_mutex); 4119 node = pd->pd_remote_nodep; 4120 if (pd->pd_login_count == 0) { 4121 pd->pd_login_count++; 4122 } 4123 4124 if (handle) { 4125 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_csp, 4126 (uint8_t *)&acc->common_service, 4127 sizeof (acc->common_service), DDI_DEV_AUTOINCR); 4128 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp1, 4129 (uint8_t *)&acc->class_1, sizeof (acc->class_1), 4130 DDI_DEV_AUTOINCR); 4131 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp2, 4132 (uint8_t *)&acc->class_2, sizeof (acc->class_2), 4133 DDI_DEV_AUTOINCR); 4134 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_clsp3, 4135 (uint8_t *)&acc->class_3, sizeof (acc->class_3), 4136 DDI_DEV_AUTOINCR); 4137 } else { 4138 pd->pd_csp = acc->common_service; 4139 pd->pd_clsp1 = acc->class_1; 4140 pd->pd_clsp2 = acc->class_2; 4141 pd->pd_clsp3 = acc->class_3; 4142 } 4143 4144 pd->pd_state = PORT_DEVICE_LOGGED_IN; 4145 pd->pd_login_class = class; 4146 mutex_exit(&pd->pd_mutex); 4147 4148 #ifndef __lock_lint 4149 ASSERT(fctl_get_remote_port_by_did(pd->pd_port, 4150 pd->pd_port_id.port_id) == pd); 4151 #endif 4152 4153 mutex_enter(&node->fd_mutex); 4154 if (handle) { 4155 ddi_rep_get8(*handle, (uint8_t *)node->fd_vv, 4156 (uint8_t *)acc->vendor_version, sizeof (node->fd_vv), 4157 DDI_DEV_AUTOINCR); 4158 } else { 4159 bcopy(acc->vendor_version, node->fd_vv, sizeof (node->fd_vv)); 4160 } 4161 mutex_exit(&node->fd_mutex); 4162 } 4163 4164 4165 /* 4166 * Mark the remote port as OFFLINE 4167 */ 4168 static void 4169 fp_remote_port_offline(fc_remote_port_t *pd) 4170 { 4171 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4172 if (pd->pd_login_count && 4173 ((pd->pd_aux_flags & PD_DISABLE_RELOGIN) == 0)) { 4174 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4175 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4176 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4177 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4178 pd->pd_login_class = 0; 4179 } 4180 pd->pd_type = PORT_DEVICE_OLD; 4181 pd->pd_flags = PD_IDLE; 4182 fctl_tc_reset(&pd->pd_logo_tc); 4183 } 4184 4185 4186 /* 4187 * Deregistration of a port device 4188 */ 4189 static void 4190 fp_unregister_login(fc_remote_port_t *pd) 4191 { 4192 fc_remote_node_t *node; 4193 4194 ASSERT(pd != NULL); 4195 4196 mutex_enter(&pd->pd_mutex); 4197 pd->pd_login_count = 0; 4198 bzero((caddr_t)&pd->pd_csp, sizeof (struct common_service)); 4199 bzero((caddr_t)&pd->pd_clsp1, sizeof (struct service_param)); 4200 bzero((caddr_t)&pd->pd_clsp2, sizeof (struct service_param)); 4201 bzero((caddr_t)&pd->pd_clsp3, sizeof (struct service_param)); 4202 4203 pd->pd_state = PORT_DEVICE_VALID; 4204 pd->pd_login_class = 0; 4205 node = pd->pd_remote_nodep; 4206 mutex_exit(&pd->pd_mutex); 4207 4208 mutex_enter(&node->fd_mutex); 4209 bzero(node->fd_vv, sizeof (node->fd_vv)); 4210 mutex_exit(&node->fd_mutex); 4211 } 4212 4213 4214 /* 4215 * Handle OFFLINE state of an FCA port 4216 */ 4217 static void 4218 fp_port_offline(fc_local_port_t *port, int notify) 4219 { 4220 int index; 4221 int statec; 4222 timeout_id_t tid; 4223 struct pwwn_hash *head; 4224 
fc_remote_port_t *pd; 4225 4226 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4227 4228 for (index = 0; index < pwwn_table_size; index++) { 4229 head = &port->fp_pwwn_table[index]; 4230 pd = head->pwwn_head; 4231 while (pd != NULL) { 4232 mutex_enter(&pd->pd_mutex); 4233 fp_remote_port_offline(pd); 4234 fctl_delist_did_table(port, pd); 4235 mutex_exit(&pd->pd_mutex); 4236 pd = pd->pd_wwn_hnext; 4237 } 4238 } 4239 port->fp_total_devices = 0; 4240 4241 statec = 0; 4242 if (notify) { 4243 /* 4244 * Decrement the statec busy counter as we 4245 * are almost done with handling the state 4246 * change 4247 */ 4248 ASSERT(port->fp_statec_busy > 0); 4249 if (--port->fp_statec_busy == 0) { 4250 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4251 } 4252 mutex_exit(&port->fp_mutex); 4253 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, NULL, 4254 0, 0, KM_SLEEP); 4255 mutex_enter(&port->fp_mutex); 4256 4257 if (port->fp_statec_busy) { 4258 statec++; 4259 } 4260 } else if (port->fp_statec_busy > 1) { 4261 statec++; 4262 } 4263 4264 if ((tid = port->fp_offline_tid) != NULL) { 4265 mutex_exit(&port->fp_mutex); 4266 (void) untimeout(tid); 4267 mutex_enter(&port->fp_mutex); 4268 } 4269 4270 if (!statec) { 4271 port->fp_offline_tid = timeout(fp_offline_timeout, 4272 (caddr_t)port, fp_offline_ticks); 4273 } 4274 } 4275 4276 4277 /* 4278 * Offline devices and send up a state change notification to ULPs 4279 */ 4280 static void 4281 fp_offline_timeout(void *port_handle) 4282 { 4283 int ret; 4284 fc_local_port_t *port = port_handle; 4285 uint32_t listlen = 0; 4286 fc_portmap_t *changelist = NULL; 4287 4288 mutex_enter(&port->fp_mutex); 4289 4290 if ((FC_PORT_STATE_MASK(port->fp_state) != FC_STATE_OFFLINE) || 4291 (port->fp_soft_state & 4292 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 4293 port->fp_dev_count == 0 || port->fp_statec_busy) { 4294 port->fp_offline_tid = NULL; 4295 mutex_exit(&port->fp_mutex); 4296 return; 4297 } 4298 4299 mutex_exit(&port->fp_mutex); 4300 4301 FP_TRACE(FP_NHEAD2(9, 0), "OFFLINE timeout"); 4302 4303 if (port->fp_options & FP_CORE_ON_OFFLINE_TIMEOUT) { 4304 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4305 FC_FCA_CORE)) != FC_SUCCESS) { 4306 FP_TRACE(FP_NHEAD1(9, ret), 4307 "Failed to force adapter dump"); 4308 } else { 4309 FP_TRACE(FP_NHEAD1(9, 0), 4310 "Forced adapter dump successfully"); 4311 } 4312 } else if (port->fp_options & FP_RESET_CORE_ON_OFFLINE_TIMEOUT) { 4313 if ((ret = port->fp_fca_tran->fca_reset(port->fp_fca_handle, 4314 FC_FCA_RESET_CORE)) != FC_SUCCESS) { 4315 FP_TRACE(FP_NHEAD1(9, ret), 4316 "Failed to force adapter dump and reset"); 4317 } else { 4318 FP_TRACE(FP_NHEAD1(9, 0), 4319 "Forced adapter dump and reset successfully"); 4320 } 4321 } 4322 4323 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 4324 (void) fp_ulp_statec_cb(port, FC_STATE_OFFLINE, changelist, 4325 listlen, listlen, KM_SLEEP); 4326 4327 mutex_enter(&port->fp_mutex); 4328 port->fp_offline_tid = NULL; 4329 mutex_exit(&port->fp_mutex); 4330 } 4331 4332 4333 /* 4334 * Perform general purpose ELS request initialization 4335 */ 4336 static void 4337 fp_els_init(fp_cmd_t *cmd, uint32_t s_id, uint32_t d_id, 4338 void (*comp) (), job_request_t *job) 4339 { 4340 fc_packet_t *pkt; 4341 4342 pkt = &cmd->cmd_pkt; 4343 cmd->cmd_job = job; 4344 4345 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_REQ; 4346 pkt->pkt_cmd_fhdr.d_id = d_id; 4347 pkt->pkt_cmd_fhdr.s_id = s_id; 4348 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 4349 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 
F_CTL_FIRST_SEQ; 4350 pkt->pkt_cmd_fhdr.seq_id = 0; 4351 pkt->pkt_cmd_fhdr.df_ctl = 0; 4352 pkt->pkt_cmd_fhdr.seq_cnt = 0; 4353 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 4354 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 4355 pkt->pkt_cmd_fhdr.ro = 0; 4356 pkt->pkt_cmd_fhdr.rsvd = 0; 4357 pkt->pkt_comp = comp; 4358 pkt->pkt_timeout = FP_ELS_TIMEOUT; 4359 } 4360 4361 4362 /* 4363 * Initialize PLOGI/FLOGI ELS request 4364 */ 4365 static void 4366 fp_xlogi_init(fc_local_port_t *port, fp_cmd_t *cmd, uint32_t s_id, 4367 uint32_t d_id, void (*intr) (), job_request_t *job, uchar_t ls_code) 4368 { 4369 ls_code_t payload; 4370 4371 fp_els_init(cmd, s_id, d_id, intr, job); 4372 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4373 4374 payload.ls_code = ls_code; 4375 payload.mbz = 0; 4376 4377 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, 4378 (uint8_t *)&port->fp_service_params, 4379 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (port->fp_service_params), 4380 DDI_DEV_AUTOINCR); 4381 4382 ddi_rep_put8(cmd->cmd_pkt.pkt_cmd_acc, (uint8_t *)&payload, 4383 (uint8_t *)cmd->cmd_pkt.pkt_cmd, sizeof (payload), 4384 DDI_DEV_AUTOINCR); 4385 } 4386 4387 4388 /* 4389 * Initialize LOGO ELS request 4390 */ 4391 static void 4392 fp_logo_init(fc_remote_port_t *pd, fp_cmd_t *cmd, job_request_t *job) 4393 { 4394 fc_local_port_t *port; 4395 fc_packet_t *pkt; 4396 la_els_logo_t payload; 4397 4398 port = pd->pd_port; 4399 pkt = &cmd->cmd_pkt; 4400 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4401 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4402 4403 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4404 fp_logo_intr, job); 4405 4406 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4407 4408 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4409 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4410 4411 payload.ls_code.ls_code = LA_ELS_LOGO; 4412 payload.ls_code.mbz = 0; 4413 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 4414 payload.nport_id = port->fp_port_id; 4415 4416 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4417 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4418 } 4419 4420 /* 4421 * Initialize RNID ELS request 4422 */ 4423 static void 4424 fp_rnid_init(fp_cmd_t *cmd, uint16_t flag, job_request_t *job) 4425 { 4426 fc_local_port_t *port; 4427 fc_packet_t *pkt; 4428 la_els_rnid_t payload; 4429 fc_remote_port_t *pd; 4430 4431 pkt = &cmd->cmd_pkt; 4432 pd = pkt->pkt_pd; 4433 port = pd->pd_port; 4434 4435 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4436 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4437 4438 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4439 fp_rnid_intr, job); 4440 4441 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4442 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4443 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4444 4445 payload.ls_code.ls_code = LA_ELS_RNID; 4446 payload.ls_code.mbz = 0; 4447 payload.data_format = flag; 4448 4449 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4450 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4451 } 4452 4453 /* 4454 * Initialize RLS ELS request 4455 */ 4456 static void 4457 fp_rls_init(fp_cmd_t *cmd, job_request_t *job) 4458 { 4459 fc_local_port_t *port; 4460 fc_packet_t *pkt; 4461 la_els_rls_t payload; 4462 fc_remote_port_t *pd; 4463 4464 pkt = &cmd->cmd_pkt; 4465 pd = pkt->pkt_pd; 4466 port = pd->pd_port; 4467 4468 ASSERT(MUTEX_HELD(&port->fp_mutex)); 4469 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4470 4471 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4472 fp_rls_intr, job); 4473 4474 
cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4475 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4476 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4477 4478 payload.ls_code.ls_code = LA_ELS_RLS; 4479 payload.ls_code.mbz = 0; 4480 payload.rls_portid = port->fp_port_id; 4481 4482 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4483 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4484 } 4485 4486 4487 /* 4488 * Initialize an ADISC ELS request 4489 */ 4490 static void 4491 fp_adisc_init(fp_cmd_t *cmd, job_request_t *job) 4492 { 4493 fc_local_port_t *port; 4494 fc_packet_t *pkt; 4495 la_els_adisc_t payload; 4496 fc_remote_port_t *pd; 4497 4498 pkt = &cmd->cmd_pkt; 4499 pd = pkt->pkt_pd; 4500 port = pd->pd_port; 4501 4502 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 4503 ASSERT(MUTEX_HELD(&pd->pd_port->fp_mutex)); 4504 4505 fp_els_init(cmd, port->fp_port_id.port_id, pd->pd_port_id.port_id, 4506 fp_adisc_intr, job); 4507 4508 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 4509 pkt->pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 4510 pkt->pkt_tran_type = FC_PKT_EXCHANGE; 4511 4512 payload.ls_code.ls_code = LA_ELS_ADISC; 4513 payload.ls_code.mbz = 0; 4514 payload.nport_id = port->fp_port_id; 4515 payload.port_wwn = port->fp_service_params.nport_ww_name; 4516 payload.node_wwn = port->fp_service_params.node_ww_name; 4517 payload.hard_addr = port->fp_hard_addr; 4518 4519 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 4520 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 4521 } 4522 4523 4524 /* 4525 * Send up a state change notification to ULPs. 4526 * Spawns a call to fctl_ulp_statec_cb in a taskq thread. 4527 */ 4528 static int 4529 fp_ulp_statec_cb(fc_local_port_t *port, uint32_t state, 4530 fc_portmap_t *changelist, uint32_t listlen, uint32_t alloc_len, int sleep) 4531 { 4532 fc_port_clist_t *clist; 4533 fc_remote_port_t *pd; 4534 int count; 4535 4536 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4537 4538 clist = kmem_zalloc(sizeof (*clist), sleep); 4539 if (clist == NULL) { 4540 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4541 return (FC_NOMEM); 4542 } 4543 4544 clist->clist_state = state; 4545 4546 mutex_enter(&port->fp_mutex); 4547 clist->clist_flags = port->fp_topology; 4548 mutex_exit(&port->fp_mutex); 4549 4550 clist->clist_port = (opaque_t)port; 4551 clist->clist_len = listlen; 4552 clist->clist_size = alloc_len; 4553 clist->clist_map = changelist; 4554 4555 /* 4556 * Bump the reference count of each fc_remote_port_t in this changelist. 4557 * This is necessary since these devices will be sitting in a taskq 4558 * and referenced later. When the state change notification is 4559 * complete, the reference counts will be decremented. 
4560 */ 4561 for (count = 0; count < clist->clist_len; count++) { 4562 pd = clist->clist_map[count].map_pd; 4563 4564 if (pd != NULL) { 4565 mutex_enter(&pd->pd_mutex); 4566 ASSERT((pd->pd_ref_count >= 0) || 4567 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4568 pd->pd_ref_count++; 4569 4570 if (clist->clist_map[count].map_state != 4571 PORT_DEVICE_INVALID) { 4572 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4573 } 4574 4575 mutex_exit(&pd->pd_mutex); 4576 } 4577 } 4578 4579 #ifdef DEBUG 4580 /* 4581 * Sanity check for presence of OLD devices in the hash lists 4582 */ 4583 if (clist->clist_size) { 4584 ASSERT(clist->clist_map != NULL); 4585 for (count = 0; count < clist->clist_len; count++) { 4586 if (clist->clist_map[count].map_state == 4587 PORT_DEVICE_INVALID) { 4588 la_wwn_t pwwn; 4589 fc_portid_t d_id; 4590 4591 pd = clist->clist_map[count].map_pd; 4592 ASSERT(pd != NULL); 4593 4594 mutex_enter(&pd->pd_mutex); 4595 pwwn = pd->pd_port_name; 4596 d_id = pd->pd_port_id; 4597 mutex_exit(&pd->pd_mutex); 4598 4599 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4600 ASSERT(pd != clist->clist_map[count].map_pd); 4601 4602 pd = fctl_get_remote_port_by_did(port, 4603 d_id.port_id); 4604 ASSERT(pd != clist->clist_map[count].map_pd); 4605 } 4606 } 4607 } 4608 #endif 4609 4610 mutex_enter(&port->fp_mutex); 4611 4612 if (state == FC_STATE_ONLINE) { 4613 if (--port->fp_statec_busy == 0) { 4614 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 4615 } 4616 } 4617 mutex_exit(&port->fp_mutex); 4618 4619 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 4620 clist, KM_SLEEP); 4621 4622 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_statec fired; Port=%p," 4623 "state=%x, len=%d", port, state, listlen); 4624 4625 return (FC_SUCCESS); 4626 } 4627 4628 4629 /* 4630 * Send up a FC_STATE_DEVICE_CHANGE state notification to ULPs 4631 */ 4632 static int 4633 fp_ulp_devc_cb(fc_local_port_t *port, fc_portmap_t *changelist, 4634 uint32_t listlen, uint32_t alloc_len, int sleep, int sync) 4635 { 4636 int ret; 4637 fc_port_clist_t *clist; 4638 4639 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 4640 4641 clist = kmem_zalloc(sizeof (*clist), sleep); 4642 if (clist == NULL) { 4643 kmem_free(changelist, alloc_len * sizeof (*changelist)); 4644 return (FC_NOMEM); 4645 } 4646 4647 clist->clist_state = FC_STATE_DEVICE_CHANGE; 4648 4649 mutex_enter(&port->fp_mutex); 4650 clist->clist_flags = port->fp_topology; 4651 mutex_exit(&port->fp_mutex); 4652 4653 clist->clist_port = (opaque_t)port; 4654 clist->clist_len = listlen; 4655 clist->clist_size = alloc_len; 4656 clist->clist_map = changelist; 4657 4658 /* Send sysevents for target state changes */ 4659 4660 if (clist->clist_size) { 4661 int count; 4662 fc_remote_port_t *pd; 4663 4664 ASSERT(clist->clist_map != NULL); 4665 for (count = 0; count < clist->clist_len; count++) { 4666 pd = clist->clist_map[count].map_pd; 4667 4668 /* 4669 * Bump reference counts on all fc_remote_port_t 4670 * structs in this list. We don't know when the task 4671 * will fire, and we don't need these fc_remote_port_t 4672 * structs going away behind our back. 
4673 */ 4674 if (pd) { 4675 mutex_enter(&pd->pd_mutex); 4676 ASSERT((pd->pd_ref_count >= 0) || 4677 (pd->pd_aux_flags & PD_GIVEN_TO_ULPS)); 4678 pd->pd_ref_count++; 4679 mutex_exit(&pd->pd_mutex); 4680 } 4681 4682 if (clist->clist_map[count].map_state == 4683 PORT_DEVICE_VALID) { 4684 if (clist->clist_map[count].map_type == 4685 PORT_DEVICE_NEW) { 4686 /* Update our state change counter */ 4687 mutex_enter(&port->fp_mutex); 4688 port->fp_last_change++; 4689 mutex_exit(&port->fp_mutex); 4690 4691 /* Additions */ 4692 fp_log_target_event(port, 4693 ESC_SUNFC_TARGET_ADD, 4694 clist->clist_map[count].map_pwwn, 4695 clist->clist_map[count].map_did. 4696 port_id); 4697 } 4698 4699 } else if ((clist->clist_map[count].map_type == 4700 PORT_DEVICE_OLD) && 4701 (clist->clist_map[count].map_state == 4702 PORT_DEVICE_INVALID)) { 4703 /* Update our state change counter */ 4704 mutex_enter(&port->fp_mutex); 4705 port->fp_last_change++; 4706 mutex_exit(&port->fp_mutex); 4707 4708 /* 4709 * For removals, we don't decrement 4710 * pd_ref_count until after the ULP's 4711 * state change callback function has 4712 * completed. 4713 */ 4714 4715 /* Removals */ 4716 fp_log_target_event(port, 4717 ESC_SUNFC_TARGET_REMOVE, 4718 clist->clist_map[count].map_pwwn, 4719 clist->clist_map[count].map_did.port_id); 4720 } 4721 4722 if (clist->clist_map[count].map_state != 4723 PORT_DEVICE_INVALID) { 4724 /* 4725 * Indicate that the ULPs are now aware of 4726 * this device. 4727 */ 4728 4729 mutex_enter(&pd->pd_mutex); 4730 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 4731 mutex_exit(&pd->pd_mutex); 4732 } 4733 4734 #ifdef DEBUG 4735 /* 4736 * Sanity check for OLD devices in the hash lists 4737 */ 4738 if (pd && clist->clist_map[count].map_state == 4739 PORT_DEVICE_INVALID) { 4740 la_wwn_t pwwn; 4741 fc_portid_t d_id; 4742 4743 mutex_enter(&pd->pd_mutex); 4744 pwwn = pd->pd_port_name; 4745 d_id = pd->pd_port_id; 4746 mutex_exit(&pd->pd_mutex); 4747 4748 /* 4749 * This overwrites the 'pd' local variable. 4750 * Beware of this if 'pd' ever gets 4751 * referenced below this block. 
4752 */ 4753 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 4754 ASSERT(pd != clist->clist_map[count].map_pd); 4755 4756 pd = fctl_get_remote_port_by_did(port, 4757 d_id.port_id); 4758 ASSERT(pd != clist->clist_map[count].map_pd); 4759 } 4760 #endif 4761 } 4762 } 4763 4764 if (sync) { 4765 clist->clist_wait = 1; 4766 mutex_init(&clist->clist_mutex, NULL, MUTEX_DRIVER, NULL); 4767 cv_init(&clist->clist_cv, NULL, CV_DRIVER, NULL); 4768 } 4769 4770 ret = taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, clist, sleep); 4771 if (sync && ret) { 4772 mutex_enter(&clist->clist_mutex); 4773 while (clist->clist_wait) { 4774 cv_wait(&clist->clist_cv, &clist->clist_mutex); 4775 } 4776 mutex_exit(&clist->clist_mutex); 4777 4778 mutex_destroy(&clist->clist_mutex); 4779 cv_destroy(&clist->clist_cv); 4780 kmem_free(clist, sizeof (*clist)); 4781 } 4782 4783 if (!ret) { 4784 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc dispatch failed; " 4785 "port=%p", port); 4786 kmem_free(clist->clist_map, 4787 sizeof (*(clist->clist_map)) * clist->clist_size); 4788 kmem_free(clist, sizeof (*clist)); 4789 } else { 4790 FP_TRACE(FP_NHEAD1(4, 0), "fp_ulp_devc fired; port=%p, len=%d", 4791 port, listlen); 4792 } 4793 4794 return (FC_SUCCESS); 4795 } 4796 4797 4798 /* 4799 * Perform PLOGI to the group of devices for ULPs 4800 */ 4801 static void 4802 fp_plogi_group(fc_local_port_t *port, job_request_t *job) 4803 { 4804 int offline; 4805 int count; 4806 int rval; 4807 uint32_t listlen; 4808 uint32_t done; 4809 uint32_t d_id; 4810 fc_remote_node_t *node; 4811 fc_remote_port_t *pd; 4812 fc_remote_port_t *tmp_pd; 4813 fc_packet_t *ulp_pkt; 4814 la_els_logi_t *els_data; 4815 ls_code_t ls_code; 4816 4817 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group begin; port=%p, job=%p", 4818 port, job); 4819 4820 done = 0; 4821 listlen = job->job_ulp_listlen; 4822 job->job_counter = job->job_ulp_listlen; 4823 4824 mutex_enter(&port->fp_mutex); 4825 offline = (port->fp_statec_busy || 4826 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) ? 
1 : 0; 4827 mutex_exit(&port->fp_mutex); 4828 4829 for (count = 0; count < listlen; count++) { 4830 ASSERT(job->job_ulp_pkts[count]->pkt_rsplen >= 4831 sizeof (la_els_logi_t)); 4832 4833 ulp_pkt = job->job_ulp_pkts[count]; 4834 pd = ulp_pkt->pkt_pd; 4835 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4836 4837 if (offline) { 4838 done++; 4839 4840 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 4841 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4842 ulp_pkt->pkt_pd = NULL; 4843 ulp_pkt->pkt_comp(ulp_pkt); 4844 4845 job->job_ulp_pkts[count] = NULL; 4846 4847 fp_jobdone(job); 4848 continue; 4849 } 4850 4851 if (pd == NULL) { 4852 pd = fctl_get_remote_port_by_did(port, d_id); 4853 if (pd == NULL) { /* reset later */ 4854 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4855 continue; 4856 } 4857 mutex_enter(&pd->pd_mutex); 4858 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 4859 mutex_exit(&pd->pd_mutex); 4860 ulp_pkt->pkt_state = FC_PKT_ELS_IN_PROGRESS; 4861 done++; 4862 ulp_pkt->pkt_comp(ulp_pkt); 4863 job->job_ulp_pkts[count] = NULL; 4864 fp_jobdone(job); 4865 } else { 4866 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4867 mutex_exit(&pd->pd_mutex); 4868 } 4869 continue; 4870 } 4871 4872 switch (ulp_pkt->pkt_state) { 4873 case FC_PKT_ELS_IN_PROGRESS: 4874 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 4875 /* FALLTHRU */ 4876 case FC_PKT_LOCAL_RJT: 4877 done++; 4878 ulp_pkt->pkt_comp(ulp_pkt); 4879 job->job_ulp_pkts[count] = NULL; 4880 fp_jobdone(job); 4881 continue; 4882 default: 4883 break; 4884 } 4885 4886 /* 4887 * Validate the pd corresponding to the d_id passed 4888 * by the ULPs 4889 */ 4890 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 4891 if ((tmp_pd == NULL) || (pd != tmp_pd)) { 4892 done++; 4893 ulp_pkt->pkt_state = FC_PKT_FAILURE; 4894 ulp_pkt->pkt_reason = FC_REASON_NO_CONNECTION; 4895 ulp_pkt->pkt_pd = NULL; 4896 ulp_pkt->pkt_comp(ulp_pkt); 4897 job->job_ulp_pkts[count] = NULL; 4898 fp_jobdone(job); 4899 continue; 4900 } 4901 4902 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group contd; " 4903 "port=%p, pd=%p", port, pd); 4904 4905 mutex_enter(&pd->pd_mutex); 4906 4907 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 4908 done++; 4909 els_data = (la_els_logi_t *)ulp_pkt->pkt_resp; 4910 4911 ls_code.ls_code = LA_ELS_ACC; 4912 ls_code.mbz = 0; 4913 4914 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4915 (uint8_t *)&ls_code, (uint8_t *)&els_data->ls_code, 4916 sizeof (ls_code_t), DDI_DEV_AUTOINCR); 4917 4918 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4919 (uint8_t *)&pd->pd_csp, 4920 (uint8_t *)&els_data->common_service, 4921 sizeof (pd->pd_csp), DDI_DEV_AUTOINCR); 4922 4923 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4924 (uint8_t *)&pd->pd_port_name, 4925 (uint8_t *)&els_data->nport_ww_name, 4926 sizeof (pd->pd_port_name), DDI_DEV_AUTOINCR); 4927 4928 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4929 (uint8_t *)&pd->pd_clsp1, 4930 (uint8_t *)&els_data->class_1, 4931 sizeof (pd->pd_clsp1), DDI_DEV_AUTOINCR); 4932 4933 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4934 (uint8_t *)&pd->pd_clsp2, 4935 (uint8_t *)&els_data->class_2, 4936 sizeof (pd->pd_clsp2), DDI_DEV_AUTOINCR); 4937 4938 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4939 (uint8_t *)&pd->pd_clsp3, 4940 (uint8_t *)&els_data->class_3, 4941 sizeof (pd->pd_clsp3), DDI_DEV_AUTOINCR); 4942 4943 node = pd->pd_remote_nodep; 4944 pd->pd_login_count++; 4945 pd->pd_flags = PD_IDLE; 4946 ulp_pkt->pkt_pd = pd; 4947 mutex_exit(&pd->pd_mutex); 4948 4949 mutex_enter(&node->fd_mutex); 4950 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4951 (uint8_t *)&node->fd_node_name, 4952 (uint8_t *)(&els_data->node_ww_name), 4953 sizeof (node->fd_node_name), 
DDI_DEV_AUTOINCR); 4954 4955 4956 ddi_rep_put8(ulp_pkt->pkt_resp_acc, 4957 (uint8_t *)&node->fd_vv, 4958 (uint8_t *)(&els_data->vendor_version), 4959 sizeof (node->fd_vv), DDI_DEV_AUTOINCR); 4960 4961 mutex_exit(&node->fd_mutex); 4962 ulp_pkt->pkt_state = FC_PKT_SUCCESS; 4963 } else { 4964 4965 ulp_pkt->pkt_state = FC_PKT_FAILURE; /* reset later */ 4966 mutex_exit(&pd->pd_mutex); 4967 } 4968 4969 if (ulp_pkt->pkt_state != FC_PKT_FAILURE) { 4970 ulp_pkt->pkt_comp(ulp_pkt); 4971 job->job_ulp_pkts[count] = NULL; 4972 fp_jobdone(job); 4973 } 4974 } 4975 4976 if (done == listlen) { 4977 fp_jobwait(job); 4978 fctl_jobdone(job); 4979 return; 4980 } 4981 4982 job->job_counter = listlen - done; 4983 4984 for (count = 0; count < listlen; count++) { 4985 int cmd_flags; 4986 4987 if ((ulp_pkt = job->job_ulp_pkts[count]) == NULL) { 4988 continue; 4989 } 4990 4991 ASSERT(ulp_pkt->pkt_state == FC_PKT_FAILURE); 4992 4993 cmd_flags = FP_CMD_PLOGI_RETAIN; 4994 4995 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 4996 ASSERT(d_id != 0); 4997 4998 pd = fctl_get_remote_port_by_did(port, d_id); 4999 5000 /* 5001 * We need to properly adjust the port device 5002 * reference counter before we assign the pd 5003 * to the ULP packets port device pointer. 5004 */ 5005 if (pd != NULL && ulp_pkt->pkt_pd == NULL) { 5006 mutex_enter(&pd->pd_mutex); 5007 pd->pd_ref_count++; 5008 mutex_exit(&pd->pd_mutex); 5009 FP_TRACE(FP_NHEAD1(3, 0), 5010 "fp_plogi_group: DID = 0x%x using new pd %p \ 5011 old pd NULL\n", d_id, pd); 5012 } else if (pd != NULL && ulp_pkt->pkt_pd != NULL && 5013 ulp_pkt->pkt_pd != pd) { 5014 mutex_enter(&pd->pd_mutex); 5015 pd->pd_ref_count++; 5016 mutex_exit(&pd->pd_mutex); 5017 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5018 ulp_pkt->pkt_pd->pd_ref_count--; 5019 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5020 FP_TRACE(FP_NHEAD1(3, 0), 5021 "fp_plogi_group: DID = 0x%x pkt_pd %p != pd %p\n", 5022 d_id, ulp_pkt->pkt_pd, pd); 5023 } else if (pd == NULL && ulp_pkt->pkt_pd != NULL) { 5024 mutex_enter(&ulp_pkt->pkt_pd->pd_mutex); 5025 ulp_pkt->pkt_pd->pd_ref_count--; 5026 mutex_exit(&ulp_pkt->pkt_pd->pd_mutex); 5027 FP_TRACE(FP_NHEAD1(3, 0), 5028 "fp_plogi_group: DID = 0x%x pd is NULL and \ 5029 pkt_pd = %p\n", d_id, ulp_pkt->pkt_pd); 5030 } 5031 5032 ulp_pkt->pkt_pd = pd; 5033 5034 if (pd != NULL) { 5035 mutex_enter(&pd->pd_mutex); 5036 d_id = pd->pd_port_id.port_id; 5037 pd->pd_flags = PD_ELS_IN_PROGRESS; 5038 mutex_exit(&pd->pd_mutex); 5039 } else { 5040 d_id = ulp_pkt->pkt_cmd_fhdr.d_id; 5041 #ifdef DEBUG 5042 pd = fctl_get_remote_port_by_did(port, d_id); 5043 ASSERT(pd == NULL); 5044 #endif 5045 /* 5046 * In the Fabric topology, use NS to create 5047 * port device, and if that fails still try 5048 * with PLOGI - which will make yet another 5049 * attempt to create after successful PLOGI 5050 */ 5051 mutex_enter(&port->fp_mutex); 5052 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 5053 mutex_exit(&port->fp_mutex); 5054 pd = fp_create_remote_port_by_ns(port, 5055 d_id, KM_SLEEP); 5056 if (pd) { 5057 cmd_flags |= FP_CMD_DELDEV_ON_ERROR; 5058 5059 mutex_enter(&pd->pd_mutex); 5060 pd->pd_flags = PD_ELS_IN_PROGRESS; 5061 mutex_exit(&pd->pd_mutex); 5062 5063 FP_TRACE(FP_NHEAD1(3, 0), 5064 "fp_plogi_group;" 5065 " NS created PD port=%p, job=%p," 5066 " pd=%p", port, job, pd); 5067 } 5068 } else { 5069 mutex_exit(&port->fp_mutex); 5070 } 5071 if ((ulp_pkt->pkt_pd == NULL) && (pd != NULL)) { 5072 FP_TRACE(FP_NHEAD1(3, 0), 5073 "fp_plogi_group;" 5074 "ulp_pkt's pd is NULL, get a pd %p", 5075 pd); 5076 mutex_enter(&pd->pd_mutex); 5077 
pd->pd_ref_count++; 5078 mutex_exit(&pd->pd_mutex); 5079 } 5080 ulp_pkt->pkt_pd = pd; 5081 } 5082 5083 rval = fp_port_login(port, d_id, job, cmd_flags, 5084 KM_SLEEP, pd, ulp_pkt); 5085 5086 if (rval == FC_SUCCESS) { 5087 continue; 5088 } 5089 5090 if (rval == FC_STATEC_BUSY) { 5091 ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5092 ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5093 } else { 5094 ulp_pkt->pkt_state = FC_PKT_FAILURE; 5095 } 5096 5097 if (pd) { 5098 mutex_enter(&pd->pd_mutex); 5099 pd->pd_flags = PD_IDLE; 5100 mutex_exit(&pd->pd_mutex); 5101 } 5102 5103 if (cmd_flags & FP_CMD_DELDEV_ON_ERROR) { 5104 ASSERT(pd != NULL); 5105 5106 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_group: NS created," 5107 " PD removed; port=%p, job=%p", port, job); 5108 5109 mutex_enter(&pd->pd_mutex); 5110 pd->pd_ref_count--; 5111 node = pd->pd_remote_nodep; 5112 mutex_exit(&pd->pd_mutex); 5113 5114 ASSERT(node != NULL); 5115 5116 if (fctl_destroy_remote_port(port, pd) == 0) { 5117 fctl_destroy_remote_node(node); 5118 } 5119 ulp_pkt->pkt_pd = NULL; 5120 } 5121 ulp_pkt->pkt_comp(ulp_pkt); 5122 fp_jobdone(job); 5123 } 5124 5125 fp_jobwait(job); 5126 fctl_jobdone(job); 5127 5128 FP_TRACE(FP_NHEAD1(1, 0), "fp_plogi_group end: port=%p, job=%p", 5129 port, job); 5130 } 5131 5132 5133 /* 5134 * Name server request initialization 5135 */ 5136 static void 5137 fp_ns_init(fc_local_port_t *port, job_request_t *job, int sleep) 5138 { 5139 int rval; 5140 int count; 5141 int size; 5142 5143 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5144 5145 job->job_counter = 1; 5146 job->job_result = FC_SUCCESS; 5147 5148 rval = fp_port_login(port, 0xFFFFFC, job, FP_CMD_PLOGI_RETAIN, 5149 KM_SLEEP, NULL, NULL); 5150 5151 if (rval != FC_SUCCESS) { 5152 mutex_enter(&port->fp_mutex); 5153 port->fp_topology = FC_TOP_NO_NS; 5154 mutex_exit(&port->fp_mutex); 5155 return; 5156 } 5157 5158 fp_jobwait(job); 5159 5160 if (job->job_result != FC_SUCCESS) { 5161 mutex_enter(&port->fp_mutex); 5162 port->fp_topology = FC_TOP_NO_NS; 5163 mutex_exit(&port->fp_mutex); 5164 return; 5165 } 5166 5167 /* 5168 * At this time, we'll do NS registration for objects in the 5169 * ns_reg_cmds (see top of this file) array. 5170 * 5171 * Each time a ULP module registers with the transport, the 5172 * appropriate bit is set in the fc4 types field and registered 5173 * with the NS for this support. Also, ULPs and FC admin utilities 5174 * may do registration for objects like IP address, symbolic 5175 * port/node name, Initial process associator at run time.
5176 */ 5177 size = sizeof (ns_reg_cmds) / sizeof (ns_reg_cmds[0]); 5178 job->job_counter = size; 5179 job->job_result = FC_SUCCESS; 5180 5181 for (count = 0; count < size; count++) { 5182 if (fp_ns_reg(port, NULL, ns_reg_cmds[count], 5183 job, 0, sleep) != FC_SUCCESS) { 5184 fp_jobdone(job); 5185 } 5186 } 5187 if (size) { 5188 fp_jobwait(job); 5189 } 5190 5191 job->job_result = FC_SUCCESS; 5192 5193 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 5194 5195 if (port->fp_dev_count < FP_MAX_DEVICES) { 5196 (void) fp_ns_get_devcount(port, job, 1, KM_SLEEP); 5197 } 5198 5199 job->job_counter = 1; 5200 5201 if (fp_ns_scr(port, job, FC_SCR_FULL_REGISTRATION, 5202 sleep) == FC_SUCCESS) { 5203 fp_jobwait(job); 5204 } 5205 } 5206 5207 5208 /* 5209 * Name server finish: 5210 * Unregister for RSCNs 5211 * Unregister all the host port objects in the Name Server 5212 * Perform LOGO with the NS; 5213 */ 5214 static void 5215 fp_ns_fini(fc_local_port_t *port, job_request_t *job) 5216 { 5217 fp_cmd_t *cmd; 5218 uchar_t class; 5219 uint32_t s_id; 5220 fc_packet_t *pkt; 5221 la_els_logo_t payload; 5222 5223 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5224 5225 job->job_counter = 1; 5226 5227 if (fp_ns_scr(port, job, FC_SCR_CLEAR_REGISTRATION, KM_SLEEP) != 5228 FC_SUCCESS) { 5229 fp_jobdone(job); 5230 } 5231 fp_jobwait(job); 5232 5233 job->job_counter = 1; 5234 5235 if (fp_ns_reg(port, NULL, NS_DA_ID, job, 0, KM_SLEEP) != FC_SUCCESS) { 5236 fp_jobdone(job); 5237 } 5238 fp_jobwait(job); 5239 5240 job->job_counter = 1; 5241 5242 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 5243 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, NULL); 5244 pkt = &cmd->cmd_pkt; 5245 5246 mutex_enter(&port->fp_mutex); 5247 class = port->fp_ns_login_class; 5248 s_id = port->fp_port_id.port_id; 5249 payload.nport_id = port->fp_port_id; 5250 mutex_exit(&port->fp_mutex); 5251 5252 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 5253 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 5254 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 5255 cmd->cmd_retry_count = 1; 5256 cmd->cmd_ulp_pkt = NULL; 5257 5258 if (port->fp_npiv_type == FC_NPIV_PORT) { 5259 fp_els_init(cmd, s_id, 0xFFFFFE, fp_logo_intr, job); 5260 } else { 5261 fp_els_init(cmd, s_id, 0xFFFFFC, fp_logo_intr, job); 5262 } 5263 5264 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 5265 5266 payload.ls_code.ls_code = LA_ELS_LOGO; 5267 payload.ls_code.mbz = 0; 5268 payload.nport_ww_name = port->fp_service_params.nport_ww_name; 5269 5270 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 5271 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 5272 5273 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 5274 fp_iodone(cmd); 5275 } 5276 fp_jobwait(job); 5277 } 5278 5279 5280 /* 5281 * NS Registration function. 5282 * 5283 * It should be seriously noted that FC-GS-2 currently doesn't support 5284 * an Object Registration by a D_ID other than the owner of the object. 5285 * What we are aiming at currently is to at least allow Symbolic Node/Port 5286 * Name registration for any N_Port Identifier by the host software. 5287 * 5288 * Anyway, if the second argument (fc_remote_port_t *) is NULL, this 5289 * function treats the request as Host NS Object. 
5290 */ 5291 static int 5292 fp_ns_reg(fc_local_port_t *port, fc_remote_port_t *pd, uint16_t cmd_code, 5293 job_request_t *job, int polled, int sleep) 5294 { 5295 int rval; 5296 fc_portid_t s_id; 5297 fc_packet_t *pkt; 5298 fp_cmd_t *cmd; 5299 5300 if (pd == NULL) { 5301 mutex_enter(&port->fp_mutex); 5302 s_id = port->fp_port_id; 5303 mutex_exit(&port->fp_mutex); 5304 } else { 5305 mutex_enter(&pd->pd_mutex); 5306 s_id = pd->pd_port_id; 5307 mutex_exit(&pd->pd_mutex); 5308 } 5309 5310 if (polled) { 5311 job->job_counter = 1; 5312 } 5313 5314 switch (cmd_code) { 5315 case NS_RPN_ID: 5316 case NS_RNN_ID: { 5317 ns_rxn_req_t rxn; 5318 5319 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5320 sizeof (ns_rxn_req_t), sizeof (fc_reg_resp_t), sleep, NULL); 5321 if (cmd == NULL) { 5322 return (FC_NOMEM); 5323 } 5324 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5325 pkt = &cmd->cmd_pkt; 5326 5327 if (pd == NULL) { 5328 rxn.rxn_xname = (cmd_code == NS_RPN_ID) ? 5329 (port->fp_service_params.nport_ww_name) : 5330 (port->fp_service_params.node_ww_name); 5331 } else { 5332 if (cmd_code == NS_RPN_ID) { 5333 mutex_enter(&pd->pd_mutex); 5334 rxn.rxn_xname = pd->pd_port_name; 5335 mutex_exit(&pd->pd_mutex); 5336 } else { 5337 fc_remote_node_t *node; 5338 5339 mutex_enter(&pd->pd_mutex); 5340 node = pd->pd_remote_nodep; 5341 mutex_exit(&pd->pd_mutex); 5342 5343 mutex_enter(&node->fd_mutex); 5344 rxn.rxn_xname = node->fd_node_name; 5345 mutex_exit(&node->fd_mutex); 5346 } 5347 } 5348 rxn.rxn_port_id = s_id; 5349 5350 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rxn, 5351 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5352 sizeof (rxn), DDI_DEV_AUTOINCR); 5353 5354 break; 5355 } 5356 5357 case NS_RCS_ID: { 5358 ns_rcos_t rcos; 5359 5360 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5361 sizeof (ns_rcos_t), sizeof (fc_reg_resp_t), sleep, NULL); 5362 if (cmd == NULL) { 5363 return (FC_NOMEM); 5364 } 5365 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5366 pkt = &cmd->cmd_pkt; 5367 5368 if (pd == NULL) { 5369 rcos.rcos_cos = port->fp_cos; 5370 } else { 5371 mutex_enter(&pd->pd_mutex); 5372 rcos.rcos_cos = pd->pd_cos; 5373 mutex_exit(&pd->pd_mutex); 5374 } 5375 rcos.rcos_port_id = s_id; 5376 5377 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rcos, 5378 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5379 sizeof (rcos), DDI_DEV_AUTOINCR); 5380 5381 break; 5382 } 5383 5384 case NS_RFT_ID: { 5385 ns_rfc_type_t rfc; 5386 5387 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5388 sizeof (ns_rfc_type_t), sizeof (fc_reg_resp_t), sleep, 5389 NULL); 5390 if (cmd == NULL) { 5391 return (FC_NOMEM); 5392 } 5393 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5394 pkt = &cmd->cmd_pkt; 5395 5396 if (pd == NULL) { 5397 mutex_enter(&port->fp_mutex); 5398 bcopy(port->fp_fc4_types, rfc.rfc_types, 5399 sizeof (port->fp_fc4_types)); 5400 mutex_exit(&port->fp_mutex); 5401 } else { 5402 mutex_enter(&pd->pd_mutex); 5403 bcopy(pd->pd_fc4types, rfc.rfc_types, 5404 sizeof (pd->pd_fc4types)); 5405 mutex_exit(&pd->pd_mutex); 5406 } 5407 rfc.rfc_port_id = s_id; 5408 5409 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rfc, 5410 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5411 sizeof (rfc), DDI_DEV_AUTOINCR); 5412 5413 break; 5414 } 5415 5416 case NS_RSPN_ID: { 5417 uchar_t name_len; 5418 int pl_size; 5419 fc_portid_t spn; 5420 5421 if (pd == NULL) { 5422 mutex_enter(&port->fp_mutex); 5423 name_len = port->fp_sym_port_namelen; 5424 mutex_exit(&port->fp_mutex); 5425 } else { 5426 
mutex_enter(&pd->pd_mutex); 5427 name_len = pd->pd_spn_len; 5428 mutex_exit(&pd->pd_mutex); 5429 } 5430 5431 pl_size = sizeof (fc_portid_t) + name_len + 1; 5432 5433 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + pl_size, 5434 sizeof (fc_reg_resp_t), sleep, NULL); 5435 if (cmd == NULL) { 5436 return (FC_NOMEM); 5437 } 5438 5439 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5440 5441 pkt = &cmd->cmd_pkt; 5442 5443 spn = s_id; 5444 5445 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&spn, (uint8_t *) 5446 (pkt->pkt_cmd + sizeof (fc_ct_header_t)), sizeof (spn), 5447 DDI_DEV_AUTOINCR); 5448 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5449 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) 5450 + sizeof (fc_portid_t)), 1, DDI_DEV_AUTOINCR); 5451 5452 if (pd == NULL) { 5453 mutex_enter(&port->fp_mutex); 5454 ddi_rep_put8(pkt->pkt_cmd_acc, 5455 (uint8_t *)port->fp_sym_port_name, (uint8_t *) 5456 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5457 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5458 mutex_exit(&port->fp_mutex); 5459 } else { 5460 mutex_enter(&pd->pd_mutex); 5461 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)pd->pd_spn, 5462 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5463 sizeof (spn) + 1), name_len, DDI_DEV_AUTOINCR); 5464 mutex_exit(&pd->pd_mutex); 5465 } 5466 break; 5467 } 5468 5469 case NS_RPT_ID: { 5470 ns_rpt_t rpt; 5471 5472 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5473 sizeof (ns_rpt_t), sizeof (fc_reg_resp_t), sleep, NULL); 5474 if (cmd == NULL) { 5475 return (FC_NOMEM); 5476 } 5477 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5478 pkt = &cmd->cmd_pkt; 5479 5480 if (pd == NULL) { 5481 rpt.rpt_type = port->fp_port_type; 5482 } else { 5483 mutex_enter(&pd->pd_mutex); 5484 rpt.rpt_type = pd->pd_porttype; 5485 mutex_exit(&pd->pd_mutex); 5486 } 5487 rpt.rpt_port_id = s_id; 5488 5489 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rpt, 5490 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5491 sizeof (rpt), DDI_DEV_AUTOINCR); 5492 5493 break; 5494 } 5495 5496 case NS_RIP_NN: { 5497 ns_rip_t rip; 5498 5499 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5500 sizeof (ns_rip_t), sizeof (fc_reg_resp_t), sleep, NULL); 5501 if (cmd == NULL) { 5502 return (FC_NOMEM); 5503 } 5504 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5505 pkt = &cmd->cmd_pkt; 5506 5507 if (pd == NULL) { 5508 rip.rip_node_name = 5509 port->fp_service_params.node_ww_name; 5510 bcopy(port->fp_ip_addr, rip.rip_ip_addr, 5511 sizeof (port->fp_ip_addr)); 5512 } else { 5513 fc_remote_node_t *node; 5514 5515 /* 5516 * The most correct implementation should have the IP 5517 * address in the fc_remote_node_t structure; I believe 5518 * Node WWN and IP address should have one to one 5519 * correlation (but guess what this is changing in 5520 * FC-GS-2 latest draft) 5521 */ 5522 mutex_enter(&pd->pd_mutex); 5523 node = pd->pd_remote_nodep; 5524 bcopy(pd->pd_ip_addr, rip.rip_ip_addr, 5525 sizeof (pd->pd_ip_addr)); 5526 mutex_exit(&pd->pd_mutex); 5527 5528 mutex_enter(&node->fd_mutex); 5529 rip.rip_node_name = node->fd_node_name; 5530 mutex_exit(&node->fd_mutex); 5531 } 5532 5533 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rip, 5534 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5535 sizeof (rip), DDI_DEV_AUTOINCR); 5536 5537 break; 5538 } 5539 5540 case NS_RIPA_NN: { 5541 ns_ipa_t ipa; 5542 5543 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5544 sizeof (ns_ipa_t), sizeof (fc_reg_resp_t), sleep, NULL); 5545 if (cmd == NULL) { 5546 return (FC_NOMEM); 5547 } 5548 
fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5549 pkt = &cmd->cmd_pkt; 5550 5551 if (pd == NULL) { 5552 ipa.ipa_node_name = 5553 port->fp_service_params.node_ww_name; 5554 bcopy(port->fp_ipa, ipa.ipa_value, 5555 sizeof (port->fp_ipa)); 5556 } else { 5557 fc_remote_node_t *node; 5558 5559 mutex_enter(&pd->pd_mutex); 5560 node = pd->pd_remote_nodep; 5561 mutex_exit(&pd->pd_mutex); 5562 5563 mutex_enter(&node->fd_mutex); 5564 ipa.ipa_node_name = node->fd_node_name; 5565 bcopy(node->fd_ipa, ipa.ipa_value, 5566 sizeof (node->fd_ipa)); 5567 mutex_exit(&node->fd_mutex); 5568 } 5569 5570 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ipa, 5571 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5572 sizeof (ipa), DDI_DEV_AUTOINCR); 5573 5574 break; 5575 } 5576 5577 case NS_RSNN_NN: { 5578 uchar_t name_len; 5579 int pl_size; 5580 la_wwn_t snn; 5581 fc_remote_node_t *node = NULL; 5582 5583 if (pd == NULL) { 5584 mutex_enter(&port->fp_mutex); 5585 name_len = port->fp_sym_node_namelen; 5586 mutex_exit(&port->fp_mutex); 5587 } else { 5588 mutex_enter(&pd->pd_mutex); 5589 node = pd->pd_remote_nodep; 5590 mutex_exit(&pd->pd_mutex); 5591 5592 mutex_enter(&node->fd_mutex); 5593 name_len = node->fd_snn_len; 5594 mutex_exit(&node->fd_mutex); 5595 } 5596 5597 pl_size = sizeof (la_wwn_t) + name_len + 1; 5598 5599 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5600 pl_size, sizeof (fc_reg_resp_t), sleep, NULL); 5601 if (cmd == NULL) { 5602 return (FC_NOMEM); 5603 } 5604 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5605 5606 pkt = &cmd->cmd_pkt; 5607 5608 bcopy(&port->fp_service_params.node_ww_name, 5609 &snn, sizeof (la_wwn_t)); 5610 5611 if (pd == NULL) { 5612 mutex_enter(&port->fp_mutex); 5613 ddi_rep_put8(pkt->pkt_cmd_acc, 5614 (uint8_t *)port->fp_sym_node_name, (uint8_t *) 5615 (pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5616 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5617 mutex_exit(&port->fp_mutex); 5618 } else { 5619 ASSERT(node != NULL); 5620 mutex_enter(&node->fd_mutex); 5621 ddi_rep_put8(pkt->pkt_cmd_acc, 5622 (uint8_t *)node->fd_snn, 5623 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t) + 5624 sizeof (snn) + 1), name_len, DDI_DEV_AUTOINCR); 5625 mutex_exit(&node->fd_mutex); 5626 } 5627 5628 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&snn, 5629 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5630 sizeof (snn), DDI_DEV_AUTOINCR); 5631 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&name_len, 5632 (uint8_t *)(pkt->pkt_cmd 5633 + sizeof (fc_ct_header_t) + sizeof (snn)), 5634 1, DDI_DEV_AUTOINCR); 5635 5636 break; 5637 } 5638 5639 case NS_DA_ID: { 5640 ns_remall_t rall; 5641 char tmp[4] = {0}; 5642 char *ptr; 5643 5644 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 5645 sizeof (ns_remall_t), sizeof (fc_reg_resp_t), sleep, NULL); 5646 5647 if (cmd == NULL) { 5648 return (FC_NOMEM); 5649 } 5650 5651 fp_ct_init(port, cmd, NULL, cmd_code, NULL, 0, 0, job); 5652 pkt = &cmd->cmd_pkt; 5653 5654 ptr = (char *)(&s_id); 5655 tmp[3] = *ptr++; 5656 tmp[2] = *ptr++; 5657 tmp[1] = *ptr++; 5658 tmp[0] = *ptr; 5659 #if defined(_BIT_FIELDS_LTOH) 5660 bcopy((caddr_t)tmp, (caddr_t)(&rall.rem_port_id), 4); 5661 #else 5662 rall.rem_port_id = s_id; 5663 #endif 5664 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&rall, 5665 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 5666 sizeof (rall), DDI_DEV_AUTOINCR); 5667 5668 break; 5669 } 5670 5671 default: 5672 return (FC_FAILURE); 5673 } 5674 5675 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 5676 5677 if (rval != FC_SUCCESS) { 5678 
job->job_result = rval; 5679 fp_iodone(cmd); 5680 } 5681 5682 if (polled) { 5683 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 5684 fp_jobwait(job); 5685 } else { 5686 rval = FC_SUCCESS; 5687 } 5688 5689 return (rval); 5690 } 5691 5692 5693 /* 5694 * Common interrupt handler 5695 */ 5696 static int 5697 fp_common_intr(fc_packet_t *pkt, int iodone) 5698 { 5699 int rval = FC_FAILURE; 5700 fp_cmd_t *cmd; 5701 fc_local_port_t *port; 5702 5703 cmd = pkt->pkt_ulp_private; 5704 port = cmd->cmd_port; 5705 5706 /* 5707 * Fail fast the upper layer requests if 5708 * a state change has occurred amidst. 5709 */ 5710 mutex_enter(&port->fp_mutex); 5711 if (cmd->cmd_ulp_pkt != NULL && port->fp_statec_busy) { 5712 mutex_exit(&port->fp_mutex); 5713 cmd->cmd_ulp_pkt->pkt_state = FC_PKT_PORT_OFFLINE; 5714 cmd->cmd_ulp_pkt->pkt_reason = FC_REASON_OFFLINE; 5715 } else if (!(port->fp_soft_state & 5716 (FP_SOFT_IN_DETACH | FP_DETACH_INPROGRESS))) { 5717 mutex_exit(&port->fp_mutex); 5718 5719 switch (pkt->pkt_state) { 5720 case FC_PKT_LOCAL_BSY: 5721 case FC_PKT_FABRIC_BSY: 5722 case FC_PKT_NPORT_BSY: 5723 case FC_PKT_TIMEOUT: 5724 cmd->cmd_retry_interval = (pkt->pkt_state == 5725 FC_PKT_TIMEOUT) ? 0 : fp_retry_delay; 5726 rval = fp_retry_cmd(pkt); 5727 break; 5728 5729 case FC_PKT_FABRIC_RJT: 5730 case FC_PKT_NPORT_RJT: 5731 case FC_PKT_LOCAL_RJT: 5732 case FC_PKT_LS_RJT: 5733 case FC_PKT_FS_RJT: 5734 case FC_PKT_BA_RJT: 5735 rval = fp_handle_reject(pkt); 5736 break; 5737 5738 default: 5739 if (pkt->pkt_resp_resid) { 5740 cmd->cmd_retry_interval = 0; 5741 rval = fp_retry_cmd(pkt); 5742 } 5743 break; 5744 } 5745 } else { 5746 mutex_exit(&port->fp_mutex); 5747 } 5748 5749 if (rval != FC_SUCCESS && iodone) { 5750 fp_iodone(cmd); 5751 rval = FC_SUCCESS; 5752 } 5753 5754 return (rval); 5755 } 5756 5757 5758 /* 5759 * Some not so long winding theory on point to point topology: 5760 * 5761 * In the ACC payload, if the D_ID is ZERO and the common service 5762 * parameters indicate N_Port, then the topology is POINT TO POINT. 5763 * 5764 * In a point to point topology with an N_Port, during Fabric Login, 5765 * the destination N_Port will check with our WWN and decide if it 5766 * needs to issue PLOGI or not. That means, FLOGI could potentially 5767 * trigger an unsolicited PLOGI from an N_Port. The Unsolicited 5768 * PLOGI creates the device handles. 5769 * 5770 * Assuming that the host port WWN is greater than the other N_Port 5771 * WWN, then we become the master (be aware that this isn't the word 5772 * used in the FC standards) and initiate the PLOGI. 5773 * 5774 */ 5775 static void 5776 fp_flogi_intr(fc_packet_t *pkt) 5777 { 5778 int state; 5779 int f_port; 5780 uint32_t s_id; 5781 uint32_t d_id; 5782 fp_cmd_t *cmd; 5783 fc_local_port_t *port; 5784 la_wwn_t *swwn; 5785 la_wwn_t dwwn; 5786 la_wwn_t nwwn; 5787 fc_remote_port_t *pd; 5788 la_els_logi_t *acc; 5789 com_svc_t csp; 5790 ls_code_t resp; 5791 5792 cmd = pkt->pkt_ulp_private; 5793 port = cmd->cmd_port; 5794 5795 FP_TRACE(FP_NHEAD1(1, 0), "fp_flogi_intr; port=%p, pkt=%p, state=%x", 5796 port, pkt, pkt->pkt_state); 5797 5798 if (FP_IS_PKT_ERROR(pkt)) { 5799 (void) fp_common_intr(pkt, 1); 5800 return; 5801 } 5802 5803 /* 5804 * Currently, we don't need to swap bytes here because qlc is faking the 5805 * response for us and so endianness is getting taken care of. 
But we 5806 * have to fix this and generalize this at some point 5807 */ 5808 acc = (la_els_logi_t *)pkt->pkt_resp; 5809 5810 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 5811 sizeof (resp), DDI_DEV_AUTOINCR); 5812 5813 ASSERT(resp.ls_code == LA_ELS_ACC); 5814 if (resp.ls_code != LA_ELS_ACC) { 5815 (void) fp_common_intr(pkt, 1); 5816 return; 5817 } 5818 5819 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&csp, 5820 (uint8_t *)&acc->common_service, sizeof (csp), DDI_DEV_AUTOINCR); 5821 5822 f_port = FP_IS_F_PORT(csp.cmn_features) ? 1 : 0; 5823 5824 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 5825 5826 mutex_enter(&port->fp_mutex); 5827 state = FC_PORT_STATE_MASK(port->fp_state); 5828 mutex_exit(&port->fp_mutex); 5829 5830 if (pkt->pkt_resp_fhdr.d_id == 0) { 5831 if (f_port == 0 && state != FC_STATE_LOOP) { 5832 swwn = &port->fp_service_params.nport_ww_name; 5833 5834 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&dwwn, 5835 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 5836 DDI_DEV_AUTOINCR); 5837 5838 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 5839 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 5840 DDI_DEV_AUTOINCR); 5841 5842 mutex_enter(&port->fp_mutex); 5843 5844 port->fp_topology = FC_TOP_PT_PT; 5845 port->fp_total_devices = 1; 5846 if (fctl_wwn_cmp(swwn, &dwwn) >= 0) { 5847 port->fp_ptpt_master = 1; 5848 /* 5849 * Let us choose 'X' as S_ID and 'Y' 5850 * as D_ID and that'll work; hopefully 5851 * If not, it will get changed. 5852 */ 5853 s_id = port->fp_instance + FP_DEFAULT_SID; 5854 d_id = port->fp_instance + FP_DEFAULT_DID; 5855 port->fp_port_id.port_id = s_id; 5856 mutex_exit(&port->fp_mutex); 5857 5858 pd = fctl_create_remote_port(port, 5859 &nwwn, &dwwn, d_id, PD_PLOGI_INITIATOR, 5860 KM_NOSLEEP); 5861 if (pd == NULL) { 5862 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 5863 0, NULL, "couldn't create device" 5864 " d_id=%X", d_id); 5865 fp_iodone(cmd); 5866 return; 5867 } 5868 5869 cmd->cmd_pkt.pkt_tran_flags = 5870 pkt->pkt_tran_flags; 5871 cmd->cmd_pkt.pkt_tran_type = pkt->pkt_tran_type; 5872 cmd->cmd_flags = FP_CMD_PLOGI_RETAIN; 5873 cmd->cmd_retry_count = fp_retry_count; 5874 5875 fp_xlogi_init(port, cmd, s_id, d_id, 5876 fp_plogi_intr, cmd->cmd_job, LA_ELS_PLOGI); 5877 5878 (&cmd->cmd_pkt)->pkt_pd = pd; 5879 5880 /* 5881 * We've just created this fc_remote_port_t, and 5882 * we're about to use it to send a PLOGI, so 5883 * bump the reference count right now. When 5884 * the packet is freed, the reference count will 5885 * be decremented. The ULP may also start using 5886 * it, so mark it as given away as well. 
5887 */ 5888 pd->pd_ref_count++; 5889 pd->pd_aux_flags |= PD_GIVEN_TO_ULPS; 5890 5891 if (fp_sendcmd(port, cmd, 5892 port->fp_fca_handle) == FC_SUCCESS) { 5893 return; 5894 } 5895 } else { 5896 /* 5897 * The device handles will be created when the 5898 * unsolicited PLOGI is completed successfully 5899 */ 5900 port->fp_ptpt_master = 0; 5901 mutex_exit(&port->fp_mutex); 5902 } 5903 } 5904 pkt->pkt_state = FC_PKT_FAILURE; 5905 } else { 5906 if (f_port) { 5907 mutex_enter(&port->fp_mutex); 5908 if (state == FC_STATE_LOOP) { 5909 port->fp_topology = FC_TOP_PUBLIC_LOOP; 5910 } else { 5911 port->fp_topology = FC_TOP_FABRIC; 5912 5913 ddi_rep_get8(pkt->pkt_resp_acc, 5914 (uint8_t *)&port->fp_fabric_name, 5915 (uint8_t *)&acc->node_ww_name, 5916 sizeof (la_wwn_t), 5917 DDI_DEV_AUTOINCR); 5918 } 5919 port->fp_port_id.port_id = pkt->pkt_resp_fhdr.d_id; 5920 mutex_exit(&port->fp_mutex); 5921 } else { 5922 pkt->pkt_state = FC_PKT_FAILURE; 5923 } 5924 } 5925 fp_iodone(cmd); 5926 } 5927 5928 5929 /* 5930 * Handle solicited PLOGI response 5931 */ 5932 static void 5933 fp_plogi_intr(fc_packet_t *pkt) 5934 { 5935 int nl_port; 5936 int bailout; 5937 uint32_t d_id; 5938 fp_cmd_t *cmd; 5939 la_els_logi_t *acc; 5940 fc_local_port_t *port; 5941 fc_remote_port_t *pd; 5942 la_wwn_t nwwn; 5943 la_wwn_t pwwn; 5944 ls_code_t resp; 5945 5946 nl_port = 0; 5947 cmd = pkt->pkt_ulp_private; 5948 port = cmd->cmd_port; 5949 d_id = pkt->pkt_cmd_fhdr.d_id; 5950 5951 #ifndef __lock_lint 5952 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 5953 #endif 5954 5955 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: port=%p, job=%p, d_id=%x," 5956 " jcount=%d pkt=%p, state=%x", port, cmd->cmd_job, d_id, 5957 cmd->cmd_job->job_counter, pkt, pkt->pkt_state); 5958 5959 /* 5960 * Bail out early on ULP initiated requests if the 5961 * state change has occurred 5962 */ 5963 mutex_enter(&port->fp_mutex); 5964 bailout = ((port->fp_statec_busy || 5965 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 5966 cmd->cmd_ulp_pkt) ? 1 : 0; 5967 mutex_exit(&port->fp_mutex); 5968 5969 if (FP_IS_PKT_ERROR(pkt) || bailout) { 5970 int skip_msg = 0; 5971 int giveup = 0; 5972 5973 if (cmd->cmd_ulp_pkt) { 5974 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 5975 cmd->cmd_ulp_pkt->pkt_reason = pkt->pkt_reason; 5976 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 5977 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 5978 } 5979 5980 /* 5981 * If an unsolicited cross login already created 5982 * a device speed up the discovery by not retrying 5983 * the command mindlessly. 5984 */ 5985 if (pkt->pkt_pd == NULL && 5986 fctl_get_remote_port_by_did(port, d_id) != NULL) { 5987 fp_iodone(cmd); 5988 return; 5989 } 5990 5991 if (pkt->pkt_pd != NULL) { 5992 giveup = (pkt->pkt_pd->pd_recepient == 5993 PD_PLOGI_RECEPIENT) ? 
1 : 0; 5994 if (giveup) { 5995 /* 5996 * This pd is marked as plogi 5997 * recipient, stop retrying 5998 */ 5999 FP_TRACE(FP_NHEAD1(3, 0), 6000 "fp_plogi_intr: stop retry as" 6001 " a cross login was accepted" 6002 " from d_id=%x, port=%p.", 6003 d_id, port); 6004 fp_iodone(cmd); 6005 return; 6006 } 6007 } 6008 6009 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6010 return; 6011 } 6012 6013 if ((pd = fctl_get_remote_port_by_did(port, d_id)) != NULL) { 6014 mutex_enter(&pd->pd_mutex); 6015 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 6016 skip_msg++; 6017 } 6018 mutex_exit(&pd->pd_mutex); 6019 } 6020 6021 mutex_enter(&port->fp_mutex); 6022 if (!bailout && !(skip_msg && port->fp_statec_busy) && 6023 port->fp_statec_busy <= 1 && 6024 pkt->pkt_reason != FC_REASON_FCAL_OPN_FAIL) { 6025 mutex_exit(&port->fp_mutex); 6026 /* 6027 * In case of Login Collisions, JNI HBAs return the 6028 * FC pkt back to the Initiator with the state set to 6029 * FC_PKT_LS_RJT and the reason to FC_REASON_LOGICAL_ERROR. 6030 * QLC HBAs handle such cases in the FW and do not 6031 * return the LS_RJT with Logical error when a 6032 * login collision happens. 6033 */ 6034 if ((pkt->pkt_state != FC_PKT_LS_RJT) || 6035 (pkt->pkt_reason != FC_REASON_LOGICAL_ERROR)) { 6036 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6037 "PLOGI to %x failed", d_id); 6038 } 6039 FP_TRACE(FP_NHEAD2(9, 0), 6040 "PLOGI to %x failed. state=%x reason=%x.", 6041 d_id, pkt->pkt_state, pkt->pkt_reason); 6042 } else { 6043 mutex_exit(&port->fp_mutex); 6044 } 6045 6046 fp_iodone(cmd); 6047 return; 6048 } 6049 6050 acc = (la_els_logi_t *)pkt->pkt_resp; 6051 6052 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, (uint8_t *)acc, 6053 sizeof (resp), DDI_DEV_AUTOINCR); 6054 6055 ASSERT(resp.ls_code == LA_ELS_ACC); 6056 if (resp.ls_code != LA_ELS_ACC) { 6057 (void) fp_common_intr(pkt, 1); 6058 return; 6059 } 6060 6061 if (d_id == FS_NAME_SERVER || d_id == FS_FABRIC_CONTROLLER) { 6062 mutex_enter(&port->fp_mutex); 6063 port->fp_ns_login_class = FC_TRAN_CLASS(pkt->pkt_tran_flags); 6064 mutex_exit(&port->fp_mutex); 6065 fp_iodone(cmd); 6066 return; 6067 } 6068 6069 ASSERT(acc == (la_els_logi_t *)pkt->pkt_resp); 6070 6071 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 6072 (uint8_t *)&acc->nport_ww_name, sizeof (la_wwn_t), 6073 DDI_DEV_AUTOINCR); 6074 6075 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 6076 (uint8_t *)&acc->node_ww_name, sizeof (la_wwn_t), 6077 DDI_DEV_AUTOINCR); 6078 6079 ASSERT(fctl_is_wwn_zero(&pwwn) == FC_FAILURE); 6080 ASSERT(fctl_is_wwn_zero(&nwwn) == FC_FAILURE); 6081 6082 if ((pd = pkt->pkt_pd) == NULL) { 6083 pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6084 if (pd == NULL) { 6085 pd = fctl_create_remote_port(port, &nwwn, &pwwn, d_id, 6086 PD_PLOGI_INITIATOR, KM_NOSLEEP); 6087 if (pd == NULL) { 6088 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6089 "couldn't create port device handles" 6090 " d_id=%x", d_id); 6091 fp_iodone(cmd); 6092 return; 6093 } 6094 } else { 6095 fc_remote_port_t *tmp_pd; 6096 6097 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6098 if (tmp_pd != NULL) { 6099 fp_iodone(cmd); 6100 return; 6101 } 6102 6103 mutex_enter(&port->fp_mutex); 6104 mutex_enter(&pd->pd_mutex); 6105 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN) || 6106 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6107 cmd->cmd_flags |= FP_CMD_PLOGI_RETAIN; 6108 } 6109 6110 if (pd->pd_type == PORT_DEVICE_OLD) { 6111 if (pd->pd_port_id.port_id != d_id) { 6112 fctl_delist_did_table(port, pd); 6113 pd->pd_type = PORT_DEVICE_CHANGED; 6114 pd->pd_port_id.port_id =
d_id; 6115 } else { 6116 pd->pd_type = PORT_DEVICE_NOCHANGE; 6117 } 6118 } 6119 6120 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6121 char ww_name[17]; 6122 6123 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6124 6125 mutex_exit(&pd->pd_mutex); 6126 mutex_exit(&port->fp_mutex); 6127 FP_TRACE(FP_NHEAD2(9, 0), 6128 "Possible Duplicate name or address" 6129 " identifiers in the PLOGI response" 6130 " D_ID=%x, PWWN=%s: Please check the" 6131 " configuration", d_id, ww_name); 6132 fp_iodone(cmd); 6133 return; 6134 } 6135 fctl_enlist_did_table(port, pd); 6136 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6137 mutex_exit(&pd->pd_mutex); 6138 mutex_exit(&port->fp_mutex); 6139 } 6140 } else { 6141 fc_remote_port_t *tmp_pd, *new_wwn_pd; 6142 6143 tmp_pd = fctl_get_remote_port_by_did(port, d_id); 6144 new_wwn_pd = fctl_get_remote_port_by_pwwn(port, &pwwn); 6145 6146 mutex_enter(&port->fp_mutex); 6147 mutex_enter(&pd->pd_mutex); 6148 if (fctl_wwn_cmp(&pd->pd_port_name, &pwwn) == 0) { 6149 FP_TRACE(FP_NHEAD1(3, 0), "fp_plogi_intr: d_id=%x," 6150 " pd_state=%x pd_type=%x", d_id, pd->pd_state, 6151 pd->pd_type); 6152 if ((pd->pd_state == PORT_DEVICE_LOGGED_IN && 6153 pd->pd_type == PORT_DEVICE_OLD) || 6154 (pd->pd_aux_flags & PD_LOGGED_OUT)) { 6155 pd->pd_type = PORT_DEVICE_NOCHANGE; 6156 } else if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 6157 pd->pd_type = PORT_DEVICE_NEW; 6158 } 6159 } else { 6160 char old_name[17]; 6161 char new_name[17]; 6162 6163 fc_wwn_to_str(&pd->pd_port_name, old_name); 6164 fc_wwn_to_str(&pwwn, new_name); 6165 6166 FP_TRACE(FP_NHEAD1(9, 0), 6167 "fp_plogi_intr: PWWN of a device with D_ID=%x " 6168 "changed. New PWWN = %s, OLD PWWN = %s ; tmp_pd:%p " 6169 "pd:%p new_wwn_pd:%p, cmd_ulp_pkt:%p, bailout:0x%x", 6170 d_id, new_name, old_name, tmp_pd, pd, new_wwn_pd, 6171 cmd->cmd_ulp_pkt, bailout); 6172 6173 FP_TRACE(FP_NHEAD2(9, 0), 6174 "PWWN of a device with D_ID=%x changed." 6175 " New PWWN = %s, OLD PWWN = %s", d_id, 6176 new_name, old_name); 6177 6178 if (cmd->cmd_ulp_pkt && !bailout) { 6179 fc_remote_node_t *rnodep; 6180 fc_portmap_t *changelist; 6181 fc_portmap_t *listptr; 6182 int len = 1; 6183 /* # entries in changelist */ 6184 6185 fctl_delist_pwwn_table(port, pd); 6186 6187 /* 6188 * Lets now check if there already is a pd with 6189 * this new WWN in the table. If so, we'll mark 6190 * it as invalid 6191 */ 6192 6193 if (new_wwn_pd) { 6194 /* 6195 * There is another pd with in the pwwn 6196 * table with the same WWN that we got 6197 * in the PLOGI payload. We have to get 6198 * it out of the pwwn table, update the 6199 * pd's state (fp_fillout_old_map does 6200 * this for us) and add it to the 6201 * changelist that goes up to ULPs. 6202 * 6203 * len is length of changelist and so 6204 * increment it. 6205 */ 6206 len++; 6207 6208 if (tmp_pd != pd) { 6209 /* 6210 * Odd case where pwwn and did 6211 * tables are out of sync but 6212 * we will handle that too. See 6213 * more comments below. 6214 * 6215 * One more device that ULPs 6216 * should know about and so len 6217 * gets incremented again. 6218 */ 6219 len++; 6220 } 6221 6222 listptr = changelist = kmem_zalloc(len * 6223 sizeof (*changelist), KM_SLEEP); 6224 6225 mutex_enter(&new_wwn_pd->pd_mutex); 6226 rnodep = new_wwn_pd->pd_remote_nodep; 6227 mutex_exit(&new_wwn_pd->pd_mutex); 6228 6229 /* 6230 * Hold the fd_mutex since 6231 * fctl_copy_portmap_held expects it. 
6232 * Preserve lock hierarchy by grabbing 6233 * fd_mutex before pd_mutex 6234 */ 6235 if (rnodep) { 6236 mutex_enter(&rnodep->fd_mutex); 6237 } 6238 mutex_enter(&new_wwn_pd->pd_mutex); 6239 fp_fillout_old_map_held(listptr++, 6240 new_wwn_pd, 0); 6241 mutex_exit(&new_wwn_pd->pd_mutex); 6242 if (rnodep) { 6243 mutex_exit(&rnodep->fd_mutex); 6244 } 6245 6246 /* 6247 * Safety check : 6248 * Lets ensure that the pwwn and did 6249 * tables are in sync. Ideally, we 6250 * should not find that these two pd's 6251 * are different. 6252 */ 6253 if (tmp_pd != pd) { 6254 mutex_enter(&tmp_pd->pd_mutex); 6255 rnodep = 6256 tmp_pd->pd_remote_nodep; 6257 mutex_exit(&tmp_pd->pd_mutex); 6258 6259 /* As above grab fd_mutex */ 6260 if (rnodep) { 6261 mutex_enter(&rnodep-> 6262 fd_mutex); 6263 } 6264 mutex_enter(&tmp_pd->pd_mutex); 6265 6266 fp_fillout_old_map_held( 6267 listptr++, tmp_pd, 0); 6268 6269 mutex_exit(&tmp_pd->pd_mutex); 6270 if (rnodep) { 6271 mutex_exit(&rnodep-> 6272 fd_mutex); 6273 } 6274 6275 /* 6276 * Now add "pd" (not tmp_pd) 6277 * to fp_did_table to sync it up 6278 * with fp_pwwn_table 6279 * 6280 * pd->pd_mutex is already held 6281 * at this point 6282 */ 6283 fctl_enlist_did_table(port, pd); 6284 } 6285 } else { 6286 listptr = changelist = kmem_zalloc( 6287 sizeof (*changelist), KM_SLEEP); 6288 } 6289 6290 ASSERT(changelist != NULL); 6291 6292 fp_fillout_changed_map(listptr, pd, &d_id, 6293 &pwwn); 6294 fctl_enlist_pwwn_table(port, pd); 6295 6296 mutex_exit(&pd->pd_mutex); 6297 mutex_exit(&port->fp_mutex); 6298 6299 fp_iodone(cmd); 6300 6301 (void) fp_ulp_devc_cb(port, changelist, len, 6302 len, KM_NOSLEEP, 0); 6303 6304 return; 6305 } 6306 } 6307 6308 if (pd->pd_porttype.port_type == FC_NS_PORT_NL) { 6309 nl_port = 1; 6310 } 6311 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) 6312 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6313 6314 mutex_exit(&pd->pd_mutex); 6315 mutex_exit(&port->fp_mutex); 6316 6317 if (tmp_pd == NULL) { 6318 mutex_enter(&port->fp_mutex); 6319 mutex_enter(&pd->pd_mutex); 6320 if (pd->pd_aux_flags & PD_IN_DID_QUEUE) { 6321 char ww_name[17]; 6322 6323 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6324 mutex_exit(&pd->pd_mutex); 6325 mutex_exit(&port->fp_mutex); 6326 FP_TRACE(FP_NHEAD2(9, 0), 6327 "Possible Duplicate name or address" 6328 " identifiers in the PLOGI response" 6329 " D_ID=%x, PWWN=%s: Please check the" 6330 " configuration", d_id, ww_name); 6331 fp_iodone(cmd); 6332 return; 6333 } 6334 fctl_enlist_did_table(port, pd); 6335 pd->pd_aux_flags &= ~PD_LOGGED_OUT; 6336 mutex_exit(&pd->pd_mutex); 6337 mutex_exit(&port->fp_mutex); 6338 } 6339 } 6340 fp_register_login(&pkt->pkt_resp_acc, pd, acc, 6341 FC_TRAN_CLASS(pkt->pkt_tran_flags)); 6342 6343 if (cmd->cmd_ulp_pkt) { 6344 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6345 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6346 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6347 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6348 if (pd != NULL) { 6349 FP_TRACE(FP_NHEAD1(9, 0), 6350 "fp_plogi_intr;" 6351 "ulp_pkt's pd is NULL, get a pd %p", 6352 pd); 6353 mutex_enter(&pd->pd_mutex); 6354 pd->pd_ref_count++; 6355 mutex_exit(&pd->pd_mutex); 6356 } 6357 cmd->cmd_ulp_pkt->pkt_pd = pd; 6358 } 6359 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6360 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6361 sizeof (fc_frame_hdr_t)); 6362 bcopy((caddr_t)pkt->pkt_resp, 6363 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6364 sizeof (la_els_logi_t)); 6365 } 6366 6367 mutex_enter(&port->fp_mutex); 6368 if (port->fp_topology == FC_TOP_PRIVATE_LOOP || nl_port) { 6369 
mutex_enter(&pd->pd_mutex); 6370 6371 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6372 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6373 cmd->cmd_retry_count = fp_retry_count; 6374 6375 /* 6376 * If the fc_remote_port_t pointer is not set in the given 6377 * fc_packet_t, then this fc_remote_port_t must have just 6378 * been created. Save the pointer and also increment the 6379 * fc_remote_port_t reference count. 6380 */ 6381 if (pkt->pkt_pd == NULL) { 6382 pkt->pkt_pd = pd; 6383 pd->pd_ref_count++; /* It's in use! */ 6384 } 6385 6386 fp_adisc_init(cmd, cmd->cmd_job); 6387 6388 pkt->pkt_cmdlen = sizeof (la_els_adisc_t); 6389 pkt->pkt_rsplen = sizeof (la_els_adisc_t); 6390 6391 mutex_exit(&pd->pd_mutex); 6392 mutex_exit(&port->fp_mutex); 6393 6394 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6395 return; 6396 } 6397 } else { 6398 mutex_exit(&port->fp_mutex); 6399 } 6400 6401 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6402 mutex_enter(&port->fp_mutex); 6403 mutex_enter(&pd->pd_mutex); 6404 6405 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6406 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6407 cmd->cmd_retry_count = fp_retry_count; 6408 6409 fp_logo_init(pd, cmd, cmd->cmd_job); 6410 6411 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6412 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6413 6414 mutex_exit(&pd->pd_mutex); 6415 mutex_exit(&port->fp_mutex); 6416 6417 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 6418 return; 6419 } 6420 6421 } 6422 fp_iodone(cmd); 6423 } 6424 6425 6426 /* 6427 * Handle solicited ADISC response 6428 */ 6429 static void 6430 fp_adisc_intr(fc_packet_t *pkt) 6431 { 6432 int rval; 6433 int bailout; 6434 fp_cmd_t *cmd; 6435 fc_local_port_t *port; 6436 fc_remote_port_t *pd; 6437 la_els_adisc_t *acc; 6438 ls_code_t resp; 6439 fc_hardaddr_t ha; 6440 fc_portmap_t *changelist; 6441 int initiator, adiscfail = 0; 6442 6443 pd = pkt->pkt_pd; 6444 cmd = pkt->pkt_ulp_private; 6445 port = cmd->cmd_port; 6446 6447 #ifndef __lock_lint 6448 ASSERT(cmd->cmd_job && cmd->cmd_job->job_counter); 6449 #endif 6450 6451 ASSERT(pd != NULL && port != NULL && cmd != NULL); 6452 6453 mutex_enter(&port->fp_mutex); 6454 bailout = ((port->fp_statec_busy || 6455 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) && 6456 cmd->cmd_ulp_pkt) ? 1 : 0; 6457 mutex_exit(&port->fp_mutex); 6458 6459 if (bailout) { 6460 fp_iodone(cmd); 6461 return; 6462 } 6463 6464 if (pkt->pkt_state == FC_PKT_SUCCESS && pkt->pkt_resp_resid == 0) { 6465 acc = (la_els_adisc_t *)pkt->pkt_resp; 6466 6467 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6468 (uint8_t *)acc, sizeof (resp), DDI_DEV_AUTOINCR); 6469 6470 if (resp.ls_code == LA_ELS_ACC) { 6471 int is_private; 6472 6473 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&ha, 6474 (uint8_t *)&acc->hard_addr, sizeof (ha), 6475 DDI_DEV_AUTOINCR); 6476 6477 mutex_enter(&port->fp_mutex); 6478 6479 is_private = 6480 (port->fp_topology == FC_TOP_PRIVATE_LOOP) ? 
1 : 0; 6481 6482 mutex_enter(&pd->pd_mutex); 6483 if ((pd->pd_aux_flags & PD_IN_DID_QUEUE) == 0) { 6484 fctl_enlist_did_table(port, pd); 6485 } 6486 mutex_exit(&pd->pd_mutex); 6487 6488 mutex_exit(&port->fp_mutex); 6489 6490 mutex_enter(&pd->pd_mutex); 6491 if (pd->pd_type != PORT_DEVICE_NEW) { 6492 if (is_private && (pd->pd_hard_addr.hard_addr != 6493 ha.hard_addr)) { 6494 pd->pd_type = PORT_DEVICE_CHANGED; 6495 } else { 6496 pd->pd_type = PORT_DEVICE_NOCHANGE; 6497 } 6498 } 6499 6500 if (is_private && (ha.hard_addr && 6501 pd->pd_port_id.port_id != ha.hard_addr)) { 6502 char ww_name[17]; 6503 6504 fc_wwn_to_str(&pd->pd_port_name, ww_name); 6505 6506 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6507 "NL_Port Identifier %x doesn't match" 6508 " with Hard Address %x, Will use Port" 6509 " WWN %s", pd->pd_port_id.port_id, 6510 ha.hard_addr, ww_name); 6511 6512 pd->pd_hard_addr.hard_addr = 0; 6513 } else { 6514 pd->pd_hard_addr.hard_addr = ha.hard_addr; 6515 } 6516 mutex_exit(&pd->pd_mutex); 6517 } else { 6518 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6519 return; 6520 } 6521 } 6522 } else { 6523 if (fp_common_intr(pkt, 0) == FC_SUCCESS) { 6524 return; 6525 } 6526 6527 mutex_enter(&port->fp_mutex); 6528 if (port->fp_statec_busy <= 1) { 6529 mutex_exit(&port->fp_mutex); 6530 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, pkt, 6531 "ADISC to %x failed, cmd_flags=%x", 6532 pkt->pkt_cmd_fhdr.d_id, cmd->cmd_flags); 6533 cmd->cmd_flags &= ~FP_CMD_PLOGI_RETAIN; 6534 adiscfail = 1; 6535 } else { 6536 mutex_exit(&port->fp_mutex); 6537 } 6538 } 6539 6540 if (cmd->cmd_ulp_pkt) { 6541 cmd->cmd_ulp_pkt->pkt_state = pkt->pkt_state; 6542 cmd->cmd_ulp_pkt->pkt_action = pkt->pkt_action; 6543 cmd->cmd_ulp_pkt->pkt_expln = pkt->pkt_expln; 6544 if (cmd->cmd_ulp_pkt->pkt_pd == NULL) { 6545 cmd->cmd_ulp_pkt->pkt_pd = pd; 6546 FP_TRACE(FP_NHEAD1(9, 0), 6547 "fp_adisc__intr;" 6548 "ulp_pkt's pd is NULL, get a pd %p", 6549 pd); 6550 6551 } 6552 bcopy((caddr_t)&pkt->pkt_resp_fhdr, 6553 (caddr_t)&cmd->cmd_ulp_pkt->pkt_resp_fhdr, 6554 sizeof (fc_frame_hdr_t)); 6555 bcopy((caddr_t)pkt->pkt_resp, 6556 (caddr_t)cmd->cmd_ulp_pkt->pkt_resp, 6557 sizeof (la_els_logi_t)); 6558 } 6559 6560 if ((cmd->cmd_flags & FP_CMD_PLOGI_RETAIN) == 0) { 6561 FP_TRACE(FP_NHEAD1(9, 0), 6562 "fp_adisc_intr: Perform LOGO.cmd_flags=%x, " 6563 "fp_retry_count=%x, ulp_pkt=%p", 6564 cmd->cmd_flags, fp_retry_count, cmd->cmd_ulp_pkt); 6565 6566 mutex_enter(&port->fp_mutex); 6567 mutex_enter(&pd->pd_mutex); 6568 6569 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 6570 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6571 cmd->cmd_retry_count = fp_retry_count; 6572 6573 fp_logo_init(pd, cmd, cmd->cmd_job); 6574 6575 pkt->pkt_cmdlen = sizeof (la_els_logo_t); 6576 pkt->pkt_rsplen = FP_PORT_IDENTIFIER_LEN; 6577 6578 mutex_exit(&pd->pd_mutex); 6579 mutex_exit(&port->fp_mutex); 6580 6581 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 6582 if (adiscfail) { 6583 mutex_enter(&pd->pd_mutex); 6584 initiator = 6585 (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 6586 pd->pd_state = PORT_DEVICE_VALID; 6587 pd->pd_aux_flags |= PD_LOGGED_OUT; 6588 if (pd->pd_aux_flags & PD_DISABLE_RELOGIN) 6589 pd->pd_type = PORT_DEVICE_NEW; 6590 else 6591 pd->pd_type = PORT_DEVICE_NOCHANGE; 6592 mutex_exit(&pd->pd_mutex); 6593 6594 changelist = 6595 kmem_zalloc(sizeof (*changelist), KM_SLEEP); 6596 6597 if (initiator) { 6598 fp_unregister_login(pd); 6599 fctl_copy_portmap(changelist, pd); 6600 } else { 6601 fp_fillout_old_map(changelist, pd, 0); 6602 } 6603 6604 FP_TRACE(FP_NHEAD1(9, 0), 6605 "fp_adisc_intr: Dev change notification " 6606 "to ULP port=%p, pd=%p, map_type=%x map_state=%x " 6607 "map_flags=%x initiator=%d", port, pd, 6608 changelist->map_type, changelist->map_state, 6609 changelist->map_flags, initiator); 6610 6611 (void) fp_ulp_devc_cb(port, changelist, 6612 1, 1, KM_SLEEP, 0); 6613 } 6614 if (rval == FC_SUCCESS) { 6615 return; 6616 } 6617 } 6618 fp_iodone(cmd); 6619 } 6620 6621 6622 /* 6623 * Handle solicited LOGO response 6624 */ 6625 static void 6626 fp_logo_intr(fc_packet_t *pkt) 6627 { 6628 ls_code_t resp; 6629 6630 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6631 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6632 6633 if (FP_IS_PKT_ERROR(pkt)) { 6634 (void) fp_common_intr(pkt, 1); 6635 return; 6636 } 6637 6638 ASSERT(resp.ls_code == LA_ELS_ACC); 6639 if (resp.ls_code != LA_ELS_ACC) { 6640 (void) fp_common_intr(pkt, 1); 6641 return; 6642 } 6643 6644 if (pkt->pkt_pd != NULL) { 6645 fp_unregister_login(pkt->pkt_pd); 6646 } 6647 fp_iodone(pkt->pkt_ulp_private); 6648 } 6649 6650 6651 /* 6652 * Handle solicited RNID response 6653 */ 6654 static void 6655 fp_rnid_intr(fc_packet_t *pkt) 6656 { 6657 ls_code_t resp; 6658 job_request_t *job; 6659 fp_cmd_t *cmd; 6660 la_els_rnid_acc_t *acc; 6661 6662 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6663 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6664 6665 cmd = pkt->pkt_ulp_private; 6666 job = cmd->cmd_job; 6667 ASSERT(job->job_private != NULL); 6668 6669 /* If failure or LS_RJT then retry the packet, if needed */ 6670 if (pkt->pkt_state != FC_PKT_SUCCESS || resp.ls_code != LA_ELS_ACC) { 6671 (void) fp_common_intr(pkt, 1); 6672 return; 6673 } 6674 6675 /* Save node_id memory allocated in ioctl code */ 6676 acc = (la_els_rnid_acc_t *)pkt->pkt_resp; 6677 6678 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6679 (uint8_t *)acc, sizeof (la_els_rnid_acc_t), DDI_DEV_AUTOINCR); 6680 6681 /* wakeup the ioctl thread and free the pkt */ 6682 fp_iodone(cmd); 6683 } 6684 6685 6686 /* 6687 * Handle solicited RLS response 6688 */ 6689 static void 6690 fp_rls_intr(fc_packet_t *pkt) 6691 { 6692 ls_code_t resp; 6693 job_request_t *job; 6694 fp_cmd_t *cmd; 6695 la_els_rls_acc_t *acc; 6696 6697 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp, 6698 (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR); 6699 6700 cmd = pkt->pkt_ulp_private; 6701 job = cmd->cmd_job; 6702 ASSERT(job->job_private != NULL); 6703 6704 /* If failure or LS_RJT then retry the packet, if needed */ 6705 if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) { 6706 (void) fp_common_intr(pkt, 1); 6707 return; 6708 } 6709 6710 /* Save link error status block in memory allocated in ioctl code */ 6711 acc = (la_els_rls_acc_t *)pkt->pkt_resp; 6712 6713 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)job->job_private, 6714 (uint8_t *)&acc->rls_link_params, sizeof (fc_rls_acc_t), 6715 DDI_DEV_AUTOINCR); 6716 6717 /* wakeup the ioctl thread and free the pkt */ 6718 fp_iodone(cmd); 6719 } 6720 6721 6722 
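/*
 * The solicited ELS completion handlers above (LOGO, RNID, RLS) share a
 * common shape.  The sketch below only illustrates that pattern and is not
 * part of the driver; "fp_example_intr" is a hypothetical name.  The response
 * code is decoded with ddi_rep_get8(), errors and rejects are handed to
 * fp_common_intr() for retry/error disposition, and fp_iodone() completes
 * the command and wakes the waiting job.
 *
 *	static void
 *	fp_example_intr(fc_packet_t *pkt)
 *	{
 *		ls_code_t	resp;
 *
 *		ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp,
 *		    (uint8_t *)pkt->pkt_resp, sizeof (resp), DDI_DEV_AUTOINCR);
 *
 *		if (FP_IS_PKT_ERROR(pkt) || resp.ls_code != LA_ELS_ACC) {
 *			(void) fp_common_intr(pkt, 1);
 *			return;
 *		}
 *
 *		fp_iodone(pkt->pkt_ulp_private);
 *	}
 */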
/* 6723 * A solicited command completion interrupt (mostly for commands 6724 * that require almost no post processing such as SCR ELS) 6725 */ 6726 static void 6727 fp_intr(fc_packet_t *pkt) 6728 { 6729 if (FP_IS_PKT_ERROR(pkt)) { 6730 (void) fp_common_intr(pkt, 1); 6731 return; 6732 } 6733 fp_iodone(pkt->pkt_ulp_private); 6734 } 6735 6736 6737 /* 6738 * Handle the underlying port's state change 6739 */ 6740 static void 6741 fp_statec_cb(opaque_t port_handle, uint32_t state) 6742 { 6743 fc_local_port_t *port = port_handle; 6744 job_request_t *job; 6745 6746 /* 6747 * If it is not possible to process the callbacks 6748 * just drop the callback on the floor; Don't bother 6749 * to do something that isn't safe at this time 6750 */ 6751 mutex_enter(&port->fp_mutex); 6752 if ((port->fp_soft_state & 6753 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 6754 (FC_PORT_STATE_MASK(port->fp_state) == FC_PORT_STATE_MASK(state))) { 6755 mutex_exit(&port->fp_mutex); 6756 return; 6757 } 6758 6759 if (port->fp_statec_busy == 0) { 6760 port->fp_soft_state |= FP_SOFT_IN_STATEC_CB; 6761 #ifdef DEBUG 6762 } else { 6763 ASSERT(port->fp_soft_state & FP_SOFT_IN_STATEC_CB); 6764 #endif 6765 } 6766 6767 port->fp_statec_busy++; 6768 6769 /* 6770 * For now, force the trusted method of device authentication (by 6771 * PLOGI) when LIPs do not involve OFFLINE to ONLINE transition. 6772 */ 6773 if (FC_PORT_STATE_MASK(state) == FC_STATE_LIP || 6774 FC_PORT_STATE_MASK(state) == FC_STATE_LIP_LBIT_SET) { 6775 state = FC_PORT_SPEED_MASK(port->fp_state) | FC_STATE_LOOP; 6776 fp_port_offline(port, 0); 6777 } 6778 mutex_exit(&port->fp_mutex); 6779 6780 switch (FC_PORT_STATE_MASK(state)) { 6781 case FC_STATE_OFFLINE: 6782 job = fctl_alloc_job(JOB_PORT_OFFLINE, 6783 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6784 if (job == NULL) { 6785 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6786 " fp_statec_cb() couldn't submit a job " 6787 " to the thread: failing.."); 6788 mutex_enter(&port->fp_mutex); 6789 if (--port->fp_statec_busy == 0) { 6790 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6791 } 6792 mutex_exit(&port->fp_mutex); 6793 return; 6794 } 6795 mutex_enter(&port->fp_mutex); 6796 /* 6797 * Zero out this field so that we do not retain 6798 * the fabric name as its no longer valid 6799 */ 6800 bzero(&port->fp_fabric_name, sizeof (la_wwn_t)); 6801 port->fp_state = state; 6802 mutex_exit(&port->fp_mutex); 6803 6804 fctl_enque_job(port, job); 6805 break; 6806 6807 case FC_STATE_ONLINE: 6808 case FC_STATE_LOOP: 6809 mutex_enter(&port->fp_mutex); 6810 port->fp_state = state; 6811 6812 if (port->fp_offline_tid) { 6813 timeout_id_t tid; 6814 6815 tid = port->fp_offline_tid; 6816 port->fp_offline_tid = NULL; 6817 mutex_exit(&port->fp_mutex); 6818 (void) untimeout(tid); 6819 } else { 6820 mutex_exit(&port->fp_mutex); 6821 } 6822 6823 job = fctl_alloc_job(JOB_PORT_ONLINE, 6824 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6825 if (job == NULL) { 6826 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6827 "fp_statec_cb() couldn't submit a job " 6828 "to the thread: failing.."); 6829 6830 mutex_enter(&port->fp_mutex); 6831 if (--port->fp_statec_busy == 0) { 6832 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6833 } 6834 mutex_exit(&port->fp_mutex); 6835 return; 6836 } 6837 fctl_enque_job(port, job); 6838 break; 6839 6840 case FC_STATE_RESET_REQUESTED: 6841 mutex_enter(&port->fp_mutex); 6842 port->fp_state = FC_STATE_OFFLINE; 6843 port->fp_soft_state |= FP_SOFT_IN_FCA_RESET; 6844 mutex_exit(&port->fp_mutex); 6845 /* 
FALLTHROUGH */ 6846 6847 case FC_STATE_RESET: 6848 job = fctl_alloc_job(JOB_ULP_NOTIFY, 6849 JOB_TYPE_FCTL_ASYNC, NULL, NULL, KM_NOSLEEP); 6850 if (job == NULL) { 6851 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 6852 "fp_statec_cb() couldn't submit a job" 6853 " to the thread: failing.."); 6854 6855 mutex_enter(&port->fp_mutex); 6856 if (--port->fp_statec_busy == 0) { 6857 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6858 } 6859 mutex_exit(&port->fp_mutex); 6860 return; 6861 } 6862 6863 /* squeeze into some field in the job structure */ 6864 job->job_ulp_listlen = FC_PORT_STATE_MASK(state); 6865 fctl_enque_job(port, job); 6866 break; 6867 6868 case FC_STATE_TARGET_PORT_RESET: 6869 (void) fp_ulp_notify(port, state, KM_NOSLEEP); 6870 /* FALLTHROUGH */ 6871 6872 case FC_STATE_NAMESERVICE: 6873 /* FALLTHROUGH */ 6874 6875 default: 6876 mutex_enter(&port->fp_mutex); 6877 if (--port->fp_statec_busy == 0) { 6878 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 6879 } 6880 mutex_exit(&port->fp_mutex); 6881 break; 6882 } 6883 } 6884 6885 6886 /* 6887 * Register with the Name Server for RSCNs 6888 */ 6889 static int 6890 fp_ns_scr(fc_local_port_t *port, job_request_t *job, uchar_t scr_func, 6891 int sleep) 6892 { 6893 uint32_t s_id; 6894 uchar_t class; 6895 fc_scr_req_t payload; 6896 fp_cmd_t *cmd; 6897 fc_packet_t *pkt; 6898 6899 mutex_enter(&port->fp_mutex); 6900 s_id = port->fp_port_id.port_id; 6901 class = port->fp_ns_login_class; 6902 mutex_exit(&port->fp_mutex); 6903 6904 cmd = fp_alloc_pkt(port, sizeof (fc_scr_req_t), 6905 sizeof (fc_scr_resp_t), sleep, NULL); 6906 if (cmd == NULL) { 6907 return (FC_NOMEM); 6908 } 6909 6910 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 6911 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 6912 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 6913 cmd->cmd_retry_count = fp_retry_count; 6914 cmd->cmd_ulp_pkt = NULL; 6915 6916 pkt = &cmd->cmd_pkt; 6917 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 6918 6919 fp_els_init(cmd, s_id, 0xFFFFFD, fp_intr, job); 6920 6921 payload.ls_code.ls_code = LA_ELS_SCR; 6922 payload.ls_code.mbz = 0; 6923 payload.scr_rsvd = 0; 6924 payload.scr_func = scr_func; 6925 6926 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 6927 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 6928 6929 job->job_counter = 1; 6930 6931 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 6932 fp_iodone(cmd); 6933 } 6934 6935 return (FC_SUCCESS); 6936 } 6937 6938 6939 /* 6940 * There are basically two methods to determine the total number of 6941 * devices out in the NS database; Reading the details of the two 6942 * methods described below, it shouldn't be hard to identify which 6943 * of the two methods is better. 6944 * 6945 * Method 1. 6946 * Iteratively issue GANs until all ports identifiers are walked 6947 * 6948 * Method 2. 6949 * Issue GID_PT (get port Identifiers) with Maximum residual 6950 * field in the request CT HEADER set to accommodate only the 6951 * CT HEADER in the response frame. And if FC-GS2 has been 6952 * carefully read, the NS here has a chance to FS_ACC the 6953 * request and indicate the residual size in the FS_ACC. 
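 * For example: if the GID_PT response buffer has room for nothing but
 * the CT header and the FS_ACC comes back with a residual of N port
 * identifiers, the FP_NS_SMART_COUNT path in fp_ns_get_devcount() below
 * sizes the fabric at 1 + N devices.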
6954 * 6955 * Method 2 is wonderful, although it's not mandatory for the NS 6956 * to update the Maximum/Residual Field as can be seen in 4.3.1.6 6957 * (note with particular care the use of the auxiliary verb 'may') 6958 * 6959 */ 6960 static int 6961 fp_ns_get_devcount(fc_local_port_t *port, job_request_t *job, int create, 6962 int sleep) 6963 { 6964 int flags; 6965 int rval; 6966 uint32_t src_id; 6967 fctl_ns_req_t *ns_cmd; 6968 6969 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 6970 6971 mutex_enter(&port->fp_mutex); 6972 src_id = port->fp_port_id.port_id; 6973 mutex_exit(&port->fp_mutex); 6974 6975 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 6976 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pt_t), 6977 sizeof (ns_resp_gid_pt_t), 0, 6978 (FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF), sleep); 6979 6980 if (ns_cmd == NULL) { 6981 return (FC_NOMEM); 6982 } 6983 6984 ns_cmd->ns_cmd_code = NS_GID_PT; 6985 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.port_type 6986 = FC_NS_PORT_NX; /* All port types */ 6987 ((ns_req_gid_pt_t *)(ns_cmd->ns_cmd_buf))->port_type.rsvd = 0; 6988 6989 } else { 6990 uint32_t ns_flags; 6991 6992 ns_flags = FCTL_NS_GET_DEV_COUNT | FCTL_NS_NO_DATA_BUF; 6993 if (create) { 6994 ns_flags |= FCTL_NS_CREATE_DEVICE; 6995 } 6996 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 6997 sizeof (ns_resp_gan_t), sizeof (int), ns_flags, sleep); 6998 6999 if (ns_cmd == NULL) { 7000 return (FC_NOMEM); 7001 } 7002 ns_cmd->ns_gan_index = 0; 7003 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 7004 ns_cmd->ns_cmd_code = NS_GA_NXT; 7005 ns_cmd->ns_gan_max = 0xFFFF; 7006 7007 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = src_id; 7008 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 7009 } 7010 7011 flags = job->job_flags; 7012 job->job_flags &= ~JOB_TYPE_FP_ASYNC; 7013 job->job_counter = 1; 7014 7015 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 7016 job->job_flags = flags; 7017 7018 if (!create && (port->fp_options & FP_NS_SMART_COUNT)) { 7019 uint16_t max_resid; 7020 7021 /* 7022 * Revert to scanning the NS if NS_GID_PT isn't 7023 * helping us figure out total number of devices. 7024 */ 7025 if (job->job_result != FC_SUCCESS || 7026 ns_cmd->ns_resp_hdr.ct_cmdrsp != FS_ACC_IU) { 7027 mutex_enter(&port->fp_mutex); 7028 port->fp_options &= ~FP_NS_SMART_COUNT; 7029 mutex_exit(&port->fp_mutex); 7030 7031 fctl_free_ns_cmd(ns_cmd); 7032 return (fp_ns_get_devcount(port, job, create, sleep)); 7033 } 7034 7035 mutex_enter(&port->fp_mutex); 7036 port->fp_total_devices = 1; 7037 max_resid = ns_cmd->ns_resp_hdr.ct_aiusize; 7038 if (max_resid) { 7039 /* 7040 * Since port identifier is 4 bytes and max_resid 7041 * is also in WORDS, max_resid simply indicates 7042 * the total number of port identifiers not 7043 * transferred 7044 */ 7045 port->fp_total_devices += max_resid; 7046 } 7047 mutex_exit(&port->fp_mutex); 7048 } 7049 mutex_enter(&port->fp_mutex); 7050 port->fp_total_devices = *((int *)ns_cmd->ns_data_buf); 7051 mutex_exit(&port->fp_mutex); 7052 fctl_free_ns_cmd(ns_cmd); 7053 7054 return (rval); 7055 } 7056 7057 /* 7058 * One heck of a function to serve userland. 
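 *
 * fp_fciocmd() is the dispatcher behind fp_ioctl(): it checks the caller's
 * open mode against the requested command (fp_check_perms), serializes
 * commands that demand exclusive access (FP_EXCL/FP_EXCL_BUSY), and then
 * handles each FCIO_* command in the switch below, copying results back to
 * userland with fp_copyout()/fp_fcio_copyout().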
7059 */ 7060 static int 7061 fp_fciocmd(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 7062 { 7063 int rval = 0; 7064 int jcode; 7065 uint32_t ret; 7066 uchar_t open_flag; 7067 fcio_t *kfcio; 7068 job_request_t *job; 7069 boolean_t use32 = B_FALSE; 7070 7071 #ifdef _MULTI_DATAMODEL 7072 switch (ddi_model_convert_from(mode & FMODELS)) { 7073 case DDI_MODEL_ILP32: 7074 use32 = B_TRUE; 7075 break; 7076 7077 case DDI_MODEL_NONE: 7078 default: 7079 break; 7080 } 7081 #endif 7082 7083 mutex_enter(&port->fp_mutex); 7084 if (port->fp_soft_state & (FP_SOFT_IN_STATEC_CB | 7085 FP_SOFT_IN_UNSOL_CB)) { 7086 fcio->fcio_errno = FC_STATEC_BUSY; 7087 mutex_exit(&port->fp_mutex); 7088 rval = EAGAIN; 7089 if (fp_fcio_copyout(fcio, data, mode)) { 7090 rval = EFAULT; 7091 } 7092 return (rval); 7093 } 7094 open_flag = port->fp_flag; 7095 mutex_exit(&port->fp_mutex); 7096 7097 if (fp_check_perms(open_flag, fcio->fcio_cmd) != FC_SUCCESS) { 7098 fcio->fcio_errno = FC_FAILURE; 7099 rval = EACCES; 7100 if (fp_fcio_copyout(fcio, data, mode)) { 7101 rval = EFAULT; 7102 } 7103 return (rval); 7104 } 7105 7106 /* 7107 * If an exclusive open was demanded during open, don't let 7108 * either innocuous or devil threads to share the file 7109 * descriptor and fire down exclusive access commands 7110 */ 7111 mutex_enter(&port->fp_mutex); 7112 if (port->fp_flag & FP_EXCL) { 7113 if (port->fp_flag & FP_EXCL_BUSY) { 7114 mutex_exit(&port->fp_mutex); 7115 fcio->fcio_errno = FC_FAILURE; 7116 return (EBUSY); 7117 } 7118 port->fp_flag |= FP_EXCL_BUSY; 7119 } 7120 mutex_exit(&port->fp_mutex); 7121 7122 switch (fcio->fcio_cmd) { 7123 case FCIO_GET_HOST_PARAMS: { 7124 fc_port_dev_t *val; 7125 fc_port_dev32_t *val32; 7126 int index; 7127 int lilp_device_count; 7128 fc_lilpmap_t *lilp_map; 7129 uchar_t *alpa_list; 7130 7131 if (use32 == B_TRUE) { 7132 if (fcio->fcio_olen != sizeof (*val32) || 7133 fcio->fcio_xfer != FCIO_XFER_READ) { 7134 rval = EINVAL; 7135 break; 7136 } 7137 } else { 7138 if (fcio->fcio_olen != sizeof (*val) || 7139 fcio->fcio_xfer != FCIO_XFER_READ) { 7140 rval = EINVAL; 7141 break; 7142 } 7143 } 7144 7145 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7146 7147 mutex_enter(&port->fp_mutex); 7148 val->dev_did = port->fp_port_id; 7149 val->dev_hard_addr = port->fp_hard_addr; 7150 val->dev_pwwn = port->fp_service_params.nport_ww_name; 7151 val->dev_nwwn = port->fp_service_params.node_ww_name; 7152 val->dev_state = port->fp_state; 7153 7154 lilp_map = &port->fp_lilp_map; 7155 alpa_list = &lilp_map->lilp_alpalist[0]; 7156 lilp_device_count = lilp_map->lilp_length; 7157 for (index = 0; index < lilp_device_count; index++) { 7158 uint32_t d_id; 7159 7160 d_id = alpa_list[index]; 7161 if (d_id == port->fp_port_id.port_id) { 7162 break; 7163 } 7164 } 7165 val->dev_did.priv_lilp_posit = (uint8_t)(index & 0xff); 7166 7167 bcopy(port->fp_fc4_types, val->dev_type, 7168 sizeof (port->fp_fc4_types)); 7169 mutex_exit(&port->fp_mutex); 7170 7171 if (use32 == B_TRUE) { 7172 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7173 7174 val32->dev_did = val->dev_did; 7175 val32->dev_hard_addr = val->dev_hard_addr; 7176 val32->dev_pwwn = val->dev_pwwn; 7177 val32->dev_nwwn = val->dev_nwwn; 7178 val32->dev_state = val->dev_state; 7179 val32->dev_did.priv_lilp_posit = 7180 val->dev_did.priv_lilp_posit; 7181 7182 bcopy(val->dev_type, val32->dev_type, 7183 sizeof (port->fp_fc4_types)); 7184 7185 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7186 fcio->fcio_olen, mode) == 0) { 7187 if (fp_fcio_copyout(fcio, data, mode)) { 
7188 rval = EFAULT; 7189 } 7190 } else { 7191 rval = EFAULT; 7192 } 7193 7194 kmem_free(val32, sizeof (*val32)); 7195 } else { 7196 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7197 fcio->fcio_olen, mode) == 0) { 7198 if (fp_fcio_copyout(fcio, data, mode)) { 7199 rval = EFAULT; 7200 } 7201 } else { 7202 rval = EFAULT; 7203 } 7204 } 7205 7206 /* need to free "val" here */ 7207 kmem_free(val, sizeof (*val)); 7208 break; 7209 } 7210 7211 case FCIO_GET_OTHER_ADAPTER_PORTS: { 7212 uint32_t index; 7213 char *tmpPath; 7214 fc_local_port_t *tmpPort; 7215 7216 if (fcio->fcio_olen < MAXPATHLEN || 7217 fcio->fcio_ilen != sizeof (uint32_t)) { 7218 rval = EINVAL; 7219 break; 7220 } 7221 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7222 rval = EFAULT; 7223 break; 7224 } 7225 7226 tmpPort = fctl_get_adapter_port_by_index(port, index); 7227 if (tmpPort == NULL) { 7228 FP_TRACE(FP_NHEAD1(9, 0), 7229 "User supplied index out of range"); 7230 fcio->fcio_errno = FC_BADPORT; 7231 rval = EFAULT; 7232 if (fp_fcio_copyout(fcio, data, mode)) { 7233 rval = EFAULT; 7234 } 7235 break; 7236 } 7237 7238 tmpPath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7239 (void) ddi_pathname(tmpPort->fp_port_dip, tmpPath); 7240 if (fp_copyout((void *)tmpPath, (void *)fcio->fcio_obuf, 7241 MAXPATHLEN, mode) == 0) { 7242 if (fp_fcio_copyout(fcio, data, mode)) { 7243 rval = EFAULT; 7244 } 7245 } else { 7246 rval = EFAULT; 7247 } 7248 kmem_free(tmpPath, MAXPATHLEN); 7249 break; 7250 } 7251 7252 case FCIO_NPIV_GET_ADAPTER_ATTRIBUTES: 7253 case FCIO_GET_ADAPTER_ATTRIBUTES: { 7254 fc_hba_adapter_attributes_t *val; 7255 fc_hba_adapter_attributes32_t *val32; 7256 7257 if (use32 == B_TRUE) { 7258 if (fcio->fcio_olen < sizeof (*val32) || 7259 fcio->fcio_xfer != FCIO_XFER_READ) { 7260 rval = EINVAL; 7261 break; 7262 } 7263 } else { 7264 if (fcio->fcio_olen < sizeof (*val) || 7265 fcio->fcio_xfer != FCIO_XFER_READ) { 7266 rval = EINVAL; 7267 break; 7268 } 7269 } 7270 7271 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7272 val->version = FC_HBA_ADAPTER_ATTRIBUTES_VERSION; 7273 mutex_enter(&port->fp_mutex); 7274 bcopy(port->fp_hba_port_attrs.manufacturer, 7275 val->Manufacturer, 7276 sizeof (val->Manufacturer)); 7277 bcopy(port->fp_hba_port_attrs.serial_number, 7278 val->SerialNumber, 7279 sizeof (val->SerialNumber)); 7280 bcopy(port->fp_hba_port_attrs.model, 7281 val->Model, 7282 sizeof (val->Model)); 7283 bcopy(port->fp_hba_port_attrs.model_description, 7284 val->ModelDescription, 7285 sizeof (val->ModelDescription)); 7286 bcopy(port->fp_sym_node_name, val->NodeSymbolicName, 7287 sizeof (val->NodeSymbolicName)); 7288 bcopy(port->fp_hba_port_attrs.hardware_version, 7289 val->HardwareVersion, 7290 sizeof (val->HardwareVersion)); 7291 bcopy(port->fp_hba_port_attrs.option_rom_version, 7292 val->OptionROMVersion, 7293 sizeof (val->OptionROMVersion)); 7294 bcopy(port->fp_hba_port_attrs.firmware_version, 7295 val->FirmwareVersion, 7296 sizeof (val->FirmwareVersion)); 7297 val->VendorSpecificID = 7298 port->fp_hba_port_attrs.vendor_specific_id; 7299 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7300 &val->NodeWWN.raw_wwn, 7301 sizeof (val->NodeWWN.raw_wwn)); 7302 7303 7304 bcopy(port->fp_hba_port_attrs.driver_name, 7305 val->DriverName, 7306 sizeof (val->DriverName)); 7307 bcopy(port->fp_hba_port_attrs.driver_version, 7308 val->DriverVersion, 7309 sizeof (val->DriverVersion)); 7310 mutex_exit(&port->fp_mutex); 7311 7312 if (fcio->fcio_cmd == FCIO_GET_ADAPTER_ATTRIBUTES) { 7313 val->NumberOfPorts = fctl_count_fru_ports(port, 
0); 7314 } else { 7315 val->NumberOfPorts = fctl_count_fru_ports(port, 1); 7316 } 7317 7318 if (use32 == B_TRUE) { 7319 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7320 val32->version = val->version; 7321 bcopy(val->Manufacturer, val32->Manufacturer, 7322 sizeof (val->Manufacturer)); 7323 bcopy(val->SerialNumber, val32->SerialNumber, 7324 sizeof (val->SerialNumber)); 7325 bcopy(val->Model, val32->Model, 7326 sizeof (val->Model)); 7327 bcopy(val->ModelDescription, val32->ModelDescription, 7328 sizeof (val->ModelDescription)); 7329 bcopy(val->NodeSymbolicName, val32->NodeSymbolicName, 7330 sizeof (val->NodeSymbolicName)); 7331 bcopy(val->HardwareVersion, val32->HardwareVersion, 7332 sizeof (val->HardwareVersion)); 7333 bcopy(val->OptionROMVersion, val32->OptionROMVersion, 7334 sizeof (val->OptionROMVersion)); 7335 bcopy(val->FirmwareVersion, val32->FirmwareVersion, 7336 sizeof (val->FirmwareVersion)); 7337 val32->VendorSpecificID = val->VendorSpecificID; 7338 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7339 sizeof (val->NodeWWN.raw_wwn)); 7340 bcopy(val->DriverName, val32->DriverName, 7341 sizeof (val->DriverName)); 7342 bcopy(val->DriverVersion, val32->DriverVersion, 7343 sizeof (val->DriverVersion)); 7344 7345 val32->NumberOfPorts = val->NumberOfPorts; 7346 7347 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7348 fcio->fcio_olen, mode) == 0) { 7349 if (fp_fcio_copyout(fcio, data, mode)) { 7350 rval = EFAULT; 7351 } 7352 } else { 7353 rval = EFAULT; 7354 } 7355 7356 kmem_free(val32, sizeof (*val32)); 7357 } else { 7358 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7359 fcio->fcio_olen, mode) == 0) { 7360 if (fp_fcio_copyout(fcio, data, mode)) { 7361 rval = EFAULT; 7362 } 7363 } else { 7364 rval = EFAULT; 7365 } 7366 } 7367 7368 kmem_free(val, sizeof (*val)); 7369 break; 7370 } 7371 7372 case FCIO_GET_NPIV_ATTRIBUTES: { 7373 fc_hba_npiv_attributes_t *attrs; 7374 7375 attrs = kmem_zalloc(sizeof (*attrs), KM_SLEEP); 7376 mutex_enter(&port->fp_mutex); 7377 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7378 &attrs->NodeWWN.raw_wwn, 7379 sizeof (attrs->NodeWWN.raw_wwn)); 7380 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7381 &attrs->PortWWN.raw_wwn, 7382 sizeof (attrs->PortWWN.raw_wwn)); 7383 mutex_exit(&port->fp_mutex); 7384 if (fp_copyout((void *)attrs, (void *)fcio->fcio_obuf, 7385 fcio->fcio_olen, mode) == 0) { 7386 if (fp_fcio_copyout(fcio, data, mode)) { 7387 rval = EFAULT; 7388 } 7389 } else { 7390 rval = EFAULT; 7391 } 7392 kmem_free(attrs, sizeof (*attrs)); 7393 break; 7394 } 7395 7396 case FCIO_DELETE_NPIV_PORT: { 7397 fc_local_port_t *tmpport; 7398 char ww_pname[17]; 7399 la_wwn_t vwwn[1]; 7400 7401 FP_TRACE(FP_NHEAD1(1, 0), "Delete NPIV Port"); 7402 if (ddi_copyin(fcio->fcio_ibuf, 7403 &vwwn, sizeof (la_wwn_t), mode)) { 7404 rval = EFAULT; 7405 break; 7406 } 7407 7408 fc_wwn_to_str(&vwwn[0], ww_pname); 7409 FP_TRACE(FP_NHEAD1(3, 0), 7410 "Delete NPIV Port %s", ww_pname); 7411 tmpport = fc_delete_npiv_port(port, &vwwn[0]); 7412 if (tmpport == NULL) { 7413 FP_TRACE(FP_NHEAD1(3, 0), 7414 "Delete NPIV Port : no found"); 7415 rval = EFAULT; 7416 } else { 7417 fc_local_port_t *nextport = tmpport->fp_port_next; 7418 fc_local_port_t *prevport = tmpport->fp_port_prev; 7419 int portlen, portindex, ret; 7420 7421 portlen = sizeof (portindex); 7422 ret = ddi_prop_op(DDI_DEV_T_ANY, 7423 tmpport->fp_port_dip, PROP_LEN_AND_VAL_BUF, 7424 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "port", 7425 (caddr_t)&portindex, &portlen); 7426 if (ret != DDI_SUCCESS) { 7427 
rval = EFAULT; 7428 break; 7429 } 7430 if (ndi_devi_offline(tmpport->fp_port_dip, 7431 NDI_DEVI_REMOVE) != DDI_SUCCESS) { 7432 FP_TRACE(FP_NHEAD1(1, 0), 7433 "Delete NPIV Port failed"); 7434 mutex_enter(&port->fp_mutex); 7435 tmpport->fp_npiv_state = 0; 7436 mutex_exit(&port->fp_mutex); 7437 rval = EFAULT; 7438 } else { 7439 mutex_enter(&port->fp_mutex); 7440 nextport->fp_port_prev = prevport; 7441 prevport->fp_port_next = nextport; 7442 if (port == port->fp_port_next) { 7443 port->fp_port_next = 7444 port->fp_port_prev = NULL; 7445 } 7446 port->fp_npiv_portnum--; 7447 FP_TRACE(FP_NHEAD1(3, 0), 7448 "Delete NPIV Port %d", portindex); 7449 port->fp_npiv_portindex[portindex-1] = 0; 7450 mutex_exit(&port->fp_mutex); 7451 } 7452 } 7453 break; 7454 } 7455 7456 case FCIO_CREATE_NPIV_PORT: { 7457 char ww_nname[17], ww_pname[17]; 7458 la_npiv_create_entry_t entrybuf; 7459 uint32_t vportindex = 0; 7460 int npiv_ret = 0; 7461 char *portname, *fcaname; 7462 7463 portname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7464 (void) ddi_pathname(port->fp_port_dip, portname); 7465 fcaname = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 7466 (void) ddi_pathname(port->fp_fca_dip, fcaname); 7467 FP_TRACE(FP_NHEAD1(1, 0), 7468 "Create NPIV port %s %s %s", portname, fcaname, 7469 ddi_driver_name(port->fp_fca_dip)); 7470 kmem_free(portname, MAXPATHLEN); 7471 kmem_free(fcaname, MAXPATHLEN); 7472 if (ddi_copyin(fcio->fcio_ibuf, 7473 &entrybuf, sizeof (la_npiv_create_entry_t), mode)) { 7474 rval = EFAULT; 7475 break; 7476 } 7477 7478 fc_wwn_to_str(&entrybuf.VNodeWWN, ww_nname); 7479 fc_wwn_to_str(&entrybuf.VPortWWN, ww_pname); 7480 vportindex = entrybuf.vindex; 7481 FP_TRACE(FP_NHEAD1(3, 0), 7482 "Create NPIV Port %s %s %d", 7483 ww_nname, ww_pname, vportindex); 7484 7485 if (fc_get_npiv_port(port, &entrybuf.VPortWWN)) { 7486 rval = EFAULT; 7487 break; 7488 } 7489 npiv_ret = fctl_fca_create_npivport(port->fp_fca_dip, 7490 port->fp_port_dip, ww_nname, ww_pname, &vportindex); 7491 if (npiv_ret == NDI_SUCCESS) { 7492 mutex_enter(&port->fp_mutex); 7493 port->fp_npiv_portnum++; 7494 mutex_exit(&port->fp_mutex); 7495 if (fp_copyout((void *)&vportindex, 7496 (void *)fcio->fcio_obuf, 7497 fcio->fcio_olen, mode) == 0) { 7498 if (fp_fcio_copyout(fcio, data, mode)) { 7499 rval = EFAULT; 7500 } 7501 } else { 7502 rval = EFAULT; 7503 } 7504 } else { 7505 rval = EFAULT; 7506 } 7507 FP_TRACE(FP_NHEAD1(3, 0), 7508 "Create NPIV Port %d %d", npiv_ret, vportindex); 7509 break; 7510 } 7511 7512 case FCIO_GET_NPIV_PORT_LIST: { 7513 fc_hba_npiv_port_list_t *list; 7514 int count; 7515 7516 if ((fcio->fcio_xfer != FCIO_XFER_READ) || 7517 (fcio->fcio_olen == 0) || (fcio->fcio_obuf == 0)) { 7518 rval = EINVAL; 7519 break; 7520 } 7521 7522 list = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 7523 list->version = FC_HBA_LIST_VERSION; 7524 /* build npiv port list */ 7525 count = fc_ulp_get_npiv_port_list(port, (char *)list->hbaPaths); 7526 if (count < 0) { 7527 rval = ENXIO; 7528 FP_TRACE(FP_NHEAD1(1, 0), "Build NPIV Port List error"); 7529 kmem_free(list, fcio->fcio_olen); 7530 break; 7531 } 7532 list->numAdapters = count; 7533 7534 if (fp_copyout((void *)list, (void *)fcio->fcio_obuf, 7535 fcio->fcio_olen, mode) == 0) { 7536 if (fp_fcio_copyout(fcio, data, mode)) { 7537 FP_TRACE(FP_NHEAD1(1, 0), 7538 "Copy NPIV Port data error"); 7539 rval = EFAULT; 7540 } 7541 } else { 7542 FP_TRACE(FP_NHEAD1(1, 0), "Copy NPIV Port List error"); 7543 rval = EFAULT; 7544 } 7545 kmem_free(list, fcio->fcio_olen); 7546 break; 7547 } 7548 7549 case FCIO_GET_ADAPTER_PORT_NPIV_ATTRIBUTES: 
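	/*
	 * Report the local port's NPIV view: the npiv flag, last change
	 * counter, node/port WWNs and the current/maximum NPIV port counts.
	 */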
{ 7550 fc_hba_port_npiv_attributes_t *val; 7551 7552 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7553 val->version = FC_HBA_PORT_NPIV_ATTRIBUTES_VERSION; 7554 7555 mutex_enter(&port->fp_mutex); 7556 val->npivflag = port->fp_npiv_flag; 7557 val->lastChange = port->fp_last_change; 7558 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7559 &val->PortWWN.raw_wwn, 7560 sizeof (val->PortWWN.raw_wwn)); 7561 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7562 &val->NodeWWN.raw_wwn, 7563 sizeof (val->NodeWWN.raw_wwn)); 7564 mutex_exit(&port->fp_mutex); 7565 7566 val->NumberOfNPIVPorts = fc_ulp_get_npiv_port_num(port); 7567 if (port->fp_npiv_type != FC_NPIV_PORT) { 7568 val->MaxNumberOfNPIVPorts = 7569 port->fp_fca_tran->fca_num_npivports; 7570 } else { 7571 val->MaxNumberOfNPIVPorts = 0; 7572 } 7573 7574 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7575 fcio->fcio_olen, mode) == 0) { 7576 if (fp_fcio_copyout(fcio, data, mode)) { 7577 rval = EFAULT; 7578 } 7579 } else { 7580 rval = EFAULT; 7581 } 7582 kmem_free(val, sizeof (*val)); 7583 break; 7584 } 7585 7586 case FCIO_GET_ADAPTER_PORT_ATTRIBUTES: { 7587 fc_hba_port_attributes_t *val; 7588 fc_hba_port_attributes32_t *val32; 7589 7590 if (use32 == B_TRUE) { 7591 if (fcio->fcio_olen < sizeof (*val32) || 7592 fcio->fcio_xfer != FCIO_XFER_READ) { 7593 rval = EINVAL; 7594 break; 7595 } 7596 } else { 7597 if (fcio->fcio_olen < sizeof (*val) || 7598 fcio->fcio_xfer != FCIO_XFER_READ) { 7599 rval = EINVAL; 7600 break; 7601 } 7602 } 7603 7604 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7605 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7606 mutex_enter(&port->fp_mutex); 7607 val->lastChange = port->fp_last_change; 7608 val->fp_minor = port->fp_instance; 7609 7610 bcopy(&port->fp_service_params.nport_ww_name.raw_wwn, 7611 &val->PortWWN.raw_wwn, 7612 sizeof (val->PortWWN.raw_wwn)); 7613 bcopy(&port->fp_service_params.node_ww_name.raw_wwn, 7614 &val->NodeWWN.raw_wwn, 7615 sizeof (val->NodeWWN.raw_wwn)); 7616 bcopy(&port->fp_fabric_name, &val->FabricName.raw_wwn, 7617 sizeof (val->FabricName.raw_wwn)); 7618 7619 val->PortFcId = port->fp_port_id.port_id; 7620 7621 switch (FC_PORT_STATE_MASK(port->fp_state)) { 7622 case FC_STATE_OFFLINE: 7623 val->PortState = FC_HBA_PORTSTATE_OFFLINE; 7624 break; 7625 case FC_STATE_ONLINE: 7626 case FC_STATE_LOOP: 7627 case FC_STATE_NAMESERVICE: 7628 val->PortState = FC_HBA_PORTSTATE_ONLINE; 7629 break; 7630 default: 7631 val->PortState = FC_HBA_PORTSTATE_UNKNOWN; 7632 break; 7633 } 7634 7635 /* Translate from LV to FC-HBA port type codes */ 7636 switch (port->fp_port_type.port_type) { 7637 case FC_NS_PORT_N: 7638 val->PortType = FC_HBA_PORTTYPE_NPORT; 7639 break; 7640 case FC_NS_PORT_NL: /* Actually means loop for us */ 7641 val->PortType = FC_HBA_PORTTYPE_LPORT; 7642 break; 7643 case FC_NS_PORT_F: 7644 val->PortType = FC_HBA_PORTTYPE_FPORT; 7645 break; 7646 case FC_NS_PORT_FL: 7647 val->PortType = FC_HBA_PORTTYPE_FLPORT; 7648 break; 7649 case FC_NS_PORT_E: 7650 val->PortType = FC_HBA_PORTTYPE_EPORT; 7651 break; 7652 default: 7653 val->PortType = FC_HBA_PORTTYPE_OTHER; 7654 break; 7655 } 7656 7657 7658 /* 7659 * If fp has decided that the topology is public loop, 7660 * we will indicate that using the appropriate 7661 * FC HBA API constant. 
7662 */ 7663 switch (port->fp_topology) { 7664 case FC_TOP_PUBLIC_LOOP: 7665 val->PortType = FC_HBA_PORTTYPE_NLPORT; 7666 break; 7667 7668 case FC_TOP_PT_PT: 7669 val->PortType = FC_HBA_PORTTYPE_PTP; 7670 break; 7671 7672 case FC_TOP_UNKNOWN: 7673 /* 7674 * This should cover the case where nothing is connected 7675 * to the port. Crystal+ is p'bly an exception here. 7676 * For Crystal+, port 0 will come up as private loop 7677 * (i.e fp_bind_state will be FC_STATE_LOOP) even when 7678 * nothing is connected to it. 7679 * Current plan is to let userland handle this. 7680 */ 7681 if (port->fp_bind_state == FC_STATE_OFFLINE) 7682 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7683 break; 7684 7685 default: 7686 /* 7687 * Do Nothing. 7688 * Unused: 7689 * val->PortType = FC_HBA_PORTTYPE_GPORT; 7690 */ 7691 break; 7692 } 7693 7694 val->PortSupportedClassofService = 7695 port->fp_hba_port_attrs.supported_cos; 7696 val->PortSupportedFc4Types[0] = 0; 7697 bcopy(port->fp_fc4_types, val->PortActiveFc4Types, 7698 sizeof (val->PortActiveFc4Types)); 7699 bcopy(port->fp_sym_port_name, val->PortSymbolicName, 7700 sizeof (val->PortSymbolicName)); 7701 val->PortSupportedSpeed = 7702 port->fp_hba_port_attrs.supported_speed; 7703 7704 switch (FC_PORT_SPEED_MASK(port->fp_state)) { 7705 case FC_STATE_1GBIT_SPEED: 7706 val->PortSpeed = FC_HBA_PORTSPEED_1GBIT; 7707 break; 7708 case FC_STATE_2GBIT_SPEED: 7709 val->PortSpeed = FC_HBA_PORTSPEED_2GBIT; 7710 break; 7711 case FC_STATE_4GBIT_SPEED: 7712 val->PortSpeed = FC_HBA_PORTSPEED_4GBIT; 7713 break; 7714 case FC_STATE_8GBIT_SPEED: 7715 val->PortSpeed = FC_HBA_PORTSPEED_8GBIT; 7716 break; 7717 case FC_STATE_10GBIT_SPEED: 7718 val->PortSpeed = FC_HBA_PORTSPEED_10GBIT; 7719 break; 7720 case FC_STATE_16GBIT_SPEED: 7721 val->PortSpeed = FC_HBA_PORTSPEED_16GBIT; 7722 break; 7723 default: 7724 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7725 break; 7726 } 7727 val->PortMaxFrameSize = port->fp_hba_port_attrs.max_frame_size; 7728 val->NumberofDiscoveredPorts = port->fp_dev_count; 7729 mutex_exit(&port->fp_mutex); 7730 7731 if (use32 == B_TRUE) { 7732 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7733 val32->version = val->version; 7734 val32->lastChange = val->lastChange; 7735 val32->fp_minor = val->fp_minor; 7736 7737 bcopy(&val->PortWWN.raw_wwn, &val32->PortWWN.raw_wwn, 7738 sizeof (val->PortWWN.raw_wwn)); 7739 bcopy(&val->NodeWWN.raw_wwn, &val32->NodeWWN.raw_wwn, 7740 sizeof (val->NodeWWN.raw_wwn)); 7741 val32->PortFcId = val->PortFcId; 7742 val32->PortState = val->PortState; 7743 val32->PortType = val->PortType; 7744 7745 val32->PortSupportedClassofService = 7746 val->PortSupportedClassofService; 7747 bcopy(val->PortActiveFc4Types, 7748 val32->PortActiveFc4Types, 7749 sizeof (val->PortActiveFc4Types)); 7750 bcopy(val->PortSymbolicName, val32->PortSymbolicName, 7751 sizeof (val->PortSymbolicName)); 7752 bcopy(&val->FabricName, &val32->FabricName, 7753 sizeof (val->FabricName.raw_wwn)); 7754 val32->PortSupportedSpeed = val->PortSupportedSpeed; 7755 val32->PortSpeed = val->PortSpeed; 7756 7757 val32->PortMaxFrameSize = val->PortMaxFrameSize; 7758 val32->NumberofDiscoveredPorts = 7759 val->NumberofDiscoveredPorts; 7760 7761 if (fp_copyout((void *)val32, (void *)fcio->fcio_obuf, 7762 fcio->fcio_olen, mode) == 0) { 7763 if (fp_fcio_copyout(fcio, data, mode)) { 7764 rval = EFAULT; 7765 } 7766 } else { 7767 rval = EFAULT; 7768 } 7769 7770 kmem_free(val32, sizeof (*val32)); 7771 } else { 7772 if (fp_copyout((void *)val, (void *)fcio->fcio_obuf, 7773 fcio->fcio_olen, mode) == 0) { 
7774 if (fp_fcio_copyout(fcio, data, mode)) { 7775 rval = EFAULT; 7776 } 7777 } else { 7778 rval = EFAULT; 7779 } 7780 } 7781 7782 kmem_free(val, sizeof (*val)); 7783 break; 7784 } 7785 7786 case FCIO_GET_DISCOVERED_PORT_ATTRIBUTES: { 7787 fc_hba_port_attributes_t *val; 7788 fc_hba_port_attributes32_t *val32; 7789 uint32_t index = 0; 7790 fc_remote_port_t *tmp_pd; 7791 7792 if (use32 == B_TRUE) { 7793 if (fcio->fcio_olen < sizeof (*val32) || 7794 fcio->fcio_xfer != FCIO_XFER_READ) { 7795 rval = EINVAL; 7796 break; 7797 } 7798 } else { 7799 if (fcio->fcio_olen < sizeof (*val) || 7800 fcio->fcio_xfer != FCIO_XFER_READ) { 7801 rval = EINVAL; 7802 break; 7803 } 7804 } 7805 7806 if (ddi_copyin(fcio->fcio_ibuf, &index, sizeof (index), mode)) { 7807 rval = EFAULT; 7808 break; 7809 } 7810 7811 if (index >= port->fp_dev_count) { 7812 FP_TRACE(FP_NHEAD1(9, 0), 7813 "User supplied index out of range"); 7814 fcio->fcio_errno = FC_OUTOFBOUNDS; 7815 rval = EINVAL; 7816 if (fp_fcio_copyout(fcio, data, mode)) { 7817 rval = EFAULT; 7818 } 7819 break; 7820 } 7821 7822 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7823 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7824 7825 mutex_enter(&port->fp_mutex); 7826 tmp_pd = fctl_lookup_pd_by_index(port, index); 7827 7828 if (tmp_pd == NULL) { 7829 fcio->fcio_errno = FC_BADPORT; 7830 rval = EINVAL; 7831 } else { 7832 val->lastChange = port->fp_last_change; 7833 val->fp_minor = port->fp_instance; 7834 7835 mutex_enter(&tmp_pd->pd_mutex); 7836 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7837 &val->PortWWN.raw_wwn, 7838 sizeof (val->PortWWN.raw_wwn)); 7839 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7840 &val->NodeWWN.raw_wwn, 7841 sizeof (val->NodeWWN.raw_wwn)); 7842 val->PortFcId = tmp_pd->pd_port_id.port_id; 7843 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7844 sizeof (val->PortSymbolicName)); 7845 val->PortSupportedClassofService = tmp_pd->pd_cos; 7846 /* 7847 * we will assume the sizeof these pd_fc4types and 7848 * portActiveFc4Types will remain the same. 
we could 7849 * add in a check for it, but we decided it was unneeded 7850 */ 7851 bcopy((caddr_t)tmp_pd->pd_fc4types, 7852 val->PortActiveFc4Types, 7853 sizeof (tmp_pd->pd_fc4types)); 7854 val->PortState = 7855 fp_map_remote_port_state(tmp_pd->pd_state); 7856 mutex_exit(&tmp_pd->pd_mutex); 7857 7858 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7859 val->PortSupportedFc4Types[0] = 0; 7860 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7861 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7862 val->PortMaxFrameSize = 0; 7863 val->NumberofDiscoveredPorts = 0; 7864 7865 if (use32 == B_TRUE) { 7866 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7867 val32->version = val->version; 7868 val32->lastChange = val->lastChange; 7869 val32->fp_minor = val->fp_minor; 7870 7871 bcopy(&val->PortWWN.raw_wwn, 7872 &val32->PortWWN.raw_wwn, 7873 sizeof (val->PortWWN.raw_wwn)); 7874 bcopy(&val->NodeWWN.raw_wwn, 7875 &val32->NodeWWN.raw_wwn, 7876 sizeof (val->NodeWWN.raw_wwn)); 7877 val32->PortFcId = val->PortFcId; 7878 bcopy(val->PortSymbolicName, 7879 val32->PortSymbolicName, 7880 sizeof (val->PortSymbolicName)); 7881 val32->PortSupportedClassofService = 7882 val->PortSupportedClassofService; 7883 bcopy(val->PortActiveFc4Types, 7884 val32->PortActiveFc4Types, 7885 sizeof (tmp_pd->pd_fc4types)); 7886 7887 val32->PortType = val->PortType; 7888 val32->PortState = val->PortState; 7889 val32->PortSupportedFc4Types[0] = 7890 val->PortSupportedFc4Types[0]; 7891 val32->PortSupportedSpeed = 7892 val->PortSupportedSpeed; 7893 val32->PortSpeed = val->PortSpeed; 7894 val32->PortMaxFrameSize = 7895 val->PortMaxFrameSize; 7896 val32->NumberofDiscoveredPorts = 7897 val->NumberofDiscoveredPorts; 7898 7899 if (fp_copyout((void *)val32, 7900 (void *)fcio->fcio_obuf, 7901 fcio->fcio_olen, mode) == 0) { 7902 if (fp_fcio_copyout(fcio, 7903 data, mode)) { 7904 rval = EFAULT; 7905 } 7906 } else { 7907 rval = EFAULT; 7908 } 7909 7910 kmem_free(val32, sizeof (*val32)); 7911 } else { 7912 if (fp_copyout((void *)val, 7913 (void *)fcio->fcio_obuf, 7914 fcio->fcio_olen, mode) == 0) { 7915 if (fp_fcio_copyout(fcio, data, mode)) { 7916 rval = EFAULT; 7917 } 7918 } else { 7919 rval = EFAULT; 7920 } 7921 } 7922 } 7923 7924 mutex_exit(&port->fp_mutex); 7925 kmem_free(val, sizeof (*val)); 7926 break; 7927 } 7928 7929 case FCIO_GET_PORT_ATTRIBUTES: { 7930 fc_hba_port_attributes_t *val; 7931 fc_hba_port_attributes32_t *val32; 7932 la_wwn_t wwn; 7933 fc_remote_port_t *tmp_pd; 7934 7935 if (use32 == B_TRUE) { 7936 if (fcio->fcio_olen < sizeof (*val32) || 7937 fcio->fcio_xfer != FCIO_XFER_READ) { 7938 rval = EINVAL; 7939 break; 7940 } 7941 } else { 7942 if (fcio->fcio_olen < sizeof (*val) || 7943 fcio->fcio_xfer != FCIO_XFER_READ) { 7944 rval = EINVAL; 7945 break; 7946 } 7947 } 7948 7949 if (ddi_copyin(fcio->fcio_ibuf, &wwn, sizeof (wwn), mode)) { 7950 rval = EFAULT; 7951 break; 7952 } 7953 7954 val = kmem_zalloc(sizeof (*val), KM_SLEEP); 7955 val->version = FC_HBA_PORT_ATTRIBUTES_VERSION; 7956 7957 mutex_enter(&port->fp_mutex); 7958 tmp_pd = fctl_lookup_pd_by_wwn(port, wwn); 7959 val->lastChange = port->fp_last_change; 7960 val->fp_minor = port->fp_instance; 7961 mutex_exit(&port->fp_mutex); 7962 7963 if (tmp_pd == NULL) { 7964 fcio->fcio_errno = FC_BADWWN; 7965 rval = EINVAL; 7966 } else { 7967 mutex_enter(&tmp_pd->pd_mutex); 7968 bcopy(&tmp_pd->pd_port_name.raw_wwn, 7969 &val->PortWWN.raw_wwn, 7970 sizeof (val->PortWWN.raw_wwn)); 7971 bcopy(&tmp_pd->pd_remote_nodep->fd_node_name.raw_wwn, 7972 &val->NodeWWN.raw_wwn, 7973 sizeof 
(val->NodeWWN.raw_wwn)); 7974 val->PortFcId = tmp_pd->pd_port_id.port_id; 7975 bcopy(tmp_pd->pd_spn, val->PortSymbolicName, 7976 sizeof (val->PortSymbolicName)); 7977 val->PortSupportedClassofService = tmp_pd->pd_cos; 7978 val->PortType = FC_HBA_PORTTYPE_UNKNOWN; 7979 val->PortState = 7980 fp_map_remote_port_state(tmp_pd->pd_state); 7981 val->PortSupportedFc4Types[0] = 0; 7982 /* 7983 * we will assume the sizeof these pd_fc4types and 7984 * portActiveFc4Types will remain the same. we could 7985 * add in a check for it, but we decided it was unneeded 7986 */ 7987 bcopy((caddr_t)tmp_pd->pd_fc4types, 7988 val->PortActiveFc4Types, 7989 sizeof (tmp_pd->pd_fc4types)); 7990 val->PortSupportedSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7991 val->PortSpeed = FC_HBA_PORTSPEED_UNKNOWN; 7992 val->PortMaxFrameSize = 0; 7993 val->NumberofDiscoveredPorts = 0; 7994 mutex_exit(&tmp_pd->pd_mutex); 7995 7996 if (use32 == B_TRUE) { 7997 val32 = kmem_zalloc(sizeof (*val32), KM_SLEEP); 7998 val32->version = val->version; 7999 val32->lastChange = val->lastChange; 8000 val32->fp_minor = val->fp_minor; 8001 bcopy(&val->PortWWN.raw_wwn, 8002 &val32->PortWWN.raw_wwn, 8003 sizeof (val->PortWWN.raw_wwn)); 8004 bcopy(&val->NodeWWN.raw_wwn, 8005 &val32->NodeWWN.raw_wwn, 8006 sizeof (val->NodeWWN.raw_wwn)); 8007 val32->PortFcId = val->PortFcId; 8008 bcopy(val->PortSymbolicName, 8009 val32->PortSymbolicName, 8010 sizeof (val->PortSymbolicName)); 8011 val32->PortSupportedClassofService = 8012 val->PortSupportedClassofService; 8013 val32->PortType = val->PortType; 8014 val32->PortState = val->PortState; 8015 val32->PortSupportedFc4Types[0] = 8016 val->PortSupportedFc4Types[0]; 8017 bcopy(val->PortActiveFc4Types, 8018 val32->PortActiveFc4Types, 8019 sizeof (tmp_pd->pd_fc4types)); 8020 val32->PortSupportedSpeed = 8021 val->PortSupportedSpeed; 8022 val32->PortSpeed = val->PortSpeed; 8023 val32->PortMaxFrameSize = val->PortMaxFrameSize; 8024 val32->NumberofDiscoveredPorts = 8025 val->NumberofDiscoveredPorts; 8026 8027 if (fp_copyout((void *)val32, 8028 (void *)fcio->fcio_obuf, 8029 fcio->fcio_olen, mode) == 0) { 8030 if (fp_fcio_copyout(fcio, data, mode)) { 8031 rval = EFAULT; 8032 } 8033 } else { 8034 rval = EFAULT; 8035 } 8036 8037 kmem_free(val32, sizeof (*val32)); 8038 } else { 8039 if (fp_copyout((void *)val, 8040 (void *)fcio->fcio_obuf, 8041 fcio->fcio_olen, mode) == 0) { 8042 if (fp_fcio_copyout(fcio, data, mode)) { 8043 rval = EFAULT; 8044 } 8045 } else { 8046 rval = EFAULT; 8047 } 8048 } 8049 } 8050 kmem_free(val, sizeof (*val)); 8051 break; 8052 } 8053 8054 case FCIO_GET_NUM_DEVS: { 8055 int num_devices; 8056 8057 if (fcio->fcio_olen != sizeof (num_devices) || 8058 fcio->fcio_xfer != FCIO_XFER_READ) { 8059 rval = EINVAL; 8060 break; 8061 } 8062 8063 mutex_enter(&port->fp_mutex); 8064 switch (port->fp_topology) { 8065 case FC_TOP_PRIVATE_LOOP: 8066 case FC_TOP_PT_PT: 8067 num_devices = port->fp_total_devices; 8068 fcio->fcio_errno = FC_SUCCESS; 8069 break; 8070 8071 case FC_TOP_PUBLIC_LOOP: 8072 case FC_TOP_FABRIC: 8073 mutex_exit(&port->fp_mutex); 8074 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, 8075 NULL, KM_SLEEP); 8076 ASSERT(job != NULL); 8077 8078 /* 8079 * In FC-GS-2 the Name Server doesn't send out 8080 * RSCNs for any Name Server Database updates 8081 * When it is finally fixed there is no need 8082 * to probe as below and should be removed. 
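 * Until that happens, every FCIO_GET_NUM_DEVS request on a fabric or
 * public-loop topology re-probes the Name Server through
 * fp_ns_get_devcount() below before reporting fp_total_devices.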
8083 */ 8084 (void) fp_ns_get_devcount(port, job, 0, KM_SLEEP); 8085 fctl_dealloc_job(job); 8086 8087 mutex_enter(&port->fp_mutex); 8088 num_devices = port->fp_total_devices; 8089 fcio->fcio_errno = FC_SUCCESS; 8090 break; 8091 8092 case FC_TOP_NO_NS: 8093 /* FALLTHROUGH */ 8094 case FC_TOP_UNKNOWN: 8095 /* FALLTHROUGH */ 8096 default: 8097 num_devices = 0; 8098 fcio->fcio_errno = FC_SUCCESS; 8099 break; 8100 } 8101 mutex_exit(&port->fp_mutex); 8102 8103 if (fp_copyout((void *)&num_devices, 8104 (void *)fcio->fcio_obuf, fcio->fcio_olen, 8105 mode) == 0) { 8106 if (fp_fcio_copyout(fcio, data, mode)) { 8107 rval = EFAULT; 8108 } 8109 } else { 8110 rval = EFAULT; 8111 } 8112 break; 8113 } 8114 8115 case FCIO_GET_DEV_LIST: { 8116 int num_devices; 8117 int new_count; 8118 int map_size; 8119 8120 if (fcio->fcio_xfer != FCIO_XFER_READ || 8121 fcio->fcio_alen != sizeof (new_count)) { 8122 rval = EINVAL; 8123 break; 8124 } 8125 8126 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 8127 8128 mutex_enter(&port->fp_mutex); 8129 if (num_devices < port->fp_total_devices) { 8130 fcio->fcio_errno = FC_TOOMANY; 8131 new_count = port->fp_total_devices; 8132 mutex_exit(&port->fp_mutex); 8133 8134 if (fp_copyout((void *)&new_count, 8135 (void *)fcio->fcio_abuf, 8136 sizeof (new_count), mode)) { 8137 rval = EFAULT; 8138 break; 8139 } 8140 8141 if (fp_fcio_copyout(fcio, data, mode)) { 8142 rval = EFAULT; 8143 break; 8144 } 8145 rval = EINVAL; 8146 break; 8147 } 8148 8149 if (port->fp_total_devices <= 0) { 8150 fcio->fcio_errno = FC_NO_MAP; 8151 new_count = port->fp_total_devices; 8152 mutex_exit(&port->fp_mutex); 8153 8154 if (fp_copyout((void *)&new_count, 8155 (void *)fcio->fcio_abuf, 8156 sizeof (new_count), mode)) { 8157 rval = EFAULT; 8158 break; 8159 } 8160 8161 if (fp_fcio_copyout(fcio, data, mode)) { 8162 rval = EFAULT; 8163 break; 8164 } 8165 rval = EINVAL; 8166 break; 8167 } 8168 8169 switch (port->fp_topology) { 8170 case FC_TOP_PRIVATE_LOOP: 8171 if (fp_fillout_loopmap(port, fcio, 8172 mode) != FC_SUCCESS) { 8173 rval = EFAULT; 8174 break; 8175 } 8176 if (fp_fcio_copyout(fcio, data, mode)) { 8177 rval = EFAULT; 8178 } 8179 break; 8180 8181 case FC_TOP_PT_PT: 8182 if (fp_fillout_p2pmap(port, fcio, 8183 mode) != FC_SUCCESS) { 8184 rval = EFAULT; 8185 break; 8186 } 8187 if (fp_fcio_copyout(fcio, data, mode)) { 8188 rval = EFAULT; 8189 } 8190 break; 8191 8192 case FC_TOP_PUBLIC_LOOP: 8193 case FC_TOP_FABRIC: { 8194 fctl_ns_req_t *ns_cmd; 8195 8196 map_size = 8197 sizeof (fc_port_dev_t) * port->fp_total_devices; 8198 8199 mutex_exit(&port->fp_mutex); 8200 8201 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 8202 sizeof (ns_resp_gan_t), map_size, 8203 (FCTL_NS_FILL_NS_MAP | FCTL_NS_BUF_IS_USERLAND), 8204 KM_SLEEP); 8205 ASSERT(ns_cmd != NULL); 8206 8207 ns_cmd->ns_gan_index = 0; 8208 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 8209 ns_cmd->ns_cmd_code = NS_GA_NXT; 8210 ns_cmd->ns_gan_max = map_size / sizeof (fc_port_dev_t); 8211 8212 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, 8213 NULL, KM_SLEEP); 8214 ASSERT(job != NULL); 8215 8216 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 8217 8218 if (ret != FC_SUCCESS || 8219 job->job_result != FC_SUCCESS) { 8220 fctl_free_ns_cmd(ns_cmd); 8221 8222 fcio->fcio_errno = job->job_result; 8223 new_count = 0; 8224 if (fp_copyout((void *)&new_count, 8225 (void *)fcio->fcio_abuf, 8226 sizeof (new_count), mode)) { 8227 fctl_dealloc_job(job); 8228 mutex_enter(&port->fp_mutex); 8229 rval = EFAULT; 8230 break; 8231 } 8232 8233 if (fp_fcio_copyout(fcio, data, 
mode)) { 8234 fctl_dealloc_job(job); 8235 mutex_enter(&port->fp_mutex); 8236 rval = EFAULT; 8237 break; 8238 } 8239 rval = EIO; 8240 mutex_enter(&port->fp_mutex); 8241 break; 8242 } 8243 fctl_dealloc_job(job); 8244 8245 new_count = ns_cmd->ns_gan_index; 8246 if (fp_copyout((void *)&new_count, 8247 (void *)fcio->fcio_abuf, sizeof (new_count), 8248 mode)) { 8249 rval = EFAULT; 8250 fctl_free_ns_cmd(ns_cmd); 8251 mutex_enter(&port->fp_mutex); 8252 break; 8253 } 8254 8255 if (fp_copyout((void *)ns_cmd->ns_data_buf, 8256 (void *)fcio->fcio_obuf, sizeof (fc_port_dev_t) * 8257 ns_cmd->ns_gan_index, mode)) { 8258 rval = EFAULT; 8259 fctl_free_ns_cmd(ns_cmd); 8260 mutex_enter(&port->fp_mutex); 8261 break; 8262 } 8263 fctl_free_ns_cmd(ns_cmd); 8264 8265 if (fp_fcio_copyout(fcio, data, mode)) { 8266 rval = EFAULT; 8267 } 8268 mutex_enter(&port->fp_mutex); 8269 break; 8270 } 8271 8272 case FC_TOP_NO_NS: 8273 /* FALLTHROUGH */ 8274 case FC_TOP_UNKNOWN: 8275 /* FALLTHROUGH */ 8276 default: 8277 fcio->fcio_errno = FC_NO_MAP; 8278 num_devices = port->fp_total_devices; 8279 8280 if (fp_copyout((void *)&new_count, 8281 (void *)fcio->fcio_abuf, 8282 sizeof (new_count), mode)) { 8283 rval = EFAULT; 8284 break; 8285 } 8286 8287 if (fp_fcio_copyout(fcio, data, mode)) { 8288 rval = EFAULT; 8289 break; 8290 } 8291 rval = EINVAL; 8292 break; 8293 } 8294 mutex_exit(&port->fp_mutex); 8295 break; 8296 } 8297 8298 case FCIO_GET_SYM_PNAME: { 8299 rval = ENOTSUP; 8300 break; 8301 } 8302 8303 case FCIO_GET_SYM_NNAME: { 8304 rval = ENOTSUP; 8305 break; 8306 } 8307 8308 case FCIO_SET_SYM_PNAME: { 8309 rval = ENOTSUP; 8310 break; 8311 } 8312 8313 case FCIO_SET_SYM_NNAME: { 8314 rval = ENOTSUP; 8315 break; 8316 } 8317 8318 case FCIO_GET_LOGI_PARAMS: { 8319 la_wwn_t pwwn; 8320 la_wwn_t *my_pwwn; 8321 la_els_logi_t *params; 8322 la_els_logi32_t *params32; 8323 fc_remote_node_t *node; 8324 fc_remote_port_t *pd; 8325 8326 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8327 (fcio->fcio_xfer & FCIO_XFER_READ) == 0 || 8328 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0) { 8329 rval = EINVAL; 8330 break; 8331 } 8332 8333 if (use32 == B_TRUE) { 8334 if (fcio->fcio_olen != sizeof (la_els_logi32_t)) { 8335 rval = EINVAL; 8336 break; 8337 } 8338 } else { 8339 if (fcio->fcio_olen != sizeof (la_els_logi_t)) { 8340 rval = EINVAL; 8341 break; 8342 } 8343 } 8344 8345 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8346 rval = EFAULT; 8347 break; 8348 } 8349 8350 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8351 if (pd == NULL) { 8352 mutex_enter(&port->fp_mutex); 8353 my_pwwn = &port->fp_service_params.nport_ww_name; 8354 mutex_exit(&port->fp_mutex); 8355 8356 if (fctl_wwn_cmp(&pwwn, my_pwwn) != 0) { 8357 rval = ENXIO; 8358 break; 8359 } 8360 8361 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8362 mutex_enter(&port->fp_mutex); 8363 *params = port->fp_service_params; 8364 mutex_exit(&port->fp_mutex); 8365 } else { 8366 params = kmem_zalloc(sizeof (*params), KM_SLEEP); 8367 8368 mutex_enter(&pd->pd_mutex); 8369 params->ls_code.mbz = params->ls_code.ls_code = 0; 8370 params->common_service = pd->pd_csp; 8371 params->nport_ww_name = pd->pd_port_name; 8372 params->class_1 = pd->pd_clsp1; 8373 params->class_2 = pd->pd_clsp2; 8374 params->class_3 = pd->pd_clsp3; 8375 node = pd->pd_remote_nodep; 8376 mutex_exit(&pd->pd_mutex); 8377 8378 bzero(params->reserved, sizeof (params->reserved)); 8379 8380 mutex_enter(&node->fd_mutex); 8381 bcopy(node->fd_vv, params->vendor_version, 8382 sizeof (node->fd_vv)); 8383 params->node_ww_name = 
node->fd_node_name; 8384 mutex_exit(&node->fd_mutex); 8385 8386 fctl_release_remote_port(pd); 8387 } 8388 8389 if (use32 == B_TRUE) { 8390 params32 = kmem_zalloc(sizeof (*params32), KM_SLEEP); 8391 8392 params32->ls_code.mbz = params->ls_code.mbz; 8393 params32->common_service = params->common_service; 8394 params32->nport_ww_name = params->nport_ww_name; 8395 params32->class_1 = params->class_1; 8396 params32->class_2 = params->class_2; 8397 params32->class_3 = params->class_3; 8398 bzero(params32->reserved, sizeof (params32->reserved)); 8399 bcopy(params->vendor_version, params32->vendor_version, 8400 sizeof (node->fd_vv)); 8401 params32->node_ww_name = params->node_ww_name; 8402 8403 if (ddi_copyout((void *)params32, 8404 (void *)fcio->fcio_obuf, 8405 sizeof (*params32), mode)) { 8406 rval = EFAULT; 8407 } 8408 8409 kmem_free(params32, sizeof (*params32)); 8410 } else { 8411 if (ddi_copyout((void *)params, (void *)fcio->fcio_obuf, 8412 sizeof (*params), mode)) { 8413 rval = EFAULT; 8414 } 8415 } 8416 8417 kmem_free(params, sizeof (*params)); 8418 if (fp_fcio_copyout(fcio, data, mode)) { 8419 rval = EFAULT; 8420 } 8421 break; 8422 } 8423 8424 case FCIO_DEV_LOGOUT: 8425 case FCIO_DEV_LOGIN: 8426 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8427 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8428 rval = EINVAL; 8429 8430 if (fp_fcio_copyout(fcio, data, mode)) { 8431 rval = EFAULT; 8432 } 8433 break; 8434 } 8435 8436 if (fcio->fcio_cmd == FCIO_DEV_LOGIN) { 8437 jcode = JOB_FCIO_LOGIN; 8438 } else { 8439 jcode = JOB_FCIO_LOGOUT; 8440 } 8441 8442 kfcio = kmem_zalloc(sizeof (*kfcio), KM_SLEEP); 8443 bcopy(fcio, kfcio, sizeof (*fcio)); 8444 8445 if (kfcio->fcio_ilen) { 8446 kfcio->fcio_ibuf = kmem_zalloc(kfcio->fcio_ilen, 8447 KM_SLEEP); 8448 8449 if (ddi_copyin((void *)fcio->fcio_ibuf, 8450 (void *)kfcio->fcio_ibuf, kfcio->fcio_ilen, 8451 mode)) { 8452 rval = EFAULT; 8453 8454 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8455 kmem_free(kfcio, sizeof (*kfcio)); 8456 fcio->fcio_errno = job->job_result; 8457 if (fp_fcio_copyout(fcio, data, mode)) { 8458 rval = EFAULT; 8459 } 8460 break; 8461 } 8462 } 8463 8464 job = fctl_alloc_job(jcode, 0, NULL, NULL, KM_SLEEP); 8465 job->job_private = kfcio; 8466 8467 fctl_enque_job(port, job); 8468 fctl_jobwait(job); 8469 8470 rval = job->job_result; 8471 8472 fcio->fcio_errno = kfcio->fcio_errno; 8473 if (fp_fcio_copyout(fcio, data, mode)) { 8474 rval = EFAULT; 8475 } 8476 8477 kmem_free(kfcio->fcio_ibuf, kfcio->fcio_ilen); 8478 kmem_free(kfcio, sizeof (*kfcio)); 8479 fctl_dealloc_job(job); 8480 break; 8481 8482 case FCIO_GET_STATE: { 8483 la_wwn_t pwwn; 8484 uint32_t state; 8485 fc_remote_port_t *pd; 8486 fctl_ns_req_t *ns_cmd; 8487 8488 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8489 fcio->fcio_olen != sizeof (state) || 8490 (fcio->fcio_xfer & FCIO_XFER_WRITE) == 0 || 8491 (fcio->fcio_xfer & FCIO_XFER_READ) == 0) { 8492 rval = EINVAL; 8493 break; 8494 } 8495 8496 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8497 rval = EFAULT; 8498 break; 8499 } 8500 fcio->fcio_errno = 0; 8501 8502 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8503 if (pd == NULL) { 8504 mutex_enter(&port->fp_mutex); 8505 if (FC_IS_TOP_SWITCH(port->fp_topology)) { 8506 mutex_exit(&port->fp_mutex); 8507 job = fctl_alloc_job(JOB_PLOGI_ONE, 0, 8508 NULL, NULL, KM_SLEEP); 8509 8510 job->job_counter = 1; 8511 job->job_result = FC_SUCCESS; 8512 8513 ns_cmd = fctl_alloc_ns_cmd( 8514 sizeof (ns_req_gid_pn_t), 8515 sizeof (ns_resp_gid_pn_t), 8516 sizeof (ns_resp_gid_pn_t), 8517 
FCTL_NS_BUF_IS_USERLAND, KM_SLEEP); 8518 ASSERT(ns_cmd != NULL); 8519 8520 ns_cmd->ns_cmd_code = NS_GID_PN; 8521 ((ns_req_gid_pn_t *) 8522 (ns_cmd->ns_cmd_buf))->pwwn = pwwn; 8523 8524 ret = fp_ns_query(port, ns_cmd, job, 8525 1, KM_SLEEP); 8526 8527 if (ret != FC_SUCCESS || job->job_result != 8528 FC_SUCCESS) { 8529 if (ret != FC_SUCCESS) { 8530 fcio->fcio_errno = ret; 8531 } else { 8532 fcio->fcio_errno = 8533 job->job_result; 8534 } 8535 rval = EIO; 8536 } else { 8537 state = PORT_DEVICE_INVALID; 8538 } 8539 fctl_free_ns_cmd(ns_cmd); 8540 fctl_dealloc_job(job); 8541 } else { 8542 mutex_exit(&port->fp_mutex); 8543 fcio->fcio_errno = FC_BADWWN; 8544 rval = ENXIO; 8545 } 8546 } else { 8547 mutex_enter(&pd->pd_mutex); 8548 state = pd->pd_state; 8549 mutex_exit(&pd->pd_mutex); 8550 8551 fctl_release_remote_port(pd); 8552 } 8553 8554 if (!rval) { 8555 if (ddi_copyout((void *)&state, 8556 (void *)fcio->fcio_obuf, sizeof (state), 8557 mode)) { 8558 rval = EFAULT; 8559 } 8560 } 8561 if (fp_fcio_copyout(fcio, data, mode)) { 8562 rval = EFAULT; 8563 } 8564 break; 8565 } 8566 8567 case FCIO_DEV_REMOVE: { 8568 la_wwn_t pwwn; 8569 fc_portmap_t *changelist; 8570 fc_remote_port_t *pd; 8571 8572 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 8573 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8574 rval = EINVAL; 8575 break; 8576 } 8577 8578 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, sizeof (pwwn), mode)) { 8579 rval = EFAULT; 8580 break; 8581 } 8582 8583 pd = fctl_hold_remote_port_by_pwwn(port, &pwwn); 8584 if (pd == NULL) { 8585 rval = ENXIO; 8586 fcio->fcio_errno = FC_BADWWN; 8587 if (fp_fcio_copyout(fcio, data, mode)) { 8588 rval = EFAULT; 8589 } 8590 break; 8591 } 8592 8593 mutex_enter(&pd->pd_mutex); 8594 if (pd->pd_ref_count > 1) { 8595 mutex_exit(&pd->pd_mutex); 8596 8597 rval = EBUSY; 8598 fcio->fcio_errno = FC_FAILURE; 8599 fctl_release_remote_port(pd); 8600 8601 if (fp_fcio_copyout(fcio, data, mode)) { 8602 rval = EFAULT; 8603 } 8604 break; 8605 } 8606 mutex_exit(&pd->pd_mutex); 8607 8608 changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP); 8609 8610 fctl_copy_portmap(changelist, pd); 8611 changelist->map_type = PORT_DEVICE_USER_LOGOUT; 8612 (void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1); 8613 8614 fctl_release_remote_port(pd); 8615 break; 8616 } 8617 8618 case FCIO_GET_FCODE_REV: { 8619 caddr_t fcode_rev; 8620 fc_fca_pm_t pm; 8621 8622 if (fcio->fcio_olen < FC_FCODE_REV_SIZE || 8623 fcio->fcio_xfer != FCIO_XFER_READ) { 8624 rval = EINVAL; 8625 break; 8626 } 8627 bzero((caddr_t)&pm, sizeof (pm)); 8628 8629 fcode_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8630 8631 pm.pm_cmd_flags = FC_FCA_PM_READ; 8632 pm.pm_cmd_code = FC_PORT_GET_FCODE_REV; 8633 pm.pm_data_len = fcio->fcio_olen; 8634 pm.pm_data_buf = fcode_rev; 8635 8636 ret = port->fp_fca_tran->fca_port_manage( 8637 port->fp_fca_handle, &pm); 8638 8639 if (ret == FC_SUCCESS) { 8640 if (ddi_copyout((void *)fcode_rev, 8641 (void *)fcio->fcio_obuf, 8642 fcio->fcio_olen, mode) == 0) { 8643 if (fp_fcio_copyout(fcio, data, mode)) { 8644 rval = EFAULT; 8645 } 8646 } else { 8647 rval = EFAULT; 8648 } 8649 } else { 8650 /* 8651 * check if buffer was not large enough to obtain 8652 * FCODE version. 
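 * (pm_data_len presumably carries the size the FCA actually needs,
 * so a caller that sees ENOMEM can retry with a larger buffer.)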
8653 */ 8654 if (pm.pm_data_len > fcio->fcio_olen) { 8655 rval = ENOMEM; 8656 } else { 8657 rval = EIO; 8658 } 8659 fcio->fcio_errno = ret; 8660 if (fp_fcio_copyout(fcio, data, mode)) { 8661 rval = EFAULT; 8662 } 8663 } 8664 kmem_free(fcode_rev, fcio->fcio_olen); 8665 break; 8666 } 8667 8668 case FCIO_GET_FW_REV: { 8669 caddr_t fw_rev; 8670 fc_fca_pm_t pm; 8671 8672 if (fcio->fcio_olen < FC_FW_REV_SIZE || 8673 fcio->fcio_xfer != FCIO_XFER_READ) { 8674 rval = EINVAL; 8675 break; 8676 } 8677 bzero((caddr_t)&pm, sizeof (pm)); 8678 8679 fw_rev = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 8680 8681 pm.pm_cmd_flags = FC_FCA_PM_READ; 8682 pm.pm_cmd_code = FC_PORT_GET_FW_REV; 8683 pm.pm_data_len = fcio->fcio_olen; 8684 pm.pm_data_buf = fw_rev; 8685 8686 ret = port->fp_fca_tran->fca_port_manage( 8687 port->fp_fca_handle, &pm); 8688 8689 if (ret == FC_SUCCESS) { 8690 if (ddi_copyout((void *)fw_rev, 8691 (void *)fcio->fcio_obuf, 8692 fcio->fcio_olen, mode) == 0) { 8693 if (fp_fcio_copyout(fcio, data, mode)) { 8694 rval = EFAULT; 8695 } 8696 } else { 8697 rval = EFAULT; 8698 } 8699 } else { 8700 if (fp_fcio_copyout(fcio, data, mode)) { 8701 rval = EFAULT; 8702 } 8703 rval = EIO; 8704 } 8705 kmem_free(fw_rev, fcio->fcio_olen); 8706 break; 8707 } 8708 8709 case FCIO_GET_DUMP_SIZE: { 8710 uint32_t dump_size; 8711 fc_fca_pm_t pm; 8712 8713 if (fcio->fcio_olen != sizeof (dump_size) || 8714 fcio->fcio_xfer != FCIO_XFER_READ) { 8715 rval = EINVAL; 8716 break; 8717 } 8718 bzero((caddr_t)&pm, sizeof (pm)); 8719 pm.pm_cmd_flags = FC_FCA_PM_READ; 8720 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8721 pm.pm_data_len = sizeof (dump_size); 8722 pm.pm_data_buf = (caddr_t)&dump_size; 8723 8724 ret = port->fp_fca_tran->fca_port_manage( 8725 port->fp_fca_handle, &pm); 8726 8727 if (ret == FC_SUCCESS) { 8728 if (ddi_copyout((void *)&dump_size, 8729 (void *)fcio->fcio_obuf, sizeof (dump_size), 8730 mode) == 0) { 8731 if (fp_fcio_copyout(fcio, data, mode)) { 8732 rval = EFAULT; 8733 } 8734 } else { 8735 rval = EFAULT; 8736 } 8737 } else { 8738 fcio->fcio_errno = ret; 8739 rval = EIO; 8740 if (fp_fcio_copyout(fcio, data, mode)) { 8741 rval = EFAULT; 8742 } 8743 } 8744 break; 8745 } 8746 8747 case FCIO_DOWNLOAD_FW: { 8748 caddr_t firmware; 8749 fc_fca_pm_t pm; 8750 8751 if (fcio->fcio_ilen <= 0 || 8752 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8753 rval = EINVAL; 8754 break; 8755 } 8756 8757 firmware = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8758 if (ddi_copyin(fcio->fcio_ibuf, firmware, 8759 fcio->fcio_ilen, mode)) { 8760 rval = EFAULT; 8761 kmem_free(firmware, fcio->fcio_ilen); 8762 break; 8763 } 8764 8765 bzero((caddr_t)&pm, sizeof (pm)); 8766 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8767 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FW; 8768 pm.pm_data_len = fcio->fcio_ilen; 8769 pm.pm_data_buf = firmware; 8770 8771 ret = port->fp_fca_tran->fca_port_manage( 8772 port->fp_fca_handle, &pm); 8773 8774 kmem_free(firmware, fcio->fcio_ilen); 8775 8776 if (ret != FC_SUCCESS) { 8777 fcio->fcio_errno = ret; 8778 rval = EIO; 8779 if (fp_fcio_copyout(fcio, data, mode)) { 8780 rval = EFAULT; 8781 } 8782 } 8783 break; 8784 } 8785 8786 case FCIO_DOWNLOAD_FCODE: { 8787 caddr_t fcode; 8788 fc_fca_pm_t pm; 8789 8790 if (fcio->fcio_ilen <= 0 || 8791 fcio->fcio_xfer != FCIO_XFER_WRITE) { 8792 rval = EINVAL; 8793 break; 8794 } 8795 8796 fcode = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 8797 if (ddi_copyin(fcio->fcio_ibuf, fcode, 8798 fcio->fcio_ilen, mode)) { 8799 rval = EFAULT; 8800 kmem_free(fcode, fcio->fcio_ilen); 8801 break; 8802 } 8803 8804 bzero((caddr_t)&pm, 
sizeof (pm)); 8805 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 8806 pm.pm_cmd_code = FC_PORT_DOWNLOAD_FCODE; 8807 pm.pm_data_len = fcio->fcio_ilen; 8808 pm.pm_data_buf = fcode; 8809 8810 ret = port->fp_fca_tran->fca_port_manage( 8811 port->fp_fca_handle, &pm); 8812 8813 kmem_free(fcode, fcio->fcio_ilen); 8814 8815 if (ret != FC_SUCCESS) { 8816 fcio->fcio_errno = ret; 8817 rval = EIO; 8818 if (fp_fcio_copyout(fcio, data, mode)) { 8819 rval = EFAULT; 8820 } 8821 } 8822 break; 8823 } 8824 8825 case FCIO_FORCE_DUMP: 8826 ret = port->fp_fca_tran->fca_reset( 8827 port->fp_fca_handle, FC_FCA_CORE); 8828 8829 if (ret != FC_SUCCESS) { 8830 fcio->fcio_errno = ret; 8831 rval = EIO; 8832 if (fp_fcio_copyout(fcio, data, mode)) { 8833 rval = EFAULT; 8834 } 8835 } 8836 break; 8837 8838 case FCIO_GET_DUMP: { 8839 caddr_t dump; 8840 uint32_t dump_size; 8841 fc_fca_pm_t pm; 8842 8843 if (fcio->fcio_xfer != FCIO_XFER_READ) { 8844 rval = EINVAL; 8845 break; 8846 } 8847 bzero((caddr_t)&pm, sizeof (pm)); 8848 8849 pm.pm_cmd_flags = FC_FCA_PM_READ; 8850 pm.pm_cmd_code = FC_PORT_GET_DUMP_SIZE; 8851 pm.pm_data_len = sizeof (dump_size); 8852 pm.pm_data_buf = (caddr_t)&dump_size; 8853 8854 ret = port->fp_fca_tran->fca_port_manage( 8855 port->fp_fca_handle, &pm); 8856 8857 if (ret != FC_SUCCESS) { 8858 fcio->fcio_errno = ret; 8859 rval = EIO; 8860 if (fp_fcio_copyout(fcio, data, mode)) { 8861 rval = EFAULT; 8862 } 8863 break; 8864 } 8865 if (fcio->fcio_olen != dump_size) { 8866 fcio->fcio_errno = FC_NOMEM; 8867 rval = EINVAL; 8868 if (fp_fcio_copyout(fcio, data, mode)) { 8869 rval = EFAULT; 8870 } 8871 break; 8872 } 8873 8874 dump = kmem_zalloc(dump_size, KM_SLEEP); 8875 8876 bzero((caddr_t)&pm, sizeof (pm)); 8877 pm.pm_cmd_flags = FC_FCA_PM_READ; 8878 pm.pm_cmd_code = FC_PORT_GET_DUMP; 8879 pm.pm_data_len = dump_size; 8880 pm.pm_data_buf = dump; 8881 8882 ret = port->fp_fca_tran->fca_port_manage( 8883 port->fp_fca_handle, &pm); 8884 8885 if (ret == FC_SUCCESS) { 8886 if (ddi_copyout((void *)dump, (void *)fcio->fcio_obuf, 8887 dump_size, mode) == 0) { 8888 if (fp_fcio_copyout(fcio, data, mode)) { 8889 rval = EFAULT; 8890 } 8891 } else { 8892 rval = EFAULT; 8893 } 8894 } else { 8895 fcio->fcio_errno = ret; 8896 rval = EIO; 8897 if (fp_fcio_copyout(fcio, data, mode)) { 8898 rval = EFAULT; 8899 } 8900 } 8901 kmem_free(dump, dump_size); 8902 break; 8903 } 8904 8905 case FCIO_GET_TOPOLOGY: { 8906 uint32_t user_topology; 8907 8908 if (fcio->fcio_xfer != FCIO_XFER_READ || 8909 fcio->fcio_olen != sizeof (user_topology)) { 8910 rval = EINVAL; 8911 break; 8912 } 8913 8914 mutex_enter(&port->fp_mutex); 8915 if (FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 8916 user_topology = FC_TOP_UNKNOWN; 8917 } else { 8918 user_topology = port->fp_topology; 8919 } 8920 mutex_exit(&port->fp_mutex); 8921 8922 if (ddi_copyout((void *)&user_topology, 8923 (void *)fcio->fcio_obuf, sizeof (user_topology), 8924 mode)) { 8925 rval = EFAULT; 8926 } 8927 break; 8928 } 8929 8930 case FCIO_RESET_LINK: { 8931 la_wwn_t pwwn; 8932 8933 /* 8934 * Look at the output buffer field; if this field has zero 8935 * bytes then attempt to reset the local link/loop. If the 8936 * fcio_ibuf field points to a WWN, see if it's an NL_Port, 8937 * and if yes, determine the LFA and reset the remote LIP 8938 * by LINIT ELS. 
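 * (Presumably an all-zero WWN in fcio_ibuf is what selects the local
 * link/loop reset; the JOB_LINK_RESET job handler makes that
 * distinction from the WWN copied in below.)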
8939 */ 8940 8941 if (fcio->fcio_xfer != FCIO_XFER_WRITE || 8942 fcio->fcio_ilen != sizeof (pwwn)) { 8943 rval = EINVAL; 8944 break; 8945 } 8946 8947 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 8948 sizeof (pwwn), mode)) { 8949 rval = EFAULT; 8950 break; 8951 } 8952 8953 mutex_enter(&port->fp_mutex); 8954 if (port->fp_soft_state & FP_SOFT_IN_LINK_RESET) { 8955 mutex_exit(&port->fp_mutex); 8956 break; 8957 } 8958 port->fp_soft_state |= FP_SOFT_IN_LINK_RESET; 8959 mutex_exit(&port->fp_mutex); 8960 8961 job = fctl_alloc_job(JOB_LINK_RESET, 0, NULL, NULL, KM_SLEEP); 8962 if (job == NULL) { 8963 rval = ENOMEM; 8964 break; 8965 } 8966 job->job_counter = 1; 8967 job->job_private = (void *)&pwwn; 8968 8969 fctl_enque_job(port, job); 8970 fctl_jobwait(job); 8971 8972 mutex_enter(&port->fp_mutex); 8973 port->fp_soft_state &= ~FP_SOFT_IN_LINK_RESET; 8974 mutex_exit(&port->fp_mutex); 8975 8976 if (job->job_result != FC_SUCCESS) { 8977 fcio->fcio_errno = job->job_result; 8978 rval = EIO; 8979 if (fp_fcio_copyout(fcio, data, mode)) { 8980 rval = EFAULT; 8981 } 8982 } 8983 fctl_dealloc_job(job); 8984 break; 8985 } 8986 8987 case FCIO_RESET_HARD: 8988 ret = port->fp_fca_tran->fca_reset( 8989 port->fp_fca_handle, FC_FCA_RESET); 8990 if (ret != FC_SUCCESS) { 8991 fcio->fcio_errno = ret; 8992 rval = EIO; 8993 if (fp_fcio_copyout(fcio, data, mode)) { 8994 rval = EFAULT; 8995 } 8996 } 8997 break; 8998 8999 case FCIO_RESET_HARD_CORE: 9000 ret = port->fp_fca_tran->fca_reset( 9001 port->fp_fca_handle, FC_FCA_RESET_CORE); 9002 if (ret != FC_SUCCESS) { 9003 rval = EIO; 9004 fcio->fcio_errno = ret; 9005 if (fp_fcio_copyout(fcio, data, mode)) { 9006 rval = EFAULT; 9007 } 9008 } 9009 break; 9010 9011 case FCIO_DIAG: { 9012 fc_fca_pm_t pm; 9013 9014 bzero((caddr_t)&pm, sizeof (fc_fca_pm_t)); 9015 9016 /* Validate user buffer from ioctl call. */ 9017 if (((fcio->fcio_ilen > 0) && (fcio->fcio_ibuf == NULL)) || 9018 ((fcio->fcio_ilen <= 0) && (fcio->fcio_ibuf != NULL)) || 9019 ((fcio->fcio_alen > 0) && (fcio->fcio_abuf == NULL)) || 9020 ((fcio->fcio_alen <= 0) && (fcio->fcio_abuf != NULL)) || 9021 ((fcio->fcio_olen > 0) && (fcio->fcio_obuf == NULL)) || 9022 ((fcio->fcio_olen <= 0) && (fcio->fcio_obuf != NULL))) { 9023 rval = EFAULT; 9024 break; 9025 } 9026 9027 if ((pm.pm_cmd_len = fcio->fcio_ilen) > 0) { 9028 pm.pm_cmd_buf = kmem_zalloc(fcio->fcio_ilen, KM_SLEEP); 9029 if (ddi_copyin(fcio->fcio_ibuf, pm.pm_cmd_buf, 9030 fcio->fcio_ilen, mode)) { 9031 rval = EFAULT; 9032 goto fp_fcio_diag_cleanup; 9033 } 9034 } 9035 9036 if ((pm.pm_data_len = fcio->fcio_alen) > 0) { 9037 pm.pm_data_buf = kmem_zalloc(fcio->fcio_alen, KM_SLEEP); 9038 if (ddi_copyin(fcio->fcio_abuf, pm.pm_data_buf, 9039 fcio->fcio_alen, mode)) { 9040 rval = EFAULT; 9041 goto fp_fcio_diag_cleanup; 9042 } 9043 } 9044 9045 if ((pm.pm_stat_len = fcio->fcio_olen) > 0) { 9046 pm.pm_stat_buf = kmem_zalloc(fcio->fcio_olen, KM_SLEEP); 9047 } 9048 9049 pm.pm_cmd_code = FC_PORT_DIAG; 9050 pm.pm_cmd_flags = fcio->fcio_cmd_flags; 9051 9052 ret = port->fp_fca_tran->fca_port_manage( 9053 port->fp_fca_handle, &pm); 9054 9055 if (ret != FC_SUCCESS) { 9056 if (ret == FC_INVALID_REQUEST) { 9057 rval = ENOTTY; 9058 } else { 9059 rval = EIO; 9060 } 9061 9062 fcio->fcio_errno = ret; 9063 if (fp_fcio_copyout(fcio, data, mode)) { 9064 rval = EFAULT; 9065 } 9066 goto fp_fcio_diag_cleanup; 9067 } 9068 9069 /* 9070 * pm_stat_len will contain the number of status bytes 9071 * an FCA driver requires to return the complete status 9072 * of the requested diag operation. 
If the user buffer 9073 * is not large enough to hold the entire status, We 9074 * copy only the portion of data the fits in the buffer and 9075 * return a ENOMEM to the user application. 9076 */ 9077 if (pm.pm_stat_len > fcio->fcio_olen) { 9078 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 9079 "fp:FCIO_DIAG:status buffer too small\n"); 9080 9081 rval = ENOMEM; 9082 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9083 fcio->fcio_olen, mode)) { 9084 rval = EFAULT; 9085 goto fp_fcio_diag_cleanup; 9086 } 9087 } else { 9088 /* 9089 * Copy only data pm_stat_len bytes of data 9090 */ 9091 if (ddi_copyout(pm.pm_stat_buf, fcio->fcio_obuf, 9092 pm.pm_stat_len, mode)) { 9093 rval = EFAULT; 9094 goto fp_fcio_diag_cleanup; 9095 } 9096 } 9097 9098 if (fp_fcio_copyout(fcio, data, mode)) { 9099 rval = EFAULT; 9100 } 9101 9102 fp_fcio_diag_cleanup: 9103 if (pm.pm_cmd_buf != NULL) { 9104 kmem_free(pm.pm_cmd_buf, fcio->fcio_ilen); 9105 } 9106 if (pm.pm_data_buf != NULL) { 9107 kmem_free(pm.pm_data_buf, fcio->fcio_alen); 9108 } 9109 if (pm.pm_stat_buf != NULL) { 9110 kmem_free(pm.pm_stat_buf, fcio->fcio_olen); 9111 } 9112 9113 break; 9114 } 9115 9116 case FCIO_GET_NODE_ID: { 9117 /* validate parameters */ 9118 if (fcio->fcio_xfer != FCIO_XFER_READ || 9119 fcio->fcio_olen < sizeof (fc_rnid_t)) { 9120 rval = EINVAL; 9121 break; 9122 } 9123 9124 rval = fp_get_rnid(port, data, mode, fcio); 9125 9126 /* ioctl handling is over */ 9127 break; 9128 } 9129 9130 case FCIO_SEND_NODE_ID: { 9131 la_wwn_t pwwn; 9132 9133 /* validate parameters */ 9134 if (fcio->fcio_ilen != sizeof (la_wwn_t) || 9135 fcio->fcio_xfer != FCIO_XFER_READ) { 9136 rval = EINVAL; 9137 break; 9138 } 9139 9140 if (ddi_copyin(fcio->fcio_ibuf, &pwwn, 9141 sizeof (la_wwn_t), mode)) { 9142 rval = EFAULT; 9143 break; 9144 } 9145 9146 rval = fp_send_rnid(port, data, mode, fcio, &pwwn); 9147 9148 /* ioctl handling is over */ 9149 break; 9150 } 9151 9152 case FCIO_SET_NODE_ID: { 9153 if (fcio->fcio_ilen != sizeof (fc_rnid_t) || 9154 (fcio->fcio_xfer != FCIO_XFER_WRITE)) { 9155 rval = EINVAL; 9156 break; 9157 } 9158 9159 rval = fp_set_rnid(port, data, mode, fcio); 9160 break; 9161 } 9162 9163 case FCIO_LINK_STATUS: { 9164 fc_portid_t rls_req; 9165 fc_rls_acc_t *rls_acc; 9166 fc_fca_pm_t pm; 9167 uint32_t dest, src_id; 9168 fp_cmd_t *cmd; 9169 fc_remote_port_t *pd; 9170 uchar_t pd_flags; 9171 9172 /* validate parameters */ 9173 if (fcio->fcio_ilen != sizeof (fc_portid_t) || 9174 fcio->fcio_olen != sizeof (fc_rls_acc_t) || 9175 fcio->fcio_xfer != FCIO_XFER_RW) { 9176 rval = EINVAL; 9177 break; 9178 } 9179 9180 if ((fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_FPORT) && 9181 (fcio->fcio_cmd_flags != FCIO_CFLAGS_RLS_DEST_NPORT)) { 9182 rval = EINVAL; 9183 break; 9184 } 9185 9186 if (ddi_copyin((void *)fcio->fcio_ibuf, (void *)&rls_req, 9187 sizeof (fc_portid_t), mode)) { 9188 rval = EFAULT; 9189 break; 9190 } 9191 9192 9193 /* Determine the destination of the RLS frame */ 9194 if (fcio->fcio_cmd_flags == FCIO_CFLAGS_RLS_DEST_FPORT) { 9195 dest = FS_FABRIC_F_PORT; 9196 } else { 9197 dest = rls_req.port_id; 9198 } 9199 9200 mutex_enter(&port->fp_mutex); 9201 src_id = port->fp_port_id.port_id; 9202 mutex_exit(&port->fp_mutex); 9203 9204 /* If dest is zero OR same as FCA ID, then use port_manage() */ 9205 if (dest == 0 || dest == src_id) { 9206 9207 /* Allocate memory for link error status block */ 9208 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9209 ASSERT(rls_acc != NULL); 9210 9211 /* Prepare the port management structure */ 9212 bzero((caddr_t)&pm, 
sizeof (pm)); 9213 9214 pm.pm_cmd_flags = FC_FCA_PM_READ; 9215 pm.pm_cmd_code = FC_PORT_RLS; 9216 pm.pm_data_len = sizeof (*rls_acc); 9217 pm.pm_data_buf = (caddr_t)rls_acc; 9218 9219 /* Get the adapter's link error status block */ 9220 ret = port->fp_fca_tran->fca_port_manage( 9221 port->fp_fca_handle, &pm); 9222 9223 if (ret == FC_SUCCESS) { 9224 /* xfer link status block to userland */ 9225 if (ddi_copyout((void *)rls_acc, 9226 (void *)fcio->fcio_obuf, 9227 sizeof (*rls_acc), mode) == 0) { 9228 if (fp_fcio_copyout(fcio, data, 9229 mode)) { 9230 rval = EFAULT; 9231 } 9232 } else { 9233 rval = EFAULT; 9234 } 9235 } else { 9236 rval = EIO; 9237 fcio->fcio_errno = ret; 9238 if (fp_fcio_copyout(fcio, data, mode)) { 9239 rval = EFAULT; 9240 } 9241 } 9242 9243 kmem_free(rls_acc, sizeof (*rls_acc)); 9244 9245 /* ioctl handling is over */ 9246 break; 9247 } 9248 9249 /* 9250 * Send RLS to the destination port. 9251 * Having RLS frame destination is as FPORT is not yet 9252 * supported and will be implemented in future, if needed. 9253 * Following call to get "pd" will fail if dest is FPORT 9254 */ 9255 pd = fctl_hold_remote_port_by_did(port, dest); 9256 if (pd == NULL) { 9257 fcio->fcio_errno = FC_BADOBJECT; 9258 rval = ENXIO; 9259 if (fp_fcio_copyout(fcio, data, mode)) { 9260 rval = EFAULT; 9261 } 9262 break; 9263 } 9264 9265 mutex_enter(&pd->pd_mutex); 9266 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 9267 mutex_exit(&pd->pd_mutex); 9268 fctl_release_remote_port(pd); 9269 9270 fcio->fcio_errno = FC_LOGINREQ; 9271 rval = EINVAL; 9272 if (fp_fcio_copyout(fcio, data, mode)) { 9273 rval = EFAULT; 9274 } 9275 break; 9276 } 9277 ASSERT(pd->pd_login_count >= 1); 9278 mutex_exit(&pd->pd_mutex); 9279 9280 /* 9281 * Allocate job structure and set job_code as DUMMY, 9282 * because we will not go through the job thread. 9283 * Instead fp_sendcmd() is called directly here. 9284 */ 9285 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9286 NULL, NULL, KM_SLEEP); 9287 ASSERT(job != NULL); 9288 9289 job->job_counter = 1; 9290 9291 cmd = fp_alloc_pkt(port, sizeof (la_els_rls_t), 9292 sizeof (la_els_rls_acc_t), KM_SLEEP, pd); 9293 if (cmd == NULL) { 9294 fcio->fcio_errno = FC_NOMEM; 9295 rval = ENOMEM; 9296 9297 fctl_release_remote_port(pd); 9298 9299 fctl_dealloc_job(job); 9300 if (fp_fcio_copyout(fcio, data, mode)) { 9301 rval = EFAULT; 9302 } 9303 break; 9304 } 9305 9306 /* Allocate memory for link error status block */ 9307 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP); 9308 9309 mutex_enter(&port->fp_mutex); 9310 mutex_enter(&pd->pd_mutex); 9311 9312 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9313 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9314 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9315 cmd->cmd_retry_count = 1; 9316 cmd->cmd_ulp_pkt = NULL; 9317 9318 fp_rls_init(cmd, job); 9319 9320 job->job_private = (void *)rls_acc; 9321 9322 pd_flags = pd->pd_flags; 9323 pd->pd_flags = PD_ELS_IN_PROGRESS; 9324 9325 mutex_exit(&pd->pd_mutex); 9326 mutex_exit(&port->fp_mutex); 9327 9328 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9329 fctl_jobwait(job); 9330 9331 fcio->fcio_errno = job->job_result; 9332 if (job->job_result == FC_SUCCESS) { 9333 ASSERT(pd != NULL); 9334 /* 9335 * link error status block is now available. 
9336 * Copy it to userland 9337 */ 9338 ASSERT(job->job_private == (void *)rls_acc); 9339 if (ddi_copyout((void *)rls_acc, 9340 (void *)fcio->fcio_obuf, 9341 sizeof (*rls_acc), mode) == 0) { 9342 if (fp_fcio_copyout(fcio, data, 9343 mode)) { 9344 rval = EFAULT; 9345 } 9346 } else { 9347 rval = EFAULT; 9348 } 9349 } else { 9350 rval = EIO; 9351 } 9352 } else { 9353 rval = EIO; 9354 fp_free_pkt(cmd); 9355 } 9356 9357 if (rval) { 9358 mutex_enter(&port->fp_mutex); 9359 mutex_enter(&pd->pd_mutex); 9360 if (pd->pd_flags == PD_ELS_IN_PROGRESS) { 9361 pd->pd_flags = pd_flags; 9362 } 9363 mutex_exit(&pd->pd_mutex); 9364 mutex_exit(&port->fp_mutex); 9365 } 9366 9367 fctl_release_remote_port(pd); 9368 fctl_dealloc_job(job); 9369 kmem_free(rls_acc, sizeof (*rls_acc)); 9370 9371 if (fp_fcio_copyout(fcio, data, mode)) { 9372 rval = EFAULT; 9373 } 9374 break; 9375 } 9376 9377 case FCIO_NS: { 9378 fc_ns_cmd_t *ns_req; 9379 fc_ns_cmd32_t *ns_req32; 9380 fctl_ns_req_t *ns_cmd; 9381 9382 if (use32 == B_TRUE) { 9383 if (fcio->fcio_ilen != sizeof (*ns_req32)) { 9384 rval = EINVAL; 9385 break; 9386 } 9387 9388 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9389 ns_req32 = kmem_zalloc(sizeof (*ns_req32), KM_SLEEP); 9390 9391 if (ddi_copyin(fcio->fcio_ibuf, ns_req32, 9392 sizeof (*ns_req32), mode)) { 9393 rval = EFAULT; 9394 kmem_free(ns_req, sizeof (*ns_req)); 9395 kmem_free(ns_req32, sizeof (*ns_req32)); 9396 break; 9397 } 9398 9399 ns_req->ns_flags = ns_req32->ns_flags; 9400 ns_req->ns_cmd = ns_req32->ns_cmd; 9401 ns_req->ns_req_len = ns_req32->ns_req_len; 9402 ns_req->ns_req_payload = ns_req32->ns_req_payload; 9403 ns_req->ns_resp_len = ns_req32->ns_resp_len; 9404 ns_req->ns_resp_payload = ns_req32->ns_resp_payload; 9405 ns_req->ns_fctl_private = ns_req32->ns_fctl_private; 9406 ns_req->ns_resp_hdr = ns_req32->ns_resp_hdr; 9407 9408 kmem_free(ns_req32, sizeof (*ns_req32)); 9409 } else { 9410 if (fcio->fcio_ilen != sizeof (*ns_req)) { 9411 rval = EINVAL; 9412 break; 9413 } 9414 9415 ns_req = kmem_zalloc(sizeof (*ns_req), KM_SLEEP); 9416 9417 if (ddi_copyin(fcio->fcio_ibuf, ns_req, 9418 sizeof (fc_ns_cmd_t), mode)) { 9419 rval = EFAULT; 9420 kmem_free(ns_req, sizeof (*ns_req)); 9421 break; 9422 } 9423 } 9424 9425 if (ns_req->ns_req_len <= 0) { 9426 rval = EINVAL; 9427 kmem_free(ns_req, sizeof (*ns_req)); 9428 break; 9429 } 9430 9431 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, NULL, KM_SLEEP); 9432 ASSERT(job != NULL); 9433 9434 ns_cmd = fctl_alloc_ns_cmd(ns_req->ns_req_len, 9435 ns_req->ns_resp_len, ns_req->ns_resp_len, 9436 FCTL_NS_FILL_NS_MAP, KM_SLEEP); 9437 ASSERT(ns_cmd != NULL); 9438 ns_cmd->ns_cmd_code = ns_req->ns_cmd; 9439 9440 if (ns_cmd->ns_cmd_code == NS_GA_NXT) { 9441 ns_cmd->ns_gan_max = 1; 9442 ns_cmd->ns_gan_index = 0; 9443 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 9444 } 9445 9446 if (ddi_copyin(ns_req->ns_req_payload, 9447 ns_cmd->ns_cmd_buf, ns_req->ns_req_len, mode)) { 9448 rval = EFAULT; 9449 fctl_free_ns_cmd(ns_cmd); 9450 fctl_dealloc_job(job); 9451 kmem_free(ns_req, sizeof (*ns_req)); 9452 break; 9453 } 9454 9455 job->job_private = (void *)ns_cmd; 9456 fctl_enque_job(port, job); 9457 fctl_jobwait(job); 9458 rval = job->job_result; 9459 9460 if (rval == FC_SUCCESS) { 9461 if (ns_req->ns_resp_len) { 9462 if (ddi_copyout(ns_cmd->ns_data_buf, 9463 ns_req->ns_resp_payload, 9464 ns_cmd->ns_data_len, mode)) { 9465 rval = EFAULT; 9466 fctl_free_ns_cmd(ns_cmd); 9467 fctl_dealloc_job(job); 9468 kmem_free(ns_req, sizeof (*ns_req)); 9469 break; 9470 } 9471 } 9472 } else { 9473 rval = EIO; 9474 } 9475 
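/*
 * Note: the CT response header is handed back to the caller on both
 * the success and failure paths, presumably so the reject reason
 * codes can be inspected when the query fails.
 */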
ns_req->ns_resp_hdr = ns_cmd->ns_resp_hdr; 9476 fctl_free_ns_cmd(ns_cmd); 9477 fctl_dealloc_job(job); 9478 kmem_free(ns_req, sizeof (*ns_req)); 9479 9480 if (fp_fcio_copyout(fcio, data, mode)) { 9481 rval = EFAULT; 9482 } 9483 break; 9484 } 9485 9486 default: 9487 rval = ENOTTY; 9488 break; 9489 } 9490 9491 /* 9492 * If set, reset the EXCL busy bit to 9493 * receive other exclusive access commands 9494 */ 9495 mutex_enter(&port->fp_mutex); 9496 if (port->fp_flag & FP_EXCL_BUSY) { 9497 port->fp_flag &= ~FP_EXCL_BUSY; 9498 } 9499 mutex_exit(&port->fp_mutex); 9500 9501 return (rval); 9502 } 9503 9504 9505 /* 9506 * This function assumes that the response length 9507 * is same regardless of data model (LP32 or LP64) 9508 * which is true for all the ioctls currently 9509 * supported. 9510 */ 9511 static int 9512 fp_copyout(void *from, void *to, size_t len, int mode) 9513 { 9514 return (ddi_copyout(from, to, len, mode)); 9515 } 9516 9517 /* 9518 * This function does the set rnid 9519 */ 9520 static int 9521 fp_set_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9522 { 9523 int rval = 0; 9524 fc_rnid_t *rnid; 9525 fc_fca_pm_t pm; 9526 9527 /* Allocate memory for node id block */ 9528 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9529 9530 if (ddi_copyin(fcio->fcio_ibuf, rnid, sizeof (fc_rnid_t), mode)) { 9531 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", EFAULT); 9532 kmem_free(rnid, sizeof (fc_rnid_t)); 9533 return (EFAULT); 9534 } 9535 9536 /* Prepare the port management structure */ 9537 bzero((caddr_t)&pm, sizeof (pm)); 9538 9539 pm.pm_cmd_flags = FC_FCA_PM_WRITE; 9540 pm.pm_cmd_code = FC_PORT_SET_NODE_ID; 9541 pm.pm_data_len = sizeof (*rnid); 9542 pm.pm_data_buf = (caddr_t)rnid; 9543 9544 /* Get the adapter's node data */ 9545 rval = port->fp_fca_tran->fca_port_manage( 9546 port->fp_fca_handle, &pm); 9547 9548 if (rval != FC_SUCCESS) { 9549 fcio->fcio_errno = rval; 9550 rval = EIO; 9551 if (fp_fcio_copyout(fcio, data, mode)) { 9552 rval = EFAULT; 9553 } 9554 } else { 9555 mutex_enter(&port->fp_mutex); 9556 /* copy to the port structure */ 9557 bcopy(rnid, &port->fp_rnid_params, 9558 sizeof (port->fp_rnid_params)); 9559 mutex_exit(&port->fp_mutex); 9560 } 9561 9562 kmem_free(rnid, sizeof (fc_rnid_t)); 9563 9564 if (rval != FC_SUCCESS) { 9565 FP_TRACE(FP_NHEAD1(3, 0), "fp_set_rnid: failed = %d", rval); 9566 } 9567 9568 return (rval); 9569 } 9570 9571 /* 9572 * This function does the local pwwn get rnid 9573 */ 9574 static int 9575 fp_get_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio) 9576 { 9577 fc_rnid_t *rnid; 9578 fc_fca_pm_t pm; 9579 int rval = 0; 9580 uint32_t ret; 9581 9582 /* Allocate memory for rnid data block */ 9583 rnid = kmem_zalloc(sizeof (fc_rnid_t), KM_SLEEP); 9584 9585 mutex_enter(&port->fp_mutex); 9586 if (port->fp_rnid_init == 1) { 9587 bcopy(&port->fp_rnid_params, rnid, sizeof (fc_rnid_t)); 9588 mutex_exit(&port->fp_mutex); 9589 /* xfer node info to userland */ 9590 if (ddi_copyout((void *)rnid, (void *)fcio->fcio_obuf, 9591 sizeof (*rnid), mode) == 0) { 9592 if (fp_fcio_copyout(fcio, data, mode)) { 9593 rval = EFAULT; 9594 } 9595 } else { 9596 rval = EFAULT; 9597 } 9598 9599 kmem_free(rnid, sizeof (fc_rnid_t)); 9600 9601 if (rval != FC_SUCCESS) { 9602 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", 9603 rval); 9604 } 9605 9606 return (rval); 9607 } 9608 mutex_exit(&port->fp_mutex); 9609 9610 /* Prepare the port management structure */ 9611 bzero((caddr_t)&pm, sizeof (pm)); 9612 9613 pm.pm_cmd_flags = FC_FCA_PM_READ; 
9614 pm.pm_cmd_code = FC_PORT_GET_NODE_ID; 9615 pm.pm_data_len = sizeof (fc_rnid_t); 9616 pm.pm_data_buf = (caddr_t)rnid; 9617 9618 /* Get the adapter's node data */ 9619 ret = port->fp_fca_tran->fca_port_manage( 9620 port->fp_fca_handle, 9621 &pm); 9622 9623 if (ret == FC_SUCCESS) { 9624 /* initialize in the port_info */ 9625 mutex_enter(&port->fp_mutex); 9626 port->fp_rnid_init = 1; 9627 bcopy(rnid, &port->fp_rnid_params, sizeof (*rnid)); 9628 mutex_exit(&port->fp_mutex); 9629 9630 /* xfer node info to userland */ 9631 if (ddi_copyout((void *)rnid, 9632 (void *)fcio->fcio_obuf, 9633 sizeof (*rnid), mode) == 0) { 9634 if (fp_fcio_copyout(fcio, data, 9635 mode)) { 9636 rval = EFAULT; 9637 } 9638 } else { 9639 rval = EFAULT; 9640 } 9641 } else { 9642 rval = EIO; 9643 fcio->fcio_errno = ret; 9644 if (fp_fcio_copyout(fcio, data, mode)) { 9645 rval = EFAULT; 9646 } 9647 } 9648 9649 kmem_free(rnid, sizeof (fc_rnid_t)); 9650 9651 if (rval != FC_SUCCESS) { 9652 FP_TRACE(FP_NHEAD1(3, 0), "fp_get_rnid: failed = %d", rval); 9653 } 9654 9655 return (rval); 9656 } 9657 9658 static int 9659 fp_send_rnid(fc_local_port_t *port, intptr_t data, int mode, fcio_t *fcio, 9660 la_wwn_t *pwwn) 9661 { 9662 int rval = 0; 9663 fc_remote_port_t *pd; 9664 fp_cmd_t *cmd; 9665 job_request_t *job; 9666 la_els_rnid_acc_t *rnid_acc; 9667 9668 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 9669 if (pd == NULL) { 9670 /* 9671 * We can safely assume that the destination port 9672 * is logged in. Either the user land will explicitly 9673 * login before issuing RNID ioctl or the device would 9674 * have been configured, meaning already logged in. 9675 */ 9676 9677 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", ENXIO); 9678 9679 return (ENXIO); 9680 } 9681 /* 9682 * Allocate job structure and set job_code as DUMMY, 9683 * because we will not go thorugh the job thread. 9684 * Instead fp_sendcmd() is called directly here. 9685 */ 9686 job = fctl_alloc_job(JOB_DUMMY, JOB_TYPE_FP_ASYNC, 9687 NULL, NULL, KM_SLEEP); 9688 9689 ASSERT(job != NULL); 9690 9691 job->job_counter = 1; 9692 9693 cmd = fp_alloc_pkt(port, sizeof (la_els_rnid_t), 9694 sizeof (la_els_rnid_acc_t), KM_SLEEP, pd); 9695 if (cmd == NULL) { 9696 fcio->fcio_errno = FC_NOMEM; 9697 rval = ENOMEM; 9698 9699 fctl_dealloc_job(job); 9700 if (fp_fcio_copyout(fcio, data, mode)) { 9701 rval = EFAULT; 9702 } 9703 9704 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9705 9706 return (rval); 9707 } 9708 9709 /* Allocate memory for node id accept block */ 9710 rnid_acc = kmem_zalloc(sizeof (la_els_rnid_acc_t), KM_SLEEP); 9711 9712 mutex_enter(&port->fp_mutex); 9713 mutex_enter(&pd->pd_mutex); 9714 9715 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 9716 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 9717 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 9718 cmd->cmd_retry_count = 1; 9719 cmd->cmd_ulp_pkt = NULL; 9720 9721 fp_rnid_init(cmd, fcio->fcio_cmd_flags, job); 9722 9723 job->job_private = (void *)rnid_acc; 9724 9725 pd->pd_flags = PD_ELS_IN_PROGRESS; 9726 9727 mutex_exit(&pd->pd_mutex); 9728 mutex_exit(&port->fp_mutex); 9729 9730 if (fp_sendcmd(port, cmd, port->fp_fca_handle) == FC_SUCCESS) { 9731 fctl_jobwait(job); 9732 fcio->fcio_errno = job->job_result; 9733 if (job->job_result == FC_SUCCESS) { 9734 int rnid_cnt; 9735 ASSERT(pd != NULL); 9736 /* 9737 * node id block is now available. 
9738 * Copy it to userland 9739 */ 9740 ASSERT(job->job_private == (void *)rnid_acc); 9741 9742 /* get the response length */ 9743 rnid_cnt = sizeof (ls_code_t) + sizeof (fc_rnid_hdr_t) + 9744 rnid_acc->hdr.cmn_len + 9745 rnid_acc->hdr.specific_len; 9746 9747 if (fcio->fcio_olen < rnid_cnt) { 9748 rval = EINVAL; 9749 } else if (ddi_copyout((void *)rnid_acc, 9750 (void *)fcio->fcio_obuf, 9751 rnid_cnt, mode) == 0) { 9752 if (fp_fcio_copyout(fcio, data, 9753 mode)) { 9754 rval = EFAULT; 9755 } 9756 } else { 9757 rval = EFAULT; 9758 } 9759 } else { 9760 rval = EIO; 9761 } 9762 } else { 9763 rval = EIO; 9764 if (pd) { 9765 mutex_enter(&pd->pd_mutex); 9766 pd->pd_flags = PD_IDLE; 9767 mutex_exit(&pd->pd_mutex); 9768 } 9769 fp_free_pkt(cmd); 9770 } 9771 9772 fctl_dealloc_job(job); 9773 kmem_free(rnid_acc, sizeof (la_els_rnid_acc_t)); 9774 9775 if (fp_fcio_copyout(fcio, data, mode)) { 9776 rval = EFAULT; 9777 } 9778 9779 if (rval != FC_SUCCESS) { 9780 FP_TRACE(FP_NHEAD1(3, 0), "fp_send_rnid: failed = %d", rval); 9781 } 9782 9783 return (rval); 9784 } 9785 9786 /* 9787 * Copy out to userland 9788 */ 9789 static int 9790 fp_fcio_copyout(fcio_t *fcio, intptr_t data, int mode) 9791 { 9792 int rval; 9793 9794 #ifdef _MULTI_DATAMODEL 9795 switch (ddi_model_convert_from(mode & FMODELS)) { 9796 case DDI_MODEL_ILP32: { 9797 struct fcio32 fcio32; 9798 9799 fcio32.fcio_xfer = fcio->fcio_xfer; 9800 fcio32.fcio_cmd = fcio->fcio_cmd; 9801 fcio32.fcio_flags = fcio->fcio_flags; 9802 fcio32.fcio_cmd_flags = fcio->fcio_cmd_flags; 9803 fcio32.fcio_ilen = fcio->fcio_ilen; 9804 fcio32.fcio_ibuf = 9805 (caddr32_t)(uintptr_t)fcio->fcio_ibuf; 9806 fcio32.fcio_olen = fcio->fcio_olen; 9807 fcio32.fcio_obuf = 9808 (caddr32_t)(uintptr_t)fcio->fcio_obuf; 9809 fcio32.fcio_alen = fcio->fcio_alen; 9810 fcio32.fcio_abuf = 9811 (caddr32_t)(uintptr_t)fcio->fcio_abuf; 9812 fcio32.fcio_errno = fcio->fcio_errno; 9813 9814 rval = ddi_copyout((void *)&fcio32, (void *)data, 9815 sizeof (struct fcio32), mode); 9816 break; 9817 } 9818 case DDI_MODEL_NONE: 9819 rval = ddi_copyout((void *)fcio, (void *)data, 9820 sizeof (fcio_t), mode); 9821 break; 9822 } 9823 #else 9824 rval = ddi_copyout((void *)fcio, (void *)data, sizeof (fcio_t), mode); 9825 #endif 9826 9827 return (rval); 9828 } 9829 9830 9831 static void 9832 fp_p2p_online(fc_local_port_t *port, job_request_t *job) 9833 { 9834 uint32_t listlen; 9835 fc_portmap_t *changelist; 9836 9837 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9838 ASSERT(port->fp_topology == FC_TOP_PT_PT); 9839 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9840 9841 listlen = 0; 9842 changelist = NULL; 9843 9844 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9845 if (port->fp_statec_busy > 1) { 9846 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 9847 } 9848 } 9849 mutex_exit(&port->fp_mutex); 9850 9851 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 9852 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 9853 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 9854 listlen, listlen, KM_SLEEP); 9855 9856 mutex_enter(&port->fp_mutex); 9857 } else { 9858 ASSERT(changelist == NULL && listlen == 0); 9859 mutex_enter(&port->fp_mutex); 9860 if (--port->fp_statec_busy == 0) { 9861 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 9862 } 9863 } 9864 } 9865 9866 static int 9867 fp_fillout_p2pmap(fc_local_port_t *port, fcio_t *fcio, int mode) 9868 { 9869 int rval; 9870 int count; 9871 int index; 9872 int num_devices; 9873 fc_remote_node_t *node; 9874 fc_port_dev_t *devlist; 9875 struct pwwn_hash 
*head; 9876 fc_remote_port_t *pd; 9877 9878 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9879 9880 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 9881 9882 devlist = kmem_zalloc(sizeof (fc_port_dev_t) * num_devices, KM_SLEEP); 9883 9884 for (count = index = 0; index < pwwn_table_size; index++) { 9885 head = &port->fp_pwwn_table[index]; 9886 pd = head->pwwn_head; 9887 while (pd != NULL) { 9888 mutex_enter(&pd->pd_mutex); 9889 if (pd->pd_state == PORT_DEVICE_INVALID) { 9890 mutex_exit(&pd->pd_mutex); 9891 pd = pd->pd_wwn_hnext; 9892 continue; 9893 } 9894 9895 devlist[count].dev_state = pd->pd_state; 9896 devlist[count].dev_hard_addr = pd->pd_hard_addr; 9897 devlist[count].dev_did = pd->pd_port_id; 9898 devlist[count].dev_did.priv_lilp_posit = 9899 (uint8_t)(index & 0xff); 9900 bcopy((caddr_t)pd->pd_fc4types, 9901 (caddr_t)devlist[count].dev_type, 9902 sizeof (pd->pd_fc4types)); 9903 9904 bcopy((caddr_t)&pd->pd_port_name, 9905 (caddr_t)&devlist[count].dev_pwwn, 9906 sizeof (la_wwn_t)); 9907 9908 node = pd->pd_remote_nodep; 9909 mutex_exit(&pd->pd_mutex); 9910 9911 if (node) { 9912 mutex_enter(&node->fd_mutex); 9913 bcopy((caddr_t)&node->fd_node_name, 9914 (caddr_t)&devlist[count].dev_nwwn, 9915 sizeof (la_wwn_t)); 9916 mutex_exit(&node->fd_mutex); 9917 } 9918 count++; 9919 if (count >= num_devices) { 9920 goto found; 9921 } 9922 } 9923 } 9924 found: 9925 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 9926 sizeof (count), mode)) { 9927 rval = FC_FAILURE; 9928 } else if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 9929 sizeof (fc_port_dev_t) * num_devices, mode)) { 9930 rval = FC_FAILURE; 9931 } else { 9932 rval = FC_SUCCESS; 9933 } 9934 9935 kmem_free(devlist, sizeof (fc_port_dev_t) * num_devices); 9936 9937 return (rval); 9938 } 9939 9940 9941 /* 9942 * Handle Fabric ONLINE 9943 */ 9944 static void 9945 fp_fabric_online(fc_local_port_t *port, job_request_t *job) 9946 { 9947 int index; 9948 int rval; 9949 int dbg_count; 9950 int count = 0; 9951 char ww_name[17]; 9952 uint32_t d_id; 9953 uint32_t listlen; 9954 fctl_ns_req_t *ns_cmd; 9955 struct pwwn_hash *head; 9956 fc_remote_port_t *pd; 9957 fc_remote_port_t *npd; 9958 fc_portmap_t *changelist; 9959 9960 ASSERT(MUTEX_HELD(&port->fp_mutex)); 9961 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 9962 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 9963 9964 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 9965 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 9966 0, KM_SLEEP); 9967 9968 ASSERT(ns_cmd != NULL); 9969 9970 ns_cmd->ns_cmd_code = NS_GID_PN; 9971 9972 /* 9973 * Check if orphans are showing up now 9974 */ 9975 if (port->fp_orphan_count) { 9976 fc_orphan_t *orp; 9977 fc_orphan_t *norp = NULL; 9978 fc_orphan_t *prev = NULL; 9979 9980 for (orp = port->fp_orphan_list; orp; orp = norp) { 9981 norp = orp->orp_next; 9982 mutex_exit(&port->fp_mutex); 9983 orp->orp_nscan++; 9984 9985 job->job_counter = 1; 9986 job->job_result = FC_SUCCESS; 9987 9988 ((ns_req_gid_pn_t *) 9989 (ns_cmd->ns_cmd_buf))->pwwn = orp->orp_pwwn; 9990 ((ns_resp_gid_pn_t *) 9991 ns_cmd->ns_data_buf)->pid.port_id = 0; 9992 ((ns_resp_gid_pn_t *) 9993 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 9994 9995 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 9996 if (rval == FC_SUCCESS) { 9997 d_id = 9998 BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 9999 pd = fp_create_remote_port_by_ns(port, 10000 d_id, KM_SLEEP); 10001 10002 if (pd != NULL) { 10003 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10004 10005 fp_printf(port, CE_WARN, FP_LOG_ONLY, 10006 0, 
NULL, "N_x Port with D_ID=%x," 10007 " PWWN=%s reappeared in fabric", 10008 d_id, ww_name); 10009 10010 mutex_enter(&port->fp_mutex); 10011 if (prev) { 10012 prev->orp_next = orp->orp_next; 10013 } else { 10014 ASSERT(orp == 10015 port->fp_orphan_list); 10016 port->fp_orphan_list = 10017 orp->orp_next; 10018 } 10019 port->fp_orphan_count--; 10020 mutex_exit(&port->fp_mutex); 10021 kmem_free(orp, sizeof (*orp)); 10022 count++; 10023 10024 mutex_enter(&pd->pd_mutex); 10025 pd->pd_flags = PD_ELS_MARK; 10026 10027 mutex_exit(&pd->pd_mutex); 10028 } else { 10029 prev = orp; 10030 } 10031 } else { 10032 if (orp->orp_nscan == FC_ORPHAN_SCAN_LIMIT) { 10033 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 10034 10035 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, 10036 NULL, 10037 " Port WWN %s removed from orphan" 10038 " list after %d scans", ww_name, 10039 orp->orp_nscan); 10040 10041 mutex_enter(&port->fp_mutex); 10042 if (prev) { 10043 prev->orp_next = orp->orp_next; 10044 } else { 10045 ASSERT(orp == 10046 port->fp_orphan_list); 10047 port->fp_orphan_list = 10048 orp->orp_next; 10049 } 10050 port->fp_orphan_count--; 10051 mutex_exit(&port->fp_mutex); 10052 10053 kmem_free(orp, sizeof (*orp)); 10054 } else { 10055 prev = orp; 10056 } 10057 } 10058 mutex_enter(&port->fp_mutex); 10059 } 10060 } 10061 10062 /* 10063 * Walk the Port WWN hash table, reestablish LOGIN 10064 * if a LOGIN is already performed on a particular 10065 * device; Any failure to LOGIN should mark the 10066 * port device OLD. 10067 */ 10068 for (index = 0; index < pwwn_table_size; index++) { 10069 head = &port->fp_pwwn_table[index]; 10070 npd = head->pwwn_head; 10071 10072 while ((pd = npd) != NULL) { 10073 la_wwn_t *pwwn; 10074 10075 npd = pd->pd_wwn_hnext; 10076 10077 /* 10078 * Don't count in the port devices that are new 10079 * unless the total number of devices visible 10080 * through this port is less than FP_MAX_DEVICES 10081 */ 10082 mutex_enter(&pd->pd_mutex); 10083 if (port->fp_dev_count >= FP_MAX_DEVICES || 10084 (port->fp_options & FP_TARGET_MODE)) { 10085 if (pd->pd_type == PORT_DEVICE_NEW || 10086 pd->pd_flags == PD_ELS_MARK || 10087 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10088 mutex_exit(&pd->pd_mutex); 10089 continue; 10090 } 10091 } else { 10092 if (pd->pd_flags == PD_ELS_MARK || 10093 pd->pd_recepient != PD_PLOGI_INITIATOR) { 10094 mutex_exit(&pd->pd_mutex); 10095 continue; 10096 } 10097 pd->pd_type = PORT_DEVICE_OLD; 10098 } 10099 count++; 10100 10101 /* 10102 * Consult with the name server about D_ID changes 10103 */ 10104 job->job_counter = 1; 10105 job->job_result = FC_SUCCESS; 10106 10107 ((ns_req_gid_pn_t *) 10108 (ns_cmd->ns_cmd_buf))->pwwn = pd->pd_port_name; 10109 ((ns_resp_gid_pn_t *) 10110 ns_cmd->ns_data_buf)->pid.port_id = 0; 10111 10112 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)-> 10113 pid.priv_lilp_posit = 0; 10114 10115 pwwn = &pd->pd_port_name; 10116 pd->pd_flags = PD_ELS_MARK; 10117 10118 mutex_exit(&pd->pd_mutex); 10119 mutex_exit(&port->fp_mutex); 10120 10121 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 10122 if (rval != FC_SUCCESS) { 10123 fc_wwn_to_str(pwwn, ww_name); 10124 10125 mutex_enter(&pd->pd_mutex); 10126 d_id = pd->pd_port_id.port_id; 10127 pd->pd_type = PORT_DEVICE_DELETE; 10128 mutex_exit(&pd->pd_mutex); 10129 10130 FP_TRACE(FP_NHEAD1(3, 0), 10131 "fp_fabric_online: PD " 10132 "disappeared; d_id=%x, PWWN=%s", 10133 d_id, ww_name); 10134 10135 FP_TRACE(FP_NHEAD2(9, 0), 10136 "N_x Port with D_ID=%x, PWWN=%s" 10137 " disappeared from fabric", d_id, 10138 ww_name); 10139 10140 
mutex_enter(&port->fp_mutex); 10141 continue; 10142 } 10143 10144 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 10145 10146 mutex_enter(&port->fp_mutex); 10147 mutex_enter(&pd->pd_mutex); 10148 if (d_id != pd->pd_port_id.port_id) { 10149 fctl_delist_did_table(port, pd); 10150 fc_wwn_to_str(pwwn, ww_name); 10151 10152 FP_TRACE(FP_NHEAD2(9, 0), 10153 "D_ID of a device with PWWN %s changed." 10154 " New D_ID = %x, OLD D_ID = %x", ww_name, 10155 d_id, pd->pd_port_id.port_id); 10156 10157 pd->pd_port_id.port_id = BE_32(d_id); 10158 pd->pd_type = PORT_DEVICE_CHANGED; 10159 fctl_enlist_did_table(port, pd); 10160 } 10161 mutex_exit(&pd->pd_mutex); 10162 10163 } 10164 } 10165 10166 if (ns_cmd) { 10167 fctl_free_ns_cmd(ns_cmd); 10168 } 10169 10170 listlen = 0; 10171 changelist = NULL; 10172 if (count) { 10173 if (port->fp_soft_state & FP_SOFT_IN_FCA_RESET) { 10174 port->fp_soft_state &= ~FP_SOFT_IN_FCA_RESET; 10175 mutex_exit(&port->fp_mutex); 10176 delay(drv_usectohz(FLA_RR_TOV * 1000 * 1000)); 10177 mutex_enter(&port->fp_mutex); 10178 } 10179 10180 dbg_count = 0; 10181 10182 job->job_counter = count; 10183 10184 for (index = 0; index < pwwn_table_size; index++) { 10185 head = &port->fp_pwwn_table[index]; 10186 npd = head->pwwn_head; 10187 10188 while ((pd = npd) != NULL) { 10189 npd = pd->pd_wwn_hnext; 10190 10191 mutex_enter(&pd->pd_mutex); 10192 if (pd->pd_flags != PD_ELS_MARK) { 10193 mutex_exit(&pd->pd_mutex); 10194 continue; 10195 } 10196 10197 dbg_count++; 10198 10199 /* 10200 * If it is already marked deletion, nothing 10201 * else to do. 10202 */ 10203 if (pd->pd_type == PORT_DEVICE_DELETE) { 10204 pd->pd_type = PORT_DEVICE_OLD; 10205 10206 mutex_exit(&pd->pd_mutex); 10207 mutex_exit(&port->fp_mutex); 10208 fp_jobdone(job); 10209 mutex_enter(&port->fp_mutex); 10210 10211 continue; 10212 } 10213 10214 /* 10215 * If it is freshly discovered out of 10216 * the orphan list, nothing else to do 10217 */ 10218 if (pd->pd_type == PORT_DEVICE_NEW) { 10219 pd->pd_flags = PD_IDLE; 10220 10221 mutex_exit(&pd->pd_mutex); 10222 mutex_exit(&port->fp_mutex); 10223 fp_jobdone(job); 10224 mutex_enter(&port->fp_mutex); 10225 10226 continue; 10227 } 10228 10229 pd->pd_flags = PD_IDLE; 10230 d_id = pd->pd_port_id.port_id; 10231 10232 /* 10233 * Explicitly mark all devices OLD; successful 10234 * PLOGI should reset this to either NO_CHANGE 10235 * or CHANGED. 
10236 */ 10237 if (pd->pd_type != PORT_DEVICE_CHANGED) { 10238 pd->pd_type = PORT_DEVICE_OLD; 10239 } 10240 10241 mutex_exit(&pd->pd_mutex); 10242 mutex_exit(&port->fp_mutex); 10243 10244 rval = fp_port_login(port, d_id, job, 10245 FP_CMD_PLOGI_RETAIN, KM_SLEEP, pd, NULL); 10246 10247 if (rval != FC_SUCCESS) { 10248 fp_jobdone(job); 10249 } 10250 mutex_enter(&port->fp_mutex); 10251 } 10252 } 10253 mutex_exit(&port->fp_mutex); 10254 10255 ASSERT(dbg_count == count); 10256 fp_jobwait(job); 10257 10258 mutex_enter(&port->fp_mutex); 10259 10260 ASSERT(port->fp_statec_busy > 0); 10261 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10262 if (port->fp_statec_busy > 1) { 10263 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10264 } 10265 } 10266 mutex_exit(&port->fp_mutex); 10267 } else { 10268 ASSERT(port->fp_statec_busy > 0); 10269 if (port->fp_statec_busy > 1) { 10270 job->job_flags |= JOB_CANCEL_ULP_NOTIFICATION; 10271 } 10272 mutex_exit(&port->fp_mutex); 10273 } 10274 10275 if ((job->job_flags & JOB_CANCEL_ULP_NOTIFICATION) == 0) { 10276 fctl_fillout_map(port, &changelist, &listlen, 1, 0, 0); 10277 10278 (void) fp_ulp_statec_cb(port, FC_STATE_ONLINE, changelist, 10279 listlen, listlen, KM_SLEEP); 10280 10281 mutex_enter(&port->fp_mutex); 10282 } else { 10283 ASSERT(changelist == NULL && listlen == 0); 10284 mutex_enter(&port->fp_mutex); 10285 if (--port->fp_statec_busy == 0) { 10286 port->fp_soft_state &= ~FP_SOFT_IN_STATEC_CB; 10287 } 10288 } 10289 } 10290 10291 10292 /* 10293 * Fill out device list for userland ioctl in private loop 10294 */ 10295 static int 10296 fp_fillout_loopmap(fc_local_port_t *port, fcio_t *fcio, int mode) 10297 { 10298 int rval; 10299 int count; 10300 int index; 10301 int num_devices; 10302 fc_remote_node_t *node; 10303 fc_port_dev_t *devlist; 10304 int lilp_device_count; 10305 fc_lilpmap_t *lilp_map; 10306 uchar_t *alpa_list; 10307 10308 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10309 10310 num_devices = fcio->fcio_olen / sizeof (fc_port_dev_t); 10311 if (port->fp_total_devices > port->fp_dev_count && 10312 num_devices >= port->fp_total_devices) { 10313 job_request_t *job; 10314 10315 mutex_exit(&port->fp_mutex); 10316 job = fctl_alloc_job(JOB_PORT_GETMAP, 0, NULL, NULL, KM_SLEEP); 10317 job->job_counter = 1; 10318 10319 mutex_enter(&port->fp_mutex); 10320 fp_get_loopmap(port, job); 10321 mutex_exit(&port->fp_mutex); 10322 10323 fp_jobwait(job); 10324 fctl_dealloc_job(job); 10325 } else { 10326 mutex_exit(&port->fp_mutex); 10327 } 10328 devlist = kmem_zalloc(sizeof (*devlist) * num_devices, KM_SLEEP); 10329 10330 mutex_enter(&port->fp_mutex); 10331 10332 /* 10333 * Applications are accustomed to getting the device list in 10334 * LILP map order. The HBA firmware usually returns the device 10335 * map in the LILP map order and diagnostic applications would 10336 * prefer to receive in the device list in that order too 10337 */ 10338 lilp_map = &port->fp_lilp_map; 10339 alpa_list = &lilp_map->lilp_alpalist[0]; 10340 10341 /* 10342 * the length field corresponds to the offset in the LILP frame 10343 * which begins with 1. The thing to note here is that the 10344 * lilp_device_count is 1 more than fp->fp_total_devices since 10345 * the host adapter's alpa also shows up in the lilp map. We 10346 * don't however return details of the host adapter since 10347 * fctl_get_remote_port_by_did fails for the host adapter's ALPA 10348 * and applications are required to issue the FCIO_GET_HOST_PARAMS 10349 * ioctl to obtain details about the host adapter port. 
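 * (Illustrative, hypothetical numbers: a loop holding the host plus
 * two targets would have lilp_length == 3 but fp_total_devices == 2,
 * so at most two entries are filled out below.)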
10350 */ 10351 lilp_device_count = lilp_map->lilp_length; 10352 10353 for (count = index = 0; index < lilp_device_count && 10354 count < num_devices; index++) { 10355 uint32_t d_id; 10356 fc_remote_port_t *pd; 10357 10358 d_id = alpa_list[index]; 10359 10360 mutex_exit(&port->fp_mutex); 10361 pd = fctl_get_remote_port_by_did(port, d_id); 10362 mutex_enter(&port->fp_mutex); 10363 10364 if (pd != NULL) { 10365 mutex_enter(&pd->pd_mutex); 10366 10367 if (pd->pd_state == PORT_DEVICE_INVALID) { 10368 mutex_exit(&pd->pd_mutex); 10369 continue; 10370 } 10371 10372 devlist[count].dev_state = pd->pd_state; 10373 devlist[count].dev_hard_addr = pd->pd_hard_addr; 10374 devlist[count].dev_did = pd->pd_port_id; 10375 devlist[count].dev_did.priv_lilp_posit = 10376 (uint8_t)(index & 0xff); 10377 bcopy((caddr_t)pd->pd_fc4types, 10378 (caddr_t)devlist[count].dev_type, 10379 sizeof (pd->pd_fc4types)); 10380 10381 bcopy((caddr_t)&pd->pd_port_name, 10382 (caddr_t)&devlist[count].dev_pwwn, 10383 sizeof (la_wwn_t)); 10384 10385 node = pd->pd_remote_nodep; 10386 mutex_exit(&pd->pd_mutex); 10387 10388 if (node) { 10389 mutex_enter(&node->fd_mutex); 10390 bcopy((caddr_t)&node->fd_node_name, 10391 (caddr_t)&devlist[count].dev_nwwn, 10392 sizeof (la_wwn_t)); 10393 mutex_exit(&node->fd_mutex); 10394 } 10395 count++; 10396 } 10397 } 10398 10399 if (fp_copyout((void *)&count, (void *)fcio->fcio_abuf, 10400 sizeof (count), mode)) { 10401 rval = FC_FAILURE; 10402 } 10403 10404 if (fp_copyout((void *)devlist, (void *)fcio->fcio_obuf, 10405 sizeof (fc_port_dev_t) * num_devices, mode)) { 10406 rval = FC_FAILURE; 10407 } else { 10408 rval = FC_SUCCESS; 10409 } 10410 10411 kmem_free(devlist, sizeof (*devlist) * num_devices); 10412 ASSERT(MUTEX_HELD(&port->fp_mutex)); 10413 10414 return (rval); 10415 } 10416 10417 10418 /* 10419 * Completion function for responses to unsolicited commands 10420 */ 10421 static void 10422 fp_unsol_intr(fc_packet_t *pkt) 10423 { 10424 fp_cmd_t *cmd; 10425 fc_local_port_t *port; 10426 10427 cmd = pkt->pkt_ulp_private; 10428 port = cmd->cmd_port; 10429 10430 if (pkt->pkt_state != FC_PKT_SUCCESS) { 10431 fp_printf(port, CE_WARN, FP_LOG_ONLY, 0, pkt, 10432 "couldn't post response to unsolicited request;" 10433 " ox_id=%x rx_id=%x", pkt->pkt_cmd_fhdr.ox_id, 10434 pkt->pkt_resp_fhdr.rx_id); 10435 } 10436 10437 if (cmd == port->fp_els_resp_pkt) { 10438 mutex_enter(&port->fp_mutex); 10439 port->fp_els_resp_pkt_busy = 0; 10440 mutex_exit(&port->fp_mutex); 10441 return; 10442 } 10443 10444 fp_free_pkt(cmd); 10445 } 10446 10447 10448 /* 10449 * solicited LINIT ELS completion function 10450 */ 10451 static void 10452 fp_linit_intr(fc_packet_t *pkt) 10453 { 10454 fp_cmd_t *cmd; 10455 job_request_t *job; 10456 fc_linit_resp_t acc; 10457 10458 if (FP_IS_PKT_ERROR(pkt)) { 10459 (void) fp_common_intr(pkt, 1); 10460 return; 10461 } 10462 10463 cmd = pkt->pkt_ulp_private; 10464 job = cmd->cmd_job; 10465 10466 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&acc, 10467 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR); 10468 10469 if (acc.status != FC_LINIT_SUCCESS) { 10470 job->job_result = FC_FAILURE; 10471 } else { 10472 job->job_result = FC_SUCCESS; 10473 } 10474 fp_iodone(cmd); 10475 } 10476 10477 10478 /* 10479 * Decode the unsolicited request; For FC-4 Device and Link data frames 10480 * notify the registered ULP of this FC-4 type right here. For Unsolicited 10481 * ELS requests, submit a request to the job_handler thread to work on it. 
10482 * The intent is to act quickly on the FC-4 unsolicited link and data frames 10483 * and save much of the interrupt time processing of unsolicited ELS requests 10484 * and hand it off to the job_handler thread. 10485 */ 10486 static void 10487 fp_unsol_cb(opaque_t port_handle, fc_unsol_buf_t *buf, uint32_t type) 10488 { 10489 uchar_t r_ctl; 10490 uchar_t ls_code; 10491 uint32_t s_id; 10492 uint32_t rscn_count = FC_INVALID_RSCN_COUNT; 10493 uint32_t cb_arg; 10494 fp_cmd_t *cmd; 10495 fc_local_port_t *port; 10496 job_request_t *job; 10497 fc_remote_port_t *pd; 10498 10499 port = port_handle; 10500 10501 FP_TRACE(FP_NHEAD1(1, 0), "fp_unsol_cb: s_id=%x," 10502 " d_id=%x, type=%x, r_ctl=%x, f_ctl=%x" 10503 " seq_id=%x, df_ctl=%x, seq_cnt=%x, ox_id=%x, rx_id=%x" 10504 " ro=%x, buffer[0]:%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10505 buf->ub_frame.type, buf->ub_frame.r_ctl, buf->ub_frame.f_ctl, 10506 buf->ub_frame.seq_id, buf->ub_frame.df_ctl, buf->ub_frame.seq_cnt, 10507 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro, 10508 buf->ub_buffer[0]); 10509 10510 if (type & 0x80000000) { 10511 /* 10512 * Huh ? Nothing much can be done without 10513 * a valid buffer. So just exit. 10514 */ 10515 return; 10516 } 10517 /* 10518 * If the unsolicited interrupts arrive while it isn't 10519 * safe to handle unsolicited callbacks; Drop them, yes, 10520 * drop them on the floor 10521 */ 10522 mutex_enter(&port->fp_mutex); 10523 port->fp_active_ubs++; 10524 if ((port->fp_soft_state & 10525 (FP_SOFT_IN_DETACH | FP_SOFT_SUSPEND | FP_SOFT_POWER_DOWN)) || 10526 FC_PORT_STATE_MASK(port->fp_state) == FC_STATE_OFFLINE) { 10527 10528 FP_TRACE(FP_NHEAD1(3, 0), "fp_unsol_cb: port state is " 10529 "not ONLINE. s_id=%x, d_id=%x, type=%x, " 10530 "seq_id=%x, ox_id=%x, rx_id=%x" 10531 "ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 10532 buf->ub_frame.type, buf->ub_frame.seq_id, 10533 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 10534 10535 ASSERT(port->fp_active_ubs > 0); 10536 if (--(port->fp_active_ubs) == 0) { 10537 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10538 } 10539 10540 mutex_exit(&port->fp_mutex); 10541 10542 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10543 1, &buf->ub_token); 10544 10545 return; 10546 } 10547 10548 r_ctl = buf->ub_frame.r_ctl; 10549 s_id = buf->ub_frame.s_id; 10550 if (port->fp_active_ubs == 1) { 10551 port->fp_soft_state |= FP_SOFT_IN_UNSOL_CB; 10552 } 10553 10554 if (r_ctl == R_CTL_ELS_REQ && buf->ub_buffer[0] == LA_ELS_LOGO && 10555 port->fp_statec_busy) { 10556 mutex_exit(&port->fp_mutex); 10557 pd = fctl_get_remote_port_by_did(port, s_id); 10558 if (pd) { 10559 mutex_enter(&pd->pd_mutex); 10560 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10561 FP_TRACE(FP_NHEAD1(3, 0), 10562 "LOGO for LOGGED IN D_ID %x", 10563 buf->ub_frame.s_id); 10564 pd->pd_state = PORT_DEVICE_VALID; 10565 } 10566 mutex_exit(&pd->pd_mutex); 10567 } 10568 10569 mutex_enter(&port->fp_mutex); 10570 ASSERT(port->fp_active_ubs > 0); 10571 if (--(port->fp_active_ubs) == 0) { 10572 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10573 } 10574 mutex_exit(&port->fp_mutex); 10575 10576 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10577 1, &buf->ub_token); 10578 10579 FP_TRACE(FP_NHEAD1(3, 0), 10580 "fp_unsol_cb() bailing out LOGO for D_ID %x", 10581 buf->ub_frame.s_id); 10582 return; 10583 } 10584 10585 if (port->fp_els_resp_pkt_busy == 0) { 10586 if (r_ctl == R_CTL_ELS_REQ) { 10587 ls_code = buf->ub_buffer[0]; 10588 10589 switch (ls_code) { 10590 case LA_ELS_PLOGI: 10591 
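/* FALLTHROUGH */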
case LA_ELS_FLOGI: 10592 port->fp_els_resp_pkt_busy = 1; 10593 mutex_exit(&port->fp_mutex); 10594 fp_i_handle_unsol_els(port, buf); 10595 10596 mutex_enter(&port->fp_mutex); 10597 ASSERT(port->fp_active_ubs > 0); 10598 if (--(port->fp_active_ubs) == 0) { 10599 port->fp_soft_state &= 10600 ~FP_SOFT_IN_UNSOL_CB; 10601 } 10602 mutex_exit(&port->fp_mutex); 10603 port->fp_fca_tran->fca_ub_release( 10604 port->fp_fca_handle, 1, &buf->ub_token); 10605 10606 return; 10607 case LA_ELS_RSCN: 10608 if (++(port)->fp_rscn_count == 10609 FC_INVALID_RSCN_COUNT) { 10610 ++(port)->fp_rscn_count; 10611 } 10612 rscn_count = port->fp_rscn_count; 10613 break; 10614 10615 default: 10616 break; 10617 } 10618 } 10619 } else if ((r_ctl == R_CTL_ELS_REQ) && 10620 (buf->ub_buffer[0] == LA_ELS_RSCN)) { 10621 if (++port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10622 ++port->fp_rscn_count; 10623 } 10624 rscn_count = port->fp_rscn_count; 10625 } 10626 10627 mutex_exit(&port->fp_mutex); 10628 10629 switch (r_ctl & R_CTL_ROUTING) { 10630 case R_CTL_DEVICE_DATA: 10631 /* 10632 * If the unsolicited buffer is a CT IU, 10633 * have the job_handler thread work on it. 10634 */ 10635 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10636 break; 10637 } 10638 /* FALLTHROUGH */ 10639 10640 case R_CTL_FC4_SVC: { 10641 int sendup = 0; 10642 10643 /* 10644 * If a LOGIN isn't performed before this request 10645 * shut the door on this port with a reply that a 10646 * LOGIN is required. We make an exception however 10647 * for IP broadcast packets and pass them through 10648 * to the IP ULP(s) to handle broadcast requests. 10649 * This is not a problem for private loop devices 10650 * but for fabric topologies we don't log into the 10651 * remote ports during port initialization and 10652 * the ULPs need to log into requesting ports on 10653 * demand. 10654 */ 10655 pd = fctl_get_remote_port_by_did(port, s_id); 10656 if (pd) { 10657 mutex_enter(&pd->pd_mutex); 10658 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 10659 sendup++; 10660 } 10661 mutex_exit(&pd->pd_mutex); 10662 } else if ((pd == NULL) && 10663 (buf->ub_frame.type == FC_TYPE_IS8802_SNAP) && 10664 (buf->ub_frame.d_id == 0xffffff || 10665 buf->ub_frame.d_id == 0x00)) { 10666 /* brodacst IP frame - so sendup via job thread */ 10667 break; 10668 } 10669 10670 /* 10671 * Send all FC4 services via job thread too 10672 */ 10673 if ((r_ctl & R_CTL_ROUTING) == R_CTL_FC4_SVC) { 10674 break; 10675 } 10676 10677 if (sendup || !FC_IS_REAL_DEVICE(s_id)) { 10678 fctl_ulp_unsol_cb(port, buf, buf->ub_frame.type); 10679 return; 10680 } 10681 10682 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10683 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10684 0, KM_NOSLEEP, pd); 10685 if (cmd != NULL) { 10686 fp_els_rjt_init(port, cmd, buf, 10687 FC_ACTION_NON_RETRYABLE, 10688 FC_REASON_LOGIN_REQUIRED, NULL); 10689 10690 if (fp_sendcmd(port, cmd, 10691 port->fp_fca_handle) != FC_SUCCESS) { 10692 fp_free_pkt(cmd); 10693 } 10694 } 10695 } 10696 10697 mutex_enter(&port->fp_mutex); 10698 ASSERT(port->fp_active_ubs > 0); 10699 if (--(port->fp_active_ubs) == 0) { 10700 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10701 } 10702 mutex_exit(&port->fp_mutex); 10703 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10704 1, &buf->ub_token); 10705 10706 return; 10707 } 10708 10709 default: 10710 break; 10711 } 10712 10713 /* 10714 * Submit a Request to the job_handler thread to work 10715 * on the unsolicited request. 
The potential side effect 10716 * of this is that the unsolicited buffer takes a little 10717 * longer to get released, but we save interrupt time in 10718 * the bargain. 10719 */ 10720 cb_arg = (rscn_count == FC_INVALID_RSCN_COUNT) ? NULL : rscn_count; 10721 10722 /* 10723 * One way that the rscn_count gets used is described below: 10724 * 10725 * 1. fp_unsol_cb() gets an RSCN and updates fp_rscn_count. 10726 * 2. Before the mutex is released, a copy of it is stored in rscn_count. 10727 * 3. The count is passed to the job thread as JOB_UNSOL_REQUEST (below) 10728 * by overloading the job_cb_arg to carry the rscn_count. 10729 * 4. When one of the routines processing the RSCN picks it up (ex: 10730 * fp_validate_rscn_page()), it passes this count in the map 10731 * structure (as part of the map_rscn_info structure member) to the 10732 * ULPs. 10733 * 5. When ULPs make calls back to the transport (example interfaces for 10734 * this are fc_ulp_transport(), fc_ulp_login(), fc_issue_els()), they 10735 * can now pass back this count as part of the fc_packet's 10736 * pkt_ulp_rscn_count member. fcp does this currently. 10737 * 6. When the transport gets a call to transport a command on the wire, it 10738 * will check whether the fc_packet carries a valid rscn count in its 10739 * pkt_ulp_rsvd1 field (the pkt_ulp_rscn_count passed back in step 5); 10740 * if it does, it matches that count against the current rscn_count on 10741 * that instance of the port. If they don't match, a newer RSCN has 10742 * arrived and the ULP is told so via the error code FC_DEVICE_BUSY_NEW_RSCN. 10743 * 7. At this point the ULP is free to make up its own mind as to how to 10744 * handle this. Currently, fcp will reset its retry counters and keep 10745 * retrying the operation it was doing in anticipation of getting a 10746 * new state change callback for the new RSCN.
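 *
 * As a rough, purely illustrative sketch of steps 5 and 6 from a ULP's
 * side (member names are the ones used in the list above; the count
 * member inside map_rscn_info is assumed here to be ulp_rscn_count):
 *
 *	pkt->pkt_ulp_rscn_count = map->map_rscn_info.ulp_rscn_count;
 *	rval = fc_ulp_transport(ulp_port_handle, pkt);
 *	if (rval == FC_DEVICE_BUSY_NEW_RSCN)
 *		back off and retry; a newer RSCN is in flight (step 7)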
10747 */ 10748 job = fctl_alloc_job(JOB_UNSOL_REQUEST, 0, NULL, 10749 (opaque_t)(uintptr_t)cb_arg, KM_NOSLEEP); 10750 if (job == NULL) { 10751 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, "fp_unsol_cb() " 10752 "couldn't submit a job to the thread, failing.."); 10753 10754 mutex_enter(&port->fp_mutex); 10755 10756 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 10757 --port->fp_rscn_count; 10758 } 10759 10760 ASSERT(port->fp_active_ubs > 0); 10761 if (--(port->fp_active_ubs) == 0) { 10762 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10763 } 10764 10765 mutex_exit(&port->fp_mutex); 10766 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10767 1, &buf->ub_token); 10768 10769 return; 10770 } 10771 job->job_private = (void *)buf; 10772 fctl_enque_job(port, job); 10773 } 10774 10775 10776 /* 10777 * Handle unsolicited requests 10778 */ 10779 static void 10780 fp_handle_unsol_buf(fc_local_port_t *port, fc_unsol_buf_t *buf, 10781 job_request_t *job) 10782 { 10783 uchar_t r_ctl; 10784 uchar_t ls_code; 10785 uint32_t s_id; 10786 fp_cmd_t *cmd; 10787 fc_remote_port_t *pd; 10788 fp_unsol_spec_t *ub_spec; 10789 10790 r_ctl = buf->ub_frame.r_ctl; 10791 s_id = buf->ub_frame.s_id; 10792 10793 switch (r_ctl & R_CTL_ROUTING) { 10794 case R_CTL_EXTENDED_SVC: 10795 if (r_ctl != R_CTL_ELS_REQ) { 10796 break; 10797 } 10798 10799 ls_code = buf->ub_buffer[0]; 10800 switch (ls_code) { 10801 case LA_ELS_LOGO: 10802 case LA_ELS_ADISC: 10803 case LA_ELS_PRLO: 10804 pd = fctl_get_remote_port_by_did(port, s_id); 10805 if (pd == NULL) { 10806 if (!FC_IS_REAL_DEVICE(s_id)) { 10807 break; 10808 } 10809 if (!FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10810 break; 10811 } 10812 if ((cmd = fp_alloc_pkt(port, 10813 sizeof (la_els_rjt_t), 0, KM_SLEEP, 10814 NULL)) == NULL) { 10815 /* 10816 * Can this actually fail when 10817 * given KM_SLEEP? (Could be used 10818 * this way in a number of places.) 10819 */ 10820 break; 10821 } 10822 10823 fp_els_rjt_init(port, cmd, buf, 10824 FC_ACTION_NON_RETRYABLE, 10825 FC_REASON_INVALID_LINK_CTRL, job); 10826 10827 if (fp_sendcmd(port, cmd, 10828 port->fp_fca_handle) != FC_SUCCESS) { 10829 fp_free_pkt(cmd); 10830 } 10831 10832 break; 10833 } 10834 if (ls_code == LA_ELS_LOGO) { 10835 fp_handle_unsol_logo(port, buf, pd, job); 10836 } else if (ls_code == LA_ELS_ADISC) { 10837 fp_handle_unsol_adisc(port, buf, pd, job); 10838 } else { 10839 fp_handle_unsol_prlo(port, buf, pd, job); 10840 } 10841 break; 10842 10843 case LA_ELS_PLOGI: 10844 fp_handle_unsol_plogi(port, buf, job, KM_SLEEP); 10845 break; 10846 10847 case LA_ELS_FLOGI: 10848 fp_handle_unsol_flogi(port, buf, job, KM_SLEEP); 10849 break; 10850 10851 case LA_ELS_RSCN: 10852 fp_handle_unsol_rscn(port, buf, job, KM_SLEEP); 10853 break; 10854 10855 default: 10856 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10857 ub_spec->port = port; 10858 ub_spec->buf = buf; 10859 10860 (void) taskq_dispatch(port->fp_taskq, 10861 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10862 return; 10863 } 10864 break; 10865 10866 case R_CTL_BASIC_SVC: 10867 /* 10868 * The unsolicited basic link services could be ABTS 10869 * and RMC (Or even a NOP). Just BA_RJT them until 10870 * such time there arises a need to handle them more 10871 * carefully. 
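 *
 * (Note that the BA_RJT below is only attempted for class 1 and 2
 * frames; everything else simply falls through to the common buffer
 * release at the bottom of this function. fp_ba_rjt_init() fills in
 * reason code FC_REASON_CMD_UNSUPPORTED with no explanation.)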
10872 */ 10873 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10874 cmd = fp_alloc_pkt(port, sizeof (la_ba_rjt_t), 10875 0, KM_SLEEP, NULL); 10876 if (cmd != NULL) { 10877 fp_ba_rjt_init(port, cmd, buf, job); 10878 if (fp_sendcmd(port, cmd, 10879 port->fp_fca_handle) != FC_SUCCESS) { 10880 fp_free_pkt(cmd); 10881 } 10882 } 10883 } 10884 break; 10885 10886 case R_CTL_DEVICE_DATA: 10887 if (buf->ub_frame.type == FC_TYPE_FC_SERVICES) { 10888 /* 10889 * Mostly this is of type FC_TYPE_FC_SERVICES. 10890 * As we don't like any Unsolicited FC services 10891 * requests, we would do well to RJT them as 10892 * well. 10893 */ 10894 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10895 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10896 0, KM_SLEEP, NULL); 10897 if (cmd != NULL) { 10898 fp_els_rjt_init(port, cmd, buf, 10899 FC_ACTION_NON_RETRYABLE, 10900 FC_REASON_INVALID_LINK_CTRL, job); 10901 10902 if (fp_sendcmd(port, cmd, 10903 port->fp_fca_handle) != 10904 FC_SUCCESS) { 10905 fp_free_pkt(cmd); 10906 } 10907 } 10908 } 10909 break; 10910 } 10911 /* FALLTHROUGH */ 10912 10913 case R_CTL_FC4_SVC: 10914 ub_spec = kmem_zalloc(sizeof (*ub_spec), KM_SLEEP); 10915 ub_spec->port = port; 10916 ub_spec->buf = buf; 10917 10918 (void) taskq_dispatch(port->fp_taskq, 10919 fp_ulp_unsol_cb, ub_spec, KM_SLEEP); 10920 return; 10921 10922 case R_CTL_LINK_CTL: 10923 /* 10924 * Turn deaf ear on unsolicited link control frames. 10925 * Typical unsolicited link control Frame is an LCR 10926 * (to reset End to End credit to the default login 10927 * value and abort current sequences for all classes) 10928 * An intelligent microcode/firmware should handle 10929 * this transparently at its level and not pass all 10930 * the way up here. 10931 * 10932 * Possible responses to LCR are R_RDY, F_RJT, P_RJT 10933 * or F_BSY. P_RJT is chosen to be the most appropriate 10934 * at this time. 10935 */ 10936 /* FALLTHROUGH */ 10937 10938 default: 10939 /* 10940 * Just reject everything else as an invalid request. 10941 */ 10942 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 10943 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 10944 0, KM_SLEEP, NULL); 10945 if (cmd != NULL) { 10946 fp_els_rjt_init(port, cmd, buf, 10947 FC_ACTION_NON_RETRYABLE, 10948 FC_REASON_INVALID_LINK_CTRL, job); 10949 10950 if (fp_sendcmd(port, cmd, 10951 port->fp_fca_handle) != FC_SUCCESS) { 10952 fp_free_pkt(cmd); 10953 } 10954 } 10955 } 10956 break; 10957 } 10958 10959 mutex_enter(&port->fp_mutex); 10960 ASSERT(port->fp_active_ubs > 0); 10961 if (--(port->fp_active_ubs) == 0) { 10962 port->fp_soft_state &= ~FP_SOFT_IN_UNSOL_CB; 10963 } 10964 mutex_exit(&port->fp_mutex); 10965 port->fp_fca_tran->fca_ub_release(port->fp_fca_handle, 10966 1, &buf->ub_token); 10967 } 10968 10969 10970 /* 10971 * Prepare a BA_RJT and send it over. 
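 *
 * The response reuses the exchange identified by the unsolicited
 * buffer: fp_unsol_resp_init() swaps the frame's S_ID/D_ID and copies
 * the OX_ID/RX_ID, and the reject goes out as R_CTL_LS_BA_RJT with
 * type FC_TYPE_BASIC_LS.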
10972 */ 10973 static void 10974 fp_ba_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 10975 job_request_t *job) 10976 { 10977 fc_packet_t *pkt; 10978 la_ba_rjt_t payload; 10979 10980 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 10981 10982 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 10983 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 10984 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 10985 cmd->cmd_retry_count = 1; 10986 cmd->cmd_ulp_pkt = NULL; 10987 10988 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 10989 cmd->cmd_job = job; 10990 10991 pkt = &cmd->cmd_pkt; 10992 10993 fp_unsol_resp_init(pkt, buf, R_CTL_LS_BA_RJT, FC_TYPE_BASIC_LS); 10994 10995 payload.reserved = 0; 10996 payload.reason_code = FC_REASON_CMD_UNSUPPORTED; 10997 payload.explanation = FC_EXPLN_NONE; 10998 payload.vendor = 0; 10999 11000 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11001 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11002 } 11003 11004 11005 /* 11006 * Prepare an LS_RJT and send it over 11007 */ 11008 static void 11009 fp_els_rjt_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11010 uchar_t action, uchar_t reason, job_request_t *job) 11011 { 11012 fc_packet_t *pkt; 11013 la_els_rjt_t payload; 11014 11015 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11016 11017 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11018 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11019 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11020 cmd->cmd_retry_count = 1; 11021 cmd->cmd_ulp_pkt = NULL; 11022 11023 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11024 cmd->cmd_job = job; 11025 11026 pkt = &cmd->cmd_pkt; 11027 11028 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11029 11030 payload.ls_code.ls_code = LA_ELS_RJT; 11031 payload.ls_code.mbz = 0; 11032 payload.action = action; 11033 payload.reason = reason; 11034 payload.reserved = 0; 11035 payload.vu = 0; 11036 11037 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11038 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11039 } 11040 11041 /* 11042 * Function: fp_prlo_acc_init 11043 * 11044 * Description: Initializes an Link Service Accept for a PRLO. 11045 * 11046 * Arguments: *port Local port through which the PRLO was 11047 * received. 11048 * cmd Command that will carry the accept. 11049 * *buf Unsolicited buffer containing the PRLO 11050 * request. 11051 * job Job request. 11052 * sleep Allocation mode. 11053 * 11054 * Return Value: *cmd Command containing the response. 11055 * 11056 * Context: Depends on the parameter sleep. 11057 */ 11058 fp_cmd_t * 11059 fp_prlo_acc_init(fc_local_port_t *port, fc_remote_port_t *pd, 11060 fc_unsol_buf_t *buf, job_request_t *job, int sleep) 11061 { 11062 fp_cmd_t *cmd; 11063 fc_packet_t *pkt; 11064 la_els_prlo_t *req; 11065 size_t len; 11066 uint16_t flags; 11067 11068 req = (la_els_prlo_t *)buf->ub_buffer; 11069 len = (size_t)ntohs(req->payload_length); 11070 11071 /* 11072 * The payload of the accept to a PRLO has to be the exact match of 11073 * the payload of the request (at the exception of the code). 11074 */ 11075 cmd = fp_alloc_pkt(port, (int)len, 0, sleep, pd); 11076 11077 if (cmd) { 11078 /* 11079 * The fp command was successfully allocated. 
11080 */ 11081 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11082 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11083 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11084 cmd->cmd_retry_count = 1; 11085 cmd->cmd_ulp_pkt = NULL; 11086 11087 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11088 cmd->cmd_job = job; 11089 11090 pkt = &cmd->cmd_pkt; 11091 11092 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, 11093 FC_TYPE_EXTENDED_LS); 11094 11095 /* The code is overwritten for the copy. */ 11096 req->ls_code = LA_ELS_ACC; 11097 /* Response code is set. */ 11098 flags = ntohs(req->flags); 11099 flags &= ~SP_RESP_CODE_MASK; 11100 flags |= SP_RESP_CODE_REQ_EXECUTED; 11101 req->flags = htons(flags); 11102 11103 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)req, 11104 (uint8_t *)pkt->pkt_cmd, len, DDI_DEV_AUTOINCR); 11105 } 11106 return (cmd); 11107 } 11108 11109 /* 11110 * Prepare an ACC response to an ELS request 11111 */ 11112 static void 11113 fp_els_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 11114 job_request_t *job) 11115 { 11116 fc_packet_t *pkt; 11117 ls_code_t payload; 11118 11119 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 11120 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 11121 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 11122 cmd->cmd_retry_count = 1; 11123 cmd->cmd_ulp_pkt = NULL; 11124 11125 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 11126 cmd->cmd_job = job; 11127 11128 pkt = &cmd->cmd_pkt; 11129 11130 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 11131 11132 payload.ls_code = LA_ELS_ACC; 11133 payload.mbz = 0; 11134 11135 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 11136 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 11137 } 11138 11139 /* 11140 * Unsolicited PRLO handler 11141 * 11142 * A Process Logout should be handled by the ULP that established it. However, 11143 * some devices send a PRLO to trigger a PLOGI followed by a PRLI. This happens 11144 * when a device implicitly logs out an initiator (for whatever reason) and 11145 * tries to get that initiator to restablish the connection (PLOGI and PRLI). 11146 * The logical thing to do for the device would be to send a LOGO in response 11147 * to any FC4 frame sent by the initiator. Some devices choose, however, to send 11148 * a PRLO instead. 11149 * 11150 * From a Fibre Channel standpoint a PRLO calls for a PRLI. There's no reason to 11151 * think that the Port Login has been lost. If we follow the Fibre Channel 11152 * protocol to the letter a PRLI should be sent after accepting the PRLO. If 11153 * the Port Login has also been lost, the remote port will reject the PRLI 11154 * indicating that we must PLOGI first. The initiator will then turn around and 11155 * send a PLOGI. The way Leadville is layered and the way the ULP interface 11156 * is defined doesn't allow this scenario to be followed easily. If FCP were to 11157 * handle the PRLO and attempt the PRLI, the reject indicating that a PLOGI is 11158 * needed would be received by FCP. FCP would have, then, to tell the transport 11159 * (fp) to PLOGI. The problem is, the transport would still think the Port 11160 * Login is valid and there is no way for FCP to tell the transport: "PLOGI even 11161 * if you think it's not necessary". To work around that difficulty, the PRLO 11162 * is treated by the transport as a LOGO. The downside to it is a Port Login 11163 * may be disrupted (if a PLOGI wasn't actually needed) and another ULP (that 11164 * has nothing to do with the PRLO) may be impacted. 
However, this is a 11165 * scenario very unlikely to happen. As of today the only ULP in Leadville 11166 * using PRLI/PRLOs is FCP. For a PRLO to disrupt another ULP (that would be 11167 * FCIP), a SCSI target would have to be running FCP and FCIP (which is very 11168 * unlikely). 11169 */ 11170 static void 11171 fp_handle_unsol_prlo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11172 fc_remote_port_t *pd, job_request_t *job) 11173 { 11174 int busy; 11175 int rval; 11176 int retain; 11177 fp_cmd_t *cmd; 11178 fc_portmap_t *listptr; 11179 boolean_t tolerance; 11180 la_els_prlo_t *req; 11181 11182 req = (la_els_prlo_t *)buf->ub_buffer; 11183 11184 if ((ntohs(req->payload_length) != 11185 (sizeof (service_parameter_page_t) + sizeof (ls_code_t))) || 11186 (req->page_length != sizeof (service_parameter_page_t))) { 11187 /* 11188 * We are being very restrictive. Only on page per 11189 * payload. If it is not the case we reject the ELS although 11190 * we should reply indicating we handle only single page 11191 * per PRLO. 11192 */ 11193 goto fp_reject_prlo; 11194 } 11195 11196 if (ntohs(req->payload_length) > buf->ub_bufsize) { 11197 /* 11198 * This is in case the payload advertizes a size bigger than 11199 * what it really is. 11200 */ 11201 goto fp_reject_prlo; 11202 } 11203 11204 mutex_enter(&port->fp_mutex); 11205 busy = port->fp_statec_busy; 11206 mutex_exit(&port->fp_mutex); 11207 11208 mutex_enter(&pd->pd_mutex); 11209 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11210 if (!busy) { 11211 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11212 pd->pd_state == PORT_DEVICE_INVALID || 11213 pd->pd_flags == PD_ELS_IN_PROGRESS || 11214 pd->pd_type == PORT_DEVICE_OLD) { 11215 busy++; 11216 } 11217 } 11218 11219 if (busy) { 11220 mutex_exit(&pd->pd_mutex); 11221 11222 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11223 "pd=%p - busy", 11224 pd->pd_port_id.port_id, pd); 11225 11226 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11227 goto fp_reject_prlo; 11228 } 11229 } else { 11230 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11231 11232 if (tolerance) { 11233 fctl_tc_reset(&pd->pd_logo_tc); 11234 retain = 0; 11235 pd->pd_state = PORT_DEVICE_INVALID; 11236 } 11237 11238 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11239 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11240 tolerance, retain); 11241 11242 pd->pd_aux_flags |= PD_LOGGED_OUT; 11243 mutex_exit(&pd->pd_mutex); 11244 11245 cmd = fp_prlo_acc_init(port, pd, buf, job, KM_SLEEP); 11246 if (cmd == NULL) { 11247 return; 11248 } 11249 11250 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11251 if (rval != FC_SUCCESS) { 11252 fp_free_pkt(cmd); 11253 return; 11254 } 11255 11256 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11257 11258 if (retain) { 11259 fp_unregister_login(pd); 11260 fctl_copy_portmap(listptr, pd); 11261 } else { 11262 uint32_t d_id; 11263 char ww_name[17]; 11264 11265 mutex_enter(&pd->pd_mutex); 11266 d_id = pd->pd_port_id.port_id; 11267 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11268 mutex_exit(&pd->pd_mutex); 11269 11270 FP_TRACE(FP_NHEAD2(9, 0), 11271 "N_x Port with D_ID=%x, PWWN=%s logged out" 11272 " %d times in %d us; Giving up", d_id, ww_name, 11273 FC_LOGO_TOLERANCE_LIMIT, 11274 FC_LOGO_TOLERANCE_TIME_LIMIT); 11275 11276 fp_fillout_old_map(listptr, pd, 0); 11277 listptr->map_type = PORT_DEVICE_OLD; 11278 } 11279 11280 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11281 return; 11282 } 11283 11284 fp_reject_prlo: 11285 11286 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 0, KM_SLEEP, pd); 11287 if (cmd != NULL) { 11288 fp_els_rjt_init(port, cmd, buf, FC_ACTION_NON_RETRYABLE, 11289 FC_REASON_INVALID_LINK_CTRL, job); 11290 11291 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11292 fp_free_pkt(cmd); 11293 } 11294 } 11295 } 11296 11297 /* 11298 * Unsolicited LOGO handler 11299 */ 11300 static void 11301 fp_handle_unsol_logo(fc_local_port_t *port, fc_unsol_buf_t *buf, 11302 fc_remote_port_t *pd, job_request_t *job) 11303 { 11304 int busy; 11305 int rval; 11306 int retain; 11307 fp_cmd_t *cmd; 11308 fc_portmap_t *listptr; 11309 boolean_t tolerance; 11310 11311 mutex_enter(&port->fp_mutex); 11312 busy = port->fp_statec_busy; 11313 mutex_exit(&port->fp_mutex); 11314 11315 mutex_enter(&pd->pd_mutex); 11316 tolerance = fctl_tc_increment(&pd->pd_logo_tc); 11317 if (!busy) { 11318 if (pd->pd_state != PORT_DEVICE_LOGGED_IN || 11319 pd->pd_state == PORT_DEVICE_INVALID || 11320 pd->pd_flags == PD_ELS_IN_PROGRESS || 11321 pd->pd_type == PORT_DEVICE_OLD) { 11322 busy++; 11323 } 11324 } 11325 11326 if (busy) { 11327 mutex_exit(&pd->pd_mutex); 11328 11329 FP_TRACE(FP_NHEAD1(5, 0), "Logout; D_ID=%x," 11330 "pd=%p - busy", 11331 pd->pd_port_id.port_id, pd); 11332 11333 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11334 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11335 0, KM_SLEEP, pd); 11336 if (cmd != NULL) { 11337 fp_els_rjt_init(port, cmd, buf, 11338 FC_ACTION_NON_RETRYABLE, 11339 FC_REASON_INVALID_LINK_CTRL, job); 11340 11341 if (fp_sendcmd(port, cmd, 11342 port->fp_fca_handle) != FC_SUCCESS) { 11343 fp_free_pkt(cmd); 11344 } 11345 } 11346 } 11347 } else { 11348 retain = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 11349 11350 if (tolerance) { 11351 fctl_tc_reset(&pd->pd_logo_tc); 11352 retain = 0; 11353 pd->pd_state = PORT_DEVICE_INVALID; 11354 } 11355 11356 FP_TRACE(FP_NHEAD1(5, 0), "Accepting LOGO; d_id=%x, pd=%p," 11357 " tolerance=%d retain=%d", pd->pd_port_id.port_id, pd, 11358 tolerance, retain); 11359 11360 pd->pd_aux_flags |= PD_LOGGED_OUT; 11361 mutex_exit(&pd->pd_mutex); 11362 11363 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, 11364 KM_SLEEP, pd); 11365 if (cmd == NULL) { 11366 return; 11367 } 11368 11369 fp_els_acc_init(port, cmd, buf, job); 11370 11371 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 11372 if (rval != FC_SUCCESS) { 11373 fp_free_pkt(cmd); 11374 return; 11375 } 11376 11377 listptr = kmem_zalloc(sizeof (fc_portmap_t), KM_SLEEP); 11378 11379 if (retain) { 11380 job_request_t *job; 11381 fctl_ns_req_t *ns_cmd; 11382 11383 /* 11384 * when get LOGO, first try to get PID from nameserver 11385 * if failed, then we do not need 11386 * send PLOGI to that remote port 11387 */ 11388 job = fctl_alloc_job( 11389 JOB_NS_CMD, 0, NULL, (opaque_t)port, KM_SLEEP); 11390 11391 if (job != NULL) { 11392 ns_cmd = fctl_alloc_ns_cmd( 11393 sizeof (ns_req_gid_pn_t), 11394 sizeof (ns_resp_gid_pn_t), 11395 sizeof (ns_resp_gid_pn_t), 11396 0, KM_SLEEP); 11397 if (ns_cmd != NULL) { 11398 int ret; 11399 job->job_result = FC_SUCCESS; 11400 ns_cmd->ns_cmd_code = NS_GID_PN; 11401 ((ns_req_gid_pn_t *) 11402 (ns_cmd->ns_cmd_buf))->pwwn = 11403 pd->pd_port_name; 11404 ret = fp_ns_query( 11405 port, ns_cmd, job, 1, KM_SLEEP); 11406 if ((ret != FC_SUCCESS) || 11407 (job->job_result != FC_SUCCESS)) { 11408 fctl_free_ns_cmd(ns_cmd); 11409 fctl_dealloc_job(job); 11410 FP_TRACE(FP_NHEAD2(9, 0), 11411 "NS query failed,", 11412 " delete pd"); 11413 goto delete_pd; 11414 } 11415 fctl_free_ns_cmd(ns_cmd); 11416 } 11417 fctl_dealloc_job(job); 11418 } 11419 fp_unregister_login(pd); 11420 fctl_copy_portmap(listptr, pd); 11421 } else { 11422 uint32_t d_id; 11423 char ww_name[17]; 11424 11425 delete_pd: 11426 mutex_enter(&pd->pd_mutex); 11427 d_id = pd->pd_port_id.port_id; 11428 fc_wwn_to_str(&pd->pd_port_name, ww_name); 11429 mutex_exit(&pd->pd_mutex); 11430 11431 FP_TRACE(FP_NHEAD2(9, 0), 11432 "N_x Port with D_ID=%x, PWWN=%s logged out" 11433 " %d times in %d us; Giving up", d_id, ww_name, 11434 FC_LOGO_TOLERANCE_LIMIT, 11435 FC_LOGO_TOLERANCE_TIME_LIMIT); 11436 11437 fp_fillout_old_map(listptr, pd, 0); 11438 listptr->map_type = PORT_DEVICE_OLD; 11439 } 11440 11441 (void) fp_ulp_devc_cb(port, listptr, 1, 1, KM_SLEEP, 0); 11442 } 11443 } 11444 11445 11446 /* 11447 * Perform general purpose preparation of a response to an unsolicited request 11448 */ 11449 static void 11450 fp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf, 11451 uchar_t r_ctl, uchar_t type) 11452 { 11453 pkt->pkt_cmd_fhdr.r_ctl = r_ctl; 11454 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id; 11455 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id; 11456 pkt->pkt_cmd_fhdr.type = type; 11457 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT; 11458 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id; 11459 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl; 11460 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt; 11461 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id; 11462 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id; 11463 pkt->pkt_cmd_fhdr.ro = 0; 11464 pkt->pkt_cmd_fhdr.rsvd = 0; 11465 pkt->pkt_comp = fp_unsol_intr; 11466 pkt->pkt_timeout = FP_ELS_TIMEOUT; 11467 } 11468 11469 /* 11470 * Immediate handling of unsolicited FLOGI and PLOGI 
requests. In the 11471 * early development days of public loop soc+ firmware, numerous problems 11472 * were encountered (the details are undocumented and history now) which 11473 * led to the birth of this function. 11474 * 11475 * If a pre-allocated unsolicited response packet is free, send out an 11476 * immediate response, otherwise submit the request to the port thread 11477 * to do the deferred processing. 11478 */ 11479 static void 11480 fp_i_handle_unsol_els(fc_local_port_t *port, fc_unsol_buf_t *buf) 11481 { 11482 int sent; 11483 int f_port; 11484 int do_acc; 11485 fp_cmd_t *cmd; 11486 la_els_logi_t *payload; 11487 fc_remote_port_t *pd; 11488 char dww_name[17]; 11489 11490 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 11491 11492 cmd = port->fp_els_resp_pkt; 11493 11494 mutex_enter(&port->fp_mutex); 11495 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11496 mutex_exit(&port->fp_mutex); 11497 11498 switch (buf->ub_buffer[0]) { 11499 case LA_ELS_PLOGI: { 11500 int small; 11501 11502 payload = (la_els_logi_t *)buf->ub_buffer; 11503 11504 f_port = FP_IS_F_PORT(payload-> 11505 common_service.cmn_features) ? 1 : 0; 11506 11507 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 11508 &payload->nport_ww_name); 11509 pd = fctl_get_remote_port_by_pwwn(port, 11510 &payload->nport_ww_name); 11511 if (pd) { 11512 mutex_enter(&pd->pd_mutex); 11513 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11514 /* 11515 * Most likely this means a cross login is in 11516 * progress or a device about to be yanked out. 11517 * Only accept the plogi if my wwn is smaller. 11518 */ 11519 if (pd->pd_type == PORT_DEVICE_OLD) { 11520 sent = 1; 11521 } 11522 /* 11523 * Stop plogi request (if any) 11524 * attempt from local side to speedup 11525 * the discovery progress. 11526 * Mark the pd as PD_PLOGI_RECEPIENT. 11527 */ 11528 if (f_port == 0 && small < 0) { 11529 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11530 } 11531 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11532 11533 mutex_exit(&pd->pd_mutex); 11534 11535 FP_TRACE(FP_NHEAD1(3, 0), "fp_i_handle_unsol_els: " 11536 "Unsol PLOGI received. PD still exists in the " 11537 "PWWN list. pd=%p PWWN=%s, sent=%x", 11538 pd, dww_name, sent); 11539 11540 if (f_port == 0 && small < 0) { 11541 FP_TRACE(FP_NHEAD1(3, 0), 11542 "fp_i_handle_unsol_els: Mark the pd" 11543 " as plogi recipient, pd=%p, PWWN=%s" 11544 ", sent=%x", 11545 pd, dww_name, sent); 11546 } 11547 } else { 11548 sent = 0; 11549 } 11550 11551 /* 11552 * To avoid Login collisions, accept only if my WWN 11553 * is smaller than the requester (A curious side note 11554 * would be that this rule may not satisfy the PLOGIs 11555 * initiated by the switch from not-so-well known 11556 * ports such as 0xFFFC41) 11557 */ 11558 if ((f_port == 0 && small < 0) || 11559 (((small > 0 && do_acc) || 11560 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11561 if (fp_is_class_supported(port->fp_cos, 11562 buf->ub_class) == FC_FAILURE) { 11563 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11564 cmd->cmd_pkt.pkt_cmdlen = 11565 sizeof (la_els_rjt_t); 11566 cmd->cmd_pkt.pkt_rsplen = 0; 11567 fp_els_rjt_init(port, cmd, buf, 11568 FC_ACTION_NON_RETRYABLE, 11569 FC_REASON_CLASS_NOT_SUPP, NULL); 11570 FP_TRACE(FP_NHEAD1(3, 0), 11571 "fp_i_handle_unsol_els: " 11572 "Unsupported class. 
" 11573 "Rejecting PLOGI"); 11574 11575 } else { 11576 mutex_enter(&port->fp_mutex); 11577 port->fp_els_resp_pkt_busy = 0; 11578 mutex_exit(&port->fp_mutex); 11579 return; 11580 } 11581 } else { 11582 cmd->cmd_pkt.pkt_cmdlen = 11583 sizeof (la_els_logi_t); 11584 cmd->cmd_pkt.pkt_rsplen = 0; 11585 11586 /* 11587 * Sometime later, we should validate 11588 * the service parameters instead of 11589 * just accepting it. 11590 */ 11591 fp_login_acc_init(port, cmd, buf, NULL, 11592 KM_NOSLEEP); 11593 FP_TRACE(FP_NHEAD1(3, 0), 11594 "fp_i_handle_unsol_els: Accepting PLOGI," 11595 " f_port=%d, small=%d, do_acc=%d," 11596 " sent=%d.", f_port, small, do_acc, 11597 sent); 11598 /* 11599 * If fp_port_id is zero and topology is 11600 * Point-to-Point, get the local port id from 11601 * the d_id in the PLOGI request. 11602 * If the outgoing FLOGI hasn't been accepted, 11603 * the topology will be unknown here. But it's 11604 * still safe to save the d_id to fp_port_id, 11605 * just because it will be overwritten later 11606 * if the topology is not Point-to-Point. 11607 */ 11608 mutex_enter(&port->fp_mutex); 11609 if ((port->fp_port_id.port_id == 0) && 11610 (port->fp_topology == FC_TOP_PT_PT || 11611 port->fp_topology == FC_TOP_UNKNOWN)) { 11612 port->fp_port_id.port_id = 11613 buf->ub_frame.d_id; 11614 } 11615 mutex_exit(&port->fp_mutex); 11616 } 11617 } else { 11618 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11619 port->fp_options & FP_SEND_RJT) { 11620 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11621 cmd->cmd_pkt.pkt_rsplen = 0; 11622 fp_els_rjt_init(port, cmd, buf, 11623 FC_ACTION_NON_RETRYABLE, 11624 FC_REASON_LOGICAL_BSY, NULL); 11625 FP_TRACE(FP_NHEAD1(3, 0), 11626 "fp_i_handle_unsol_els: " 11627 "Rejecting PLOGI with Logical Busy." 11628 "Possible Login collision."); 11629 } else { 11630 mutex_enter(&port->fp_mutex); 11631 port->fp_els_resp_pkt_busy = 0; 11632 mutex_exit(&port->fp_mutex); 11633 return; 11634 } 11635 } 11636 break; 11637 } 11638 11639 case LA_ELS_FLOGI: 11640 if (fp_is_class_supported(port->fp_cos, 11641 buf->ub_class) == FC_FAILURE) { 11642 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11643 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11644 cmd->cmd_pkt.pkt_rsplen = 0; 11645 fp_els_rjt_init(port, cmd, buf, 11646 FC_ACTION_NON_RETRYABLE, 11647 FC_REASON_CLASS_NOT_SUPP, NULL); 11648 FP_TRACE(FP_NHEAD1(3, 0), 11649 "fp_i_handle_unsol_els: " 11650 "Unsupported Class. Rejecting FLOGI."); 11651 } else { 11652 mutex_enter(&port->fp_mutex); 11653 port->fp_els_resp_pkt_busy = 0; 11654 mutex_exit(&port->fp_mutex); 11655 return; 11656 } 11657 } else { 11658 mutex_enter(&port->fp_mutex); 11659 if (FC_PORT_STATE_MASK(port->fp_state) != 11660 FC_STATE_ONLINE || (port->fp_port_id.port_id && 11661 buf->ub_frame.s_id == port->fp_port_id.port_id)) { 11662 mutex_exit(&port->fp_mutex); 11663 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11664 cmd->cmd_pkt.pkt_cmdlen = 11665 sizeof (la_els_rjt_t); 11666 cmd->cmd_pkt.pkt_rsplen = 0; 11667 fp_els_rjt_init(port, cmd, buf, 11668 FC_ACTION_NON_RETRYABLE, 11669 FC_REASON_INVALID_LINK_CTRL, 11670 NULL); 11671 FP_TRACE(FP_NHEAD1(3, 0), 11672 "fp_i_handle_unsol_els: " 11673 "Invalid Link Ctrl. 
" 11674 "Rejecting FLOGI."); 11675 } else { 11676 mutex_enter(&port->fp_mutex); 11677 port->fp_els_resp_pkt_busy = 0; 11678 mutex_exit(&port->fp_mutex); 11679 return; 11680 } 11681 } else { 11682 mutex_exit(&port->fp_mutex); 11683 cmd->cmd_pkt.pkt_cmdlen = 11684 sizeof (la_els_logi_t); 11685 cmd->cmd_pkt.pkt_rsplen = 0; 11686 /* 11687 * Let's not aggressively validate the N_Port's 11688 * service parameters until PLOGI. Suffice it 11689 * to give a hint that we are an N_Port and we 11690 * are game to some serious stuff here. 11691 */ 11692 fp_login_acc_init(port, cmd, buf, 11693 NULL, KM_NOSLEEP); 11694 FP_TRACE(FP_NHEAD1(3, 0), 11695 "fp_i_handle_unsol_els: " 11696 "Accepting FLOGI."); 11697 } 11698 } 11699 break; 11700 11701 default: 11702 return; 11703 } 11704 11705 if ((fp_sendcmd(port, cmd, port->fp_fca_handle)) != FC_SUCCESS) { 11706 mutex_enter(&port->fp_mutex); 11707 port->fp_els_resp_pkt_busy = 0; 11708 mutex_exit(&port->fp_mutex); 11709 } 11710 } 11711 11712 11713 /* 11714 * Handle unsolicited PLOGI request 11715 */ 11716 static void 11717 fp_handle_unsol_plogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11718 job_request_t *job, int sleep) 11719 { 11720 int sent; 11721 int small; 11722 int f_port; 11723 int do_acc; 11724 fp_cmd_t *cmd; 11725 la_wwn_t *swwn; 11726 la_wwn_t *dwwn; 11727 la_els_logi_t *payload; 11728 fc_remote_port_t *pd; 11729 char dww_name[17]; 11730 11731 payload = (la_els_logi_t *)buf->ub_buffer; 11732 f_port = FP_IS_F_PORT(payload->common_service.cmn_features) ? 1 : 0; 11733 11734 mutex_enter(&port->fp_mutex); 11735 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 11736 mutex_exit(&port->fp_mutex); 11737 11738 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: s_id=%x, d_id=%x," 11739 "type=%x, f_ctl=%x" 11740 " seq_id=%x, ox_id=%x, rx_id=%x" 11741 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 11742 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 11743 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 11744 11745 swwn = &port->fp_service_params.nport_ww_name; 11746 dwwn = &payload->nport_ww_name; 11747 small = fctl_wwn_cmp(swwn, dwwn); 11748 pd = fctl_get_remote_port_by_pwwn(port, dwwn); 11749 if (pd) { 11750 mutex_enter(&pd->pd_mutex); 11751 sent = (pd->pd_flags == PD_ELS_IN_PROGRESS) ? 1 : 0; 11752 /* 11753 * Most likely this means a cross login is in 11754 * progress or a device about to be yanked out. 11755 * Only accept the plogi if my wwn is smaller. 11756 */ 11757 11758 if (pd->pd_type == PORT_DEVICE_OLD) { 11759 sent = 1; 11760 } 11761 /* 11762 * Stop plogi request (if any) 11763 * attempt from local side to speedup 11764 * the discovery progress. 11765 * Mark the pd as PD_PLOGI_RECEPIENT. 11766 */ 11767 if (f_port == 0 && small < 0) { 11768 pd->pd_recepient = PD_PLOGI_RECEPIENT; 11769 } 11770 fc_wwn_to_str(&pd->pd_port_name, dww_name); 11771 11772 mutex_exit(&pd->pd_mutex); 11773 11774 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: Unsol PLOGI" 11775 " received. PD still exists in the PWWN list. pd=%p " 11776 "PWWN=%s, sent=%x", pd, dww_name, sent); 11777 11778 if (f_port == 0 && small < 0) { 11779 FP_TRACE(FP_NHEAD1(3, 0), 11780 "fp_handle_unsol_plogi: Mark the pd" 11781 " as plogi recipient, pd=%p, PWWN=%s" 11782 ", sent=%x", 11783 pd, dww_name, sent); 11784 } 11785 } else { 11786 sent = 0; 11787 } 11788 11789 /* 11790 * Avoid Login collisions by accepting only if my WWN is smaller. 
11791 * 11792 * A side note: There is no need to start a PLOGI from this end in 11793 * this context if login isn't going to be accepted for the 11794 * above reason as either a LIP (in private loop), RSCN (in 11795 * fabric topology), or an FLOGI (in point to point - Huh ? 11796 * check FC-PH) would normally drive the PLOGI from this end. 11797 * At this point of time there is no need for an inbound PLOGI 11798 * to kick an outbound PLOGI when it is going to be rejected 11799 * for the reason of WWN being smaller. However it isn't hard 11800 * to do that either (when such a need arises, start a timer 11801 * for a duration that extends beyond a normal device discovery 11802 * time and check if an outbound PLOGI did go before that, if 11803 * none fire one) 11804 * 11805 * Unfortunately, as it turned out, during booting, it is possible 11806 * to miss another initiator in the same loop as port driver 11807 * instances are serially attached. While preserving the above 11808 * comments for belly laughs, please kick an outbound PLOGI in 11809 * a non-switch environment (which is a pt pt between N_Ports or 11810 * a private loop) 11811 * 11812 * While preserving the above comments for amusement, send an 11813 * ACC if the PLOGI is going to be rejected for WWN being smaller 11814 * when no discovery is in progress at this end. Turn around 11815 * and make the port device as the PLOGI initiator, so that 11816 * during subsequent link/loop initialization, this end drives 11817 * the PLOGI (In fact both ends do in this particular case, but 11818 * only one wins) 11819 * 11820 * Make sure the PLOGIs initiated by the switch from not-so-well-known 11821 * ports (such as 0xFFFC41) are accepted too. 11822 */ 11823 if ((f_port == 0 && small < 0) || (((small > 0 && do_acc) || 11824 FC_MUST_ACCEPT_D_ID(buf->ub_frame.s_id)) && sent == 0)) { 11825 if (fp_is_class_supported(port->fp_cos, 11826 buf->ub_class) == FC_FAILURE) { 11827 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11828 cmd = fp_alloc_pkt(port, 11829 sizeof (la_els_logi_t), 0, sleep, pd); 11830 if (cmd == NULL) { 11831 return; 11832 } 11833 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11834 cmd->cmd_pkt.pkt_rsplen = 0; 11835 fp_els_rjt_init(port, cmd, buf, 11836 FC_ACTION_NON_RETRYABLE, 11837 FC_REASON_CLASS_NOT_SUPP, job); 11838 FP_TRACE(FP_NHEAD1(3, 0), 11839 "fp_handle_unsol_plogi: " 11840 "Unsupported class. rejecting PLOGI"); 11841 } 11842 } else { 11843 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11844 0, sleep, pd); 11845 if (cmd == NULL) { 11846 return; 11847 } 11848 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_logi_t); 11849 cmd->cmd_pkt.pkt_rsplen = 0; 11850 11851 /* 11852 * Sometime later, we should validate the service 11853 * parameters instead of just accepting it. 11854 */ 11855 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11856 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11857 "Accepting PLOGI, f_port=%d, small=%d, " 11858 "do_acc=%d, sent=%d.", f_port, small, do_acc, 11859 sent); 11860 11861 /* 11862 * If fp_port_id is zero and topology is 11863 * Point-to-Point, get the local port id from 11864 * the d_id in the PLOGI request. 11865 * If the outgoing FLOGI hasn't been accepted, 11866 * the topology will be unknown here. But it's 11867 * still safe to save the d_id to fp_port_id, 11868 * just because it will be overwritten later 11869 * if the topology is not Point-to-Point. 
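 * (In other words, the d_id the peer used to address this port in its
 * PLOGI is adopted as our port id for now; on anything other than a
 * point-to-point link it simply gets overwritten once the real
 * address is known.)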
11870 */ 11871 mutex_enter(&port->fp_mutex); 11872 if ((port->fp_port_id.port_id == 0) && 11873 (port->fp_topology == FC_TOP_PT_PT || 11874 port->fp_topology == FC_TOP_UNKNOWN)) { 11875 port->fp_port_id.port_id = 11876 buf->ub_frame.d_id; 11877 } 11878 mutex_exit(&port->fp_mutex); 11879 } 11880 } else { 11881 if (FP_IS_CLASS_1_OR_2(buf->ub_class) || 11882 port->fp_options & FP_SEND_RJT) { 11883 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11884 0, sleep, pd); 11885 if (cmd == NULL) { 11886 return; 11887 } 11888 cmd->cmd_pkt.pkt_cmdlen = sizeof (la_els_rjt_t); 11889 cmd->cmd_pkt.pkt_rsplen = 0; 11890 /* 11891 * Send out Logical busy to indicate 11892 * the detection of PLOGI collision 11893 */ 11894 fp_els_rjt_init(port, cmd, buf, 11895 FC_ACTION_NON_RETRYABLE, 11896 FC_REASON_LOGICAL_BSY, job); 11897 11898 fc_wwn_to_str(dwwn, dww_name); 11899 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_plogi: " 11900 "Rejecting Unsol PLOGI with Logical Busy." 11901 "possible PLOGI collision. PWWN=%s, sent=%x", 11902 dww_name, sent); 11903 } else { 11904 return; 11905 } 11906 } 11907 11908 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11909 fp_free_pkt(cmd); 11910 } 11911 } 11912 11913 11914 /* 11915 * Handle mischievous turning over of our own FLOGI requests back to 11916 * us by the SOC+ microcode. In other words, look at the class of such 11917 * bone headed requests, if 1 or 2, bluntly P_RJT them, if 3 drop them 11918 * on the floor 11919 */ 11920 static void 11921 fp_handle_unsol_flogi(fc_local_port_t *port, fc_unsol_buf_t *buf, 11922 job_request_t *job, int sleep) 11923 { 11924 uint32_t state; 11925 uint32_t s_id; 11926 fp_cmd_t *cmd; 11927 11928 if (fp_is_class_supported(port->fp_cos, buf->ub_class) == FC_FAILURE) { 11929 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11930 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11931 0, sleep, NULL); 11932 if (cmd == NULL) { 11933 return; 11934 } 11935 fp_els_rjt_init(port, cmd, buf, 11936 FC_ACTION_NON_RETRYABLE, 11937 FC_REASON_CLASS_NOT_SUPP, job); 11938 } else { 11939 return; 11940 } 11941 } else { 11942 11943 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi:" 11944 " s_id=%x, d_id=%x, type=%x, f_ctl=%x" 11945 " seq_id=%x, ox_id=%x, rx_id=%x, ro=%x", 11946 buf->ub_frame.s_id, buf->ub_frame.d_id, 11947 buf->ub_frame.type, buf->ub_frame.f_ctl, 11948 buf->ub_frame.seq_id, buf->ub_frame.ox_id, 11949 buf->ub_frame.rx_id, buf->ub_frame.ro); 11950 11951 mutex_enter(&port->fp_mutex); 11952 state = FC_PORT_STATE_MASK(port->fp_state); 11953 s_id = port->fp_port_id.port_id; 11954 mutex_exit(&port->fp_mutex); 11955 11956 if (state != FC_STATE_ONLINE || 11957 (s_id && buf->ub_frame.s_id == s_id)) { 11958 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 11959 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 11960 0, sleep, NULL); 11961 if (cmd == NULL) { 11962 return; 11963 } 11964 fp_els_rjt_init(port, cmd, buf, 11965 FC_ACTION_NON_RETRYABLE, 11966 FC_REASON_INVALID_LINK_CTRL, job); 11967 FP_TRACE(FP_NHEAD1(3, 0), 11968 "fp_handle_unsol_flogi: " 11969 "Rejecting PLOGI. Invalid Link CTRL"); 11970 } else { 11971 return; 11972 } 11973 } else { 11974 cmd = fp_alloc_pkt(port, sizeof (la_els_logi_t), 11975 0, sleep, NULL); 11976 if (cmd == NULL) { 11977 return; 11978 } 11979 /* 11980 * Let's not aggressively validate the N_Port's 11981 * service parameters until PLOGI. Suffice it 11982 * to give a hint that we are an N_Port and we 11983 * are game to some serious stuff here. 
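 *
 * (fp_login_acc_init() below just copies this port's fp_service_params
 * into the payload and overwrites the ls_code with LA_ELS_ACC, so the
 * peer gets our standard login parameters back; see that routine for
 * the details.)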
11984 */ 11985 fp_login_acc_init(port, cmd, buf, job, KM_SLEEP); 11986 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_flogi: " 11987 "Accepting PLOGI"); 11988 } 11989 } 11990 11991 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 11992 fp_free_pkt(cmd); 11993 } 11994 } 11995 11996 11997 /* 11998 * Perform PLOGI accept 11999 */ 12000 static void 12001 fp_login_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 12002 job_request_t *job, int sleep) 12003 { 12004 fc_packet_t *pkt; 12005 fc_portmap_t *listptr; 12006 la_els_logi_t payload; 12007 12008 ASSERT(buf != NULL); 12009 12010 /* 12011 * If we are sending ACC to PLOGI and we haven't already 12012 * create port and node device handles, let's create them 12013 * here. 12014 */ 12015 if (buf->ub_buffer[0] == LA_ELS_PLOGI && 12016 FC_IS_REAL_DEVICE(buf->ub_frame.s_id)) { 12017 int small; 12018 int do_acc; 12019 fc_remote_port_t *pd; 12020 la_els_logi_t *req; 12021 12022 req = (la_els_logi_t *)buf->ub_buffer; 12023 small = fctl_wwn_cmp(&port->fp_service_params.nport_ww_name, 12024 &req->nport_ww_name); 12025 12026 mutex_enter(&port->fp_mutex); 12027 do_acc = (port->fp_statec_busy == 0) ? 1 : 0; 12028 mutex_exit(&port->fp_mutex); 12029 12030 pd = fctl_create_remote_port(port, &req->node_ww_name, 12031 &req->nport_ww_name, buf->ub_frame.s_id, 12032 PD_PLOGI_RECEPIENT, sleep); 12033 if (pd == NULL) { 12034 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: " 12035 "Couldn't create port device for d_id:0x%x", 12036 buf->ub_frame.s_id); 12037 12038 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 12039 "couldn't create port device d_id=%x", 12040 buf->ub_frame.s_id); 12041 } else { 12042 /* 12043 * usoc currently returns PLOGIs inline and 12044 * the maximum buffer size is 60 bytes or so. 12045 * So attempt not to look beyond what is in 12046 * the unsolicited buffer 12047 * 12048 * JNI also traverses this path sometimes 12049 */ 12050 if (buf->ub_bufsize >= sizeof (la_els_logi_t)) { 12051 fp_register_login(NULL, pd, req, buf->ub_class); 12052 } else { 12053 mutex_enter(&pd->pd_mutex); 12054 if (pd->pd_login_count == 0) { 12055 pd->pd_login_count++; 12056 } 12057 pd->pd_state = PORT_DEVICE_LOGGED_IN; 12058 pd->pd_login_class = buf->ub_class; 12059 mutex_exit(&pd->pd_mutex); 12060 } 12061 12062 listptr = kmem_zalloc(sizeof (fc_portmap_t), sleep); 12063 if (listptr != NULL) { 12064 fctl_copy_portmap(listptr, pd); 12065 (void) fp_ulp_devc_cb(port, listptr, 12066 1, 1, sleep, 0); 12067 } 12068 12069 if (small > 0 && do_acc) { 12070 mutex_enter(&pd->pd_mutex); 12071 pd->pd_recepient = PD_PLOGI_INITIATOR; 12072 mutex_exit(&pd->pd_mutex); 12073 } 12074 } 12075 } 12076 12077 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 12078 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 12079 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 12080 cmd->cmd_retry_count = 1; 12081 cmd->cmd_ulp_pkt = NULL; 12082 12083 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12084 cmd->cmd_job = job; 12085 12086 pkt = &cmd->cmd_pkt; 12087 12088 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 12089 12090 payload = port->fp_service_params; 12091 payload.ls_code.ls_code = LA_ELS_ACC; 12092 12093 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12094 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12095 12096 FP_TRACE(FP_NHEAD1(3, 0), "login_acc_init: ELS:0x%x d_id:0x%x " 12097 "bufsize:0x%x sizeof(la_els_logi):0x%x " 12098 "port's wwn:0x%01x%03x%04x%08x requestor's wwn:0x%01x%03x%04x%08x " 12099 "statec_busy:0x%x", buf->ub_buffer[0], 
buf->ub_frame.s_id, 12100 buf->ub_bufsize, sizeof (la_els_logi_t), 12101 port->fp_service_params.nport_ww_name.w.naa_id, 12102 port->fp_service_params.nport_ww_name.w.nport_id, 12103 port->fp_service_params.nport_ww_name.w.wwn_hi, 12104 port->fp_service_params.nport_ww_name.w.wwn_lo, 12105 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.naa_id, 12106 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.nport_id, 12107 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_hi, 12108 ((la_els_logi_t *)buf->ub_buffer)->nport_ww_name.w.wwn_lo, 12109 port->fp_statec_busy); 12110 } 12111 12112 12113 #define RSCN_EVENT_NAME_LEN 256 12114 12115 /* 12116 * Handle RSCNs 12117 */ 12118 static void 12119 fp_handle_unsol_rscn(fc_local_port_t *port, fc_unsol_buf_t *buf, 12120 job_request_t *job, int sleep) 12121 { 12122 uint32_t mask; 12123 fp_cmd_t *cmd; 12124 uint32_t count; 12125 int listindex; 12126 int16_t len; 12127 fc_rscn_t *payload; 12128 fc_portmap_t *listptr; 12129 fctl_ns_req_t *ns_cmd; 12130 fc_affected_id_t *page; 12131 caddr_t nvname; 12132 nvlist_t *attr_list = NULL; 12133 12134 mutex_enter(&port->fp_mutex); 12135 if (!FC_IS_TOP_SWITCH(port->fp_topology)) { 12136 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12137 --port->fp_rscn_count; 12138 } 12139 mutex_exit(&port->fp_mutex); 12140 return; 12141 } 12142 mutex_exit(&port->fp_mutex); 12143 12144 cmd = fp_alloc_pkt(port, FP_PORT_IDENTIFIER_LEN, 0, sleep, NULL); 12145 if (cmd != NULL) { 12146 fp_els_acc_init(port, cmd, buf, job); 12147 if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) { 12148 fp_free_pkt(cmd); 12149 } 12150 } 12151 12152 payload = (fc_rscn_t *)buf->ub_buffer; 12153 ASSERT(payload->rscn_code == LA_ELS_RSCN); 12154 ASSERT(payload->rscn_len == FP_PORT_IDENTIFIER_LEN); 12155 12156 len = payload->rscn_payload_len - FP_PORT_IDENTIFIER_LEN; 12157 12158 if (len <= 0) { 12159 mutex_enter(&port->fp_mutex); 12160 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12161 --port->fp_rscn_count; 12162 } 12163 mutex_exit(&port->fp_mutex); 12164 12165 return; 12166 } 12167 12168 ASSERT((len & 0x3) == 0); /* Must be power of 4 */ 12169 count = (len >> 2) << 1; /* number of pages multiplied by 2 */ 12170 12171 listptr = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 12172 page = (fc_affected_id_t *)(buf->ub_buffer + sizeof (fc_rscn_t)); 12173 12174 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12175 12176 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpn_id_t), 12177 sizeof (ns_resp_gpn_id_t), sizeof (ns_resp_gpn_id_t), 12178 0, sleep); 12179 if (ns_cmd == NULL) { 12180 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12181 12182 mutex_enter(&port->fp_mutex); 12183 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 12184 --port->fp_rscn_count; 12185 } 12186 mutex_exit(&port->fp_mutex); 12187 12188 return; 12189 } 12190 12191 ns_cmd->ns_cmd_code = NS_GPN_ID; 12192 12193 FP_TRACE(FP_NHEAD1(3, 0), "fp_handle_unsol_rscn: s_id=%x, d_id=%x," 12194 "type=%x, f_ctl=%x seq_id=%x, ox_id=%x, rx_id=%x" 12195 " ro=%x", buf->ub_frame.s_id, buf->ub_frame.d_id, 12196 buf->ub_frame.type, buf->ub_frame.f_ctl, buf->ub_frame.seq_id, 12197 buf->ub_frame.ox_id, buf->ub_frame.rx_id, buf->ub_frame.ro); 12198 12199 /* Only proceed if we can allocate nvname and the nvlist */ 12200 if ((nvname = kmem_zalloc(RSCN_EVENT_NAME_LEN, KM_NOSLEEP)) != NULL && 12201 nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE, 12202 KM_NOSLEEP) == DDI_SUCCESS) { 12203 if (!(attr_list && nvlist_add_uint32(attr_list, "instance", 12204 port->fp_instance) == 
DDI_SUCCESS && 12205 nvlist_add_byte_array(attr_list, "port-wwn", 12206 port->fp_service_params.nport_ww_name.raw_wwn, 12207 sizeof (la_wwn_t)) == DDI_SUCCESS)) { 12208 nvlist_free(attr_list); 12209 attr_list = NULL; 12210 } 12211 } 12212 12213 for (listindex = 0; len; len -= FP_PORT_IDENTIFIER_LEN, page++) { 12214 /* Add affected page to the event payload */ 12215 if (attr_list != NULL) { 12216 (void) snprintf(nvname, RSCN_EVENT_NAME_LEN, 12217 "affected_page_%d", listindex); 12218 if (attr_list && nvlist_add_uint32(attr_list, nvname, 12219 ntohl(*(uint32_t *)page)) != DDI_SUCCESS) { 12220 /* We don't want to send a partial event, so dump it */ 12221 nvlist_free(attr_list); 12222 attr_list = NULL; 12223 } 12224 } 12225 /* 12226 * Query the NS to get the Port WWN for this 12227 * affected D_ID. 12228 */ 12229 mask = 0; 12230 switch (page->aff_format & FC_RSCN_ADDRESS_MASK) { 12231 case FC_RSCN_PORT_ADDRESS: 12232 fp_validate_rscn_page(port, page, job, ns_cmd, 12233 listptr, &listindex, sleep); 12234 12235 if (listindex == 0) { 12236 /* 12237 * We essentially did not process this RSCN. So, 12238 * ULPs are not going to be called and so we 12239 * decrement the rscn_count 12240 */ 12241 mutex_enter(&port->fp_mutex); 12242 if (--port->fp_rscn_count == 12243 FC_INVALID_RSCN_COUNT) { 12244 --port->fp_rscn_count; 12245 } 12246 mutex_exit(&port->fp_mutex); 12247 } 12248 break; 12249 12250 case FC_RSCN_AREA_ADDRESS: 12251 mask = 0xFFFF00; 12252 /* FALLTHROUGH */ 12253 12254 case FC_RSCN_DOMAIN_ADDRESS: 12255 if (!mask) { 12256 mask = 0xFF0000; 12257 } 12258 fp_validate_area_domain(port, page->aff_d_id, mask, 12259 job, sleep); 12260 break; 12261 12262 case FC_RSCN_FABRIC_ADDRESS: 12263 /* 12264 * We need to discover all the devices on this 12265 * port. 12266 */ 12267 fp_validate_area_domain(port, 0, 0, job, sleep); 12268 break; 12269 12270 default: 12271 break; 12272 } 12273 } 12274 if (attr_list != NULL) { 12275 (void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, 12276 EC_SUNFC, ESC_SUNFC_PORT_RSCN, attr_list, 12277 NULL, DDI_SLEEP); 12278 nvlist_free(attr_list); 12279 } else { 12280 FP_TRACE(FP_NHEAD1(9, 0), 12281 "RSCN handled, but event not sent to userland"); 12282 } 12283 if (nvname != NULL) { 12284 kmem_free(nvname, RSCN_EVENT_NAME_LEN); 12285 } 12286 12287 if (ns_cmd) { 12288 fctl_free_ns_cmd(ns_cmd); 12289 } 12290 12291 if (listindex) { 12292 #ifdef DEBUG 12293 page = (fc_affected_id_t *)(buf->ub_buffer + 12294 sizeof (fc_rscn_t)); 12295 12296 if (listptr->map_did.port_id != page->aff_d_id) { 12297 FP_TRACE(FP_NHEAD1(9, 0), 12298 "PORT RSCN: processed=%x, reporting=%x", 12299 listptr->map_did.port_id, page->aff_d_id); 12300 } 12301 #endif 12302 12303 (void) fp_ulp_devc_cb(port, listptr, listindex, count, 12304 sleep, 0); 12305 } else { 12306 kmem_free(listptr, sizeof (fc_portmap_t) * count); 12307 } 12308 } 12309 12310 12311 /* 12312 * Fill out old map for ULPs with fp_mutex, fd_mutex and pd_mutex held 12313 */ 12314 static void 12315 fp_fillout_old_map_held(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12316 { 12317 int is_switch; 12318 int initiator; 12319 fc_local_port_t *port; 12320 12321 port = pd->pd_port; 12322 12323 /* This function has the following bunch of assumptions */ 12324 ASSERT(port != NULL); 12325 ASSERT(MUTEX_HELD(&port->fp_mutex)); 12326 ASSERT(MUTEX_HELD(&pd->pd_remote_nodep->fd_mutex)); 12327 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12328 12329 pd->pd_state = PORT_DEVICE_INVALID; 12330 pd->pd_type = PORT_DEVICE_OLD; 12331 initiator = (pd->pd_recepient == 
PD_PLOGI_INITIATOR) ? 1 : 0; 12332 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12333 12334 fctl_delist_did_table(port, pd); 12335 fctl_delist_pwwn_table(port, pd); 12336 12337 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map_held: port=%p, d_id=%x" 12338 " removed the PD=%p from DID and PWWN tables", 12339 port, pd->pd_port_id.port_id, pd); 12340 12341 if ((!flag) && port && initiator && is_switch) { 12342 (void) fctl_add_orphan_held(port, pd); 12343 } 12344 fctl_copy_portmap_held(map, pd); 12345 map->map_pd = pd; 12346 } 12347 12348 /* 12349 * Fill out old map for ULPs 12350 */ 12351 static void 12352 fp_fillout_old_map(fc_portmap_t *map, fc_remote_port_t *pd, uchar_t flag) 12353 { 12354 int is_switch; 12355 int initiator; 12356 fc_local_port_t *port; 12357 12358 mutex_enter(&pd->pd_mutex); 12359 port = pd->pd_port; 12360 mutex_exit(&pd->pd_mutex); 12361 12362 mutex_enter(&port->fp_mutex); 12363 mutex_enter(&pd->pd_mutex); 12364 12365 pd->pd_state = PORT_DEVICE_INVALID; 12366 pd->pd_type = PORT_DEVICE_OLD; 12367 initiator = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 1 : 0; 12368 is_switch = FC_IS_TOP_SWITCH(port->fp_topology); 12369 12370 fctl_delist_did_table(port, pd); 12371 fctl_delist_pwwn_table(port, pd); 12372 12373 FP_TRACE(FP_NHEAD1(6, 0), "fp_fillout_old_map: port=%p, d_id=%x" 12374 " removed the PD=%p from DID and PWWN tables", 12375 port, pd->pd_port_id.port_id, pd); 12376 12377 mutex_exit(&pd->pd_mutex); 12378 mutex_exit(&port->fp_mutex); 12379 12380 ASSERT(port != NULL); 12381 if ((!flag) && port && initiator && is_switch) { 12382 (void) fctl_add_orphan(port, pd, KM_NOSLEEP); 12383 } 12384 fctl_copy_portmap(map, pd); 12385 map->map_pd = pd; 12386 } 12387 12388 12389 /* 12390 * Fillout Changed Map for ULPs 12391 */ 12392 static void 12393 fp_fillout_changed_map(fc_portmap_t *map, fc_remote_port_t *pd, 12394 uint32_t *new_did, la_wwn_t *new_pwwn) 12395 { 12396 ASSERT(MUTEX_HELD(&pd->pd_mutex)); 12397 12398 pd->pd_type = PORT_DEVICE_CHANGED; 12399 if (new_did) { 12400 pd->pd_port_id.port_id = *new_did; 12401 } 12402 if (new_pwwn) { 12403 pd->pd_port_name = *new_pwwn; 12404 } 12405 mutex_exit(&pd->pd_mutex); 12406 12407 fctl_copy_portmap(map, pd); 12408 12409 mutex_enter(&pd->pd_mutex); 12410 pd->pd_type = PORT_DEVICE_NOCHANGE; 12411 } 12412 12413 12414 /* 12415 * Fillout New Name Server map 12416 */ 12417 static void 12418 fp_fillout_new_nsmap(fc_local_port_t *port, ddi_acc_handle_t *handle, 12419 fc_portmap_t *port_map, ns_resp_gan_t *gan_resp, uint32_t d_id) 12420 { 12421 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12422 12423 if (handle) { 12424 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_pwwn, 12425 (uint8_t *)&gan_resp->gan_pwwn, sizeof (gan_resp->gan_pwwn), 12426 DDI_DEV_AUTOINCR); 12427 ddi_rep_get8(*handle, (uint8_t *)&port_map->map_nwwn, 12428 (uint8_t *)&gan_resp->gan_nwwn, sizeof (gan_resp->gan_nwwn), 12429 DDI_DEV_AUTOINCR); 12430 ddi_rep_get8(*handle, (uint8_t *)port_map->map_fc4_types, 12431 (uint8_t *)gan_resp->gan_fc4types, 12432 sizeof (gan_resp->gan_fc4types), DDI_DEV_AUTOINCR); 12433 } else { 12434 bcopy(&gan_resp->gan_pwwn, &port_map->map_pwwn, 12435 sizeof (gan_resp->gan_pwwn)); 12436 bcopy(&gan_resp->gan_nwwn, &port_map->map_nwwn, 12437 sizeof (gan_resp->gan_nwwn)); 12438 bcopy(gan_resp->gan_fc4types, port_map->map_fc4_types, 12439 sizeof (gan_resp->gan_fc4types)); 12440 } 12441 port_map->map_did.port_id = d_id; 12442 port_map->map_did.priv_lilp_posit = 0; 12443 port_map->map_hard_addr.hard_addr = 0; 12444 port_map->map_hard_addr.rsvd = 0; 12445 
port_map->map_state = PORT_DEVICE_INVALID; 12446 port_map->map_type = PORT_DEVICE_NEW; 12447 port_map->map_flags = 0; 12448 port_map->map_pd = NULL; 12449 12450 (void) fctl_remove_if_orphan(port, &port_map->map_pwwn); 12451 12452 ASSERT(port != NULL); 12453 } 12454 12455 12456 /* 12457 * Perform LINIT ELS 12458 */ 12459 static int 12460 fp_remote_lip(fc_local_port_t *port, la_wwn_t *pwwn, int sleep, 12461 job_request_t *job) 12462 { 12463 int rval; 12464 uint32_t d_id; 12465 uint32_t s_id; 12466 uint32_t lfa; 12467 uchar_t class; 12468 uint32_t ret; 12469 fp_cmd_t *cmd; 12470 fc_porttype_t ptype; 12471 fc_packet_t *pkt; 12472 fc_linit_req_t payload; 12473 fc_remote_port_t *pd; 12474 12475 rval = 0; 12476 12477 ASSERT(job != NULL); 12478 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12479 12480 pd = fctl_get_remote_port_by_pwwn(port, pwwn); 12481 if (pd == NULL) { 12482 fctl_ns_req_t *ns_cmd; 12483 12484 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 12485 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 12486 0, sleep); 12487 12488 if (ns_cmd == NULL) { 12489 return (FC_NOMEM); 12490 } 12491 job->job_result = FC_SUCCESS; 12492 ns_cmd->ns_cmd_code = NS_GID_PN; 12493 ((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = *pwwn; 12494 12495 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12496 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12497 fctl_free_ns_cmd(ns_cmd); 12498 return (FC_FAILURE); 12499 } 12500 bcopy(ns_cmd->ns_data_buf, (caddr_t)&d_id, sizeof (d_id)); 12501 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 12502 12503 fctl_free_ns_cmd(ns_cmd); 12504 lfa = d_id & 0xFFFF00; 12505 12506 /* 12507 * Given this D_ID, get the port type to see if 12508 * we can do LINIT on the LFA 12509 */ 12510 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gpt_id_t), 12511 sizeof (ns_resp_gpt_id_t), sizeof (ns_resp_gpt_id_t), 12512 0, sleep); 12513 12514 if (ns_cmd == NULL) { 12515 return (FC_NOMEM); 12516 } 12517 12518 job->job_result = FC_SUCCESS; 12519 ns_cmd->ns_cmd_code = NS_GPT_ID; 12520 12521 ((ns_req_gpt_id_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id; 12522 ((ns_req_gpt_id_t *) 12523 (ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 12524 12525 ret = fp_ns_query(port, ns_cmd, job, 1, sleep); 12526 if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) { 12527 fctl_free_ns_cmd(ns_cmd); 12528 return (FC_FAILURE); 12529 } 12530 bcopy(ns_cmd->ns_data_buf, (caddr_t)&ptype, sizeof (ptype)); 12531 12532 fctl_free_ns_cmd(ns_cmd); 12533 12534 switch (ptype.port_type) { 12535 case FC_NS_PORT_NL: 12536 case FC_NS_PORT_F_NL: 12537 case FC_NS_PORT_FL: 12538 break; 12539 12540 default: 12541 return (FC_FAILURE); 12542 } 12543 } else { 12544 mutex_enter(&pd->pd_mutex); 12545 ptype = pd->pd_porttype; 12546 12547 switch (pd->pd_porttype.port_type) { 12548 case FC_NS_PORT_NL: 12549 case FC_NS_PORT_F_NL: 12550 case FC_NS_PORT_FL: 12551 lfa = pd->pd_port_id.port_id & 0xFFFF00; 12552 break; 12553 12554 default: 12555 mutex_exit(&pd->pd_mutex); 12556 return (FC_FAILURE); 12557 } 12558 mutex_exit(&pd->pd_mutex); 12559 } 12560 12561 mutex_enter(&port->fp_mutex); 12562 s_id = port->fp_port_id.port_id; 12563 class = port->fp_ns_login_class; 12564 mutex_exit(&port->fp_mutex); 12565 12566 cmd = fp_alloc_pkt(port, sizeof (fc_linit_req_t), 12567 sizeof (fc_linit_resp_t), sleep, pd); 12568 if (cmd == NULL) { 12569 return (FC_NOMEM); 12570 } 12571 12572 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12573 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12574 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 
12575 cmd->cmd_retry_count = fp_retry_count; 12576 cmd->cmd_ulp_pkt = NULL; 12577 12578 pkt = &cmd->cmd_pkt; 12579 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 12580 12581 fp_els_init(cmd, s_id, lfa, fp_linit_intr, job); 12582 12583 /* 12584 * How does LIP work by the way ? 12585 * If the L_Port receives three consecutive identical ordered 12586 * sets whose first two characters (fully decoded) are equal to 12587 * the values shown in Table 3 of FC-AL-2 then the L_Port shall 12588 * recognize a Loop Initialization Primitive sequence. The 12589 * character 3 determines the type of lip: 12590 * LIP(F7) Normal LIP 12591 * LIP(F8) Loop Failure LIP 12592 * 12593 * The possible combination for the 3rd and 4th bytes are: 12594 * F7, F7 Normal Lip - No valid AL_PA 12595 * F8, F8 Loop Failure - No valid AL_PA 12596 * F7, AL_PS Normal Lip - Valid source AL_PA 12597 * F8, AL_PS Loop Failure - Valid source AL_PA 12598 * AL_PD AL_PS Loop reset of AL_PD originated by AL_PS 12599 * And Normal Lip for all other loop members 12600 * 0xFF AL_PS Vendor specific reset of all loop members 12601 * 12602 * Now, it may not always be that we, at the source, may have an 12603 * AL_PS (AL_PA of source) for 4th character slot, so we decide 12604 * to do (Normal Lip, No Valid AL_PA), that means, in the LINIT 12605 * payload we are going to set: 12606 * lip_b3 = 0xF7; Normal LIP 12607 * lip_b4 = 0xF7; No valid source AL_PA 12608 */ 12609 payload.ls_code.ls_code = LA_ELS_LINIT; 12610 payload.ls_code.mbz = 0; 12611 payload.rsvd = 0; 12612 payload.func = 0; /* Let Fabric determine the best way */ 12613 payload.lip_b3 = 0xF7; /* Normal LIP */ 12614 payload.lip_b4 = 0xF7; /* No valid source AL_PA */ 12615 12616 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 12617 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 12618 12619 job->job_counter = 1; 12620 12621 ret = fp_sendcmd(port, cmd, port->fp_fca_handle); 12622 if (ret == FC_SUCCESS) { 12623 fp_jobwait(job); 12624 rval = job->job_result; 12625 } else { 12626 rval = FC_FAILURE; 12627 fp_free_pkt(cmd); 12628 } 12629 12630 return (rval); 12631 } 12632 12633 12634 /* 12635 * Fill out the device handles with GAN response 12636 */ 12637 static void 12638 fp_stuff_device_with_gan(ddi_acc_handle_t *handle, fc_remote_port_t *pd, 12639 ns_resp_gan_t *gan_resp) 12640 { 12641 fc_remote_node_t *node; 12642 fc_porttype_t type; 12643 fc_local_port_t *port; 12644 12645 ASSERT(pd != NULL); 12646 ASSERT(handle != NULL); 12647 12648 port = pd->pd_port; 12649 12650 FP_TRACE(FP_NHEAD1(1, 0), "GAN PD stuffing; pd=%p," 12651 " port_id=%x, sym_len=%d fc4-type=%x", 12652 pd, gan_resp->gan_type_id.rsvd, 12653 gan_resp->gan_spnlen, gan_resp->gan_fc4types[0]); 12654 12655 mutex_enter(&pd->pd_mutex); 12656 12657 ddi_rep_get8(*handle, (uint8_t *)&type, 12658 (uint8_t *)&gan_resp->gan_type_id, sizeof (type), DDI_DEV_AUTOINCR); 12659 12660 pd->pd_porttype.port_type = type.port_type; 12661 pd->pd_porttype.rsvd = 0; 12662 12663 pd->pd_spn_len = gan_resp->gan_spnlen; 12664 if (pd->pd_spn_len) { 12665 ddi_rep_get8(*handle, (uint8_t *)pd->pd_spn, 12666 (uint8_t *)gan_resp->gan_spname, pd->pd_spn_len, 12667 DDI_DEV_AUTOINCR); 12668 } 12669 12670 ddi_rep_get8(*handle, (uint8_t *)pd->pd_ip_addr, 12671 (uint8_t *)gan_resp->gan_ip, sizeof (pd->pd_ip_addr), 12672 DDI_DEV_AUTOINCR); 12673 ddi_rep_get8(*handle, (uint8_t *)&pd->pd_cos, 12674 (uint8_t *)&gan_resp->gan_cos, sizeof (pd->pd_cos), 12675 DDI_DEV_AUTOINCR); 12676 ddi_rep_get8(*handle, (uint8_t *)pd->pd_fc4types, 12677 (uint8_t 
*)gan_resp->gan_fc4types, sizeof (pd->pd_fc4types), 12678 DDI_DEV_AUTOINCR); 12679 12680 node = pd->pd_remote_nodep; 12681 mutex_exit(&pd->pd_mutex); 12682 12683 mutex_enter(&node->fd_mutex); 12684 12685 ddi_rep_get8(*handle, (uint8_t *)node->fd_ipa, 12686 (uint8_t *)gan_resp->gan_ipa, sizeof (node->fd_ipa), 12687 DDI_DEV_AUTOINCR); 12688 12689 node->fd_snn_len = gan_resp->gan_snnlen; 12690 if (node->fd_snn_len) { 12691 ddi_rep_get8(*handle, (uint8_t *)node->fd_snn, 12692 (uint8_t *)gan_resp->gan_snname, node->fd_snn_len, 12693 DDI_DEV_AUTOINCR); 12694 } 12695 12696 mutex_exit(&node->fd_mutex); 12697 } 12698 12699 12700 /* 12701 * Handles all NS Queries (also means that this function 12702 * doesn't handle NS object registration) 12703 */ 12704 static int 12705 fp_ns_query(fc_local_port_t *port, fctl_ns_req_t *ns_cmd, job_request_t *job, 12706 int polled, int sleep) 12707 { 12708 int rval; 12709 fp_cmd_t *cmd; 12710 12711 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12712 12713 if (ns_cmd->ns_cmd_size == 0) { 12714 return (FC_FAILURE); 12715 } 12716 12717 cmd = fp_alloc_pkt(port, sizeof (fc_ct_header_t) + 12718 ns_cmd->ns_cmd_size, sizeof (fc_ct_header_t) + 12719 ns_cmd->ns_resp_size, sleep, NULL); 12720 if (cmd == NULL) { 12721 return (FC_NOMEM); 12722 } 12723 12724 fp_ct_init(port, cmd, ns_cmd, ns_cmd->ns_cmd_code, ns_cmd->ns_cmd_buf, 12725 ns_cmd->ns_cmd_size, ns_cmd->ns_resp_size, job); 12726 12727 if (polled) { 12728 job->job_counter = 1; 12729 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 12730 } 12731 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 12732 if (rval != FC_SUCCESS) { 12733 job->job_result = rval; 12734 fp_iodone(cmd); 12735 if (polled == 0) { 12736 /* 12737 * Return FC_SUCCESS to indicate that 12738 * fp_iodone is performed already. 
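			 * For an async request the failure has therefore
			 * already been consumed on the caller's behalf;
			 * reporting it here as well would typically make the
			 * caller complete the job a second time (the async
			 * callers of fp_ns_validate_device(), for example,
			 * call fp_jobdone() themselves on any failure).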
12739 */ 12740 rval = FC_SUCCESS; 12741 } 12742 } 12743 12744 if (polled) { 12745 fp_jobwait(job); 12746 rval = job->job_result; 12747 } 12748 12749 return (rval); 12750 } 12751 12752 12753 /* 12754 * Initialize Common Transport request 12755 */ 12756 static void 12757 fp_ct_init(fc_local_port_t *port, fp_cmd_t *cmd, fctl_ns_req_t *ns_cmd, 12758 uint16_t cmd_code, caddr_t cmd_buf, uint16_t cmd_len, 12759 uint16_t resp_len, job_request_t *job) 12760 { 12761 uint32_t s_id; 12762 uchar_t class; 12763 fc_packet_t *pkt; 12764 fc_ct_header_t ct; 12765 12766 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 12767 12768 mutex_enter(&port->fp_mutex); 12769 s_id = port->fp_port_id.port_id; 12770 class = port->fp_ns_login_class; 12771 mutex_exit(&port->fp_mutex); 12772 12773 cmd->cmd_job = job; 12774 cmd->cmd_private = ns_cmd; 12775 pkt = &cmd->cmd_pkt; 12776 12777 ct.ct_rev = CT_REV; 12778 ct.ct_inid = 0; 12779 ct.ct_fcstype = FCSTYPE_DIRECTORY; 12780 ct.ct_fcssubtype = FCSSUB_DS_NAME_SERVER; 12781 ct.ct_options = 0; 12782 ct.ct_reserved1 = 0; 12783 ct.ct_cmdrsp = cmd_code; 12784 ct.ct_aiusize = resp_len >> 2; 12785 ct.ct_reserved2 = 0; 12786 ct.ct_reason = 0; 12787 ct.ct_expln = 0; 12788 ct.ct_vendor = 0; 12789 12790 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&ct, (uint8_t *)pkt->pkt_cmd, 12791 sizeof (ct), DDI_DEV_AUTOINCR); 12792 12793 pkt->pkt_cmd_fhdr.r_ctl = R_CTL_UNSOL_CONTROL; 12794 pkt->pkt_cmd_fhdr.d_id = 0xFFFFFC; 12795 pkt->pkt_cmd_fhdr.s_id = s_id; 12796 pkt->pkt_cmd_fhdr.type = FC_TYPE_FC_SERVICES; 12797 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_SEQ_INITIATIVE | 12798 F_CTL_FIRST_SEQ | F_CTL_END_SEQ; 12799 pkt->pkt_cmd_fhdr.seq_id = 0; 12800 pkt->pkt_cmd_fhdr.df_ctl = 0; 12801 pkt->pkt_cmd_fhdr.seq_cnt = 0; 12802 pkt->pkt_cmd_fhdr.ox_id = 0xffff; 12803 pkt->pkt_cmd_fhdr.rx_id = 0xffff; 12804 pkt->pkt_cmd_fhdr.ro = 0; 12805 pkt->pkt_cmd_fhdr.rsvd = 0; 12806 12807 pkt->pkt_comp = fp_ns_intr; 12808 pkt->pkt_ulp_private = (opaque_t)cmd; 12809 pkt->pkt_timeout = FP_NS_TIMEOUT; 12810 12811 if (cmd_buf) { 12812 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)cmd_buf, 12813 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 12814 cmd_len, DDI_DEV_AUTOINCR); 12815 } 12816 12817 cmd->cmd_transport = port->fp_fca_tran->fca_transport; 12818 12819 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | class; 12820 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 12821 cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE; 12822 cmd->cmd_retry_count = fp_retry_count; 12823 cmd->cmd_ulp_pkt = NULL; 12824 } 12825 12826 12827 /* 12828 * Name Server request interrupt routine 12829 */ 12830 static void 12831 fp_ns_intr(fc_packet_t *pkt) 12832 { 12833 fp_cmd_t *cmd; 12834 fc_local_port_t *port; 12835 fc_ct_header_t resp_hdr; 12836 fc_ct_header_t cmd_hdr; 12837 fctl_ns_req_t *ns_cmd; 12838 12839 cmd = pkt->pkt_ulp_private; 12840 port = cmd->cmd_port; 12841 12842 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&cmd_hdr, 12843 (uint8_t *)pkt->pkt_cmd, sizeof (cmd_hdr), DDI_DEV_AUTOINCR); 12844 12845 ns_cmd = (fctl_ns_req_t *) 12846 (((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private); 12847 12848 if (!FP_IS_PKT_ERROR(pkt)) { 12849 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&resp_hdr, 12850 (uint8_t *)pkt->pkt_resp, sizeof (resp_hdr), 12851 DDI_DEV_AUTOINCR); 12852 12853 /* 12854 * On x86 architectures, make sure the resp_hdr is big endian. 12855 * This macro is a NOP on sparc architectures mainly because 12856 * we don't want to end up wasting time since the end result 12857 * is going to be the same. 
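		 * The CT header is defined big endian on the wire; on a
		 * little endian host the raw bytes copied in above would
		 * otherwise yield swapped field values, and the FS_ACC_IU
		 * and FS_RJT_IU checks on ct_cmdrsp below would misfire.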
12858 */ 12859 MAKE_BE_32(&resp_hdr); 12860 12861 if (ns_cmd) { 12862 /* 12863 * Always copy out the response CT_HDR 12864 */ 12865 bcopy(&resp_hdr, &ns_cmd->ns_resp_hdr, 12866 sizeof (resp_hdr)); 12867 } 12868 12869 if (resp_hdr.ct_cmdrsp == FS_RJT_IU) { 12870 pkt->pkt_state = FC_PKT_FS_RJT; 12871 pkt->pkt_reason = resp_hdr.ct_reason; 12872 pkt->pkt_expln = resp_hdr.ct_expln; 12873 } 12874 } 12875 12876 if (FP_IS_PKT_ERROR(pkt)) { 12877 if (ns_cmd) { 12878 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 12879 ASSERT(ns_cmd->ns_pd != NULL); 12880 12881 /* Mark it OLD if not already done */ 12882 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 12883 ns_cmd->ns_pd->pd_type = PORT_DEVICE_OLD; 12884 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 12885 } 12886 12887 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 12888 fctl_free_ns_cmd(ns_cmd); 12889 ((fp_cmd_t *) 12890 (pkt->pkt_ulp_private))->cmd_private = NULL; 12891 } 12892 12893 } 12894 12895 FP_TRACE(FP_NHEAD1(4, 0), "NS failure; pkt state=%x reason=%x", 12896 pkt->pkt_state, pkt->pkt_reason); 12897 12898 (void) fp_common_intr(pkt, 1); 12899 12900 return; 12901 } 12902 12903 if (resp_hdr.ct_cmdrsp != FS_ACC_IU) { 12904 uint32_t d_id; 12905 fc_local_port_t *port; 12906 fp_cmd_t *cmd; 12907 12908 d_id = pkt->pkt_cmd_fhdr.d_id; 12909 cmd = pkt->pkt_ulp_private; 12910 port = cmd->cmd_port; 12911 FP_TRACE(FP_NHEAD2(9, 0), 12912 "Bogus NS response received for D_ID=%x", d_id); 12913 } 12914 12915 if (cmd_hdr.ct_cmdrsp == NS_GA_NXT) { 12916 fp_gan_handler(pkt, ns_cmd); 12917 return; 12918 } 12919 12920 if (cmd_hdr.ct_cmdrsp >= NS_GPN_ID && 12921 cmd_hdr.ct_cmdrsp <= NS_GID_PT) { 12922 if (ns_cmd) { 12923 if ((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0) { 12924 fp_ns_query_handler(pkt, ns_cmd); 12925 return; 12926 } 12927 } 12928 } 12929 12930 fp_iodone(pkt->pkt_ulp_private); 12931 } 12932 12933 12934 /* 12935 * Process NS_GAN response 12936 */ 12937 static void 12938 fp_gan_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 12939 { 12940 int my_did; 12941 fc_portid_t d_id; 12942 fp_cmd_t *cmd; 12943 fc_local_port_t *port; 12944 fc_remote_port_t *pd; 12945 ns_req_gan_t gan_req; 12946 ns_resp_gan_t *gan_resp; 12947 12948 ASSERT(ns_cmd != NULL); 12949 12950 cmd = pkt->pkt_ulp_private; 12951 port = cmd->cmd_port; 12952 12953 gan_resp = (ns_resp_gan_t *)(pkt->pkt_resp + sizeof (fc_ct_header_t)); 12954 12955 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&d_id, 12956 (uint8_t *)&gan_resp->gan_type_id, sizeof (d_id), DDI_DEV_AUTOINCR); 12957 12958 *(uint32_t *)&d_id = BE_32(*(uint32_t *)&d_id); 12959 12960 /* 12961 * In this case the priv_lilp_posit field in reality 12962 * is actually represents the relative position on a private loop. 12963 * So zero it while dealing with Port Identifiers. 12964 */ 12965 d_id.priv_lilp_posit = 0; 12966 pd = fctl_get_remote_port_by_did(port, d_id.port_id); 12967 if (ns_cmd->ns_gan_sid == d_id.port_id) { 12968 /* 12969 * We've come a full circle; time to get out. 12970 */ 12971 fp_iodone(cmd); 12972 return; 12973 } 12974 12975 if (ns_cmd->ns_gan_sid == FCTL_GAN_START_ID) { 12976 ns_cmd->ns_gan_sid = d_id.port_id; 12977 } 12978 12979 mutex_enter(&port->fp_mutex); 12980 my_did = (d_id.port_id == port->fp_port_id.port_id) ? 
1 : 0; 12981 mutex_exit(&port->fp_mutex); 12982 12983 FP_TRACE(FP_NHEAD1(1, 0), "GAN response; port=%p, d_id=%x", port, 12984 d_id.port_id); 12985 12986 if (my_did == 0) { 12987 la_wwn_t pwwn; 12988 la_wwn_t nwwn; 12989 12990 FP_TRACE(FP_NHEAD1(1, 0), "GAN response details; " 12991 "port=%p, d_id=%x, type_id=%x, " 12992 "pwwn=%x %x %x %x %x %x %x %x, " 12993 "nwwn=%x %x %x %x %x %x %x %x", 12994 port, d_id.port_id, gan_resp->gan_type_id, 12995 12996 gan_resp->gan_pwwn.raw_wwn[0], 12997 gan_resp->gan_pwwn.raw_wwn[1], 12998 gan_resp->gan_pwwn.raw_wwn[2], 12999 gan_resp->gan_pwwn.raw_wwn[3], 13000 gan_resp->gan_pwwn.raw_wwn[4], 13001 gan_resp->gan_pwwn.raw_wwn[5], 13002 gan_resp->gan_pwwn.raw_wwn[6], 13003 gan_resp->gan_pwwn.raw_wwn[7], 13004 13005 gan_resp->gan_nwwn.raw_wwn[0], 13006 gan_resp->gan_nwwn.raw_wwn[1], 13007 gan_resp->gan_nwwn.raw_wwn[2], 13008 gan_resp->gan_nwwn.raw_wwn[3], 13009 gan_resp->gan_nwwn.raw_wwn[4], 13010 gan_resp->gan_nwwn.raw_wwn[5], 13011 gan_resp->gan_nwwn.raw_wwn[6], 13012 gan_resp->gan_nwwn.raw_wwn[7]); 13013 13014 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&nwwn, 13015 (uint8_t *)&gan_resp->gan_nwwn, sizeof (nwwn), 13016 DDI_DEV_AUTOINCR); 13017 13018 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)&pwwn, 13019 (uint8_t *)&gan_resp->gan_pwwn, sizeof (pwwn), 13020 DDI_DEV_AUTOINCR); 13021 13022 if (ns_cmd->ns_flags & FCTL_NS_CREATE_DEVICE && pd == NULL) { 13023 pd = fctl_create_remote_port(port, &nwwn, &pwwn, 13024 d_id.port_id, PD_PLOGI_INITIATOR, KM_NOSLEEP); 13025 } 13026 if (pd != NULL) { 13027 fp_stuff_device_with_gan(&pkt->pkt_resp_acc, 13028 pd, gan_resp); 13029 } 13030 13031 if (ns_cmd->ns_flags & FCTL_NS_GET_DEV_COUNT) { 13032 *((int *)ns_cmd->ns_data_buf) += 1; 13033 } 13034 13035 if (ns_cmd->ns_flags & FCTL_NS_FILL_NS_MAP) { 13036 ASSERT((ns_cmd->ns_flags & FCTL_NS_NO_DATA_BUF) == 0); 13037 13038 if (ns_cmd->ns_flags & FCTL_NS_BUF_IS_USERLAND) { 13039 fc_port_dev_t *userbuf; 13040 13041 userbuf = ((fc_port_dev_t *) 13042 ns_cmd->ns_data_buf) + 13043 ns_cmd->ns_gan_index++; 13044 13045 userbuf->dev_did = d_id; 13046 13047 ddi_rep_get8(pkt->pkt_resp_acc, 13048 (uint8_t *)userbuf->dev_type, 13049 (uint8_t *)gan_resp->gan_fc4types, 13050 sizeof (userbuf->dev_type), 13051 DDI_DEV_AUTOINCR); 13052 13053 userbuf->dev_nwwn = nwwn; 13054 userbuf->dev_pwwn = pwwn; 13055 13056 if (pd != NULL) { 13057 mutex_enter(&pd->pd_mutex); 13058 userbuf->dev_state = pd->pd_state; 13059 userbuf->dev_hard_addr = 13060 pd->pd_hard_addr; 13061 mutex_exit(&pd->pd_mutex); 13062 } else { 13063 userbuf->dev_state = 13064 PORT_DEVICE_INVALID; 13065 } 13066 } else if (ns_cmd->ns_flags & 13067 FCTL_NS_BUF_IS_FC_PORTMAP) { 13068 fc_portmap_t *map; 13069 13070 map = ((fc_portmap_t *) 13071 ns_cmd->ns_data_buf) + 13072 ns_cmd->ns_gan_index++; 13073 13074 /* 13075 * First fill it like any new map 13076 * and update the port device info 13077 * below. 
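				 * fp_fillout_new_nsmap() leaves map_pd NULL
				 * and marks the entry NEW in the INVALID
				 * state; when a matching pd already exists,
				 * fctl_copy_portmap() just below overwrites
				 * that with the pd's live state.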
13078 */ 13079 fp_fillout_new_nsmap(port, &pkt->pkt_resp_acc, 13080 map, gan_resp, d_id.port_id); 13081 if (pd != NULL) { 13082 fctl_copy_portmap(map, pd); 13083 } else { 13084 map->map_state = PORT_DEVICE_INVALID; 13085 map->map_type = PORT_DEVICE_NOCHANGE; 13086 } 13087 } else { 13088 caddr_t dst_ptr; 13089 13090 dst_ptr = ns_cmd->ns_data_buf + 13091 (NS_GAN_RESP_LEN) * ns_cmd->ns_gan_index++; 13092 13093 ddi_rep_get8(pkt->pkt_resp_acc, 13094 (uint8_t *)dst_ptr, (uint8_t *)gan_resp, 13095 NS_GAN_RESP_LEN, DDI_DEV_AUTOINCR); 13096 } 13097 } else { 13098 ns_cmd->ns_gan_index++; 13099 } 13100 if (ns_cmd->ns_gan_index >= ns_cmd->ns_gan_max) { 13101 fp_iodone(cmd); 13102 return; 13103 } 13104 } 13105 13106 gan_req.pid = d_id; 13107 13108 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&gan_req, 13109 (uint8_t *)(pkt->pkt_cmd + sizeof (fc_ct_header_t)), 13110 sizeof (gan_req), DDI_DEV_AUTOINCR); 13111 13112 if (cmd->cmd_transport(port->fp_fca_handle, pkt) != FC_SUCCESS) { 13113 pkt->pkt_state = FC_PKT_TRAN_ERROR; 13114 fp_iodone(cmd); 13115 } 13116 } 13117 13118 13119 /* 13120 * Handle NS Query interrupt 13121 */ 13122 static void 13123 fp_ns_query_handler(fc_packet_t *pkt, fctl_ns_req_t *ns_cmd) 13124 { 13125 fp_cmd_t *cmd; 13126 fc_local_port_t *port; 13127 caddr_t src_ptr; 13128 uint32_t xfer_len; 13129 13130 cmd = pkt->pkt_ulp_private; 13131 port = cmd->cmd_port; 13132 13133 xfer_len = ns_cmd->ns_resp_size; 13134 13135 FP_TRACE(FP_NHEAD1(1, 0), "NS Query response, cmd_code=%x, xfer_len=%x", 13136 ns_cmd->ns_cmd_code, xfer_len); 13137 13138 if (ns_cmd->ns_cmd_code == NS_GPN_ID) { 13139 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13140 13141 FP_TRACE(FP_NHEAD1(6, 0), "GPN_ID results; %x %x %x %x %x", 13142 src_ptr[0], src_ptr[1], src_ptr[2], src_ptr[3], src_ptr[4]); 13143 } 13144 13145 if (xfer_len <= ns_cmd->ns_data_len) { 13146 src_ptr = (caddr_t)pkt->pkt_resp + sizeof (fc_ct_header_t); 13147 ddi_rep_get8(pkt->pkt_resp_acc, (uint8_t *)ns_cmd->ns_data_buf, 13148 (uint8_t *)src_ptr, xfer_len, DDI_DEV_AUTOINCR); 13149 } 13150 13151 if (ns_cmd->ns_flags & FCTL_NS_VALIDATE_PD) { 13152 ASSERT(ns_cmd->ns_pd != NULL); 13153 13154 mutex_enter(&ns_cmd->ns_pd->pd_mutex); 13155 if (ns_cmd->ns_pd->pd_type == PORT_DEVICE_OLD) { 13156 ns_cmd->ns_pd->pd_type = PORT_DEVICE_NOCHANGE; 13157 } 13158 mutex_exit(&ns_cmd->ns_pd->pd_mutex); 13159 } 13160 13161 if (ns_cmd->ns_flags & FCTL_NS_ASYNC_REQUEST) { 13162 fctl_free_ns_cmd(ns_cmd); 13163 ((fp_cmd_t *)(pkt->pkt_ulp_private))->cmd_private = NULL; 13164 } 13165 fp_iodone(cmd); 13166 } 13167 13168 13169 /* 13170 * Handle unsolicited ADISC ELS request 13171 */ 13172 static void 13173 fp_handle_unsol_adisc(fc_local_port_t *port, fc_unsol_buf_t *buf, 13174 fc_remote_port_t *pd, job_request_t *job) 13175 { 13176 int rval; 13177 fp_cmd_t *cmd; 13178 13179 FP_TRACE(FP_NHEAD1(5, 0), "ADISC; port=%p, D_ID=%x state=%x, pd=%p", 13180 port, pd->pd_port_id.port_id, pd->pd_state, pd); 13181 mutex_enter(&pd->pd_mutex); 13182 if (pd->pd_state != PORT_DEVICE_LOGGED_IN) { 13183 mutex_exit(&pd->pd_mutex); 13184 if (FP_IS_CLASS_1_OR_2(buf->ub_class)) { 13185 cmd = fp_alloc_pkt(port, sizeof (la_els_rjt_t), 13186 0, KM_SLEEP, pd); 13187 if (cmd != NULL) { 13188 fp_els_rjt_init(port, cmd, buf, 13189 FC_ACTION_NON_RETRYABLE, 13190 FC_REASON_INVALID_LINK_CTRL, job); 13191 13192 if (fp_sendcmd(port, cmd, 13193 port->fp_fca_handle) != FC_SUCCESS) { 13194 fp_free_pkt(cmd); 13195 } 13196 } 13197 } 13198 } else { 13199 mutex_exit(&pd->pd_mutex); 13200 /* 13201 * Yes, yes, we 
don't have a hard address. But we 13202 * we should still respond. Huh ? Visit 21.19.2 13203 * of FC-PH-2 which essentially says that if an 13204 * NL_Port doesn't have a hard address, or if a port 13205 * does not have FC-AL capability, it shall report 13206 * zeroes in this field. 13207 */ 13208 cmd = fp_alloc_pkt(port, sizeof (la_els_adisc_t), 13209 0, KM_SLEEP, pd); 13210 if (cmd == NULL) { 13211 return; 13212 } 13213 fp_adisc_acc_init(port, cmd, buf, job); 13214 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13215 if (rval != FC_SUCCESS) { 13216 fp_free_pkt(cmd); 13217 } 13218 } 13219 } 13220 13221 13222 /* 13223 * Initialize ADISC response. 13224 */ 13225 static void 13226 fp_adisc_acc_init(fc_local_port_t *port, fp_cmd_t *cmd, fc_unsol_buf_t *buf, 13227 job_request_t *job) 13228 { 13229 fc_packet_t *pkt; 13230 la_els_adisc_t payload; 13231 13232 cmd->cmd_pkt.pkt_tran_flags = buf->ub_class; 13233 cmd->cmd_pkt.pkt_tran_type = FC_PKT_OUTBOUND; 13234 cmd->cmd_flags = FP_CMD_CFLAG_UNDEFINED; 13235 cmd->cmd_retry_count = 1; 13236 cmd->cmd_ulp_pkt = NULL; 13237 13238 cmd->cmd_transport = port->fp_fca_tran->fca_els_send; 13239 cmd->cmd_job = job; 13240 13241 pkt = &cmd->cmd_pkt; 13242 13243 fp_unsol_resp_init(pkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS); 13244 13245 payload.ls_code.ls_code = LA_ELS_ACC; 13246 payload.ls_code.mbz = 0; 13247 13248 mutex_enter(&port->fp_mutex); 13249 payload.nport_id = port->fp_port_id; 13250 payload.hard_addr = port->fp_hard_addr; 13251 mutex_exit(&port->fp_mutex); 13252 13253 payload.port_wwn = port->fp_service_params.nport_ww_name; 13254 payload.node_wwn = port->fp_service_params.node_ww_name; 13255 13256 ddi_rep_put8(pkt->pkt_cmd_acc, (uint8_t *)&payload, 13257 (uint8_t *)pkt->pkt_cmd, sizeof (payload), DDI_DEV_AUTOINCR); 13258 } 13259 13260 13261 /* 13262 * Hold and Install the requested ULP drivers 13263 */ 13264 static void 13265 fp_load_ulp_modules(dev_info_t *dip, fc_local_port_t *port) 13266 { 13267 int len; 13268 int count; 13269 int data_len; 13270 major_t ulp_major; 13271 caddr_t ulp_name; 13272 caddr_t data_ptr; 13273 caddr_t data_buf; 13274 13275 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13276 13277 data_buf = NULL; 13278 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 13279 DDI_PROP_DONTPASS, "load-ulp-list", 13280 (caddr_t)&data_buf, &data_len) != DDI_PROP_SUCCESS) { 13281 return; 13282 } 13283 13284 len = strlen(data_buf); 13285 port->fp_ulp_nload = fctl_atoi(data_buf, 10); 13286 13287 data_ptr = data_buf + len + 1; 13288 for (count = 0; count < port->fp_ulp_nload; count++) { 13289 len = strlen(data_ptr) + 1; 13290 ulp_name = kmem_zalloc(len, KM_SLEEP); 13291 bcopy(data_ptr, ulp_name, len); 13292 13293 ulp_major = ddi_name_to_major(ulp_name); 13294 13295 if (ulp_major != (major_t)-1) { 13296 if (modload("drv", ulp_name) < 0) { 13297 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 13298 0, NULL, "failed to load %s", 13299 ulp_name); 13300 } 13301 } else { 13302 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13303 "%s isn't a valid driver", ulp_name); 13304 } 13305 13306 kmem_free(ulp_name, len); 13307 data_ptr += len; /* Skip to next field */ 13308 } 13309 13310 /* 13311 * Free the memory allocated by DDI 13312 */ 13313 if (data_buf != NULL) { 13314 kmem_free(data_buf, data_len); 13315 } 13316 } 13317 13318 13319 /* 13320 * Perform LOGO operation 13321 */ 13322 static int 13323 fp_logout(fc_local_port_t *port, fc_remote_port_t *pd, job_request_t *job) 13324 { 13325 int rval; 13326 fp_cmd_t *cmd; 13327 13328 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13329 
ASSERT(!MUTEX_HELD(&pd->pd_mutex)); 13330 13331 cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t), 13332 FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd); 13333 13334 mutex_enter(&port->fp_mutex); 13335 mutex_enter(&pd->pd_mutex); 13336 13337 ASSERT(pd->pd_state == PORT_DEVICE_LOGGED_IN); 13338 ASSERT(pd->pd_login_count == 1); 13339 13340 cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class; 13341 cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE; 13342 cmd->cmd_flags = 0; 13343 cmd->cmd_retry_count = 1; 13344 cmd->cmd_ulp_pkt = NULL; 13345 13346 fp_logo_init(pd, cmd, job); 13347 13348 mutex_exit(&pd->pd_mutex); 13349 mutex_exit(&port->fp_mutex); 13350 13351 rval = fp_sendcmd(port, cmd, port->fp_fca_handle); 13352 if (rval != FC_SUCCESS) { 13353 fp_iodone(cmd); 13354 } 13355 13356 return (rval); 13357 } 13358 13359 13360 /* 13361 * Perform Port attach callbacks to registered ULPs 13362 */ 13363 static void 13364 fp_attach_ulps(fc_local_port_t *port, fc_attach_cmd_t cmd) 13365 { 13366 fp_soft_attach_t *att; 13367 13368 att = kmem_zalloc(sizeof (*att), KM_SLEEP); 13369 att->att_cmd = cmd; 13370 att->att_port = port; 13371 13372 /* 13373 * We need to remember whether or not fctl_busy_port 13374 * succeeded so we know whether or not to call 13375 * fctl_idle_port when the task is complete. 13376 */ 13377 13378 if (fctl_busy_port(port) == 0) { 13379 att->att_need_pm_idle = B_TRUE; 13380 } else { 13381 att->att_need_pm_idle = B_FALSE; 13382 } 13383 13384 (void) taskq_dispatch(port->fp_taskq, fp_ulp_port_attach, 13385 att, KM_SLEEP); 13386 } 13387 13388 13389 /* 13390 * Forward state change notifications on to interested ULPs. 13391 * Spawns a call to fctl_ulp_statec_cb() in a taskq thread to do all the 13392 * real work. 13393 */ 13394 static int 13395 fp_ulp_notify(fc_local_port_t *port, uint32_t statec, int sleep) 13396 { 13397 fc_port_clist_t *clist; 13398 13399 clist = kmem_zalloc(sizeof (*clist), sleep); 13400 if (clist == NULL) { 13401 return (FC_NOMEM); 13402 } 13403 13404 clist->clist_state = statec; 13405 13406 mutex_enter(&port->fp_mutex); 13407 clist->clist_flags = port->fp_topology; 13408 mutex_exit(&port->fp_mutex); 13409 13410 clist->clist_port = (opaque_t)port; 13411 clist->clist_len = 0; 13412 clist->clist_size = 0; 13413 clist->clist_map = NULL; 13414 13415 (void) taskq_dispatch(port->fp_taskq, fctl_ulp_statec_cb, 13416 clist, KM_SLEEP); 13417 13418 return (FC_SUCCESS); 13419 } 13420 13421 13422 /* 13423 * Get name server map 13424 */ 13425 static int 13426 fp_ns_getmap(fc_local_port_t *port, job_request_t *job, fc_portmap_t **map, 13427 uint32_t *len, uint32_t sid) 13428 { 13429 int ret; 13430 fctl_ns_req_t *ns_cmd; 13431 13432 /* 13433 * Don't let the allocator do anything for response; 13434 * we have have buffer ready to fillout. 
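	 * The caller hands in the fc_portmap_t array and its capacity in
	 * *len; ns_data_buf/ns_data_len below simply point at that storage,
	 * so each GA_NXT pass (fp_gan_handler) fills successive entries and
	 * *len is trimmed to the number of entries actually obtained before
	 * returning.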
13435 */ 13436 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13437 sizeof (ns_resp_gan_t), 0, (FCTL_NS_FILL_NS_MAP | 13438 FCTL_NS_BUF_IS_FC_PORTMAP), KM_SLEEP); 13439 13440 ns_cmd->ns_data_len = sizeof (**map) * (*len); 13441 ns_cmd->ns_data_buf = (caddr_t)*map; 13442 13443 ASSERT(ns_cmd != NULL); 13444 13445 ns_cmd->ns_gan_index = 0; 13446 ns_cmd->ns_gan_sid = sid; 13447 ns_cmd->ns_cmd_code = NS_GA_NXT; 13448 ns_cmd->ns_gan_max = *len; 13449 13450 ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13451 13452 if (ns_cmd->ns_gan_index != *len) { 13453 *len = ns_cmd->ns_gan_index; 13454 } 13455 ns_cmd->ns_data_len = 0; 13456 ns_cmd->ns_data_buf = NULL; 13457 fctl_free_ns_cmd(ns_cmd); 13458 13459 return (ret); 13460 } 13461 13462 13463 /* 13464 * Create a remote port in Fabric topology by using NS services 13465 */ 13466 static fc_remote_port_t * 13467 fp_create_remote_port_by_ns(fc_local_port_t *port, uint32_t d_id, int sleep) 13468 { 13469 int rval; 13470 job_request_t *job; 13471 fctl_ns_req_t *ns_cmd; 13472 fc_remote_port_t *pd; 13473 13474 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13475 13476 FP_TRACE(FP_NHEAD1(1, 0), "PD creation begin; port=%p, d_id=%x", 13477 port, d_id); 13478 13479 #ifdef DEBUG 13480 mutex_enter(&port->fp_mutex); 13481 ASSERT(FC_IS_TOP_SWITCH(port->fp_topology)); 13482 mutex_exit(&port->fp_mutex); 13483 #endif 13484 13485 job = fctl_alloc_job(JOB_NS_CMD, 0, NULL, (opaque_t)port, sleep); 13486 if (job == NULL) { 13487 return (NULL); 13488 } 13489 13490 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gan_t), 13491 sizeof (ns_resp_gan_t), 0, (FCTL_NS_CREATE_DEVICE | 13492 FCTL_NS_NO_DATA_BUF), sleep); 13493 if (ns_cmd == NULL) { 13494 return (NULL); 13495 } 13496 13497 job->job_result = FC_SUCCESS; 13498 ns_cmd->ns_gan_max = 1; 13499 ns_cmd->ns_cmd_code = NS_GA_NXT; 13500 ns_cmd->ns_gan_sid = FCTL_GAN_START_ID; 13501 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.port_id = d_id - 1; 13502 ((ns_req_gan_t *)(ns_cmd->ns_cmd_buf))->pid.priv_lilp_posit = 0; 13503 13504 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 13505 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 13506 fctl_free_ns_cmd(ns_cmd); 13507 13508 if (rval != FC_SUCCESS || job->job_result != FC_SUCCESS) { 13509 fctl_dealloc_job(job); 13510 return (NULL); 13511 } 13512 fctl_dealloc_job(job); 13513 13514 pd = fctl_get_remote_port_by_did(port, d_id); 13515 13516 FP_TRACE(FP_NHEAD1(1, 0), "PD creation end; port=%p, d_id=%x, pd=%p", 13517 port, d_id, pd); 13518 13519 return (pd); 13520 } 13521 13522 13523 /* 13524 * Check for the permissions on an ioctl command. If it is required to have an 13525 * EXCLUSIVE open performed, return a FAILURE to just shut the door on it. If 13526 * the ioctl command isn't in one of the list built, shut the door on that too. 13527 * 13528 * Certain ioctls perform hardware accesses in FCA drivers, and it needs 13529 * to be made sure that users open the port for an exclusive access while 13530 * performing those operations. 13531 * 13532 * This can prevent a casual user from inflicting damage on the port by 13533 * sending these ioctls from multiple processes/threads (there is no good 13534 * reason why one would need to do that) without actually realizing how 13535 * expensive such commands could turn out to be. 13536 * 13537 * It is also important to note that, even with an exclusive access, 13538 * multiple threads can share the same file descriptor and fire down 13539 * commands in parallel. 
To prevent that the driver needs to make sure 13540 * that such commands aren't in progress already. This is taken care of 13541 * in the FP_EXCL_BUSY bit of fp_flag. 13542 */ 13543 static int 13544 fp_check_perms(uchar_t open_flag, uint16_t ioctl_cmd) 13545 { 13546 int ret = FC_FAILURE; 13547 int count; 13548 13549 for (count = 0; 13550 count < sizeof (fp_perm_list) / sizeof (fp_perm_list[0]); 13551 count++) { 13552 if (fp_perm_list[count].fp_ioctl_cmd == ioctl_cmd) { 13553 if (fp_perm_list[count].fp_open_flag & open_flag) { 13554 ret = FC_SUCCESS; 13555 } 13556 break; 13557 } 13558 } 13559 13560 return (ret); 13561 } 13562 13563 13564 /* 13565 * Bind Port driver's unsolicited, state change callbacks 13566 */ 13567 static int 13568 fp_bind_callbacks(fc_local_port_t *port) 13569 { 13570 fc_fca_bind_info_t bind_info = {0}; 13571 fc_fca_port_info_t *port_info; 13572 int rval = DDI_SUCCESS; 13573 uint16_t class; 13574 int node_namelen, port_namelen; 13575 char *nname = NULL, *pname = NULL; 13576 13577 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13578 13579 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13580 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13581 "node-name", &nname) != DDI_PROP_SUCCESS) { 13582 FP_TRACE(FP_NHEAD1(1, 0), 13583 "fp_bind_callback fail to get node-name"); 13584 } 13585 if (nname) { 13586 fc_str_to_wwn(nname, &(bind_info.port_nwwn)); 13587 } 13588 13589 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, port->fp_port_dip, 13590 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, 13591 "port-name", &pname) != DDI_PROP_SUCCESS) { 13592 FP_TRACE(FP_NHEAD1(1, 0), 13593 "fp_bind_callback fail to get port-name"); 13594 } 13595 if (pname) { 13596 fc_str_to_wwn(pname, &(bind_info.port_pwwn)); 13597 } 13598 13599 if (port->fp_npiv_type == FC_NPIV_PORT) { 13600 bind_info.port_npiv = 1; 13601 } 13602 13603 /* 13604 * fca_bind_port returns the FCA driver's handle for the local 13605 * port instance. If the port number isn't supported it returns NULL. 13606 * It also sets up callback in the FCA for various 13607 * things like state change, ELS etc.. 
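	 * The two callbacks wired up below are fp_statec_cb (link state
	 * changes) and fp_unsol_cb (unsolicited receive traffic); in return
	 * the FCA fills *port_info with its current port state, login
	 * parameters and hard address, which are copied into the
	 * fc_local_port further down.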
13608 */ 13609 bind_info.port_statec_cb = fp_statec_cb; 13610 bind_info.port_unsol_cb = fp_unsol_cb; 13611 bind_info.port_num = port->fp_port_num; 13612 bind_info.port_handle = (opaque_t)port; 13613 13614 port_info = kmem_zalloc(sizeof (*port_info), KM_SLEEP); 13615 13616 /* 13617 * Hold the port driver mutex as the callbacks are bound until the 13618 * service parameters are properly filled in (in order to be able to 13619 * properly respond to unsolicited ELS requests) 13620 */ 13621 mutex_enter(&port->fp_mutex); 13622 13623 port->fp_fca_handle = port->fp_fca_tran->fca_bind_port( 13624 port->fp_fca_dip, port_info, &bind_info); 13625 13626 if (port->fp_fca_handle == NULL) { 13627 rval = DDI_FAILURE; 13628 goto exit; 13629 } 13630 13631 port->fp_bind_state = port->fp_state = port_info->pi_port_state; 13632 port->fp_service_params = port_info->pi_login_params; 13633 port->fp_hard_addr = port_info->pi_hard_addr; 13634 13635 /* Copy from the FCA structure to the FP structure */ 13636 port->fp_hba_port_attrs = port_info->pi_attrs; 13637 13638 if (port_info->pi_rnid_params.status == FC_SUCCESS) { 13639 port->fp_rnid_init = 1; 13640 bcopy(&port_info->pi_rnid_params.params, 13641 &port->fp_rnid_params, 13642 sizeof (port->fp_rnid_params)); 13643 } else { 13644 port->fp_rnid_init = 0; 13645 } 13646 13647 node_namelen = strlen((char *)&port_info->pi_attrs.sym_node_name); 13648 if (node_namelen) { 13649 bcopy(&port_info->pi_attrs.sym_node_name, 13650 &port->fp_sym_node_name, 13651 node_namelen); 13652 port->fp_sym_node_namelen = node_namelen; 13653 } 13654 port_namelen = strlen((char *)&port_info->pi_attrs.sym_port_name); 13655 if (port_namelen) { 13656 bcopy(&port_info->pi_attrs.sym_port_name, 13657 &port->fp_sym_port_name, 13658 port_namelen); 13659 port->fp_sym_port_namelen = port_namelen; 13660 } 13661 13662 /* zero out the normally unused fields right away */ 13663 port->fp_service_params.ls_code.mbz = 0; 13664 port->fp_service_params.ls_code.ls_code = 0; 13665 bzero(&port->fp_service_params.reserved, 13666 sizeof (port->fp_service_params.reserved)); 13667 13668 class = port_info->pi_login_params.class_1.class_opt; 13669 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS1 : 0; 13670 13671 class = port_info->pi_login_params.class_2.class_opt; 13672 port->fp_cos |= (class & 0x8000) ? FC_NS_CLASS2 : 0; 13673 13674 class = port_info->pi_login_params.class_3.class_opt; 13675 port->fp_cos |= (class & 0x8000) ? 
FC_NS_CLASS3 : 0; 13676 13677 exit: 13678 if (nname) { 13679 ddi_prop_free(nname); 13680 } 13681 if (pname) { 13682 ddi_prop_free(pname); 13683 } 13684 mutex_exit(&port->fp_mutex); 13685 kmem_free(port_info, sizeof (*port_info)); 13686 13687 return (rval); 13688 } 13689 13690 13691 /* 13692 * Retrieve FCA capabilities 13693 */ 13694 static void 13695 fp_retrieve_caps(fc_local_port_t *port) 13696 { 13697 int rval; 13698 int ub_count; 13699 fc_fcp_dma_t fcp_dma; 13700 fc_reset_action_t action; 13701 fc_dma_behavior_t dma_behavior; 13702 13703 ASSERT(!MUTEX_HELD(&port->fp_mutex)); 13704 13705 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13706 FC_CAP_UNSOL_BUF, &ub_count); 13707 13708 switch (rval) { 13709 case FC_CAP_FOUND: 13710 case FC_CAP_SETTABLE: 13711 switch (ub_count) { 13712 case 0: 13713 break; 13714 13715 case -1: 13716 ub_count = fp_unsol_buf_count; 13717 break; 13718 13719 default: 13720 /* 1/4th of total buffers is my share */ 13721 ub_count = 13722 (ub_count / port->fp_fca_tran->fca_numports) >> 2; 13723 break; 13724 } 13725 break; 13726 13727 default: 13728 ub_count = 0; 13729 break; 13730 } 13731 13732 mutex_enter(&port->fp_mutex); 13733 port->fp_ub_count = ub_count; 13734 mutex_exit(&port->fp_mutex); 13735 13736 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13737 FC_CAP_POST_RESET_BEHAVIOR, &action); 13738 13739 switch (rval) { 13740 case FC_CAP_FOUND: 13741 case FC_CAP_SETTABLE: 13742 switch (action) { 13743 case FC_RESET_RETURN_NONE: 13744 case FC_RESET_RETURN_ALL: 13745 case FC_RESET_RETURN_OUTSTANDING: 13746 break; 13747 13748 default: 13749 action = FC_RESET_RETURN_NONE; 13750 break; 13751 } 13752 break; 13753 13754 default: 13755 action = FC_RESET_RETURN_NONE; 13756 break; 13757 } 13758 mutex_enter(&port->fp_mutex); 13759 port->fp_reset_action = action; 13760 mutex_exit(&port->fp_mutex); 13761 13762 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13763 FC_CAP_NOSTREAM_ON_UNALIGN_BUF, &dma_behavior); 13764 13765 switch (rval) { 13766 case FC_CAP_FOUND: 13767 switch (dma_behavior) { 13768 case FC_ALLOW_STREAMING: 13769 /* FALLTHROUGH */ 13770 case FC_NO_STREAMING: 13771 break; 13772 13773 default: 13774 /* 13775 * If capability was found and the value 13776 * was incorrect assume the worst 13777 */ 13778 dma_behavior = FC_NO_STREAMING; 13779 break; 13780 } 13781 break; 13782 13783 default: 13784 /* 13785 * If capability was not defined - allow streaming; existing 13786 * FCAs should not be affected. 13787 */ 13788 dma_behavior = FC_ALLOW_STREAMING; 13789 break; 13790 } 13791 mutex_enter(&port->fp_mutex); 13792 port->fp_dma_behavior = dma_behavior; 13793 mutex_exit(&port->fp_mutex); 13794 13795 rval = port->fp_fca_tran->fca_get_cap(port->fp_fca_handle, 13796 FC_CAP_FCP_DMA, &fcp_dma); 13797 13798 if (rval != FC_CAP_FOUND || (fcp_dma != FC_NO_DVMA_SPACE && 13799 fcp_dma != FC_DVMA_SPACE)) { 13800 fcp_dma = FC_DVMA_SPACE; 13801 } 13802 13803 mutex_enter(&port->fp_mutex); 13804 port->fp_fcp_dma = fcp_dma; 13805 mutex_exit(&port->fp_mutex); 13806 } 13807 13808 13809 /* 13810 * Handle Domain, Area changes in the Fabric. 
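 * An RSCN page that names a whole area or domain is handed here as a
 * 24-bit address plus a mask (typically 0xFFFF00 for an area and
 * 0xFF0000 for a domain).  Every pd for which we were the PLOGI
 * initiator and whose (d_id & mask) == id is marked OLD and re-queried
 * through the name server; ports that still answer are revalidated or
 * re-logged-in, the rest are reported to ULPs as gone, and the orphan
 * list is rechecked for ports that have reappeared.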
13811 */ 13812 static void 13813 fp_validate_area_domain(fc_local_port_t *port, uint32_t id, uint32_t mask, 13814 job_request_t *job, int sleep) 13815 { 13816 #ifdef DEBUG 13817 uint32_t dcnt; 13818 #endif 13819 int rval; 13820 int send; 13821 int index; 13822 int listindex; 13823 int login; 13824 int job_flags; 13825 char ww_name[17]; 13826 uint32_t d_id; 13827 uint32_t count; 13828 fctl_ns_req_t *ns_cmd; 13829 fc_portmap_t *list; 13830 fc_orphan_t *orp; 13831 fc_orphan_t *norp; 13832 fc_orphan_t *prev; 13833 fc_remote_port_t *pd; 13834 fc_remote_port_t *npd; 13835 struct pwwn_hash *head; 13836 13837 ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t), 13838 sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t), 13839 0, sleep); 13840 if (ns_cmd == NULL) { 13841 mutex_enter(&port->fp_mutex); 13842 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13843 --port->fp_rscn_count; 13844 } 13845 mutex_exit(&port->fp_mutex); 13846 13847 return; 13848 } 13849 ns_cmd->ns_cmd_code = NS_GID_PN; 13850 13851 /* 13852 * We need to get a new count of devices from the 13853 * name server, which will also create any new devices 13854 * as needed. 13855 */ 13856 13857 (void) fp_ns_get_devcount(port, job, 1, sleep); 13858 13859 FP_TRACE(FP_NHEAD1(3, 0), 13860 "fp_validate_area_domain: get_devcount found %d devices", 13861 port->fp_total_devices); 13862 13863 mutex_enter(&port->fp_mutex); 13864 13865 for (count = index = 0; index < pwwn_table_size; index++) { 13866 head = &port->fp_pwwn_table[index]; 13867 pd = head->pwwn_head; 13868 while (pd != NULL) { 13869 mutex_enter(&pd->pd_mutex); 13870 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 13871 if ((pd->pd_port_id.port_id & mask) == id && 13872 pd->pd_recepient == PD_PLOGI_INITIATOR) { 13873 count++; 13874 pd->pd_type = PORT_DEVICE_OLD; 13875 pd->pd_flags = PD_ELS_MARK; 13876 } 13877 } 13878 mutex_exit(&pd->pd_mutex); 13879 pd = pd->pd_wwn_hnext; 13880 } 13881 } 13882 13883 #ifdef DEBUG 13884 dcnt = count; 13885 #endif /* DEBUG */ 13886 13887 /* 13888 * Since port->fp_orphan_count is declared an 'int' it is 13889 * theoretically possible that the count could go negative. 13890 * 13891 * This would be bad and if that happens we really do want 13892 * to know. 13893 */ 13894 13895 ASSERT(port->fp_orphan_count >= 0); 13896 13897 count += port->fp_orphan_count; 13898 13899 /* 13900 * We add the port->fp_total_devices value to the count 13901 * in the case where our port is newly attached. This is 13902 * because we haven't done any discovery and we don't have 13903 * any orphans in the port's orphan list. If we do not do 13904 * this addition to count then we won't alloc enough kmem 13905 * to do discovery with. 
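 * The list allocated below is sized for this worst case; listindex
 * tracks how many entries are actually filled in, and only that many
 * are meaningful when the list is finally handed to fp_ulp_devc_cb().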
13906 */ 13907 13908 if (count == 0) { 13909 count += port->fp_total_devices; 13910 FP_TRACE(FP_NHEAD1(3, 0), "fp_validate_area_domain: " 13911 "0x%x orphans found, using 0x%x", 13912 port->fp_orphan_count, count); 13913 } 13914 13915 mutex_exit(&port->fp_mutex); 13916 13917 /* 13918 * Allocate the change list 13919 */ 13920 13921 list = kmem_zalloc(sizeof (fc_portmap_t) * count, sleep); 13922 if (list == NULL) { 13923 fp_printf(port, CE_NOTE, FP_LOG_ONLY, 0, NULL, 13924 " Not enough memory to service RSCNs" 13925 " for %d ports, continuing...", count); 13926 13927 fctl_free_ns_cmd(ns_cmd); 13928 13929 mutex_enter(&port->fp_mutex); 13930 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 13931 --port->fp_rscn_count; 13932 } 13933 mutex_exit(&port->fp_mutex); 13934 13935 return; 13936 } 13937 13938 /* 13939 * Attempt to validate or invalidate the devices that were 13940 * already in the pwwn hash table. 13941 */ 13942 13943 mutex_enter(&port->fp_mutex); 13944 for (listindex = 0, index = 0; index < pwwn_table_size; index++) { 13945 head = &port->fp_pwwn_table[index]; 13946 npd = head->pwwn_head; 13947 13948 while ((pd = npd) != NULL) { 13949 npd = pd->pd_wwn_hnext; 13950 13951 mutex_enter(&pd->pd_mutex); 13952 if ((pd->pd_port_id.port_id & mask) == id && 13953 pd->pd_flags == PD_ELS_MARK) { 13954 la_wwn_t *pwwn; 13955 13956 job->job_result = FC_SUCCESS; 13957 13958 ((ns_req_gid_pn_t *) 13959 (ns_cmd->ns_cmd_buf))->pwwn = 13960 pd->pd_port_name; 13961 13962 pwwn = &pd->pd_port_name; 13963 d_id = pd->pd_port_id.port_id; 13964 13965 mutex_exit(&pd->pd_mutex); 13966 mutex_exit(&port->fp_mutex); 13967 13968 rval = fp_ns_query(port, ns_cmd, job, 1, 13969 sleep); 13970 if (rval != FC_SUCCESS) { 13971 fc_wwn_to_str(pwwn, ww_name); 13972 13973 FP_TRACE(FP_NHEAD1(3, 0), 13974 "AREA RSCN: PD disappeared; " 13975 "d_id=%x, PWWN=%s", d_id, ww_name); 13976 13977 FP_TRACE(FP_NHEAD2(9, 0), 13978 "N_x Port with D_ID=%x," 13979 " PWWN=%s disappeared from fabric", 13980 d_id, ww_name); 13981 13982 fp_fillout_old_map(list + listindex++, 13983 pd, 1); 13984 } else { 13985 fctl_copy_portmap(list + listindex++, 13986 pd); 13987 13988 mutex_enter(&pd->pd_mutex); 13989 pd->pd_flags = PD_ELS_IN_PROGRESS; 13990 mutex_exit(&pd->pd_mutex); 13991 } 13992 13993 mutex_enter(&port->fp_mutex); 13994 } else { 13995 mutex_exit(&pd->pd_mutex); 13996 } 13997 } 13998 } 13999 14000 mutex_exit(&port->fp_mutex); 14001 14002 ASSERT(listindex == dcnt); 14003 14004 job->job_counter = listindex; 14005 job_flags = job->job_flags; 14006 job->job_flags |= JOB_TYPE_FP_ASYNC; 14007 14008 /* 14009 * Login (if we were the initiator) or validate devices in the 14010 * port map. 14011 */ 14012 14013 for (index = 0; index < listindex; index++) { 14014 pd = list[index].map_pd; 14015 14016 mutex_enter(&pd->pd_mutex); 14017 ASSERT((pd->pd_port_id.port_id & mask) == id); 14018 14019 if (pd->pd_flags != PD_ELS_IN_PROGRESS) { 14020 ASSERT(pd->pd_type == PORT_DEVICE_OLD); 14021 mutex_exit(&pd->pd_mutex); 14022 fp_jobdone(job); 14023 continue; 14024 } 14025 14026 login = (pd->pd_state == PORT_DEVICE_LOGGED_IN) ? 1 : 0; 14027 send = (pd->pd_recepient == PD_PLOGI_INITIATOR) ? 
1 : 0; 14028 d_id = pd->pd_port_id.port_id; 14029 mutex_exit(&pd->pd_mutex); 14030 14031 if ((d_id & mask) == id && send) { 14032 if (login) { 14033 FP_TRACE(FP_NHEAD1(6, 0), 14034 "RSCN and PLOGI request;" 14035 " pd=%p, job=%p d_id=%x, index=%d", pd, 14036 job, d_id, index); 14037 14038 rval = fp_port_login(port, d_id, job, 14039 FP_CMD_PLOGI_RETAIN, sleep, pd, NULL); 14040 if (rval != FC_SUCCESS) { 14041 mutex_enter(&pd->pd_mutex); 14042 pd->pd_flags = PD_IDLE; 14043 mutex_exit(&pd->pd_mutex); 14044 14045 job->job_result = rval; 14046 fp_jobdone(job); 14047 } 14048 14049 FP_TRACE(FP_NHEAD2(4, 0), 14050 "PLOGI succeeded:no skip(1) for " 14051 "D_ID %x", d_id); 14052 list[index].map_flags |= 14053 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14054 } else { 14055 FP_TRACE(FP_NHEAD1(6, 0), "RSCN and NS request;" 14056 " pd=%p, job=%p d_id=%x, index=%d", pd, 14057 job, d_id, index); 14058 14059 rval = fp_ns_validate_device(port, pd, job, 14060 0, sleep); 14061 if (rval != FC_SUCCESS) { 14062 fp_jobdone(job); 14063 } 14064 mutex_enter(&pd->pd_mutex); 14065 pd->pd_flags = PD_IDLE; 14066 mutex_exit(&pd->pd_mutex); 14067 } 14068 } else { 14069 FP_TRACE(FP_NHEAD1(6, 0), 14070 "RSCN and NO request sent; pd=%p," 14071 " d_id=%x, index=%d", pd, d_id, index); 14072 14073 mutex_enter(&pd->pd_mutex); 14074 pd->pd_flags = PD_IDLE; 14075 mutex_exit(&pd->pd_mutex); 14076 14077 fp_jobdone(job); 14078 } 14079 } 14080 14081 if (listindex) { 14082 fctl_jobwait(job); 14083 } 14084 job->job_flags = job_flags; 14085 14086 /* 14087 * Orphan list validation. 14088 */ 14089 mutex_enter(&port->fp_mutex); 14090 for (prev = NULL, orp = port->fp_orphan_list; port->fp_orphan_count && 14091 orp != NULL; orp = norp) { 14092 norp = orp->orp_next; 14093 mutex_exit(&port->fp_mutex); 14094 14095 job->job_counter = 1; 14096 job->job_result = FC_SUCCESS; 14097 ASSERT((job->job_flags & JOB_TYPE_FP_ASYNC) == 0); 14098 14099 ((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = orp->orp_pwwn; 14100 14101 ((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0; 14102 ((ns_resp_gid_pn_t *) 14103 ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0; 14104 14105 rval = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP); 14106 if (rval == FC_SUCCESS) { 14107 d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf)); 14108 pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP); 14109 if (pd != NULL) { 14110 fc_wwn_to_str(&orp->orp_pwwn, ww_name); 14111 14112 FP_TRACE(FP_NHEAD1(6, 0), 14113 "RSCN and ORPHAN list " 14114 "success; d_id=%x, PWWN=%s", d_id, ww_name); 14115 14116 FP_TRACE(FP_NHEAD2(6, 0), 14117 "N_x Port with D_ID=%x, PWWN=%s reappeared" 14118 " in fabric", d_id, ww_name); 14119 14120 mutex_enter(&port->fp_mutex); 14121 if (prev) { 14122 prev->orp_next = orp->orp_next; 14123 } else { 14124 ASSERT(orp == port->fp_orphan_list); 14125 port->fp_orphan_list = orp->orp_next; 14126 } 14127 port->fp_orphan_count--; 14128 mutex_exit(&port->fp_mutex); 14129 14130 kmem_free(orp, sizeof (*orp)); 14131 fctl_copy_portmap(list + listindex++, pd); 14132 } else { 14133 prev = orp; 14134 } 14135 } else { 14136 prev = orp; 14137 } 14138 mutex_enter(&port->fp_mutex); 14139 } 14140 mutex_exit(&port->fp_mutex); 14141 14142 /* 14143 * One more pass through the list to delist old devices from 14144 * the d_id and pwwn tables and possibly add to the orphan list. 
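 * An entry still marked PORT_DEVICE_OLD here is typically one that the
 * GID_PN query or the PLOGI above could not bring back: it is delisted
 * from the D_ID and PWWN tables and, where we were the PLOGI initiator,
 * parked on the orphan list so a later scan can retry it.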
14145 */ 14146 14147 for (index = 0; index < listindex; index++) { 14148 pd = list[index].map_pd; 14149 ASSERT(pd != NULL); 14150 14151 /* 14152 * Update PLOGI results; For NS validation 14153 * of orphan list, it is redundant 14154 * 14155 * Take care to preserve PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY if 14156 * appropriate as fctl_copy_portmap() will clear map_flags. 14157 */ 14158 if (list[index].map_flags & 14159 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) { 14160 fctl_copy_portmap(list + index, pd); 14161 list[index].map_flags |= 14162 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14163 } else { 14164 fctl_copy_portmap(list + index, pd); 14165 } 14166 14167 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14168 "results; pd=%p, d_id=%x pwwn=%x %x %x %x %x %x %x %x", 14169 pd, pd->pd_port_id.port_id, 14170 pd->pd_port_name.raw_wwn[0], 14171 pd->pd_port_name.raw_wwn[1], 14172 pd->pd_port_name.raw_wwn[2], 14173 pd->pd_port_name.raw_wwn[3], 14174 pd->pd_port_name.raw_wwn[4], 14175 pd->pd_port_name.raw_wwn[5], 14176 pd->pd_port_name.raw_wwn[6], 14177 pd->pd_port_name.raw_wwn[7]); 14178 14179 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with Area DOMAIN " 14180 "results continued, pd=%p type=%x, flags=%x, state=%x", 14181 pd, pd->pd_type, pd->pd_flags, pd->pd_state); 14182 14183 mutex_enter(&pd->pd_mutex); 14184 if (pd->pd_type == PORT_DEVICE_OLD) { 14185 int initiator; 14186 14187 pd->pd_flags = PD_IDLE; 14188 initiator = (pd->pd_recepient == 14189 PD_PLOGI_INITIATOR) ? 1 : 0; 14190 14191 mutex_exit(&pd->pd_mutex); 14192 14193 mutex_enter(&port->fp_mutex); 14194 mutex_enter(&pd->pd_mutex); 14195 14196 pd->pd_state = PORT_DEVICE_INVALID; 14197 fctl_delist_did_table(port, pd); 14198 fctl_delist_pwwn_table(port, pd); 14199 14200 mutex_exit(&pd->pd_mutex); 14201 mutex_exit(&port->fp_mutex); 14202 14203 if (initiator) { 14204 (void) fctl_add_orphan(port, pd, sleep); 14205 } 14206 list[index].map_pd = pd; 14207 } else { 14208 ASSERT(pd->pd_flags == PD_IDLE); 14209 if (pd->pd_state == PORT_DEVICE_LOGGED_IN) { 14210 /* 14211 * Reset LOGO tolerance to zero 14212 */ 14213 fctl_tc_reset(&pd->pd_logo_tc); 14214 } 14215 mutex_exit(&pd->pd_mutex); 14216 } 14217 } 14218 14219 if (ns_cmd) { 14220 fctl_free_ns_cmd(ns_cmd); 14221 } 14222 if (listindex) { 14223 (void) fp_ulp_devc_cb(port, list, listindex, count, 14224 sleep, 0); 14225 } else { 14226 kmem_free(list, sizeof (*list) * count); 14227 14228 mutex_enter(&port->fp_mutex); 14229 if (--port->fp_rscn_count == FC_INVALID_RSCN_COUNT) { 14230 --port->fp_rscn_count; 14231 } 14232 mutex_exit(&port->fp_mutex); 14233 } 14234 } 14235 14236 14237 /* 14238 * Work hard to make sense out of an RSCN page. 
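 * The page's D_ID is first checked against the name server (GPN_ID);
 * if it no longer answers, the existing pd (if any) is reported to
 * ULPs as gone.  Otherwise the D_ID and the returned PWWN are looked
 * up locally and the combinations are handled in turn: both resolve to
 * the same pd (case ONE: no change, optionally refresh the login),
 * neither is known (case TWO: check the orphan list, then build a pd
 * from the name server), only the PWWN is known (case THREE: the port
 * moved to a new D_ID), only the D_ID is known (the port behind it has
 * a new PWWN), plus a final catch-all when the two lookups disagree.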
14239 */ 14240 static void 14241 fp_validate_rscn_page(fc_local_port_t *port, fc_affected_id_t *page, 14242 job_request_t *job, fctl_ns_req_t *ns_cmd, fc_portmap_t *listptr, 14243 int *listindex, int sleep) 14244 { 14245 int rval; 14246 char ww_name[17]; 14247 la_wwn_t *pwwn; 14248 fc_remote_port_t *pwwn_pd; 14249 fc_remote_port_t *did_pd; 14250 14251 did_pd = fctl_get_remote_port_by_did(port, page->aff_d_id); 14252 14253 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; " 14254 "port=%p, d_id=%x, pd=%p, rscn_count:0x%x", port, page->aff_d_id, 14255 did_pd, (uint32_t)(uintptr_t)job->job_cb_arg); 14256 14257 if (did_pd != NULL) { 14258 mutex_enter(&did_pd->pd_mutex); 14259 if (did_pd->pd_flags != PD_IDLE) { 14260 mutex_exit(&did_pd->pd_mutex); 14261 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page: " 14262 "PD is BUSY; port=%p, d_id=%x, pd=%p", 14263 port, page->aff_d_id, did_pd); 14264 return; 14265 } 14266 did_pd->pd_flags = PD_ELS_IN_PROGRESS; 14267 mutex_exit(&did_pd->pd_mutex); 14268 } 14269 14270 job->job_counter = 1; 14271 14272 pwwn = &((ns_resp_gpn_id_t *)ns_cmd->ns_data_buf)->pwwn; 14273 14274 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.port_id = page->aff_d_id; 14275 ((ns_req_gpn_id_t *)ns_cmd->ns_cmd_buf)->pid.priv_lilp_posit = 0; 14276 14277 bzero(ns_cmd->ns_data_buf, sizeof (la_wwn_t)); 14278 rval = fp_ns_query(port, ns_cmd, job, 1, sleep); 14279 14280 FP_TRACE(FP_NHEAD1(1, 0), "NS Query Response for D_ID page; rev=%x," 14281 " in_id=%x, cmdrsp=%x, reason=%x, expln=%x", 14282 ns_cmd->ns_resp_hdr.ct_rev, ns_cmd->ns_resp_hdr.ct_inid, 14283 ns_cmd->ns_resp_hdr.ct_cmdrsp, ns_cmd->ns_resp_hdr.ct_reason, 14284 ns_cmd->ns_resp_hdr.ct_expln); 14285 14286 job->job_counter = 1; 14287 14288 if (rval != FC_SUCCESS || fctl_is_wwn_zero(pwwn) == FC_SUCCESS) { 14289 /* 14290 * What this means is that the D_ID 14291 * disappeared from the Fabric. 14292 */ 14293 if (did_pd == NULL) { 14294 FP_TRACE(FP_NHEAD1(1, 0), "RSCN with D_ID page;" 14295 " NULL PD disappeared, rval=%x", rval); 14296 return; 14297 } 14298 14299 fc_wwn_to_str(&did_pd->pd_port_name, ww_name); 14300 14301 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14302 (uint32_t)(uintptr_t)job->job_cb_arg; 14303 14304 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14305 14306 FP_TRACE(FP_NHEAD1(3, 0), "RSCN: PD disappeared; " 14307 "d_id=%x, PWWN=%s", page->aff_d_id, ww_name); 14308 14309 FP_TRACE(FP_NHEAD2(9, 0), 14310 "GPN_ID for D_ID=%x failed", page->aff_d_id); 14311 14312 FP_TRACE(FP_NHEAD2(9, 0), 14313 "N_x Port with D_ID=%x, PWWN=%s disappeared from" 14314 " fabric", page->aff_d_id, ww_name); 14315 14316 mutex_enter(&did_pd->pd_mutex); 14317 did_pd->pd_flags = PD_IDLE; 14318 mutex_exit(&did_pd->pd_mutex); 14319 14320 FP_TRACE(FP_NHEAD1(3, 0), "RSCN with D_ID (%x) page; " 14321 "PD disappeared, pd=%p", page->aff_d_id, did_pd); 14322 14323 return; 14324 } 14325 14326 pwwn_pd = fctl_get_remote_port_by_pwwn(port, pwwn); 14327 14328 if (did_pd != NULL && pwwn_pd != NULL && did_pd == pwwn_pd) { 14329 /* 14330 * There is no change. Do PLOGI again and add it to 14331 * ULP portmap baggage and return. Note: When RSCNs 14332 * arrive with per page states, the need for PLOGI 14333 * can be determined correctly. 
14334 */ 14335 mutex_enter(&pwwn_pd->pd_mutex); 14336 pwwn_pd->pd_type = PORT_DEVICE_NOCHANGE; 14337 mutex_exit(&pwwn_pd->pd_mutex); 14338 14339 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14340 (uint32_t)(uintptr_t)job->job_cb_arg; 14341 14342 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14343 14344 mutex_enter(&pwwn_pd->pd_mutex); 14345 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14346 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14347 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14348 mutex_exit(&pwwn_pd->pd_mutex); 14349 14350 rval = fp_port_login(port, page->aff_d_id, job, 14351 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14352 if (rval == FC_SUCCESS) { 14353 fp_jobwait(job); 14354 rval = job->job_result; 14355 14356 /* 14357 * Reset LOGO tolerance to zero 14358 * Also we are the PLOGI initiator now. 14359 */ 14360 mutex_enter(&pwwn_pd->pd_mutex); 14361 fctl_tc_reset(&pwwn_pd->pd_logo_tc); 14362 pwwn_pd->pd_recepient = PD_PLOGI_INITIATOR; 14363 mutex_exit(&pwwn_pd->pd_mutex); 14364 } 14365 14366 if (rval == FC_SUCCESS) { 14367 struct fc_portmap *map = 14368 listptr + *listindex - 1; 14369 14370 FP_TRACE(FP_NHEAD2(4, 0), 14371 "PLOGI succeeded: no skip(2)" 14372 " for D_ID %x", page->aff_d_id); 14373 map->map_flags |= 14374 PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY; 14375 } else { 14376 FP_TRACE(FP_NHEAD2(9, rval), 14377 "PLOGI to D_ID=%x failed", page->aff_d_id); 14378 14379 FP_TRACE(FP_NHEAD2(9, 0), 14380 "N_x Port with D_ID=%x, PWWN=%s" 14381 " disappeared from fabric", 14382 page->aff_d_id, ww_name); 14383 14384 fp_fillout_old_map(listptr + 14385 *listindex - 1, pwwn_pd, 0); 14386 } 14387 } else { 14388 mutex_exit(&pwwn_pd->pd_mutex); 14389 } 14390 14391 mutex_enter(&did_pd->pd_mutex); 14392 did_pd->pd_flags = PD_IDLE; 14393 mutex_exit(&did_pd->pd_mutex); 14394 14395 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14396 "Case ONE, rval=%x, result=%x pd=%p", page->aff_d_id, rval, 14397 job->job_result, pwwn_pd); 14398 14399 return; 14400 } 14401 14402 if (did_pd == NULL && pwwn_pd == NULL) { 14403 14404 fc_orphan_t *orp = NULL; 14405 fc_orphan_t *norp = NULL; 14406 fc_orphan_t *prev = NULL; 14407 14408 /* 14409 * Hunt down the orphan list before giving up. 
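 * If the PWWN turns out to be parked on the orphan list it is unhooked
 * here, and fp_create_remote_port_by_ns() below rebuilds the pd from
 * the name server entry for the new D_ID before it is added to the
 * change list.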
14410 */ 14411 14412 mutex_enter(&port->fp_mutex); 14413 if (port->fp_orphan_count) { 14414 14415 for (orp = port->fp_orphan_list; orp; orp = norp) { 14416 norp = orp->orp_next; 14417 14418 if (fctl_wwn_cmp(&orp->orp_pwwn, pwwn) != 0) { 14419 prev = orp; 14420 continue; 14421 } 14422 14423 if (prev) { 14424 prev->orp_next = orp->orp_next; 14425 } else { 14426 ASSERT(orp == 14427 port->fp_orphan_list); 14428 port->fp_orphan_list = 14429 orp->orp_next; 14430 } 14431 port->fp_orphan_count--; 14432 break; 14433 } 14434 } 14435 14436 mutex_exit(&port->fp_mutex); 14437 pwwn_pd = fp_create_remote_port_by_ns(port, 14438 page->aff_d_id, sleep); 14439 14440 if (pwwn_pd != NULL) { 14441 14442 if (orp) { 14443 fc_wwn_to_str(&orp->orp_pwwn, 14444 ww_name); 14445 14446 FP_TRACE(FP_NHEAD2(9, 0), 14447 "N_x Port with D_ID=%x," 14448 " PWWN=%s reappeared in fabric", 14449 page->aff_d_id, ww_name); 14450 14451 kmem_free(orp, sizeof (*orp)); 14452 } 14453 14454 (listptr + *listindex)-> 14455 map_rscn_info.ulp_rscn_count = 14456 (uint32_t)(uintptr_t)job->job_cb_arg; 14457 14458 fctl_copy_portmap(listptr + 14459 (*listindex)++, pwwn_pd); 14460 } 14461 14462 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID (0x%x) page; " 14463 "Case TWO", page->aff_d_id); 14464 14465 return; 14466 } 14467 14468 if (pwwn_pd != NULL && did_pd == NULL) { 14469 uint32_t old_d_id; 14470 uint32_t d_id = page->aff_d_id; 14471 14472 /* 14473 * What this means is there is a new D_ID for this 14474 * Port WWN. Take out the port device off D_ID 14475 * list and put it back with a new D_ID. Perform 14476 * PLOGI if already logged in. 14477 */ 14478 mutex_enter(&port->fp_mutex); 14479 mutex_enter(&pwwn_pd->pd_mutex); 14480 14481 old_d_id = pwwn_pd->pd_port_id.port_id; 14482 14483 fctl_delist_did_table(port, pwwn_pd); 14484 14485 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14486 (uint32_t)(uintptr_t)job->job_cb_arg; 14487 14488 fp_fillout_changed_map(listptr + (*listindex)++, pwwn_pd, 14489 &d_id, NULL); 14490 fctl_enlist_did_table(port, pwwn_pd); 14491 14492 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page;" 14493 " Case THREE, pd=%p," 14494 " state=%x", pwwn_pd, pwwn_pd->pd_state); 14495 14496 if ((pwwn_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14497 (pwwn_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14498 fc_wwn_to_str(&pwwn_pd->pd_port_name, ww_name); 14499 14500 mutex_exit(&pwwn_pd->pd_mutex); 14501 mutex_exit(&port->fp_mutex); 14502 14503 FP_TRACE(FP_NHEAD2(9, 0), 14504 "N_x Port with D_ID=%x, PWWN=%s has a new" 14505 " D_ID=%x now", old_d_id, ww_name, d_id); 14506 14507 rval = fp_port_login(port, page->aff_d_id, job, 14508 FP_CMD_PLOGI_RETAIN, sleep, pwwn_pd, NULL); 14509 if (rval == FC_SUCCESS) { 14510 fp_jobwait(job); 14511 rval = job->job_result; 14512 } 14513 14514 if (rval != FC_SUCCESS) { 14515 fp_fillout_old_map(listptr + 14516 *listindex - 1, pwwn_pd, 0); 14517 } 14518 } else { 14519 mutex_exit(&pwwn_pd->pd_mutex); 14520 mutex_exit(&port->fp_mutex); 14521 } 14522 14523 return; 14524 } 14525 14526 if (pwwn_pd == NULL && did_pd != NULL) { 14527 fc_portmap_t *ptr; 14528 uint32_t len = 1; 14529 char old_ww_name[17]; 14530 14531 mutex_enter(&did_pd->pd_mutex); 14532 fc_wwn_to_str(&did_pd->pd_port_name, old_ww_name); 14533 mutex_exit(&did_pd->pd_mutex); 14534 14535 fc_wwn_to_str(pwwn, ww_name); 14536 14537 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14538 (uint32_t)(uintptr_t)job->job_cb_arg; 14539 14540 /* 14541 * What this means is that there is a new Port WWN for 14542 * this D_ID; Mark the Port device as old and provide 
14543 * the new PWWN and D_ID combination as new. 14544 */ 14545 fp_fillout_old_map(listptr + (*listindex)++, did_pd, 0); 14546 14547 FP_TRACE(FP_NHEAD2(9, 0), 14548 "N_x Port with D_ID=%x, PWWN=%s has a new PWWN=%s now", 14549 page->aff_d_id, old_ww_name, ww_name); 14550 14551 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14552 (uint32_t)(uintptr_t)job->job_cb_arg; 14553 14554 ptr = listptr + (*listindex)++; 14555 14556 job->job_counter = 1; 14557 14558 if (fp_ns_getmap(port, job, &ptr, &len, 14559 page->aff_d_id - 1) != FC_SUCCESS) { 14560 (*listindex)--; 14561 } 14562 14563 mutex_enter(&did_pd->pd_mutex); 14564 did_pd->pd_flags = PD_IDLE; 14565 mutex_exit(&did_pd->pd_mutex); 14566 14567 return; 14568 } 14569 14570 /* 14571 * A weird case of Port WWN and D_ID existence but not matching up 14572 * between them. Trust your instincts - Take the port device handle 14573 * off Port WWN list, fix it with new Port WWN and put it back, In 14574 * the mean time mark the port device corresponding to the old port 14575 * WWN as OLD. 14576 */ 14577 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; Case WEIRD, pwwn_pd=%p," 14578 " did_pd=%p", pwwn_pd, did_pd); 14579 14580 mutex_enter(&port->fp_mutex); 14581 mutex_enter(&pwwn_pd->pd_mutex); 14582 14583 pwwn_pd->pd_type = PORT_DEVICE_OLD; 14584 pwwn_pd->pd_state = PORT_DEVICE_INVALID; 14585 fctl_delist_did_table(port, pwwn_pd); 14586 fctl_delist_pwwn_table(port, pwwn_pd); 14587 14588 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14589 " pwwn-d_id=%x pwwn-wwn=%x %x %x %x %x %x %x %x", 14590 pwwn_pd->pd_port_id.port_id, 14591 14592 pwwn_pd->pd_port_name.raw_wwn[0], 14593 pwwn_pd->pd_port_name.raw_wwn[1], 14594 pwwn_pd->pd_port_name.raw_wwn[2], 14595 pwwn_pd->pd_port_name.raw_wwn[3], 14596 pwwn_pd->pd_port_name.raw_wwn[4], 14597 pwwn_pd->pd_port_name.raw_wwn[5], 14598 pwwn_pd->pd_port_name.raw_wwn[6], 14599 pwwn_pd->pd_port_name.raw_wwn[7]); 14600 14601 mutex_exit(&pwwn_pd->pd_mutex); 14602 mutex_exit(&port->fp_mutex); 14603 14604 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14605 (uint32_t)(uintptr_t)job->job_cb_arg; 14606 14607 fctl_copy_portmap(listptr + (*listindex)++, pwwn_pd); 14608 14609 mutex_enter(&port->fp_mutex); 14610 mutex_enter(&did_pd->pd_mutex); 14611 14612 fctl_delist_pwwn_table(port, did_pd); 14613 14614 (listptr + *listindex)->map_rscn_info.ulp_rscn_count = 14615 (uint32_t)(uintptr_t)job->job_cb_arg; 14616 14617 fp_fillout_changed_map(listptr + (*listindex)++, did_pd, NULL, pwwn); 14618 fctl_enlist_pwwn_table(port, did_pd); 14619 14620 FP_TRACE(FP_NHEAD1(6, 0), "RSCN with D_ID page; case WEIRD continued," 14621 " d_id=%x, state=%x, did-wwn=%x %x %x %x %x %x %x %x", 14622 did_pd->pd_port_id.port_id, did_pd->pd_state, 14623 14624 did_pd->pd_port_name.raw_wwn[0], 14625 did_pd->pd_port_name.raw_wwn[1], 14626 did_pd->pd_port_name.raw_wwn[2], 14627 did_pd->pd_port_name.raw_wwn[3], 14628 did_pd->pd_port_name.raw_wwn[4], 14629 did_pd->pd_port_name.raw_wwn[5], 14630 did_pd->pd_port_name.raw_wwn[6], 14631 did_pd->pd_port_name.raw_wwn[7]); 14632 14633 if ((did_pd->pd_state == PORT_DEVICE_LOGGED_IN) || 14634 (did_pd->pd_aux_flags & PD_LOGGED_OUT)) { 14635 mutex_exit(&did_pd->pd_mutex); 14636 mutex_exit(&port->fp_mutex); 14637 14638 rval = fp_port_login(port, page->aff_d_id, job, 14639 FP_CMD_PLOGI_RETAIN, sleep, did_pd, NULL); 14640 if (rval == FC_SUCCESS) { 14641 fp_jobwait(job); 14642 if (job->job_result != FC_SUCCESS) { 14643 fp_fillout_old_map(listptr + 14644 *listindex - 1, did_pd, 0); 14645 } 14646 } 

/*
 * Check with the NS for the presence of this port WWN
 */
static int
fp_ns_validate_device(fc_local_port_t *port, fc_remote_port_t *pd,
    job_request_t *job, int polled, int sleep)
{
	la_wwn_t	pwwn;
	uint32_t	flags;
	fctl_ns_req_t	*ns_cmd;

	flags = FCTL_NS_VALIDATE_PD | ((polled) ? 0 : FCTL_NS_ASYNC_REQUEST);
	ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
	    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
	    flags, sleep);
	if (ns_cmd == NULL) {
		return (FC_NOMEM);
	}

	mutex_enter(&pd->pd_mutex);
	pwwn = pd->pd_port_name;
	mutex_exit(&pd->pd_mutex);

	ns_cmd->ns_cmd_code = NS_GID_PN;
	ns_cmd->ns_pd = pd;
	((ns_req_gid_pn_t *)ns_cmd->ns_cmd_buf)->pwwn = pwwn;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.port_id = 0;
	((ns_resp_gid_pn_t *)ns_cmd->ns_data_buf)->pid.priv_lilp_posit = 0;

	return (fp_ns_query(port, ns_cmd, job, polled, sleep));
}


/*
 * Sanity check the LILP map returned by the FCA
 */
static int
fp_validate_lilp_map(fc_lilpmap_t *lilp_map)
{
	int	count;

	if (lilp_map->lilp_length == 0) {
		return (FC_FAILURE);
	}

	for (count = 0; count < lilp_map->lilp_length; count++) {
		if (fp_is_valid_alpa(lilp_map->lilp_alpalist[count]) !=
		    FC_SUCCESS) {
			return (FC_FAILURE);
		}
	}

	return (FC_SUCCESS);
}


/*
 * Sanity check that the AL_PA is a valid address
 */
static int
fp_is_valid_alpa(uchar_t al_pa)
{
	int	count;

	for (count = 0; count < sizeof (fp_valid_alpas); count++) {
		if (al_pa == fp_valid_alpas[count] || al_pa == 0) {
			return (FC_SUCCESS);
		}
	}

	return (FC_FAILURE);
}


/*
 * Post unsolicited callbacks to ULPs
 */
static void
fp_ulp_unsol_cb(void *arg)
{
	fp_unsol_spec_t	*ub_spec = (fp_unsol_spec_t *)arg;

	fctl_ulp_unsol_cb(ub_spec->port, ub_spec->buf,
	    ub_spec->buf->ub_frame.type);
	kmem_free(ub_spec, sizeof (*ub_spec));
}


/*
 * Perform message reporting in a consistent manner. Unless there is
 * a strong reason NOT to use this function (which is very rare),
 * all message reporting should go through it.
 */
static void
fp_printf(fc_local_port_t *port, int level, fp_mesg_dest_t dest, int fc_errno,
    fc_packet_t *pkt, const char *fmt, ...)
{
	caddr_t	buf;
	va_list	ap;

	switch (level) {
	case CE_NOTE:
		if ((port->fp_verbose & FP_WARNING_MESSAGES) == 0) {
			return;
		}
		break;

	case CE_WARN:
		if ((port->fp_verbose & FP_FATAL_MESSAGES) == 0) {
			return;
		}
		break;
	}

	buf = kmem_zalloc(256, KM_NOSLEEP);
	if (buf == NULL) {
		return;
	}

	(void) sprintf(buf, "fp(%d): ", port->fp_instance);

	va_start(ap, fmt);
	(void) vsprintf(buf + strlen(buf), fmt, ap);
	va_end(ap);

	if (fc_errno) {
		char *errmsg;

		(void) fc_ulp_error(fc_errno, &errmsg);
		(void) sprintf(buf + strlen(buf), " FC Error=%s", errmsg);
	} else {
		if (pkt) {
			caddr_t state, reason, action, expln;

			(void) fc_ulp_pkt_error(pkt, &state, &reason,
			    &action, &expln);

			(void) sprintf(buf + strlen(buf),
			    " state=%s, reason=%s", state, reason);

			if (pkt->pkt_resp_resid) {
				(void) sprintf(buf + strlen(buf),
				    " resp resid=%x\n", pkt->pkt_resp_resid);
			}
		}
	}

	switch (dest) {
	case FP_CONSOLE_ONLY:
		cmn_err(level, "^%s", buf);
		break;

	case FP_LOG_ONLY:
		cmn_err(level, "!%s", buf);
		break;

	default:
		cmn_err(level, "%s", buf);
		break;
	}

	kmem_free(buf, 256);
}

static int
fp_fcio_login(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	int			ret;
	uint32_t		d_id;
	la_wwn_t		pwwn;
	fc_remote_port_t	*pd = NULL;
	fc_remote_port_t	*held_pd = NULL;
	fctl_ns_req_t		*ns_cmd;
	fc_portmap_t		*changelist;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	mutex_enter(&port->fp_mutex);
	if (FC_IS_TOP_SWITCH(port->fp_topology)) {
		mutex_exit(&port->fp_mutex);
		job->job_counter = 1;

		job->job_result = FC_SUCCESS;

		ns_cmd = fctl_alloc_ns_cmd(sizeof (ns_req_gid_pn_t),
		    sizeof (ns_resp_gid_pn_t), sizeof (ns_resp_gid_pn_t),
		    FCTL_NS_BUF_IS_USERLAND, KM_SLEEP);

		ASSERT(ns_cmd != NULL);

		ns_cmd->ns_cmd_code = NS_GID_PN;
		((ns_req_gid_pn_t *)(ns_cmd->ns_cmd_buf))->pwwn = pwwn;

		ret = fp_ns_query(port, ns_cmd, job, 1, KM_SLEEP);

		if (ret != FC_SUCCESS || job->job_result != FC_SUCCESS) {
			if (ret != FC_SUCCESS) {
				fcio->fcio_errno = ret;
			} else {
				fcio->fcio_errno = job->job_result;
			}
			fctl_free_ns_cmd(ns_cmd);
			return (EIO);
		}
		d_id = BE_32(*((uint32_t *)ns_cmd->ns_data_buf));
		fctl_free_ns_cmd(ns_cmd);
	} else {
		mutex_exit(&port->fp_mutex);

		held_pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
		if (held_pd == NULL) {
			fcio->fcio_errno = FC_BADWWN;
			return (EIO);
		}
		pd = held_pd;

		mutex_enter(&pd->pd_mutex);
		d_id = pd->pd_port_id.port_id;
		mutex_exit(&pd->pd_mutex);
	}

	job->job_counter = 1;

	pd = fctl_get_remote_port_by_did(port, d_id);

	if (pd) {
		mutex_enter(&pd->pd_mutex);
		if (pd->pd_state == PORT_DEVICE_LOGGED_IN) {
			pd->pd_login_count++;
			mutex_exit(&pd->pd_mutex);

			fcio->fcio_errno = FC_SUCCESS;
			if (held_pd) {
				fctl_release_remote_port(held_pd);
			}

			return (0);
		}
		mutex_exit(&pd->pd_mutex);
	} else {
		mutex_enter(&port->fp_mutex);
		if (FC_IS_TOP_SWITCH(port->fp_topology)) {
			mutex_exit(&port->fp_mutex);
			pd = fp_create_remote_port_by_ns(port, d_id, KM_SLEEP);
			if (pd == NULL) {
				fcio->fcio_errno = FC_FAILURE;
				if (held_pd) {
					fctl_release_remote_port(held_pd);
				}
				return (EIO);
			}
		} else {
			mutex_exit(&port->fp_mutex);
		}
	}

	job->job_flags &= ~JOB_TYPE_FP_ASYNC;
	job->job_counter = 1;

	ret = fp_port_login(port, d_id, job, FP_CMD_PLOGI_RETAIN,
	    KM_SLEEP, pd, NULL);

	if (ret != FC_SUCCESS) {
		fcio->fcio_errno = ret;
		if (held_pd) {
			fctl_release_remote_port(held_pd);
		}
		return (EIO);
	}
	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;

	if (held_pd) {
		fctl_release_remote_port(held_pd);
	}

	if (job->job_result != FC_SUCCESS) {
		return (EIO);
	}

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADDEV;
		return (ENODEV);
	}

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGIN;

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	mutex_enter(&pd->pd_mutex);
	pd->pd_type = PORT_DEVICE_NOCHANGE;
	mutex_exit(&pd->pd_mutex);

	fctl_release_remote_port(pd);

	return (0);
}


static int
fp_fcio_logout(fc_local_port_t *port, fcio_t *fcio, job_request_t *job)
{
	la_wwn_t		pwwn;
	fp_cmd_t		*cmd;
	fc_portmap_t		*changelist;
	fc_remote_port_t	*pd;

	bcopy(fcio->fcio_ibuf, &pwwn, sizeof (pwwn));

	pd = fctl_hold_remote_port_by_pwwn(port, &pwwn);
	if (pd == NULL) {
		fcio->fcio_errno = FC_BADWWN;
		return (ENXIO);
	}

	mutex_enter(&pd->pd_mutex);
	if (pd->pd_state != PORT_DEVICE_LOGGED_IN) {
		fcio->fcio_errno = FC_LOGINREQ;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EINVAL);
	}

	ASSERT(pd->pd_login_count >= 1);

	if (pd->pd_flags == PD_ELS_IN_PROGRESS) {
		fcio->fcio_errno = FC_FAILURE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EBUSY);
	}

	if (pd->pd_login_count > 1) {
		pd->pd_login_count--;
		fcio->fcio_errno = FC_SUCCESS;
		mutex_exit(&pd->pd_mutex);

		changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

		fctl_copy_portmap(changelist, pd);
		changelist->map_type = PORT_DEVICE_USER_LOGOUT;

		fctl_release_remote_port(pd);

		(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

		return (0);
	}

	pd->pd_flags = PD_ELS_IN_PROGRESS;
	mutex_exit(&pd->pd_mutex);

	job->job_counter = 1;

	cmd = fp_alloc_pkt(port, sizeof (la_els_logo_t),
	    FP_PORT_IDENTIFIER_LEN, KM_SLEEP, pd);
	if (cmd == NULL) {
		fcio->fcio_errno = FC_NOMEM;
		fctl_release_remote_port(pd);

		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		return (ENOMEM);
	}

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	cmd->cmd_pkt.pkt_tran_flags = FC_TRAN_INTR | pd->pd_login_class;
	cmd->cmd_pkt.pkt_tran_type = FC_PKT_EXCHANGE;
	cmd->cmd_flags = FP_CMD_PLOGI_DONT_CARE;
	cmd->cmd_retry_count = 1;
	cmd->cmd_ulp_pkt = NULL;

	fp_logo_init(pd, cmd, job);

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	if (fp_sendcmd(port, cmd, port->fp_fca_handle) != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fp_free_pkt(cmd);
		fctl_release_remote_port(pd);

		return (EIO);
	}

	fp_jobwait(job);

	fcio->fcio_errno = job->job_result;
	if (job->job_result != FC_SUCCESS) {
		mutex_enter(&pd->pd_mutex);
		pd->pd_flags = PD_IDLE;
		mutex_exit(&pd->pd_mutex);

		fctl_release_remote_port(pd);

		return (EIO);
	}

	ASSERT(pd != NULL);

	changelist = kmem_zalloc(sizeof (*changelist), KM_SLEEP);

	fctl_copy_portmap(changelist, pd);
	changelist->map_type = PORT_DEVICE_USER_LOGOUT;
	changelist->map_state = PORT_DEVICE_INVALID;

	mutex_enter(&port->fp_mutex);
	mutex_enter(&pd->pd_mutex);

	fctl_delist_did_table(port, pd);
	fctl_delist_pwwn_table(port, pd);
	pd->pd_flags = PD_IDLE;

	mutex_exit(&pd->pd_mutex);
	mutex_exit(&port->fp_mutex);

	(void) fp_ulp_devc_cb(port, changelist, 1, 1, KM_SLEEP, 1);

	fctl_release_remote_port(pd);

	return (0);
}
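
/*
 * The two handlers above implement the user-initiated login and logout
 * requests.  A rough userland sketch of driving them is shown below; it is
 * an illustration only, and the ioctl plumbing (FCIO_CMD, the FCIO_XFER_*
 * values, the fcio_* field names and the opened device node) is assumed
 * from the usual fcio_t conventions rather than taken from this file:
 *
 *	fcio_t		fcio;
 *	la_wwn_t	pwwn;		(remote port's Port WWN, 8 bytes)
 *
 *	bzero(&fcio, sizeof (fcio));
 *	fcio.fcio_cmd = FCIO_DEV_LOGIN;
 *	fcio.fcio_xfer = FCIO_XFER_WRITE;
 *	fcio.fcio_ibuf = (caddr_t)&pwwn;
 *	fcio.fcio_ilen = sizeof (pwwn);
 *	if (ioctl(fd, FCIO_CMD, &fcio) != 0)
 *		(fcio.fcio_errno then holds the FC_* failure code)
 *
 * FCIO_DEV_LOGOUT takes the same Port WWN in fcio_ibuf.  Logins are
 * reference counted through pd_login_count: a logout sends the LOGO ELS
 * only when the last reference is dropped, while earlier logouts simply
 * decrement the count and post a PORT_DEVICE_USER_LOGOUT changelist entry
 * to the ULPs.
 */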

/*
 * Send a sysevent for adapter port-level events.
 */
static void
fp_log_port_event(fc_local_port_t *port, char *subclass)
{
	nvlist_t	*attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}


static void
fp_log_target_event(fc_local_port_t *port, char *subclass, la_wwn_t tgt_pwwn,
    uint32_t port_id)
{
	nvlist_t	*attr_list;

	if (nvlist_alloc(&attr_list, NV_UNIQUE_NAME_TYPE,
	    KM_SLEEP) != DDI_SUCCESS) {
		goto alloc_failed;
	}

	if (nvlist_add_uint32(attr_list, "instance",
	    port->fp_instance) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "port-wwn",
	    port->fp_service_params.nport_ww_name.raw_wwn,
	    sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_byte_array(attr_list, "target-port-wwn",
	    tgt_pwwn.raw_wwn, sizeof (la_wwn_t)) != DDI_SUCCESS) {
		goto error;
	}

	if (nvlist_add_uint32(attr_list, "target-port-id",
	    port_id) != DDI_SUCCESS) {
		goto error;
	}

	(void) ddi_log_sysevent(port->fp_port_dip, DDI_VENDOR_SUNW, EC_SUNFC,
	    subclass, attr_list, NULL, DDI_SLEEP);

	nvlist_free(attr_list);
	return;

error:
	nvlist_free(attr_list);
alloc_failed:
	FP_TRACE(FP_NHEAD1(9, 0), "Unable to send %s event", subclass);
}
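
/*
 * Illustration only: a port state transition would typically be announced
 * by posting one of the EC_SUNFC subclasses, along the lines of
 *
 *	fp_log_port_event(port, ESC_SUNFC_PORT_OFFLINE);
 *
 * (the subclass name here is assumed from the common sysevent definitions,
 * not taken from this file).  The resulting sysevent carries the
 * "instance" and "port-wwn" attributes built above; target events add
 * "target-port-wwn" and "target-port-id".
 */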

static uint32_t
fp_map_remote_port_state(uint32_t rm_state)
{
	switch (rm_state) {
	case PORT_DEVICE_LOGGED_IN:
		return (FC_HBA_PORTSTATE_ONLINE);
	case PORT_DEVICE_VALID:
	case PORT_DEVICE_INVALID:
	default:
		return (FC_HBA_PORTSTATE_UNKNOWN);
	}
}
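
/*
 * Note that the mapping above is deliberately coarse: only a remote port in
 * the PORT_DEVICE_LOGGED_IN state is reported as FC_HBA_PORTSTATE_ONLINE;
 * every other pd_state value, including PORT_DEVICE_VALID and
 * PORT_DEVICE_INVALID, is reported as FC_HBA_PORTSTATE_UNKNOWN (presumably
 * for the FC-HBA, i.e. T11 HBA API, reporting paths that consume these
 * values).
 */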