1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Emulex. All rights reserved. 24 * Use is subject to license terms. 25 * Copyright (c) 2011 Bayard G. Bell. All rights reserved. 
26 */ 27 28 29 #define DEF_ICFG 1 30 31 #include <emlxs.h> 32 #include <emlxs_version.h> 33 34 35 char emlxs_revision[] = EMLXS_REVISION; 36 char emlxs_version[] = EMLXS_VERSION; 37 char emlxs_name[] = EMLXS_NAME; 38 char emlxs_label[] = EMLXS_LABEL; 39 40 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */ 41 EMLXS_MSG_DEF(EMLXS_SOLARIS_C); 42 43 #ifdef MENLO_SUPPORT 44 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp); 45 #endif /* MENLO_SUPPORT */ 46 47 static void emlxs_fca_attach(emlxs_hba_t *hba); 48 static void emlxs_fca_detach(emlxs_hba_t *hba); 49 static void emlxs_drv_banner(emlxs_hba_t *hba); 50 static int32_t emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd); 51 52 static int32_t emlxs_get_props(emlxs_hba_t *hba); 53 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp); 54 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp); 55 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp); 56 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp); 57 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp); 58 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 59 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp); 60 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp); 61 static uint32_t emlxs_add_instance(int32_t ddiinst); 62 static void emlxs_iodone(emlxs_buf_t *sbp); 63 static int emlxs_pm_lower_power(dev_info_t *dip); 64 static int emlxs_pm_raise_power(dev_info_t *dip); 65 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 66 uint32_t failed); 67 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3); 68 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba); 69 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code, 70 uint32_t args, uint32_t *arg); 71 72 static void emlxs_read_vport_prop(emlxs_hba_t *hba); 73 74 75 76 /* 77 * Driver Entry Routines. 
78 */ 79 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t); 80 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t); 81 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *); 82 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *); 83 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t, 84 cred_t *, int32_t *); 85 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **); 86 87 88 /* 89 * FC_AL Transport Functions. 90 */ 91 static opaque_t emlxs_bind_port(dev_info_t *, fc_fca_port_info_t *, 92 fc_fca_bind_info_t *); 93 static void emlxs_unbind_port(opaque_t); 94 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *); 95 static int32_t emlxs_get_cap(opaque_t, char *, void *); 96 static int32_t emlxs_set_cap(opaque_t, char *, void *); 97 static int32_t emlxs_get_map(opaque_t, fc_lilpmap_t *); 98 static int32_t emlxs_ub_alloc(opaque_t, uint64_t *, uint32_t, 99 uint32_t *, uint32_t); 100 static int32_t emlxs_ub_free(opaque_t, uint32_t, uint64_t *); 101 102 static opaque_t emlxs_get_device(opaque_t, fc_portid_t); 103 static int32_t emlxs_notify(opaque_t, uint32_t); 104 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *); 105 106 /* 107 * Driver Internal Functions. 
108 */ 109 110 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *); 111 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t); 112 #ifdef EMLXS_I386 113 #ifdef S11 114 static int32_t emlxs_quiesce(dev_info_t *); 115 #endif 116 #endif 117 static int32_t emlxs_hba_resume(dev_info_t *); 118 static int32_t emlxs_hba_suspend(dev_info_t *); 119 static int32_t emlxs_hba_detach(dev_info_t *); 120 static int32_t emlxs_hba_attach(dev_info_t *); 121 static void emlxs_lock_destroy(emlxs_hba_t *); 122 static void emlxs_lock_init(emlxs_hba_t *); 123 static ULP_BDE64 *emlxs_pkt_to_bpl(ULP_BDE64 *, fc_packet_t *, 124 uint32_t, uint8_t); 125 126 char *emlxs_pm_components[] = { 127 "NAME=emlxx000", 128 "0=Device D3 State", 129 "1=Device D0 State" 130 }; 131 132 133 /* 134 * Default emlx dma limits 135 */ 136 ddi_dma_lim_t emlxs_dma_lim = { 137 (uint32_t)0, /* dlim_addr_lo */ 138 (uint32_t)0xffffffff, /* dlim_addr_hi */ 139 (uint_t)0x00ffffff, /* dlim_cntr_max */ 140 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */ 141 1, /* dlim_minxfer */ 142 0x00ffffff /* dlim_dmaspeed */ 143 }; 144 145 /* 146 * Be careful when using these attributes; the defaults listed below are 147 * (almost) the most general case, permitting allocation in almost any 148 * way supported by the LightPulse family. The sole exception is the 149 * alignment specified as requiring memory allocation on a 4-byte boundary; 150 * the Lightpulse can DMA memory on any byte boundary. 151 * 152 * The LightPulse family currently is limited to 16M transfers; 153 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields. 
 */
/*
 * General-purpose DMA attributes: 64-bit addressing, 16MB max transfer,
 * byte alignment, up to EMLXS_SGLLEN scatter/gather entries.
 */
ddi_dma_attr_t emlxs_dma_attr = {
	DMA_ATTR_V0,			/* dma_attr_version */
	(uint64_t)0,			/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,		/* dma_attr_count_max */
	1,				/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,		/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,		/* dma_attr_seg */
	EMLXS_SGLLEN,			/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

/*
 * Same as emlxs_dma_attr but permits relaxed ordering; used for FCP data
 * buffers where strict PCI write ordering is not required.
 */
ddi_dma_attr_t emlxs_dma_attr_ro = {
	DMA_ATTR_V0,			/* dma_attr_version */
	(uint64_t)0,			/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,		/* dma_attr_count_max */
	1,				/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,		/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,		/* dma_attr_seg */
	EMLXS_SGLLEN,			/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING	/* dma_attr_flags */
};

/*
 * Single scatter/gather entry variant; used where the buffer must be
 * physically contiguous (e.g. FCP cmd/rsp payloads).
 */
ddi_dma_attr_t emlxs_dma_attr_1sg = {
	DMA_ATTR_V0,			/* dma_attr_version */
	(uint64_t)0,			/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,		/* dma_attr_count_max */
	1,				/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,		/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,		/* dma_attr_seg */
	1,				/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};

#if (EMLXS_MODREV >= EMLXS_MODREV3)
/* FCIP response buffer attributes; only referenced by MODREV3+ transports */
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
	DMA_ATTR_V0,			/* dma_attr_version */
	(uint64_t)0,			/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,	/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,		/* dma_attr_count_max */
	1,				/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,				/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,		/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,		/* dma_attr_seg */
	EMLXS_SGLLEN,			/* dma_attr_sgllen */
	1,				/* dma_attr_granular */
	0				/* dma_attr_flags */
};
#endif	/* >= EMLXS_MODREV3 */

/*
 * DDI access attributes for device
 */
ddi_device_acc_attr_t emlxs_dev_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * DDI access attributes for data
 */
ddi_device_acc_attr_t emlxs_data_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* don't swap for Data */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
 */
/*
 * Leadville FCA transport vector. Exactly one of the four variants below
 * is compiled in, selected by EMLXS_MODREV; the member layout of
 * fc_fca_tran_t differs per MODREV (MODREV5 adds NPIV fields, MODREV2
 * lacks the per-class DMA attribute slots), so the initializers are
 * positional and must not be reordered.
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_fca_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_fca_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_fca_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,	/* fca_version */
	MAX_VPORTS,		/* number of ports */
	sizeof (emlxs_buf_t),	/* pkt size */
	2048,			/* max cmds */
	&emlxs_dma_lim,		/* DMA limits */
	0,			/* iblock, to be filled in later */
	&emlxs_dma_attr,	/* dma attributes */
	&emlxs_data_acc_attr,	/* access attributes */
	emlxs_bind_port,
	emlxs_unbind_port,
	emlxs_pkt_init,
	emlxs_pkt_uninit,
	emlxs_transport,
	emlxs_get_cap,
	emlxs_set_cap,
	emlxs_get_map,
	emlxs_transport,
	emlxs_ub_alloc,
	emlxs_ub_free,
	emlxs_ub_release,
	emlxs_pkt_abort,
	emlxs_fca_reset,
	emlxs_port_manage,
	emlxs_get_device,
	emlxs_notify
};
#endif	/* EMLXS_MODREV2 */

/*
 * state pointer which the implementation uses as a place to
 * hang a set of per-driver structures;
 *
 */
void *emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t emlxs_device;

uint32_t emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t emlxs_instance_count = 0;	/* uses emlxs_device.lock */


/*
 * Single private "global" lock used to gain access to
 * the hba_list and/or any other case where we need to be
 * single-threaded.
 */
uint32_t emlxs_diag_state;

/*
 * CB ops vector. Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open */
	emlxs_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	emlxs_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_stream */
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#endif
	CB_REV,		/* rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

/*
 * Device operations vector registered with the DDI framework.
 * NOTE(review): the devo_quiesce slot is only initialized when both
 * EMLXS_I386 and S11 are defined; on other builds the initializer list
 * is one member short and devo_quiesce is implicitly zero — confirm
 * this is intended for those platforms.
 */
static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,		/* refcnt */
	emlxs_info,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	emlxs_attach,	/* attach */
	emlxs_detach,	/* detach */
	nodev,		/* reset */
	&emlxs_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops */
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce */
#endif
#endif
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t sd_bucket_mutex;
extern sd_bucket_info_t sd_bucket;
#endif	/* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
 */
static struct modldrv emlxs_modldrv = {
	&mod_driverops,	/* module type - driver */
	emlxs_name,	/* module name */
	&emlxs_ops,	/* driver ops */
};


/*
 * Driver module linkage structure
 */
static struct modlinkage emlxs_modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&emlxs_modldrv,	/* ml_linkage */
	NULL		/* end of driver linkage */
};


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order.
 */
/* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
/* FC_EXPLN_NONE, FC_ACTION_RETRYABLE */

/*
 * Translation table mapping firmware IOSTAT completion codes to the
 * Leadville (fctl) pkt_state/pkt_reason/pkt_expln/pkt_action tuple
 * reported back to the transport. Codes absent from the table get the
 * default tuple noted above.
 */
emlxs_xlat_err_t emlxs_iostat_tbl[] = {
	/* {f/w code, pkt_state, pkt_reason, */
	/* pkt_expln, pkt_action} */

	/* 0x00 - Do not remove */
	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x01 - Do not remove */
	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},

	/*
	 * This is a default entry.
	 * The real codes are written dynamically in emlxs_els.c
	 */
	/* 0x09 */
	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x10 */
	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x11 */
	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x04 */
	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x05 */
	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x06 */
	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},

	/* CLASS 2 only */
	/* 0x07 */
	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
};

/* Number of entries in emlxs_iostat_tbl */
#define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order.
*/ 552 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 553 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 554 555 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 556 /* {f/w code, pkt_state, pkt_reason, */ 557 /* pkt_expln, pkt_action} */ 558 559 /* 0x01 */ 560 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 561 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 562 563 /* 0x02 */ 564 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 565 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 566 567 /* 0x04 */ 568 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 569 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 570 571 /* 0x05 */ 572 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 573 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 574 575 /* 0x06 */ 576 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 577 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 578 579 /* 0x07 */ 580 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 581 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 582 583 /* 0x08 */ 584 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 585 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 586 587 /* 0x0B */ 588 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 589 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 590 591 /* 0x0D */ 592 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 593 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 594 595 /* 0x0E */ 596 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 597 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 598 599 /* 0x0F */ 600 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 601 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 602 603 /* 0x11 */ 604 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 605 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 606 607 /* 0x13 */ 608 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 609 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 610 611 /* 0x14 */ 612 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 613 FC_EXPLN_NONE, 
FC_ACTION_RETRYABLE}, 614 615 /* 0x15 */ 616 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 617 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 618 619 /* 0x16 */ 620 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 621 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 622 623 /* 0x17 */ 624 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 625 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 626 627 /* 0x18 */ 628 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 629 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 630 631 /* 0x1A */ 632 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 633 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 634 635 /* 0x21 */ 636 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 637 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 638 639 /* Occurs at link down */ 640 /* 0x28 */ 641 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 642 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 643 644 /* 0xF0 */ 645 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 646 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 647 }; 648 649 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 650 651 652 653 emlxs_table_t emlxs_error_table[] = { 654 {IOERR_SUCCESS, "No error."}, 655 {IOERR_MISSING_CONTINUE, "Missing continue."}, 656 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 657 {IOERR_INTERNAL_ERROR, "Internal error."}, 658 {IOERR_INVALID_RPI, "Invalid RPI."}, 659 {IOERR_NO_XRI, "No XRI."}, 660 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 661 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 662 {IOERR_ILLEGAL_FIELD, "Illegal field."}, 663 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."}, 664 {IOERR_TX_DMA_FAILED, "TX DMA failed."}, 665 {IOERR_RX_DMA_FAILED, "RX DMA failed."}, 666 {IOERR_ILLEGAL_FRAME, "Illegal frame."}, 667 {IOERR_NO_RESOURCES, "No resources."}, 668 {IOERR_ILLEGAL_LENGTH, "Illegal length."}, 669 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."}, 670 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."}, 
671 {IOERR_ABORT_REQUESTED, "Abort requested."}, 672 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."}, 673 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."}, 674 {IOERR_RING_RESET, "Ring reset."}, 675 {IOERR_LINK_DOWN, "Link down."}, 676 {IOERR_CORRUPTED_DATA, "Corrupted data."}, 677 {IOERR_CORRUPTED_RPI, "Corrupted RPI."}, 678 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."}, 679 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."}, 680 {IOERR_DUP_FRAME, "Duplicate frame."}, 681 {IOERR_LINK_CONTROL_FRAME, "Link control frame."}, 682 {IOERR_BAD_HOST_ADDRESS, "Bad host address."}, 683 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."}, 684 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."}, 685 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."}, 686 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."}, 687 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."}, 688 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"}, 689 {IOERR_XRIBUF_MISSING, "XRI buffer missing"}, 690 {IOERR_ROFFSET_INVAL, "Relative offset invalid."}, 691 {IOERR_ROFFSET_MISSING, "Relative offset missing."}, 692 {IOERR_INSUF_BUFFER, "Buffer too small."}, 693 {IOERR_MISSING_SI, "ELS frame missing SI"}, 694 {IOERR_MISSING_ES, "Exhausted burst without ES"}, 695 {IOERR_INCOMP_XFER, "Transfer incomplete."}, 696 {IOERR_ABORT_TIMEOUT, "Abort timeout."} 697 698 }; /* emlxs_error_table */ 699 700 701 emlxs_table_t emlxs_state_table[] = { 702 {IOSTAT_SUCCESS, "Success."}, 703 {IOSTAT_FCP_RSP_ERROR, "FCP response error."}, 704 {IOSTAT_REMOTE_STOP, "Remote stop."}, 705 {IOSTAT_LOCAL_REJECT, "Local reject."}, 706 {IOSTAT_NPORT_RJT, "NPort reject."}, 707 {IOSTAT_FABRIC_RJT, "Fabric reject."}, 708 {IOSTAT_NPORT_BSY, "Nport busy."}, 709 {IOSTAT_FABRIC_BSY, "Fabric busy."}, 710 {IOSTAT_INTERMED_RSP, "Intermediate response."}, 711 {IOSTAT_LS_RJT, "LS reject."}, 712 {IOSTAT_CMD_REJECT, "Cmd reject."}, 713 {IOSTAT_FCP_TGT_LENCHK, "TGT length check."}, 714 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."}, 715 {IOSTAT_DATA_UNDERRUN, 
"Data underrun."}, 716 {IOSTAT_DATA_OVERRUN, "Data overrun."}, 717 718 }; /* emlxs_state_table */ 719 720 721 #ifdef MENLO_SUPPORT 722 emlxs_table_t emlxs_menlo_cmd_table[] = { 723 {MENLO_CMD_INITIALIZE, "MENLO_INIT"}, 724 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"}, 725 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"}, 726 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"}, 727 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"}, 728 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"}, 729 730 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"}, 731 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"}, 732 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"}, 733 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"}, 734 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"}, 735 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"}, 736 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"}, 737 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"}, 738 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"}, 739 740 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"}, 741 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"}, 742 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"}, 743 744 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"}, 745 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 746 747 {MENLO_CMD_RESET, "MENLO_RESET"}, 748 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 749 750 }; /* emlxs_menlo_cmd_table */ 751 752 emlxs_table_t emlxs_menlo_rsp_table[] = { 753 {MENLO_RSP_SUCCESS, "SUCCESS"}, 754 {MENLO_ERR_FAILED, "FAILED"}, 755 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 756 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 757 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 758 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 759 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 760 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 761 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 762 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 763 {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 764 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 765 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 766 {MENLO_ERR_CHECKSUM, 
"CHECKSUM_ERROR"}, 767 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 768 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 769 {MENLO_ERR_BUSY, "BUSY"}, 770 771 }; /* emlxs_menlo_rsp_table */ 772 773 #endif /* MENLO_SUPPORT */ 774 775 776 emlxs_table_t emlxs_mscmd_table[] = { 777 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 778 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 779 {MS_GTIN, "MS_GTIN"}, 780 {MS_GIEL, "MS_GIEL"}, 781 {MS_GIET, "MS_GIET"}, 782 {MS_GDID, "MS_GDID"}, 783 {MS_GMID, "MS_GMID"}, 784 {MS_GFN, "MS_GFN"}, 785 {MS_GIELN, "MS_GIELN"}, 786 {MS_GMAL, "MS_GMAL"}, 787 {MS_GIEIL, "MS_GIEIL"}, 788 {MS_GPL, "MS_GPL"}, 789 {MS_GPT, "MS_GPT"}, 790 {MS_GPPN, "MS_GPPN"}, 791 {MS_GAPNL, "MS_GAPNL"}, 792 {MS_GPS, "MS_GPS"}, 793 {MS_GPSC, "MS_GPSC"}, 794 {MS_GATIN, "MS_GATIN"}, 795 {MS_GSES, "MS_GSES"}, 796 {MS_GPLNL, "MS_GPLNL"}, 797 {MS_GPLT, "MS_GPLT"}, 798 {MS_GPLML, "MS_GPLML"}, 799 {MS_GPAB, "MS_GPAB"}, 800 {MS_GNPL, "MS_GNPL"}, 801 {MS_GPNL, "MS_GPNL"}, 802 {MS_GPFCP, "MS_GPFCP"}, 803 {MS_GPLI, "MS_GPLI"}, 804 {MS_GNID, "MS_GNID"}, 805 {MS_RIELN, "MS_RIELN"}, 806 {MS_RPL, "MS_RPL"}, 807 {MS_RPLN, "MS_RPLN"}, 808 {MS_RPLT, "MS_RPLT"}, 809 {MS_RPLM, "MS_RPLM"}, 810 {MS_RPAB, "MS_RPAB"}, 811 {MS_RPFCP, "MS_RPFCP"}, 812 {MS_RPLI, "MS_RPLI"}, 813 {MS_DPL, "MS_DPL"}, 814 {MS_DPLN, "MS_DPLN"}, 815 {MS_DPLM, "MS_DPLM"}, 816 {MS_DPLML, "MS_DPLML"}, 817 {MS_DPLI, "MS_DPLI"}, 818 {MS_DPAB, "MS_DPAB"}, 819 {MS_DPALL, "MS_DPALL"} 820 821 }; /* emlxs_mscmd_table */ 822 823 824 emlxs_table_t emlxs_ctcmd_table[] = { 825 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 826 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 827 {SLI_CTNS_GA_NXT, "GA_NXT"}, 828 {SLI_CTNS_GPN_ID, "GPN_ID"}, 829 {SLI_CTNS_GNN_ID, "GNN_ID"}, 830 {SLI_CTNS_GCS_ID, "GCS_ID"}, 831 {SLI_CTNS_GFT_ID, "GFT_ID"}, 832 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 833 {SLI_CTNS_GPT_ID, "GPT_ID"}, 834 {SLI_CTNS_GID_PN, "GID_PN"}, 835 {SLI_CTNS_GID_NN, "GID_NN"}, 836 {SLI_CTNS_GIP_NN, "GIP_NN"}, 837 {SLI_CTNS_GIPA_NN, "GIPA_NN"}, 838 {SLI_CTNS_GSNN_NN, "GSNN_NN"}, 839 
{SLI_CTNS_GNN_IP, "GNN_IP"}, 840 {SLI_CTNS_GIPA_IP, "GIPA_IP"}, 841 {SLI_CTNS_GID_FT, "GID_FT"}, 842 {SLI_CTNS_GID_PT, "GID_PT"}, 843 {SLI_CTNS_RPN_ID, "RPN_ID"}, 844 {SLI_CTNS_RNN_ID, "RNN_ID"}, 845 {SLI_CTNS_RCS_ID, "RCS_ID"}, 846 {SLI_CTNS_RFT_ID, "RFT_ID"}, 847 {SLI_CTNS_RSPN_ID, "RSPN_ID"}, 848 {SLI_CTNS_RPT_ID, "RPT_ID"}, 849 {SLI_CTNS_RIP_NN, "RIP_NN"}, 850 {SLI_CTNS_RIPA_NN, "RIPA_NN"}, 851 {SLI_CTNS_RSNN_NN, "RSNN_NN"}, 852 {SLI_CTNS_DA_ID, "DA_ID"}, 853 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 854 855 }; /* emlxs_ctcmd_table */ 856 857 858 859 emlxs_table_t emlxs_rmcmd_table[] = { 860 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 861 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 862 {CT_OP_GSAT, "RM_GSAT"}, 863 {CT_OP_GHAT, "RM_GHAT"}, 864 {CT_OP_GPAT, "RM_GPAT"}, 865 {CT_OP_GDAT, "RM_GDAT"}, 866 {CT_OP_GPST, "RM_GPST"}, 867 {CT_OP_GDP, "RM_GDP"}, 868 {CT_OP_GDPG, "RM_GDPG"}, 869 {CT_OP_GEPS, "RM_GEPS"}, 870 {CT_OP_GLAT, "RM_GLAT"}, 871 {CT_OP_SSAT, "RM_SSAT"}, 872 {CT_OP_SHAT, "RM_SHAT"}, 873 {CT_OP_SPAT, "RM_SPAT"}, 874 {CT_OP_SDAT, "RM_SDAT"}, 875 {CT_OP_SDP, "RM_SDP"}, 876 {CT_OP_SBBS, "RM_SBBS"}, 877 {CT_OP_RPST, "RM_RPST"}, 878 {CT_OP_VFW, "RM_VFW"}, 879 {CT_OP_DFW, "RM_DFW"}, 880 {CT_OP_RES, "RM_RES"}, 881 {CT_OP_RHD, "RM_RHD"}, 882 {CT_OP_UFW, "RM_UFW"}, 883 {CT_OP_RDP, "RM_RDP"}, 884 {CT_OP_GHDR, "RM_GHDR"}, 885 {CT_OP_CHD, "RM_CHD"}, 886 {CT_OP_SSR, "RM_SSR"}, 887 {CT_OP_RSAT, "RM_RSAT"}, 888 {CT_OP_WSAT, "RM_WSAT"}, 889 {CT_OP_RSAH, "RM_RSAH"}, 890 {CT_OP_WSAH, "RM_WSAH"}, 891 {CT_OP_RACT, "RM_RACT"}, 892 {CT_OP_WACT, "RM_WACT"}, 893 {CT_OP_RKT, "RM_RKT"}, 894 {CT_OP_WKT, "RM_WKT"}, 895 {CT_OP_SSC, "RM_SSC"}, 896 {CT_OP_QHBA, "RM_QHBA"}, 897 {CT_OP_GST, "RM_GST"}, 898 {CT_OP_GFTM, "RM_GFTM"}, 899 {CT_OP_SRL, "RM_SRL"}, 900 {CT_OP_SI, "RM_SI"}, 901 {CT_OP_SRC, "RM_SRC"}, 902 {CT_OP_GPB, "RM_GPB"}, 903 {CT_OP_SPB, "RM_SPB"}, 904 {CT_OP_RPB, "RM_RPB"}, 905 {CT_OP_RAPB, "RM_RAPB"}, 906 {CT_OP_GBC, "RM_GBC"}, 907 {CT_OP_GBS, "RM_GBS"}, 908 {CT_OP_SBS, 
"RM_SBS"}, 909 {CT_OP_GANI, "RM_GANI"}, 910 {CT_OP_GRV, "RM_GRV"}, 911 {CT_OP_GAPBS, "RM_GAPBS"}, 912 {CT_OP_APBC, "RM_APBC"}, 913 {CT_OP_GDT, "RM_GDT"}, 914 {CT_OP_GDLMI, "RM_GDLMI"}, 915 {CT_OP_GANA, "RM_GANA"}, 916 {CT_OP_GDLV, "RM_GDLV"}, 917 {CT_OP_GWUP, "RM_GWUP"}, 918 {CT_OP_GLM, "RM_GLM"}, 919 {CT_OP_GABS, "RM_GABS"}, 920 {CT_OP_SABS, "RM_SABS"}, 921 {CT_OP_RPR, "RM_RPR"}, 922 {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */ 923 924 }; /* emlxs_rmcmd_table */ 925 926 927 emlxs_table_t emlxs_elscmd_table[] = { 928 {ELS_CMD_ACC, "ACC"}, 929 {ELS_CMD_LS_RJT, "LS_RJT"}, 930 {ELS_CMD_PLOGI, "PLOGI"}, 931 {ELS_CMD_FLOGI, "FLOGI"}, 932 {ELS_CMD_LOGO, "LOGO"}, 933 {ELS_CMD_ABTX, "ABTX"}, 934 {ELS_CMD_RCS, "RCS"}, 935 {ELS_CMD_RES, "RES"}, 936 {ELS_CMD_RSS, "RSS"}, 937 {ELS_CMD_RSI, "RSI"}, 938 {ELS_CMD_ESTS, "ESTS"}, 939 {ELS_CMD_ESTC, "ESTC"}, 940 {ELS_CMD_ADVC, "ADVC"}, 941 {ELS_CMD_RTV, "RTV"}, 942 {ELS_CMD_RLS, "RLS"}, 943 {ELS_CMD_ECHO, "ECHO"}, 944 {ELS_CMD_TEST, "TEST"}, 945 {ELS_CMD_RRQ, "RRQ"}, 946 {ELS_CMD_PRLI, "PRLI"}, 947 {ELS_CMD_PRLO, "PRLO"}, 948 {ELS_CMD_SCN, "SCN"}, 949 {ELS_CMD_TPLS, "TPLS"}, 950 {ELS_CMD_GPRLO, "GPRLO"}, 951 {ELS_CMD_GAID, "GAID"}, 952 {ELS_CMD_FACT, "FACT"}, 953 {ELS_CMD_FDACT, "FDACT"}, 954 {ELS_CMD_NACT, "NACT"}, 955 {ELS_CMD_NDACT, "NDACT"}, 956 {ELS_CMD_QoSR, "QoSR"}, 957 {ELS_CMD_RVCS, "RVCS"}, 958 {ELS_CMD_PDISC, "PDISC"}, 959 {ELS_CMD_FDISC, "FDISC"}, 960 {ELS_CMD_ADISC, "ADISC"}, 961 {ELS_CMD_FARP, "FARP"}, 962 {ELS_CMD_FARPR, "FARPR"}, 963 {ELS_CMD_FAN, "FAN"}, 964 {ELS_CMD_RSCN, "RSCN"}, 965 {ELS_CMD_SCR, "SCR"}, 966 {ELS_CMD_LINIT, "LINIT"}, 967 {ELS_CMD_RNID, "RNID"}, 968 {ELS_CMD_AUTH, "AUTH"} 969 970 }; /* emlxs_elscmd_table */ 971 972 973 /* 974 * 975 * Device Driver Entry Routines 976 * 977 */ 978 979 #ifdef MODSYM_SUPPORT 980 static void emlxs_fca_modclose(); 981 static int emlxs_fca_modopen(); 982 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */ 983 984 static int 985 emlxs_fca_modopen() 986 { 987 
	int err;

	/* Already holding misc/fctl; nothing to do. */
	if (emlxs_modsym.mod_fctl) {
		return (0);
	}

	/* Leadville (fctl) */
	err = 0;
	emlxs_modsym.mod_fctl =
	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
	if (!emlxs_modsym.mod_fctl) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
		    DRIVER_NAME, err);

		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_attach is present */
	emlxs_modsym.fc_fca_attach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_detach is present */
	emlxs_modsym.fc_fca_detach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_init is present */
	emlxs_modsym.fc_fca_init =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
		goto failed;
	}

	return (0);

failed:

	/* Drop the module hold and clear any symbols resolved so far */
	emlxs_fca_modclose();

	return (1);


} /* emlxs_fca_modopen() */


/*
 * Release the hold on misc/fctl and invalidate the resolved symbols.
 * Safe to call even when emlxs_fca_modopen() failed part way through,
 * since each field is checked/cleared independently.
 */
static void
emlxs_fca_modclose()
{
	if (emlxs_modsym.mod_fctl) {
		(void) ddi_modclose(emlxs_modsym.mod_fctl);
		emlxs_modsym.mod_fctl = 0;
	}

	emlxs_modsym.fc_fca_attach = NULL;
	emlxs_modsym.fc_fca_detach = NULL;
	emlxs_modsym.fc_fca_init = NULL;

	return;

} /* emlxs_fca_modclose() */

#endif /* MODSYM_SUPPORT */



/*
 * Global driver
 * initialization, called once when driver is loaded
 */
int
_init(void)
{
	int ret;
	char buf[64];

	/*
	 * First init call for this driver,
	 * so initialize the emlxs_dev_ctl structure.
	 */
	bzero(&emlxs_device, sizeof (emlxs_device));

#ifdef MODSYM_SUPPORT
	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
#endif /* MODSYM_SUPPORT */

	(void) sprintf(buf, "%s_device mutex", DRIVER_NAME);
	mutex_init(&emlxs_device.lock, buf, MUTEX_DRIVER, NULL);

	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
	emlxs_device.drv_timestamp = ddi_get_time();

	/* Mark all instance slots unused; 'ret' doubles as a loop index */
	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
		emlxs_instance[ret] = (uint32_t)-1;
	}

	/*
	 * Provide for one ddiinst of the emlxs_dev_ctl structure
	 * for each possible board in the system.
	 */
	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
		cmn_err(CE_WARN,
		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
		    DRIVER_NAME, ret);

		/*
		 * NOTE(review): emlxs_device.lock is not destroyed on this
		 * early-failure path (nor on the mod_install failure below),
		 * while _fini() does destroy it -- confirm intended.
		 */
		return (ret);
	}

#ifdef MODSYM_SUPPORT
	/* Open SFS; failure is tolerated here (best effort) */
	(void) emlxs_fca_modopen();
#endif /* MODSYM_SUPPORT */

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
		(void) ddi_soft_state_fini(&emlxs_soft_state);
#ifdef MODSYM_SUPPORT
		/* Close SFS */
		emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

		return (ret);
	}

#ifdef SAN_DIAG_SUPPORT
	(void) sprintf(buf, "%s_sd_bucket mutex", DRIVER_NAME);
	mutex_init(&sd_bucket_mutex, buf, MUTEX_DRIVER, NULL);
#endif /* SAN_DIAG_SUPPORT */

	/* ret is 0 here (successful mod_install) */
	return (ret);

} /* _init() */


/*
 * Called when driver is unloaded.
 */
int
_fini(void)
{
	int ret;

	/* If the module is still busy, mod_remove() fails; stay loaded */
	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
		return (ret);
	}
#ifdef MODSYM_SUPPORT
	/* Close SFS */
	emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

	/*
	 * Destroy the soft state structure
	 */
	(void) ddi_soft_state_fini(&emlxs_soft_state);

	/* Destroy the global device lock */
	mutex_destroy(&emlxs_device.lock);

#ifdef SAN_DIAG_SUPPORT
	mutex_destroy(&sd_bucket_mutex);
#endif /* SAN_DIAG_SUPPORT */

	/* ret is 0 here (successful mod_remove) */
	return (ret);

} /* _fini() */



/* Report module information via modinfo(8); see _info(9E). */
int
_info(struct modinfo *modinfop)
{

	return (mod_info(&emlxs_modlinkage, modinfop));

} /* _info() */


/*
 * Attach an ddiinst of an emlx host adapter.
 * Allocate data structures, initialize the adapter and we're ready to fly.
 */
static int
emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	int ddiinst;
	int emlxinst;
	int rval;

	switch (cmd) {
	case DDI_ATTACH:
		/* If successful this will set EMLXS_PM_IN_ATTACH */
		rval = emlxs_hba_attach(dip);
		break;

	case DDI_PM_RESUME:
		/* This will resume the driver */
		rval = emlxs_pm_raise_power(dip);
		break;

	case DDI_RESUME:
		/* This will resume the driver */
		rval = emlxs_hba_resume(dip);
		break;

	default:
		rval = DDI_FAILURE;
	}

	if (rval == DDI_SUCCESS) {
		ddiinst = ddi_get_instance(dip);
		emlxinst = emlxs_get_instance(ddiinst);
		hba = emlxs_device.hba[emlxinst];

		/* NULL / -1 mean the instance never fully registered */
		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {

			/* Enable driver dump feature */
			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DUMP_SAFE;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	return (rval);

} /* emlxs_attach() */


/*
 * Detach/prepare driver to unload
 * (see detach(9E)).
 */
static int
emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int emlxinst;
	int rval;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];

	if (hba == NULL) {
		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);

		return (DDI_FAILURE);
	}

	/* -1 marks an instance whose attach failed part way */
	if (hba == (emlxs_hba_t *)-1) {
		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
		    DRIVER_NAME);

		return (DDI_FAILURE);
	}

	port = &PPORT;
	rval = DDI_SUCCESS;

	/* Check driver dump; refuse to detach while a dump is in flight */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->flag & FC_DUMP_ACTIVE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "emlxs_detach: Driver busy. Driver dump active.");

		return (DDI_FAILURE);
	}

#ifdef SFCT_SUPPORT
	/* Refuse while the COMSTAR target port is online or unacknowledged */
	if (port->tgt_mode && ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "emlxs_detach: Driver busy. Target mode active.");

		return (DDI_FAILURE);
	}
#endif /* SFCT_SUPPORT */

	/* Refuse while an initiator port is still bound to the transport */
	if (port->ini_mode && (port->flag & EMLXS_PORT_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "emlxs_detach: Driver busy. Initiator mode active.");

		return (DDI_FAILURE);
	}

	/* Block new driver dumps while the detach is in progress */
	hba->flag &= ~FC_DUMP_SAFE;

	mutex_exit(&EMLXS_PORT_LOCK);

	switch (cmd) {
	case DDI_DETACH:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_DETACH");

		rval = emlxs_hba_detach(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to detach.");
		}
		break;


	case DDI_PM_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_PM_SUSPEND");

		/* This will suspend the driver */
		rval = emlxs_pm_lower_power(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to lower power.");
		}

		break;


	case DDI_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_SUSPEND");

		/* Suspend the driver */
		rval = emlxs_hba_suspend(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to suspend driver.");
		}
		break;


	default:
		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
		    DRIVER_NAME, cmd);
		rval = DDI_FAILURE;
	}

	if (rval == DDI_FAILURE) {
		/* Re-Enable driver dump feature */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_DUMP_SAFE;
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	return (rval);

} /* emlxs_detach() */


/* EMLXS_PORT_LOCK must be held when calling this */
extern void
emlxs_port_init(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;

	/* Initialize the base node */
	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
	port->node_base.nlp_Rpi = 0;
	port->node_base.nlp_DID = 0xffffff;
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	port->node_base.nlp_base = 1;
	port->node_count = 0;

	/* Disabled ports get an all-FF placeholder WWNN/WWPN */
	if (!(port->flag & EMLXS_PORT_ENABLE)) {
		uint8_t dummy_wwn[8] =
		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
	}

	/* Unconfigured ports inherit the HBA symbolic node/port names */
	if (!(port->flag & EMLXS_PORT_CONFIG)) {
		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 256);
		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn, 256);
	}

	/*
	 * Seed the port's service parameters from the HBA copy, then
	 * overwrite the name fields with this port's own WWNs.
	 */
	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
	    sizeof (SERV_PARM));
	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
	    sizeof (NAME_TYPE));
	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
	    sizeof (NAME_TYPE));

	return;

} /* emlxs_port_init() */


/*
 * Walk the PCI capability list and turn off Correctable Error
 * Reporting in the PCI Express Device Control register.
 */
void
emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
{
#define	NXT_PTR_OFF	PCI_BYTE
#define	PCIE_DEVCTL_OFF	0x8
#define	PCIE_CAP_ID	0x10

	uint8_t cap_ptr;
	/*
	 * NOTE(review): cap_id is only assigned inside the capability-walk
	 * loop below; if cap_ptr reads as 0 the loop never runs and the
	 * later "cap_id == PCIE_CAP_ID" test reads an uninitialized value
	 * -- confirm whether cap_ptr can be 0 on supported hardware.
	 */
	uint8_t cap_id;
	uint16_t tmp16;

	/* Start of the capability list from PCI config space */
	cap_ptr = ddi_get8(hba->pci_acc_handle,
	    (uint8_t *)(hba->pci_addr + PCI_CAP_POINTER));
1422 1423 while (cap_ptr) { 1424 cap_id = ddi_get8(hba->pci_acc_handle, 1425 (uint8_t *)(hba->pci_addr + cap_ptr)); 1426 1427 if (cap_id == PCIE_CAP_ID) { 1428 break; 1429 } 1430 cap_ptr = ddi_get8(hba->pci_acc_handle, 1431 (uint8_t *)(hba->pci_addr + cap_ptr + NXT_PTR_OFF)); 1432 } 1433 1434 /* PCI Express Capability Register Set */ 1435 /* Turn off the Correctable Error Reporting */ 1436 /* (the Device Control Register, bit 0). */ 1437 1438 if (cap_id == PCIE_CAP_ID) { 1439 tmp16 = ddi_get16(hba->pci_acc_handle, 1440 (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF)); 1441 tmp16 &= ~1; 1442 (void) ddi_put16(hba->pci_acc_handle, 1443 (uint16_t *)(hba->pci_addr + cap_ptr + PCIE_DEVCTL_OFF), 1444 tmp16); 1445 } 1446 } 1447 1448 /* 1449 * emlxs_bind_port 1450 * 1451 * Arguments: 1452 * 1453 * dip: the dev_info pointer for the ddiinst 1454 * port_info: pointer to info handed back to the transport 1455 * bind_info: pointer to info from the transport 1456 * 1457 * Return values: a port handle for this port, NULL for failure 1458 * 1459 */ 1460 static opaque_t 1461 emlxs_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info, 1462 fc_fca_bind_info_t *bind_info) 1463 { 1464 emlxs_hba_t *hba; 1465 emlxs_port_t *port; 1466 emlxs_port_t *vport; 1467 int ddiinst; 1468 emlxs_vpd_t *vpd; 1469 emlxs_config_t *cfg; 1470 char *dptr; 1471 char buffer[16]; 1472 uint32_t length; 1473 uint32_t len; 1474 char topology[32]; 1475 char linkspeed[32]; 1476 1477 ddiinst = ddi_get_instance(dip); 1478 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 1479 port = &PPORT; 1480 1481 ddiinst = hba->ddiinst; 1482 vpd = &VPD; 1483 cfg = &CFG; 1484 1485 mutex_enter(&EMLXS_PORT_LOCK); 1486 1487 if (bind_info->port_num > 0) { 1488 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1489 if (!(hba->flag & FC_NPIV_ENABLED) || 1490 !(bind_info->port_npiv) || 1491 (bind_info->port_num > hba->vpi_max)) 1492 #elif (EMLXS_MODREV >= EMLXS_MODREV3) 1493 if (!(hba->flag & FC_NPIV_ENABLED) || 1494 
(bind_info->port_num > hba->vpi_high)) 1495 #endif 1496 { 1497 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1498 "emlxs_port_bind: Port %d not supported.", 1499 bind_info->port_num); 1500 1501 mutex_exit(&EMLXS_PORT_LOCK); 1502 1503 port_info->pi_error = FC_OUTOFBOUNDS; 1504 return (NULL); 1505 } 1506 } 1507 1508 /* Get true port pointer */ 1509 port = &VPORT(bind_info->port_num); 1510 1511 if (port->tgt_mode) { 1512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1513 "emlxs_port_bind: Port %d is in target mode.", 1514 bind_info->port_num); 1515 1516 mutex_exit(&EMLXS_PORT_LOCK); 1517 1518 port_info->pi_error = FC_OUTOFBOUNDS; 1519 return (NULL); 1520 } 1521 1522 if (!port->ini_mode) { 1523 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1524 "emlxs_port_bind: Port %d is not in initiator mode.", 1525 bind_info->port_num); 1526 1527 mutex_exit(&EMLXS_PORT_LOCK); 1528 1529 port_info->pi_error = FC_OUTOFBOUNDS; 1530 return (NULL); 1531 } 1532 1533 /* Make sure the port is not already bound to the transport */ 1534 if (port->flag & EMLXS_PORT_BOUND) { 1535 1536 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1537 "emlxs_port_bind: Port %d already bound. 
flag=%x", 1538 bind_info->port_num, port->flag); 1539 1540 mutex_exit(&EMLXS_PORT_LOCK); 1541 1542 port_info->pi_error = FC_ALREADY; 1543 return (NULL); 1544 } 1545 1546 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 1547 "fca_bind_port: Port %d: port_info=%p bind_info=%p", 1548 bind_info->port_num, port_info, bind_info); 1549 1550 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1551 if (bind_info->port_npiv) { 1552 bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn, 1553 sizeof (NAME_TYPE)); 1554 bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn, 1555 sizeof (NAME_TYPE)); 1556 if (port->snn[0] == 0) { 1557 (void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn, 1558 256); 1559 } 1560 1561 if (port->spn[0] == 0) { 1562 (void) sprintf((caddr_t)port->spn, "%s VPort-%d", 1563 (caddr_t)hba->spn, port->vpi); 1564 } 1565 port->flag |= (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE); 1566 } 1567 #endif /* >= EMLXS_MODREV5 */ 1568 1569 /* 1570 * Restricted login should apply both physical and 1571 * virtual ports. 
1572 */ 1573 if (cfg[CFG_VPORT_RESTRICTED].current) { 1574 port->flag |= EMLXS_PORT_RESTRICTED; 1575 } 1576 1577 /* Perform generic port initialization */ 1578 emlxs_port_init(port); 1579 1580 /* Perform SFS specific initialization */ 1581 port->ulp_handle = bind_info->port_handle; 1582 port->ulp_statec_cb = bind_info->port_statec_cb; 1583 port->ulp_unsol_cb = bind_info->port_unsol_cb; 1584 port->ub_count = EMLXS_UB_TOKEN_OFFSET; 1585 port->ub_pool = NULL; 1586 1587 /* Update the port info structure */ 1588 1589 /* Set the topology and state */ 1590 if ((hba->state < FC_LINK_UP) || 1591 ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLE) || 1592 !(hba->flag & FC_NPIV_SUPPORTED)))) { 1593 port_info->pi_port_state = FC_STATE_OFFLINE; 1594 port_info->pi_topology = FC_TOP_UNKNOWN; 1595 } 1596 #ifdef MENLO_SUPPORT 1597 else if (hba->flag & FC_MENLO_MODE) { 1598 port_info->pi_port_state = FC_STATE_OFFLINE; 1599 port_info->pi_topology = FC_TOP_UNKNOWN; 1600 } 1601 #endif /* MENLO_SUPPORT */ 1602 else { 1603 /* Check for loop topology */ 1604 if (hba->topology == TOPOLOGY_LOOP) { 1605 port_info->pi_port_state = FC_STATE_LOOP; 1606 (void) strcpy(topology, ", loop"); 1607 1608 if (hba->flag & FC_FABRIC_ATTACHED) { 1609 port_info->pi_topology = FC_TOP_PUBLIC_LOOP; 1610 } else { 1611 port_info->pi_topology = FC_TOP_PRIVATE_LOOP; 1612 } 1613 } else { 1614 port_info->pi_topology = FC_TOP_FABRIC; 1615 port_info->pi_port_state = FC_STATE_ONLINE; 1616 (void) strcpy(topology, ", fabric"); 1617 } 1618 1619 /* Set the link speed */ 1620 switch (hba->linkspeed) { 1621 case 0: 1622 (void) strcpy(linkspeed, "Gb"); 1623 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1624 break; 1625 1626 case LA_1GHZ_LINK: 1627 (void) strcpy(linkspeed, "1Gb"); 1628 port_info->pi_port_state |= FC_STATE_1GBIT_SPEED; 1629 break; 1630 case LA_2GHZ_LINK: 1631 (void) strcpy(linkspeed, "2Gb"); 1632 port_info->pi_port_state |= FC_STATE_2GBIT_SPEED; 1633 break; 1634 case LA_4GHZ_LINK: 1635 (void) 
strcpy(linkspeed, "4Gb"); 1636 port_info->pi_port_state |= FC_STATE_4GBIT_SPEED; 1637 break; 1638 case LA_8GHZ_LINK: 1639 (void) strcpy(linkspeed, "8Gb"); 1640 port_info->pi_port_state |= FC_STATE_8GBIT_SPEED; 1641 break; 1642 case LA_10GHZ_LINK: 1643 (void) strcpy(linkspeed, "10Gb"); 1644 port_info->pi_port_state |= FC_STATE_10GBIT_SPEED; 1645 break; 1646 default: 1647 (void) sprintf(linkspeed, "unknown(0x%x)", 1648 hba->linkspeed); 1649 break; 1650 } 1651 1652 /* Adjusting port context for link up messages */ 1653 vport = port; 1654 port = &PPORT; 1655 if (vport->vpi == 0) { 1656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "%s%s", 1657 linkspeed, topology); 1658 } else if (!(hba->flag & FC_NPIV_LINKUP)) { 1659 hba->flag |= FC_NPIV_LINKUP; 1660 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_npiv_link_up_msg, 1661 "%s%s", linkspeed, topology); 1662 } 1663 port = vport; 1664 1665 } 1666 1667 /* PCIE Correctable Error Reporting workaround */ 1668 if ((hba->model_info.chip == EMLXS_BE_CHIP) && 1669 (bind_info->port_num == 0)) { 1670 emlxs_disable_pcie_ce_err(hba); 1671 } 1672 1673 /* Save initial state */ 1674 port->ulp_statec = port_info->pi_port_state; 1675 1676 /* 1677 * The transport needs a copy of the common service parameters 1678 * for this port. The transport can get any updates through 1679 * the getcap entry point. 1680 */ 1681 bcopy((void *) &port->sparam, 1682 (void *) &port_info->pi_login_params.common_service, 1683 sizeof (SERV_PARM)); 1684 1685 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 1686 /* Swap the service parameters for ULP */ 1687 emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params. 
1688 common_service); 1689 #endif /* EMLXS_MODREV2X */ 1690 1691 port_info->pi_login_params.common_service.btob_credit = 0xffff; 1692 1693 bcopy((void *) &port->wwnn, 1694 (void *) &port_info->pi_login_params.node_ww_name, 1695 sizeof (NAME_TYPE)); 1696 1697 bcopy((void *) &port->wwpn, 1698 (void *) &port_info->pi_login_params.nport_ww_name, 1699 sizeof (NAME_TYPE)); 1700 1701 /* 1702 * We need to turn off CLASS2 support. 1703 * Otherwise, FC transport will use CLASS2 as default class 1704 * and never try with CLASS3. 1705 */ 1706 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1707 #if (EMLXS_MODREVX >= EMLXS_MODREV3X) 1708 if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) { 1709 port_info->pi_login_params.class_1.class_opt &= ~0x0080; 1710 } 1711 1712 if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) { 1713 port_info->pi_login_params.class_2.class_opt &= ~0x0080; 1714 } 1715 #else /* EMLXS_SPARC or EMLXS_MODREV2X */ 1716 if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) { 1717 port_info->pi_login_params.class_1.class_opt &= ~0x8000; 1718 } 1719 1720 if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) { 1721 port_info->pi_login_params.class_2.class_opt &= ~0x8000; 1722 } 1723 #endif /* >= EMLXS_MODREV3X */ 1724 #endif /* >= EMLXS_MODREV3 */ 1725 1726 1727 #if (EMLXS_MODREV <= EMLXS_MODREV2) 1728 if ((port_info->pi_login_params.class_1.data[0]) & 0x80) { 1729 port_info->pi_login_params.class_1.data[0] &= ~0x80; 1730 } 1731 1732 if ((port_info->pi_login_params.class_2.data[0]) & 0x80) { 1733 port_info->pi_login_params.class_2.data[0] &= ~0x80; 1734 } 1735 #endif /* <= EMLXS_MODREV2 */ 1736 1737 /* Additional parameters */ 1738 port_info->pi_s_id.port_id = port->did; 1739 port_info->pi_s_id.priv_lilp_posit = 0; 1740 port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current; 1741 1742 /* Initialize the RNID parameters */ 1743 bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params)); 1744 1745 (void) sprintf((char 
*)port_info->pi_rnid_params.params.global_id, 1746 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType, 1747 hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 1748 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 1749 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 1750 1751 port_info->pi_rnid_params.params.unit_type = RNID_HBA; 1752 port_info->pi_rnid_params.params.port_id = port->did; 1753 port_info->pi_rnid_params.params.ip_version = RNID_IPV4; 1754 1755 /* Initialize the port attributes */ 1756 bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs)); 1757 1758 (void) strcpy(port_info->pi_attrs.manufacturer, "Emulex"); 1759 1760 port_info->pi_rnid_params.status = FC_SUCCESS; 1761 1762 (void) strcpy(port_info->pi_attrs.serial_number, vpd->serial_num); 1763 1764 (void) sprintf(port_info->pi_attrs.firmware_version, "%s (%s)", 1765 vpd->fw_version, vpd->fw_label); 1766 1767 #ifdef EMLXS_I386 1768 (void) sprintf(port_info->pi_attrs.option_rom_version, 1769 "Boot:%s", vpd->boot_version); 1770 #else /* EMLXS_SPARC */ 1771 (void) sprintf(port_info->pi_attrs.option_rom_version, 1772 "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version); 1773 #endif /* EMLXS_I386 */ 1774 1775 1776 (void) sprintf(port_info->pi_attrs.driver_version, "%s (%s)", 1777 emlxs_version, emlxs_revision); 1778 1779 (void) strcpy(port_info->pi_attrs.driver_name, DRIVER_NAME); 1780 1781 port_info->pi_attrs.vendor_specific_id = 1782 ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX); 1783 1784 port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3); 1785 1786 port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE; 1787 1788 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1789 1790 port_info->pi_rnid_params.params.num_attached = 0; 1791 1792 /* 1793 * Copy the serial number string (right most 16 chars) into the right 1794 * justified local buffer 1795 */ 1796 bzero(buffer, sizeof (buffer)); 1797 length = strlen(vpd->serial_num); 1798 len = (length > 16) ? 
16 : length; 1799 bcopy(&vpd->serial_num[(length - len)], 1800 &buffer[(sizeof (buffer) - len)], len); 1801 1802 port_info->pi_attrs.hba_fru_details.port_index = vpd->port_index; 1803 1804 #endif /* >= EMLXS_MODREV5 */ 1805 1806 #if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLX_MODREV == EMLXS_MODREV4)) 1807 1808 port_info->pi_rnid_params.params.num_attached = 0; 1809 1810 if (hba->flag & FC_NPIV_ENABLED) { 1811 uint8_t byte; 1812 uint8_t *wwpn; 1813 uint32_t i; 1814 uint32_t j; 1815 1816 /* Copy the WWPN as a string into the local buffer */ 1817 wwpn = (uint8_t *)&hba->wwpn; 1818 for (i = 0; i < 16; i++) { 1819 byte = *wwpn++; 1820 j = ((byte & 0xf0) >> 4); 1821 if (j <= 9) { 1822 buffer[i] = 1823 (char)((uint8_t)'0' + (uint8_t)j); 1824 } else { 1825 buffer[i] = 1826 (char)((uint8_t)'A' + (uint8_t)(j - 1827 10)); 1828 } 1829 1830 i++; 1831 j = (byte & 0xf); 1832 if (j <= 9) { 1833 buffer[i] = 1834 (char)((uint8_t)'0' + (uint8_t)j); 1835 } else { 1836 buffer[i] = 1837 (char)((uint8_t)'A' + (uint8_t)(j - 1838 10)); 1839 } 1840 } 1841 1842 port_info->pi_attrs.hba_fru_details.port_index = port->vpi; 1843 } else { 1844 /* Copy the serial number string (right most 16 chars) */ 1845 /* into the right justified local buffer */ 1846 bzero(buffer, sizeof (buffer)); 1847 length = strlen(vpd->serial_num); 1848 len = (length > 16) ? 
16 : length; 1849 bcopy(&vpd->serial_num[(length - len)], 1850 &buffer[(sizeof (buffer) - len)], len); 1851 1852 port_info->pi_attrs.hba_fru_details.port_index = 1853 vpd->port_index; 1854 } 1855 1856 #endif /* == EMLXS_MODREV3 || EMLXS_MODREV4 */ 1857 1858 #if (EMLXS_MODREV >= EMLXS_MODREV3) 1859 1860 dptr = (char *)&port_info->pi_attrs.hba_fru_details.high; 1861 dptr[0] = buffer[0]; 1862 dptr[1] = buffer[1]; 1863 dptr[2] = buffer[2]; 1864 dptr[3] = buffer[3]; 1865 dptr[4] = buffer[4]; 1866 dptr[5] = buffer[5]; 1867 dptr[6] = buffer[6]; 1868 dptr[7] = buffer[7]; 1869 port_info->pi_attrs.hba_fru_details.high = 1870 LE_SWAP64(port_info->pi_attrs.hba_fru_details.high); 1871 1872 dptr = (char *)&port_info->pi_attrs.hba_fru_details.low; 1873 dptr[0] = buffer[8]; 1874 dptr[1] = buffer[9]; 1875 dptr[2] = buffer[10]; 1876 dptr[3] = buffer[11]; 1877 dptr[4] = buffer[12]; 1878 dptr[5] = buffer[13]; 1879 dptr[6] = buffer[14]; 1880 dptr[7] = buffer[15]; 1881 port_info->pi_attrs.hba_fru_details.low = 1882 LE_SWAP64(port_info->pi_attrs.hba_fru_details.low); 1883 1884 #endif /* >= EMLXS_MODREV3 */ 1885 1886 #if (EMLXS_MODREV >= EMLXS_MODREV4) 1887 (void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name, 1888 (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN); 1889 (void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name, 1890 (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN); 1891 #endif /* >= EMLXS_MODREV4 */ 1892 1893 (void) sprintf(port_info->pi_attrs.hardware_version, "%x", vpd->biuRev); 1894 1895 /* Set the hba speed limit */ 1896 if (vpd->link_speed & LMT_10GB_CAPABLE) { 1897 port_info->pi_attrs.supported_speed |= 1898 FC_HBA_PORTSPEED_10GBIT; 1899 } 1900 if (vpd->link_speed & LMT_8GB_CAPABLE) { 1901 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT; 1902 } 1903 if (vpd->link_speed & LMT_4GB_CAPABLE) { 1904 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT; 1905 } 1906 if (vpd->link_speed & LMT_2GB_CAPABLE) { 1907 port_info->pi_attrs.supported_speed |= 
FC_HBA_PORTSPEED_2GBIT; 1908 } 1909 if (vpd->link_speed & LMT_1GB_CAPABLE) { 1910 port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT; 1911 } 1912 1913 /* Set the hba model info */ 1914 (void) strcpy(port_info->pi_attrs.model, hba->model_info.model); 1915 (void) strcpy(port_info->pi_attrs.model_description, 1916 hba->model_info.model_desc); 1917 1918 1919 /* Log information */ 1920 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1921 "Bind info: port_num = %d", bind_info->port_num); 1922 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1923 "Bind info: port_handle = %p", bind_info->port_handle); 1924 1925 #if (EMLXS_MODREV >= EMLXS_MODREV5) 1926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1927 "Bind info: port_npiv = %d", bind_info->port_npiv); 1928 #endif /* >= EMLXS_MODREV5 */ 1929 1930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1931 "Port info: pi_topology = %x", port_info->pi_topology); 1932 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1933 "Port info: pi_error = %x", port_info->pi_error); 1934 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1935 "Port info: pi_port_state = %x", port_info->pi_port_state); 1936 1937 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1938 "Port info: port_id = %x", port_info->pi_s_id.port_id); 1939 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1940 "Port info: priv_lilp_posit = %x", 1941 port_info->pi_s_id.priv_lilp_posit); 1942 1943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1944 "Port info: hard_addr = %x", 1945 port_info->pi_hard_addr.hard_addr); 1946 1947 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1948 "Port info: rnid.status = %x", 1949 port_info->pi_rnid_params.status); 1950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1951 "Port info: rnid.global_id = %16s", 1952 port_info->pi_rnid_params.params.global_id); 1953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1954 "Port info: rnid.unit_type = %x", 1955 port_info->pi_rnid_params.params.unit_type); 1956 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1957 "Port info: rnid.port_id = %x", 1958 port_info->pi_rnid_params.params.port_id); 1959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1960 "Port info: rnid.num_attached = %x", 1961 port_info->pi_rnid_params.params.num_attached); 1962 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1963 "Port info: rnid.ip_version = %x", 1964 port_info->pi_rnid_params.params.ip_version); 1965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1966 "Port info: rnid.udp_port = %x", 1967 port_info->pi_rnid_params.params.udp_port); 1968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1969 "Port info: rnid.ip_addr = %16s", 1970 port_info->pi_rnid_params.params.ip_addr); 1971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1972 "Port info: rnid.spec_id_resv = %x", 1973 port_info->pi_rnid_params.params.specific_id_resv); 1974 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1975 "Port info: rnid.topo_flags = %x", 1976 port_info->pi_rnid_params.params.topo_flags); 1977 1978 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1979 "Port info: manufacturer = %s", 1980 port_info->pi_attrs.manufacturer); 1981 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1982 "Port info: serial_num = %s", 1983 port_info->pi_attrs.serial_number); 1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1985 "Port info: model = %s", port_info->pi_attrs.model); 1986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1987 "Port info: model_description = %s", 1988 port_info->pi_attrs.model_description); 1989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1990 "Port info: hardware_version = %s", 1991 port_info->pi_attrs.hardware_version); 1992 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1993 "Port info: driver_version = %s", 1994 port_info->pi_attrs.driver_version); 1995 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1996 "Port info: option_rom_version = %s", 1997 port_info->pi_attrs.option_rom_version); 1998 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 1999 "Port info: firmware_version = %s", 2000 port_info->pi_attrs.firmware_version); 2001 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2002 "Port info: driver_name = %s", 2003 port_info->pi_attrs.driver_name); 2004 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2005 "Port info: vendor_specific_id = %x", 2006 port_info->pi_attrs.vendor_specific_id); 2007 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2008 "Port info: supported_cos = %x", 2009 port_info->pi_attrs.supported_cos); 2010 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2011 "Port info: supported_speed = %x", 2012 port_info->pi_attrs.supported_speed); 2013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2014 "Port info: max_frame_size = %x", 2015 port_info->pi_attrs.max_frame_size); 2016 2017 #if (EMLXS_MODREV >= EMLXS_MODREV3) 2018 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2019 "Port info: fru_port_index = %x", 2020 port_info->pi_attrs.hba_fru_details.port_index); 2021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2022 "Port info: fru_high = %llx", 2023 port_info->pi_attrs.hba_fru_details.high); 2024 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2025 "Port info: fru_low = %llx", 2026 port_info->pi_attrs.hba_fru_details.low); 2027 #endif /* >= EMLXS_MODREV3 */ 2028 2029 #if (EMLXS_MODREV >= EMLXS_MODREV4) 2030 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2031 "Port info: sym_node_name = %s", 2032 port_info->pi_attrs.sym_node_name); 2033 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2034 "Port info: sym_port_name = %s", 2035 port_info->pi_attrs.sym_port_name); 2036 #endif /* >= EMLXS_MODREV4 */ 2037 2038 /* Set the bound flag */ 2039 port->flag |= EMLXS_PORT_BOUND; 2040 hba->num_of_ports++; 2041 2042 mutex_exit(&EMLXS_PORT_LOCK); 2043 2044 return ((opaque_t)port); 2045 2046 } /* emlxs_bind_port() */ 2047 2048 2049 static void 2050 emlxs_unbind_port(opaque_t fca_port_handle) 2051 { 2052 emlxs_port_t *port = 
	    (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			(void) emlxs_sli4_unreg_all_rpi_by_port(port);
		} else {
			/* 0xffff requests unregistering all RPIs */
			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
		}
	}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	/* NPIV virtual ports must also release their VPI */
	if ((hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Nothing to do if the port was never bound */
	if (!(port->flag & EMLXS_PORT_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	port->flag &= ~EMLXS_PORT_BOUND;
	hba->num_of_ports--;

	/* Clear the ULP linkage established by emlxs_bind_port() */
	port->ulp_handle = 0;
	port->ulp_statec = FC_STATE_OFFLINE;
	port->ulp_statec_cb = NULL;
	port->ulp_unsol_cb = NULL;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_unbind_port() */


/*
 * fca_pkt_init entry point: one-time initialization of the FCA-private
 * area (emlxs_buf_t) of a ULP fc_packet_t.  Returns FC_SUCCESS, or
 * FC_FAILURE if the packet carries no private area.
 */
/*ARGSUSED*/
extern int
emlxs_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;

	if (!sbp) {
		return (FC_FAILURE);
	}
	bzero((void *)sbp, sizeof (emlxs_buf_t));

	/* Per-packet lock; mtx is torn down again in emlxs_pkt_uninit() */
	mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, (void *)hba->intr_arg);
	sbp->pkt_flags =
	    PACKET_VALID | PACKET_ULP_OWNED;
	sbp->port = port;
	sbp->pkt = pkt;
	sbp->iocbq.sbp = sbp;

	return (FC_SUCCESS);

} /* emlxs_pkt_init() */



/*
 * Reset a previously initialized packet for reuse on a new transport
 * request: clears per-I/O state, normalizes the timeout, and clears
 * the response buffer.
 */
static void
emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	uint32_t *iptr;

	mutex_enter(&sbp->mtx);

	/* Reinitialize */
	sbp->pkt = pkt;
	sbp->port = port;
	sbp->bmp = NULL;
	/* Keep only the lifetime flags; drop all per-I/O flags */
	sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
	sbp->iotag = 0;
	sbp->ticks = 0;
	sbp->abort_attempts = 0;
	sbp->fpkt = NULL;
	sbp->flush_count = 0;
	sbp->next = NULL;

	if (!port->tgt_mode) {
		sbp->node = NULL;
		sbp->did = 0;
		sbp->lun = 0;
		/*
		 * NOTE(review): "sbp->class = 0;" appears twice below --
		 * the duplicate is harmless but looks like one of the two
		 * was meant to clear a different field; confirm upstream.
		 */
		sbp->class = 0;
		sbp->class = 0;
		sbp->channel = NULL;
	}

	bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
	sbp->iocbq.sbp = sbp;

	/* No completion callback, FC_TRAN_NO_INTR, or panic => poll */
	if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
	    ddi_in_panic()) {
		sbp->pkt_flags |= PACKET_POLLED;
	}

	/* Prepare the fc packet */
	pkt->pkt_state = FC_PKT_SUCCESS;
	pkt->pkt_reason = 0;
	pkt->pkt_action = 0;
	pkt->pkt_expln = 0;
	pkt->pkt_data_resid = 0;
	pkt->pkt_resp_resid = 0;

	/* Make sure all pkt's have a proper timeout */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		/* This disables all IOCB on chip timeouts */
		pkt->pkt_timeout = 0x80000000;
	} else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
		/* Unset/infinite ULP timeouts default to 60 seconds */
		pkt->pkt_timeout = 60;
	}

	/* Clear the response buffer */
	if (pkt->pkt_rsplen) {
		/* Check for FCP commands */
		if ((pkt->pkt_tran_type == FC_PKT_FCP_READ) ||
		    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
			/* For FCP only words 2 and 3 are cleared */
			iptr = (uint32_t *)pkt->pkt_resp;
			iptr[2] = 0;
			iptr[3] = 0;
		} else {
			bzero(pkt->pkt_resp, pkt->pkt_rsplen);
		}
	}

	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_initialize_pkt() */



/*
 * We may not need this routine
 */
/*ARGSUSED*/
extern int
emlxs_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_buf_t *sbp = PKT2PRIV(pkt);

	if (!sbp) {
		return (FC_FAILURE);
	}

	if (!(sbp->pkt_flags & PACKET_VALID)) {
return (FC_FAILURE); 2213 } 2214 sbp->pkt_flags &= ~PACKET_VALID; 2215 mutex_destroy(&sbp->mtx); 2216 2217 return (FC_SUCCESS); 2218 2219 } /* emlxs_pkt_uninit() */ 2220 2221 2222 static int 2223 emlxs_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2224 { 2225 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2226 emlxs_hba_t *hba = HBA; 2227 int32_t rval; 2228 2229 if (!(port->flag & EMLXS_PORT_BOUND)) { 2230 return (FC_CAP_ERROR); 2231 } 2232 2233 if (strcmp(cap, FC_NODE_WWN) == 0) { 2234 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2235 "fca_get_cap: FC_NODE_WWN"); 2236 2237 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2238 rval = FC_CAP_FOUND; 2239 2240 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2241 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2242 "fca_get_cap: FC_LOGIN_PARAMS"); 2243 2244 /* 2245 * We need to turn off CLASS2 support. 2246 * Otherwise, FC transport will use CLASS2 as default class 2247 * and never try with CLASS3. 2248 */ 2249 hba->sparam.cls2.classValid = 0; 2250 2251 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM)); 2252 2253 rval = FC_CAP_FOUND; 2254 2255 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2256 int32_t *num_bufs; 2257 emlxs_config_t *cfg = &CFG; 2258 2259 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2260 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)", 2261 cfg[CFG_UB_BUFS].current); 2262 2263 num_bufs = (int32_t *)ptr; 2264 2265 /* We multiply by MAX_VPORTS because ULP uses a */ 2266 /* formula to calculate ub bufs from this */ 2267 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS); 2268 2269 rval = FC_CAP_FOUND; 2270 2271 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2272 int32_t *size; 2273 2274 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2275 "fca_get_cap: FC_CAP_PAYLOAD_SIZE"); 2276 2277 size = (int32_t *)ptr; 2278 *size = -1; 2279 rval = FC_CAP_FOUND; 2280 2281 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2282 fc_reset_action_t *action; 2283 2284 
EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2285 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR"); 2286 2287 action = (fc_reset_action_t *)ptr; 2288 *action = FC_RESET_RETURN_ALL; 2289 rval = FC_CAP_FOUND; 2290 2291 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2292 fc_dma_behavior_t *behavior; 2293 2294 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2295 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF"); 2296 2297 behavior = (fc_dma_behavior_t *)ptr; 2298 *behavior = FC_ALLOW_STREAMING; 2299 rval = FC_CAP_FOUND; 2300 2301 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2302 fc_fcp_dma_t *fcp_dma; 2303 2304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2305 "fca_get_cap: FC_CAP_FCP_DMA"); 2306 2307 fcp_dma = (fc_fcp_dma_t *)ptr; 2308 *fcp_dma = FC_DVMA_SPACE; 2309 rval = FC_CAP_FOUND; 2310 2311 } else { 2312 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2313 "fca_get_cap: Unknown capability. [%s]", cap); 2314 2315 rval = FC_CAP_ERROR; 2316 2317 } 2318 2319 return (rval); 2320 2321 } /* emlxs_get_cap() */ 2322 2323 2324 2325 static int 2326 emlxs_set_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2327 { 2328 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2329 2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2331 "fca_set_cap: cap=[%s] arg=%p", cap, ptr); 2332 2333 return (FC_CAP_ERROR); 2334 2335 } /* emlxs_set_cap() */ 2336 2337 2338 static opaque_t 2339 emlxs_get_device(opaque_t fca_port_handle, fc_portid_t d_id) 2340 { 2341 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2342 2343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2344 "fca_get_device: did=%x", d_id.port_id); 2345 2346 return (NULL); 2347 2348 } /* emlxs_get_device() */ 2349 2350 2351 static int32_t 2352 emlxs_notify(opaque_t fca_port_handle, uint32_t cmd) 2353 { 2354 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2355 2356 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x", 2357 cmd); 2358 2359 return (FC_SUCCESS); 2360 2361 } /* 
emlxs_notify */ 2362 2363 2364 2365 static int 2366 emlxs_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf) 2367 { 2368 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2369 emlxs_hba_t *hba = HBA; 2370 uint32_t lilp_length; 2371 2372 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2373 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf, 2374 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2], 2375 port->alpa_map[3], port->alpa_map[4]); 2376 2377 if (!(port->flag & EMLXS_PORT_BOUND)) { 2378 return (FC_NOMAP); 2379 } 2380 2381 if (hba->topology != TOPOLOGY_LOOP) { 2382 return (FC_NOMAP); 2383 } 2384 2385 /* Check if alpa map is available */ 2386 if (port->alpa_map[0] != 0) { 2387 mapbuf->lilp_magic = MAGIC_LILP; 2388 } else { /* No LILP map available */ 2389 2390 /* Set lilp_magic to MAGIC_LISA and this will */ 2391 /* trigger an ALPA scan in ULP */ 2392 mapbuf->lilp_magic = MAGIC_LISA; 2393 } 2394 2395 mapbuf->lilp_myalpa = port->did; 2396 2397 /* The first byte of the alpa_map is the lilp map length */ 2398 /* Add one to include the lilp length byte itself */ 2399 lilp_length = (uint32_t)port->alpa_map[0] + 1; 2400 2401 /* Make sure the max transfer is 128 bytes */ 2402 if (lilp_length > 128) { 2403 lilp_length = 128; 2404 } 2405 2406 /* We start copying from the lilp_length field */ 2407 /* in order to get a word aligned address */ 2408 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length, 2409 lilp_length); 2410 2411 return (FC_SUCCESS); 2412 2413 } /* emlxs_get_map() */ 2414 2415 2416 2417 extern int 2418 emlxs_transport(opaque_t fca_port_handle, fc_packet_t *pkt) 2419 { 2420 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2421 emlxs_hba_t *hba = HBA; 2422 emlxs_buf_t *sbp; 2423 uint32_t rval; 2424 uint32_t pkt_flags; 2425 2426 /* Make sure adapter is online */ 2427 if (!(hba->flag & FC_ONLINE_MODE)) { 2428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 2429 "Adapter offline."); 2430 2431 return (FC_OFFLINE); 2432 } 
2433 2434 /* Validate packet */ 2435 sbp = PKT2PRIV(pkt); 2436 2437 /* Make sure ULP was told that the port was online */ 2438 if ((port->ulp_statec == FC_STATE_OFFLINE) && 2439 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2440 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 2441 "Port offline."); 2442 2443 return (FC_OFFLINE); 2444 } 2445 2446 if (sbp->port != port) { 2447 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2448 "Invalid port handle. sbp=%p port=%p flags=%x", sbp, 2449 sbp->port, sbp->pkt_flags); 2450 return (FC_BADPACKET); 2451 } 2452 2453 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) { 2454 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2455 "Invalid packet flags. sbp=%p port=%p flags=%x", sbp, 2456 sbp->port, sbp->pkt_flags); 2457 return (FC_BADPACKET); 2458 } 2459 #ifdef SFCT_SUPPORT 2460 if (port->tgt_mode && !sbp->fct_cmd && 2461 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2462 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2463 "Packet blocked. Target mode."); 2464 return (FC_TRANSPORT_ERROR); 2465 } 2466 #endif /* SFCT_SUPPORT */ 2467 2468 #ifdef IDLE_TIMER 2469 emlxs_pm_busy_component(hba); 2470 #endif /* IDLE_TIMER */ 2471 2472 /* Prepare the packet for transport */ 2473 emlxs_initialize_pkt(port, sbp); 2474 2475 /* Save a copy of the pkt flags. 
*/ 2476 /* We will check the polling flag later */ 2477 pkt_flags = sbp->pkt_flags; 2478 2479 /* Send the packet */ 2480 switch (pkt->pkt_tran_type) { 2481 case FC_PKT_FCP_READ: 2482 case FC_PKT_FCP_WRITE: 2483 rval = emlxs_send_fcp_cmd(port, sbp); 2484 break; 2485 2486 case FC_PKT_IP_WRITE: 2487 case FC_PKT_BROADCAST: 2488 rval = emlxs_send_ip(port, sbp); 2489 break; 2490 2491 case FC_PKT_EXCHANGE: 2492 switch (pkt->pkt_cmd_fhdr.type) { 2493 case FC_TYPE_SCSI_FCP: 2494 rval = emlxs_send_fcp_cmd(port, sbp); 2495 break; 2496 2497 case FC_TYPE_FC_SERVICES: 2498 rval = emlxs_send_ct(port, sbp); 2499 break; 2500 2501 #ifdef MENLO_SUPPORT 2502 case EMLXS_MENLO_TYPE: 2503 rval = emlxs_send_menlo(port, sbp); 2504 break; 2505 #endif /* MENLO_SUPPORT */ 2506 2507 default: 2508 rval = emlxs_send_els(port, sbp); 2509 } 2510 break; 2511 2512 case FC_PKT_OUTBOUND: 2513 switch (pkt->pkt_cmd_fhdr.type) { 2514 #ifdef SFCT_SUPPORT 2515 case FC_TYPE_SCSI_FCP: 2516 rval = emlxs_send_fct_status(port, sbp); 2517 break; 2518 2519 case FC_TYPE_BASIC_LS: 2520 rval = emlxs_send_fct_abort(port, sbp); 2521 break; 2522 #endif /* SFCT_SUPPORT */ 2523 2524 case FC_TYPE_FC_SERVICES: 2525 rval = emlxs_send_ct_rsp(port, sbp); 2526 break; 2527 #ifdef MENLO_SUPPORT 2528 case EMLXS_MENLO_TYPE: 2529 rval = emlxs_send_menlo(port, sbp); 2530 break; 2531 #endif /* MENLO_SUPPORT */ 2532 2533 default: 2534 rval = emlxs_send_els_rsp(port, sbp); 2535 } 2536 break; 2537 2538 default: 2539 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2540 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type); 2541 rval = FC_TRANSPORT_ERROR; 2542 break; 2543 } 2544 2545 /* Check if send was not successful */ 2546 if (rval != FC_SUCCESS) { 2547 /* Return packet to ULP */ 2548 mutex_enter(&sbp->mtx); 2549 sbp->pkt_flags |= PACKET_ULP_OWNED; 2550 mutex_exit(&sbp->mtx); 2551 2552 return (rval); 2553 } 2554 2555 /* Check if this packet should be polled for completion before */ 2556 /* returning. 
This check must be done with a saved copy of the */ 2557 /* pkt_flags because the packet itself could already be freed from */ 2558 /* memory if it was not polled. */ 2559 if (pkt_flags & PACKET_POLLED) { 2560 emlxs_poll(port, sbp); 2561 } 2562 2563 return (FC_SUCCESS); 2564 2565 } /* emlxs_transport() */ 2566 2567 2568 2569 static void 2570 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp) 2571 { 2572 emlxs_hba_t *hba = HBA; 2573 fc_packet_t *pkt = PRIV2PKT(sbp); 2574 clock_t timeout; 2575 clock_t time; 2576 uint32_t att_bit; 2577 CHANNEL *cp; 2578 int in_panic = 0; 2579 2580 mutex_enter(&EMLXS_PORT_LOCK); 2581 hba->io_poll_count++; 2582 mutex_exit(&EMLXS_PORT_LOCK); 2583 2584 /* Check for panic situation */ 2585 cp = (CHANNEL *)sbp->channel; 2586 2587 if (ddi_in_panic()) { 2588 in_panic = 1; 2589 /* 2590 * In panic situations there will be one thread with 2591 * no interrrupts (hard or soft) and no timers 2592 */ 2593 2594 /* 2595 * We must manually poll everything in this thread 2596 * to keep the driver going. 2597 */ 2598 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) { 2599 switch (cp->channelno) { 2600 case FC_FCP_RING: 2601 att_bit = HA_R0ATT; 2602 break; 2603 2604 case FC_IP_RING: 2605 att_bit = HA_R1ATT; 2606 break; 2607 2608 case FC_ELS_RING: 2609 att_bit = HA_R2ATT; 2610 break; 2611 2612 case FC_CT_RING: 2613 att_bit = HA_R3ATT; 2614 break; 2615 } 2616 } 2617 2618 /* Keep polling the chip until our IO is completed */ 2619 /* Driver's timer will not function during panics. */ 2620 /* Therefore, timer checks must be performed manually. 
*/ 2621 (void) drv_getparm(LBOLT, &time); 2622 timeout = time + drv_usectohz(1000000); 2623 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2624 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) { 2625 EMLXS_SLI_POLL_INTR(hba, att_bit); 2626 } else { 2627 EMLXS_SLI_POLL_INTR(hba, 0); 2628 } 2629 (void) drv_getparm(LBOLT, &time); 2630 2631 /* Trigger timer checks periodically */ 2632 if (time >= timeout) { 2633 emlxs_timer_checks(hba); 2634 timeout = time + drv_usectohz(1000000); 2635 } 2636 } 2637 } else { 2638 /* Wait for IO completion */ 2639 /* The driver's timer will detect */ 2640 /* any timeout and abort the I/O. */ 2641 mutex_enter(&EMLXS_PKT_LOCK); 2642 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2643 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK); 2644 } 2645 mutex_exit(&EMLXS_PKT_LOCK); 2646 } 2647 2648 /* Check for fcp reset pkt */ 2649 if (sbp->pkt_flags & PACKET_FCP_RESET) { 2650 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2651 /* Flush the IO's on the chipq */ 2652 (void) emlxs_chipq_node_flush(port, 2653 &hba->chan[hba->channel_fcp], 2654 sbp->node, sbp); 2655 } else { 2656 /* Flush the IO's on the chipq for this lun */ 2657 (void) emlxs_chipq_lun_flush(port, 2658 sbp->node, sbp->lun, sbp); 2659 } 2660 2661 if (sbp->flush_count == 0) { 2662 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2663 goto done; 2664 } 2665 2666 /* Set the timeout so the flush has time to complete */ 2667 timeout = emlxs_timeout(hba, 60); 2668 (void) drv_getparm(LBOLT, &time); 2669 while ((time < timeout) && sbp->flush_count > 0) { 2670 delay(drv_usectohz(500000)); 2671 (void) drv_getparm(LBOLT, &time); 2672 } 2673 2674 if (sbp->flush_count == 0) { 2675 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2676 goto done; 2677 } 2678 2679 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2680 "sbp=%p flush_count=%d. 
Waiting...", sbp, 2681 sbp->flush_count); 2682 2683 /* Let's try this one more time */ 2684 2685 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2686 /* Flush the IO's on the chipq */ 2687 (void) emlxs_chipq_node_flush(port, 2688 &hba->chan[hba->channel_fcp], 2689 sbp->node, sbp); 2690 } else { 2691 /* Flush the IO's on the chipq for this lun */ 2692 (void) emlxs_chipq_lun_flush(port, 2693 sbp->node, sbp->lun, sbp); 2694 } 2695 2696 /* Reset the timeout so the flush has time to complete */ 2697 timeout = emlxs_timeout(hba, 60); 2698 (void) drv_getparm(LBOLT, &time); 2699 while ((time < timeout) && sbp->flush_count > 0) { 2700 delay(drv_usectohz(500000)); 2701 (void) drv_getparm(LBOLT, &time); 2702 } 2703 2704 if (sbp->flush_count == 0) { 2705 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2706 goto done; 2707 } 2708 2709 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2710 "sbp=%p flush_count=%d. Resetting link.", sbp, 2711 sbp->flush_count); 2712 2713 /* Let's first try to reset the link */ 2714 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 2715 2716 if (sbp->flush_count == 0) { 2717 goto done; 2718 } 2719 2720 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2721 "sbp=%p flush_count=%d. Resetting HBA.", sbp, 2722 sbp->flush_count); 2723 2724 /* If that doesn't work, reset the adapter */ 2725 (void) emlxs_reset(port, FC_FCA_RESET); 2726 2727 if (sbp->flush_count != 0) { 2728 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2729 "sbp=%p flush_count=%d. 
Giving up.", sbp, 2730 sbp->flush_count); 2731 } 2732 2733 } 2734 /* PACKET_FCP_RESET */ 2735 done: 2736 2737 /* Packet has been declared completed and is now ready to be returned */ 2738 2739 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 2740 emlxs_unswap_pkt(sbp); 2741 #endif /* EMLXS_MODREV2X */ 2742 2743 mutex_enter(&sbp->mtx); 2744 sbp->pkt_flags |= PACKET_ULP_OWNED; 2745 mutex_exit(&sbp->mtx); 2746 2747 mutex_enter(&EMLXS_PORT_LOCK); 2748 hba->io_poll_count--; 2749 mutex_exit(&EMLXS_PORT_LOCK); 2750 2751 #ifdef FMA_SUPPORT 2752 if (!in_panic) { 2753 emlxs_check_dma(hba, sbp); 2754 } 2755 #endif 2756 2757 /* Make ULP completion callback if required */ 2758 if (pkt->pkt_comp) { 2759 cp->ulpCmplCmd++; 2760 (*pkt->pkt_comp) (pkt); 2761 } 2762 2763 #ifdef FMA_SUPPORT 2764 if (hba->flag & FC_DMA_CHECK_ERROR) { 2765 emlxs_thread_spawn(hba, emlxs_restart_thread, 2766 NULL, NULL); 2767 } 2768 #endif 2769 2770 return; 2771 2772 } /* emlxs_poll() */ 2773 2774 2775 static int 2776 emlxs_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size, 2777 uint32_t *count, uint32_t type) 2778 { 2779 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2780 emlxs_hba_t *hba = HBA; 2781 2782 char *err = NULL; 2783 emlxs_unsol_buf_t *pool; 2784 emlxs_unsol_buf_t *new_pool; 2785 int32_t i; 2786 int result; 2787 uint32_t free_resv; 2788 uint32_t free; 2789 emlxs_config_t *cfg = &CFG; 2790 fc_unsol_buf_t *ubp; 2791 emlxs_ub_priv_t *ub_priv; 2792 int rc; 2793 2794 if (port->tgt_mode) { 2795 if (tokens && count) { 2796 bzero(tokens, (sizeof (uint64_t) * (*count))); 2797 } 2798 return (FC_SUCCESS); 2799 } 2800 2801 if (!(port->flag & EMLXS_PORT_BOUND)) { 2802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2803 "ub_alloc failed: Port not bound! 
size=%x count=%d " 2804 "type=%x", size, *count, type); 2805 2806 return (FC_FAILURE); 2807 } 2808 2809 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2810 "ub_alloc: size=%x count=%d type=%x", size, *count, type); 2811 2812 if (count && (*count > EMLXS_MAX_UBUFS)) { 2813 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2814 "ub_alloc failed: Too many unsolicted buffers requested. " 2815 "count=%x", *count); 2816 2817 return (FC_FAILURE); 2818 2819 } 2820 2821 if (tokens == NULL) { 2822 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2823 "ub_alloc failed: Token array is NULL."); 2824 2825 return (FC_FAILURE); 2826 } 2827 2828 /* Clear the token array */ 2829 bzero(tokens, (sizeof (uint64_t) * (*count))); 2830 2831 free_resv = 0; 2832 free = *count; 2833 switch (type) { 2834 case FC_TYPE_BASIC_LS: 2835 err = "BASIC_LS"; 2836 break; 2837 case FC_TYPE_EXTENDED_LS: 2838 err = "EXTENDED_LS"; 2839 free = *count / 2; /* Hold 50% for normal use */ 2840 free_resv = *count - free; /* Reserve 50% for RSCN use */ 2841 break; 2842 case FC_TYPE_IS8802: 2843 err = "IS8802"; 2844 break; 2845 case FC_TYPE_IS8802_SNAP: 2846 err = "IS8802_SNAP"; 2847 2848 if (cfg[CFG_NETWORK_ON].current == 0) { 2849 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2850 "ub_alloc failed: IP support is disabled."); 2851 2852 return (FC_FAILURE); 2853 } 2854 break; 2855 case FC_TYPE_SCSI_FCP: 2856 err = "SCSI_FCP"; 2857 break; 2858 case FC_TYPE_SCSI_GPP: 2859 err = "SCSI_GPP"; 2860 break; 2861 case FC_TYPE_HIPP_FP: 2862 err = "HIPP_FP"; 2863 break; 2864 case FC_TYPE_IPI3_MASTER: 2865 err = "IPI3_MASTER"; 2866 break; 2867 case FC_TYPE_IPI3_SLAVE: 2868 err = "IPI3_SLAVE"; 2869 break; 2870 case FC_TYPE_IPI3_PEER: 2871 err = "IPI3_PEER"; 2872 break; 2873 case FC_TYPE_FC_SERVICES: 2874 err = "FC_SERVICES"; 2875 break; 2876 } 2877 2878 mutex_enter(&EMLXS_UB_LOCK); 2879 2880 /* 2881 * Walk through the list of the unsolicited buffers 2882 * for this ddiinst of emlx. 
2883 */ 2884 2885 pool = port->ub_pool; 2886 2887 /* 2888 * The emlxs_ub_alloc() can be called more than once with different 2889 * size. We will reject the call if there are 2890 * duplicate size with the same FC-4 type. 2891 */ 2892 while (pool) { 2893 if ((pool->pool_type == type) && 2894 (pool->pool_buf_size == size)) { 2895 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2896 "ub_alloc failed: Unsolicited buffer pool for %s " 2897 "of size 0x%x bytes already exists.", err, size); 2898 2899 result = FC_FAILURE; 2900 goto fail; 2901 } 2902 2903 pool = pool->pool_next; 2904 } 2905 2906 mutex_exit(&EMLXS_UB_LOCK); 2907 2908 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t), 2909 KM_SLEEP); 2910 2911 new_pool->pool_next = NULL; 2912 new_pool->pool_type = type; 2913 new_pool->pool_buf_size = size; 2914 new_pool->pool_nentries = *count; 2915 new_pool->pool_available = new_pool->pool_nentries; 2916 new_pool->pool_free = free; 2917 new_pool->pool_free_resv = free_resv; 2918 new_pool->fc_ubufs = 2919 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP); 2920 2921 new_pool->pool_first_token = port->ub_count; 2922 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries; 2923 2924 for (i = 0; i < new_pool->pool_nentries; i++) { 2925 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i]; 2926 ubp->ub_port_handle = port->ulp_handle; 2927 ubp->ub_token = (uint64_t)((unsigned long)ubp); 2928 ubp->ub_bufsize = size; 2929 ubp->ub_class = FC_TRAN_CLASS3; 2930 ubp->ub_port_private = NULL; 2931 ubp->ub_fca_private = 2932 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t), 2933 KM_SLEEP); 2934 2935 /* 2936 * Initialize emlxs_ub_priv_t 2937 */ 2938 ub_priv = ubp->ub_fca_private; 2939 ub_priv->ubp = ubp; 2940 ub_priv->port = port; 2941 ub_priv->flags = EMLXS_UB_FREE; 2942 ub_priv->available = 1; 2943 ub_priv->pool = new_pool; 2944 ub_priv->time = 0; 2945 ub_priv->timeout = 0; 2946 ub_priv->token = port->ub_count; 2947 ub_priv->cmd = 0; 2948 2949 /* 
Allocate the actual buffer */ 2950 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP); 2951 2952 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 2953 "ub_alloc: buffer=%p token=%x size=%x type=%x ", ubp, 2954 ub_priv->token, ubp->ub_bufsize, type); 2955 2956 tokens[i] = (uint64_t)((unsigned long)ubp); 2957 port->ub_count++; 2958 } 2959 2960 mutex_enter(&EMLXS_UB_LOCK); 2961 2962 /* Add the pool to the top of the pool list */ 2963 new_pool->pool_prev = NULL; 2964 new_pool->pool_next = port->ub_pool; 2965 2966 if (port->ub_pool) { 2967 port->ub_pool->pool_prev = new_pool; 2968 } 2969 port->ub_pool = new_pool; 2970 2971 /* Set the post counts */ 2972 if (type == FC_TYPE_IS8802_SNAP) { 2973 MAILBOXQ *mbox; 2974 2975 port->ub_post[hba->channel_ip] += new_pool->pool_nentries; 2976 2977 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 2978 MEM_MBOX, 1))) { 2979 emlxs_mb_config_farp(hba, mbox); 2980 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, 2981 mbox, MBX_NOWAIT, 0); 2982 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 2983 (void) emlxs_mem_put(hba, MEM_MBOX, 2984 (uint8_t *)mbox); 2985 } 2986 } 2987 port->flag |= EMLXS_PORT_IP_UP; 2988 } else if (type == FC_TYPE_EXTENDED_LS) { 2989 port->ub_post[hba->channel_els] += new_pool->pool_nentries; 2990 } else if (type == FC_TYPE_FC_SERVICES) { 2991 port->ub_post[hba->channel_ct] += new_pool->pool_nentries; 2992 } 2993 2994 mutex_exit(&EMLXS_UB_LOCK); 2995 2996 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 2997 "%d unsolicited buffers allocated for %s of size 0x%x bytes.", 2998 *count, err, size); 2999 3000 return (FC_SUCCESS); 3001 3002 fail: 3003 3004 /* Clean the pool */ 3005 for (i = 0; tokens[i] != NULL; i++) { 3006 /* Get the buffer object */ 3007 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3008 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3009 3010 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3011 "ub_alloc failed: Freed buffer=%p token=%x size=%x " 3012 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type); 
3013 3014 /* Free the actual buffer */ 3015 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3016 3017 /* Free the private area of the buffer object */ 3018 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3019 3020 tokens[i] = 0; 3021 port->ub_count--; 3022 } 3023 3024 /* Free the array of buffer objects in the pool */ 3025 kmem_free((caddr_t)new_pool->fc_ubufs, 3026 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries)); 3027 3028 /* Free the pool object */ 3029 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t)); 3030 3031 mutex_exit(&EMLXS_UB_LOCK); 3032 3033 return (result); 3034 3035 } /* emlxs_ub_alloc() */ 3036 3037 3038 static void 3039 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp) 3040 { 3041 emlxs_hba_t *hba = HBA; 3042 emlxs_ub_priv_t *ub_priv; 3043 fc_packet_t *pkt; 3044 ELS_PKT *els; 3045 uint32_t sid; 3046 3047 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3048 3049 if (hba->state <= FC_LINK_DOWN) { 3050 return; 3051 } 3052 3053 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + 3054 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) { 3055 return; 3056 } 3057 3058 sid = LE_SWAP24_LO(ubp->ub_frame.s_id); 3059 3060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg, 3061 "%s dropped: sid=%x. 
Rejecting.", 3062 emlxs_elscmd_xlate(ub_priv->cmd), sid); 3063 3064 pkt->pkt_tran_type = FC_PKT_OUTBOUND; 3065 pkt->pkt_timeout = (2 * hba->fc_ratov); 3066 3067 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) { 3068 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3; 3069 pkt->pkt_tran_flags |= FC_TRAN_CLASS2; 3070 } 3071 3072 /* Build the fc header */ 3073 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id; 3074 pkt->pkt_cmd_fhdr.r_ctl = 3075 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL; 3076 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did); 3077 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 3078 pkt->pkt_cmd_fhdr.f_ctl = 3079 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 3080 pkt->pkt_cmd_fhdr.seq_id = 0; 3081 pkt->pkt_cmd_fhdr.df_ctl = 0; 3082 pkt->pkt_cmd_fhdr.seq_cnt = 0; 3083 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff; 3084 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id; 3085 pkt->pkt_cmd_fhdr.ro = 0; 3086 3087 /* Build the command */ 3088 els = (ELS_PKT *) pkt->pkt_cmd; 3089 els->elsCode = 0x01; 3090 els->un.lsRjt.un.b.lsRjtRsvd0 = 0; 3091 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3092 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3093 els->un.lsRjt.un.b.vendorUnique = 0x02; 3094 3095 /* Send the pkt later in another thread */ 3096 (void) emlxs_pkt_send(pkt, 0); 3097 3098 return; 3099 3100 } /* emlxs_ub_els_reject() */ 3101 3102 extern int 3103 emlxs_ub_release(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3104 { 3105 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3106 emlxs_hba_t *hba = HBA; 3107 fc_unsol_buf_t *ubp; 3108 emlxs_ub_priv_t *ub_priv; 3109 uint32_t i; 3110 uint32_t time; 3111 emlxs_unsol_buf_t *pool; 3112 3113 if (count == 0) { 3114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3115 "ub_release: Nothing to do. 
count=%d", count); 3116 3117 return (FC_SUCCESS); 3118 } 3119 3120 if (!(port->flag & EMLXS_PORT_BOUND)) { 3121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3122 "ub_release failed: Port not bound. count=%d token[0]=%p", 3123 count, tokens[0]); 3124 3125 return (FC_UNBOUND); 3126 } 3127 3128 mutex_enter(&EMLXS_UB_LOCK); 3129 3130 if (!port->ub_pool) { 3131 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3132 "ub_release failed: No pools! count=%d token[0]=%p", 3133 count, tokens[0]); 3134 3135 mutex_exit(&EMLXS_UB_LOCK); 3136 return (FC_UB_BADTOKEN); 3137 } 3138 3139 for (i = 0; i < count; i++) { 3140 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3141 3142 if (!ubp) { 3143 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3144 "ub_release failed: count=%d tokens[%d]=0", count, 3145 i); 3146 3147 mutex_exit(&EMLXS_UB_LOCK); 3148 return (FC_UB_BADTOKEN); 3149 } 3150 3151 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3152 3153 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3155 "ub_release failed: Dead buffer found. ubp=%p", 3156 ubp); 3157 3158 mutex_exit(&EMLXS_UB_LOCK); 3159 return (FC_UB_BADTOKEN); 3160 } 3161 3162 if (ub_priv->flags == EMLXS_UB_FREE) { 3163 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3164 "ub_release: Buffer already free! 
ubp=%p token=%x", 3165 ubp, ub_priv->token); 3166 3167 continue; 3168 } 3169 3170 /* Check for dropped els buffer */ 3171 /* ULP will do this sometimes without sending a reply */ 3172 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) && 3173 !(ub_priv->flags & EMLXS_UB_REPLY)) { 3174 emlxs_ub_els_reject(port, ubp); 3175 } 3176 3177 /* Mark the buffer free */ 3178 ub_priv->flags = EMLXS_UB_FREE; 3179 bzero(ubp->ub_buffer, ubp->ub_bufsize); 3180 3181 time = hba->timer_tics - ub_priv->time; 3182 ub_priv->time = 0; 3183 ub_priv->timeout = 0; 3184 3185 pool = ub_priv->pool; 3186 3187 if (ub_priv->flags & EMLXS_UB_RESV) { 3188 pool->pool_free_resv++; 3189 } else { 3190 pool->pool_free++; 3191 } 3192 3193 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3194 "ub_release: ubp=%p token=%x time=%d av=%d (%d,%d,%d,%d)", 3195 ubp, ub_priv->token, time, ub_priv->available, 3196 pool->pool_nentries, pool->pool_available, 3197 pool->pool_free, pool->pool_free_resv); 3198 3199 /* Check if pool can be destroyed now */ 3200 if ((pool->pool_available == 0) && 3201 (pool->pool_free + pool->pool_free_resv == 3202 pool->pool_nentries)) { 3203 emlxs_ub_destroy(port, pool); 3204 } 3205 } 3206 3207 mutex_exit(&EMLXS_UB_LOCK); 3208 3209 return (FC_SUCCESS); 3210 3211 } /* emlxs_ub_release() */ 3212 3213 3214 static int 3215 emlxs_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3216 { 3217 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3218 emlxs_unsol_buf_t *pool; 3219 fc_unsol_buf_t *ubp; 3220 emlxs_ub_priv_t *ub_priv; 3221 uint32_t i; 3222 3223 if (port->tgt_mode) { 3224 return (FC_SUCCESS); 3225 } 3226 3227 if (count == 0) { 3228 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3229 "ub_free: Nothing to do. count=%d token[0]=%p", count, 3230 tokens[0]); 3231 3232 return (FC_SUCCESS); 3233 } 3234 3235 if (!(port->flag & EMLXS_PORT_BOUND)) { 3236 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3237 "ub_free: Port not bound. 
count=%d token[0]=%p", count, 3238 tokens[0]); 3239 3240 return (FC_SUCCESS); 3241 } 3242 3243 mutex_enter(&EMLXS_UB_LOCK); 3244 3245 if (!port->ub_pool) { 3246 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3247 "ub_free failed: No pools! count=%d token[0]=%p", count, 3248 tokens[0]); 3249 3250 mutex_exit(&EMLXS_UB_LOCK); 3251 return (FC_UB_BADTOKEN); 3252 } 3253 3254 /* Process buffer list */ 3255 for (i = 0; i < count; i++) { 3256 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3257 3258 if (!ubp) { 3259 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3260 "ub_free failed: count=%d tokens[%d]=0", count, 3261 i); 3262 3263 mutex_exit(&EMLXS_UB_LOCK); 3264 return (FC_UB_BADTOKEN); 3265 } 3266 3267 /* Mark buffer unavailable */ 3268 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3269 3270 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3272 "ub_free failed: Dead buffer found. ubp=%p", ubp); 3273 3274 mutex_exit(&EMLXS_UB_LOCK); 3275 return (FC_UB_BADTOKEN); 3276 } 3277 3278 ub_priv->available = 0; 3279 3280 /* Mark one less buffer available in the parent pool */ 3281 pool = ub_priv->pool; 3282 3283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3284 "ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 3285 ub_priv->token, pool->pool_nentries, 3286 pool->pool_available - 1, pool->pool_free, 3287 pool->pool_free_resv); 3288 3289 if (pool->pool_available) { 3290 pool->pool_available--; 3291 3292 /* Check if pool can be destroyed */ 3293 if ((pool->pool_available == 0) && 3294 (pool->pool_free + pool->pool_free_resv == 3295 pool->pool_nentries)) { 3296 emlxs_ub_destroy(port, pool); 3297 } 3298 } 3299 } 3300 3301 mutex_exit(&EMLXS_UB_LOCK); 3302 3303 return (FC_SUCCESS); 3304 3305 } /* emlxs_ub_free() */ 3306 3307 3308 /* EMLXS_UB_LOCK must be held when calling this routine */ 3309 extern void 3310 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool) 3311 { 3312 emlxs_hba_t *hba = HBA; 3313 
emlxs_unsol_buf_t *next;
	emlxs_unsol_buf_t *prev;
	fc_unsol_buf_t *ubp;
	uint32_t i;

	/* Remove the pool object from the port's doubly-linked pool list */
	next = pool->pool_next;
	prev = pool->pool_prev;

	if (port->ub_pool == pool) {
		port->ub_pool = next;
	}

	if (prev) {
		prev->pool_next = next;
	}

	if (next) {
		next->pool_prev = prev;
	}

	pool->pool_prev = NULL;
	pool->pool_next = NULL;

	/* Clear the post counts */
	/* (undo this pool's contribution to the per-channel posted totals) */
	switch (pool->pool_type) {
	case FC_TYPE_IS8802_SNAP:
		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
		break;

	case FC_TYPE_EXTENDED_LS:
		port->ub_post[hba->channel_els] -= pool->pool_nentries;
		break;

	case FC_TYPE_FC_SERVICES:
		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
		break;
	}

	/* Now free the pool memory */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);

	/* Process the array of buffer objects in the pool */
	for (i = 0; i < pool->pool_nentries; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];

		/* Free the memory the buffer object represents */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));

	return;

} /* emlxs_ub_destroy() */


/*
 * emlxs_pkt_abort() - FCA pkt_abort entry point.
 *
 * Attempts to abort a previously submitted packet.  The packet may be
 * found in one of three places, each handled in turn:
 *   1. the driver transmit queue (node priority or normal tx queue):
 *      it is unlinked and completed locally with IOERR_ABORT_REQUESTED;
 *   2. the chip queue (fc_table): an ABORT_XRI (link up) or CLOSE_XRI
 *      (link down) IOCB is issued and the completion is awaited;
 *   3. nowhere: if 'sleep' is set, the search is retried up to 5 times
 *      with 5-second delays, then completion is forced.
 * In all cases the routine waits (cv_timedwait, or manual interrupt
 * polling when ddi_in_panic()) until the packet is completed before
 * handing ownership back to the ULP.
 *
 * Returns FC_SUCCESS, or FC_UNBOUND / FC_OFFLINE / FC_FAILURE on the
 * early-exit paths (and FC_FAILURE unconditionally under ULP_PATCH5).
 */
/*ARGSUSED*/
extern int
emlxs_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;	/* used only by the ULP_PATCH5 exit */

	emlxs_buf_t *sbp;
	NODELIST *nlp;
	NODELIST *prev_nlp;
	uint8_t channelno;
	CHANNEL *cp;
	clock_t timeout;
	clock_t time;
	int32_t pkt_ret;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCBQ *prev;
	uint32_t found;
	uint32_t att_bit;
	uint32_t pass = 0;

	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
	iocbq = &sbp->iocbq;
	nlp = (NODELIST *)sbp->node;
	cp = (CHANNEL *)sbp->channel;
	channelno = (cp) ? cp->channelno : 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Port not bound.");
		return (FC_UNBOUND);
	}

	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Adapter offline.");
		return (FC_OFFLINE);
	}

	/* ULP requires the aborted pkt to be completed */
	/* back to ULP before returning from this call. */
	/* SUN knows of problems with this call so they suggested that we */
	/* always return a FC_FAILURE for this call, until it is worked out. */

	/* Check if pkt is no good */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Tag this now */
	/* This will prevent any thread except ours from completing it */
	mutex_enter(&sbp->mtx);

	/* Check again if we still own this */
	/* (re-test under sbp->mtx; the first test was unlocked) */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_exit(&sbp->mtx);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Check if pkt is a real polled command */
	/* (a genuinely polled I/O is owned by its issuing thread) */
	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
	    (sbp->pkt_flags & PACKET_POLLED)) {
		mutex_exit(&sbp->mtx);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
		    sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Claim the packet: we will poll for its completion ourselves */
	sbp->pkt_flags |= PACKET_POLLED;
	sbp->pkt_flags |= PACKET_IN_ABORT;

	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
	    PACKET_IN_TIMEOUT)) {
		mutex_exit(&sbp->mtx);

		/* Do nothing, pkt already on its way out */
		goto done;
	}

	mutex_exit(&sbp->mtx);

begin:
	pass++;

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	/* Case 1: packet still sitting on the driver transmit queue */
	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		/* Find it on the queue */
		found = 0;
		if (iocbq->flag & IOCB_PRIORITY) {
			/* Search the priority queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_ptx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_ptx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno].
						    q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_ptx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = next->next;
			}
		} else {
			/* Search the normal queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_tx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_tx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_tx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = (IOCBQ *) next->next;
			}
		}

		if (!found) {
			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
			    "I/O not found in driver. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
			goto done;
		}

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node,
			 * then don't shift the pointers
			 */
			/* We want to drain the base node before moving on */
			if (!nlp->nlp_base) {
				/* Just shift channel queue */
				/* pointers to next node */
				cp->nodeq.q_last = (void *) nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the only node on list */
			if (cp->nodeq.q_first == (void *)nlp &&
			    cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else if (cp->nodeq.q_first == (void *)nlp) {
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *) cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult find the
				 * previous node in the circular channel queue
				 */
				prev_nlp = nlp;
				while (prev_nlp->nlp_next[channelno] != nlp) {
					prev_nlp = prev_nlp->
					    nlp_next[channelno];
				}

				prev_nlp->nlp_next[channelno] =
				    nlp->nlp_next[channelno];

				if (cp->nodeq.q_last == (void *)nlp) {
					cp->nodeq.q_last = (void *)prev_nlp;
				}
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/* Free the ULPIOTAG and the bmp */
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			hba->fc_table[sbp->iotag] = NULL;
			emlxs_sli4_free_xri(hba, sbp, sbp->xp);
		} else {
			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
		}


		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Never reached the chip: complete it locally */
		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_ABORT_REQUESTED, 1);

		goto done;
	}

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);


	/* Case 2: packet is outstanding on the chip queue */
	mutex_enter(&EMLXS_FCTAB_LOCK);

	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
	    (sbp == hba->fc_table[sbp->iotag])) {

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			/* Link up: ABTS abort, wait up to 4*RATOV+10 ticks */
			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		} else {
			/* Link down: just close the exchange locally */
			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks = hba->timer_tics + 30;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Send this iocbq */
		if (iocbq) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
			iocbq = NULL;
		}

		goto done;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Pkt was not on any queues */

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
		goto done;
	}

	if (!sleep) {
		return (FC_FAILURE);
	}

	/* Apparently the pkt was not found. Let's delay and try again */
	if (pass < 5) {
		delay(drv_usectohz(5000000));	/* 5 seconds */

		/* Check again if we still own this */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
			goto done;
		}

		goto begin;
	}

force_it:

	/* Force the completion now */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);

	/* Now complete it */
	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
	    1);

done:

	/* Now wait for the pkt to complete */
	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
		/* Set thread timeout */
		timeout = emlxs_timeout(hba, 30);

		/* Check for panic situation */
		if (ddi_in_panic()) {

			/*
			 * In panic situations there will be one thread with no
			 * interrupts (hard or soft) and no timers
			 */

			/*
			 * We must manually poll everything in this thread
			 * to keep the driver going.
			 */

			/*
			 * NOTE(review): no default case below — if
			 * channelno is not one of the four rings, att_bit
			 * is used uninitialized in the polling loop.
			 * Verify whether channelno is guaranteed to be a
			 * ring number here.
			 */
			cp = (CHANNEL *)sbp->channel;
			switch (cp->channelno) {
			case FC_FCP_RING:
				att_bit = HA_R0ATT;
				break;

			case FC_IP_RING:
				att_bit = HA_R1ATT;
				break;

			case FC_ELS_RING:
				att_bit = HA_R2ATT;
				break;

			case FC_CT_RING:
				att_bit = HA_R3ATT;
				break;
			}

			/* Keep polling the chip until our IO is completed */
			(void) drv_getparm(LBOLT, &time);
			while ((time < timeout) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				EMLXS_SLI_POLL_INTR(hba, att_bit);
				(void) drv_getparm(LBOLT, &time);
			}
		} else {
			/* Wait for IO completion or timeout */
			mutex_enter(&EMLXS_PKT_LOCK);
			pkt_ret = 0;
			while ((pkt_ret != -1) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				pkt_ret =
				    cv_timedwait(&EMLXS_PKT_CV,
				    &EMLXS_PKT_LOCK, timeout);
			}
			mutex_exit(&EMLXS_PKT_LOCK);
		}

		/* Check if timeout occured. This is not good. */
		/* Something happened to our IO. */
		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			/* Force the completion now */
			goto force_it;
		}
	}
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	/* Check again if we still own this */
	/* Hand the packet back to the ULP before returning */
	if ((sbp->pkt_flags & PACKET_VALID) &&
	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_enter(&sbp->mtx);
		if ((sbp->pkt_flags & PACKET_VALID) &&
		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
			sbp->pkt_flags |= PACKET_ULP_OWNED;
		}
		mutex_exit(&sbp->mtx);
	}

#ifdef ULP_PATCH5
	/* Workaround: some ULPs require FC_FAILURE from pkt_abort */
	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
		return (FC_FAILURE);
	}
#endif /* ULP_PATCH5 */

	return (FC_SUCCESS);

} /* emlxs_pkt_abort() */


/*
 * emlxs_abort_all() - abort every active I/O in the fc_table.
 *
 * Walks hba->fc_table and calls emlxs_pkt_abort() (non-sleeping) on
 * each live packet.  On return, *tx and *chip hold the number of
 * successfully aborted packets that were on the transmit queue and on
 * the chip queue, respectively.
 *
 * Note: EMLXS_FCTAB_LOCK is dropped around each emlxs_pkt_abort() call
 * (pkt_abort takes other locks), so the table may change while it is
 * being scanned; this is a best-effort sweep.
 */
static void
emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
{
	emlxs_port_t *port = &PPORT;
	fc_packet_t *pkt;
	emlxs_buf_t *sbp;
	uint32_t i;
	uint32_t flg;
	uint32_t rc;
	uint32_t txcnt;
	uint32_t chipcnt;

	txcnt = 0;
	chipcnt = 0;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Sample the location before dropping the lock */
		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
		pkt = PRIV2PKT(sbp);
		mutex_exit(&EMLXS_FCTAB_LOCK);
		rc = emlxs_pkt_abort(port, pkt, 0);
		if (rc == FC_SUCCESS) {
			if (flg) {
				chipcnt++;
			} else {
				txcnt++;
			}
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);
	*tx = txcnt;
	*chip = chipcnt;
} /* emlxs_abort_all() */


/*
 * emlxs_reset() - perform a link or adapter reset.
 *
 * cmd is one of:
 *   FC_FCA_LINK_RESET - reset the FC link and wait (up to 60s) for the
 *                       link-up notification;
 *   FC_FCA_CORE       - (DUMP_SUPPORT only) take a user-initiated core
 *                       dump and wait for it to complete;
 *   FC_FCA_RESET /
 *   FC_FCA_RESET_CORE - offline then online the adapter.
 *
 * Returns FC_SUCCESS, FC_FAILURE, or FC_DEVICE_BUSY (adapter reset
 * refused because the device could not be taken offline).
 */
extern int32_t
emlxs_reset(emlxs_port_t *port, uint32_t cmd)
{
	emlxs_hba_t *hba = HBA;
	int rval;
	int ret;
	clock_t timeout;

	switch (cmd) {
	case FC_FCA_LINK_RESET:

		/* Nothing to reset if offline or the link is already down */
		if (!(hba->flag & FC_ONLINE_MODE) ||
		    (hba->state <= FC_LINK_DOWN)) {
return (FC_SUCCESS);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Link.");

		/* Arm the link-up wait flag before triggering the reset */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		hba->linkup_wait_flag = TRUE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (emlxs_reset_link(hba, 1, 1)) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			hba->linkup_wait_flag = FALSE;
			mutex_exit(&EMLXS_LINKUP_LOCK);

			return (FC_FAILURE);
		}

		/* Wait (up to 60s) for the link-up handler to signal us */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		timeout = emlxs_timeout(hba, 60);
		ret = 0;
		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
			ret =
			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
			    timeout);
		}

		hba->linkup_wait_flag = FALSE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		/* cv_timedwait returns -1 on timeout */
		if (ret == -1) {
			return (FC_FAILURE);
		}

		return (FC_SUCCESS);

	case FC_FCA_CORE:
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Dumping Core.");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);

		return (FC_SUCCESS);
#endif /* DUMP_SUPPORT */

		/*
		 * NOTE(review): without DUMP_SUPPORT, FC_FCA_CORE falls
		 * through to the adapter-reset cases below — confirm this
		 * fallthrough is the intended degraded behavior.
		 */
	case FC_FCA_RESET:
	case FC_FCA_RESET_CORE:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Adapter.");

		rval = FC_SUCCESS;

		if (emlxs_offline(hba) == 0) {
			(void) emlxs_online(hba);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Adapter reset failed. Device busy.");

			rval = FC_DEVICE_BUSY;
		}

		return (rval);

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "emlxs_reset: Unknown command. cmd=%x", cmd);

		break;
	}

	return (FC_FAILURE);

} /* emlxs_reset() */


/*
 * emlxs_fca_reset() - FCA reset entry point.
 *
 * Validates and logs the reset command, upgrades FC_FCA_LINK_RESET to
 * a full FC_FCA_RESET when a firmware update is pending (and flags the
 * update for the kernel), then delegates to emlxs_reset().
 *
 * Returns FC_UNBOUND when the port is not bound, FC_FAILURE for an
 * unknown command, otherwise the emlxs_reset() result.
 */
static int32_t
emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t rval;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Port not bound.");

		return (FC_UNBOUND);
	}

	switch (cmd) {
	case FC_FCA_LINK_RESET:
		/* A pending firmware update requires a full adapter reset */
		if (hba->fw_flag & FW_UPDATE_NEEDED) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
			cmd = FC_FCA_RESET;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: FC_FCA_LINK_RESET");
		}
		break;

	case FC_FCA_CORE:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_CORE");
		break;

	case FC_FCA_RESET:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_RESET");
		break;

	case FC_FCA_RESET_CORE:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_RESET_CORE");
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Unknown command. cmd=%x", cmd);
		return (FC_FAILURE);
	}

	if (hba->fw_flag & FW_UPDATE_NEEDED) {
		hba->fw_flag |= FW_UPDATE_KERNEL;
	}

	rval = emlxs_reset(port, cmd);

	return (rval);

} /* emlxs_fca_reset() */


/*
 * emlxs_port_manage() - FCA port_manage entry point.
 *
 * Dispatches ULP port-management requests (pm->pm_cmd_code): firmware /
 * fcode / boot revisions, diagnostic dumps, link state, RLS error
 * statistics, firmware downloads, and the FC_PORT_DIAG sub-commands
 * (diagnostics, driver parameter get/set, message log, VPD, etc.).
 * Input arrives in pm_cmd_buf/pm_data_buf and results are written to
 * pm_stat_buf/pm_data_buf, with buffer lengths validated per command.
 *
 * Returns FC_SUCCESS or a command-specific failure code (FC_NOMEM,
 * FC_BADCMD, FC_OFFLINE, FC_UNBOUND, ...).
 */
extern int
emlxs_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t ret;
	emlxs_vpd_t *vpd = &VPD;


	ret = FC_SUCCESS;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (FC_UNBOUND);
	}


#ifdef IDLE_TIMER
	/* Any management activity counts as device activity for PM */
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	switch (pm->pm_cmd_code) {

	case FC_PORT_GET_FW_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FW_REV");

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fw_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;

			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_FCODE_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FCODE_REV");

		/* Force update here just to be sure */
		emlxs_get_fcode_version(hba);

		(void) sprintf(buffer, "%s %s", hba->model_info.model,
		    vpd->fcode_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;
			break;
		}

		(void) strcpy(pm->pm_data_buf, buffer);
		break;
	}

	case FC_PORT_GET_DUMP_SIZE:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");

		if (pm->pm_data_len < sizeof (uint32_t)) {
			ret = FC_NOMEM;
			break;
		}

		/* NULL buffer => size query only */
		(void) emlxs_get_dump(hba, NULL, &dump_size);

		*((uint32_t *)pm->pm_data_buf) = dump_size;

#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");

#endif /* DUMP_SUPPORT */

		break;
	}

	case FC_PORT_GET_DUMP:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP");

		/* First query the size to validate the caller's buffer */
		(void) emlxs_get_dump(hba, NULL, &dump_size);

		if (pm->pm_data_len < dump_size) {
			ret = FC_NOMEM;
			break;
		}

		(void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
		    (uint32_t *)&dump_size);
#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP unsupported.");

#endif /* DUMP_SUPPORT */

		break;
	}

	case FC_PORT_FORCE_DUMP:
	{
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_FORCE_DUMP");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);
#else
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");

#endif /* DUMP_SUPPORT */
		break;
	}

	case FC_PORT_LINK_STATE:
	{
		uint32_t *link_state;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_LINK_STATE");

		if (pm->pm_stat_len != sizeof (*link_state)) {
			ret = FC_NOMEM;
			break;
		}

		if (pm->pm_cmd_buf != NULL) {
			/*
			 * Can't look beyond the FCA port.
			 */
			ret = FC_INVALID_REQUEST;
			break;
		}

		link_state = (uint32_t *)pm->pm_stat_buf;

		/* Set the state */
		if (hba->state >= FC_LINK_UP) {
			/* Check for loop topology */
			if (hba->topology == TOPOLOGY_LOOP) {
				*link_state = FC_STATE_LOOP;
			} else {
				*link_state = FC_STATE_ONLINE;
			}

			/* Set the link speed */
			switch (hba->linkspeed) {
			case LA_2GHZ_LINK:
				*link_state |= FC_STATE_2GBIT_SPEED;
				break;
			case LA_4GHZ_LINK:
				*link_state |= FC_STATE_4GBIT_SPEED;
				break;
			case LA_8GHZ_LINK:
				*link_state |= FC_STATE_8GBIT_SPEED;
				break;
			case LA_10GHZ_LINK:
				*link_state |= FC_STATE_10GBIT_SPEED;
				break;
			case LA_1GHZ_LINK:
			default:
				*link_state |= FC_STATE_1GBIT_SPEED;
				break;
			}
		} else {
			*link_state = FC_STATE_OFFLINE;
		}

		break;
	}


	case FC_PORT_ERR_STATS:
	case FC_PORT_RLS:
	{
		MAILBOXQ *mbq;
		MAILBOX *mb;
		fc_rls_acc_t *bp;

		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");

		if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
			ret = FC_NOMEM;
			break;
		}

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX, 1)) == 0) {
			ret = FC_NOMEM;
			break;
		}
		mb = (MAILBOX *)mbq;

		/* Issue READ_LNK_STAT and copy the counters out */
		emlxs_mb_read_lnk_stat(hba, mbq);
		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
		    != MBX_SUCCESS) {
			ret = FC_PBUSY;
		} else {
			bp = (fc_rls_acc_t *)pm->pm_data_buf;

			bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
			bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
			bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
			bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
			bp->rls_invalid_word =
			    mb->un.varRdLnk.invalidXmitWord;
			bp->rls_invalid_crc =
			    mb->un.varRdLnk.crcCnt;
		}

		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
		break;
	}

	case FC_PORT_DOWNLOAD_FW:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_DOWNLOAD_FW");
		ret = emlxs_fw_download(hba, pm->pm_data_buf,
		    pm->pm_data_len, 1);
		break;

	case FC_PORT_DOWNLOAD_FCODE:
		if (!(hba->flag & FC_ONLINE_MODE)) {
			return (FC_OFFLINE);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
		ret = emlxs_fw_download(hba, pm->pm_data_buf,
		    pm->pm_data_len, 1);
		break;

	case FC_PORT_DIAG:
	{
		/*
		 * NOTE(review): this local is named 'errno', shadowing the
		 * usual C identifier of that name — confirm no kernel
		 * header in this build defines errno as a macro.
		 */
		uint32_t errno = 0;
		uint32_t did = 0;
		uint32_t pattern = 0;

		/* Diagnostic / driver-private sub-commands */
		switch (pm->pm_cmd_flags) {
		case EMLXS_DIAG_BIU:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_BIU");

			if (pm->pm_data_len) {
				pattern = *((uint32_t *)pm->pm_data_buf);
			}

			errno = emlxs_diag_biu_run(hba, pattern);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_DIAG_POST:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_POST");

			errno = emlxs_diag_post_run(hba);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_DIAG_ECHO:

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DIAG_ECHO");

			/* Command buffer must hold exactly the target DID */
			if (pm->pm_cmd_len != sizeof (uint32_t)) {
				ret = FC_INVALID_REQUEST;
				break;
}

			did = *((uint32_t *)pm->pm_cmd_buf);

			/* Optional data buffer supplies the echo pattern */
			if (pm->pm_data_len) {
				pattern = *((uint32_t *)pm->pm_data_buf);
			}

			errno = emlxs_diag_echo_run(port, did, pattern);

			if (pm->pm_stat_len == sizeof (errno)) {
				*(int *)pm->pm_stat_buf = errno;
			}

			break;


		case EMLXS_PARM_GET_NUM:
		{
			/* Report how many non-hidden config parameters exist */
			uint32_t *num;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t count;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET_NUM");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			num = (uint32_t *)pm->pm_stat_buf;
			count = 0;
			cfg = &CFG;
			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					count++;
				}

			}

			*num = count;

			break;
		}

		case EMLXS_PARM_GET_LIST:
		{
			/* Copy out as many non-hidden parameters as fit */
			emlxs_parm_t *parm;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t max_count;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET_LIST");

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				ret = FC_NOMEM;
				break;
			}

			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);

			parm = (emlxs_parm_t *)pm->pm_stat_buf;
			cfg = &CFG;
			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
			    cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					(void) strcpy(parm->label, cfg->string);
					parm->min = cfg->low;
					parm->max = cfg->hi;
					parm->def = cfg->def;
					parm->current = cfg->current;
					parm->flags = cfg->flags;
					(void) strcpy(parm->help, cfg->help);
					parm++;
					max_count--;
				}
			}

			break;
		}

		case EMLXS_PARM_GET:
		{
			/* Look up one parameter by exact label match */
			emlxs_parm_t *parm_in;
			emlxs_parm_t *parm_out;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t len;

			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "inbuf too small.");

				ret = FC_BADCMD;
				break;
			}

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "outbuf too small");

				ret = FC_BADCMD;
				break;
			}

			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
			len = strlen(parm_in->label);
			cfg = &CFG;
			ret = FC_BADOBJECT;	/* until a match is found */

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_GET: %s",
			    parm_in->label);

			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (len == strlen(cfg->string) &&
				    (strcmp(parm_in->label,
				    cfg->string) == 0)) {
					(void) strcpy(parm_out->label,
					    cfg->string);
					parm_out->min = cfg->low;
					parm_out->max = cfg->hi;
					parm_out->def = cfg->def;
					parm_out->current = cfg->current;
					parm_out->flags = cfg->flags;
					(void) strcpy(parm_out->help,
					    cfg->help);

					ret = FC_SUCCESS;
					break;
				}
			}

			break;
		}

		case EMLXS_PARM_SET:
		{
			/*
			 * Update one parameter by label, echoing the new
			 * state back in pm_stat_buf.
			 * NOTE(review): the two error log messages below
			 * say "EMLXS_PARM_GET" — apparent copy-paste in the
			 * message text (left untouched here since it is
			 * runtime log output).
			 */
			emlxs_parm_t *parm_in;
			emlxs_parm_t *parm_out;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t len;

			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "inbuf too small.");

				ret = FC_BADCMD;
				break;
			}

			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_PARM_GET. "
				    "outbuf too small");
				ret = FC_BADCMD;
				break;
			}

			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
			len = strlen(parm_in->label);
			cfg = &CFG;
			ret = FC_BADOBJECT;	/* until a match is found */

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_PARM_SET: %s=0x%x,%d",
			    parm_in->label, parm_in->current,
			    parm_in->current);

			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				/* Find matching parameter string */
				if (len == strlen(cfg->string) &&
				    (strcmp(parm_in->label,
				    cfg->string) == 0)) {
					/* Attempt to update parameter */
					if (emlxs_set_parm(hba, i,
					    parm_in->current) == FC_SUCCESS) {
						(void) strcpy(parm_out->label,
						    cfg->string);
						parm_out->min = cfg->low;
						parm_out->max = cfg->hi;
						parm_out->def = cfg->def;
						parm_out->current =
						    cfg->current;
						parm_out->flags = cfg->flags;
						(void) strcpy(parm_out->help,
						    cfg->help);

						ret = FC_SUCCESS;
					}

					break;
				}
			}

			break;
		}

		case EMLXS_LOG_GET:
		{
			emlxs_log_req_t *req;
			emlxs_log_resp_t *resp;
			uint32_t len;

			/* Check command size */
			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the request */
			req = (emlxs_log_req_t *)pm->pm_cmd_buf;

			/* Calculate the response length from the request */
			len = sizeof (emlxs_log_resp_t) +
			    (req->count * MAX_LOG_MSG_LENGTH);

			/* Check the response buffer length */
			if (pm->pm_stat_len < len) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the response pointer */
			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;

			/* Get the requested log entries */
			(void) emlxs_msg_log_get(hba, req, resp);

			ret = FC_SUCCESS;
			break;
		}

		case EMLXS_GET_BOOT_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_BOOT_REV");

			/*
			 * NOTE(review): the length check covers only
			 * boot_version, but the sprintf below also writes
			 * the model string and a space — verify pm_stat_len
			 * is large enough in practice for "<model> <rev>".
			 */
			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			(void) sprintf(pm->pm_stat_buf, "%s %s",
			    hba->model_info.model, vpd->boot_version);

			break;
		}

		case EMLXS_DOWNLOAD_BOOT:
			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DOWNLOAD_BOOT");

			ret = emlxs_fw_download(hba, pm->pm_data_buf,
			    pm->pm_data_len, 1);
			break;

		case EMLXS_DOWNLOAD_CFL:
		{
			uint32_t *buffer;
			uint32_t region;
			uint32_t length;

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_DOWNLOAD_CFL");

			/* Extract the region number from the first word. */
			buffer = (uint32_t *)pm->pm_data_buf;
			region = *buffer++;

			/* Adjust the image length for the header word */
			length = pm->pm_data_len - 4;

			ret =
			    emlxs_cfl_download(hba, region, (caddr_t)buffer,
			    length);
			break;
		}

		case EMLXS_VPD_GET:
		{
			emlxs_vpd_desc_t *vpd_out;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_VPD_GET");

			if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
				ret = FC_BADCMD;
				break;
			}

			vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
			bzero(vpd_out, sizeof (emlxs_vpd_desc_t));

			/* Copy each VPD field, bounded by the output field */
			(void) strncpy(vpd_out->id, vpd->id,
			    sizeof (vpd_out->id));
			(void) strncpy(vpd_out->part_num, vpd->part_num,
			    sizeof (vpd_out->part_num));
			(void) strncpy(vpd_out->eng_change, vpd->eng_change,
			    sizeof (vpd_out->eng_change));
			(void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
			    sizeof (vpd_out->manufacturer));
			(void) strncpy(vpd_out->serial_num, vpd->serial_num,
			    sizeof (vpd_out->serial_num));
			(void) strncpy(vpd_out->model, vpd->model,
			    sizeof (vpd_out->model));
			(void) strncpy(vpd_out->model_desc, vpd->model_desc,
			    sizeof (vpd_out->model_desc));
			(void) strncpy(vpd_out->port_num, vpd->port_num,
			    sizeof (vpd_out->port_num));
			(void) strncpy(vpd_out->prog_types, vpd->prog_types,
			    sizeof (vpd_out->prog_types));

			ret = FC_SUCCESS;

			break;
		}

		case EMLXS_GET_FCIO_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_FCIO_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;

			break;
		}

		case EMLXS_GET_DFC_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: EMLXS_GET_DFC_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = DFC_REV;

			break;
		}

		case EMLXS_SET_BOOT_STATE:
		case EMLXS_SET_BOOT_STATE_old:
		{
			/* Enable/disable the adapter boot code (0=disable) */
			uint32_t state;

			if (!(hba->flag & FC_ONLINE_MODE)) {
				return (FC_OFFLINE);
			}
			if (pm->pm_cmd_len < sizeof (uint32_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE");
				ret = FC_BADCMD;
				break;
			}

			state = *(uint32_t *)pm->pm_cmd_buf;

			if (state == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
				    "Disable");
				ret = emlxs_boot_code_disable(hba);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: EMLXS_SET_BOOT_STATE: "
				    "Enable");
				ret = emlxs_boot_code_enable(hba);
			}
break; 4718 } 4719 4720 case EMLXS_GET_BOOT_STATE: 4721 case EMLXS_GET_BOOT_STATE_old: 4722 { 4723 if (!(hba->flag & FC_ONLINE_MODE)) { 4724 return (FC_OFFLINE); 4725 } 4726 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4727 "fca_port_manage: EMLXS_GET_BOOT_STATE"); 4728 4729 if (pm->pm_stat_len < sizeof (uint32_t)) { 4730 ret = FC_NOMEM; 4731 break; 4732 } 4733 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4734 4735 ret = emlxs_boot_code_state(hba); 4736 4737 if (ret == FC_SUCCESS) { 4738 *(uint32_t *)pm->pm_stat_buf = 1; 4739 ret = FC_SUCCESS; 4740 } else if (ret == FC_FAILURE) { 4741 ret = FC_SUCCESS; 4742 } 4743 4744 break; 4745 } 4746 4747 case EMLXS_HW_ERROR_TEST: 4748 { 4749 if (!(hba->flag & FC_ONLINE_MODE)) { 4750 return (FC_OFFLINE); 4751 } 4752 4753 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4754 "fca_port_manage: EMLXS_HW_ERROR_TEST"); 4755 4756 /* Trigger a mailbox timeout */ 4757 hba->mbox_timer = hba->timer_tics; 4758 4759 break; 4760 } 4761 4762 case EMLXS_TEST_CODE: 4763 { 4764 uint32_t *cmd; 4765 4766 if (!(hba->flag & FC_ONLINE_MODE)) { 4767 return (FC_OFFLINE); 4768 } 4769 4770 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4771 "fca_port_manage: EMLXS_TEST_CODE"); 4772 4773 if (pm->pm_cmd_len < sizeof (uint32_t)) { 4774 EMLXS_MSGF(EMLXS_CONTEXT, 4775 &emlxs_sfs_debug_msg, 4776 "fca_port_manage: EMLXS_TEST_CODE. 
" 4777 "inbuf to small."); 4778 4779 ret = FC_BADCMD; 4780 break; 4781 } 4782 4783 cmd = (uint32_t *)pm->pm_cmd_buf; 4784 4785 ret = emlxs_test(hba, cmd[0], 4786 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 4787 4788 break; 4789 } 4790 4791 case EMLXS_BAR_IO: 4792 { 4793 uint32_t *cmd; 4794 uint32_t *datap; 4795 uint32_t offset; 4796 caddr_t addr; 4797 uint32_t i; 4798 uint32_t tx_cnt; 4799 uint32_t chip_cnt; 4800 4801 cmd = (uint32_t *)pm->pm_cmd_buf; 4802 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4803 "fca_port_manage: EMLXS_BAR_IO %x %x %x", 4804 cmd[0], cmd[1], cmd[2]); 4805 4806 offset = cmd[1]; 4807 4808 ret = FC_SUCCESS; 4809 4810 switch (cmd[0]) { 4811 case 2: /* bar1read */ 4812 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4813 return (FC_BADCMD); 4814 } 4815 4816 /* Registers in this range are invalid */ 4817 if ((offset >= 0x4C00) && (offset < 0x5000)) { 4818 return (FC_BADCMD); 4819 } 4820 if ((offset >= 0x5800) || (offset & 0x3)) { 4821 return (FC_BADCMD); 4822 } 4823 datap = (uint32_t *)pm->pm_stat_buf; 4824 4825 for (i = 0; i < pm->pm_stat_len; 4826 i += sizeof (uint32_t)) { 4827 if ((offset >= 0x4C00) && 4828 (offset < 0x5000)) { 4829 pm->pm_stat_len = i; 4830 break; 4831 } 4832 if (offset >= 0x5800) { 4833 pm->pm_stat_len = i; 4834 break; 4835 } 4836 addr = hba->sli.sli4.bar1_addr + offset; 4837 *datap = READ_BAR1_REG(hba, addr); 4838 datap++; 4839 offset += sizeof (uint32_t); 4840 } 4841 #ifdef FMA_SUPPORT 4842 /* Access handle validation */ 4843 EMLXS_CHK_ACC_HANDLE(hba, 4844 hba->sli.sli4.bar1_acc_handle); 4845 #endif /* FMA_SUPPORT */ 4846 break; 4847 case 3: /* bar2read */ 4848 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4849 return (FC_BADCMD); 4850 } 4851 if ((offset >= 0x1000) || (offset & 0x3)) { 4852 return (FC_BADCMD); 4853 } 4854 datap = (uint32_t *)pm->pm_stat_buf; 4855 4856 for (i = 0; i < pm->pm_stat_len; 4857 i += sizeof (uint32_t)) { 4858 *datap = READ_BAR2_REG(hba, 4859 hba->sli.sli4.bar2_addr + offset); 4860 datap++; 
4861 offset += sizeof (uint32_t); 4862 } 4863 #ifdef FMA_SUPPORT 4864 /* Access handle validation */ 4865 EMLXS_CHK_ACC_HANDLE(hba, 4866 hba->sli.sli4.bar2_acc_handle); 4867 #endif /* FMA_SUPPORT */ 4868 break; 4869 case 4: /* bar1write */ 4870 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4871 return (FC_BADCMD); 4872 } 4873 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr + 4874 offset, cmd[2]); 4875 #ifdef FMA_SUPPORT 4876 /* Access handle validation */ 4877 EMLXS_CHK_ACC_HANDLE(hba, 4878 hba->sli.sli4.bar1_acc_handle); 4879 #endif /* FMA_SUPPORT */ 4880 break; 4881 case 5: /* bar2write */ 4882 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4883 return (FC_BADCMD); 4884 } 4885 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr + 4886 offset, cmd[2]); 4887 #ifdef FMA_SUPPORT 4888 /* Access handle validation */ 4889 EMLXS_CHK_ACC_HANDLE(hba, 4890 hba->sli.sli4.bar2_acc_handle); 4891 #endif /* FMA_SUPPORT */ 4892 break; 4893 case 6: /* dumpbsmbox */ 4894 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4895 return (FC_BADCMD); 4896 } 4897 if (offset != 0) { 4898 return (FC_BADCMD); 4899 } 4900 4901 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt, 4902 (caddr_t)pm->pm_stat_buf, 256); 4903 break; 4904 case 7: /* pciread */ 4905 if ((offset >= 0x200) || (offset & 0x3)) { 4906 return (FC_BADCMD); 4907 } 4908 datap = (uint32_t *)pm->pm_stat_buf; 4909 for (i = 0; i < pm->pm_stat_len; 4910 i += sizeof (uint32_t)) { 4911 *datap = ddi_get32(hba->pci_acc_handle, 4912 (uint32_t *)(hba->pci_addr + 4913 offset)); 4914 datap++; 4915 offset += sizeof (uint32_t); 4916 } 4917 #ifdef FMA_SUPPORT 4918 /* Access handle validation */ 4919 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle); 4920 #endif /* FMA_SUPPORT */ 4921 break; 4922 case 8: /* abortall */ 4923 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4924 return (FC_BADCMD); 4925 } 4926 emlxs_abort_all(hba, &tx_cnt, &chip_cnt); 4927 datap = (uint32_t *)pm->pm_stat_buf; 4928 *datap++ = tx_cnt; 4929 *datap = chip_cnt; 4930 break; 4931 default: 4932 ret = 
FC_BADCMD; 4933 break; 4934 } 4935 break; 4936 } 4937 4938 default: 4939 4940 ret = FC_INVALID_REQUEST; 4941 break; 4942 } 4943 4944 break; 4945 4946 } 4947 4948 case FC_PORT_INITIALIZE: 4949 if (!(hba->flag & FC_ONLINE_MODE)) { 4950 return (FC_OFFLINE); 4951 } 4952 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4953 "fca_port_manage: FC_PORT_INITIALIZE"); 4954 break; 4955 4956 case FC_PORT_LOOPBACK: 4957 if (!(hba->flag & FC_ONLINE_MODE)) { 4958 return (FC_OFFLINE); 4959 } 4960 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4961 "fca_port_manage: FC_PORT_LOOPBACK"); 4962 break; 4963 4964 case FC_PORT_BYPASS: 4965 if (!(hba->flag & FC_ONLINE_MODE)) { 4966 return (FC_OFFLINE); 4967 } 4968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4969 "fca_port_manage: FC_PORT_BYPASS"); 4970 ret = FC_INVALID_REQUEST; 4971 break; 4972 4973 case FC_PORT_UNBYPASS: 4974 if (!(hba->flag & FC_ONLINE_MODE)) { 4975 return (FC_OFFLINE); 4976 } 4977 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4978 "fca_port_manage: FC_PORT_UNBYPASS"); 4979 ret = FC_INVALID_REQUEST; 4980 break; 4981 4982 case FC_PORT_GET_NODE_ID: 4983 { 4984 fc_rnid_t *rnid; 4985 4986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4987 "fca_port_manage: FC_PORT_GET_NODE_ID"); 4988 4989 bzero(pm->pm_data_buf, pm->pm_data_len); 4990 4991 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 4992 ret = FC_NOMEM; 4993 break; 4994 } 4995 4996 rnid = (fc_rnid_t *)pm->pm_data_buf; 4997 4998 (void) sprintf((char *)rnid->global_id, 4999 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 5000 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 5001 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 5002 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 5003 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 5004 5005 rnid->unit_type = RNID_HBA; 5006 rnid->port_id = port->did; 5007 rnid->ip_version = RNID_IPV4; 5008 5009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5010 "GET_NODE_ID: wwpn: %s", rnid->global_id); 5011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 
5012 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5013 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5014 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 5015 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5016 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 5017 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5018 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5019 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5020 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 5021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5022 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5023 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5024 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 5025 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5026 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5027 5028 ret = FC_SUCCESS; 5029 break; 5030 } 5031 5032 case FC_PORT_SET_NODE_ID: 5033 { 5034 fc_rnid_t *rnid; 5035 5036 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5037 "fca_port_manage: FC_PORT_SET_NODE_ID"); 5038 5039 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 5040 ret = FC_NOMEM; 5041 break; 5042 } 5043 5044 rnid = (fc_rnid_t *)pm->pm_data_buf; 5045 5046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5047 "SET_NODE_ID: wwpn: %s", rnid->global_id); 5048 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5049 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5050 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5051 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 5052 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5053 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 5054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5055 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5056 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5057 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 5058 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5059 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5061 "SET_NODE_ID: resv: 0x%x", 
rnid->specific_id_resv); 5062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5063 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5064 5065 ret = FC_SUCCESS; 5066 break; 5067 } 5068 5069 #ifdef S11 5070 case FC_PORT_GET_P2P_INFO: 5071 { 5072 fc_fca_p2p_info_t *p2p_info; 5073 NODELIST *ndlp; 5074 5075 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5076 "fca_port_manage: FC_PORT_GET_P2P_INFO"); 5077 5078 bzero(pm->pm_data_buf, pm->pm_data_len); 5079 5080 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) { 5081 ret = FC_NOMEM; 5082 break; 5083 } 5084 5085 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf; 5086 5087 if (hba->state >= FC_LINK_UP) { 5088 if ((hba->topology == TOPOLOGY_PT_PT) && 5089 (hba->flag & FC_PT_TO_PT)) { 5090 p2p_info->fca_d_id = port->did; 5091 p2p_info->d_id = port->rdid; 5092 5093 ndlp = emlxs_node_find_did(port, 5094 port->rdid); 5095 5096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5097 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, " 5098 "d_id: 0x%x, ndlp: 0x%p", port->did, 5099 port->rdid, ndlp); 5100 if (ndlp) { 5101 bcopy(&ndlp->nlp_portname, 5102 (caddr_t)&p2p_info->pwwn, 5103 sizeof (la_wwn_t)); 5104 bcopy(&ndlp->nlp_nodename, 5105 (caddr_t)&p2p_info->nwwn, 5106 sizeof (la_wwn_t)); 5107 5108 ret = FC_SUCCESS; 5109 break; 5110 5111 } 5112 } 5113 } 5114 5115 ret = FC_FAILURE; 5116 break; 5117 } 5118 #endif 5119 5120 default: 5121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5122 "fca_port_manage: code=%x", pm->pm_cmd_code); 5123 ret = FC_INVALID_REQUEST; 5124 break; 5125 5126 } 5127 5128 return (ret); 5129 5130 } /* emlxs_port_manage() */ 5131 5132 5133 /*ARGSUSED*/ 5134 static uint32_t 5135 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args, 5136 uint32_t *arg) 5137 { 5138 uint32_t rval = 0; 5139 emlxs_port_t *port = &PPORT; 5140 5141 switch (test_code) { 5142 #ifdef TEST_SUPPORT 5143 case 1: /* SCSI underrun */ 5144 { 5145 hba->underrun_counter = (args)? 
arg[0]:1; 5146 break; 5147 } 5148 #endif /* TEST_SUPPORT */ 5149 5150 default: 5151 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5152 "emlxs_test: Unsupported test code. (0x%x)", test_code); 5153 rval = FC_INVALID_REQUEST; 5154 } 5155 5156 return (rval); 5157 5158 } /* emlxs_test() */ 5159 5160 5161 /* 5162 * Given the device number, return the devinfo pointer or the ddiinst number. 5163 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even 5164 * before attach. 5165 * 5166 * Translate "dev_t" to a pointer to the associated "dev_info_t". 5167 */ 5168 /*ARGSUSED*/ 5169 static int 5170 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 5171 { 5172 emlxs_hba_t *hba; 5173 int32_t ddiinst; 5174 5175 ddiinst = getminor((dev_t)arg); 5176 5177 switch (infocmd) { 5178 case DDI_INFO_DEVT2DEVINFO: 5179 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5180 if (hba) 5181 *result = hba->dip; 5182 else 5183 *result = NULL; 5184 break; 5185 5186 case DDI_INFO_DEVT2INSTANCE: 5187 *result = (void *)((unsigned long)ddiinst); 5188 break; 5189 5190 default: 5191 return (DDI_FAILURE); 5192 } 5193 5194 return (DDI_SUCCESS); 5195 5196 } /* emlxs_info() */ 5197 5198 5199 static int32_t 5200 emlxs_power(dev_info_t *dip, int32_t comp, int32_t level) 5201 { 5202 emlxs_hba_t *hba; 5203 emlxs_port_t *port; 5204 int32_t ddiinst; 5205 int rval = DDI_SUCCESS; 5206 5207 ddiinst = ddi_get_instance(dip); 5208 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5209 port = &PPORT; 5210 5211 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5212 "fca_power: comp=%x level=%x", comp, level); 5213 5214 if (hba == NULL || comp != EMLXS_PM_ADAPTER) { 5215 return (DDI_FAILURE); 5216 } 5217 5218 mutex_enter(&EMLXS_PM_LOCK); 5219 5220 /* If we are already at the proper level then return success */ 5221 if (hba->pm_level == level) { 5222 mutex_exit(&EMLXS_PM_LOCK); 5223 return (DDI_SUCCESS); 5224 } 5225 5226 switch (level) { 5227 case EMLXS_PM_ADAPTER_UP: 5228 
5229 /* 5230 * If we are already in emlxs_attach, 5231 * let emlxs_hba_attach take care of things 5232 */ 5233 if (hba->pm_state & EMLXS_PM_IN_ATTACH) { 5234 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5235 break; 5236 } 5237 5238 /* Check if adapter is suspended */ 5239 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5240 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5241 5242 /* Try to resume the port */ 5243 rval = emlxs_hba_resume(dip); 5244 5245 if (rval != DDI_SUCCESS) { 5246 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5247 } 5248 break; 5249 } 5250 5251 /* Set adapter up */ 5252 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5253 break; 5254 5255 case EMLXS_PM_ADAPTER_DOWN: 5256 5257 5258 /* 5259 * If we are already in emlxs_detach, 5260 * let emlxs_hba_detach take care of things 5261 */ 5262 if (hba->pm_state & EMLXS_PM_IN_DETACH) { 5263 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5264 break; 5265 } 5266 5267 /* Check if adapter is not suspended */ 5268 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5269 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5270 5271 /* Try to suspend the port */ 5272 rval = emlxs_hba_suspend(dip); 5273 5274 if (rval != DDI_SUCCESS) { 5275 hba->pm_level = EMLXS_PM_ADAPTER_UP; 5276 } 5277 5278 break; 5279 } 5280 5281 /* Set adapter down */ 5282 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 5283 break; 5284 5285 default: 5286 rval = DDI_FAILURE; 5287 break; 5288 5289 } 5290 5291 mutex_exit(&EMLXS_PM_LOCK); 5292 5293 return (rval); 5294 5295 } /* emlxs_power() */ 5296 5297 5298 #ifdef EMLXS_I386 5299 #ifdef S11 5300 /* 5301 * quiesce(9E) entry point. 5302 * 5303 * This function is called when the system is single-thread at hight PIL 5304 * with preemption disabled. Therefore, this function must not be blocked. 5305 * 5306 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 5307 * DDI_FAILURE indicates an error condition and should almost never happen. 
5308 */ 5309 static int 5310 emlxs_quiesce(dev_info_t *dip) 5311 { 5312 emlxs_hba_t *hba; 5313 emlxs_port_t *port; 5314 int32_t ddiinst; 5315 int rval = DDI_SUCCESS; 5316 5317 ddiinst = ddi_get_instance(dip); 5318 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5319 port = &PPORT; 5320 5321 if (hba == NULL || port == NULL) { 5322 return (DDI_FAILURE); 5323 } 5324 5325 /* The fourth arg 1 indicates the call is from quiesce */ 5326 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) { 5327 return (rval); 5328 } else { 5329 return (DDI_FAILURE); 5330 } 5331 5332 } /* emlxs_quiesce */ 5333 #endif 5334 #endif /* EMLXS_I386 */ 5335 5336 5337 static int 5338 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p) 5339 { 5340 emlxs_hba_t *hba; 5341 emlxs_port_t *port; 5342 int ddiinst; 5343 5344 ddiinst = getminor(*dev_p); 5345 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5346 5347 if (hba == NULL) { 5348 return (ENXIO); 5349 } 5350 5351 port = &PPORT; 5352 5353 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5354 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5355 "open failed: Driver suspended."); 5356 return (ENXIO); 5357 } 5358 5359 if (otype != OTYP_CHR) { 5360 return (EINVAL); 5361 } 5362 5363 if (drv_priv(cred_p)) { 5364 return (EPERM); 5365 } 5366 5367 mutex_enter(&EMLXS_IOCTL_LOCK); 5368 5369 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) { 5370 mutex_exit(&EMLXS_IOCTL_LOCK); 5371 return (EBUSY); 5372 } 5373 5374 if (flag & FEXCL) { 5375 if (hba->ioctl_flags & EMLXS_OPEN) { 5376 mutex_exit(&EMLXS_IOCTL_LOCK); 5377 return (EBUSY); 5378 } 5379 5380 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE; 5381 } 5382 5383 hba->ioctl_flags |= EMLXS_OPEN; 5384 5385 mutex_exit(&EMLXS_IOCTL_LOCK); 5386 5387 return (0); 5388 5389 } /* emlxs_open() */ 5390 5391 5392 /*ARGSUSED*/ 5393 static int 5394 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p) 5395 { 5396 emlxs_hba_t *hba; 5397 int ddiinst; 5398 5399 ddiinst = getminor(dev); 5400 hba = 
ddi_get_soft_state(emlxs_soft_state, ddiinst); 5401 5402 if (hba == NULL) { 5403 return (ENXIO); 5404 } 5405 5406 if (otype != OTYP_CHR) { 5407 return (EINVAL); 5408 } 5409 5410 mutex_enter(&EMLXS_IOCTL_LOCK); 5411 5412 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5413 mutex_exit(&EMLXS_IOCTL_LOCK); 5414 return (ENODEV); 5415 } 5416 5417 hba->ioctl_flags &= ~EMLXS_OPEN; 5418 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE; 5419 5420 mutex_exit(&EMLXS_IOCTL_LOCK); 5421 5422 return (0); 5423 5424 } /* emlxs_close() */ 5425 5426 5427 /*ARGSUSED*/ 5428 static int 5429 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode, 5430 cred_t *cred_p, int32_t *rval_p) 5431 { 5432 emlxs_hba_t *hba; 5433 emlxs_port_t *port; 5434 int rval = 0; /* return code */ 5435 int ddiinst; 5436 5437 ddiinst = getminor(dev); 5438 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5439 5440 if (hba == NULL) { 5441 return (ENXIO); 5442 } 5443 5444 port = &PPORT; 5445 5446 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5447 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5448 "ioctl failed: Driver suspended."); 5449 5450 return (ENXIO); 5451 } 5452 5453 mutex_enter(&EMLXS_IOCTL_LOCK); 5454 if (!(hba->ioctl_flags & EMLXS_OPEN)) { 5455 mutex_exit(&EMLXS_IOCTL_LOCK); 5456 return (ENXIO); 5457 } 5458 mutex_exit(&EMLXS_IOCTL_LOCK); 5459 5460 #ifdef IDLE_TIMER 5461 emlxs_pm_busy_component(hba); 5462 #endif /* IDLE_TIMER */ 5463 5464 switch (cmd) { 5465 case EMLXS_DFC_COMMAND: 5466 rval = emlxs_dfc_manage(hba, (void *)arg, mode); 5467 break; 5468 5469 default: 5470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg, 5471 "ioctl: Invalid command received. 
cmd=%x", cmd); 5472 rval = EINVAL; 5473 } 5474 5475 done: 5476 return (rval); 5477 5478 } /* emlxs_ioctl() */ 5479 5480 5481 5482 /* 5483 * 5484 * Device Driver Common Routines 5485 * 5486 */ 5487 5488 /* EMLXS_PM_LOCK must be held for this call */ 5489 static int 5490 emlxs_hba_resume(dev_info_t *dip) 5491 { 5492 emlxs_hba_t *hba; 5493 emlxs_port_t *port; 5494 int ddiinst; 5495 5496 ddiinst = ddi_get_instance(dip); 5497 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5498 port = &PPORT; 5499 5500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL); 5501 5502 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) { 5503 return (DDI_SUCCESS); 5504 } 5505 5506 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5507 5508 /* Take the adapter online */ 5509 if (emlxs_power_up(hba)) { 5510 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg, 5511 "Unable to take adapter online."); 5512 5513 hba->pm_state |= EMLXS_PM_SUSPENDED; 5514 5515 return (DDI_FAILURE); 5516 } 5517 5518 return (DDI_SUCCESS); 5519 5520 } /* emlxs_hba_resume() */ 5521 5522 5523 /* EMLXS_PM_LOCK must be held for this call */ 5524 static int 5525 emlxs_hba_suspend(dev_info_t *dip) 5526 { 5527 emlxs_hba_t *hba; 5528 emlxs_port_t *port; 5529 int ddiinst; 5530 5531 ddiinst = ddi_get_instance(dip); 5532 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 5533 port = &PPORT; 5534 5535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL); 5536 5537 if (hba->pm_state & EMLXS_PM_SUSPENDED) { 5538 return (DDI_SUCCESS); 5539 } 5540 5541 hba->pm_state |= EMLXS_PM_SUSPENDED; 5542 5543 /* Take the adapter offline */ 5544 if (emlxs_power_down(hba)) { 5545 hba->pm_state &= ~EMLXS_PM_SUSPENDED; 5546 5547 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg, 5548 "Unable to take adapter offline."); 5549 5550 return (DDI_FAILURE); 5551 } 5552 5553 return (DDI_SUCCESS); 5554 5555 } /* emlxs_hba_suspend() */ 5556 5557 5558 5559 static void 5560 emlxs_lock_init(emlxs_hba_t *hba) 5561 { 5562 emlxs_port_t *port = &PPORT; 5563 int32_t ddiinst; 5564 
char buf[64]; 5565 uint32_t i; 5566 5567 ddiinst = hba->ddiinst; 5568 5569 /* Initialize the power management */ 5570 (void) sprintf(buf, "%s%d_pm_lock mutex", DRIVER_NAME, ddiinst); 5571 mutex_init(&EMLXS_PM_LOCK, buf, MUTEX_DRIVER, 5572 (void *)hba->intr_arg); 5573 5574 (void) sprintf(buf, "%s%d_adap_lock mutex", DRIVER_NAME, ddiinst); 5575 mutex_init(&EMLXS_TIMER_LOCK, buf, MUTEX_DRIVER, 5576 (void *)hba->intr_arg); 5577 5578 (void) sprintf(buf, "%s%d_adap_lock cv", DRIVER_NAME, ddiinst); 5579 cv_init(&hba->timer_lock_cv, buf, CV_DRIVER, NULL); 5580 5581 (void) sprintf(buf, "%s%d_port_lock mutex", DRIVER_NAME, ddiinst); 5582 mutex_init(&EMLXS_PORT_LOCK, buf, MUTEX_DRIVER, 5583 (void *)hba->intr_arg); 5584 5585 (void) sprintf(buf, "%s%d_mbox_lock mutex", DRIVER_NAME, ddiinst); 5586 mutex_init(&EMLXS_MBOX_LOCK, buf, MUTEX_DRIVER, 5587 (void *)hba->intr_arg); 5588 5589 (void) sprintf(buf, "%s%d_mbox_lock cv", DRIVER_NAME, ddiinst); 5590 cv_init(&EMLXS_MBOX_CV, buf, CV_DRIVER, NULL); 5591 5592 (void) sprintf(buf, "%s%d_linkup_lock mutex", DRIVER_NAME, ddiinst); 5593 mutex_init(&EMLXS_LINKUP_LOCK, buf, MUTEX_DRIVER, 5594 (void *)hba->intr_arg); 5595 5596 (void) sprintf(buf, "%s%d_linkup_lock cv", DRIVER_NAME, ddiinst); 5597 cv_init(&EMLXS_LINKUP_CV, buf, CV_DRIVER, NULL); 5598 5599 (void) sprintf(buf, "%s%d_ring_tx_lock mutex", DRIVER_NAME, ddiinst); 5600 mutex_init(&EMLXS_TX_CHANNEL_LOCK, buf, MUTEX_DRIVER, 5601 (void *)hba->intr_arg); 5602 5603 for (i = 0; i < MAX_RINGS; i++) { 5604 (void) sprintf(buf, "%s%d_cmd_ring%d_lock mutex", DRIVER_NAME, 5605 ddiinst, i); 5606 mutex_init(&EMLXS_CMD_RING_LOCK(i), buf, MUTEX_DRIVER, 5607 (void *)hba->intr_arg); 5608 } 5609 5610 (void) sprintf(buf, "%s%d_fctab_lock mutex", DRIVER_NAME, ddiinst); 5611 mutex_init(&EMLXS_FCTAB_LOCK, buf, MUTEX_DRIVER, 5612 (void *)hba->intr_arg); 5613 5614 (void) sprintf(buf, "%s%d_memget_lock mutex", DRIVER_NAME, ddiinst); 5615 mutex_init(&EMLXS_MEMGET_LOCK, buf, MUTEX_DRIVER, 5616 (void 
*)hba->intr_arg); 5617 5618 (void) sprintf(buf, "%s%d_memput_lock mutex", DRIVER_NAME, ddiinst); 5619 mutex_init(&EMLXS_MEMPUT_LOCK, buf, MUTEX_DRIVER, 5620 (void *)hba->intr_arg); 5621 5622 (void) sprintf(buf, "%s%d_ioctl_lock mutex", DRIVER_NAME, ddiinst); 5623 mutex_init(&EMLXS_IOCTL_LOCK, buf, MUTEX_DRIVER, 5624 (void *)hba->intr_arg); 5625 5626 #ifdef DUMP_SUPPORT 5627 (void) sprintf(buf, "%s%d_dump mutex", DRIVER_NAME, ddiinst); 5628 mutex_init(&EMLXS_DUMP_LOCK, buf, MUTEX_DRIVER, 5629 (void *)hba->intr_arg); 5630 #endif /* DUMP_SUPPORT */ 5631 5632 (void) sprintf(buf, "%s%d_thread_lock mutex", DRIVER_NAME, ddiinst); 5633 mutex_init(&EMLXS_SPAWN_LOCK, buf, MUTEX_DRIVER, 5634 (void *)hba->intr_arg); 5635 5636 /* Create per port locks */ 5637 for (i = 0; i < MAX_VPORTS; i++) { 5638 port = &VPORT(i); 5639 5640 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL); 5641 5642 if (i == 0) { 5643 (void) sprintf(buf, "%s%d_pkt_lock mutex", DRIVER_NAME, 5644 ddiinst); 5645 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5646 (void *)hba->intr_arg); 5647 5648 (void) sprintf(buf, "%s%d_pkt_lock cv", DRIVER_NAME, 5649 ddiinst); 5650 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5651 5652 (void) sprintf(buf, "%s%d_ub_lock mutex", DRIVER_NAME, 5653 ddiinst); 5654 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5655 (void *)hba->intr_arg); 5656 } else { 5657 (void) sprintf(buf, "%s%d.%d_pkt_lock mutex", 5658 DRIVER_NAME, ddiinst, port->vpi); 5659 mutex_init(&EMLXS_PKT_LOCK, buf, MUTEX_DRIVER, 5660 (void *)hba->intr_arg); 5661 5662 (void) sprintf(buf, "%s%d.%d_pkt_lock cv", DRIVER_NAME, 5663 ddiinst, port->vpi); 5664 cv_init(&EMLXS_PKT_CV, buf, CV_DRIVER, NULL); 5665 5666 (void) sprintf(buf, "%s%d.%d_ub_lock mutex", 5667 DRIVER_NAME, ddiinst, port->vpi); 5668 mutex_init(&EMLXS_UB_LOCK, buf, MUTEX_DRIVER, 5669 (void *)hba->intr_arg); 5670 } 5671 } 5672 5673 return; 5674 5675 } /* emlxs_lock_init() */ 5676 5677 5678 5679 static void 5680 emlxs_lock_destroy(emlxs_hba_t *hba) 5681 { 
5682 emlxs_port_t *port = &PPORT; 5683 uint32_t i; 5684 5685 mutex_destroy(&EMLXS_TIMER_LOCK); 5686 cv_destroy(&hba->timer_lock_cv); 5687 5688 mutex_destroy(&EMLXS_PORT_LOCK); 5689 5690 cv_destroy(&EMLXS_MBOX_CV); 5691 cv_destroy(&EMLXS_LINKUP_CV); 5692 5693 mutex_destroy(&EMLXS_LINKUP_LOCK); 5694 mutex_destroy(&EMLXS_MBOX_LOCK); 5695 5696 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK); 5697 5698 for (i = 0; i < MAX_RINGS; i++) { 5699 mutex_destroy(&EMLXS_CMD_RING_LOCK(i)); 5700 } 5701 5702 mutex_destroy(&EMLXS_FCTAB_LOCK); 5703 mutex_destroy(&EMLXS_MEMGET_LOCK); 5704 mutex_destroy(&EMLXS_MEMPUT_LOCK); 5705 mutex_destroy(&EMLXS_IOCTL_LOCK); 5706 mutex_destroy(&EMLXS_SPAWN_LOCK); 5707 mutex_destroy(&EMLXS_PM_LOCK); 5708 5709 #ifdef DUMP_SUPPORT 5710 mutex_destroy(&EMLXS_DUMP_LOCK); 5711 #endif /* DUMP_SUPPORT */ 5712 5713 /* Destroy per port locks */ 5714 for (i = 0; i < MAX_VPORTS; i++) { 5715 port = &VPORT(i); 5716 rw_destroy(&port->node_rwlock); 5717 mutex_destroy(&EMLXS_PKT_LOCK); 5718 cv_destroy(&EMLXS_PKT_CV); 5719 mutex_destroy(&EMLXS_UB_LOCK); 5720 } 5721 5722 return; 5723 5724 } /* emlxs_lock_destroy() */ 5725 5726 5727 /* init_flag values */ 5728 #define ATTACH_SOFT_STATE 0x00000001 5729 #define ATTACH_FCA_TRAN 0x00000002 5730 #define ATTACH_HBA 0x00000004 5731 #define ATTACH_LOG 0x00000008 5732 #define ATTACH_MAP_BUS 0x00000010 5733 #define ATTACH_INTR_INIT 0x00000020 5734 #define ATTACH_PROP 0x00000040 5735 #define ATTACH_LOCK 0x00000080 5736 #define ATTACH_THREAD 0x00000100 5737 #define ATTACH_INTR_ADD 0x00000200 5738 #define ATTACH_ONLINE 0x00000400 5739 #define ATTACH_NODE 0x00000800 5740 #define ATTACH_FCT 0x00001000 5741 #define ATTACH_FCA 0x00002000 5742 #define ATTACH_KSTAT 0x00004000 5743 #define ATTACH_DHCHAP 0x00008000 5744 #define ATTACH_FM 0x00010000 5745 #define ATTACH_MAP_SLI 0x00020000 5746 #define ATTACH_SPAWN 0x00040000 5747 #define ATTACH_EVENTS 0x00080000 5748 5749 static void 5750 emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, 
    uint32_t failed)
{
	/*
	 * Tear down, roughly in reverse order of attach, every resource
	 * whose ATTACH_* bit is set in init_flag.  When 'failed' is
	 * non-zero the per-instance slot is poisoned with -1 so that
	 * emlxs_hba_attach() will refuse to attach this instance again.
	 */
	emlxs_hba_t	*hba = NULL;
	int		ddiinst;

	ddiinst = ddi_get_instance(dip);

	if (init_flag & ATTACH_HBA) {
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

		if (init_flag & ATTACH_SPAWN) {
			emlxs_thread_spawn_destroy(hba);
		}

		if (init_flag & ATTACH_EVENTS) {
			(void) emlxs_event_queue_destroy(hba);
		}

		if (init_flag & ATTACH_ONLINE) {
			(void) emlxs_offline(hba);
		}

		if (init_flag & ATTACH_INTR_ADD) {
			(void) EMLXS_INTR_REMOVE(hba);
		}
#ifdef SFCT_SUPPORT
		if (init_flag & ATTACH_FCT) {
			emlxs_fct_detach(hba);
			emlxs_fct_modclose();
		}
#endif /* SFCT_SUPPORT */

#ifdef DHCHAP_SUPPORT
		if (init_flag & ATTACH_DHCHAP) {
			emlxs_dhc_detach(hba);
		}
#endif /* DHCHAP_SUPPORT */

		if (init_flag & ATTACH_KSTAT) {
			kstat_delete(hba->kstat);
		}

		if (init_flag & ATTACH_FCA) {
			emlxs_fca_detach(hba);
		}

		if (init_flag & ATTACH_NODE) {
			(void) ddi_remove_minor_node(hba->dip, "devctl");
		}

		if (init_flag & ATTACH_THREAD) {
			emlxs_thread_destroy(&hba->iodone_thread);
		}

		if (init_flag & ATTACH_PROP) {
			(void) ddi_prop_remove_all(hba->dip);
		}

		if (init_flag & ATTACH_LOCK) {
			emlxs_lock_destroy(hba);
		}

		if (init_flag & ATTACH_INTR_INIT) {
			(void) EMLXS_INTR_UNINIT(hba);
		}

		if (init_flag & ATTACH_MAP_BUS) {
			emlxs_unmap_bus(hba);
		}

		if (init_flag & ATTACH_MAP_SLI) {
			EMLXS_SLI_UNMAP_HDW(hba);
		}

#ifdef FMA_SUPPORT
		if (init_flag & ATTACH_FM) {
			emlxs_fm_fini(hba);
		}
#endif /* FMA_SUPPORT */

		if (init_flag & ATTACH_LOG) {
			(void) emlxs_msg_log_destroy(hba);
		}

		if (init_flag & ATTACH_FCA_TRAN) {
			/* Detach our transport copy from the dip before */
			/* freeing it */
			(void) ddi_set_driver_private(hba->dip, NULL);
			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
			hba->fca_tran = NULL;
		}

		if (init_flag & ATTACH_HBA) {
			emlxs_device.log[hba->emlxinst] = 0;
			/* -1 marks this instance slot as permanently failed */
			emlxs_device.hba[hba->emlxinst] =
			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
#ifdef DUMP_SUPPORT
			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
#endif /* DUMP_SUPPORT */

		}
	}

	if (init_flag & ATTACH_SOFT_STATE) {
		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
	}

	return;

} /* emlxs_driver_remove() */



/*
 * This determines which ports will be initiator mode.
 * Binds the HBA to the SFS (initiator) stack via fc_fca_attach(); on any
 * failure initiator mode is cleared on the physical and all virtual ports.
 */
static void
emlxs_fca_init(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	emlxs_port_t	*vport;
	uint32_t	i;

	if (!hba->ini_mode) {
		return;
	}
	/* Check if SFS present */
	/* MODSYM() resolves the symbol dynamically; NULL => no SFS module */
	if (((void *)MODSYM(fc_fca_init) == NULL) ||
	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS not present. Initiator mode disabled.");
		goto failed;
	}

	/* Check if our SFS driver interface matches the current SFS stack */
	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS/FCA version mismatch. FCA=0x%x",
		    hba->fca_tran->fca_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS present. Initiator mode disabled.");

		goto failed;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "SFS present. Initiator mode enabled.");

	return;

failed:

	/* Clear initiator mode everywhere; caller may still try target mode */
	hba->ini_mode = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->ini_mode = 0;
	}

	return;

} /* emlxs_fca_init() */


/*
 * This determines which ports will be initiator or target mode.
 * Target mode wins when CFG_TARGET_MODE is set and the FCT module opens;
 * otherwise the port defaults to initiator mode.  Virtual ports always
 * inherit the physical port's mode.
 */
static void
emlxs_set_mode(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	emlxs_port_t	*vport;
	uint32_t	i;
	uint32_t	tgt_mode = 0;

#ifdef SFCT_SUPPORT
	emlxs_config_t	*cfg;

	cfg = &hba->config[CFG_TARGET_MODE];
	tgt_mode = cfg->current;

	if (tgt_mode) {
		/* Fall back to initiator mode if the FCT module won't open */
		if (emlxs_fct_modopen() != 0) {
			tgt_mode = 0;
		}
	}

	port->fct_flags = 0;
#endif /* SFCT_SUPPORT */

	/* Initialize physical port */
	if (tgt_mode) {
		hba->tgt_mode = 1;
		hba->ini_mode = 0;

		port->tgt_mode = 1;
		port->ini_mode = 0;
	} else {
		hba->tgt_mode = 0;
		hba->ini_mode = 1;

		port->tgt_mode = 0;
		port->ini_mode = 1;
	}

	/* Initialize virtual ports */
	/* Virtual ports take on the mode of the parent physical port */
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

#ifdef SFCT_SUPPORT
		vport->fct_flags = 0;
#endif /* SFCT_SUPPORT */

		vport->ini_mode = port->ini_mode;
		vport->tgt_mode = port->tgt_mode;
	}

	/* Check if initiator mode is requested */
	if (hba->ini_mode) {
		emlxs_fca_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode not enabled.");
	}

#ifdef SFCT_SUPPORT
	/* Check if target mode is requested */
	if (hba->tgt_mode) {
		emlxs_fct_init(hba);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode not enabled.");
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_set_mode() */



static void
emlxs_fca_attach(emlxs_hba_t *hba)
{
	/*
	 * Update our transport structure with live per-HBA values now that
	 * the adapter is online (interrupt cookie, I/O throttle, WWPN).
	 */
	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
	hba->fca_tran->fca_cmd_max = hba->io_throttle;

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
	    sizeof (NAME_TYPE));
#endif /* >= EMLXS_MODREV5 */

	return;

} /* emlxs_fca_attach() */


/*
 * Detach from the SFS initiator stack (if attached) and clear initiator
 * mode on the physical and all virtual ports.  No-op when not in
 * initiator mode.
 */
static void
emlxs_fca_detach(emlxs_hba_t *hba)
{
	uint32_t	i;
	emlxs_port_t	*vport;

	if (hba->ini_mode) {
		/* Symbol may be absent if the SFS module was never loaded */
		if ((void *)MODSYM(fc_fca_detach) != NULL) {
			MODSYM(fc_fca_detach)(hba->dip);
		}

		hba->ini_mode = 0;

		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->ini_mode = 0;
		}
	}

	return;

} /* emlxs_fca_detach() */



/*
 * Log the attach-time banner: driver version, adapter model, firmware,
 * SLI/interrupt/NPIV configuration and the WWPN/WWNN of every configured
 * port, then announce the device via ddi_report_dev().
 */
static void
emlxs_drv_banner(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	uint32_t	i;
	char		sli_mode[16];
	char		msi_mode[16];
	char		npiv_mode[16];
	emlxs_vpd_t	*vpd = &VPD;
	emlxs_config_t	*cfg = &CFG;
	uint8_t		*wwpn;
	uint8_t		*wwnn;

	/* Display firmware library one time */
	if (emlxs_instance_count == 1) {
		emlxs_fw_show(hba);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
	    emlxs_revision);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
	    hba->model_info.device_id, hba->model_info.ssdid,
	    hba->model_info.id);

#ifdef EMLXS_I386

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
	    vpd->boot_version);

#else	/* EMLXS_SPARC */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);

#endif	/* EMLXS_I386 */

	/* SLI4+ reports FIP capability; SLI3 and earlier do not */
	if (hba->sli_mode > 3) {
		(void) sprintf(sli_mode, "SLI:%d(%s)", hba->sli_mode,
		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
	} else {
		(void) sprintf(sli_mode, "SLI:%d", hba->sli_mode);
	}

	(void) strcpy(msi_mode, " INTX:1");

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strcpy(msi_mode, " MSI:0");
			break;

		case DDI_INTR_TYPE_MSI:
			(void) sprintf(msi_mode, " MSI:%d", hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) sprintf(msi_mode, " MSIX:%d", hba->intr_count);
			break;
		}
	}
#endif

	(void) strcpy(npiv_mode, "");

	if (hba->flag & FC_NPIV_ENABLED) {
		(void) sprintf(npiv_mode, " NPIV:%d", hba->vpi_max+1);
	} else {
		(void) strcpy(npiv_mode, " NPIV:0");
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
	    sli_mode, msi_mode, npiv_mode,
	    ((hba->ini_mode)? " FCA":""), ((hba->tgt_mode)? " FCT":""));

	wwpn = (uint8_t *)&hba->wwpn;
	wwnn = (uint8_t *)&hba->wwnn;
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
	    wwnn[6], wwnn[7]);

	/* Log the WWNs of every configured virtual port as well */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_CONFIG)) {
			continue;
		}

		wwpn = (uint8_t *)&port->wwpn;
		wwnn = (uint8_t *)&port->wwnn;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
	}
	port = &PPORT;

	/*
	 * No dependency for Restricted login parameter.
	 */
	if ((cfg[CFG_VPORT_RESTRICTED].current) && (port->ini_mode)) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	} else {
		port->flag &= ~EMLXS_PORT_RESTRICTED;
	}

	/*
	 * Announce the device: ddi_report_dev() prints a banner at boot time,
	 * announcing the device pointed to by dip.
	 */
	(void) ddi_report_dev(hba->dip);

	return;

} /* emlxs_drv_banner() */


/*
 * Populate vpd->fcode_version from the "fcode-version" devinfo property,
 * or the string "none" when the property is absent.
 */
extern void
emlxs_get_fcode_version(emlxs_hba_t *hba)
{
	emlxs_vpd_t	*vpd = &VPD;
	char		*prop_str;
	int		status;

	/* Setup fcode version property */
	prop_str = NULL;
	status =
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
	    "fcode-version", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/*
		 * NOTE(review): bcopy() of strlen() bytes copies no NUL
		 * terminator and performs no bounds check -- this relies on
		 * vpd->fcode_version being pre-zeroed and large enough for
		 * any property value; confirm the buffer size in emlxs_vpd_t.
		 */
		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
		(void) ddi_prop_free((void *)prop_str);
	} else {
		(void) strcpy(vpd->fcode_version, "none");
	}

	return;

} /* emlxs_get_fcode_version() */


/*
 * DDI attach handler for one HBA instance.  Allocates soft state,
 * maps the adapter, initializes interrupts/locks/threads, selects
 * initiator/target mode and brings the adapter online.  Every successful
 * step sets an ATTACH_* bit in init_flag so emlxs_driver_remove() can
 * unwind exactly what was done on failure.
 */
static int
emlxs_hba_attach(dev_info_t *dip)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	emlxs_config_t	*cfg;
	char		*prop_str;
	int		ddiinst;
	int32_t		emlxinst;
	int		status;
	uint32_t	rval;
	uint32_t	init_flag = 0;
	char		local_pm_components[32];
#ifdef EMLXS_I386
	uint32_t	i;
#endif	/* EMLXS_I386 */

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_add_instance(ddiinst);

	if (emlxinst >= MAX_FC_BRDS) {
		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
		    "inst=%x", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* -1 means a previous attach of this instance failed permanently */
	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
		return (DDI_FAILURE);
	}

	/* Already attached */
	if (emlxs_device.hba[emlxinst]) {
		return (DDI_SUCCESS);
	}

	/* An adapter can accidentally be plugged into a slave-only PCI slot */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
		    DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* Allocate emlxs_dev_ctl structure.
	 */
	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
		    "state.", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}
	init_flag |= ATTACH_SOFT_STATE;

	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
	    ddiinst)) == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
		    DRIVER_NAME, ddiinst);
		goto failed;
	}
	bzero((char *)hba, sizeof (emlxs_hba_t));

	emlxs_device.hba[emlxinst] = hba;
	emlxs_device.log[emlxinst] = &hba->log;

#ifdef DUMP_SUPPORT
	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
#endif /* DUMP_SUPPORT */

	hba->dip = dip;
	hba->emlxinst = emlxinst;
	hba->ddiinst = ddiinst;
	/* Modes are decided later by emlxs_set_mode() */
	hba->ini_mode = 0;
	hba->tgt_mode = 0;

	init_flag |= ATTACH_HBA;

	/* Enable the physical port on this HBA */
	port = &PPORT;
	port->hba = hba;
	port->vpi = 0;
	port->flag |= EMLXS_PORT_ENABLE;

	/* Allocate a transport structure */
	hba->fca_tran =
	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
	if (hba->fca_tran == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
		    "memory.", DRIVER_NAME, ddiinst);
		goto failed;
	}
	/* Start from the global template, then point it at per-HBA copies */
	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
	    sizeof (fc_fca_tran_t));

	/*
	 * Copy the global ddi_dma_attr to the local hba fields
	 */
	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));

	/* Reset the fca_tran dma_attr fields to the per-hba copies */
	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;

	/* Set the transport structure pointer in our dip */
	/* SFS may panic if we are in target only mode */
	/* We will update the transport structure later */
	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
	init_flag |= ATTACH_FCA_TRAN;

	/* Perform driver integrity check */
	rval = emlxs_integrity_check(hba);
	if (rval) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Driver integrity check "
		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
		goto failed;
	}

	cfg = &CFG;

	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
#ifdef MSI_SUPPORT
	/* Weak-symbol probe: MSI is usable only if the DDI provides it */
	if ((void *)&ddi_intr_get_supported_types != NULL) {
		hba->intr_flags |= EMLXS_MSI_ENABLED;
	}
#endif /* MSI_SUPPORT */


	/* Create the msg log file */
	if (emlxs_msg_log_create(hba) == 0) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to create message "
		    "log", DRIVER_NAME, ddiinst);
		goto failed;

	}
	init_flag |= ATTACH_LOG;

	/* We can begin to use EMLXS_MSGF from this point on */

	/* Create the event queue */
	if (emlxs_event_queue_create(hba) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create event queue");

		goto failed;

	}
	init_flag |= ATTACH_EVENTS;

	/*
	 * Find the I/O bus type If it is not a SBUS card,
	 * then it is a PCI card. Default is PCI_FC (0).
	 */
	prop_str = NULL;
	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		if (strncmp(prop_str, "lpfs", 4) == 0) {
			hba->bus_type = SBUS_FC;
		}

		(void) ddi_prop_free((void *)prop_str);
	}

	/*
	 * Copy DDS from the config method and update configuration parameters
	 */
	(void) emlxs_get_props(hba);

#ifdef FMA_SUPPORT
	hba->fm_caps = cfg[CFG_FM_CAPS].current;

	emlxs_fm_init(hba);

	init_flag |= ATTACH_FM;
#endif	/* FMA_SUPPORT */

	if (emlxs_map_bus(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_BUS;

	/* Attempt to identify the adapter */
	rval = emlxs_init_adapter_info(hba);

	if (rval == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to get adapter info. Id:%d Device id:0x%x "
		    "Model:%s", hba->model_info.id,
		    hba->model_info.device_id, hba->model_info.model);
		goto failed;
	}

	/* Check if adapter is not supported */
	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unsupported adapter found. Id:%d Device id:0x%x "
		    "SSDID:0x%x Model:%s", hba->model_info.id,
		    hba->model_info.device_id,
		    hba->model_info.ssdid, hba->model_info.model);
		goto failed;
	}
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;
#ifdef EMLXS_I386
		/*
		 * TigerShark has 64K limit for SG element size
		 * Do this for x86 alone. For SPARC, the driver
		 * breaks up the single SGE later on.
		 */
		hba->dma_attr_ro.dma_attr_count_max = 0xffff;

		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update SGL size based on max_xfer_size */
		if (i > 688128) {
			/* 688128 = (((2048 / 12) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 4096;
		} else if (i > 339968) {
			/* 339968 = (((1024 / 12) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 2048;
		} else {
			hba->sli.sli4.mem_sgl_size = 1024;
		}
		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
#endif /* EMLXS_I386 */
	} else {
		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;
#ifdef EMLXS_I386
		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update BPL size based on max_xfer_size */
		if (i > 688128) {
			/* 688128 = (((2048 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 4096;
		} else if (i > 339968) {
			/* 339968 = (((1024 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 2048;
		} else {
			hba->sli.sli3.mem_bpl_size = 1024;
		}
		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
#endif /* EMLXS_I386 */
	}

#ifdef EMLXS_I386
	/* Update dma_attr_sgllen based on BPL size */
	hba->dma_attr.dma_attr_sgllen = i;
	hba->dma_attr_ro.dma_attr_sgllen = i;
	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;
#endif /* EMLXS_I386 */

	if (EMLXS_SLI_MAP_HDW(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_SLI;

	/* Initialize the interrupts. But don't add them yet */
	status = EMLXS_INTR_INIT(hba, 0);
	if (status != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initalize interrupt(s).");
		goto failed;

	}
	init_flag |= ATTACH_INTR_INIT;

	/* Initialize LOCKs */
	emlxs_lock_init(hba);
	init_flag |= ATTACH_LOCK;

	/* Initialize the power management */
	mutex_enter(&EMLXS_PM_LOCK);
	hba->pm_state = EMLXS_PM_IN_ATTACH;
	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
	hba->pm_busy = 0;
#ifdef IDLE_TIMER
	hba->pm_active = 1;
	hba->pm_idle_timer = 0;
#endif	/* IDLE_TIMER */
	mutex_exit(&EMLXS_PM_LOCK);

	/* Set the pm component name */
	(void) sprintf(local_pm_components, "NAME=%s%d", DRIVER_NAME,
	    ddiinst);
	emlxs_pm_components[0] = local_pm_components;

	/* Check if power management support is enabled */
	if (cfg[CFG_PM_SUPPORT].current) {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
		    "pm-components", emlxs_pm_components,
		    sizeof (emlxs_pm_components) /
		    sizeof (emlxs_pm_components[0])) !=
		    DDI_PROP_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
			    "Unable to create pm components.");
			goto failed;
		}
	}

	/* Needed for suspend and resume support */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
	    "needs-suspend-resume");
	init_flag |= ATTACH_PROP;

	emlxs_thread_spawn_create(hba);
	init_flag |= ATTACH_SPAWN;

	emlxs_thread_create(hba, &hba->iodone_thread);

	init_flag |= ATTACH_THREAD;

	/* Setup initiator / target ports */
	emlxs_set_mode(hba);

	/* If driver did not attach to either stack, */
	/* then driver attach failed */
	if (!hba->tgt_mode && !hba->ini_mode) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Driver interfaces not enabled.");
		goto failed;
	}

	/*
	 * Initialize HBA
	 */

	/* Set initial state */
	mutex_enter(&EMLXS_PORT_LOCK);
	emlxs_diag_state = DDI_OFFDI;
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Note: assignment in condition is intentional (non-zero = error) */
	if (status = emlxs_online(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initialize adapter.");
		goto failed;
	}
	init_flag |= ATTACH_ONLINE;

	/* This is to ensure that the model property is properly set */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
	    hba->model_info.model);

	/* Create the device node. */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
	    DDI_FAILURE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create device node.");
		goto failed;
	}
	init_flag |= ATTACH_NODE;

	/* Attach initiator now */
	/* This must come after emlxs_online() */
	emlxs_fca_attach(hba);
	init_flag |= ATTACH_FCA;

	/* Initialize kstat information */
	hba->kstat = kstat_create(DRIVER_NAME,
	    ddiinst, "statistics", "controller",
	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
	    KSTAT_FLAG_VIRTUAL);

	if (hba->kstat == NULL) {
		/* Non-fatal: driver runs without statistics */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "kstat_create failed.");
	} else {
		hba->kstat->ks_data = (void *)&hba->stats;
		kstat_install(hba->kstat);
		init_flag |= ATTACH_KSTAT;
	}

#if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
	/* Setup virtual port properties */
	emlxs_read_vport_prop(hba);
#endif	/* EMLXS_MODREV3 || EMLXS_MODREV4 */


#ifdef DHCHAP_SUPPORT
	emlxs_dhc_attach(hba);
	init_flag |= ATTACH_DHCHAP;
#endif	/* DHCHAP_SUPPORT */

	/* Display the driver banner now */
	emlxs_drv_banner(hba);

	/* Raise the power level */

	/*
	 * This will not execute emlxs_hba_resume because
	 * EMLXS_PM_IN_ATTACH is set
	 */
	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
		/* Set power up anyway. This should not happen! */
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&EMLXS_PM_LOCK);
	} else {
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&EMLXS_PM_LOCK);
	}

#ifdef SFCT_SUPPORT
	/* Do this last */
	emlxs_fct_attach(hba);
	init_flag |= ATTACH_FCT;
#endif /* SFCT_SUPPORT */

	return (DDI_SUCCESS);

failed:

	/* Unwind exactly the steps recorded in init_flag; 1 = poison slot */
	emlxs_driver_remove(dip, init_flag, 1);

	return (DDI_FAILURE);

} /* emlxs_hba_attach() */


/*
 * DDI detach handler.  Lowers power, takes the adapter offline, waits
 * for (and if necessary force-destroys) outstanding unsolicited buffer
 * pools, then removes the instance.  init_flag of all-ones makes
 * emlxs_driver_remove() undo every attach step.
 */
static int
emlxs_hba_detach(dev_info_t *dip)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	int		ddiinst;
	int		count;
	uint32_t	init_flag = (uint32_t)-1;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);

	mutex_enter(&EMLXS_PM_LOCK);
	hba->pm_state |= EMLXS_PM_IN_DETACH;
	mutex_exit(&EMLXS_PM_LOCK);

	/* Lower the power level */
	/*
	 * This will not suspend the driver since the
	 * EMLXS_PM_IN_DETACH has been set
	 */
	if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to lower power.");

		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&EMLXS_PM_LOCK);

		return (DDI_FAILURE);
	}

	/* Take the adapter offline first, if not already */
	if (emlxs_offline(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "Unable to take adapter offline.");

		/* Restore power and PM state before failing the detach */
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_state &= ~EMLXS_PM_IN_DETACH;
		mutex_exit(&EMLXS_PM_LOCK);

		(void) emlxs_pm_raise_power(dip);

		return (DDI_FAILURE);
	}
	/* Check ub buffer pools */
	if (port->ub_pool) {
		mutex_enter(&EMLXS_UB_LOCK);

		/* Wait up to 10 seconds for all ub pools to be freed */
		count = 10 * 2;
		while (port->ub_pool && count) {
			mutex_exit(&EMLXS_UB_LOCK);
			delay(drv_usectohz(500000));	/* half second wait */
			count--;
			mutex_enter(&EMLXS_UB_LOCK);
		}

		if (port->ub_pool) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_unbind_port: Unsolicited buffers still "
			    "active. port=%p. Destroying...", port);

			/* Destroy all pools */
			while (port->ub_pool) {
				emlxs_ub_destroy(port, port->ub_pool);
			}
		}

		mutex_exit(&EMLXS_UB_LOCK);
	}
	/* Adapter was already taken offline above */
	init_flag &= ~ATTACH_ONLINE;

	/* Remove the driver instance */
	emlxs_driver_remove(dip, init_flag, 0);

	return (DDI_SUCCESS);

} /* emlxs_hba_detach() */


/*
 * Map the adapter's PCI (or SBUS) configuration space registers.
 * Idempotent: handles already mapped (non-zero) are left alone.
 * Returns 0 on success, ENOMEM on failure (after unmapping anything
 * mapped here).
 */
extern int
emlxs_map_bus(emlxs_hba_t *hba)
{
	emlxs_port_t	*port = &PPORT;
	dev_info_t	*dip;
	ddi_device_acc_attr_t	dev_attr;
	int		status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {
		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}

		if (hba->sbus_pci_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_TITAN_PCI_CFG_RINDEX,
			    (caddr_t *)&hba->sbus_pci_addr,
			    0, 0, &dev_attr, &hba->sbus_pci_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN PCI "
				    "failed. status=%x", status);
				goto failed;
			}
		}

	} else {	/* ****** PCI ****** */

		if (hba->pci_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    PCI_CFG_RINDEX,
			    (caddr_t *)&hba->pci_addr,
			    0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup PCI failed. "
				    "status=%x", status);
				goto failed;
			}
		}
#ifdef EMLXS_I386
		/* Setting up PCI configure space */
		(void) ddi_put16(hba->pci_acc_handle,
		    (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
		    CMD_CFG_VALUE | CMD_IO_ENBL);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_access_handle_msg, NULL);
			goto failed;
		}
#endif	/* FMA_SUPPORT */

#endif	/* EMLXS_I386 */

	}
	return (0);

failed:

	emlxs_unmap_bus(hba);
	return (ENOMEM);

} /* emlxs_map_bus() */


/*
 * Release any register mappings created by emlxs_map_bus().
 * Safe to call with nothing mapped.
 */
extern void
emlxs_unmap_bus(emlxs_hba_t *hba)
{
	if (hba->pci_acc_handle) {
		(void) ddi_regs_map_free(&hba->pci_acc_handle);
		hba->pci_acc_handle = 0;
	}

	if (hba->sbus_pci_handle) {
		(void) ddi_regs_map_free(&hba->sbus_pci_handle);
		hba->sbus_pci_handle = 0;
	}

	return;

} /* emlxs_unmap_bus() */


/*
 * Load every configuration parameter from driver.conf properties:
 * a global "<name>" property may be overridden by a per-instance
 * "emlxs<N>-<name>" property; the result is range-checked by
 * emlxs_check_parm() before being stored.
 */
static int
emlxs_get_props(emlxs_hba_t *hba)
{
	emlxs_config_t	*cfg;
	uint32_t
i; 6815 char string[256]; 6816 uint32_t new_value; 6817 6818 /* Initialize each parameter */ 6819 for (i = 0; i < NUM_CFG_PARAM; i++) { 6820 cfg = &hba->config[i]; 6821 6822 /* Ensure strings are terminated */ 6823 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0; 6824 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0; 6825 6826 /* Set the current value to the default value */ 6827 new_value = cfg->def; 6828 6829 /* First check for the global setting */ 6830 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6831 (void *)hba->dip, DDI_PROP_DONTPASS, 6832 cfg->string, new_value); 6833 6834 /* Now check for the per adapter ddiinst setting */ 6835 (void) sprintf(string, "%s%d-%s", DRIVER_NAME, hba->ddiinst, 6836 cfg->string); 6837 6838 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 6839 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value); 6840 6841 /* Now check the parameter */ 6842 cfg->current = emlxs_check_parm(hba, i, new_value); 6843 } 6844 6845 return (0); 6846 6847 } /* emlxs_get_props() */ 6848 6849 6850 extern uint32_t 6851 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 6852 { 6853 emlxs_port_t *port = &PPORT; 6854 uint32_t i; 6855 emlxs_config_t *cfg; 6856 emlxs_vpd_t *vpd = &VPD; 6857 6858 if (index > NUM_CFG_PARAM) { 6859 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 6860 "emlxs_check_parm failed. Invalid index = %d", index); 6861 6862 return (new_value); 6863 } 6864 6865 cfg = &hba->config[index]; 6866 6867 if (new_value > cfg->hi) { 6868 new_value = cfg->def; 6869 } else if (new_value < cfg->low) { 6870 new_value = cfg->def; 6871 } 6872 6873 /* Perform additional checks */ 6874 switch (index) { 6875 case CFG_NPIV_ENABLE: 6876 if (hba->tgt_mode) { 6877 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6878 "enable-npiv: Not supported in target mode. 
" 6879 "Disabling."); 6880 6881 new_value = 0; 6882 } 6883 break; 6884 6885 #ifdef DHCHAP_SUPPORT 6886 case CFG_AUTH_ENABLE: 6887 if (hba->tgt_mode) { 6888 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6889 "enable-auth: Not supported in target mode. " 6890 "Disabling."); 6891 6892 new_value = 0; 6893 } 6894 break; 6895 #endif /* DHCHAP_SUPPORT */ 6896 6897 case CFG_NUM_NODES: 6898 switch (new_value) { 6899 case 1: 6900 case 2: 6901 /* Must have at least 3 if not 0 */ 6902 return (3); 6903 6904 default: 6905 break; 6906 } 6907 break; 6908 6909 case CFG_FW_CHECK: 6910 /* The 0x2 bit implies the 0x1 bit will also be set */ 6911 if (new_value & 0x2) { 6912 new_value |= 0x1; 6913 } 6914 6915 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */ 6916 if (!(new_value & 0x3) && (new_value & 0x4)) { 6917 new_value &= ~0x4; 6918 } 6919 break; 6920 6921 case CFG_LINK_SPEED: 6922 if (vpd->link_speed) { 6923 switch (new_value) { 6924 case 0: 6925 break; 6926 6927 case 1: 6928 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 6929 new_value = 0; 6930 6931 EMLXS_MSGF(EMLXS_CONTEXT, 6932 &emlxs_init_msg, 6933 "link-speed: 1Gb not supported " 6934 "by adapter. Switching to auto " 6935 "detect."); 6936 } 6937 break; 6938 6939 case 2: 6940 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 6941 new_value = 0; 6942 6943 EMLXS_MSGF(EMLXS_CONTEXT, 6944 &emlxs_init_msg, 6945 "link-speed: 2Gb not supported " 6946 "by adapter. Switching to auto " 6947 "detect."); 6948 } 6949 break; 6950 case 4: 6951 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 6952 new_value = 0; 6953 6954 EMLXS_MSGF(EMLXS_CONTEXT, 6955 &emlxs_init_msg, 6956 "link-speed: 4Gb not supported " 6957 "by adapter. Switching to auto " 6958 "detect."); 6959 } 6960 break; 6961 6962 case 8: 6963 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 6964 new_value = 0; 6965 6966 EMLXS_MSGF(EMLXS_CONTEXT, 6967 &emlxs_init_msg, 6968 "link-speed: 8Gb not supported " 6969 "by adapter. 
Switching to auto " 6970 "detect."); 6971 } 6972 break; 6973 6974 case 10: 6975 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 6976 new_value = 0; 6977 6978 EMLXS_MSGF(EMLXS_CONTEXT, 6979 &emlxs_init_msg, 6980 "link-speed: 10Gb not supported " 6981 "by adapter. Switching to auto " 6982 "detect."); 6983 } 6984 break; 6985 6986 default: 6987 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 6988 "link-speed: Invalid value=%d provided. " 6989 "Switching to auto detect.", 6990 new_value); 6991 6992 new_value = 0; 6993 } 6994 } else { /* Perform basic validity check */ 6995 6996 /* Perform additional check on link speed */ 6997 switch (new_value) { 6998 case 0: 6999 case 1: 7000 case 2: 7001 case 4: 7002 case 8: 7003 case 10: 7004 /* link-speed is a valid choice */ 7005 break; 7006 7007 default: 7008 new_value = cfg->def; 7009 } 7010 } 7011 break; 7012 7013 case CFG_TOPOLOGY: 7014 /* Perform additional check on topology */ 7015 switch (new_value) { 7016 case 0: 7017 case 2: 7018 case 4: 7019 case 6: 7020 /* topology is a valid choice */ 7021 break; 7022 7023 default: 7024 return (cfg->def); 7025 } 7026 break; 7027 7028 #ifdef DHCHAP_SUPPORT 7029 case CFG_AUTH_TYPE: 7030 { 7031 uint32_t shift; 7032 uint32_t mask; 7033 7034 /* Perform additional check on auth type */ 7035 shift = 12; 7036 mask = 0xF000; 7037 for (i = 0; i < 4; i++) { 7038 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 7039 return (cfg->def); 7040 } 7041 7042 shift -= 4; 7043 mask >>= 4; 7044 } 7045 break; 7046 } 7047 7048 case CFG_AUTH_HASH: 7049 { 7050 uint32_t shift; 7051 uint32_t mask; 7052 7053 /* Perform additional check on auth hash */ 7054 shift = 12; 7055 mask = 0xF000; 7056 for (i = 0; i < 4; i++) { 7057 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 7058 return (cfg->def); 7059 } 7060 7061 shift -= 4; 7062 mask >>= 4; 7063 } 7064 break; 7065 } 7066 7067 case CFG_AUTH_GROUP: 7068 { 7069 uint32_t shift; 7070 uint32_t mask; 7071 7072 /* Perform additional check on auth group */ 7073 
shift = 28; 7074 mask = 0xF0000000; 7075 for (i = 0; i < 8; i++) { 7076 if (((new_value & mask) >> shift) > 7077 DFC_AUTH_GROUP_MAX) { 7078 return (cfg->def); 7079 } 7080 7081 shift -= 4; 7082 mask >>= 4; 7083 } 7084 break; 7085 } 7086 7087 case CFG_AUTH_INTERVAL: 7088 if (new_value < 10) { 7089 return (10); 7090 } 7091 break; 7092 7093 7094 #endif /* DHCHAP_SUPPORT */ 7095 7096 } /* switch */ 7097 7098 return (new_value); 7099 7100 } /* emlxs_check_parm() */ 7101 7102 7103 extern uint32_t 7104 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 7105 { 7106 emlxs_port_t *port = &PPORT; 7107 emlxs_port_t *vport; 7108 uint32_t vpi; 7109 emlxs_config_t *cfg; 7110 uint32_t old_value; 7111 7112 if (index > NUM_CFG_PARAM) { 7113 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7114 "emlxs_set_parm failed. Invalid index = %d", index); 7115 7116 return ((uint32_t)FC_FAILURE); 7117 } 7118 7119 cfg = &hba->config[index]; 7120 7121 if (!(cfg->flags & PARM_DYNAMIC)) { 7122 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7123 "emlxs_set_parm failed. %s is not dynamic.", cfg->string); 7124 7125 return ((uint32_t)FC_FAILURE); 7126 } 7127 7128 /* Check new value */ 7129 old_value = new_value; 7130 new_value = emlxs_check_parm(hba, index, new_value); 7131 7132 if (old_value != new_value) { 7133 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7134 "emlxs_set_parm: %s invalid. 0x%x --> 0x%x", 7135 cfg->string, old_value, new_value); 7136 } 7137 7138 /* Return now if no actual change */ 7139 if (new_value == cfg->current) { 7140 return (FC_SUCCESS); 7141 } 7142 7143 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7144 "emlxs_set_parm: %s changing. 
0x%x --> 0x%x", 7145 cfg->string, cfg->current, new_value); 7146 7147 old_value = cfg->current; 7148 cfg->current = new_value; 7149 7150 /* React to change if needed */ 7151 switch (index) { 7152 7153 case CFG_PCI_MAX_READ: 7154 /* Update MXR */ 7155 emlxs_pcix_mxr_update(hba, 1); 7156 break; 7157 7158 case CFG_SLI_MODE: 7159 /* Check SLI mode */ 7160 if ((hba->sli_mode == 3) && (new_value == 2)) { 7161 /* All vports must be disabled first */ 7162 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7163 vport = &VPORT(vpi); 7164 7165 if (vport->flag & EMLXS_PORT_ENABLE) { 7166 /* Reset current value */ 7167 cfg->current = old_value; 7168 7169 EMLXS_MSGF(EMLXS_CONTEXT, 7170 &emlxs_sfs_debug_msg, 7171 "emlxs_set_parm failed. %s: vpi=%d " 7172 "still enabled. Value restored to " 7173 "0x%x.", cfg->string, vpi, 7174 old_value); 7175 7176 return (2); 7177 } 7178 } 7179 } 7180 break; 7181 7182 case CFG_NPIV_ENABLE: 7183 /* Check if NPIV is being disabled */ 7184 if ((old_value == 1) && (new_value == 0)) { 7185 /* All vports must be disabled first */ 7186 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 7187 vport = &VPORT(vpi); 7188 7189 if (vport->flag & EMLXS_PORT_ENABLE) { 7190 /* Reset current value */ 7191 cfg->current = old_value; 7192 7193 EMLXS_MSGF(EMLXS_CONTEXT, 7194 &emlxs_sfs_debug_msg, 7195 "emlxs_set_parm failed. %s: vpi=%d " 7196 "still enabled. 
Value restored to " 7197 "0x%x.", cfg->string, vpi, 7198 old_value); 7199 7200 return (2); 7201 } 7202 } 7203 } 7204 7205 /* Trigger adapter reset */ 7206 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 7207 7208 break; 7209 7210 7211 case CFG_VPORT_RESTRICTED: 7212 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 7213 vport = &VPORT(vpi); 7214 7215 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 7216 continue; 7217 } 7218 7219 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 7220 continue; 7221 } 7222 7223 if (new_value) { 7224 vport->flag |= EMLXS_PORT_RESTRICTED; 7225 } else { 7226 vport->flag &= ~EMLXS_PORT_RESTRICTED; 7227 } 7228 } 7229 7230 break; 7231 7232 #ifdef DHCHAP_SUPPORT 7233 case CFG_AUTH_ENABLE: 7234 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 7235 break; 7236 7237 case CFG_AUTH_TMO: 7238 hba->auth_cfg.authentication_timeout = cfg->current; 7239 break; 7240 7241 case CFG_AUTH_MODE: 7242 hba->auth_cfg.authentication_mode = cfg->current; 7243 break; 7244 7245 case CFG_AUTH_BIDIR: 7246 hba->auth_cfg.bidirectional = cfg->current; 7247 break; 7248 7249 case CFG_AUTH_TYPE: 7250 hba->auth_cfg.authentication_type_priority[0] = 7251 (cfg->current & 0xF000) >> 12; 7252 hba->auth_cfg.authentication_type_priority[1] = 7253 (cfg->current & 0x0F00) >> 8; 7254 hba->auth_cfg.authentication_type_priority[2] = 7255 (cfg->current & 0x00F0) >> 4; 7256 hba->auth_cfg.authentication_type_priority[3] = 7257 (cfg->current & 0x000F); 7258 break; 7259 7260 case CFG_AUTH_HASH: 7261 hba->auth_cfg.hash_priority[0] = 7262 (cfg->current & 0xF000) >> 12; 7263 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 7264 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 7265 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 7266 break; 7267 7268 case CFG_AUTH_GROUP: 7269 hba->auth_cfg.dh_group_priority[0] = 7270 (cfg->current & 0xF0000000) >> 28; 7271 hba->auth_cfg.dh_group_priority[1] = 7272 (cfg->current & 0x0F000000) >> 24; 7273 hba->auth_cfg.dh_group_priority[2] = 7274 
(cfg->current & 0x00F00000) >> 20; 7275 hba->auth_cfg.dh_group_priority[3] = 7276 (cfg->current & 0x000F0000) >> 16; 7277 hba->auth_cfg.dh_group_priority[4] = 7278 (cfg->current & 0x0000F000) >> 12; 7279 hba->auth_cfg.dh_group_priority[5] = 7280 (cfg->current & 0x00000F00) >> 8; 7281 hba->auth_cfg.dh_group_priority[6] = 7282 (cfg->current & 0x000000F0) >> 4; 7283 hba->auth_cfg.dh_group_priority[7] = 7284 (cfg->current & 0x0000000F); 7285 break; 7286 7287 case CFG_AUTH_INTERVAL: 7288 hba->auth_cfg.reauthenticate_time_interval = cfg->current; 7289 break; 7290 #endif /* DHCHAP_SUPPORT */ 7291 7292 } 7293 7294 return (FC_SUCCESS); 7295 7296 } /* emlxs_set_parm() */ 7297 7298 7299 /* 7300 * emlxs_mem_alloc OS specific routine for memory allocation / mapping 7301 * 7302 * The buf_info->flags field describes the memory operation requested. 7303 * 7304 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA 7305 * Virtual address is supplied in buf_info->virt 7306 * DMA mapping flag is in buf_info->align 7307 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE) 7308 * The mapped physical address is returned buf_info->phys 7309 * 7310 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and 7311 * if FC_MBUF_DMA is set the memory is also mapped for DMA 7312 * The byte alignment of the memory request is supplied in buf_info->align 7313 * The byte size of the memory request is supplied in buf_info->size 7314 * The virtual address is returned buf_info->virt 7315 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA) 7316 */ 7317 extern uint8_t * 7318 emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7319 { 7320 emlxs_port_t *port = &PPORT; 7321 ddi_dma_attr_t dma_attr; 7322 ddi_device_acc_attr_t dev_attr; 7323 uint_t cookie_count; 7324 size_t dma_reallen; 7325 ddi_dma_cookie_t dma_cookie; 7326 uint_t dma_flag; 7327 int status; 7328 7329 dma_attr = hba->dma_attr_1sg; 7330 dev_attr = emlxs_data_acc_attr; 7331 7332 if 
(buf_info->flags & FC_MBUF_SNGLSG) { 7333 dma_attr.dma_attr_sgllen = 1; 7334 } 7335 7336 if (buf_info->flags & FC_MBUF_DMA32) { 7337 dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff; 7338 } 7339 7340 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7341 7342 if (buf_info->virt == 0) { 7343 goto done; 7344 } 7345 7346 /* 7347 * Allocate the DMA handle for this DMA object 7348 */ 7349 status = ddi_dma_alloc_handle((void *)hba->dip, 7350 &dma_attr, DDI_DMA_DONTWAIT, 7351 NULL, (ddi_dma_handle_t *)&buf_info->dma_handle); 7352 if (status != DDI_SUCCESS) { 7353 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7354 "ddi_dma_alloc_handle failed: size=%x align=%x " 7355 "flags=%x", buf_info->size, buf_info->align, 7356 buf_info->flags); 7357 7358 buf_info->phys = 0; 7359 buf_info->dma_handle = 0; 7360 goto done; 7361 } 7362 7363 switch (buf_info->align) { 7364 case DMA_READ_WRITE: 7365 dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT); 7366 break; 7367 case DMA_READ_ONLY: 7368 dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT); 7369 break; 7370 case DMA_WRITE_ONLY: 7371 dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT); 7372 break; 7373 } 7374 7375 /* Map this page of memory */ 7376 status = ddi_dma_addr_bind_handle( 7377 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7378 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7379 dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie, 7380 &cookie_count); 7381 7382 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7383 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7384 "ddi_dma_addr_bind_handle failed: status=%x " 7385 "count=%x flags=%x", status, cookie_count, 7386 buf_info->flags); 7387 7388 (void) ddi_dma_free_handle( 7389 (ddi_dma_handle_t *)&buf_info->dma_handle); 7390 buf_info->phys = 0; 7391 buf_info->dma_handle = 0; 7392 goto done; 7393 } 7394 7395 if (hba->bus_type == SBUS_FC) { 7396 7397 int32_t burstsizes_limit = 0xff; 7398 int32_t ret_burst; 7399 7400 ret_burst = ddi_dma_burstsizes( 7401 buf_info->dma_handle) & 
burstsizes_limit; 7402 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7403 ret_burst) == DDI_FAILURE) { 7404 EMLXS_MSGF(EMLXS_CONTEXT, 7405 &emlxs_mem_alloc_failed_msg, 7406 "ddi_dma_set_sbus64 failed."); 7407 } 7408 } 7409 7410 /* Save Physical address */ 7411 buf_info->phys = dma_cookie.dmac_laddress; 7412 7413 /* 7414 * Just to be sure, let's add this 7415 */ 7416 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7417 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7418 7419 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7420 7421 dma_attr.dma_attr_align = buf_info->align; 7422 7423 /* 7424 * Allocate the DMA handle for this DMA object 7425 */ 7426 status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr, 7427 DDI_DMA_DONTWAIT, NULL, 7428 (ddi_dma_handle_t *)&buf_info->dma_handle); 7429 if (status != DDI_SUCCESS) { 7430 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7431 "ddi_dma_alloc_handle failed: size=%x align=%x " 7432 "flags=%x", buf_info->size, buf_info->align, 7433 buf_info->flags); 7434 7435 buf_info->virt = 0; 7436 buf_info->phys = 0; 7437 buf_info->data_handle = 0; 7438 buf_info->dma_handle = 0; 7439 goto done; 7440 } 7441 7442 status = ddi_dma_mem_alloc( 7443 (ddi_dma_handle_t)buf_info->dma_handle, 7444 (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT, 7445 DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt, 7446 &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle); 7447 7448 if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) { 7449 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7450 "ddi_dma_mem_alloc failed: size=%x align=%x " 7451 "flags=%x", buf_info->size, buf_info->align, 7452 buf_info->flags); 7453 7454 (void) ddi_dma_free_handle( 7455 (ddi_dma_handle_t *)&buf_info->dma_handle); 7456 7457 buf_info->virt = 0; 7458 buf_info->phys = 0; 7459 buf_info->data_handle = 0; 7460 buf_info->dma_handle = 0; 7461 goto done; 7462 } 7463 7464 /* Map this page of memory */ 7465 status = 
ddi_dma_addr_bind_handle( 7466 (ddi_dma_handle_t)buf_info->dma_handle, NULL, 7467 (caddr_t)buf_info->virt, (size_t)buf_info->size, 7468 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL, 7469 &dma_cookie, &cookie_count); 7470 7471 if (status != DDI_DMA_MAPPED || (cookie_count > 1)) { 7472 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7473 "ddi_dma_addr_bind_handle failed: status=%x " 7474 "count=%d size=%x align=%x flags=%x", status, 7475 cookie_count, buf_info->size, buf_info->align, 7476 buf_info->flags); 7477 7478 (void) ddi_dma_mem_free( 7479 (ddi_acc_handle_t *)&buf_info->data_handle); 7480 (void) ddi_dma_free_handle( 7481 (ddi_dma_handle_t *)&buf_info->dma_handle); 7482 7483 buf_info->virt = 0; 7484 buf_info->phys = 0; 7485 buf_info->dma_handle = 0; 7486 buf_info->data_handle = 0; 7487 goto done; 7488 } 7489 7490 if (hba->bus_type == SBUS_FC) { 7491 int32_t burstsizes_limit = 0xff; 7492 int32_t ret_burst; 7493 7494 ret_burst = 7495 ddi_dma_burstsizes(buf_info-> 7496 dma_handle) & burstsizes_limit; 7497 if (ddi_dma_set_sbus64(buf_info->dma_handle, 7498 ret_burst) == DDI_FAILURE) { 7499 EMLXS_MSGF(EMLXS_CONTEXT, 7500 &emlxs_mem_alloc_failed_msg, 7501 "ddi_dma_set_sbus64 failed."); 7502 } 7503 } 7504 7505 /* Save Physical address */ 7506 buf_info->phys = dma_cookie.dmac_laddress; 7507 7508 /* Just to be sure, let's add this */ 7509 EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle, 7510 (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV); 7511 7512 } else { /* allocate virtual memory */ 7513 7514 buf_info->virt = 7515 (uint32_t *)kmem_zalloc((size_t)buf_info->size, 7516 KM_NOSLEEP); 7517 buf_info->phys = 0; 7518 buf_info->data_handle = 0; 7519 buf_info->dma_handle = 0; 7520 7521 if (buf_info->virt == (uint32_t *)0) { 7522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg, 7523 "size=%x flags=%x", buf_info->size, 7524 buf_info->flags); 7525 } 7526 7527 } 7528 7529 done: 7530 7531 return ((uint8_t *)buf_info->virt); 7532 7533 } /* 
emlxs_mem_alloc() */ 7534 7535 7536 7537 /* 7538 * emlxs_mem_free: 7539 * 7540 * OS specific routine for memory de-allocation / unmapping 7541 * 7542 * The buf_info->flags field describes the memory operation requested. 7543 * 7544 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped 7545 * for DMA, but not freed. The mapped physical address to be unmapped is in 7546 * buf_info->phys 7547 * 7548 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only 7549 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in 7550 * buf_info->phys. The virtual address to be freed is in buf_info->virt 7551 */ 7552 /*ARGSUSED*/ 7553 extern void 7554 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info) 7555 { 7556 if (buf_info->flags & FC_MBUF_PHYSONLY) { 7557 7558 if (buf_info->dma_handle) { 7559 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7560 (void) ddi_dma_free_handle( 7561 (ddi_dma_handle_t *)&buf_info->dma_handle); 7562 buf_info->dma_handle = NULL; 7563 } 7564 7565 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) { 7566 7567 if (buf_info->dma_handle) { 7568 (void) ddi_dma_unbind_handle(buf_info->dma_handle); 7569 (void) ddi_dma_mem_free( 7570 (ddi_acc_handle_t *)&buf_info->data_handle); 7571 (void) ddi_dma_free_handle( 7572 (ddi_dma_handle_t *)&buf_info->dma_handle); 7573 buf_info->dma_handle = NULL; 7574 buf_info->data_handle = NULL; 7575 } 7576 7577 } else { /* allocate virtual memory */ 7578 7579 if (buf_info->virt) { 7580 kmem_free(buf_info->virt, (size_t)buf_info->size); 7581 buf_info->virt = NULL; 7582 } 7583 } 7584 7585 } /* emlxs_mem_free() */ 7586 7587 7588 /* 7589 * A channel has a association with a msi id. 7590 * One msi id could be associated with multiple channels. 
 */
static int
emlxs_next_chan(emlxs_hba_t *hba, int msi_id)
{
	emlxs_config_t *cfg = &CFG;
	EQ_DESC_t *eqp;
	int chan;
	int num_wq;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		/* For SLI4 round robin all WQs associated with the msi_id */
		eqp = &hba->sli.sli4.eq[msi_id];
		num_wq = cfg[CFG_NUM_WQ].current;

		/* lastwq_lock serializes the round-robin cursor */
		mutex_enter(&eqp->lastwq_lock);
		chan = eqp->lastwq;
		eqp->lastwq++;
		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
			/* Wrap back to the first WQ of this msi_id */
			eqp->lastwq -= num_wq;
		}
		mutex_exit(&eqp->lastwq_lock);

	} else {
		/* Pre-SLI4 modes always use the single FCP channel */
		chan = hba->channel_fcp;
	}
	return (chan);
}


/*
 * emlxs_select_fcp_channel() - Choose the channel for an FCP command.
 * FCP2 devices and reset commands are pinned to the default FCP channel;
 * everything else is distributed by emlxs_next_chan().
 */
static int
emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
{
	int channel;


	/* IO to FCP2 device or a device reset always use fcp channel */
	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
		return (hba->channel_fcp);
	}

	channel = emlxs_next_chan(hba, 0);


	/* If channel is closed, then try fcp channel */
	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
		channel = hba->channel_fcp;
	}
	return (channel);

}

static int32_t
emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rc;

	/*
	 * This routine provides an alternative target reset processing
	 * method. Instead of sending an actual target reset to the
	 * NPort, we will first unreg the login to that NPort. This
	 * will cause all the outstanding IOs to quickly complete with
	 * a NO RPI local error. Next we will force the ULP to relogin
	 * to the NPort by sending an RSCN (for that NPort) to the
	 * upper layer. This method should result in a fast target
	 * reset, as far as IOs completing; however, since an actual
	 * target reset is not sent to the NPort, it is not 100%
	 * compatible. Things like reservations will not be broken.
	 * By default this option is DISABLED, and its only enabled thru
	 * a hidden configuration parameter (fast-tgt-reset).
	 */
	rc = FC_TRAN_BUSY;
	pkt = PRIV2PKT(sbp);
	cfg = &CFG;

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		/* issue the mbox cmd to the sli */
		mb = (MAILBOX *) mbq->mbox;
		bzero((void *) mb, MAILBOX_CMD_BSIZE);
		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
#ifdef SLI3_SUPPORT
		mb->un.varUnregLogin.vpi = port->vpi;
#endif /* SLI3_SUPPORT */
		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Fast Target Reset: unreg rpi=x%x tmr %d", ndlp->nlp_Rpi,
		    cfg[CFG_FAST_TGT_RESET_TMR].current);

		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
		    == MBX_SUCCESS) {

			/* The login is gone; the RPI is no longer valid */
			ndlp->nlp_Rpi = 0;

			mutex_enter(&sbp->mtx);
			sbp->node = (void *)ndlp;
			sbp->did = ndlp->nlp_DID;
			mutex_exit(&sbp->mtx);

			if (pkt->pkt_rsplen) {
				bzero((uint8_t *)pkt->pkt_resp,
				    pkt->pkt_rsplen);
			}
			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
				/* Schedule the forced RSCN for the ULP */
				ndlp->nlp_force_rscn = hba->timer_tics +
				    cfg[CFG_FAST_TGT_RESET_TMR].current;
			}

			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
		}

		/*
		 * NOTE(review): rc becomes FC_SUCCESS even when the mailbox
		 * command fails (only the emlxs_mem_get failure keeps
		 * FC_TRAN_BUSY) - presumably intentional, but confirm.
		 */
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
		rc = FC_SUCCESS;
	}
	return (rc);
}

/*
 * emlxs_send_fcp_cmd() - Transmit an FCP command packet for this port.
 */
static int32_t
emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	char *cmd;
	uint16_t lun;
	FCP_CMND *fcp_cmd;
	uint32_t did;
7727 uint32_t reset = 0; 7728 int channel; 7729 int32_t rval; 7730 7731 pkt = PRIV2PKT(sbp); 7732 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 7733 7734 /* Find target node object */ 7735 ndlp = emlxs_node_find_did(port, did); 7736 7737 if (!ndlp || !ndlp->nlp_active) { 7738 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7739 "Node not found. did=%x", did); 7740 7741 return (FC_BADPACKET); 7742 } 7743 7744 /* When the fcp channel is closed we stop accepting any FCP cmd */ 7745 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 7746 return (FC_TRAN_BUSY); 7747 } 7748 7749 /* Snoop for target or lun reset first */ 7750 /* We always use FCP channel to send out target/lun reset fcp cmds */ 7751 /* interrupt affinity only applies to non tgt lun reset fcp cmd */ 7752 7753 cmd = (char *)pkt->pkt_cmd; 7754 lun = *((uint16_t *)cmd); 7755 lun = LE_SWAP16(lun); 7756 7757 iocbq = &sbp->iocbq; 7758 iocb = &iocbq->iocb; 7759 iocbq->node = (void *) ndlp; 7760 7761 /* Check for target reset */ 7762 if (cmd[10] & 0x20) { 7763 /* prepare iocb */ 7764 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7765 hba->channel_fcp)) != FC_SUCCESS) { 7766 7767 if (rval == 0xff) { 7768 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7769 0, 1); 7770 rval = FC_SUCCESS; 7771 } 7772 7773 return (rval); 7774 } 7775 7776 mutex_enter(&sbp->mtx); 7777 sbp->pkt_flags |= PACKET_FCP_TGT_RESET; 7778 sbp->pkt_flags |= PACKET_POLLED; 7779 mutex_exit(&sbp->mtx); 7780 7781 #ifdef SAN_DIAG_SUPPORT 7782 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET, 7783 (HBA_WWN *)&ndlp->nlp_portname, -1); 7784 #endif /* SAN_DIAG_SUPPORT */ 7785 7786 iocbq->flag |= IOCB_PRIORITY; 7787 7788 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7789 "Target Reset: did=%x", did); 7790 7791 cfg = &CFG; 7792 if (cfg[CFG_FAST_TGT_RESET].current) { 7793 if (emlxs_fast_target_reset(port, sbp, ndlp) == 7794 FC_SUCCESS) { 7795 return (FC_SUCCESS); 7796 } 7797 } 7798 7799 /* Close the node for any further normal IO */ 7800 
emlxs_node_close(port, ndlp, hba->channel_fcp, 7801 pkt->pkt_timeout); 7802 7803 /* Flush the IO's on the tx queues */ 7804 (void) emlxs_tx_node_flush(port, ndlp, 7805 &hba->chan[hba->channel_fcp], 0, sbp); 7806 7807 /* This is the target reset fcp cmd */ 7808 reset = 1; 7809 } 7810 7811 /* Check for lun reset */ 7812 else if (cmd[10] & 0x10) { 7813 /* prepare iocb */ 7814 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7815 hba->channel_fcp)) != FC_SUCCESS) { 7816 7817 if (rval == 0xff) { 7818 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 7819 0, 1); 7820 rval = FC_SUCCESS; 7821 } 7822 7823 return (rval); 7824 } 7825 7826 mutex_enter(&sbp->mtx); 7827 sbp->pkt_flags |= PACKET_FCP_LUN_RESET; 7828 sbp->pkt_flags |= PACKET_POLLED; 7829 mutex_exit(&sbp->mtx); 7830 7831 #ifdef SAN_DIAG_SUPPORT 7832 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET, 7833 (HBA_WWN *)&ndlp->nlp_portname, lun); 7834 #endif /* SAN_DIAG_SUPPORT */ 7835 7836 iocbq->flag |= IOCB_PRIORITY; 7837 7838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7839 "LUN Reset: did=%x LUN=%02x02x", did, cmd[0], cmd[1]); 7840 7841 /* Flush the IO's on the tx queues for this lun */ 7842 (void) emlxs_tx_lun_flush(port, ndlp, lun, sbp); 7843 7844 /* This is the lun reset fcp cmd */ 7845 reset = 1; 7846 } 7847 7848 channel = emlxs_select_fcp_channel(hba, ndlp, reset); 7849 7850 #ifdef SAN_DIAG_SUPPORT 7851 sbp->sd_start_time = gethrtime(); 7852 #endif /* SAN_DIAG_SUPPORT */ 7853 7854 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 7855 emlxs_swap_fcp_pkt(sbp); 7856 #endif /* EMLXS_MODREV2X */ 7857 7858 fcp_cmd = (FCP_CMND *) pkt->pkt_cmd; 7859 7860 if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) { 7861 fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE; 7862 } 7863 7864 if (reset == 0) { 7865 /* 7866 * tgt lun reset fcp cmd has been prepared 7867 * separately in the beginning 7868 */ 7869 if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp, 7870 channel)) != FC_SUCCESS) { 7871 7872 if (rval == 0xff) { 7873 emlxs_pkt_complete(sbp, 
IOSTAT_LOCAL_REJECT, 7874 0, 1); 7875 rval = FC_SUCCESS; 7876 } 7877 7878 return (rval); 7879 } 7880 } 7881 7882 cp = &hba->chan[channel]; 7883 cp->ulpSendCmd++; 7884 7885 /* Initalize sbp */ 7886 mutex_enter(&sbp->mtx); 7887 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 7888 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 7889 sbp->node = (void *)ndlp; 7890 sbp->lun = lun; 7891 sbp->class = iocb->ULPCLASS; 7892 sbp->did = ndlp->nlp_DID; 7893 mutex_exit(&sbp->mtx); 7894 7895 if (pkt->pkt_cmdlen) { 7896 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 7897 DDI_DMA_SYNC_FORDEV); 7898 } 7899 7900 if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) { 7901 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen, 7902 DDI_DMA_SYNC_FORDEV); 7903 } 7904 7905 HBASTATS.FcpIssued++; 7906 7907 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 7908 return (FC_SUCCESS); 7909 7910 } /* emlxs_send_fcp_cmd() */ 7911 7912 7913 7914 7915 #ifdef SFCT_SUPPORT 7916 static int32_t 7917 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp) 7918 { 7919 emlxs_hba_t *hba = HBA; 7920 fc_packet_t *pkt; 7921 IOCBQ *iocbq; 7922 IOCB *iocb; 7923 NODELIST *ndlp; 7924 CHANNEL *cp; 7925 uint16_t iotag; 7926 uint32_t did; 7927 ddi_dma_cookie_t *cp_cmd; 7928 7929 pkt = PRIV2PKT(sbp); 7930 7931 did = sbp->did; 7932 ndlp = sbp->node; 7933 7934 iocbq = &sbp->iocbq; 7935 iocb = &iocbq->iocb; 7936 7937 /* Make sure node is still active */ 7938 if (!ndlp->nlp_active) { 7939 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 7940 "*Node not found. did=%x", did); 7941 7942 return (FC_BADPACKET); 7943 } 7944 7945 /* If gate is closed */ 7946 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) { 7947 return (FC_TRAN_BUSY); 7948 } 7949 7950 /* Get the iotag by registering the packet */ 7951 iotag = emlxs_register_pkt(sbp->channel, sbp); 7952 7953 if (!iotag) { 7954 /* No more command slots available, retry later */ 7955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 7956 "*Adapter Busy. 
Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Point of no return */

	cp = sbp->channel;
	cp->ulpSendCmd++;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
#else
	cp_cmd = &pkt->pkt_cmd_cookie;
#endif /* >= EMLXS_MODREV3 */

	/* Point the BDE at the command payload's DMA cookie */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
	iocb->un.fcpt64.bdl.bdeFlags = 0;

	if (hba->sli_mode < 3) {
		iocb->ULPBDECOUNT = 1;
		iocb->ULPLE = 1;
	} else { /* SLI3 */

		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
	}

	/* Initialize iocbq */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *)cp;

	/* Initialize iocb */
	iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
	iocb->ULPIOTAG = iotag;
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;
	iocb->ULPCLASS = sbp->class;
	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;

	/* Set the pkt timer */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

	if (pkt->pkt_cmdlen) {
		/* Flush the response payload to the device before issue */
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.FcpIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_status() */


/*
 * emlxs_send_fct_abort() - Abort (or close) a target-mode exchange.
 * An ABTS abort is used while the link is up; otherwise the exchange
 * is simply closed.
 */
static int32_t
emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);

	did = sbp->did;
	ndlp = sbp->node;


	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Make sure node is still active */
	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "*Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* If gate is closed */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(sbp->channel, sbp);

	if (!iotag) {
		/* No more command slots available, retry later */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "*Adapter Busy. Unable to allocate iotag: did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Point of no return */
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *)sbp->channel;
	((CHANNEL *)sbp->channel)->ulpSendCmd++;

	/*
	 * Don't give the abort priority, we want the IOCB
	 * we are aborting to be processed first.
	 */
	iocbq->flag |= IOCB_SPECIAL;

	iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
	iocb->ULPIOTAG = iotag;
	iocb->ULPLE = 1;
	iocb->ULPCLASS = sbp->class;
	iocb->ULPOWNER = OWN_CHIP;

	if (hba->state >= FC_LINK_UP) {
		/* Create the abort IOCB */
		iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;

	} else {
		/* Create the close IOCB */
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;

	}

	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	/* Set the pkt timer */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_abort() */

#endif /* SFCT_SUPPORT */


/*
 * emlxs_send_ip() - Transmit an IP-over-FC packet on the IP channel.
 */
static int32_t
emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint32_t i;
	NODELIST *ndlp;
	uint32_t did;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[hba->channel_ip];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Check if node exists */
	/* Broadcast did is always a success */
	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	/* Check if gate is temporarily closed */
	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Check if an exchange has been created */
	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
		/* No exchange. Try creating one */
		(void) emlxs_create_xri(port, cp, ndlp);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. 
Exchange not found. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
	/* on BROADCAST commands */
	if (pkt->pkt_cmdlen == 0) {
		/* Set the pkt_cmdlen to the cookie size */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
		}
#else
		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
#endif /* >= EMLXS_MODREV3 */

	}

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the SLI layer already failed the packet */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp->ulpSendCmd++;

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	/* Timeouts > 255 ticks get no extra 10-tick grace period */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_cmdlen) {
		/* Flush the command buffer to the device before issue */
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ip() */


/*
 * emlxs_send_els() - Transmit an extended link service (ELS) command.
 */
static int32_t
emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_port_t *vport;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	CHANNEL *cp;
	uint32_t cmd;
	int i;
	ELS_PKT *els_pkt;
	NODELIST *ndlp;
	uint32_t did;
	char fcsp_msg[32];
	int rc;
	int32_t rval;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Point of no return, except for ADISC & PLOGI */

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
		if (port->vpi > 0) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				if (!(port->flag & EMLXS_PORT_INIT_VPI_CMPL)) {
					(void) emlxs_mb_init_vpi(port);
					if (!(port->flag &
					    EMLXS_PORT_INIT_VPI_CMPL)) {
						pkt->pkt_state =
						    FC_PKT_LOCAL_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
						emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

						return (FC_FAILURE);
					}
				}
			}
			/* Virtual ports fabric-login with FDISC, not FLOGI */
			cmd = ELS_CMD_FDISC;
			*((uint32_t *)pkt->pkt_cmd) = cmd;
		}
		ndlp = NULL;

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	case ELS_CMD_PLOGI:
		/* Make sure we
don't log into ourself */ 8270 for (i = 0; i < MAX_VPORTS; i++) { 8271 vport = &VPORT(i); 8272 8273 if (!(vport->flag & EMLXS_PORT_BOUND)) { 8274 continue; 8275 } 8276 8277 if (did == vport->did) { 8278 pkt->pkt_state = FC_PKT_NPORT_RJT; 8279 8280 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 8281 emlxs_unswap_pkt(sbp); 8282 #endif /* EMLXS_MODREV2X */ 8283 8284 return (FC_FAILURE); 8285 } 8286 } 8287 8288 ndlp = NULL; 8289 8290 /* Check if this is the first PLOGI */ 8291 /* after a PT_TO_PT connection */ 8292 if ((hba->flag & FC_PT_TO_PT) && (port->did == 0)) { 8293 MAILBOXQ *mbox; 8294 8295 /* ULP bug fix */ 8296 if (pkt->pkt_cmd_fhdr.s_id == 0) { 8297 pkt->pkt_cmd_fhdr.s_id = 8298 pkt->pkt_cmd_fhdr.d_id - FP_DEFAULT_DID + 8299 FP_DEFAULT_SID; 8300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, 8301 "PLOGI: P2P Fix. sid=0-->%x did=%x", 8302 pkt->pkt_cmd_fhdr.s_id, 8303 pkt->pkt_cmd_fhdr.d_id); 8304 } 8305 8306 mutex_enter(&EMLXS_PORT_LOCK); 8307 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id); 8308 mutex_exit(&EMLXS_PORT_LOCK); 8309 8310 /* Update our service parms */ 8311 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 8312 MEM_MBOX, 1))) { 8313 emlxs_mb_config_link(hba, mbox); 8314 8315 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, 8316 mbox, MBX_NOWAIT, 0); 8317 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 8318 (void) emlxs_mem_put(hba, MEM_MBOX, 8319 (uint8_t *)mbox); 8320 } 8321 8322 } 8323 } 8324 8325 /* We will process these cmds at the bottom of this routine */ 8326 break; 8327 8328 default: 8329 ndlp = emlxs_node_find_did(port, did); 8330 8331 /* If an ADISC is being sent and we have no node, */ 8332 /* then we must fail the ADISC now */ 8333 if (!ndlp && (cmd == ELS_CMD_ADISC) && !port->tgt_mode) { 8334 8335 /* Build the LS_RJT response */ 8336 els_pkt = (ELS_PKT *)pkt->pkt_resp; 8337 els_pkt->elsCode = 0x01; 8338 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0; 8339 els_pkt->un.lsRjt.un.b.lsRjtRsnCode = 8340 LSRJT_LOGICAL_ERR; 8341 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp = 8342 
LSEXP_NOTHING_MORE; 8343 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03; 8344 8345 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 8346 "ADISC Rejected. Node not found. did=0x%x", did); 8347 8348 if (sbp->channel == NULL) { 8349 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 8350 sbp->channel = 8351 &hba->chan[hba->channel_els]; 8352 } else { 8353 sbp->channel = 8354 &hba->chan[FC_ELS_RING]; 8355 } 8356 } 8357 8358 /* Return this as rejected by the target */ 8359 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1); 8360 8361 return (FC_SUCCESS); 8362 } 8363 } 8364 8365 /* DID == BCAST_DID is special case to indicate that */ 8366 /* RPI is being passed in seq_id field */ 8367 /* This is used by emlxs_send_logo() for target mode */ 8368 8369 /* Initalize iocbq */ 8370 iocbq->node = (void *)ndlp; 8371 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) { 8372 8373 if (rval == 0xff) { 8374 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 8375 rval = FC_SUCCESS; 8376 } 8377 8378 return (rval); 8379 } 8380 8381 cp = &hba->chan[hba->channel_els]; 8382 cp->ulpSendCmd++; 8383 8384 /* Check cmd */ 8385 switch (cmd) { 8386 case ELS_CMD_PRLI: 8387 { 8388 /* 8389 * if our firmware version is 3.20 or later, 8390 * set the following bits for FC-TAPE support. 8391 */ 8392 8393 if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) { 8394 els_pkt->un.prli.ConfmComplAllowed = 1; 8395 els_pkt->un.prli.Retry = 1; 8396 els_pkt->un.prli.TaskRetryIdReq = 1; 8397 } else { 8398 els_pkt->un.prli.ConfmComplAllowed = 0; 8399 els_pkt->un.prli.Retry = 0; 8400 els_pkt->un.prli.TaskRetryIdReq = 0; 8401 } 8402 8403 break; 8404 } 8405 8406 /* This is a patch for the ULP stack. */ 8407 8408 /* 8409 * ULP only reads our service paramters once during bind_port, 8410 * but the service parameters change due to topology. 
8411 */ 8412 case ELS_CMD_FLOGI: 8413 case ELS_CMD_FDISC: 8414 case ELS_CMD_PLOGI: 8415 case ELS_CMD_PDISC: 8416 { 8417 /* Copy latest service parameters to payload */ 8418 bcopy((void *) &port->sparam, 8419 (void *)&els_pkt->un.logi, sizeof (SERV_PARM)); 8420 8421 if ((hba->flag & FC_NPIV_ENABLED) && 8422 (hba->flag & FC_NPIV_SUPPORTED) && 8423 (cmd == ELS_CMD_PLOGI)) { 8424 SERV_PARM *sp; 8425 emlxs_vvl_fmt_t *vvl; 8426 8427 sp = (SERV_PARM *)&els_pkt->un.logi; 8428 sp->VALID_VENDOR_VERSION = 1; 8429 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0]; 8430 vvl->un0.w0.oui = 0x0000C9; 8431 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0); 8432 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0; 8433 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1); 8434 } 8435 8436 #ifdef DHCHAP_SUPPORT 8437 emlxs_dhc_init_sp(port, did, 8438 (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg); 8439 #endif /* DHCHAP_SUPPORT */ 8440 8441 break; 8442 } 8443 8444 } 8445 8446 /* Initialize the sbp */ 8447 mutex_enter(&sbp->mtx); 8448 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 8449 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 8450 sbp->node = (void *)ndlp; 8451 sbp->lun = 0; 8452 sbp->did = did; 8453 mutex_exit(&sbp->mtx); 8454 8455 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s", 8456 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg); 8457 8458 if (pkt->pkt_cmdlen) { 8459 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 8460 DDI_DMA_SYNC_FORDEV); 8461 } 8462 8463 /* Check node */ 8464 switch (cmd) { 8465 case ELS_CMD_FLOGI: 8466 if (port->ini_mode) { 8467 /* Make sure fabric node is destroyed */ 8468 /* It should already have been destroyed at link down */ 8469 /* Unregister the fabric did and attempt a deferred */ 8470 /* iocb send */ 8471 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 8472 if (emlxs_mb_unreg_did(port, FABRIC_DID, NULL, 8473 NULL, iocbq) == 0) { 8474 /* Deferring iocb tx until */ 8475 /* completion of unreg */ 8476 return (FC_SUCCESS); 8477 } 8478 } 8479 } 8480 break; 8481 8482 case ELS_CMD_PLOGI: 8483 8484 ndlp = emlxs_node_find_did(port, did); 8485 8486 if (ndlp && ndlp->nlp_active) { 8487 /* Close the node for any further normal IO */ 8488 emlxs_node_close(port, ndlp, hba->channel_fcp, 8489 pkt->pkt_timeout + 10); 8490 emlxs_node_close(port, ndlp, hba->channel_ip, 8491 pkt->pkt_timeout + 10); 8492 8493 /* Flush tx queues */ 8494 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 8495 8496 /* Flush chip queues */ 8497 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 8498 } 8499 8500 break; 8501 8502 case ELS_CMD_PRLI: 8503 8504 ndlp = emlxs_node_find_did(port, did); 8505 8506 if (ndlp && ndlp->nlp_active) { 8507 /* 8508 * Close the node for any further FCP IO; 8509 * Flush all outstanding I/O only if 8510 * "Establish Image Pair" bit is set. 
/*
 * emlxs_send_els_rsp()
 *
 * Transmit an ELS reply (ACC/LS_RJT) for a previously received unsolicited
 * ELS command. The originating command is located either implicitly (auto
 * replies encode the command in ox_id when ox_id < EMLXS_UB_TOKEN_OFFSET)
 * or by looking up the unsolicited buffer token via emlxs_ub_find().
 * Several ULP_PATCH* interceptions may complete the packet locally without
 * ever reaching the wire. Login-class ACCs are patched with the current
 * service parameters before the IOCB is issued on the ELS channel.
 *
 * Returns FC_SUCCESS when issued or intercepted; FC_BADPACKET if the ub
 * token is invalid; prep-failure codes otherwise.
 */
static int32_t
emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	emlxs_config_t	*cfg = &CFG;
	fc_packet_t	*pkt;
	IOCBQ		*iocbq;
	IOCB		*iocb;
	NODELIST	*ndlp;
	CHANNEL		*cp;
	int		i;
	uint32_t	cmd;
	uint32_t	ucmd;
	ELS_PKT		*els_pkt;
	fc_unsol_buf_t	*ubp;
	emlxs_ub_priv_t	*ub_priv;
	uint32_t	did;
	char		fcsp_msg[32];
	uint8_t		*ub_buffer;
	int32_t		rval;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Acquire the unsolicited command this pkt is replying to */
	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
		/* This is for auto replies when no ub's are used */
		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
		ubp = NULL;
		ub_priv = NULL;
		ub_buffer = NULL;

#ifdef SFCT_SUPPORT
		if (sbp->fct_cmd) {
			fct_els_t *els =
			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
			ub_buffer = (uint8_t *)els->els_req_payload;
		}
#endif /* SFCT_SUPPORT */

	} else {
		/* Find the ub buffer that goes with this reply */
		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
			    "ELS reply: Invalid oxid=%x",
			    pkt->pkt_cmd_fhdr.ox_id);
			return (FC_BADPACKET);
		}

		ub_buffer = (uint8_t *)ubp->ub_buffer;
		ub_priv = ubp->ub_fca_private;
		ucmd = ub_priv->cmd;

		ub_priv->flags |= EMLXS_UB_REPLY;

		/* Reset oxid to ELS command */
		/* We do this because the ub is only valid */
		/* until we return from this thread */
		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
	}

	/* Save the result */
	sbp->ucmd = ucmd;

	if (sbp->channel == NULL) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp->channel = &hba->chan[hba->channel_els];
		} else {
			sbp->channel = &hba->chan[FC_ELS_RING];
		}
	}

	/* Check for interceptions */
	switch (ucmd) {

#ifdef ULP_PATCH2
	case ELS_CMD_LOGO:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH2 */

#ifdef ULP_PATCH3
	case ELS_CMD_PRLI:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH3 */


#ifdef ULP_PATCH4
	case ELS_CMD_PRLO:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH4 */

#ifdef ULP_PATCH6
	case ELS_CMD_RSCN:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
			break;
		}

		/* Check if this RSCN was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = LE_SWAP32(cmd);
			cmd &= ELS_CMD_MASK;

			/*
			 * If ULP is accepting this,
			 * then close affected node
			 */
			if (port->ini_mode && ub_buffer && cmd
			    == ELS_CMD_ACC) {
				fc_rscn_t	*rscn;
				uint32_t	count;
				uint32_t	*lp;

				/*
				 * Only the Leadville code path will
				 * come thru here. The RSCN data is NOT
				 * swapped properly for the Comstar code
				 * path.
				 */
				lp = (uint32_t *)ub_buffer;
				rscn = (fc_rscn_t *)lp++;
				count =
				    ((rscn->rscn_payload_len - 4) / 4);

				/* Close affected ports */
				for (i = 0; i < count; i++, lp++) {
					(void) emlxs_port_offline(port,
					    *lp);
				}
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "RSCN %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this RSCN,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH6 */

	case ELS_CMD_PLOGI:
		/* Check if this PLOGI was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = LE_SWAP32(cmd);
			cmd &= ELS_CMD_MASK;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this PLOGI,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */


	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Check if modifications are needed */
	switch (ucmd) {
	case (ELS_CMD_PRLI):

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */
			/* ULP does not keep track of FCP2 support */

			if (port->ini_mode && hba->vpd.feaLevelHigh >= 0x02) {
				els_pkt->un.prli.ConfmComplAllowed = 1;
				els_pkt->un.prli.Retry = 1;
				els_pkt->un.prli.TaskRetryIdReq = 1;
			} else {
				els_pkt->un.prli.ConfmComplAllowed = 0;
				els_pkt->un.prli.Retry = 0;
				els_pkt->un.prli.TaskRetryIdReq = 0;
			}
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */

			/*
			 * ULP only reads our service parameters
			 * once during bind_port, but the service
			 * parameters change due to topology.
			 */

			/* Copy latest service parameters to payload */
			bcopy((void *)&port->sparam,
			    (void *)&els_pkt->un.logi, sizeof (SERV_PARM));

#ifdef DHCHAP_SUPPORT
			emlxs_dhc_init_sp(port, did,
			    (SERV_PARM *)&els_pkt->un.logi, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */

		}

		break;

	}

	/* Initalize iocbq */
	iocbq->node = (void *)NULL;
	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff from prep means "complete locally, report success" */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_els];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Short (<=255s) timeouts get 10 extra ticks of slack */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) NULL;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);

	/* Process nodes */
	switch (ucmd) {
	case ELS_CMD_RSCN:
		{
		if (port->ini_mode && ub_buffer && cmd == ELS_CMD_ACC) {
			fc_rscn_t	*rscn;
			uint32_t	count;
			uint32_t	*lp = NULL;

			/*
			 * Only the Leadville code path will come thru
			 * here. The RSCN data is NOT swapped properly
			 * for the Comstar code path.
			 */
			lp = (uint32_t *)ub_buffer;
			rscn = (fc_rscn_t *)lp++;
			count = ((rscn->rscn_payload_len - 4) / 4);

			/* Close affected ports */
			for (i = 0; i < count; i++, lp++) {
				(void) emlxs_port_offline(port, *lp);
			}
		}
		break;
		}
	case ELS_CMD_PLOGI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, hba->channel_fcp,
				    pkt->pkt_timeout + 10);
				emlxs_node_close(port, ndlp, hba->channel_ip,
				    pkt->pkt_timeout + 10);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLI:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, hba->channel_fcp,
				    pkt->pkt_timeout + 10);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->chan[hba->channel_fcp], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_fcp], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_PRLO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp,
				    hba->channel_fcp, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->chan[hba->channel_fcp], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_fcp], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_LOGO:

		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, ndlp,
				    hba->channel_ip, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;
	}

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.ElsRspIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els_rsp() */
#ifdef MENLO_SUPPORT
/*
 * emlxs_send_menlo()
 *
 * Transmit a Menlo management command as a GEN_REQUEST64 CT-style IOCB to
 * the fixed EMLXS_MENLO_DID node. FC_PKT_EXCHANGE packets are the command
 * phase (ULPPU=3, param carries d_id); any other tran_type is treated as
 * the MENLO_CMD_FW_DOWNLOAD data phase (ULPPU=1, context carries rx_id).
 *
 * Returns FC_SUCCESS when issued, FC_BADPACKET if the Menlo node is not
 * present/active, or the SLI prep failure code.
 */
static int32_t
emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	IOCBQ		*iocbq;
	IOCB		*iocb;
	CHANNEL		*cp;
	NODELIST	*ndlp;
	uint32_t	did;
	uint32_t	*lp;
	int32_t		rval;

	pkt = PRIV2PKT(sbp);
	did = EMLXS_MENLO_DID;
	lp = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	iocbq->node = (void *) ndlp;
	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff from prep means "complete locally, report success" */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
		/* Cmd phase */

		/* Initalize iocb */
		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
		iocb->ULPCONTEXT = 0;
		iocb->ULPPU = 3;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: [%08x,%08x,%08x,%08x]",
		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));

	} else {	/* FC_PKT_OUTBOUND */

		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;

		/* Initalize iocb */
		iocb->un.genreq64.param = 0;
		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPPU = 1;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: Data: rxid=0x%x size=%d",
		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Short (<=255s) timeouts get 10 extra ticks of slack */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
	    DDI_DMA_SYNC_FORDEV);

	HBASTATS.CtCmdIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_menlo() */
#endif /* MENLO_SUPPORT */
/*
 * emlxs_send_ct()
 *
 * Transmit a CT (Common Transport) request to the node addressed by the
 * packet's d_id. The trace message is formatted with the translator that
 * matches the well-known destination (name server, FDMI management
 * server, or other/remote management). The IOCB is issued on the CT
 * channel.
 *
 * Returns FC_SUCCESS when issued, FC_BADPACKET if the destination node is
 * not present/active, or the SLI prep failure code.
 */
static int32_t
emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	IOCBQ		*iocbq;
	IOCB		*iocb;
	NODELIST	*ndlp;
	uint32_t	did;
	CHANNEL		*cp;
	int32_t		rval;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_ct_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff from prep means "complete locally, report success" */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Short (<=255s) timeouts get 10 extra ticks of slack */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (did == NAMESERVER_DID) {
		SLI_CT_REQUEST	*CtCmd;
		uint32_t	*lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_ctcmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

	} else if (did == FDMI_DID) {
		SLI_CT_REQUEST	*CtCmd;
		uint32_t	*lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_mscmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
	} else {
		SLI_CT_REQUEST	*CtCmd;
		uint32_t	*lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_rmcmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
	}

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtCmdIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct() */
/*
 * emlxs_send_ct_rsp()
 *
 * Transmit a CT response for a previously received unsolicited CT
 * request. No node lookup is done here (sbp->node is left NULL); the
 * rx_id in the packet header ties the response to the exchange. Issued
 * on the CT channel.
 *
 * Returns FC_SUCCESS when issued, or the SLI prep failure code.
 */
static int32_t
emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t	*hba = HBA;
	fc_packet_t	*pkt;
	CHANNEL		*cp;
	IOCBQ		*iocbq;
	IOCB		*iocb;
	uint32_t	*cmd;
	SLI_CT_REQUEST	*CtCmd;
	int32_t		rval;

	pkt = PRIV2PKT(sbp);
	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
	cmd = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_ct_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff from prep means "complete locally, report success" */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Short (<=255s) timeouts get 10 extra ticks of slack */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = NULL;
	sbp->lun = 0;
	sbp->class = iocb->ULPCLASS;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
	    emlxs_rmcmd_xlate(LE_SWAP16(
	    CtCmd->CommandResponse.bits.CmdRsp)),
	    CtCmd->ReasonCode, CtCmd->Explanation,
	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
	    pkt->pkt_cmd_fhdr.rx_id);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtRspIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct_rsp() */
9261 */ 9262 extern uint32_t 9263 emlxs_get_instance(int32_t ddiinst) 9264 { 9265 uint32_t i; 9266 uint32_t inst; 9267 9268 mutex_enter(&emlxs_device.lock); 9269 9270 inst = MAX_FC_BRDS; 9271 for (i = 0; i < emlxs_instance_count; i++) { 9272 if (emlxs_instance[i] == ddiinst) { 9273 inst = i; 9274 break; 9275 } 9276 } 9277 9278 mutex_exit(&emlxs_device.lock); 9279 9280 return (inst); 9281 9282 } /* emlxs_get_instance() */ 9283 9284 9285 /* 9286 * emlxs_add_instance() 9287 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst. 9288 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0. 9289 */ 9290 static uint32_t 9291 emlxs_add_instance(int32_t ddiinst) 9292 { 9293 uint32_t i; 9294 9295 mutex_enter(&emlxs_device.lock); 9296 9297 /* First see if the ddiinst already exists */ 9298 for (i = 0; i < emlxs_instance_count; i++) { 9299 if (emlxs_instance[i] == ddiinst) { 9300 break; 9301 } 9302 } 9303 9304 /* If it doesn't already exist, add it */ 9305 if (i >= emlxs_instance_count) { 9306 if ((i = emlxs_instance_count) < MAX_FC_BRDS) { 9307 emlxs_instance[i] = ddiinst; 9308 emlxs_instance_count++; 9309 emlxs_device.hba_count = emlxs_instance_count; 9310 } 9311 } 9312 9313 mutex_exit(&emlxs_device.lock); 9314 9315 return (i); 9316 9317 } /* emlxs_add_instance() */ 9318 9319 9320 /*ARGSUSED*/ 9321 extern void 9322 emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 9323 uint32_t doneq) 9324 { 9325 emlxs_hba_t *hba; 9326 emlxs_port_t *port; 9327 emlxs_buf_t *fpkt; 9328 9329 port = sbp->port; 9330 9331 if (!port) { 9332 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg, 9333 "NULL port found. 
/*
 * emlxs_pkt_complete()
 *
 * Central I/O completion entry point. Marks the packet's final state,
 * detaches it from any parent flush packet, then routes it to one of
 * three completion paths: wake a polling thread (PACKET_POLLED),
 * complete driver-internal packets inline (PACKET_ALLOCATED), or queue
 * it on the per-HBA doneq for ordered callback by the iodone thread.
 * Packets already owned/completed/queued are logged and ignored.
 */
/*ARGSUSED*/
extern void
emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t doneq)
{
	emlxs_hba_t	*hba;
	emlxs_port_t	*port;
	emlxs_buf_t	*fpkt;

	port = sbp->port;

	if (!port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);

		return;
	}

	hba = HBA;

	mutex_enter(&sbp->mtx);

	/* Check for error conditions */
	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already returned. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_COMPLETED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already completed. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already in completion. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on chip queue. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		mutex_exit(&sbp->mtx);
		return;
	}

	/* Packet is now in completion */
	sbp->pkt_flags |= PACKET_IN_COMPLETION;

	/* Set the state if not already set */
	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
	}

	/* Check for parent flush packet */
	/* If pkt has a parent flush packet then adjust its count now */
	fpkt = sbp->fpkt;
	if (fpkt) {
		/*
		 * We will try to NULL sbp->fpkt inside the
		 * fpkt's mutex if possible
		 */

		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
			mutex_enter(&fpkt->mtx);
			if (fpkt->flush_count) {
				fpkt->flush_count--;
			}
			sbp->fpkt = NULL;
			mutex_exit(&fpkt->mtx);
		} else {	/* fpkt has been returned already */

			sbp->fpkt = NULL;
		}
	}

	/* If pkt is polled, then wake up sleeping thread */
	if (sbp->pkt_flags & PACKET_POLLED) {
		/* Don't set the PACKET_ULP_OWNED flag here */
		/* because the polling thread will do it */
		sbp->pkt_flags |= PACKET_COMPLETED;
		mutex_exit(&sbp->mtx);

		/* Wake up sleeping thread */
		mutex_enter(&EMLXS_PKT_LOCK);
		cv_broadcast(&EMLXS_PKT_CV);
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* If packet was generated by our driver, */
	/* then complete it immediately */
	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);

		emlxs_iodone(sbp);
	}

	/* Put the pkt on the done queue for callback */
	/* completion in another thread */
	else {
		sbp->pkt_flags |= PACKET_IN_DONEQ;
		sbp->next = NULL;
		mutex_exit(&sbp->mtx);

		/* Put pkt on doneq, so I/O's will be completed in order */
		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->iodone_tail == NULL) {
			hba->iodone_list = sbp;
			hba->iodone_count = 1;
		} else {
			hba->iodone_tail->next = sbp;
			hba->iodone_count++;
		}
		hba->iodone_tail = sbp;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Trigger a thread to service the doneq */
		emlxs_thread_trigger1(&hba->iodone_thread,
		    emlxs_iodone_server);
	}

	return;

} /* emlxs_pkt_complete() */
#ifdef SAN_DIAG_SUPPORT
/*
 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
 * normally. Don't have to use atomic operations.
 *
 * emlxs_update_sd_bucket(): fold this packet's measured I/O latency
 * (gethrtime() - sd_start_time) into the destination node's per-bucket
 * histogram, but only while the port is in SD_COLLECTING state and a
 * bucket search type is configured.
 */
extern void
emlxs_update_sd_bucket(emlxs_buf_t *sbp)
{
	emlxs_port_t	*vport;
	fc_packet_t	*pkt;
	uint32_t	did;
	hrtime_t	t;
	hrtime_t	delta_time;
	int		i;
	NODELIST	*ndlp;

	vport = sbp->port;

	if ((sd_bucket.search_type == 0) ||
	    (vport->sd_io_latency_state != SD_COLLECTING))
		return;

	/* Compute the iolatency time in microseconds */
	t = gethrtime();
	delta_time = t - sbp->sd_start_time;
	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	ndlp = emlxs_node_find_did(vport, did);

	if (ndlp) {
		/* Clamp to the top bucket, floor to bucket 0, else scan */
		if (delta_time >=
		    sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1])
			ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
			    count++;
		else if (delta_time <= sd_bucket.values[0])
			ndlp->sd_dev_bucket[0].count++;
		else {
			for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
				if ((delta_time > sd_bucket.values[i-1]) &&
				    (delta_time <= sd_bucket.values[i])) {
					ndlp->sd_dev_bucket[i].count++;
					break;
				}
			}
		}
	}
}
#endif /* SAN_DIAG_SUPPORT */

/*
 * emlxs_iodone_server()
 *
 * Doneq service thread body (triggered via emlxs_thread_trigger1 from
 * emlxs_pkt_complete). Drains hba->iodone_list in FIFO order, dropping
 * EMLXS_PORT_LOCK around each emlxs_iodone() call so completions can
 * re-enter the driver. arg2/arg3 are unused.
 */
/*ARGSUSED*/
static void
emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
	emlxs_buf_t *sbp;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Remove one pkt from the doneq head and complete it */
	while ((sbp = hba->iodone_list) != NULL) {
		if ((hba->iodone_list = sbp->next) == NULL) {
			hba->iodone_tail = NULL;
			hba->iodone_count = 0;
		} else {
			hba->iodone_count--;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Prepare the pkt for completion */
		mutex_enter(&sbp->mtx);
		sbp->next = NULL;
		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
		mutex_exit(&sbp->mtx);

		/* Complete the IO now */
		emlxs_iodone(sbp);

		/* Reacquire lock and check if more work is to be done */
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	if (hba->flag & FC_DMA_CHECK_ERROR) {
		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    NULL, NULL);
	}
#endif /* FMA_SUPPORT */

	return;

} /* End emlxs_iodone_server */


/*
 * emlxs_iodone()
 *
 * Final completion of one packet: mark it COMPLETED and ULP_OWNED, then
 * invoke the ULP's pkt_comp callback (if any). After the flags are set
 * the fc_packet belongs to the ULP and must not be touched again.
 */
static void
emlxs_iodone(emlxs_buf_t *sbp)
{
#ifdef FMA_SUPPORT
	emlxs_port_t	*port = sbp->port;
	emlxs_hba_t	*hba = port->hba;
#endif	/* FMA_SUPPORT */

	fc_packet_t	*pkt;
	CHANNEL		*cp;

	pkt = PRIV2PKT(sbp);

	/* Check one more time that the pkt has not already been returned */
	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
		return;
	}
	cp = (CHANNEL *)sbp->channel;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_comp) {
#ifdef FMA_SUPPORT
		emlxs_check_dma(hba, sbp);
#endif	/* FMA_SUPPORT */
		cp->ulpCmplCmd++;
		(*pkt->pkt_comp) (pkt);
	}

	return;

} /* emlxs_iodone() */
(EMLXS_MODREVX == EMLXS_MODREV2X) 9588 emlxs_unswap_pkt(sbp); 9589 #endif /* EMLXS_MODREV2X */ 9590 9591 mutex_enter(&sbp->mtx); 9592 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED); 9593 mutex_exit(&sbp->mtx); 9594 9595 if (pkt->pkt_comp) { 9596 #ifdef FMA_SUPPORT 9597 emlxs_check_dma(hba, sbp); 9598 #endif /* FMA_SUPPORT */ 9599 cp->ulpCmplCmd++; 9600 (*pkt->pkt_comp) (pkt); 9601 } 9602 9603 return; 9604 9605 } /* emlxs_iodone() */ 9606 9607 9608 9609 extern fc_unsol_buf_t * 9610 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 9611 { 9612 emlxs_unsol_buf_t *pool; 9613 fc_unsol_buf_t *ubp; 9614 emlxs_ub_priv_t *ub_priv; 9615 9616 /* Check if this is a valid ub token */ 9617 if (token < EMLXS_UB_TOKEN_OFFSET) { 9618 return (NULL); 9619 } 9620 9621 mutex_enter(&EMLXS_UB_LOCK); 9622 9623 pool = port->ub_pool; 9624 while (pool) { 9625 /* Find a pool with the proper token range */ 9626 if (token >= pool->pool_first_token && 9627 token <= pool->pool_last_token) { 9628 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 9629 pool->pool_first_token)]; 9630 ub_priv = ubp->ub_fca_private; 9631 9632 if (ub_priv->token != token) { 9633 EMLXS_MSGF(EMLXS_CONTEXT, 9634 &emlxs_sfs_debug_msg, 9635 "ub_find: Invalid token=%x", ubp, token, 9636 ub_priv->token); 9637 9638 ubp = NULL; 9639 } 9640 9641 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 9642 EMLXS_MSGF(EMLXS_CONTEXT, 9643 &emlxs_sfs_debug_msg, 9644 "ub_find: Buffer not in use. 
buffer=%p " 9645 "token=%x", ubp, token); 9646 9647 ubp = NULL; 9648 } 9649 9650 mutex_exit(&EMLXS_UB_LOCK); 9651 9652 return (ubp); 9653 } 9654 9655 pool = pool->pool_next; 9656 } 9657 9658 mutex_exit(&EMLXS_UB_LOCK); 9659 9660 return (NULL); 9661 9662 } /* emlxs_ub_find() */ 9663 9664 9665 9666 extern fc_unsol_buf_t * 9667 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 9668 uint32_t reserve) 9669 { 9670 emlxs_hba_t *hba = HBA; 9671 emlxs_unsol_buf_t *pool; 9672 fc_unsol_buf_t *ubp; 9673 emlxs_ub_priv_t *ub_priv; 9674 uint32_t i; 9675 uint32_t resv_flag; 9676 uint32_t pool_free; 9677 uint32_t pool_free_resv; 9678 9679 mutex_enter(&EMLXS_UB_LOCK); 9680 9681 pool = port->ub_pool; 9682 while (pool) { 9683 /* Find a pool of the appropriate type and size */ 9684 if ((pool->pool_available == 0) || 9685 (pool->pool_type != type) || 9686 (pool->pool_buf_size < size)) { 9687 goto next_pool; 9688 } 9689 9690 9691 /* Adjust free counts based on availablity */ 9692 /* The free reserve count gets first priority */ 9693 pool_free_resv = 9694 min(pool->pool_free_resv, pool->pool_available); 9695 pool_free = 9696 min(pool->pool_free, 9697 (pool->pool_available - pool_free_resv)); 9698 9699 /* Initialize reserve flag */ 9700 resv_flag = reserve; 9701 9702 if (resv_flag) { 9703 if (pool_free_resv == 0) { 9704 if (pool_free == 0) { 9705 goto next_pool; 9706 } 9707 resv_flag = 0; 9708 } 9709 } else if (pool_free == 0) { 9710 goto next_pool; 9711 } 9712 9713 /* Find next available free buffer in this pool */ 9714 for (i = 0; i < pool->pool_nentries; i++) { 9715 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 9716 ub_priv = ubp->ub_fca_private; 9717 9718 if (!ub_priv->available || 9719 ub_priv->flags != EMLXS_UB_FREE) { 9720 continue; 9721 } 9722 9723 ub_priv->time = hba->timer_tics; 9724 9725 /* Timeout in 5 minutes */ 9726 ub_priv->timeout = (5 * 60); 9727 9728 ub_priv->flags = EMLXS_UB_IN_USE; 9729 9730 /* Alloc the buffer from the pool */ 9731 if (resv_flag) { 9732 
				ub_priv->flags |= EMLXS_UB_RESV;
				pool->pool_free_resv--;
			} else {
				pool->pool_free--;
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
			    ub_priv->token, pool->pool_nentries,
			    pool->pool_available, pool->pool_free,
			    pool->pool_free_resv);

			mutex_exit(&EMLXS_UB_LOCK);

			return (ubp);
		}
next_pool:

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_get() */



/*
 * Translate an emlxs I/O status (and, for IOSTAT_LOCAL_REJECT, a local
 * reject reason) into the fc_packet_t state/reason/expln/action fields
 * via the driver translation tables.  When the chip did not return a
 * response frame, residual counts (and a minimal FCP response on
 * success) are synthesized here.  'lock' selects whether sbp->mtx is
 * taken by this routine.  The translation is performed only once per
 * packet (guarded by PACKET_STATE_VALID).
 */
extern void
emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
	uint32_t lock)
{
	fc_packet_t *pkt;
	fcp_rsp_t *fcp_rsp;
	uint32_t i;
	emlxs_xlat_err_t *tptr;
	emlxs_xlat_err_t *entry;


	pkt = PRIV2PKT(sbp);

	if (lock) {
		mutex_enter(&sbp->mtx);
	}

	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		sbp->pkt_flags |= PACKET_STATE_VALID;

		/* Perform table lookup */
		entry = NULL;
		if (iostat != IOSTAT_LOCAL_REJECT) {
			tptr = emlxs_iostat_tbl;
			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
				if (iostat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		} else {	/* iostate == IOSTAT_LOCAL_REJECT */

			tptr = emlxs_ioerr_tbl;
			for (i = 0; i < IOERR_MAX; i++, tptr++) {
				if (localstat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		}

		if (entry) {
			pkt->pkt_state = entry->pkt_state;
			pkt->pkt_reason = entry->pkt_reason;
			pkt->pkt_expln = entry->pkt_expln;
			pkt->pkt_action = entry->pkt_action;
		} else {
			/* Set defaults */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_ABORTED;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;
		}


		/* Set the residual counts and response frame */
		/* Check if response frame was received from the chip */
		/* If so, then the residual counts will already be set */
		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
			/* We have to create the response frame */
			if (iostat == IOSTAT_SUCCESS) {
				pkt->pkt_resp_resid = 0;
				pkt->pkt_data_resid = 0;

				if ((pkt->pkt_cmd_fhdr.type ==
				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
				    pkt->pkt_resp) {
					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;

					fcp_rsp->fcp_u.fcp_status.
					    rsp_len_set = 1;
					fcp_rsp->fcp_response_len = 8;
				}
			} else {
				/* Otherwise assume no data */
				/* and no response received */
				pkt->pkt_data_resid = pkt->pkt_datalen;
				pkt->pkt_resp_resid = pkt->pkt_rsplen;
			}
		}
	}

	if (lock) {
		mutex_exit(&sbp->mtx);
	}

	return;

} /* emlxs_set_pkt_state() */


#if (EMLXS_MODREVX == EMLXS_MODREV2X)

/*
 * Byte-swap a service parameter (login payload) structure in place.
 * The common service parameters and all four class parameter blocks are
 * swapped 16 bits at a time; e_d_tov is handled separately as a full
 * 32-bit field.
 */
extern void
emlxs_swap_service_params(SERV_PARM *sp)
{
	uint16_t *p;
	int size;
	int i;

	size = (sizeof (CSP) - 4) / 2;
	p = (uint16_t *)&sp->cmn;
	for (i = 0; i < size; i++) {
		p[i] = LE_SWAP16(p[i]);
	}
	sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls1;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls2;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls3;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	size = sizeof (CLASS_PARMS) / 2;
	p = (uint16_t *)&sp->cls4;
	for (i = 0; i < size; i++, p++) {
		*p = LE_SWAP16(*p);
	}

	return;

} /* emlxs_swap_service_params() */

/*
 * Undo any payload swapping previously applied to a packet, dispatching
 * on whichever PACKET_*_SWAPPED flag is set.  The swap routines toggle
 * the flag, so calling them a second time restores the original order.
 */
extern void
emlxs_unswap_pkt(emlxs_buf_t *sbp)
{
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		emlxs_swap_fcp_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		emlxs_swap_els_pkt(sbp);
	}

	else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		emlxs_swap_ct_pkt(sbp);
	}

} /* emlxs_unswap_pkt() */


/*
 * Byte-swap the FCP command payload (and response payload, when marked
 * valid) of a packet in place.  PACKET_FCP_SWAPPED is toggled so a
 * second call restores the original byte order.  Packets flagged
 * PACKET_ALLOCATED are left untouched.
 */
extern void
emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	FCP_CMND *cmd;
	fcp_rsp_t *rsp;
	uint16_t *lunp;
	uint32_t i;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swap state so this routine is its own inverse */
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
	} else {
		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (FCP_CMND *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
	    (fcp_rsp_t *)pkt->pkt_resp : NULL;

	/* The size of data buffer needs to be swapped. */
	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);

	/*
	 * Swap first 2 words of FCP CMND payload.
	 */
	lunp = (uint16_t *)&cmd->fcpLunMsl;
	for (i = 0; i < 4; i++) {
		lunp[i] = LE_SWAP16(lunp[i]);
	}

	if (rsp) {
		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
	}

	return;

} /* emlxs_swap_fcp_pkt() */


/*
 * Byte-swap an ELS command payload (and response payload, when marked
 * valid) in place.  The set of words swapped depends on the ELS command
 * code.  PACKET_ELS_SWAPPED is toggled so a second call restores the
 * original byte order; 'swapped' records which direction this call is
 * performing so the command code can be read in host order either way.
 */
extern void
emlxs_swap_els_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint16_t *c;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Read the command code in host order regardless of direction */
	if (!swapped) {
		cmd[0] = LE_SWAP32(cmd[0]);
		command = cmd[0] & ELS_CMD_MASK;
	} else {
		command = cmd[0] & ELS_CMD_MASK;
		cmd[0] = LE_SWAP32(cmd[0]);
	}

	if (rsp) {
		rsp[0] = LE_SWAP32(rsp[0]);
	}

	/* Swap the command-specific payload words */
	switch (command) {
	case ELS_CMD_ACC:
		if (sbp->ucmd == ELS_CMD_ADISC) {
			/* Hard address of originator */
			cmd[1] = LE_SWAP32(cmd[1]);

			/* N_Port ID of originator */
			cmd[6] = LE_SWAP32(cmd[6]);
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (rsp) {
			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
		}
		break;

	case ELS_CMD_LOGO:
		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
		break;

	case ELS_CMD_RLS:
		cmd[1] = LE_SWAP32(cmd[1]);

		if (rsp) {
			for (i = 0; i < 6; i++) {
				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
			}
		}
		break;

	case ELS_CMD_ADISC:
		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
		break;

	case ELS_CMD_PRLI:
		c = (uint16_t *)&cmd[1];
		c[1] = LE_SWAP16(c[1]);

		cmd[4] = LE_SWAP32(cmd[4]);

		if (rsp) {
			rsp[4] = LE_SWAP32(rsp[4]);
		}
		break;

	case ELS_CMD_SCR:
		cmd[1] = LE_SWAP32(cmd[1]);
		break;

	case ELS_CMD_LINIT:
		if (rsp) {
			rsp[1] = LE_SWAP32(rsp[1]);
		}
		break;

	default:
		break;
	}

	return;

} /* emlxs_swap_els_pkt() */


/*
 * Byte-swap a CT (common transport) command payload (and response
 * payload, when marked valid) in place.  The CT header is always
 * swapped; additional words are swapped per nameserver command code.
 * PACKET_CT_SWAPPED is toggled so a second call restores the original
 * byte order.  Note: when swapping for the first time, cmd[0] is
 * overwritten with the CT revision word (0x01000000) before the swap.
 */
extern void
emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_CT_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Capture the command code in host order before/after the swap */
	if (!swapped) {
		cmd[0] = 0x01000000;
		command = cmd[2];
	}

	cmd[0] = LE_SWAP32(cmd[0]);
	cmd[1] = LE_SWAP32(cmd[1]);
	cmd[2] = LE_SWAP32(cmd[2]);
	cmd[3] = LE_SWAP32(cmd[3]);

	if (swapped) {
		command = cmd[2];
	}

	switch ((command >> 16)) {
	case SLI_CTNS_GA_NXT:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_GPN_ID:
	case SLI_CTNS_GNN_ID:
	case SLI_CTNS_RPN_ID:
	case SLI_CTNS_RNN_ID:
	case SLI_CTNS_RSPN_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_RCS_ID:
	case SLI_CTNS_RPT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		cmd[5] = LE_SWAP32(cmd[5]);
		break;

	case SLI_CTNS_RFT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);

		/* Swap FC4 types */
		for (i = 0; i < 8; i++) {
			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
		}
		break;

	case SLI_CTNS_GFT_ID:
		if (rsp) {
			/* Swap FC4 types */
			for (i = 0; i < 8; i++) {
				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
			}
		}
		break;

	case SLI_CTNS_GCS_ID:
	case SLI_CTNS_GSPN_ID:
	case SLI_CTNS_GSNN_NN:
	case SLI_CTNS_GIP_NN:
	case SLI_CTNS_GIPA_NN:

	case SLI_CTNS_GPT_ID:
	case SLI_CTNS_GID_NN:
	case SLI_CTNS_GNN_IP:
	case SLI_CTNS_GIPA_IP:
	case SLI_CTNS_GID_FT:
	case SLI_CTNS_GID_PT:
	case SLI_CTNS_GID_PN:
	case SLI_CTNS_RIP_NN:
	case SLI_CTNS_RIPA_NN:
	case SLI_CTNS_RSNN_NN:
	case SLI_CTNS_DA_ID:
	case SLI_CT_RESPONSE_FS_RJT:
	case SLI_CT_RESPONSE_FS_ACC:

	default:
		break;
	}
	return;

} /* emlxs_swap_ct_pkt() */


/*
 * Byte-swap an unsolicited ELS buffer in place, per ELS command code.
 * RSCN page entries and login service parameters are swapped here;
 * other commands are passed through for the ULP to handle.
 */
extern void
emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
{
	emlxs_ub_priv_t *ub_priv;
	fc_rscn_t *rscn;
	uint32_t count;
	uint32_t i;
	uint32_t *lp;
	la_els_logi_t *logi;

	ub_priv = ubp->ub_fca_private;

	switch (ub_priv->cmd) {
	case ELS_CMD_RSCN:
		rscn = (fc_rscn_t *)ubp->ub_buffer;

		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);

		count = ((rscn->rscn_payload_len - 4) / 4);
		lp = (uint32_t *)ubp->ub_buffer + 1;
		for (i = 0; i < count; i++, lp++) {
			*lp = LE_SWAP32(*lp);
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:
		logi = (la_els_logi_t *)ubp->ub_buffer;
		emlxs_swap_service_params(
		    (SERV_PARM *)&logi->common_service);
		break;

	/* ULP handles this */
	case ELS_CMD_LOGO:
	case ELS_CMD_PRLI:
	case ELS_CMD_PRLO:
	case ELS_CMD_ADISC:
	default:
		break;
	}

	return;

} /* emlxs_swap_els_ub() */


#endif /* EMLXS_MODREV2X */


/*
 * Translate an ELS command code to a printable name.  Unknown codes are
 * formatted into a static buffer, so the returned string may be
 * overwritten by a later call (not reentrant).
 */
extern char *
emlxs_elscmd_xlate(uint32_t elscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (elscmd == emlxs_elscmd_table[i].code) {
			return (emlxs_elscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "ELS=0x%x", elscmd);
	return (buffer);

} /* emlxs_elscmd_xlate() */


/*
 * Translate a CT command code to a printable name.  Same static-buffer
 * caveat as emlxs_elscmd_xlate().
 */
extern char *
emlxs_ctcmd_xlate(uint32_t ctcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (ctcmd == emlxs_ctcmd_table[i].code) {
			return (emlxs_ctcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "cmd=0x%x", ctcmd);
	return (buffer);

} /* emlxs_ctcmd_xlate() */


#ifdef MENLO_SUPPORT
/*
 * Translate a Menlo command code to a printable name (static buffer,
 * not reentrant).
 */
extern char *
emlxs_menlo_cmd_xlate(uint32_t cmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (cmd == emlxs_menlo_cmd_table[i].code) {
			return (emlxs_menlo_cmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", cmd);
	return (buffer);

} /* emlxs_menlo_cmd_xlate() */

/*
 * Translate a Menlo response code to a printable name (static buffer,
 * not reentrant).
 */
extern char *
emlxs_menlo_rsp_xlate(uint32_t rsp)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rsp == emlxs_menlo_rsp_table[i].code) {
			return (emlxs_menlo_rsp_table[i].string);
		}
	}

	(void) sprintf(buffer, "Rsp=0x%x", rsp);
	return (buffer);

} /* emlxs_menlo_rsp_xlate() */

#endif /* MENLO_SUPPORT */


/*
 * Translate a remote management command code to a printable name
 * (static buffer, not reentrant).
 */
extern char *
emlxs_rmcmd_xlate(uint32_t rmcmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (rmcmd == emlxs_rmcmd_table[i].code) {
			return (emlxs_rmcmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "RM=0x%x", rmcmd);
	return (buffer);

} /* emlxs_rmcmd_xlate() */



/*
 * Translate a management server command code to a printable name
 * (static buffer, not reentrant).
 */
extern char *
emlxs_mscmd_xlate(uint16_t mscmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (mscmd == emlxs_mscmd_table[i].code) {
			return (emlxs_mscmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", mscmd);
	return (buffer);

} /* emlxs_mscmd_xlate() */


/*
 * Translate an adapter state code to a printable name (static buffer,
 * not reentrant).
 */
extern char *
emlxs_state_xlate(uint8_t state)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (state == emlxs_state_table[i].code) {
			return (emlxs_state_table[i].string);
		}
	}

	(void) sprintf(buffer, "State=0x%x", state);
	return (buffer);

} /* emlxs_state_xlate() */


/*
 * Translate a driver error code to a printable name (static buffer,
 * not reentrant).
 * NOTE(review): the parameter is named 'errno', which would collide
 * with the C library errno macro in userland; presumably harmless in
 * this kernel build -- confirm before reusing this code elsewhere.
 */
extern char *
emlxs_error_xlate(uint8_t errno)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (errno == emlxs_error_table[i].code) {
			return (emlxs_error_table[i].string);
		}
	}

	(void) sprintf(buffer, "Errno=0x%x", errno);
	return (buffer);

} /* emlxs_error_xlate() */


/*
 * Lower the adapter power level, either through the kernel power
 * management framework (when CFG_PM_SUPPORT is enabled) or by calling
 * the driver's emlxs_power() routine directly.
 */
static int
emlxs_pm_lower_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	rval = DDI_SUCCESS;

	/* Lower the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_lower_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_DOWN);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
	}

	return (rval);

} /* emlxs_pm_lower_power() */


/*
 * Raise the adapter power level, either through the kernel power
 * management framework (when CFG_PM_SUPPORT is enabled) or by calling
 * the driver's emlxs_power() routine directly.
 */
static int
emlxs_pm_raise_power(dev_info_t *dip)
{
	int ddiinst;
	int emlxinst;
	emlxs_config_t *cfg;
	int32_t rval;
	emlxs_hba_t *hba;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];
	cfg = &CFG;

	/* Raise the power level */
	if (cfg[CFG_PM_SUPPORT].current) {
		rval =
		    pm_raise_power(dip, EMLXS_PM_ADAPTER,
		    EMLXS_PM_ADAPTER_UP);
	} else {
		/* We do not have kernel support of power management enabled */
		/* therefore, call our power management routine directly */
		rval =
		    emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
	}

	return (rval);

} /* emlxs_pm_raise_power() */


#ifdef IDLE_TIMER

/*
 * Mark the adapter busy so the PM framework will not power it down.
 * pm_busy is double-checked under EMLXS_PM_LOCK to avoid redundant
 * framework calls.
 * NOTE(review): 'dip' is used below but is not declared in this
 * function; this code is only compiled when IDLE_TIMER is defined --
 * presumably hba->dip is intended.  Confirm before enabling IDLE_TIMER.
 */
extern int
emlxs_pm_busy_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	hba->pm_active = 1;

	if (hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	if (hba->pm_busy) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 1;

	mutex_exit(&EMLXS_PM_LOCK);

	/* Attempt to notify system that we are busy */
	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_busy_component.");

		rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_busy_component failed. ret=%d", rval);

			/* If this attempt failed then clear our flags */
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_busy = 0;
			mutex_exit(&EMLXS_PM_LOCK);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_busy_component() */


/*
 * Mark the adapter idle so the PM framework may power it down.
 * Mirror image of emlxs_pm_busy_component(); same undeclared-'dip'
 * caveat applies (IDLE_TIMER-only code).
 */
extern int
emlxs_pm_idle_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	if (!hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	if (!hba->pm_busy) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 0;

	mutex_exit(&EMLXS_PM_LOCK);

	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_idle_component.");

		rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_idle_component failed. ret=%d", rval);

			/* If this attempt failed then */
			/* reset our flags for another attempt */
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_busy = 1;
			mutex_exit(&EMLXS_PM_LOCK);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_idle_component() */


/*
 * Periodic timer hook: while I/O activity is seen (pm_active), keep
 * pushing the idle deadline out; once the deadline passes with no
 * activity, declare the adapter idle.
 */
extern void
emlxs_pm_idle_timer(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;

	if (hba->pm_active) {
		/* Clear active flag and reset idle timer */
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_active = 0;
		hba->pm_idle_timer =
		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
		mutex_exit(&EMLXS_PM_LOCK);
	}

	/* Check for idle timeout */
	else if (hba->timer_tics >= hba->pm_idle_timer) {
		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_idle_timer =
			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
			mutex_exit(&EMLXS_PM_LOCK);
		}
	}

	return;

} /* emlxs_pm_idle_timer() */

#endif /* IDLE_TIMER */


/*
 * Parse the "vport" driver.conf property (per-adapter "<drv><inst>-vport"
 * first, then the global "vport") and configure the matching virtual
 * ports.  Each entry is "PWWPN:WWNN:WWPN:VPI" with WWNs as hex strings;
 * entries whose physical WWPN does not match this adapter are skipped.
 */
static void
emlxs_read_vport_prop(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	char **arrayp;
	uint8_t *s;
	uint8_t *np;
	NAME_TYPE pwwpn;
	NAME_TYPE wwnn;
	NAME_TYPE wwpn;
	uint32_t vpi;
	uint32_t cnt;
	uint32_t rval;
	uint32_t i;
	uint32_t j;
	uint32_t c1;
	uint32_t sum;
	uint32_t errors;
	char buffer[64];

	/* Check for the per adapter vport setting */
	(void) sprintf(buffer, "%s%d-vport", DRIVER_NAME, hba->ddiinst);
	cnt = 0;
	arrayp = NULL;
	rval =
	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		/* Check for the global vport setting */
		cnt = 0;
		arrayp = NULL;
		rval =
		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
	}

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		return;
	}

	for (i = 0; i < cnt; i++) {
		errors = 0;
		s = (uint8_t *)arrayp[i];

		if (!s) {
			break;
		}

		/* Parse the physical port WWPN (16 hex digits) */
		np = (uint8_t *)&pwwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after PWWPN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the virtual port WWNN */
		np = (uint8_t *)&wwnn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWNN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the virtual port WWPN */
		np = (uint8_t *)&wwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);

				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);

				errors++;
			}

			*np++ = sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWPN. "
			    "entry=%d", i);

			goto out;
		}

		/* Parse the decimal VPI number */
		sum = 0;
		do {
			c1 = *s++;
			if ((c1 < '0') || (c1 > '9')) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid VPI found. "
				    "entry=%d c=%c vpi=%d", i, c1, sum);

				goto out;
			}

			sum = (sum * 10) + (c1 - '0');

		} while (*s != 0);

		vpi = sum;

		if (errors) {
			continue;
		}

		/* Entry has been read */

		/* Check if the physical port wwpn */
		/* matches our physical port wwpn */
		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
			continue;
		}

		/* Check vpi range */
		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
			continue;
		}

		/* Check if port has already been configured */
		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
			continue;
		}

		/* Set the highest configured vpi */
		if (vpi > hba->vpi_high) {
			hba->vpi_high = vpi;
		}

		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
		    sizeof (NAME_TYPE));

		/* Default the node/port symbolic names from the adapter */
		/* NOTE(review): strncpy may leave snn unterminated if */
		/* hba->snn fills all 256 bytes -- confirm buffer sizing */
		if (hba->port[vpi].snn[0] == 0) {
			(void) strncpy((caddr_t)hba->port[vpi].snn,
			    (caddr_t)hba->snn, 256);
		}

		if (hba->port[vpi].spn[0] == 0) {
			(void) sprintf((caddr_t)hba->port[vpi].spn,
			    "%s VPort-%d",
			    (caddr_t)hba->spn, vpi);
		}

		hba->port[vpi].flag |=
		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLE);

		if (cfg[CFG_VPORT_RESTRICTED].current) {
			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
		}
	}

out:

	(void) ddi_prop_free((void *) arrayp);
	return;

} /* emlxs_read_vport_prop() */


/*
 * Format an 8-byte WWN into the caller-supplied buffer as 16 lowercase
 * hex digits.  Returns the buffer for call-chaining convenience.
 */
extern char *
emlxs_wwn_xlate(char *buffer, uint8_t *wwn)
{
	(void) sprintf(buffer, "%02x%02x%02x%02x%02x%02x%02x%02x",
	    wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff,
	    wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff);

	return (buffer);

} /* emlxs_wwn_xlate() */


/* This is called at port online and offline */
/*
 * Flush the queue of unsolicited buffers waiting for the ULP.  Each
 * buffer is either handed to the ULP's unsolicited callback (if the ULP
 * is online) or released back to its pool.
 */
extern void
emlxs_ub_flush(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	emlxs_ub_priv_t *next;

	/* Return if nothing to do */
	if (!port->ub_wait_head) {
		return;
	}

	/* Detach the whole wait list under the lock, then walk it */
	mutex_enter(&EMLXS_PORT_LOCK);
	ub_priv = port->ub_wait_head;
	port->ub_wait_head = NULL;
	port->ub_wait_tail = NULL;
	mutex_exit(&EMLXS_PORT_LOCK);

	while (ub_priv) {
		next = ub_priv->next;
		ubp = ub_priv->ubp;

		/* Check if ULP is online and we have a callback function */
		if ((port->ulp_statec != FC_STATE_OFFLINE) &&
		    port->ulp_unsol_cb) {
			/* Send ULP the ub buffer */
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {	/* Drop the buffer */

			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		ub_priv = next;

	}	/* while () */

	return;

} /* emlxs_ub_flush() */


/*
 * Deliver one unsolicited buffer to the ULP.  If the ULP is offline but
 * the link is up, the buffer is queued on the port wait list for a later
 * emlxs_ub_flush(); otherwise it is released back to its pool.
 */
extern void
emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

	ub_priv = ubp->ub_fca_private;

	/* Check if ULP is online */
	if (port->ulp_statec != FC_STATE_OFFLINE) {
		if (port->ulp_unsol_cb) {
			port->ulp_unsol_cb(port->ulp_handle, ubp,
			    ubp->ub_frame.type);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}

		return;
	} else {	/* ULP offline */

		if (hba->state >= FC_LINK_UP) {
			/* Add buffer to queue tail */
			mutex_enter(&EMLXS_PORT_LOCK);

			if (port->ub_wait_tail) {
				port->ub_wait_tail->next = ub_priv;
			}
			port->ub_wait_tail = ub_priv;

			if (!port->ub_wait_head) {
				port->ub_wait_head = ub_priv;
			}

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			(void) emlxs_ub_release(port, 1, &ubp->ub_token);
		}
	}

	return;

} /* emlxs_ub_callback() */


/*
 * Compile-environment sanity check: verify that the sizes of the
 * hardware-shared structures match what the SLI interface requires.
 * Returns the number of mismatches (0 means the build is sane).
 */
static uint32_t
emlxs_integrity_check(emlxs_hba_t *hba)
{
	uint32_t size;
	uint32_t errors = 0;
	int ddiinst = hba->ddiinst;

	size = 16;
	if (sizeof (ULP_BDL) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));

		errors++;
	}
	size = 8;
	if (sizeof (ULP_BDE) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));

		errors++;
	}
	size = 12;
	if (sizeof (ULP_BDE64) != size) {
		cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
		    DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));

		errors++;
	}
	size = 16;
	if (sizeof (HBQE_t) != size) {
		cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
		    DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));

		errors++;
	}
	size = 8;
	if (sizeof (HGP) != size) {
		cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (HGP));

		errors++;
	}
	if (sizeof (PGP) != size) {
		cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
		    DRIVER_NAME, ddiinst, (int)sizeof (PGP));

		errors++;
	}
	size = 4;
	if (sizeof (WORD5) != size) {
		cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
		    DRIVER_NAME, ddiinst, (int)sizeof (WORD5));

		errors++;
	}
	size = 124;
	if (sizeof (MAILVARIANTS) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
		    "%d != 124", DRIVER_NAME, ddiinst,
		    (int)sizeof (MAILVARIANTS));

		errors++;
	}
	size = 128;
	if (sizeof (SLI1_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));

		errors++;
	}
	if (sizeof (SLI2_DESC) != size) {
		cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));

		errors++;
	}
	size = MBOX_SIZE;
	if (sizeof (MAILBOX) != size) {
		cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);

		errors++;
	}
	size = PCB_SIZE;
	if (sizeof (PCB) != size) {
		cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);

		errors++;
	}
	size = 260;
	if (sizeof (ATTRIBUTE_ENTRY) != size) {
		cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
		    "%d != 260", DRIVER_NAME, ddiinst,
		    (int)sizeof (ATTRIBUTE_ENTRY));

		errors++;
	}
	size = SLI_SLIM1_SIZE;
	if (sizeof (SLIM1) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);

		errors++;
	}
	size = SLI3_IOCB_CMD_SIZE;
	if (sizeof (IOCB) != size) {
		cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
		    SLI3_IOCB_CMD_SIZE);

		errors++;
	}

	size = SLI_SLIM2_SIZE;
	if (sizeof (SLIM2) != size) {
		cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
		    DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
		    SLI_SLIM2_SIZE);

		errors++;
	}
	return (errors);

} /* emlxs_integrity_check() */


#ifdef FMA_SUPPORT
/*
 * FMA support
 */

extern void
emlxs_fm_init(emlxs_hba_t *hba)
{
	ddi_iblock_cookie_t iblk;

	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	}

	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}

	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);

	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11124 pci_ereport_setup(hba->dip); 11125 } 11126 11127 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11128 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb, 11129 (void *)hba); 11130 } 11131 11132 } /* emlxs_fm_init() */ 11133 11134 11135 extern void 11136 emlxs_fm_fini(emlxs_hba_t *hba) 11137 { 11138 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 11139 return; 11140 } 11141 11142 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 11143 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11144 pci_ereport_teardown(hba->dip); 11145 } 11146 11147 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 11148 ddi_fm_handler_unregister(hba->dip); 11149 } 11150 11151 (void) ddi_fm_fini(hba->dip); 11152 11153 } /* emlxs_fm_fini() */ 11154 11155 11156 extern int 11157 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle) 11158 { 11159 ddi_fm_error_t err; 11160 11161 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11162 return (DDI_FM_OK); 11163 } 11164 11165 /* Some S10 versions do not define the ahi_err structure */ 11166 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) { 11167 return (DDI_FM_OK); 11168 } 11169 11170 err.fme_status = DDI_FM_OK; 11171 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION); 11172 11173 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */ 11174 if ((void *)&ddi_fm_acc_err_clear != NULL) { 11175 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 11176 } 11177 11178 return (err.fme_status); 11179 11180 } /* emlxs_fm_check_acc_handle() */ 11181 11182 11183 extern int 11184 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle) 11185 { 11186 ddi_fm_error_t err; 11187 11188 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 11189 return (DDI_FM_OK); 11190 } 11191 11192 err.fme_status = DDI_FM_OK; 11193 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION); 11194 11195 return (err.fme_status); 11196 11197 } /* emlxs_fm_check_dma_handle() */ 11198 11199 11200 extern void 11201 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail) 11202 { 
11203 uint64_t ena; 11204 char buf[FM_MAX_CLASS]; 11205 11206 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 11207 return; 11208 } 11209 11210 if (detail == NULL) { 11211 return; 11212 } 11213 11214 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 11215 ena = fm_ena_generate(0, FM_ENA_FMT1); 11216 11217 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP, 11218 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 11219 11220 } /* emlxs_fm_ereport() */ 11221 11222 11223 extern void 11224 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact) 11225 { 11226 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 11227 return; 11228 } 11229 11230 if (impact == NULL) { 11231 return; 11232 } 11233 11234 if ((hba->pm_state & EMLXS_PM_IN_DETACH) && 11235 (impact == DDI_SERVICE_DEGRADED)) { 11236 impact = DDI_SERVICE_UNAFFECTED; 11237 } 11238 11239 ddi_fm_service_impact(hba->dip, impact); 11240 11241 return; 11242 11243 } /* emlxs_fm_service_impact() */ 11244 11245 11246 /* 11247 * The I/O fault service error handling callback function 11248 */ 11249 /*ARGSUSED*/ 11250 extern int 11251 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 11252 const void *impl_data) 11253 { 11254 /* 11255 * as the driver can always deal with an error 11256 * in any dma or access handle, we can just return 11257 * the fme_status value. 
11258 */ 11259 pci_ereport_post(dip, err, NULL); 11260 return (err->fme_status); 11261 11262 } /* emlxs_fm_error_cb() */ 11263 11264 extern void 11265 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp) 11266 { 11267 emlxs_port_t *port = sbp->port; 11268 fc_packet_t *pkt = PRIV2PKT(sbp); 11269 11270 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 11271 if (emlxs_fm_check_dma_handle(hba, 11272 hba->sli.sli4.slim2.dma_handle) 11273 != DDI_FM_OK) { 11274 EMLXS_MSGF(EMLXS_CONTEXT, 11275 &emlxs_invalid_dma_handle_msg, 11276 "slim2: hdl=%p", 11277 hba->sli.sli4.slim2.dma_handle); 11278 11279 mutex_enter(&EMLXS_PORT_LOCK); 11280 hba->flag |= FC_DMA_CHECK_ERROR; 11281 mutex_exit(&EMLXS_PORT_LOCK); 11282 } 11283 } else { 11284 if (emlxs_fm_check_dma_handle(hba, 11285 hba->sli.sli3.slim2.dma_handle) 11286 != DDI_FM_OK) { 11287 EMLXS_MSGF(EMLXS_CONTEXT, 11288 &emlxs_invalid_dma_handle_msg, 11289 "slim2: hdl=%p", 11290 hba->sli.sli3.slim2.dma_handle); 11291 11292 mutex_enter(&EMLXS_PORT_LOCK); 11293 hba->flag |= FC_DMA_CHECK_ERROR; 11294 mutex_exit(&EMLXS_PORT_LOCK); 11295 } 11296 } 11297 11298 if (hba->flag & FC_DMA_CHECK_ERROR) { 11299 pkt->pkt_state = FC_PKT_TRAN_ERROR; 11300 pkt->pkt_reason = FC_REASON_DMA_ERROR; 11301 pkt->pkt_expln = FC_EXPLN_NONE; 11302 pkt->pkt_action = FC_ACTION_RETRYABLE; 11303 return; 11304 } 11305 11306 if (pkt->pkt_cmdlen) { 11307 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma) 11308 != DDI_FM_OK) { 11309 EMLXS_MSGF(EMLXS_CONTEXT, 11310 &emlxs_invalid_dma_handle_msg, 11311 "pkt_cmd_dma: hdl=%p", 11312 pkt->pkt_cmd_dma); 11313 11314 pkt->pkt_state = FC_PKT_TRAN_ERROR; 11315 pkt->pkt_reason = FC_REASON_DMA_ERROR; 11316 pkt->pkt_expln = FC_EXPLN_NONE; 11317 pkt->pkt_action = FC_ACTION_RETRYABLE; 11318 11319 return; 11320 } 11321 } 11322 11323 if (pkt->pkt_rsplen) { 11324 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma) 11325 != DDI_FM_OK) { 11326 EMLXS_MSGF(EMLXS_CONTEXT, 11327 &emlxs_invalid_dma_handle_msg, 11328 "pkt_resp_dma: hdl=%p", 11329 
pkt->pkt_resp_dma); 11330 11331 pkt->pkt_state = FC_PKT_TRAN_ERROR; 11332 pkt->pkt_reason = FC_REASON_DMA_ERROR; 11333 pkt->pkt_expln = FC_EXPLN_NONE; 11334 pkt->pkt_action = FC_ACTION_RETRYABLE; 11335 11336 return; 11337 } 11338 } 11339 11340 if (pkt->pkt_datalen) { 11341 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma) 11342 != DDI_FM_OK) { 11343 EMLXS_MSGF(EMLXS_CONTEXT, 11344 &emlxs_invalid_dma_handle_msg, 11345 "pkt_data_dma: hdl=%p", 11346 pkt->pkt_data_dma); 11347 11348 pkt->pkt_state = FC_PKT_TRAN_ERROR; 11349 pkt->pkt_reason = FC_REASON_DMA_ERROR; 11350 pkt->pkt_expln = FC_EXPLN_NONE; 11351 pkt->pkt_action = FC_ACTION_RETRYABLE; 11352 11353 return; 11354 } 11355 } 11356 11357 return; 11358 11359 } 11360 #endif /* FMA_SUPPORT */ 11361 11362 11363 extern void 11364 emlxs_swap32_buffer(uint8_t *buffer, uint32_t size) 11365 { 11366 uint32_t word; 11367 uint32_t *wptr; 11368 uint32_t i; 11369 11370 wptr = (uint32_t *)buffer; 11371 11372 size += (size%4)? (4-(size%4)):0; 11373 for (i = 0; i < size / 4; i++) { 11374 word = *wptr; 11375 *wptr++ = SWAP32(word); 11376 } 11377 11378 return; 11379 11380 } /* emlxs_swap32_buffer() */ 11381 11382 11383 extern void 11384 emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size) 11385 { 11386 uint32_t word; 11387 uint32_t *sptr; 11388 uint32_t *dptr; 11389 uint32_t i; 11390 11391 sptr = (uint32_t *)src; 11392 dptr = (uint32_t *)dst; 11393 11394 size += (size%4)? (4-(size%4)):0; 11395 for (i = 0; i < size / 4; i++) { 11396 word = *sptr++; 11397 *dptr++ = SWAP32(word); 11398 } 11399 11400 return; 11401 11402 } /* emlxs_swap32_buffer() */ 11403