/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 * Copyright 2020 RackTop Systems, Inc.
 */

#define	DEF_ICFG	1

#include <emlxs.h>
#include <emlxs_version.h>


/* Driver identification strings; values come from emlxs_version.h. */
static char emlxs_copyright[] = EMLXS_COPYRIGHT;
char emlxs_revision[] = EMLXS_REVISION;
char emlxs_version[] = EMLXS_VERSION;
char emlxs_name[] = EMLXS_NAME;
char emlxs_label[] = EMLXS_LABEL;

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SOLARIS_C);

#ifdef MENLO_SUPPORT
static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* MENLO_SUPPORT */

static void emlxs_fca_attach(emlxs_hba_t *hba);
static void emlxs_fca_detach(emlxs_hba_t *hba);
static void emlxs_drv_banner(emlxs_hba_t *hba);

static int32_t emlxs_get_props(emlxs_hba_t *hba);
static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
    uint32_t *pkt_flags);
static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
static uint32_t emlxs_add_instance(int32_t ddiinst);
static void emlxs_iodone(emlxs_buf_t *sbp);
static int emlxs_pm_lower_power(dev_info_t *dip);
static int emlxs_pm_raise_power(dev_info_t *dip);
static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
    uint32_t failed);
static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
    uint32_t args, uint32_t *arg);

#if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
static void emlxs_read_vport_prop(emlxs_hba_t *hba);
#endif /* EMLXS_MODREV3 && EMLXS_MODREV4 */

static void emlxs_mode_init_masks(emlxs_hba_t *hba);


extern int
emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
extern int
emlxs_select_msiid(emlxs_hba_t *hba);
extern void
emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);

/*
 * Driver Entry Routines.
 */
static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
    cred_t *, int32_t *);
static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);


/*
 * FC_AL Transport Functions.
 */
static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
    fc_fca_bind_info_t *);
static void emlxs_fca_unbind_port(opaque_t);
static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
    uint32_t *, uint32_t);
static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);

static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
static int32_t emlxs_fca_notify(opaque_t, uint32_t);
static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);

/*
 * Driver Internal Functions.
 */

static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
#ifdef EMLXS_I386
#ifdef S11
static int32_t emlxs_quiesce(dev_info_t *);
#endif /* S11 */
#endif /* EMLXS_I386 */
static int32_t emlxs_hba_resume(dev_info_t *);
static int32_t emlxs_hba_suspend(dev_info_t *);
static int32_t emlxs_hba_detach(dev_info_t *);
static int32_t emlxs_hba_attach(dev_info_t *);
static void emlxs_lock_destroy(emlxs_hba_t *);
static void emlxs_lock_init(emlxs_hba_t *);

/* Power-management component descriptions (see pm-components(9P)). */
char *emlxs_pm_components[] = {
	"NAME=" DRIVER_NAME "000",
	"0=Device D3 State",
	"1=Device D0 State"
};


/*
 * Default emlx dma limits
 */
ddi_dma_lim_t emlxs_dma_lim = {
	(uint32_t)0,				/* dlim_addr_lo */
	(uint32_t)0xffffffff,			/* dlim_addr_hi */
	(uint_t)0x00ffffff,			/* dlim_cntr_max */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dlim_burstsizes */
	1,					/* dlim_minxfer */
	0x00ffffff				/* dlim_dmaspeed */
};

/*
 * Be careful when using these attributes; the defaults listed below are
 * (almost) the most general case, permitting allocation in almost any
 * way supported by the LightPulse family. The sole exception is the
 * alignment specified as requiring memory allocation on a 4-byte boundary;
 * the Lightpulse can DMA memory on any byte boundary.
 *
 * The LightPulse family currently is limited to 16M transfers;
 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
 */
ddi_dma_attr_t emlxs_dma_attr = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

/* As above, but allows the DDI framework to relax write ordering. */
ddi_dma_attr_t emlxs_dma_attr_ro = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	DDI_DMA_RELAXED_ORDERING		/* dma_attr_flags */
};

/* Attributes for buffers that must map to a single scatter/gather entry. */
ddi_dma_attr_t emlxs_dma_attr_1sg = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};

#if (EMLXS_MODREV >= EMLXS_MODREV3)
/* Attributes for FCIP response buffers (MODREV3+ transports only). */
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
	DMA_ATTR_V0,				/* dma_attr_version */
	(uint64_t)0,				/* dma_attr_addr_lo */
	(uint64_t)0xffffffffffffffff,		/* dma_attr_addr_hi */
	(uint64_t)0x00ffffff,			/* dma_attr_count_max */
	1,					/* dma_attr_align */
	DEFAULT_BURSTSIZE | BURST32 | BURST64,	/* dma_attr_burstsizes */
	1,					/* dma_attr_minxfer */
	(uint64_t)0x00ffffff,			/* dma_attr_maxxfer */
	(uint64_t)0xffffffff,			/* dma_attr_seg */
	1,					/* dma_attr_sgllen */
	1,					/* dma_attr_granular */
	0					/* dma_attr_flags */
};
#endif	/* >= EMLXS_MODREV3 */

/*
 * DDI access attributes for device
 */
ddi_device_acc_attr_t emlxs_dev_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* PCI is Little Endian */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * DDI access attributes for data
 */
ddi_device_acc_attr_t emlxs_data_acc_attr = {
	DDI_DEVICE_ATTR_V1,	/* devacc_attr_version */
	DDI_NEVERSWAP_ACC,	/* don't swap for Data */
	DDI_STRICTORDER_ACC,	/* devacc_attr_dataorder */
	DDI_DEFAULT_ACC		/* devacc_attr_access */
};

/*
 * Fill in the FC Transport structure,
 * as defined in the Fibre Channel Transport Programming Guide.
 * Exactly one of the four variants below is compiled in, selected by
 * the Leadville (fctl) transport revision this build targets.
 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_5,		/* fca_version, with SUN NPIV support */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	0,				/* fca_num_npivports */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* Physical port WWPN */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV5 */


#if (EMLXS_MODREV == EMLXS_MODREV4)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_4,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV4 */


#if (EMLXS_MODREV == EMLXS_MODREV3)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_3,		/* fca_version */
	MAX_VPORTS,			/* fca number of ports */
	sizeof (emlxs_buf_t),		/* fca pkt size */
	2048,				/* fca cmd max */
	&emlxs_dma_lim,			/* fca dma limits */
	0,				/* fca iblock, to be filled in later */
	&emlxs_dma_attr,		/* fca dma attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp cmd attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcp rsp attributes */
	&emlxs_dma_attr_ro,		/* fca dma fcp data attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcip cmd attributes */
	&emlxs_dma_attr_fcip_rsp,	/* fca dma fcip rsp attributes */
	&emlxs_dma_attr_1sg,		/* fca dma fcsm cmd attributes */
	&emlxs_dma_attr,		/* fca dma fcsm rsp attributes */
	&emlxs_data_acc_attr,		/* fca access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV3 */


#if (EMLXS_MODREV == EMLXS_MODREV2)
static fc_fca_tran_t emlxs_fca_tran = {
	FCTL_FCA_MODREV_2,		/* fca_version */
	MAX_VPORTS,			/* number of ports */
	sizeof (emlxs_buf_t),		/* pkt size */
	2048,				/* max cmds */
	&emlxs_dma_lim,			/* DMA limits */
	0,				/* iblock, to be filled in later */
	&emlxs_dma_attr,		/* dma attributes */
	&emlxs_data_acc_attr,		/* access attributes */
	emlxs_fca_bind_port,
	emlxs_fca_unbind_port,
	emlxs_fca_pkt_init,
	emlxs_fca_pkt_uninit,
	emlxs_fca_transport,
	emlxs_fca_get_cap,
	emlxs_fca_set_cap,
	emlxs_fca_get_map,
	emlxs_fca_transport,
	emlxs_fca_ub_alloc,
	emlxs_fca_ub_free,
	emlxs_fca_ub_release,
	emlxs_fca_pkt_abort,
	emlxs_fca_reset,
	emlxs_fca_port_manage,
	emlxs_fca_get_device,
	emlxs_fca_notify
};
#endif	/* EMLXS_MODREV2 */


/*
 * state pointer which the implementation uses as a place to
 * hang a set of per-driver structures;
 *
 */
void *emlxs_soft_state = NULL;

/*
 * Driver Global variables.
 */
int32_t emlxs_scsi_reset_delay = 3000;	/* milliseconds */

emlxs_device_t emlxs_device;

uint32_t emlxs_instance[MAX_FC_BRDS];	/* uses emlxs_device.lock */
uint32_t emlxs_instance_count = 0;	/* uses emlxs_device.lock */
uint32_t emlxs_instance_flag = 0;	/* uses emlxs_device.lock */
#define	EMLXS_FW_SHOW		0x00000001


/*
 * CB ops vector. Used for administration only.
 */
static struct cb_ops emlxs_cb_ops = {
	emlxs_open,	/* cb_open */
	emlxs_close,	/* cb_close */
	nodev,		/* cb_strategy */
	nodev,		/* cb_print */
	nodev,		/* cb_dump */
	nodev,		/* cb_read */
	nodev,		/* cb_write */
	emlxs_ioctl,	/* cb_ioctl */
	nodev,		/* cb_devmap */
	nodev,		/* cb_mmap */
	nodev,		/* cb_segmap */
	nochpoll,	/* cb_chpoll */
	ddi_prop_op,	/* cb_prop_op */
	0,		/* cb_stream */
#ifdef _LP64
	D_64BIT | D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#else
	D_HOTPLUG | D_MP | D_NEW,	/* cb_flag */
#endif
	CB_REV,		/* rev */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};

static struct dev_ops emlxs_ops = {
	DEVO_REV,	/* rev */
	0,		/* refcnt */
	emlxs_info,	/* getinfo */
	nulldev,	/* identify */
	nulldev,	/* probe */
	emlxs_attach,	/* attach */
	emlxs_detach,	/* detach */
	nodev,		/* reset */
	&emlxs_cb_ops,	/* devo_cb_ops */
	NULL,		/* devo_bus_ops */
	emlxs_power,	/* power ops */
#ifdef EMLXS_I386
#ifdef S11
	emlxs_quiesce,	/* quiesce */
#endif /* S11 */
#endif /* EMLXS_I386 */
};

#include <sys/modctl.h>
extern struct mod_ops mod_driverops;

#ifdef SAN_DIAG_SUPPORT
extern kmutex_t emlxs_sd_bucket_mutex;
extern sd_bucket_info_t emlxs_sd_bucket;
#endif /* SAN_DIAG_SUPPORT */

/*
 * Module linkage information for the kernel.
 */
static struct modldrv emlxs_modldrv = {
	&mod_driverops,	/* module type - driver */
	emlxs_name,	/* module name */
	&emlxs_ops,	/* driver ops */
};


/*
 * Driver module linkage structure
 */
static struct modlinkage emlxs_modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&emlxs_modldrv,	/* ml_linkage */
	NULL		/* end of driver linkage */
};


/* We only need to add entries for non-default return codes. */
/* Entries do not need to be in order. */
/* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */
/*	FC_EXPLN_NONE, FC_ACTION_RETRYABLE */

/* Maps firmware I/O status codes to Leadville packet completion codes. */
emlxs_xlat_err_t emlxs_iostat_tbl[] = {
/*	{f/w code, pkt_state, pkt_reason,	*/
/*		pkt_expln, pkt_action}		*/

	/* 0x00 - Do not remove */
	{IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x01 - Do not remove */
	{IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* 0x02 */
	{IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
		FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},

	/*
	 * This is a default entry.
	 * The real codes are written dynamically in emlxs_els.c
	 */
	/* 0x09 */
	{IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x10 */
	{IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x11 */
	{IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* Special error code */
	/* 0x12 */
	{IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x04 */
	{IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x05 */
	{IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
		FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

	/* CLASS 2 only */
	/* 0x06 */
	{IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},

	/* CLASS 2 only */
	/* 0x07 */
	{IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
		FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
};

#define	IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
557 558 559 /* We only need to add entries for non-default return codes. */ 560 /* Entries do not need to be in order. */ 561 /* Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED, */ 562 /* FC_EXPLN_NONE, FC_ACTION_RETRYABLE} */ 563 564 emlxs_xlat_err_t emlxs_ioerr_tbl[] = { 565 /* {f/w code, pkt_state, pkt_reason, */ 566 /* pkt_expln, pkt_action} */ 567 568 /* 0x01 */ 569 {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN, 570 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 571 572 /* 0x02 */ 573 {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 574 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 575 576 /* 0x04 */ 577 {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 578 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 579 580 /* 0x05 */ 581 {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 582 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 583 584 /* 0x06 */ 585 {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 586 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 587 588 /* 0x07 */ 589 {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED, 590 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 591 592 /* 0x08 */ 593 {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ, 594 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 595 596 /* 0x0B */ 597 {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 598 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 599 600 /* 0x0D */ 601 {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 602 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 603 604 /* 0x0E */ 605 {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR, 606 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 607 608 /* 0x0F */ 609 {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME, 610 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 611 612 /* 0x11 */ 613 {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM, 614 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 615 616 /* 0x13 */ 617 {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH, 618 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 619 620 /* 
0x14 */ 621 {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED, 622 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 623 624 /* 0x15 */ 625 {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 626 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 627 628 /* 0x16 */ 629 {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED, 630 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 631 632 /* 0x17 */ 633 {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT, 634 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 635 636 /* 0x18 */ 637 {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL, 638 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 639 640 /* 0x1A */ 641 {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 642 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 643 644 /* 0x21 */ 645 {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID, 646 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 647 648 /* Occurs at link down */ 649 /* 0x28 */ 650 {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE, 651 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 652 653 /* 0xF0 */ 654 {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT, 655 FC_EXPLN_NONE, FC_ACTION_RETRYABLE}, 656 }; 657 658 #define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t)) 659 660 661 662 emlxs_table_t emlxs_error_table[] = { 663 {IOERR_SUCCESS, "No error."}, 664 {IOERR_MISSING_CONTINUE, "Missing continue."}, 665 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."}, 666 {IOERR_INTERNAL_ERROR, "Internal error."}, 667 {IOERR_INVALID_RPI, "Invalid RPI."}, 668 {IOERR_NO_XRI, "No XRI."}, 669 {IOERR_ILLEGAL_COMMAND, "Illegal command."}, 670 {IOERR_XCHG_DROPPED, "Exchange dropped."}, 671 {IOERR_ILLEGAL_FIELD, "Illegal field."}, 672 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."}, 673 {IOERR_TX_DMA_FAILED, "TX DMA failed."}, 674 {IOERR_RX_DMA_FAILED, "RX DMA failed."}, 675 {IOERR_ILLEGAL_FRAME, "Illegal frame."}, 676 {IOERR_NO_RESOURCES, "No resources."}, 677 {IOERR_ILLEGAL_LENGTH, "Illegal length."}, 678 
{IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."}, 679 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."}, 680 {IOERR_ABORT_REQUESTED, "Abort requested."}, 681 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."}, 682 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."}, 683 {IOERR_RING_RESET, "Ring reset."}, 684 {IOERR_LINK_DOWN, "Link down."}, 685 {IOERR_CORRUPTED_DATA, "Corrupted data."}, 686 {IOERR_CORRUPTED_RPI, "Corrupted RPI."}, 687 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."}, 688 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."}, 689 {IOERR_DUP_FRAME, "Duplicate frame."}, 690 {IOERR_LINK_CONTROL_FRAME, "Link control frame."}, 691 {IOERR_BAD_HOST_ADDRESS, "Bad host address."}, 692 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."}, 693 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."}, 694 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."}, 695 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."}, 696 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."}, 697 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"}, 698 {IOERR_XRIBUF_MISSING, "XRI buffer missing"}, 699 {IOERR_ROFFSET_INVAL, "Relative offset invalid."}, 700 {IOERR_ROFFSET_MISSING, "Relative offset missing."}, 701 {IOERR_INSUF_BUFFER, "Buffer too small."}, 702 {IOERR_MISSING_SI, "ELS frame missing SI"}, 703 {IOERR_MISSING_ES, "Exhausted burst without ES"}, 704 {IOERR_INCOMP_XFER, "Transfer incomplete."}, 705 {IOERR_ABORT_TIMEOUT, "Abort timeout."} 706 707 }; /* emlxs_error_table */ 708 709 710 emlxs_table_t emlxs_state_table[] = { 711 {IOSTAT_SUCCESS, "Success."}, 712 {IOSTAT_FCP_RSP_ERROR, "FCP response error."}, 713 {IOSTAT_REMOTE_STOP, "Remote stop."}, 714 {IOSTAT_LOCAL_REJECT, "Local reject."}, 715 {IOSTAT_NPORT_RJT, "NPort reject."}, 716 {IOSTAT_FABRIC_RJT, "Fabric reject."}, 717 {IOSTAT_NPORT_BSY, "Nport busy."}, 718 {IOSTAT_FABRIC_BSY, "Fabric busy."}, 719 {IOSTAT_INTERMED_RSP, "Intermediate response."}, 720 {IOSTAT_LS_RJT, "LS reject."}, 721 {IOSTAT_CMD_REJECT, "Cmd reject."}, 722 
{IOSTAT_FCP_TGT_LENCHK, "TGT length check."}, 723 {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."}, 724 {IOSTAT_DATA_UNDERRUN, "Data underrun."}, 725 {IOSTAT_DATA_OVERRUN, "Data overrun."}, 726 {IOSTAT_RSP_INVALID, "Response Invalid."}, 727 728 }; /* emlxs_state_table */ 729 730 731 #ifdef MENLO_SUPPORT 732 emlxs_table_t emlxs_menlo_cmd_table[] = { 733 {MENLO_CMD_INITIALIZE, "MENLO_INIT"}, 734 {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"}, 735 {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"}, 736 {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"}, 737 {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"}, 738 {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"}, 739 740 {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"}, 741 {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"}, 742 {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"}, 743 {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"}, 744 {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"}, 745 {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"}, 746 {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"}, 747 {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"}, 748 {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"}, 749 750 {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"}, 751 {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"}, 752 {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"}, 753 754 {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"}, 755 {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"}, 756 757 {MENLO_CMD_RESET, "MENLO_RESET"}, 758 {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"} 759 760 }; /* emlxs_menlo_cmd_table */ 761 762 emlxs_table_t emlxs_menlo_rsp_table[] = { 763 {MENLO_RSP_SUCCESS, "SUCCESS"}, 764 {MENLO_ERR_FAILED, "FAILED"}, 765 {MENLO_ERR_INVALID_CMD, "INVALID_CMD"}, 766 {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"}, 767 {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"}, 768 {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"}, 769 {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"}, 770 {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"}, 771 {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"}, 772 {MENLO_ERR_INVALID_DATA, "INVALID_DATA"}, 773 
{MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"}, 774 {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"}, 775 {MENLO_ERR_INVALID_MASK, "INVALID_MASK"}, 776 {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"}, 777 {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"}, 778 {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"}, 779 {MENLO_ERR_BUSY, "BUSY"}, 780 781 }; /* emlxs_menlo_rsp_table */ 782 783 #endif /* MENLO_SUPPORT */ 784 785 786 emlxs_table_t emlxs_mscmd_table[] = { 787 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 788 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 789 {MS_GTIN, "MS_GTIN"}, 790 {MS_GIEL, "MS_GIEL"}, 791 {MS_GIET, "MS_GIET"}, 792 {MS_GDID, "MS_GDID"}, 793 {MS_GMID, "MS_GMID"}, 794 {MS_GFN, "MS_GFN"}, 795 {MS_GIELN, "MS_GIELN"}, 796 {MS_GMAL, "MS_GMAL"}, 797 {MS_GIEIL, "MS_GIEIL"}, 798 {MS_GPL, "MS_GPL"}, 799 {MS_GPT, "MS_GPT"}, 800 {MS_GPPN, "MS_GPPN"}, 801 {MS_GAPNL, "MS_GAPNL"}, 802 {MS_GPS, "MS_GPS"}, 803 {MS_GPSC, "MS_GPSC"}, 804 {MS_GATIN, "MS_GATIN"}, 805 {MS_GSES, "MS_GSES"}, 806 {MS_GPLNL, "MS_GPLNL"}, 807 {MS_GPLT, "MS_GPLT"}, 808 {MS_GPLML, "MS_GPLML"}, 809 {MS_GPAB, "MS_GPAB"}, 810 {MS_GNPL, "MS_GNPL"}, 811 {MS_GPNL, "MS_GPNL"}, 812 {MS_GPFCP, "MS_GPFCP"}, 813 {MS_GPLI, "MS_GPLI"}, 814 {MS_GNID, "MS_GNID"}, 815 {MS_RIELN, "MS_RIELN"}, 816 {MS_RPL, "MS_RPL"}, 817 {MS_RPLN, "MS_RPLN"}, 818 {MS_RPLT, "MS_RPLT"}, 819 {MS_RPLM, "MS_RPLM"}, 820 {MS_RPAB, "MS_RPAB"}, 821 {MS_RPFCP, "MS_RPFCP"}, 822 {MS_RPLI, "MS_RPLI"}, 823 {MS_DPL, "MS_DPL"}, 824 {MS_DPLN, "MS_DPLN"}, 825 {MS_DPLM, "MS_DPLM"}, 826 {MS_DPLML, "MS_DPLML"}, 827 {MS_DPLI, "MS_DPLI"}, 828 {MS_DPAB, "MS_DPAB"}, 829 {MS_DPALL, "MS_DPALL"} 830 831 }; /* emlxs_mscmd_table */ 832 833 834 emlxs_table_t emlxs_ctcmd_table[] = { 835 {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"}, 836 {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"}, 837 {SLI_CTNS_GA_NXT, "GA_NXT"}, 838 {SLI_CTNS_GPN_ID, "GPN_ID"}, 839 {SLI_CTNS_GNN_ID, "GNN_ID"}, 840 {SLI_CTNS_GCS_ID, "GCS_ID"}, 841 {SLI_CTNS_GFT_ID, "GFT_ID"}, 842 {SLI_CTNS_GSPN_ID, "GSPN_ID"}, 843 {SLI_CTNS_GPT_ID, "GPT_ID"}, 844 
	{SLI_CTNS_GID_PN, "GID_PN"},
	{SLI_CTNS_GID_NN, "GID_NN"},
	{SLI_CTNS_GIP_NN, "GIP_NN"},
	{SLI_CTNS_GIPA_NN, "GIPA_NN"},
	{SLI_CTNS_GSNN_NN, "GSNN_NN"},
	{SLI_CTNS_GNN_IP, "GNN_IP"},
	{SLI_CTNS_GIPA_IP, "GIPA_IP"},
	{SLI_CTNS_GID_FT, "GID_FT"},
	{SLI_CTNS_GID_PT, "GID_PT"},
	{SLI_CTNS_RPN_ID, "RPN_ID"},
	{SLI_CTNS_RNN_ID, "RNN_ID"},
	{SLI_CTNS_RCS_ID, "RCS_ID"},
	{SLI_CTNS_RFT_ID, "RFT_ID"},
	{SLI_CTNS_RSPN_ID, "RSPN_ID"},
	{SLI_CTNS_RPT_ID, "RPT_ID"},
	{SLI_CTNS_RIP_NN, "RIP_NN"},
	{SLI_CTNS_RIPA_NN, "RIPA_NN"},
	{SLI_CTNS_RSNN_NN, "RSNN_NN"},
	{SLI_CTNS_DA_ID, "DA_ID"},
	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */

}; /* emlxs_ctcmd_table */



/*
 * Code -> name table for CT-based remote management commands
 * (CT_OP_*), plus the generic CT accept/reject responses.
 * NOTE(review): presumably consumed by a lookup helper elsewhere in the
 * driver to decode opcodes in log messages -- the consumer is not
 * visible in this chunk.
 */
emlxs_table_t emlxs_rmcmd_table[] = {
	{SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
	{SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
	{CT_OP_GSAT, "RM_GSAT"},
	{CT_OP_GHAT, "RM_GHAT"},
	{CT_OP_GPAT, "RM_GPAT"},
	{CT_OP_GDAT, "RM_GDAT"},
	{CT_OP_GPST, "RM_GPST"},
	{CT_OP_GDP, "RM_GDP"},
	{CT_OP_GDPG, "RM_GDPG"},
	{CT_OP_GEPS, "RM_GEPS"},
	{CT_OP_GLAT, "RM_GLAT"},
	{CT_OP_SSAT, "RM_SSAT"},
	{CT_OP_SHAT, "RM_SHAT"},
	{CT_OP_SPAT, "RM_SPAT"},
	{CT_OP_SDAT, "RM_SDAT"},
	{CT_OP_SDP, "RM_SDP"},
	{CT_OP_SBBS, "RM_SBBS"},
	{CT_OP_RPST, "RM_RPST"},
	{CT_OP_VFW, "RM_VFW"},
	{CT_OP_DFW, "RM_DFW"},
	{CT_OP_RES, "RM_RES"},
	{CT_OP_RHD, "RM_RHD"},
	{CT_OP_UFW, "RM_UFW"},
	{CT_OP_RDP, "RM_RDP"},
	{CT_OP_GHDR, "RM_GHDR"},
	{CT_OP_CHD, "RM_CHD"},
	{CT_OP_SSR, "RM_SSR"},
	{CT_OP_RSAT, "RM_RSAT"},
	{CT_OP_WSAT, "RM_WSAT"},
	{CT_OP_RSAH, "RM_RSAH"},
	{CT_OP_WSAH, "RM_WSAH"},
	{CT_OP_RACT, "RM_RACT"},
	{CT_OP_WACT, "RM_WACT"},
	{CT_OP_RKT, "RM_RKT"},
	{CT_OP_WKT, "RM_WKT"},
	{CT_OP_SSC, "RM_SSC"},
	{CT_OP_QHBA, "RM_QHBA"},
	{CT_OP_GST, "RM_GST"},
	{CT_OP_GFTM, "RM_GFTM"},
	{CT_OP_SRL, "RM_SRL"},
	{CT_OP_SI, "RM_SI"},
	{CT_OP_SRC, "RM_SRC"},
	{CT_OP_GPB, "RM_GPB"},
	{CT_OP_SPB, "RM_SPB"},
	{CT_OP_RPB, "RM_RPB"},
	{CT_OP_RAPB, "RM_RAPB"},
	{CT_OP_GBC, "RM_GBC"},
	{CT_OP_GBS, "RM_GBS"},
	{CT_OP_SBS, "RM_SBS"},
	{CT_OP_GANI, "RM_GANI"},
	{CT_OP_GRV, "RM_GRV"},
	{CT_OP_GAPBS, "RM_GAPBS"},
	{CT_OP_APBC, "RM_APBC"},
	{CT_OP_GDT, "RM_GDT"},
	{CT_OP_GDLMI, "RM_GDLMI"},
	{CT_OP_GANA, "RM_GANA"},
	{CT_OP_GDLV, "RM_GDLV"},
	{CT_OP_GWUP, "RM_GWUP"},
	{CT_OP_GLM, "RM_GLM"},
	{CT_OP_GABS, "RM_GABS"},
	{CT_OP_SABS, "RM_SABS"},
	{CT_OP_RPR, "RM_RPR"},
	{SLI_CT_LOOPBACK, "LOOPBACK"}	/* Driver special */

}; /* emlxs_rmcmd_table */


/*
 * Code -> name table for FC Extended Link Service (ELS) commands,
 * used to decode ELS command codes into printable names.
 */
emlxs_table_t emlxs_elscmd_table[] = {
	{ELS_CMD_ACC, "ACC"},
	{ELS_CMD_LS_RJT, "LS_RJT"},
	{ELS_CMD_PLOGI, "PLOGI"},
	{ELS_CMD_FLOGI, "FLOGI"},
	{ELS_CMD_LOGO, "LOGO"},
	{ELS_CMD_ABTX, "ABTX"},
	{ELS_CMD_RCS, "RCS"},
	{ELS_CMD_RES, "RES"},
	{ELS_CMD_RSS, "RSS"},
	{ELS_CMD_RSI, "RSI"},
	{ELS_CMD_ESTS, "ESTS"},
	{ELS_CMD_ESTC, "ESTC"},
	{ELS_CMD_ADVC, "ADVC"},
	{ELS_CMD_RTV, "RTV"},
	{ELS_CMD_RLS, "RLS"},
	{ELS_CMD_ECHO, "ECHO"},
	{ELS_CMD_TEST, "TEST"},
	{ELS_CMD_RRQ, "RRQ"},
	{ELS_CMD_REC, "REC"},
	{ELS_CMD_PRLI, "PRLI"},
	{ELS_CMD_PRLO, "PRLO"},
	{ELS_CMD_SCN, "SCN"},
	{ELS_CMD_TPLS, "TPLS"},
	{ELS_CMD_GPRLO, "GPRLO"},
	{ELS_CMD_GAID, "GAID"},
	{ELS_CMD_FACT, "FACT"},
	{ELS_CMD_FDACT, "FDACT"},
	{ELS_CMD_NACT, "NACT"},
	{ELS_CMD_NDACT, "NDACT"},
	{ELS_CMD_QoSR, "QoSR"},
	{ELS_CMD_RVCS, "RVCS"},
	{ELS_CMD_PDISC, "PDISC"},
	{ELS_CMD_FDISC, "FDISC"},
	{ELS_CMD_ADISC, "ADISC"},
	{ELS_CMD_FARP, "FARP"},
	{ELS_CMD_FARPR, "FARPR"},
	{ELS_CMD_FAN, "FAN"},
	{ELS_CMD_RSCN, "RSCN"},
	{ELS_CMD_SCR, "SCR"},
	{ELS_CMD_LINIT, "LINIT"},
	{ELS_CMD_RNID, "RNID"},
	{ELS_CMD_AUTH, "AUTH"}

}; /* emlxs_elscmd_table */


/* Code -> name table for the driver's port operating modes. */
emlxs_table_t emlxs_mode_table[] = {
	{MODE_NONE, "NONE"},
	{MODE_INITIATOR, "INITIATOR"},
	{MODE_TARGET, "TARGET"},
	{MODE_ALL, "INITIATOR | TARGET"}
}; /* emlxs_mode_table */

/*
 *
 * Device Driver Entry Routines
 *
 */

#ifdef MODSYM_SUPPORT
static void emlxs_fca_modclose();
static int emlxs_fca_modopen();
emlxs_modsym_t emlxs_modsym;	/* uses emlxs_device.lock */

/*
 * Dynamically load the Leadville FC transport module (misc/fctl) and
 * resolve the FCA entry points the driver needs (fc_fca_attach,
 * fc_fca_detach, fc_fca_init) into emlxs_modsym.
 *
 * Returns 0 on success, 1 on failure.  On failure every partially
 * resolved state is torn down via emlxs_fca_modclose().  Idempotent:
 * returns immediately if the module is already open.
 */
static int
emlxs_fca_modopen()
{
	int err;

	/* Already open; nothing to do. */
	if (emlxs_modsym.mod_fctl) {
		return (0);
	}

	/* Leadville (fctl) */
	err = 0;
	emlxs_modsym.mod_fctl =
	    ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
	if (!emlxs_modsym.mod_fctl) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
		    DRIVER_NAME, err);

		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_attach is present */
	emlxs_modsym.fc_fca_attach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_detach is present */
	emlxs_modsym.fc_fca_detach =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
	    &err);
	if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
		goto failed;
	}

	err = 0;
	/* Check if the fctl fc_fca_init is present */
	emlxs_modsym.fc_fca_init =
	    (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
	if ((void *)emlxs_modsym.fc_fca_init == NULL) {
		cmn_err(CE_WARN,
		    "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
		goto failed;
	}

	return (0);

failed:

	emlxs_fca_modclose();

	return (1);


} /* emlxs_fca_modopen() */


static void
emlxs_fca_modclose()
{
	/*
	 * Release the misc/fctl module handle (if held) and clear all
	 * resolved entry points so stale pointers are never called.
	 */
	if (emlxs_modsym.mod_fctl) {
		(void) ddi_modclose(emlxs_modsym.mod_fctl);
		emlxs_modsym.mod_fctl = 0;
	}

	emlxs_modsym.fc_fca_attach = NULL;
	emlxs_modsym.fc_fca_detach = NULL;
	emlxs_modsym.fc_fca_init = NULL;

	return;

} /* emlxs_fca_modclose() */

#endif /* MODSYM_SUPPORT */



/*
 * Global driver initialization, called once when driver is loaded
 */
int
_init(void)
{
	int ret;

	/*
	 * First init call for this driver,
	 * so initialize the emlxs_dev_ctl structure.
	 */
	bzero(&emlxs_device, sizeof (emlxs_device));

#ifdef MODSYM_SUPPORT
	bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
#endif /* MODSYM_SUPPORT */

	mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);

	/* Record load time for logging/timestamp bookkeeping */
	(void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
	emlxs_device.drv_timestamp = ddi_get_time();

	/* Mark every board slot unused ((uint32_t)-1 == no instance) */
	for (ret = 0; ret < MAX_FC_BRDS; ret++) {
		emlxs_instance[ret] = (uint32_t)-1;
	}

	/*
	 * Provide for one ddiinst of the emlxs_dev_ctl structure
	 * for each possible board in the system.
	 */
	if ((ret = ddi_soft_state_init(&emlxs_soft_state,
	    sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
		cmn_err(CE_WARN,
		    "?%s: _init: ddi_soft_state_init failed. rval=%x",
		    DRIVER_NAME, ret);

		return (ret);
	}

#ifdef MODSYM_SUPPORT
	/* Open SFS */
	(void) emlxs_fca_modopen();
#endif /* MODSYM_SUPPORT */

	/* Setup devops for SFS */
	MODSYM(fc_fca_init)(&emlxs_ops);

	if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
		/* Unwind everything initialized above on failure */
		(void) ddi_soft_state_fini(&emlxs_soft_state);
#ifdef MODSYM_SUPPORT
		/* Close SFS */
		emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

		return (ret);
	}

#ifdef SAN_DIAG_SUPPORT
	mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
#endif /* SAN_DIAG_SUPPORT */

	return (ret);

} /* _init() */


/*
 * Called when driver is unloaded.
 */
int
_fini(void)
{
	int ret;

	/* mod_remove fails while any instance is still attached */
	if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
		return (ret);
	}
#ifdef MODSYM_SUPPORT
	/* Close SFS */
	emlxs_fca_modclose();
#endif /* MODSYM_SUPPORT */

	/*
	 * Destroy the soft state structure
	 */
	(void) ddi_soft_state_fini(&emlxs_soft_state);

	/* Destroy the global device lock */
	mutex_destroy(&emlxs_device.lock);

#ifdef SAN_DIAG_SUPPORT
	mutex_destroy(&emlxs_sd_bucket_mutex);
#endif /* SAN_DIAG_SUPPORT */

	return (ret);

} /* _fini() */


/* Standard _info(9E) entry point: report module information. */
int
_info(struct modinfo *modinfop)
{

	return (mod_info(&emlxs_modlinkage, modinfop));

} /* _info() */


/*
 * Attach an ddiinst of an emlx host adapter.
 * Allocate data structures, initialize the adapter and we're ready to fly.
 */
static int
emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	int ddiinst;
	int emlxinst;
	int rval;

	switch (cmd) {
	case DDI_ATTACH:
		/* If successful this will set EMLXS_PM_IN_ATTACH */
		rval = emlxs_hba_attach(dip);
		break;

	case DDI_RESUME:
		/* This will resume the driver */
		rval = emlxs_hba_resume(dip);
		break;

	default:
		rval = DDI_FAILURE;
	}

	if (rval == DDI_SUCCESS) {
		ddiinst = ddi_get_instance(dip);
		emlxinst = emlxs_get_instance(ddiinst);
		hba = emlxs_device.hba[emlxinst];

		/* (emlxs_hba_t *)-1 marks a failed attach slot */
		if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {

			/* Enable driver dump feature */
			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DUMP_SAFE;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	return (rval);

} /* emlxs_attach() */


/*
 * Detach/prepare driver to unload (see detach(9E)).
 */
static int
emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int emlxinst;
	int rval;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];

	if (hba == NULL) {
		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);

		return (DDI_FAILURE);
	}

	if (hba == (emlxs_hba_t *)-1) {
		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
		    DRIVER_NAME);

		return (DDI_FAILURE);
	}

	port = &PPORT;
	rval = DDI_SUCCESS;

	/* Refuse to detach while a driver dump is in progress */
	/* Check driver dump */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->flag & FC_DUMP_ACTIVE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Driver dump active.");

		return (DDI_FAILURE);
	}

#ifdef SFCT_SUPPORT
	/* Refuse while the target-mode (COMSTAR) port is still live */
	if ((port->flag & EMLXS_TGT_BOUND) &&
	    ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Target mode active.");

		return (DDI_FAILURE);
	}
#endif /* SFCT_SUPPORT */

	/* Refuse while still bound to the FC transport as initiator */
	if (port->flag & EMLXS_INI_BOUND) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Initiator mode active.");

		return (DDI_FAILURE);
	}

	/* Block driver dump while detach is in progress */
	hba->flag &= ~FC_DUMP_SAFE;

	mutex_exit(&EMLXS_PORT_LOCK);

	switch (cmd) {
	case DDI_DETACH:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_DETACH");

		rval = emlxs_hba_detach(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to detach.");
		}
		break;

	case DDI_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_SUSPEND");

		/* Suspend the driver */
		rval = emlxs_hba_suspend(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to suspend driver.");
		}
		break;

	default:
		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
		    DRIVER_NAME, cmd);
		rval = DDI_FAILURE;
	}

	if (rval == DDI_FAILURE) {
		/* Re-Enable driver dump feature */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_DUMP_SAFE;
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	return (rval);

} /* emlxs_detach() */


/* EMLXS_PORT_LOCK must be held when calling this */
extern void
emlxs_port_init(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;

	/* Initialize the base node */
	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
	port->node_base.nlp_Rpi = 0;
	port->node_base.nlp_DID = 0xffffff;
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	port->node_base.nlp_base = 1;
	port->node_count = 0;

	/* Unconfigured ports get an all-FF placeholder WWNN/WWPN */
	if (!(port->flag & EMLXS_PORT_ENABLED)) {
		uint8_t dummy_wwn[8] =
		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
	}

	/*
	 * Inherit the HBA's symbolic node/port names unless the port has
	 * its own configuration.  NOTE(review): strncpy with size-1 does
	 * not NUL-terminate on truncation; this relies on the snn/spn
	 * buffers having been zeroed beforehand.
	 */
	if (!(port->flag & EMLXS_PORT_CONFIG)) {
		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
		    (sizeof (port->snn)-1));
		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
		    (sizeof (port->spn)-1));
	}

	/* Clone HBA service parameters, then patch in this port's WWNs */
	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
	    sizeof (SERV_PARM));
	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
	    sizeof (NAME_TYPE));
	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
	    sizeof (NAME_TYPE));

	return;

} /* emlxs_port_init() */


/*
 * Disable PCIe Correctable Error reporting for this HBA (workaround;
 * see the BE2/BE3 caller in emlxs_fca_bind_port).  No-op if the device
 * has no PCIe capability.
 */
void
emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
{
	uint16_t	reg;

	if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
		return;
	}

	/* Turn off the Correctable Error Reporting */
	/* (the Device Control Register, bit 0).
 */
	reg = ddi_get16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr +
	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
	    PCIE_DEVCTL));

	/* Clear bit 0 (Correctable Error Reporting Enable) */
	reg &= ~1;

	(void) ddi_put16(hba->pci_acc_handle,
	    (uint16_t *)(hba->pci_addr +
	    hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
	    PCIE_DEVCTL),
	    reg);

	return;

} /* emlxs_disable_pcie_ce_err() */


/*
 * emlxs_fca_bind_port
 *
 * Arguments:
 *
 *	dip: the dev_info pointer for the ddiinst
 *	port_info: pointer to info handed back to the transport
 *	bind_info: pointer to info from the transport
 *
 * Return values: a port handle for this port, NULL for failure
 *
 */
static opaque_t
emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
    fc_fca_bind_info_t *bind_info)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_port_t *pport;
	emlxs_port_t *vport;
	int ddiinst;
	emlxs_vpd_t *vpd;
	emlxs_config_t *cfg;
	char *dptr;
	char buffer[16];
	uint32_t length;
	uint32_t len;
	char topology[32];
	char linkspeed[32];
	uint32_t linkstate;

	ddiinst = ddi_get_instance(dip);
	/*
	 * NOTE(review): hba is dereferenced below without a NULL check;
	 * presumably the transport only calls this for attached
	 * instances -- confirm before relying on it.
	 */
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;
	pport = &PPORT;

	ddiinst = hba->ddiinst;
	vpd = &VPD;
	cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Validate virtual-port numbers against NPIV support/limits */
	if (bind_info->port_num > 0) {
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    !(bind_info->port_npiv) ||
		    (bind_info->port_num > hba->vpi_max))
#elif (EMLXS_MODREV >= EMLXS_MODREV3)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    (bind_info->port_num > hba->vpi_high))
#endif
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_bind_port: Port %d not supported.",
			    bind_info->port_num);

			mutex_exit(&EMLXS_PORT_LOCK);

			port_info->pi_error = FC_OUTOFBOUNDS;
			return (NULL);
		}
	}

	/* Get true port pointer */
	port = &VPORT(bind_info->port_num);

	/* Make sure the port is not already bound to the transport */
	if (port->flag & EMLXS_INI_BOUND) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_bind_port: Port %d already bound. flag=%x",
		    bind_info->port_num, port->flag);

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_ALREADY;
		return (NULL);
	}

	if (!(pport->flag & EMLXS_INI_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_bind_port: Physical port does not support "
		    "initiator mode.");

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	/* Make sure port enable flag is set */
	/* Just in case fca_port_unbind is called just prior to fca_port_bind */
	/* without a driver attach or resume operation */
	port->flag |= EMLXS_PORT_ENABLED;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
	    bind_info->port_num, port_info, bind_info);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if (bind_info->port_npiv) {
		/* Leadville is telling us about a new virtual port */
		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
		if (port->snn[0] == 0) {
			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
			    (sizeof (port->snn)-1));

		}

		if (port->spn[0] == 0) {
			(void) snprintf((caddr_t)port->spn,
			    (sizeof (port->spn)-1), "%s VPort-%d",
			    (caddr_t)hba->spn, port->vpi);
		}
		port->flag |= EMLXS_PORT_CONFIG;
	}
#endif /* >= EMLXS_MODREV5 */

	/*
	 * Restricted login should apply both physical and
	 * virtual ports.
	 */
	if (cfg[CFG_VPORT_RESTRICTED].current) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	}

	/* Perform generic port initialization */
	emlxs_port_init(port);

	/* Perform SFS specific initialization */
	port->ulp_handle = bind_info->port_handle;
	port->ulp_statec_cb = bind_info->port_statec_cb;
	port->ulp_unsol_cb = bind_info->port_unsol_cb;

	/* Set the bound flag */
	port->flag |= EMLXS_INI_BOUND;
	hba->num_of_ports++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		/*
		 * PORT_LOCK is dropped around the VPI notify call and
		 * reacquired afterwards -- the notify path must not be
		 * called with the lock held.
		 */
		mutex_exit(&EMLXS_PORT_LOCK);
		(void) emlxs_vpi_port_bind_notify(port);
		mutex_enter(&EMLXS_PORT_LOCK);

		linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE) ?
		    FC_LINK_UP : FC_LINK_DOWN;
	} else {
		linkstate = hba->state;
	}

	/* Update the port info structure */

	/* Set the topology and state */
	if (port->mode == MODE_TARGET) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	} else if ((linkstate < FC_LINK_UP) ||
	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#ifdef MENLO_SUPPORT
	else if (hba->flag & FC_MENLO_MODE) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#endif /* MENLO_SUPPORT */
	else {
		/* Check for loop topology */
		if (hba->topology == TOPOLOGY_LOOP) {
			port_info->pi_port_state = FC_STATE_LOOP;
			(void) strlcpy(topology, ", loop", sizeof (topology));

			if (hba->flag & FC_FABRIC_ATTACHED) {
				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
			} else {
				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
			}
		} else {
			port_info->pi_topology = FC_TOP_FABRIC;
			port_info->pi_port_state = FC_STATE_ONLINE;
			(void) strlcpy(topology, ", fabric",
			    sizeof (topology));
		}

		/* Set the link speed */
		switch (hba->linkspeed) {
		case 0:
			(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;

		case LA_1GHZ_LINK:
			(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;
		case LA_2GHZ_LINK:
			(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
			break;
		case LA_4GHZ_LINK:
			(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
			break;
		case LA_8GHZ_LINK:
			(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
			break;
		case LA_10GHZ_LINK:
			(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
			break;
		case LA_16GHZ_LINK:
			(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
			break;
		case LA_32GHZ_LINK:
			(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_32GBIT_SPEED;
			break;
		default:
			(void) snprintf(linkspeed, sizeof (linkspeed),
			    "unknown(0x%x)", hba->linkspeed);
			break;
		}

		if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
			/* Adjusting port context for link up messages */
			vport = port;
			port = &PPORT;
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s, initiator",
				    linkspeed, topology);
			} else if (!(hba->flag & FC_NPIV_LINKUP)) {
				hba->flag |= FC_NPIV_LINKUP;
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg,
				    "%s%s, initiator", linkspeed, topology);
			}
			port = vport;
		}
	}

	/* PCIE Correctable Error Reporting workaround */
	if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
	    (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
	    (bind_info->port_num == 0)) {
		emlxs_disable_pcie_ce_err(hba);
	}

	/* Save initial state */
	port->ulp_statec = port_info->pi_port_state;

	/*
	 * The transport needs a copy of the common service parameters
	 * for this port. The transport can get any updates through
	 * the getcap entry point.
	 */
	bcopy((void *) &port->sparam,
	    (void *) &port_info->pi_login_params.common_service,
	    sizeof (SERV_PARM));

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Swap the service parameters for ULP */
	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
	    common_service);
#endif /* EMLXS_MODREV2X */

	port_info->pi_login_params.common_service.btob_credit = 0xffff;

	bcopy((void *) &port->wwnn,
	    (void *) &port_info->pi_login_params.node_ww_name,
	    sizeof (NAME_TYPE));

	bcopy((void *) &port->wwpn,
	    (void *) &port_info->pi_login_params.nport_ww_name,
	    sizeof (NAME_TYPE));

	/*
	 * We need to turn off CLASS2 support.
	 * Otherwise, FC transport will use CLASS2 as default class
	 * and never try with CLASS3.
	 */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
#if (EMLXS_MODREVX >= EMLXS_MODREV3X)
	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
	}
#else	/* EMLXS_SPARC or EMLXS_MODREV2X */
	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
	}
#endif	/* >= EMLXS_MODREV3X */
#endif	/* >= EMLXS_MODREV3 */


#if (EMLXS_MODREV <= EMLXS_MODREV2)
	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
		port_info->pi_login_params.class_1.data[0] &= ~0x80;
	}

	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
		port_info->pi_login_params.class_2.data[0] &= ~0x80;
	}
#endif	/* <= EMLXS_MODREV2 */

	/* Additional parameters */
	port_info->pi_s_id.port_id = port->did;
	port_info->pi_s_id.priv_lilp_posit = 0;
	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;

	/* Initialize the RNID parameters */
	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));

	(void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
	    (sizeof (port_info->pi_rnid_params.params.global_id)-1),
	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

	port_info->pi_rnid_params.params.unit_type = RNID_HBA;
	port_info->pi_rnid_params.params.port_id = port->did;
	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;

	/* Initialize the port attributes */
	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));

	(void) strncpy(port_info->pi_attrs.manufacturer,
	    hba->model_info.manufacturer,
	    (sizeof (port_info->pi_attrs.manufacturer)-1));

	port_info->pi_rnid_params.status = FC_SUCCESS;

	(void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
	    (sizeof (port_info->pi_attrs.serial_number)-1));

	(void) snprintf(port_info->pi_attrs.firmware_version,
	    (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
	    vpd->fw_version, vpd->fw_label);

#ifdef EMLXS_I386
	(void) snprintf(port_info->pi_attrs.option_rom_version,
	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
	    "Boot:%s", vpd->boot_version);
#else	/* EMLXS_SPARC */
	(void) snprintf(port_info->pi_attrs.option_rom_version,
	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
#endif	/* EMLXS_I386 */

	(void) snprintf(port_info->pi_attrs.driver_version,
	    (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
	    emlxs_version, emlxs_revision);

	(void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
	    (sizeof (port_info->pi_attrs.driver_name)-1));

	port_info->pi_attrs.vendor_specific_id =
	    (hba->model_info.device_id << 16) | hba->model_info.vendor_id;

	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);

	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	port_info->pi_rnid_params.params.num_attached = 0;

	if ((hba->model_info.chip & EMLXS_LANCER_CHIPS) != 0) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/*
		 * Copy the WWPN as a string into the local buffer.
		 * Each pass consumes one byte and emits two hex chars
		 * (i is advanced twice), so 8 WWPN bytes fill buffer[16].
		 */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = 0;
#if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))

	} else if (hba->flag & FC_NPIV_ENABLED) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/* Copy the WWPN as a string into the local buffer */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
#endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */

	} else {
		/* Copy the serial number string (right most 16 chars) */
		/* into the right justified local buffer */
		bzero(buffer, sizeof (buffer));
		length = strlen(vpd->serial_num);
		len = (length > 16) ? 16 : length;
		bcopy(&vpd->serial_num[(length - len)],
		    &buffer[(sizeof (buffer) - len)], len);

		port_info->pi_attrs.hba_fru_details.port_index =
		    vpd->port_index;
	}

	/* Pack buffer[0..7] into fru_details.high (byte-swapped) */
	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
	dptr[0] = buffer[0];
	dptr[1] = buffer[1];
	dptr[2] = buffer[2];
	dptr[3] = buffer[3];
	dptr[4] = buffer[4];
	dptr[5] = buffer[5];
	dptr[6] = buffer[6];
	dptr[7] = buffer[7];
	port_info->pi_attrs.hba_fru_details.high =
	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);

	/* Pack buffer[8..15] into fru_details.low (byte-swapped) */
	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
	dptr[0] = buffer[8];
	dptr[1] = buffer[9];
	dptr[2] = buffer[10];
	dptr[3] = buffer[11];
	dptr[4] = buffer[12];
	dptr[5] = buffer[13];
	dptr[6] = buffer[14];
	dptr[7] = buffer[15];
	port_info->pi_attrs.hba_fru_details.low =
	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
#endif	/* >= EMLXS_MODREV4 */

	(void) snprintf(port_info->pi_attrs.hardware_version,
	    (sizeof (port_info->pi_attrs.hardware_version)-1),
	    "%x", vpd->biuRev);

	/* Set the hba speed limit */
	if (vpd->link_speed & LMT_32GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_32GBIT;
	}
	if (vpd->link_speed & LMT_16GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_16GBIT;
	}
	if (vpd->link_speed & LMT_10GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_10GBIT;
	}
	if (vpd->link_speed & LMT_8GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
	}
	if (vpd->link_speed & LMT_4GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
	}
	if (vpd->link_speed & LMT_2GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
	}
	if (vpd->link_speed & LMT_1GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
	}

	/* Set the hba model info */
	(void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
	    (sizeof (port_info->pi_attrs.model)-1));
	(void) strncpy(port_info->pi_attrs.model_description,
	    hba->model_info.model_desc,
	    (sizeof (port_info->pi_attrs.model_description)-1));


	/* Log information */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_num = %d", bind_info->port_num);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_handle = %p", bind_info->port_handle);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_npiv = %d", bind_info->port_npiv);
#endif /* >= EMLXS_MODREV5 */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_topology = %x", port_info->pi_topology);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_error = %x", port_info->pi_error);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_port_state = %x", port_info->pi_port_state);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: port_id = %x", port_info->pi_s_id.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: priv_lilp_posit = %x",
	    port_info->pi_s_id.priv_lilp_posit);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hard_addr = %x",
	    port_info->pi_hard_addr.hard_addr);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.status = %x",
	    port_info->pi_rnid_params.status);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.global_id = %16s",
	    port_info->pi_rnid_params.params.global_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.unit_type = %x",
	    port_info->pi_rnid_params.params.unit_type);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.port_id = %x",
	    port_info->pi_rnid_params.params.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.num_attached = %x",
	    port_info->pi_rnid_params.params.num_attached);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_version = %x",
	    port_info->pi_rnid_params.params.ip_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.udp_port = %x",
	    port_info->pi_rnid_params.params.udp_port);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_addr = %16s",
	    port_info->pi_rnid_params.params.ip_addr);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.spec_id_resv = %x",
	    port_info->pi_rnid_params.params.specific_id_resv);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.topo_flags = %x",
	    port_info->pi_rnid_params.params.topo_flags);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: manufacturer = %s",
	    port_info->pi_attrs.manufacturer);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: serial_num = %s",
	    port_info->pi_attrs.serial_number);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model = %s", port_info->pi_attrs.model);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model_description = %s",
	    port_info->pi_attrs.model_description);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hardware_version = %s",
	    port_info->pi_attrs.hardware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_version = %s",
	    port_info->pi_attrs.driver_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: option_rom_version = %s",
	    port_info->pi_attrs.option_rom_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: firmware_version = %s",
	    port_info->pi_attrs.firmware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_name = %s",
	    port_info->pi_attrs.driver_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: vendor_specific_id = %x",
	    port_info->pi_attrs.vendor_specific_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_cos = %x",
	    port_info->pi_attrs.supported_cos);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_speed = %x",
	    port_info->pi_attrs.supported_speed);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: max_frame_size = %x",
	    port_info->pi_attrs.max_frame_size);

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_port_index = %x",
	    port_info->pi_attrs.hba_fru_details.port_index);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_high = %llx",
	    port_info->pi_attrs.hba_fru_details.high);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_low = %llx",
	    port_info->pi_attrs.hba_fru_details.low);
#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_node_name = %s",
	    port_info->pi_attrs.sym_node_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_port_name = %s",
	    port_info->pi_attrs.sym_port_name);
#endif	/* >= EMLXS_MODREV4 */

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		emlxs_fct_bind_port(port);
	}
#endif /* SFCT_SUPPORT */

	return ((opaque_t)port);

} /* emlxs_fca_bind_port() */


/*
 * Undo emlxs_fca_bind_port(): notify the VPI layer (SLI4), flush port
 * nodes, unregister the VPI (SLI3 NPIV), clear the ULP callback state,
 * and detach from the target-mode framework if target bound.
 */
static void
emlxs_fca_unbind_port(opaque_t fca_port_handle)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_vpi_port_unbind_notify(port, 1);
	}

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
	}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
	    (hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);
	if (port->flag & EMLXS_INI_BOUND) {
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
#endif
		port->flag &= ~EMLXS_INI_BOUND;
		hba->num_of_ports--;

		/*
		 * Wait until ulp callback interface is idle.
		 * The lock is dropped while sleeping so the callback
		 * path can make progress and clear ulp_busy.
		 */
		while (port->ulp_busy) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(500000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		port->ulp_handle = 0;
		port->ulp_statec = FC_STATE_OFFLINE;
		port->ulp_statec_cb = NULL;
		port->ulp_unsol_cb = NULL;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef SFCT_SUPPORT
	/* Check if port was target bound */
	if (port->flag & EMLXS_TGT_BOUND) {
		emlxs_fct_unbind_port(port);
	}
#endif /* SFCT_SUPPORT */

	return;

} /*
emlxs_fca_unbind_port() */ 2160 2161 2162 /*ARGSUSED*/ 2163 extern int 2164 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep) 2165 { 2166 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2167 emlxs_hba_t *hba = HBA; 2168 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private; 2169 2170 if (!sbp) { 2171 return (FC_FAILURE); 2172 } 2173 bzero((void *)sbp, sizeof (emlxs_buf_t)); 2174 2175 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg)); 2176 sbp->pkt_flags = 2177 PACKET_VALID | PACKET_ULP_OWNED; 2178 sbp->port = port; 2179 sbp->pkt = pkt; 2180 sbp->iocbq.sbp = sbp; 2181 2182 return (FC_SUCCESS); 2183 2184 } /* emlxs_fca_pkt_init() */ 2185 2186 2187 2188 static void 2189 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp) 2190 { 2191 emlxs_hba_t *hba = HBA; 2192 emlxs_config_t *cfg = &CFG; 2193 fc_packet_t *pkt = PRIV2PKT(sbp); 2194 2195 mutex_enter(&sbp->mtx); 2196 2197 /* Reinitialize */ 2198 sbp->pkt = pkt; 2199 sbp->port = port; 2200 sbp->bmp = NULL; 2201 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED); 2202 sbp->iotag = 0; 2203 sbp->ticks = 0; 2204 sbp->abort_attempts = 0; 2205 sbp->fpkt = NULL; 2206 sbp->flush_count = 0; 2207 sbp->next = NULL; 2208 2209 if (port->mode == MODE_INITIATOR) { 2210 sbp->node = NULL; 2211 sbp->did = 0; 2212 sbp->lun = EMLXS_LUN_NONE; 2213 sbp->class = 0; 2214 sbp->channel = NULL; 2215 } 2216 2217 bzero((void *)&sbp->iocbq, sizeof (IOCBQ)); 2218 sbp->iocbq.sbp = sbp; 2219 2220 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp || 2221 ddi_in_panic()) { 2222 sbp->pkt_flags |= PACKET_POLLED; 2223 } 2224 2225 /* Prepare the fc packet */ 2226 pkt->pkt_state = FC_PKT_SUCCESS; 2227 pkt->pkt_reason = 0; 2228 pkt->pkt_action = 0; 2229 pkt->pkt_expln = 0; 2230 pkt->pkt_data_resid = 0; 2231 pkt->pkt_resp_resid = 0; 2232 2233 /* Make sure all pkt's have a proper timeout */ 2234 if (!cfg[CFG_TIMEOUT_ENABLE].current) { 2235 /* This disables all IOCB on chip timeouts */ 
2236 pkt->pkt_timeout = 0x80000000; 2237 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) { 2238 pkt->pkt_timeout = 60; 2239 } 2240 2241 /* Clear the response buffer */ 2242 if (pkt->pkt_rsplen) { 2243 bzero(pkt->pkt_resp, pkt->pkt_rsplen); 2244 } 2245 2246 mutex_exit(&sbp->mtx); 2247 2248 return; 2249 2250 } /* emlxs_initialize_pkt() */ 2251 2252 2253 2254 /* 2255 * We may not need this routine 2256 */ 2257 /*ARGSUSED*/ 2258 extern int 2259 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt) 2260 { 2261 emlxs_buf_t *sbp = PKT2PRIV(pkt); 2262 2263 if (!sbp) { 2264 return (FC_FAILURE); 2265 } 2266 2267 if (!(sbp->pkt_flags & PACKET_VALID)) { 2268 return (FC_FAILURE); 2269 } 2270 sbp->pkt_flags &= ~PACKET_VALID; 2271 mutex_destroy(&sbp->mtx); 2272 2273 return (FC_SUCCESS); 2274 2275 } /* emlxs_fca_pkt_uninit() */ 2276 2277 2278 static int 2279 emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2280 { 2281 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2282 emlxs_hba_t *hba = HBA; 2283 int32_t rval; 2284 emlxs_config_t *cfg = &CFG; 2285 2286 if (!(port->flag & EMLXS_INI_BOUND)) { 2287 return (FC_CAP_ERROR); 2288 } 2289 2290 if (strcmp(cap, FC_NODE_WWN) == 0) { 2291 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2292 "fca_get_cap: FC_NODE_WWN"); 2293 2294 bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE)); 2295 rval = FC_CAP_FOUND; 2296 2297 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) { 2298 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2299 "fca_get_cap: FC_LOGIN_PARAMS"); 2300 2301 /* 2302 * We need to turn off CLASS2 support. 2303 * Otherwise, FC transport will use CLASS2 as default class 2304 * and never try with CLASS3. 
2305 */ 2306 hba->sparam.cls2.classValid = 0; 2307 2308 bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM)); 2309 2310 rval = FC_CAP_FOUND; 2311 2312 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) { 2313 int32_t *num_bufs; 2314 2315 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2316 "fca_get_cap: FC_CAP_UNSOL_BUF (%d)", 2317 cfg[CFG_UB_BUFS].current); 2318 2319 num_bufs = (int32_t *)ptr; 2320 2321 /* We multiply by MAX_VPORTS because ULP uses a */ 2322 /* formula to calculate ub bufs from this */ 2323 *num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS); 2324 2325 rval = FC_CAP_FOUND; 2326 2327 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) { 2328 int32_t *size; 2329 2330 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2331 "fca_get_cap: FC_CAP_PAYLOAD_SIZE"); 2332 2333 size = (int32_t *)ptr; 2334 *size = -1; 2335 rval = FC_CAP_FOUND; 2336 2337 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) { 2338 fc_reset_action_t *action; 2339 2340 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2341 "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR"); 2342 2343 action = (fc_reset_action_t *)ptr; 2344 *action = FC_RESET_RETURN_ALL; 2345 rval = FC_CAP_FOUND; 2346 2347 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) { 2348 fc_dma_behavior_t *behavior; 2349 2350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2351 "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF"); 2352 2353 behavior = (fc_dma_behavior_t *)ptr; 2354 *behavior = FC_ALLOW_STREAMING; 2355 rval = FC_CAP_FOUND; 2356 2357 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) { 2358 fc_fcp_dma_t *fcp_dma; 2359 2360 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2361 "fca_get_cap: FC_CAP_FCP_DMA"); 2362 2363 fcp_dma = (fc_fcp_dma_t *)ptr; 2364 *fcp_dma = FC_DVMA_SPACE; 2365 rval = FC_CAP_FOUND; 2366 2367 } else { 2368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2369 "fca_get_cap: Unknown capability. 
[%s]", cap); 2370 2371 rval = FC_CAP_ERROR; 2372 2373 } 2374 2375 return (rval); 2376 2377 } /* emlxs_fca_get_cap() */ 2378 2379 2380 2381 static int 2382 emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr) 2383 { 2384 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2385 2386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2387 "fca_set_cap: cap=[%s] arg=%p", cap, ptr); 2388 2389 return (FC_CAP_ERROR); 2390 2391 } /* emlxs_fca_set_cap() */ 2392 2393 2394 static opaque_t 2395 emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id) 2396 { 2397 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2398 2399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2400 "fca_get_device: did=%x", d_id.port_id); 2401 2402 return (NULL); 2403 2404 } /* emlxs_fca_get_device() */ 2405 2406 2407 static int32_t 2408 emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd) 2409 { 2410 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2411 2412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x", 2413 cmd); 2414 2415 return (FC_SUCCESS); 2416 2417 } /* emlxs_fca_notify */ 2418 2419 2420 2421 static int 2422 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf) 2423 { 2424 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2425 emlxs_hba_t *hba = HBA; 2426 uint32_t lilp_length; 2427 2428 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2429 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf, 2430 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2], 2431 port->alpa_map[3], port->alpa_map[4]); 2432 2433 if (!(port->flag & EMLXS_INI_BOUND)) { 2434 return (FC_NOMAP); 2435 } 2436 2437 if (hba->topology != TOPOLOGY_LOOP) { 2438 return (FC_NOMAP); 2439 } 2440 2441 /* Check if alpa map is available */ 2442 if (port->alpa_map[0] != 0) { 2443 mapbuf->lilp_magic = MAGIC_LILP; 2444 } else { /* No LILP map available */ 2445 2446 /* Set lilp_magic to MAGIC_LISA and this will */ 2447 /* trigger an ALPA scan in ULP */ 2448 
mapbuf->lilp_magic = MAGIC_LISA; 2449 } 2450 2451 mapbuf->lilp_myalpa = port->did; 2452 2453 /* The first byte of the alpa_map is the lilp map length */ 2454 /* Add one to include the lilp length byte itself */ 2455 lilp_length = (uint32_t)port->alpa_map[0] + 1; 2456 2457 /* Make sure the max transfer is 128 bytes */ 2458 if (lilp_length > 128) { 2459 lilp_length = 128; 2460 } 2461 2462 /* We start copying from the lilp_length field */ 2463 /* in order to get a word aligned address */ 2464 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length, 2465 lilp_length); 2466 2467 return (FC_SUCCESS); 2468 2469 } /* emlxs_fca_get_map() */ 2470 2471 2472 2473 extern int 2474 emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt) 2475 { 2476 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2477 emlxs_hba_t *hba = HBA; 2478 emlxs_buf_t *sbp; 2479 uint32_t rval; 2480 uint32_t pkt_flags; 2481 2482 /* Validate packet */ 2483 sbp = PKT2PRIV(pkt); 2484 2485 /* Make sure adapter is online */ 2486 if (!(hba->flag & FC_ONLINE_MODE) && 2487 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2488 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 2489 "Adapter offline."); 2490 2491 rval = (hba->flag & FC_ONLINING_MODE) ? 2492 FC_TRAN_BUSY : FC_OFFLINE; 2493 return (rval); 2494 } 2495 2496 /* Make sure ULP was told that the port was online */ 2497 if ((port->ulp_statec == FC_STATE_OFFLINE) && 2498 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2499 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg, 2500 "Port offline."); 2501 2502 return (FC_OFFLINE); 2503 } 2504 2505 if (sbp->port != port) { 2506 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2507 "Invalid port handle. sbp=%p port=%p flags=%x", sbp, 2508 sbp->port, sbp->pkt_flags); 2509 return (FC_BADPACKET); 2510 } 2511 2512 if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) { 2513 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2514 "Invalid packet flags. 
sbp=%p port=%p flags=%x", sbp, 2515 sbp->port, sbp->pkt_flags); 2516 return (FC_BADPACKET); 2517 } 2518 2519 #ifdef SFCT_SUPPORT 2520 if ((port->mode == MODE_TARGET) && !sbp->fct_cmd && 2521 !(sbp->pkt_flags & PACKET_ALLOCATED)) { 2522 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2523 "Packet blocked. Target mode."); 2524 return (FC_TRANSPORT_ERROR); 2525 } 2526 #endif /* SFCT_SUPPORT */ 2527 2528 #ifdef IDLE_TIMER 2529 emlxs_pm_busy_component(hba); 2530 #endif /* IDLE_TIMER */ 2531 2532 /* Prepare the packet for transport */ 2533 emlxs_initialize_pkt(port, sbp); 2534 2535 /* Save a copy of the pkt flags. */ 2536 /* We will check the polling flag later */ 2537 pkt_flags = sbp->pkt_flags; 2538 2539 /* Send the packet */ 2540 switch (pkt->pkt_tran_type) { 2541 case FC_PKT_FCP_READ: 2542 case FC_PKT_FCP_WRITE: 2543 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags); 2544 break; 2545 2546 case FC_PKT_IP_WRITE: 2547 case FC_PKT_BROADCAST: 2548 rval = emlxs_send_ip(port, sbp); 2549 break; 2550 2551 case FC_PKT_EXCHANGE: 2552 switch (pkt->pkt_cmd_fhdr.type) { 2553 case FC_TYPE_SCSI_FCP: 2554 rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags); 2555 break; 2556 2557 case FC_TYPE_FC_SERVICES: 2558 rval = emlxs_send_ct(port, sbp); 2559 break; 2560 2561 #ifdef MENLO_SUPPORT 2562 case EMLXS_MENLO_TYPE: 2563 rval = emlxs_send_menlo(port, sbp); 2564 break; 2565 #endif /* MENLO_SUPPORT */ 2566 2567 default: 2568 rval = emlxs_send_els(port, sbp); 2569 } 2570 break; 2571 2572 case FC_PKT_OUTBOUND: 2573 switch (pkt->pkt_cmd_fhdr.type) { 2574 #ifdef SFCT_SUPPORT 2575 case FC_TYPE_SCSI_FCP: 2576 rval = emlxs_send_fct_status(port, sbp); 2577 break; 2578 2579 case FC_TYPE_BASIC_LS: 2580 rval = emlxs_send_fct_abort(port, sbp); 2581 break; 2582 #endif /* SFCT_SUPPORT */ 2583 2584 case FC_TYPE_FC_SERVICES: 2585 rval = emlxs_send_ct_rsp(port, sbp); 2586 break; 2587 #ifdef MENLO_SUPPORT 2588 case EMLXS_MENLO_TYPE: 2589 rval = emlxs_send_menlo(port, sbp); 2590 break; 2591 #endif /* 
MENLO_SUPPORT */ 2592 2593 default: 2594 rval = emlxs_send_els_rsp(port, sbp); 2595 } 2596 break; 2597 2598 default: 2599 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 2600 "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type); 2601 rval = FC_TRANSPORT_ERROR; 2602 break; 2603 } 2604 2605 /* Check if send was not successful */ 2606 if (rval != FC_SUCCESS) { 2607 /* Return packet to ULP */ 2608 mutex_enter(&sbp->mtx); 2609 sbp->pkt_flags |= PACKET_ULP_OWNED; 2610 mutex_exit(&sbp->mtx); 2611 2612 return (rval); 2613 } 2614 2615 /* Check if this packet should be polled for completion before */ 2616 /* returning. This check must be done with a saved copy of the */ 2617 /* pkt_flags because the packet itself could already be freed from */ 2618 /* memory if it was not polled. */ 2619 if (pkt_flags & PACKET_POLLED) { 2620 emlxs_poll(port, sbp); 2621 } 2622 2623 return (FC_SUCCESS); 2624 2625 } /* emlxs_fca_transport() */ 2626 2627 2628 2629 static void 2630 emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp) 2631 { 2632 emlxs_hba_t *hba = HBA; 2633 fc_packet_t *pkt = PRIV2PKT(sbp); 2634 clock_t timeout; 2635 clock_t time; 2636 CHANNEL *cp; 2637 int in_panic = 0; 2638 2639 mutex_enter(&EMLXS_PORT_LOCK); 2640 hba->io_poll_count++; 2641 mutex_exit(&EMLXS_PORT_LOCK); 2642 2643 /* Check for panic situation */ 2644 cp = (CHANNEL *)sbp->channel; 2645 2646 if (ddi_in_panic()) { 2647 in_panic = 1; 2648 /* 2649 * In panic situations there will be one thread with 2650 * no interrrupts (hard or soft) and no timers 2651 */ 2652 2653 /* 2654 * We must manually poll everything in this thread 2655 * to keep the driver going. 2656 */ 2657 2658 /* Keep polling the chip until our IO is completed */ 2659 /* Driver's timer will not function during panics. */ 2660 /* Therefore, timer checks must be performed manually. 
*/ 2661 (void) drv_getparm(LBOLT, &time); 2662 timeout = time + drv_usectohz(1000000); 2663 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2664 EMLXS_SLI_POLL_INTR(hba); 2665 (void) drv_getparm(LBOLT, &time); 2666 2667 /* Trigger timer checks periodically */ 2668 if (time >= timeout) { 2669 emlxs_timer_checks(hba); 2670 timeout = time + drv_usectohz(1000000); 2671 } 2672 } 2673 } else { 2674 /* Wait for IO completion */ 2675 /* The driver's timer will detect */ 2676 /* any timeout and abort the I/O. */ 2677 mutex_enter(&EMLXS_PKT_LOCK); 2678 while (!(sbp->pkt_flags & PACKET_COMPLETED)) { 2679 cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK); 2680 } 2681 mutex_exit(&EMLXS_PKT_LOCK); 2682 } 2683 2684 /* Check for fcp reset pkt */ 2685 if (sbp->pkt_flags & PACKET_FCP_RESET) { 2686 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2687 /* Flush the IO's on the chipq */ 2688 (void) emlxs_chipq_node_flush(port, 2689 &hba->chan[hba->channel_fcp], 2690 sbp->node, sbp); 2691 } else { 2692 /* Flush the IO's on the chipq for this lun */ 2693 (void) emlxs_chipq_lun_flush(port, 2694 sbp->node, sbp->lun, sbp); 2695 } 2696 2697 if (sbp->flush_count == 0) { 2698 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2699 goto done; 2700 } 2701 2702 /* Set the timeout so the flush has time to complete */ 2703 timeout = emlxs_timeout(hba, 60); 2704 (void) drv_getparm(LBOLT, &time); 2705 while ((time < timeout) && sbp->flush_count > 0) { 2706 delay(drv_usectohz(500000)); 2707 (void) drv_getparm(LBOLT, &time); 2708 } 2709 2710 if (sbp->flush_count == 0) { 2711 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2712 goto done; 2713 } 2714 2715 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2716 "sbp=%p flush_count=%d. 
Waiting...", sbp, 2717 sbp->flush_count); 2718 2719 /* Let's try this one more time */ 2720 2721 if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) { 2722 /* Flush the IO's on the chipq */ 2723 (void) emlxs_chipq_node_flush(port, 2724 &hba->chan[hba->channel_fcp], 2725 sbp->node, sbp); 2726 } else { 2727 /* Flush the IO's on the chipq for this lun */ 2728 (void) emlxs_chipq_lun_flush(port, 2729 sbp->node, sbp->lun, sbp); 2730 } 2731 2732 /* Reset the timeout so the flush has time to complete */ 2733 timeout = emlxs_timeout(hba, 60); 2734 (void) drv_getparm(LBOLT, &time); 2735 while ((time < timeout) && sbp->flush_count > 0) { 2736 delay(drv_usectohz(500000)); 2737 (void) drv_getparm(LBOLT, &time); 2738 } 2739 2740 if (sbp->flush_count == 0) { 2741 emlxs_node_open(port, sbp->node, hba->channel_fcp); 2742 goto done; 2743 } 2744 2745 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2746 "sbp=%p flush_count=%d. Resetting link.", sbp, 2747 sbp->flush_count); 2748 2749 /* Let's first try to reset the link */ 2750 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 2751 2752 if (sbp->flush_count == 0) { 2753 goto done; 2754 } 2755 2756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2757 "sbp=%p flush_count=%d. Resetting HBA.", sbp, 2758 sbp->flush_count); 2759 2760 /* If that doesn't work, reset the adapter */ 2761 (void) emlxs_reset(port, FC_FCA_RESET); 2762 2763 if (sbp->flush_count != 0) { 2764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg, 2765 "sbp=%p flush_count=%d. 
Giving up.", sbp, 2766 sbp->flush_count); 2767 } 2768 2769 } 2770 /* PACKET_FCP_RESET */ 2771 done: 2772 2773 /* Packet has been declared completed and is now ready to be returned */ 2774 2775 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 2776 emlxs_unswap_pkt(sbp); 2777 #endif /* EMLXS_MODREV2X */ 2778 2779 mutex_enter(&sbp->mtx); 2780 sbp->pkt_flags |= PACKET_ULP_OWNED; 2781 mutex_exit(&sbp->mtx); 2782 2783 mutex_enter(&EMLXS_PORT_LOCK); 2784 hba->io_poll_count--; 2785 mutex_exit(&EMLXS_PORT_LOCK); 2786 2787 #ifdef FMA_SUPPORT 2788 if (!in_panic) { 2789 emlxs_check_dma(hba, sbp); 2790 } 2791 #endif 2792 2793 /* Make ULP completion callback if required */ 2794 if (pkt->pkt_comp) { 2795 cp->ulpCmplCmd++; 2796 (*pkt->pkt_comp) (pkt); 2797 } 2798 2799 #ifdef FMA_SUPPORT 2800 if (hba->flag & FC_DMA_CHECK_ERROR) { 2801 emlxs_thread_spawn(hba, emlxs_restart_thread, 2802 NULL, NULL); 2803 } 2804 #endif 2805 2806 return; 2807 2808 } /* emlxs_poll() */ 2809 2810 2811 static int 2812 emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size, 2813 uint32_t *count, uint32_t type) 2814 { 2815 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 2816 emlxs_hba_t *hba = HBA; 2817 char *err = NULL; 2818 emlxs_unsol_buf_t *pool = NULL; 2819 emlxs_unsol_buf_t *new_pool = NULL; 2820 emlxs_config_t *cfg = &CFG; 2821 int32_t i; 2822 int result; 2823 uint32_t free_resv; 2824 uint32_t free; 2825 fc_unsol_buf_t *ubp; 2826 emlxs_ub_priv_t *ub_priv; 2827 int rc; 2828 2829 if (!(port->flag & EMLXS_INI_ENABLED)) { 2830 if (tokens && count) { 2831 bzero(tokens, (sizeof (uint64_t) * (*count))); 2832 } 2833 return (FC_SUCCESS); 2834 } 2835 2836 if (!(port->flag & EMLXS_INI_BOUND)) { 2837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2838 "fca_ub_alloc failed: Port not bound! 
size=%x count=%d " 2839 "type=%x", size, *count, type); 2840 2841 return (FC_FAILURE); 2842 } 2843 2844 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2845 "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type); 2846 2847 if (count && (*count > EMLXS_MAX_UBUFS)) { 2848 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2849 "fca_ub_alloc failed: Too many unsolicted buffers " 2850 "requested. count=%x", *count); 2851 2852 return (FC_FAILURE); 2853 2854 } 2855 2856 if (tokens == NULL) { 2857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2858 "fca_ub_alloc failed: Token array is NULL."); 2859 2860 return (FC_FAILURE); 2861 } 2862 2863 /* Clear the token array */ 2864 bzero(tokens, (sizeof (uint64_t) * (*count))); 2865 2866 free_resv = 0; 2867 free = *count; 2868 switch (type) { 2869 case FC_TYPE_BASIC_LS: 2870 err = "BASIC_LS"; 2871 break; 2872 case FC_TYPE_EXTENDED_LS: 2873 err = "EXTENDED_LS"; 2874 free = *count / 2; /* Hold 50% for normal use */ 2875 free_resv = *count - free; /* Reserve 50% for RSCN use */ 2876 break; 2877 case FC_TYPE_IS8802: 2878 err = "IS8802"; 2879 break; 2880 case FC_TYPE_IS8802_SNAP: 2881 err = "IS8802_SNAP"; 2882 2883 if (cfg[CFG_NETWORK_ON].current == 0) { 2884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 2885 "fca_ub_alloc failed: IP support is disabled."); 2886 2887 return (FC_FAILURE); 2888 } 2889 break; 2890 case FC_TYPE_SCSI_FCP: 2891 err = "SCSI_FCP"; 2892 break; 2893 case FC_TYPE_SCSI_GPP: 2894 err = "SCSI_GPP"; 2895 break; 2896 case FC_TYPE_HIPP_FP: 2897 err = "HIPP_FP"; 2898 break; 2899 case FC_TYPE_IPI3_MASTER: 2900 err = "IPI3_MASTER"; 2901 break; 2902 case FC_TYPE_IPI3_SLAVE: 2903 err = "IPI3_SLAVE"; 2904 break; 2905 case FC_TYPE_IPI3_PEER: 2906 err = "IPI3_PEER"; 2907 break; 2908 case FC_TYPE_FC_SERVICES: 2909 err = "FC_SERVICES"; 2910 break; 2911 } 2912 2913 mutex_enter(&EMLXS_UB_LOCK); 2914 2915 /* 2916 * Walk through the list of the unsolicited buffers 2917 * for this ddiinst of emlx. 
2918 */ 2919 2920 pool = port->ub_pool; 2921 2922 /* 2923 * The emlxs_fca_ub_alloc() can be called more than once with different 2924 * size. We will reject the call if there are 2925 * duplicate size with the same FC-4 type. 2926 */ 2927 while (pool) { 2928 if ((pool->pool_type == type) && 2929 (pool->pool_buf_size == size)) { 2930 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, 2931 "fca_ub_alloc failed: Unsolicited buffer pool " 2932 "for %s of size 0x%x bytes already exists.", 2933 err, size); 2934 2935 result = FC_FAILURE; 2936 goto fail; 2937 } 2938 2939 pool = pool->pool_next; 2940 } 2941 2942 mutex_exit(&EMLXS_UB_LOCK); 2943 2944 new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t), 2945 KM_SLEEP); 2946 2947 new_pool->pool_next = NULL; 2948 new_pool->pool_type = type; 2949 new_pool->pool_buf_size = size; 2950 new_pool->pool_nentries = *count; 2951 new_pool->pool_available = new_pool->pool_nentries; 2952 new_pool->pool_free = free; 2953 new_pool->pool_free_resv = free_resv; 2954 new_pool->fc_ubufs = 2955 kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP); 2956 2957 new_pool->pool_first_token = port->ub_count; 2958 new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries; 2959 2960 for (i = 0; i < new_pool->pool_nentries; i++) { 2961 ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i]; 2962 ubp->ub_port_handle = port->ulp_handle; 2963 ubp->ub_token = (uint64_t)((unsigned long)ubp); 2964 ubp->ub_bufsize = size; 2965 ubp->ub_class = FC_TRAN_CLASS3; 2966 ubp->ub_port_private = NULL; 2967 ubp->ub_fca_private = 2968 (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t), 2969 KM_SLEEP); 2970 2971 /* 2972 * Initialize emlxs_ub_priv_t 2973 */ 2974 ub_priv = ubp->ub_fca_private; 2975 ub_priv->ubp = ubp; 2976 ub_priv->port = port; 2977 ub_priv->flags = EMLXS_UB_FREE; 2978 ub_priv->available = 1; 2979 ub_priv->pool = new_pool; 2980 ub_priv->time = 0; 2981 ub_priv->timeout = 0; 2982 ub_priv->token = port->ub_count; 2983 ub_priv->cmd = 0; 
2984 2985 /* Allocate the actual buffer */ 2986 ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP); 2987 2988 2989 tokens[i] = (uint64_t)((unsigned long)ubp); 2990 port->ub_count++; 2991 } 2992 2993 mutex_enter(&EMLXS_UB_LOCK); 2994 2995 /* Add the pool to the top of the pool list */ 2996 new_pool->pool_prev = NULL; 2997 new_pool->pool_next = port->ub_pool; 2998 2999 if (port->ub_pool) { 3000 port->ub_pool->pool_prev = new_pool; 3001 } 3002 port->ub_pool = new_pool; 3003 3004 /* Set the post counts */ 3005 if (type == FC_TYPE_IS8802_SNAP) { 3006 MAILBOXQ *mbox; 3007 3008 port->ub_post[hba->channel_ip] += new_pool->pool_nentries; 3009 3010 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba, 3011 MEM_MBOX))) { 3012 emlxs_mb_config_farp(hba, mbox); 3013 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, 3014 mbox, MBX_NOWAIT, 0); 3015 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) { 3016 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox); 3017 } 3018 } 3019 port->flag |= EMLXS_PORT_IP_UP; 3020 } else if (type == FC_TYPE_EXTENDED_LS) { 3021 port->ub_post[hba->channel_els] += new_pool->pool_nentries; 3022 } else if (type == FC_TYPE_FC_SERVICES) { 3023 port->ub_post[hba->channel_ct] += new_pool->pool_nentries; 3024 } 3025 3026 mutex_exit(&EMLXS_UB_LOCK); 3027 3028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 3029 "%d unsolicited buffers allocated for %s of size 0x%x bytes.", 3030 *count, err, size); 3031 3032 return (FC_SUCCESS); 3033 3034 fail: 3035 3036 /* Clean the pool */ 3037 for (i = 0; tokens[i] != 0; i++) { 3038 /* Get the buffer object */ 3039 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3040 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3041 3042 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3043 "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x " 3044 "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type); 3045 3046 /* Free the actual buffer */ 3047 kmem_free(ubp->ub_buffer, ubp->ub_bufsize); 3048 3049 /* Free the private area of the buffer object */ 3050 
kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t)); 3051 3052 tokens[i] = 0; 3053 port->ub_count--; 3054 } 3055 3056 if (new_pool) { 3057 /* Free the array of buffer objects in the pool */ 3058 kmem_free((caddr_t)new_pool->fc_ubufs, 3059 (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries)); 3060 3061 /* Free the pool object */ 3062 kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t)); 3063 } 3064 3065 mutex_exit(&EMLXS_UB_LOCK); 3066 3067 return (result); 3068 3069 } /* emlxs_fca_ub_alloc() */ 3070 3071 3072 static void 3073 emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp) 3074 { 3075 emlxs_hba_t *hba = HBA; 3076 emlxs_ub_priv_t *ub_priv; 3077 fc_packet_t *pkt; 3078 ELS_PKT *els; 3079 uint32_t sid; 3080 3081 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3082 3083 if (hba->state <= FC_LINK_DOWN) { 3084 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id); 3085 return; 3086 } 3087 3088 if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) + 3089 sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) { 3090 emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id); 3091 return; 3092 } 3093 3094 sid = LE_SWAP24_LO(ubp->ub_frame.s_id); 3095 3096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg, 3097 "%s dropped: sid=%x. 
Rejecting.", 3098 emlxs_elscmd_xlate(ub_priv->cmd), sid); 3099 3100 pkt->pkt_tran_type = FC_PKT_OUTBOUND; 3101 pkt->pkt_timeout = (2 * hba->fc_ratov); 3102 3103 if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) { 3104 pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3; 3105 pkt->pkt_tran_flags |= FC_TRAN_CLASS2; 3106 } 3107 3108 /* Build the fc header */ 3109 pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id; 3110 pkt->pkt_cmd_fhdr.r_ctl = 3111 R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL; 3112 pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did); 3113 pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS; 3114 pkt->pkt_cmd_fhdr.f_ctl = 3115 F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ; 3116 pkt->pkt_cmd_fhdr.seq_id = 0; 3117 pkt->pkt_cmd_fhdr.df_ctl = 0; 3118 pkt->pkt_cmd_fhdr.seq_cnt = 0; 3119 pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff; 3120 pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id; 3121 pkt->pkt_cmd_fhdr.ro = 0; 3122 3123 /* Build the command */ 3124 els = (ELS_PKT *) pkt->pkt_cmd; 3125 els->elsCode = 0x01; 3126 els->un.lsRjt.un.b.lsRjtRsvd0 = 0; 3127 els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; 3128 els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; 3129 els->un.lsRjt.un.b.vendorUnique = 0x02; 3130 3131 /* Send the pkt later in another thread */ 3132 (void) emlxs_pkt_send(pkt, 0); 3133 3134 return; 3135 3136 } /* emlxs_ub_els_reject() */ 3137 3138 extern int 3139 emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count, 3140 uint64_t tokens[]) 3141 { 3142 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3143 emlxs_hba_t *hba = HBA; 3144 fc_unsol_buf_t *ubp; 3145 emlxs_ub_priv_t *ub_priv; 3146 uint32_t i; 3147 uint32_t time; 3148 emlxs_unsol_buf_t *pool; 3149 3150 if (count == 0) { 3151 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3152 "fca_ub_release: Nothing to do. 
count=%d", count); 3153 3154 return (FC_SUCCESS); 3155 } 3156 3157 if (!(port->flag & EMLXS_INI_BOUND)) { 3158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3159 "fca_ub_release failed: Port not bound. count=%d " 3160 "token[0]=%p", 3161 count, tokens[0]); 3162 3163 return (FC_UNBOUND); 3164 } 3165 3166 mutex_enter(&EMLXS_UB_LOCK); 3167 3168 if (!port->ub_pool) { 3169 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3170 "fca_ub_release failed: No pools! count=%d token[0]=%p", 3171 count, tokens[0]); 3172 3173 mutex_exit(&EMLXS_UB_LOCK); 3174 return (FC_UB_BADTOKEN); 3175 } 3176 3177 for (i = 0; i < count; i++) { 3178 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]); 3179 3180 if (!ubp) { 3181 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3182 "fca_ub_release failed: count=%d tokens[%d]=0", 3183 count, i); 3184 3185 mutex_exit(&EMLXS_UB_LOCK); 3186 return (FC_UB_BADTOKEN); 3187 } 3188 3189 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private; 3190 3191 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) { 3192 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3193 "fca_ub_release failed: Dead buffer found. ubp=%p", 3194 ubp); 3195 3196 mutex_exit(&EMLXS_UB_LOCK); 3197 return (FC_UB_BADTOKEN); 3198 } 3199 3200 if (ub_priv->flags == EMLXS_UB_FREE) { 3201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3202 "fca_ub_release: Buffer already free! 
ubp=%p " 3203 "token=%x", 3204 ubp, ub_priv->token); 3205 3206 continue; 3207 } 3208 3209 /* Check for dropped els buffer */ 3210 /* ULP will do this sometimes without sending a reply */ 3211 if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) && 3212 !(ub_priv->flags & EMLXS_UB_REPLY)) { 3213 emlxs_ub_els_reject(port, ubp); 3214 } 3215 3216 /* Mark the buffer free */ 3217 ub_priv->flags = EMLXS_UB_FREE; 3218 bzero(ubp->ub_buffer, ubp->ub_bufsize); 3219 3220 time = hba->timer_tics - ub_priv->time; 3221 ub_priv->time = 0; 3222 ub_priv->timeout = 0; 3223 3224 pool = ub_priv->pool; 3225 3226 if (ub_priv->flags & EMLXS_UB_RESV) { 3227 pool->pool_free_resv++; 3228 } else { 3229 pool->pool_free++; 3230 } 3231 3232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 3233 "fca_ub_release: ubp=%p token=%x time=%d av=%d " 3234 "(%d,%d,%d,%d)", 3235 ubp, ub_priv->token, time, ub_priv->available, 3236 pool->pool_nentries, pool->pool_available, 3237 pool->pool_free, pool->pool_free_resv); 3238 3239 /* Check if pool can be destroyed now */ 3240 if ((pool->pool_available == 0) && 3241 (pool->pool_free + pool->pool_free_resv == 3242 pool->pool_nentries)) { 3243 emlxs_ub_destroy(port, pool); 3244 } 3245 } 3246 3247 mutex_exit(&EMLXS_UB_LOCK); 3248 3249 return (FC_SUCCESS); 3250 3251 } /* emlxs_fca_ub_release() */ 3252 3253 3254 static int 3255 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[]) 3256 { 3257 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle; 3258 emlxs_unsol_buf_t *pool; 3259 fc_unsol_buf_t *ubp; 3260 emlxs_ub_priv_t *ub_priv; 3261 uint32_t i; 3262 3263 if (!(port->flag & EMLXS_INI_ENABLED)) { 3264 return (FC_SUCCESS); 3265 } 3266 3267 if (count == 0) { 3268 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 3269 "fca_ub_free: Nothing to do. 
count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	/* A port that was never bound has nothing to free */
	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
		    tokens[0]);

		return (FC_SUCCESS);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
		    tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	/* Process buffer list */
	/*
	 * NOTE(review): a bad token part-way through the list aborts the
	 * whole call with FC_UB_BADTOKEN, but buffers already processed in
	 * earlier iterations stay freed.
	 */
	for (i = 0; i < count; i++) {
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_free failed: count=%d tokens[%d]=0", count,
			    i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		/* Mark buffer unavailable */
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_free failed: Dead buffer found. 
ubp=%p",
			    ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv->available = 0;

		/* Mark one less buffer available in the parent pool */
		pool = ub_priv->pool;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
		    ub_priv->token, pool->pool_nentries,
		    pool->pool_available - 1, pool->pool_free,
		    pool->pool_free_resv);

		if (pool->pool_available) {
			pool->pool_available--;

			/* Check if pool can be destroyed */
			/* (all buffers both freed and released) */
			if ((pool->pool_available == 0) &&
			    (pool->pool_free + pool->pool_free_resv ==
			    pool->pool_nentries)) {
				emlxs_ub_destroy(port, pool);
			}
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_fca_ub_free() */


/*
 * emlxs_ub_destroy
 *
 * Unlink 'pool' from the port's pool list, adjust the per-channel
 * unsolicited-buffer post counts, and free every buffer, its private
 * area, the buffer array, and the pool object itself.
 *
 * EMLXS_UB_LOCK must be held when calling this routine.
 */
extern void
emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *next;
	emlxs_unsol_buf_t *prev;
	fc_unsol_buf_t *ubp;
	uint32_t i;

	/* Remove the pool object from the pool list */
	next = pool->pool_next;
	prev = pool->pool_prev;

	if (port->ub_pool == pool) {
		port->ub_pool = next;
	}

	if (prev) {
		prev->pool_next = next;
	}

	if (next) {
		next->pool_prev = prev;
	}

	pool->pool_prev = NULL;
	pool->pool_next = NULL;

	/* Clear the post counts */
	/* (other pool types leave ub_post untouched by design of switch) */
	switch (pool->pool_type) {
	case FC_TYPE_IS8802_SNAP:
		port->ub_post[hba->channel_ip] -= pool->pool_nentries;
		break;

	case FC_TYPE_EXTENDED_LS:
		port->ub_post[hba->channel_els] -= pool->pool_nentries;
		break;

	case FC_TYPE_FC_SERVICES:
		port->ub_post[hba->channel_ct] -= pool->pool_nentries;
		break;
	}

	/* Now free the pool memory */
	EMLXS_MSGF(EMLXS_CONTEXT, 
&emlxs_sfs_debug_msg,
	    "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
	    pool->pool_type, pool->pool_buf_size, pool->pool_nentries);

	/* Process the array of buffer objects in the pool */
	for (i = 0; i < pool->pool_nentries; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];

		/* Free the memory the buffer object represents */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
	}

	/* Free the array of buffer objects in the pool */
	kmem_free((caddr_t)pool->fc_ubufs,
	    (sizeof (fc_unsol_buf_t)*pool->pool_nentries));

	/* Free the pool object */
	kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));

	return;

} /* emlxs_ub_destroy() */


/*
 * emlxs_fca_pkt_abort
 *
 * FCA entry point: abort a previously submitted packet. Searches the
 * transmit queues (priority and normal) and then the chip queue; a
 * packet still on a TX queue is unlinked and completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_ABORT_REQUESTED, while a packet on the chip
 * is aborted with an ABORT/CLOSE XRI IOCB. If 'sleep' is set the
 * routine retries (up to 5 passes) and then waits for the completion.
 * Returns FC_SUCCESS, FC_FAILURE, FC_UNBOUND or FC_OFFLINE.
 */
/*ARGSUSED*/
extern int
emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;

	emlxs_buf_t *sbp;
	NODELIST *nlp;
	NODELIST *prev_nlp;
	uint8_t channelno;
	CHANNEL *cp;
	clock_t pkt_timeout;
	clock_t timer;
	clock_t time;
	int32_t pkt_ret;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCBQ *prev;
	uint32_t found;
	uint32_t pass = 0;

	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
	iocbq = &sbp->iocbq;
	nlp = (NODELIST *)sbp->node;
	cp = (CHANNEL *)sbp->channel;
	channelno = (cp) ? 
cp->channelno : 0;

	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Port not bound.");
		return (FC_UNBOUND);
	}

	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Adapter offline.");
		return (FC_OFFLINE);
	}

	/* ULP requires the aborted pkt to be completed */
	/* back to ULP before returning from this call. */
	/* SUN knows of problems with this call so they suggested that we */
	/* always return a FC_FAILURE for this call, until it is worked out. */

	/* Check if pkt is no good */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Tag this now */
	/* This will prevent any thread except ours from completing it */
	mutex_enter(&sbp->mtx);

	/* Check again if we still own this */
	/* (re-test under sbp->mtx in case another thread completed it) */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_exit(&sbp->mtx);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Check if pkt is a real polled command */
	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
	    (sbp->pkt_flags & PACKET_POLLED)) {
		mutex_exit(&sbp->mtx);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Attempting to abort a polled I/O. 
sbp=%p flags=%x", sbp,
		    sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Claim the pkt: we will poll for its completion ourselves */
	sbp->pkt_flags |= PACKET_POLLED;
	sbp->pkt_flags |= PACKET_IN_ABORT;

	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
	    PACKET_IN_TIMEOUT)) {
		mutex_exit(&sbp->mtx);

		/* Do nothing, pkt already on its way out */
		goto done;
	}

	mutex_exit(&sbp->mtx);

begin:
	pass++;

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		/* Find it on the queue */
		found = 0;
		if (iocbq->flag & IOCB_PRIORITY) {
			/* Search the priority queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_ptx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_ptx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno]. 
q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_ptx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = next->next;
			}
		} else {
			/* Search the normal queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					if (nlp->nlp_tx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_tx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_tx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = (IOCBQ *) next->next;
			}
		}

		if (!found) {
			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
			    "I/O not found in driver. 
sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
			goto done;
		}

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node,
			 * then don't shift the pointers
			 */
			/* We want to drain the base node before moving on */
			if (!nlp->nlp_base) {
				/* Just shift channel queue */
				/* pointers to next node */
				cp->nodeq.q_last = (void *) nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the only node on list */
			if (cp->nodeq.q_first == (void *)nlp &&
			    cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else if (cp->nodeq.q_first == (void *)nlp) {
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *) cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult find the
				 * previous node in the circular channel queue
				 */
				prev_nlp = nlp;
				while (prev_nlp->nlp_next[channelno] != nlp) {
					prev_nlp = prev_nlp->
					    nlp_next[channelno];
				}

				prev_nlp->nlp_next[channelno] =
				    nlp->nlp_next[channelno];

				if (cp->nodeq.q_last == (void *)nlp) {
					cp->nodeq.q_last = (void *)prev_nlp;
				}
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/* Free the ULPIOTAG and the bmp */
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
		} else {
			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
		}


		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Pkt never reached the chip: complete it locally */
		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_ABORT_REQUESTED, 1);

		goto done;
	}

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);


	/* Check the chip queue */
	mutex_enter(&EMLXS_FCTAB_LOCK);

	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
	    (sbp == hba->fc_table[sbp->iotag])) {

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			/* Link up: ABTS abort; allow 4*RATOV+10 ticks */
			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		} else {
			/* Link down: just close the exchange locally */
			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks = hba->timer_tics + 30;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Send this iocbq */
		if (iocbq) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
			iocbq = NULL;
		}

		goto done;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Pkt was not on any queues */

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
		goto done;
	}

	if (!sleep) {
		return (FC_FAILURE);
	}

	/* Apparently the pkt was not found. 
Let's delay and try again */
	if (pass < 5) {
		delay(drv_usectohz(5000000));	/* 5 seconds */

		/* Check again if we still own this */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
			goto done;
		}

		goto begin;
	}

force_it:

	/* Force the completion now */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);

	/* Now complete it */
	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
	    1);

done:

	/* Now wait for the pkt to complete */
	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
		/* Set thread timeout */
		pkt_timeout = emlxs_timeout(hba, 30);

		/* Check for panic situation */
		if (ddi_in_panic()) {

			/*
			 * In panic situations there will be one thread with no
			 * interrrupts (hard or soft) and no timers
			 */

			/*
			 * We must manually poll everything in this thread
			 * to keep the driver going.
			 */

			/* Keep polling the chip until our IO is completed */
			(void) drv_getparm(LBOLT, &time);
			timer = time + drv_usectohz(1000000);
			while ((time < pkt_timeout) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				EMLXS_SLI_POLL_INTR(hba);
				(void) drv_getparm(LBOLT, &time);

				/* Trigger timer checks periodically */
				if (time >= timer) {
					emlxs_timer_checks(hba);
					timer = time + drv_usectohz(1000000);
				}
			}
		} else {
			/* Wait for IO completion or pkt_timeout */
			mutex_enter(&EMLXS_PKT_LOCK);
			pkt_ret = 0;
			while ((pkt_ret != -1) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				pkt_ret =
				    cv_timedwait(&EMLXS_PKT_CV,
				    &EMLXS_PKT_LOCK, pkt_timeout);
			}
			mutex_exit(&EMLXS_PKT_LOCK);
		}

		/* Check if pkt_timeout occured. 
This is not good. */
		/* Something happened to our IO. */
		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			/* Force the completion now */
			goto force_it;
		}
	}
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	/* Check again if we still own this */
	/* Hand the pkt back to the ULP before returning */
	if ((sbp->pkt_flags & PACKET_VALID) &&
	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_enter(&sbp->mtx);
		if ((sbp->pkt_flags & PACKET_VALID) &&
		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
			sbp->pkt_flags |= PACKET_ULP_OWNED;
		}
		mutex_exit(&sbp->mtx);
	}

#ifdef ULP_PATCH5
	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
		return (FC_FAILURE);
	}
#endif /* ULP_PATCH5 */

	return (FC_SUCCESS);

} /* emlxs_fca_pkt_abort() */


/*
 * emlxs_abort_all
 *
 * Walk the entire fc_table and attempt a non-sleeping abort of every
 * active packet. On return, *tx holds the number of successfully
 * aborted packets that were still on a TX queue and *chip the number
 * that were on the chip queue. EMLXS_FCTAB_LOCK is dropped around each
 * emlxs_fca_pkt_abort() call, so the table can change during the scan.
 */
static void
emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
{
	emlxs_port_t *port = &PPORT;
	fc_packet_t *pkt;
	emlxs_buf_t *sbp;
	uint32_t i;
	uint32_t flg;
	uint32_t rc;
	uint32_t txcnt;
	uint32_t chipcnt;

	txcnt = 0;
	chipcnt = 0;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Remember where the pkt was before the lock is dropped */
		flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
		pkt = PRIV2PKT(sbp);
		mutex_exit(&EMLXS_FCTAB_LOCK);
		rc = emlxs_fca_pkt_abort(port, pkt, 0);
		if (rc == FC_SUCCESS) {
			if (flg) {
				chipcnt++;
			} else {
				txcnt++;
			}
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);
	*tx = txcnt;
	*chip = chipcnt;
} /* emlxs_abort_all() */


/*
 * emlxs_reset
 *
 * Common reset dispatcher for the FCA and DFC paths. Supports link
 * reset, core dump, adapter reset, and (SLI4) all-firmware reset.
 * Serializes against concurrent resets via hba->reset_state under
 * EMLXS_PORT_LOCK. Returns FC_SUCCESS, FC_FAILURE or FC_DEVICE_BUSY.
 */
extern int32_t
emlxs_reset(emlxs_port_t *port, uint32_t cmd)
{
	emlxs_hba_t *hba = HBA;
	int rval;
	int i = 0;
	int ret;
	clock_t timeout;

	switch (cmd) {
	case FC_FCA_LINK_RESET:

		mutex_enter(&EMLXS_PORT_LOCK);
		if (!(hba->flag & FC_ONLINE_MODE) ||
		    (hba->state <= FC_LINK_DOWN)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_SUCCESS);
		}

		/* Only one reset may be in progress at a time */
		if (hba->reset_state &
		    (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_LINK_RESET_INP;
		hba->reset_request |= FC_LINK_RESET;
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Link.");

		mutex_enter(&EMLXS_LINKUP_LOCK);
		hba->linkup_wait_flag = TRUE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (emlxs_reset_link(hba, 1, 1)) {
			/* Reset failed: undo the wait flag and state bits */
			mutex_enter(&EMLXS_LINKUP_LOCK);
			hba->linkup_wait_flag = FALSE;
			mutex_exit(&EMLXS_LINKUP_LOCK);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->reset_state &= ~FC_LINK_RESET_INP;
			hba->reset_request &= ~FC_LINK_RESET;
			mutex_exit(&EMLXS_PORT_LOCK);

			return (FC_FAILURE);
		}

		/* Wait up to 60 seconds for the link to come back up */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		timeout = emlxs_timeout(hba, 60);
		ret = 0;
		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
			ret =
			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
			    timeout);
		}

		hba->linkup_wait_flag = FALSE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_LINK_RESET_INP;
		hba->reset_request &= ~FC_LINK_RESET;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* ret == -1 means cv_timedwait timed out */
		if (ret == -1) {
			return (FC_FAILURE);
		}

		return (FC_SUCCESS);

	case FC_FCA_CORE:
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Dumping Core.");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);

		return (FC_SUCCESS);
#endif /* DUMP_SUPPORT */
		/* Without DUMP_SUPPORT, FC_FCA_CORE falls into the */
		/* full adapter reset below */

	case FC_FCA_RESET:
	case FC_FCA_RESET_CORE:

		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->reset_state & FC_PORT_RESET_INP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_PORT_RESET_INP;
		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);

		/* wait for any pending link resets to complete */
		while ((hba->reset_state & FC_LINK_RESET_INP) &&
		    (i++ < 1000)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (hba->reset_state & FC_LINK_RESET_INP) {
			hba->reset_state &= ~FC_PORT_RESET_INP;
			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Adapter.");

		rval = FC_SUCCESS;

		/* Offline/online cycle performs the actual reset */
		if (emlxs_offline(hba, 0) == 0) {
			(void) emlxs_online(hba);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Adapter reset failed. 
Device busy.");

			rval = FC_DEVICE_BUSY;
		}

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_PORT_RESET_INP;
		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);

	case EMLXS_DFC_RESET_ALL:
	case EMLXS_DFC_RESET_ALL_FORCE_DUMP:

		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->reset_state & FC_PORT_RESET_INP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_PORT_RESET_INP;
		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);

		/* wait for any pending link resets to complete */
		while ((hba->reset_state & FC_LINK_RESET_INP) &&
		    (i++ < 1000)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (hba->reset_state & FC_LINK_RESET_INP) {
			hba->reset_state &= ~FC_PORT_RESET_INP;
			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		rval = FC_SUCCESS;

		if (cmd == EMLXS_DFC_RESET_ALL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Resetting Adapter (All Firmware Reset).");

			emlxs_sli4_hba_reset_all(hba, 0);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Resetting Adapter "
			    "(All Firmware Reset, Force Dump).");

			emlxs_sli4_hba_reset_all(hba, 1);
		}

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_PORT_RESET_INP;
		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Wait for the timer thread to detect the error condition */
		delay(drv_usectohz(1000000));

		/* Wait for the HBA to re-initialize */
		/* (poll up to ~30 seconds for FC_ONLINE_MODE) */
		i = 0;
		mutex_enter(&EMLXS_PORT_LOCK);
		while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (!(hba->flag & FC_ONLINE_MODE)) {
			rval = FC_FAILURE;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "reset: Unknown command. cmd=%x", cmd);

		break;
	}

	return (FC_FAILURE);

} /* emlxs_reset() */


/*
 * emlxs_fca_reset
 *
 * FCA entry point: validate the port (initiator mode, bound) and the
 * reset command, upgrade a link reset to a full adapter reset when a
 * firmware update is pending, then dispatch to emlxs_reset().
 */
extern int32_t
emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t rval;

	if (port->mode != MODE_INITIATOR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset failed. Port is not in initiator mode.");

		return (FC_FAILURE);
	}

	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Port not bound.");

		return (FC_UNBOUND);
	}

	switch (cmd) {
	case FC_FCA_LINK_RESET:
		/* A pending firmware update forces a full adapter reset */
		if (hba->fw_flag & FW_UPDATE_NEEDED) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
			cmd = FC_FCA_RESET;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_reset: FC_FCA_LINK_RESET");
		}
		break;

	case FC_FCA_CORE:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_CORE");
		break;

	case FC_FCA_RESET:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_RESET");
		break;

	case FC_FCA_RESET_CORE:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: FC_FCA_RESET_CORE");
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_reset: Unknown command. 
cmd=%x", cmd);
		return (FC_FAILURE);
	}

	if (hba->fw_flag & FW_UPDATE_NEEDED) {
		hba->fw_flag |= FW_UPDATE_KERNEL;
	}

	rval = emlxs_reset(port, cmd);

	return (rval);

} /* emlxs_fca_reset() */


/*
 * emlxs_fca_port_manage
 *
 * FCA entry point: dispatcher for port-management requests (firmware
 * revision queries, diagnostic dumps, link state, statistics, firmware
 * download, diagnostics, driver parameter get/set, etc.) selected by
 * pm->pm_cmd_code. Returns an FC_* status code.
 */
extern int
emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t ret;
	emlxs_vpd_t *vpd = &VPD;

	ret = FC_SUCCESS;

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	switch (pm->pm_cmd_code) {

	case FC_PORT_GET_FW_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FW_REV");

		(void) snprintf(buffer, (sizeof (buffer)-1),
		    "%s %s", hba->model_info.model,
		    vpd->fw_version);
		/* bzero first so the strncpy below is NUL-terminated */
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;

			break;
		}

		(void) strncpy(pm->pm_data_buf, buffer,
		    (pm->pm_data_len-1));
		break;
	}

	case FC_PORT_GET_FCODE_REV:
	{
		char buffer[128];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_FCODE_REV");

		/* Force update here just to be sure */
		emlxs_get_fcode_version(hba);

		(void) snprintf(buffer, (sizeof (buffer)-1),
		    "%s %s", hba->model_info.model,
		    vpd->fcode_version);
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < strlen(buffer) + 1) {
			ret = FC_NOMEM;
			break;
		}

		(void) strncpy(pm->pm_data_buf, buffer,
		    (pm->pm_data_len-1));
		break;
	}

	case FC_PORT_GET_DUMP_SIZE:
	{
#ifdef DUMP_SUPPORT
		uint32_t dump_size = 0;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_DUMP_SIZE");

		if
(pm->pm_data_len < sizeof (uint32_t)) { 4228 ret = FC_NOMEM; 4229 break; 4230 } 4231 4232 (void) emlxs_get_dump(hba, NULL, &dump_size); 4233 4234 *((uint32_t *)pm->pm_data_buf) = dump_size; 4235 4236 #else 4237 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4238 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported."); 4239 4240 #endif /* DUMP_SUPPORT */ 4241 4242 break; 4243 } 4244 4245 case FC_PORT_GET_DUMP: 4246 { 4247 #ifdef DUMP_SUPPORT 4248 uint32_t dump_size = 0; 4249 4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4251 "fca_port_manage: FC_PORT_GET_DUMP"); 4252 4253 (void) emlxs_get_dump(hba, NULL, &dump_size); 4254 4255 if (pm->pm_data_len < dump_size) { 4256 ret = FC_NOMEM; 4257 break; 4258 } 4259 4260 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf, 4261 (uint32_t *)&dump_size); 4262 #else 4263 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4264 "fca_port_manage: FC_PORT_GET_DUMP unsupported."); 4265 4266 #endif /* DUMP_SUPPORT */ 4267 4268 break; 4269 } 4270 4271 case FC_PORT_FORCE_DUMP: 4272 { 4273 #ifdef DUMP_SUPPORT 4274 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4275 "fca_port_manage: FC_PORT_FORCE_DUMP"); 4276 4277 /* Schedule a USER dump */ 4278 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0); 4279 4280 /* Wait for dump to complete */ 4281 emlxs_dump_wait(hba); 4282 #else 4283 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4284 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported."); 4285 4286 #endif /* DUMP_SUPPORT */ 4287 break; 4288 } 4289 4290 case FC_PORT_LINK_STATE: 4291 { 4292 uint32_t *link_state; 4293 4294 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4295 "fca_port_manage: FC_PORT_LINK_STATE"); 4296 4297 if (pm->pm_stat_len != sizeof (*link_state)) { 4298 ret = FC_NOMEM; 4299 break; 4300 } 4301 4302 if (pm->pm_cmd_buf != NULL) { 4303 /* 4304 * Can't look beyond the FCA port. 
4305 */ 4306 ret = FC_INVALID_REQUEST; 4307 break; 4308 } 4309 4310 link_state = (uint32_t *)pm->pm_stat_buf; 4311 4312 /* Set the state */ 4313 if (hba->state >= FC_LINK_UP) { 4314 /* Check for loop topology */ 4315 if (hba->topology == TOPOLOGY_LOOP) { 4316 *link_state = FC_STATE_LOOP; 4317 } else { 4318 *link_state = FC_STATE_ONLINE; 4319 } 4320 4321 /* Set the link speed */ 4322 switch (hba->linkspeed) { 4323 case LA_2GHZ_LINK: 4324 *link_state |= FC_STATE_2GBIT_SPEED; 4325 break; 4326 case LA_4GHZ_LINK: 4327 *link_state |= FC_STATE_4GBIT_SPEED; 4328 break; 4329 case LA_8GHZ_LINK: 4330 *link_state |= FC_STATE_8GBIT_SPEED; 4331 break; 4332 case LA_10GHZ_LINK: 4333 *link_state |= FC_STATE_10GBIT_SPEED; 4334 break; 4335 case LA_16GHZ_LINK: 4336 *link_state |= FC_STATE_16GBIT_SPEED; 4337 break; 4338 case LA_32GHZ_LINK: 4339 *link_state |= FC_STATE_32GBIT_SPEED; 4340 break; 4341 case LA_1GHZ_LINK: 4342 default: 4343 *link_state |= FC_STATE_1GBIT_SPEED; 4344 break; 4345 } 4346 } else { 4347 *link_state = FC_STATE_OFFLINE; 4348 } 4349 4350 break; 4351 } 4352 4353 4354 case FC_PORT_ERR_STATS: 4355 case FC_PORT_RLS: 4356 { 4357 MAILBOXQ *mbq; 4358 MAILBOX *mb; 4359 fc_rls_acc_t *bp; 4360 4361 if (!(hba->flag & FC_ONLINE_MODE)) { 4362 return (FC_OFFLINE); 4363 } 4364 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4365 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS"); 4366 4367 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) { 4368 ret = FC_NOMEM; 4369 break; 4370 } 4371 4372 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, 4373 MEM_MBOX)) == 0) { 4374 ret = FC_NOMEM; 4375 break; 4376 } 4377 mb = (MAILBOX *)mbq; 4378 4379 emlxs_mb_read_lnk_stat(hba, mbq); 4380 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) 4381 != MBX_SUCCESS) { 4382 ret = FC_PBUSY; 4383 } else { 4384 bp = (fc_rls_acc_t *)pm->pm_data_buf; 4385 4386 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt; 4387 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt; 4388 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt; 
4389 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt; 4390 bp->rls_invalid_word = 4391 mb->un.varRdLnk.invalidXmitWord; 4392 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt; 4393 } 4394 4395 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 4396 break; 4397 } 4398 4399 case FC_PORT_DOWNLOAD_FW: 4400 if (!(hba->flag & FC_ONLINE_MODE)) { 4401 return (FC_OFFLINE); 4402 } 4403 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4404 "fca_port_manage: FC_PORT_DOWNLOAD_FW"); 4405 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4406 pm->pm_data_len, 1); 4407 break; 4408 4409 case FC_PORT_DOWNLOAD_FCODE: 4410 if (!(hba->flag & FC_ONLINE_MODE)) { 4411 return (FC_OFFLINE); 4412 } 4413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4414 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE"); 4415 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4416 pm->pm_data_len, 1); 4417 break; 4418 4419 case FC_PORT_DIAG: 4420 { 4421 uint32_t errno = 0; 4422 uint32_t did = 0; 4423 uint32_t pattern = 0; 4424 4425 switch (pm->pm_cmd_flags) { 4426 case EMLXS_DIAG_BIU: 4427 4428 if (!(hba->flag & FC_ONLINE_MODE)) { 4429 return (FC_OFFLINE); 4430 } 4431 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4432 "fca_port_manage: DIAG_BIU"); 4433 4434 if (pm->pm_data_len) { 4435 pattern = *((uint32_t *)pm->pm_data_buf); 4436 } 4437 4438 errno = emlxs_diag_biu_run(hba, pattern); 4439 4440 if (pm->pm_stat_len == sizeof (errno)) { 4441 *(int *)pm->pm_stat_buf = errno; 4442 } 4443 4444 break; 4445 4446 4447 case EMLXS_DIAG_POST: 4448 4449 if (!(hba->flag & FC_ONLINE_MODE)) { 4450 return (FC_OFFLINE); 4451 } 4452 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4453 "fca_port_manage: DIAG_POST"); 4454 4455 errno = emlxs_diag_post_run(hba); 4456 4457 if (pm->pm_stat_len == sizeof (errno)) { 4458 *(int *)pm->pm_stat_buf = errno; 4459 } 4460 4461 break; 4462 4463 4464 case EMLXS_DIAG_ECHO: 4465 4466 if (!(hba->flag & FC_ONLINE_MODE)) { 4467 return (FC_OFFLINE); 4468 } 4469 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4470 
"fca_port_manage: DIAG_ECHO"); 4471 4472 if (pm->pm_cmd_len != sizeof (uint32_t)) { 4473 ret = FC_INVALID_REQUEST; 4474 break; 4475 } 4476 4477 did = *((uint32_t *)pm->pm_cmd_buf); 4478 4479 if (pm->pm_data_len) { 4480 pattern = *((uint32_t *)pm->pm_data_buf); 4481 } 4482 4483 errno = emlxs_diag_echo_run(port, did, pattern); 4484 4485 if (pm->pm_stat_len == sizeof (errno)) { 4486 *(int *)pm->pm_stat_buf = errno; 4487 } 4488 4489 break; 4490 4491 4492 case EMLXS_PARM_GET_NUM: 4493 { 4494 uint32_t *num; 4495 emlxs_config_t *cfg; 4496 uint32_t i; 4497 uint32_t count; 4498 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4499 "fca_port_manage: PARM_GET_NUM"); 4500 4501 if (pm->pm_stat_len < sizeof (uint32_t)) { 4502 ret = FC_NOMEM; 4503 break; 4504 } 4505 4506 num = (uint32_t *)pm->pm_stat_buf; 4507 count = 0; 4508 cfg = &CFG; 4509 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4510 if (!(cfg->flags & PARM_HIDDEN)) { 4511 count++; 4512 } 4513 4514 } 4515 4516 *num = count; 4517 4518 break; 4519 } 4520 4521 case EMLXS_PARM_GET_LIST: 4522 { 4523 emlxs_parm_t *parm; 4524 emlxs_config_t *cfg; 4525 uint32_t i; 4526 uint32_t max_count; 4527 4528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4529 "fca_port_manage: PARM_GET_LIST"); 4530 4531 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4532 ret = FC_NOMEM; 4533 break; 4534 } 4535 4536 max_count = pm->pm_stat_len / sizeof (emlxs_parm_t); 4537 4538 parm = (emlxs_parm_t *)pm->pm_stat_buf; 4539 cfg = &CFG; 4540 for (i = 0; i < NUM_CFG_PARAM && max_count; i++, 4541 cfg++) { 4542 if (!(cfg->flags & PARM_HIDDEN)) { 4543 (void) strncpy(parm->label, cfg->string, 4544 (sizeof (parm->label)-1)); 4545 parm->min = cfg->low; 4546 parm->max = cfg->hi; 4547 parm->def = cfg->def; 4548 parm->current = cfg->current; 4549 parm->flags = cfg->flags; 4550 (void) strncpy(parm->help, cfg->help, 4551 (sizeof (parm->help)-1)); 4552 parm++; 4553 max_count--; 4554 } 4555 } 4556 4557 break; 4558 } 4559 4560 case EMLXS_PARM_GET: 4561 { 4562 emlxs_parm_t 
*parm_in; 4563 emlxs_parm_t *parm_out; 4564 emlxs_config_t *cfg; 4565 uint32_t i; 4566 uint32_t len; 4567 4568 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4569 EMLXS_MSGF(EMLXS_CONTEXT, 4570 &emlxs_sfs_debug_msg, 4571 "fca_port_manage: PARM_GET. " 4572 "inbuf too small."); 4573 4574 ret = FC_BADCMD; 4575 break; 4576 } 4577 4578 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4579 EMLXS_MSGF(EMLXS_CONTEXT, 4580 &emlxs_sfs_debug_msg, 4581 "fca_port_manage: PARM_GET. " 4582 "outbuf too small"); 4583 4584 ret = FC_BADCMD; 4585 break; 4586 } 4587 4588 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4589 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4590 len = strlen(parm_in->label); 4591 cfg = &CFG; 4592 ret = FC_BADOBJECT; 4593 4594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4595 "fca_port_manage: PARM_GET: %s=0x%x,%d", 4596 parm_in->label, parm_in->current, 4597 parm_in->current); 4598 4599 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4600 if (len == strlen(cfg->string) && 4601 (strcmp(parm_in->label, 4602 cfg->string) == 0)) { 4603 (void) strncpy(parm_out->label, 4604 cfg->string, 4605 (sizeof (parm_out->label)-1)); 4606 parm_out->min = cfg->low; 4607 parm_out->max = cfg->hi; 4608 parm_out->def = cfg->def; 4609 parm_out->current = cfg->current; 4610 parm_out->flags = cfg->flags; 4611 (void) strncpy(parm_out->help, 4612 cfg->help, 4613 (sizeof (parm_out->help)-1)); 4614 4615 ret = FC_SUCCESS; 4616 break; 4617 } 4618 } 4619 4620 break; 4621 } 4622 4623 case EMLXS_PARM_SET: 4624 { 4625 emlxs_parm_t *parm_in; 4626 emlxs_parm_t *parm_out; 4627 emlxs_config_t *cfg; 4628 uint32_t i; 4629 uint32_t len; 4630 4631 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) { 4632 EMLXS_MSGF(EMLXS_CONTEXT, 4633 &emlxs_sfs_debug_msg, 4634 "fca_port_manage: PARM_GET. " 4635 "inbuf too small."); 4636 4637 ret = FC_BADCMD; 4638 break; 4639 } 4640 4641 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) { 4642 EMLXS_MSGF(EMLXS_CONTEXT, 4643 &emlxs_sfs_debug_msg, 4644 "fca_port_manage: PARM_GET. 
" 4645 "outbuf too small"); 4646 ret = FC_BADCMD; 4647 break; 4648 } 4649 4650 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf; 4651 parm_out = (emlxs_parm_t *)pm->pm_stat_buf; 4652 len = strlen(parm_in->label); 4653 cfg = &CFG; 4654 ret = FC_BADOBJECT; 4655 4656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4657 "fca_port_manage: PARM_SET: %s=0x%x,%d", 4658 parm_in->label, parm_in->current, 4659 parm_in->current); 4660 4661 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) { 4662 /* Find matching parameter string */ 4663 if (len == strlen(cfg->string) && 4664 (strcmp(parm_in->label, 4665 cfg->string) == 0)) { 4666 /* Attempt to update parameter */ 4667 if (emlxs_set_parm(hba, i, 4668 parm_in->current) == FC_SUCCESS) { 4669 (void) strncpy(parm_out->label, 4670 cfg->string, 4671 (sizeof (parm_out->label)- 4672 1)); 4673 parm_out->min = cfg->low; 4674 parm_out->max = cfg->hi; 4675 parm_out->def = cfg->def; 4676 parm_out->current = 4677 cfg->current; 4678 parm_out->flags = cfg->flags; 4679 (void) strncpy(parm_out->help, 4680 cfg->help, 4681 (sizeof (parm_out->help)- 4682 1)); 4683 4684 ret = FC_SUCCESS; 4685 } 4686 4687 break; 4688 } 4689 } 4690 4691 break; 4692 } 4693 4694 case EMLXS_LOG_GET: 4695 { 4696 emlxs_log_req_t *req; 4697 emlxs_log_resp_t *resp; 4698 uint32_t len; 4699 4700 /* Check command size */ 4701 if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) { 4702 ret = FC_BADCMD; 4703 break; 4704 } 4705 4706 /* Get the request */ 4707 req = (emlxs_log_req_t *)pm->pm_cmd_buf; 4708 4709 /* Calculate the response length from the request */ 4710 len = sizeof (emlxs_log_resp_t) + 4711 (req->count * MAX_LOG_MSG_LENGTH); 4712 4713 /* Check the response buffer length */ 4714 if (pm->pm_stat_len < len) { 4715 ret = FC_BADCMD; 4716 break; 4717 } 4718 4719 /* Get the response pointer */ 4720 resp = (emlxs_log_resp_t *)pm->pm_stat_buf; 4721 4722 /* Get the request log enties */ 4723 (void) emlxs_msg_log_get(hba, req, resp); 4724 4725 ret = FC_SUCCESS; 4726 break; 4727 } 4728 4729 case 
EMLXS_GET_BOOT_REV: 4730 { 4731 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4732 "fca_port_manage: GET_BOOT_REV"); 4733 4734 if (pm->pm_stat_len < strlen(vpd->boot_version)) { 4735 ret = FC_NOMEM; 4736 break; 4737 } 4738 4739 bzero(pm->pm_stat_buf, pm->pm_stat_len); 4740 (void) snprintf(pm->pm_stat_buf, pm->pm_stat_len, 4741 "%s %s", hba->model_info.model, vpd->boot_version); 4742 4743 break; 4744 } 4745 4746 case EMLXS_DOWNLOAD_BOOT: 4747 if (!(hba->flag & FC_ONLINE_MODE)) { 4748 return (FC_OFFLINE); 4749 } 4750 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4751 "fca_port_manage: DOWNLOAD_BOOT"); 4752 4753 ret = emlxs_fw_download(hba, pm->pm_data_buf, 4754 pm->pm_data_len, 1); 4755 break; 4756 4757 case EMLXS_DOWNLOAD_CFL: 4758 { 4759 uint32_t *buffer; 4760 uint32_t region; 4761 uint32_t length; 4762 4763 if (!(hba->flag & FC_ONLINE_MODE)) { 4764 return (FC_OFFLINE); 4765 } 4766 4767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4768 "fca_port_manage: DOWNLOAD_CFL"); 4769 4770 /* Extract the region number from the first word. 
*/ 4771 buffer = (uint32_t *)pm->pm_data_buf; 4772 region = *buffer++; 4773 4774 /* Adjust the image length for the header word */ 4775 length = pm->pm_data_len - 4; 4776 4777 ret = 4778 emlxs_cfl_download(hba, region, (caddr_t)buffer, 4779 length); 4780 break; 4781 } 4782 4783 case EMLXS_VPD_GET: 4784 { 4785 emlxs_vpd_desc_t *vpd_out; 4786 4787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4788 "fca_port_manage: VPD_GET"); 4789 4790 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) { 4791 ret = FC_BADCMD; 4792 break; 4793 } 4794 4795 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf; 4796 bzero(vpd_out, pm->pm_stat_len); 4797 4798 (void) strncpy(vpd_out->id, vpd->id, 4799 (sizeof (vpd_out->id)-1)); 4800 (void) strncpy(vpd_out->part_num, vpd->part_num, 4801 (sizeof (vpd_out->part_num)-1)); 4802 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4803 (sizeof (vpd_out->eng_change)-1)); 4804 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4805 (sizeof (vpd_out->manufacturer)-1)); 4806 (void) strncpy(vpd_out->serial_num, vpd->serial_num, 4807 (sizeof (vpd_out->serial_num)-1)); 4808 (void) strncpy(vpd_out->model, vpd->model, 4809 (sizeof (vpd_out->model)-1)); 4810 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4811 (sizeof (vpd_out->model_desc)-1)); 4812 (void) strncpy(vpd_out->port_num, vpd->port_num, 4813 (sizeof (vpd_out->port_num)-1)); 4814 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4815 (sizeof (vpd_out->prog_types)-1)); 4816 4817 ret = FC_SUCCESS; 4818 4819 break; 4820 } 4821 4822 case EMLXS_VPD_GET_V2: 4823 { 4824 emlxs_vpd_desc_v2_t *vpd_out; 4825 4826 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4827 "fca_port_manage: VPD_GET_V2"); 4828 4829 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) { 4830 ret = FC_BADCMD; 4831 break; 4832 } 4833 4834 vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf; 4835 bzero(vpd_out, pm->pm_stat_len); 4836 4837 (void) strncpy(vpd_out->id, vpd->id, 4838 (sizeof (vpd_out->id)-1)); 4839 (void) 
strncpy(vpd_out->part_num, vpd->part_num, 4840 (sizeof (vpd_out->part_num)-1)); 4841 (void) strncpy(vpd_out->eng_change, vpd->eng_change, 4842 (sizeof (vpd_out->eng_change)-1)); 4843 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer, 4844 (sizeof (vpd_out->manufacturer)-1)); 4845 (void) strncpy(vpd_out->serial_num, vpd->serial_num, 4846 (sizeof (vpd_out->serial_num)-1)); 4847 (void) strncpy(vpd_out->model, vpd->model, 4848 (sizeof (vpd_out->model)-1)); 4849 (void) strncpy(vpd_out->model_desc, vpd->model_desc, 4850 (sizeof (vpd_out->model_desc)-1)); 4851 (void) strncpy(vpd_out->port_num, vpd->port_num, 4852 (sizeof (vpd_out->port_num)-1)); 4853 (void) strncpy(vpd_out->prog_types, vpd->prog_types, 4854 (sizeof (vpd_out->prog_types)-1)); 4855 4856 ret = FC_SUCCESS; 4857 4858 break; 4859 } 4860 4861 case EMLXS_PHY_GET: 4862 { 4863 emlxs_phy_desc_t *phy_out; 4864 MAILBOXQ *mbq; 4865 MAILBOX4 *mb; 4866 IOCTL_COMMON_GET_PHY_DETAILS *phy; 4867 mbox_req_hdr_t *hdr_req; 4868 4869 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4870 "fca_port_manage: EMLXS_PHY_GET"); 4871 4872 if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) { 4873 ret = FC_BADCMD; 4874 break; 4875 } 4876 4877 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 4878 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4879 "Invalid sli_mode. 
mode=%d", hba->sli_mode); 4880 ret = FC_BADCMD; 4881 break; 4882 } 4883 4884 phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf; 4885 bzero(phy_out, sizeof (emlxs_phy_desc_t)); 4886 4887 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, 4888 MEM_MBOX)) == 0) { 4889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4890 "Unable to allocate mailbox buffer."); 4891 ret = FC_NOMEM; 4892 break; 4893 } 4894 4895 mb = (MAILBOX4*)mbq; 4896 4897 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE); 4898 4899 mb->un.varSLIConfig.be.embedded = 1; 4900 mbq->mbox_cmpl = NULL; 4901 4902 mb->mbxCommand = MBX_SLI_CONFIG; 4903 mb->mbxOwner = OWN_HOST; 4904 4905 hdr_req = (mbox_req_hdr_t *) 4906 &mb->un.varSLIConfig.be.un_hdr.hdr_req; 4907 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON; 4908 hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS; 4909 hdr_req->timeout = 0; 4910 hdr_req->req_length = 4911 sizeof (IOCTL_COMMON_GET_PHY_DETAILS); 4912 4913 phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1); 4914 4915 /* Send read request */ 4916 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) != 4917 MBX_SUCCESS) { 4918 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4919 "Unable to get PHY details. 
status=%x", 4920 mb->mbxStatus); 4921 4922 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 4923 4924 ret = FC_FAILURE; 4925 break; 4926 } 4927 4928 phy_out->phy_type = phy->params.response.phy_type; 4929 phy_out->interface_type = 4930 phy->params.response.interface_type; 4931 phy_out->misc_params = phy->params.response.misc_params; 4932 phy_out->rsvd[0] = phy->params.response.rsvd[0]; 4933 phy_out->rsvd[1] = phy->params.response.rsvd[1]; 4934 phy_out->rsvd[2] = phy->params.response.rsvd[2]; 4935 phy_out->rsvd[3] = phy->params.response.rsvd[3]; 4936 4937 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq); 4938 4939 ret = FC_SUCCESS; 4940 break; 4941 } 4942 4943 #ifdef NODE_THROTTLE_SUPPORT 4944 case EMLXS_SET_THROTTLE: 4945 { 4946 emlxs_node_t *node; 4947 uint32_t scope = 0; 4948 uint32_t i; 4949 char buf1[32]; 4950 emlxs_throttle_desc_t *desc; 4951 4952 if ((pm->pm_data_buf == NULL) || 4953 (pm->pm_data_len != 4954 sizeof (emlxs_throttle_desc_t))) { 4955 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4956 "fca_port_manage: EMLXS_SET_THROTTLE: " 4957 "Descriptor buffer not valid. 
%d", 4958 pm->pm_data_len); 4959 ret = FC_BADCMD; 4960 break; 4961 } 4962 4963 if ((pm->pm_cmd_buf != NULL) && 4964 (pm->pm_cmd_len == sizeof (uint32_t))) { 4965 scope = *(uint32_t *)pm->pm_cmd_buf; 4966 } 4967 4968 desc = (emlxs_throttle_desc_t *)pm->pm_data_buf; 4969 desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE); 4970 4971 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 4972 "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d " 4973 "depth=%d", 4974 scope, desc->throttle); 4975 4976 rw_enter(&port->node_rwlock, RW_WRITER); 4977 switch (scope) { 4978 case 1: /* all */ 4979 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) { 4980 node = port->node_table[i]; 4981 while (node != NULL) { 4982 node->io_throttle = desc->throttle; 4983 4984 EMLXS_MSGF(EMLXS_CONTEXT, 4985 &emlxs_sfs_debug_msg, 4986 "EMLXS_SET_THROTTLE: wwpn=%s " 4987 "depth=%d", 4988 emlxs_wwn_xlate(buf1, sizeof (buf1), 4989 (uint8_t *)&node->nlp_portname), 4990 node->io_throttle); 4991 4992 node = (NODELIST *)node->nlp_list_next; 4993 } 4994 } 4995 break; 4996 4997 case 2: /* FCP */ 4998 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) { 4999 node = port->node_table[i]; 5000 while (node != NULL) { 5001 if (!(node->nlp_fcp_info & 5002 NLP_FCP_TGT_DEVICE)) { 5003 node = (NODELIST *) 5004 node->nlp_list_next; 5005 continue; 5006 } 5007 5008 node->io_throttle = desc->throttle; 5009 5010 EMLXS_MSGF(EMLXS_CONTEXT, 5011 &emlxs_sfs_debug_msg, 5012 "EMLXS_SET_THROTTLE: wwpn=%s " 5013 "depth=%d", 5014 emlxs_wwn_xlate(buf1, sizeof (buf1), 5015 (uint8_t *)&node->nlp_portname), 5016 node->io_throttle); 5017 5018 node = (NODELIST *)node->nlp_list_next; 5019 } 5020 } 5021 break; 5022 5023 case 0: /* WWPN */ 5024 default: 5025 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) { 5026 node = port->node_table[i]; 5027 while (node != NULL) { 5028 if (bcmp((caddr_t)&node->nlp_portname, 5029 desc->wwpn, 8)) { 5030 node = (NODELIST *) 5031 node->nlp_list_next; 5032 continue; 5033 } 5034 5035 node->io_throttle = desc->throttle; 5036 5037 
EMLXS_MSGF(EMLXS_CONTEXT, 5038 &emlxs_sfs_debug_msg, 5039 "EMLXS_SET_THROTTLE: wwpn=%s " 5040 "depth=%d", 5041 emlxs_wwn_xlate(buf1, sizeof (buf1), 5042 (uint8_t *)&node->nlp_portname), 5043 node->io_throttle); 5044 5045 goto set_throttle_done; 5046 } 5047 } 5048 set_throttle_done: 5049 break; 5050 } 5051 5052 rw_exit(&port->node_rwlock); 5053 ret = FC_SUCCESS; 5054 5055 break; 5056 } 5057 5058 case EMLXS_GET_THROTTLE: 5059 { 5060 emlxs_node_t *node; 5061 uint32_t i; 5062 uint32_t j; 5063 char buf1[32]; 5064 uint32_t count; 5065 emlxs_throttle_desc_t *desc; 5066 5067 if (pm->pm_stat_len == sizeof (uint32_t)) { 5068 count = emlxs_nport_count(port); 5069 *(uint32_t *)pm->pm_stat_buf = count; 5070 5071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5072 "fca_port_manage: EMLXS_GET_THROTTLE: " 5073 "count=%d", 5074 count); 5075 5076 ret = FC_SUCCESS; 5077 break; 5078 } 5079 5080 if ((pm->pm_stat_buf == NULL) || 5081 (pm->pm_stat_len < 5082 sizeof (emlxs_throttle_desc_t))) { 5083 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5084 "fca_port_manage: EMLXS_GET_THROTTLE: " 5085 "Descriptor buffer too small. 
%d", 5086 pm->pm_data_len); 5087 ret = FC_BADCMD; 5088 break; 5089 } 5090 5091 count = pm->pm_stat_len / 5092 sizeof (emlxs_throttle_desc_t); 5093 desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf; 5094 5095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5096 "fca_port_manage: EMLXS_GET_THROTTLE: max=%d", 5097 count); 5098 5099 rw_enter(&port->node_rwlock, RW_READER); 5100 j = 0; 5101 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) { 5102 node = port->node_table[i]; 5103 while (node != NULL) { 5104 if ((node->nlp_DID & 0xFFF000) == 5105 0xFFF000) { 5106 node = (NODELIST *) 5107 node->nlp_list_next; 5108 continue; 5109 } 5110 5111 bcopy((uint8_t *)&node->nlp_portname, 5112 desc[j].wwpn, 8); 5113 desc[j].throttle = node->io_throttle; 5114 5115 EMLXS_MSGF(EMLXS_CONTEXT, 5116 &emlxs_sfs_debug_msg, 5117 "EMLXS_GET_THROTTLE: wwpn=%s " 5118 "depth=%d", 5119 emlxs_wwn_xlate(buf1, sizeof (buf1), 5120 desc[j].wwpn), 5121 desc[j].throttle); 5122 5123 j++; 5124 if (j >= count) { 5125 goto get_throttle_done; 5126 } 5127 5128 node = (NODELIST *)node->nlp_list_next; 5129 } 5130 } 5131 get_throttle_done: 5132 rw_exit(&port->node_rwlock); 5133 ret = FC_SUCCESS; 5134 5135 break; 5136 } 5137 #endif /* NODE_THROTTLE_SUPPORT */ 5138 5139 case EMLXS_GET_FCIO_REV: 5140 { 5141 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5142 "fca_port_manage: GET_FCIO_REV"); 5143 5144 if (pm->pm_stat_len < sizeof (uint32_t)) { 5145 ret = FC_NOMEM; 5146 break; 5147 } 5148 5149 bzero(pm->pm_stat_buf, pm->pm_stat_len); 5150 *(uint32_t *)pm->pm_stat_buf = FCIO_REV; 5151 5152 break; 5153 } 5154 5155 case EMLXS_GET_DFC_REV: 5156 { 5157 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5158 "fca_port_manage: GET_DFC_REV"); 5159 5160 if (pm->pm_stat_len < sizeof (uint32_t)) { 5161 ret = FC_NOMEM; 5162 break; 5163 } 5164 5165 bzero(pm->pm_stat_buf, pm->pm_stat_len); 5166 *(uint32_t *)pm->pm_stat_buf = DFC_REV; 5167 5168 break; 5169 } 5170 5171 case EMLXS_SET_BOOT_STATE: 5172 case EMLXS_SET_BOOT_STATE_old: 5173 { 5174 
uint32_t state; 5175 5176 if (!(hba->flag & FC_ONLINE_MODE)) { 5177 return (FC_OFFLINE); 5178 } 5179 if (pm->pm_cmd_len < sizeof (uint32_t)) { 5180 EMLXS_MSGF(EMLXS_CONTEXT, 5181 &emlxs_sfs_debug_msg, 5182 "fca_port_manage: SET_BOOT_STATE"); 5183 ret = FC_BADCMD; 5184 break; 5185 } 5186 5187 state = *(uint32_t *)pm->pm_cmd_buf; 5188 5189 if (state == 0) { 5190 EMLXS_MSGF(EMLXS_CONTEXT, 5191 &emlxs_sfs_debug_msg, 5192 "fca_port_manage: SET_BOOT_STATE: " 5193 "Disable"); 5194 ret = emlxs_boot_code_disable(hba); 5195 } else { 5196 EMLXS_MSGF(EMLXS_CONTEXT, 5197 &emlxs_sfs_debug_msg, 5198 "fca_port_manage: SET_BOOT_STATE: " 5199 "Enable"); 5200 ret = emlxs_boot_code_enable(hba); 5201 } 5202 5203 break; 5204 } 5205 5206 case EMLXS_GET_BOOT_STATE: 5207 case EMLXS_GET_BOOT_STATE_old: 5208 { 5209 if (!(hba->flag & FC_ONLINE_MODE)) { 5210 return (FC_OFFLINE); 5211 } 5212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5213 "fca_port_manage: GET_BOOT_STATE"); 5214 5215 if (pm->pm_stat_len < sizeof (uint32_t)) { 5216 ret = FC_NOMEM; 5217 break; 5218 } 5219 bzero(pm->pm_stat_buf, pm->pm_stat_len); 5220 5221 ret = emlxs_boot_code_state(hba); 5222 5223 if (ret == FC_SUCCESS) { 5224 *(uint32_t *)pm->pm_stat_buf = 1; 5225 ret = FC_SUCCESS; 5226 } else if (ret == FC_FAILURE) { 5227 ret = FC_SUCCESS; 5228 } 5229 5230 break; 5231 } 5232 5233 case EMLXS_HW_ERROR_TEST: 5234 { 5235 /* 5236 * This command is used for simulating HW ERROR 5237 * on SLI4 only. 
5238 */ 5239 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5240 ret = FC_INVALID_REQUEST; 5241 break; 5242 } 5243 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR; 5244 break; 5245 } 5246 5247 case EMLXS_MB_TIMEOUT_TEST: 5248 { 5249 if (!(hba->flag & FC_ONLINE_MODE)) { 5250 return (FC_OFFLINE); 5251 } 5252 5253 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5254 "fca_port_manage: HW_ERROR_TEST"); 5255 5256 /* Trigger a mailbox timeout */ 5257 hba->mbox_timer = hba->timer_tics; 5258 5259 break; 5260 } 5261 5262 case EMLXS_TEST_CODE: 5263 { 5264 uint32_t *cmd; 5265 5266 if (!(hba->flag & FC_ONLINE_MODE)) { 5267 return (FC_OFFLINE); 5268 } 5269 5270 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5271 "fca_port_manage: TEST_CODE"); 5272 5273 if (pm->pm_cmd_len < sizeof (uint32_t)) { 5274 EMLXS_MSGF(EMLXS_CONTEXT, 5275 &emlxs_sfs_debug_msg, 5276 "fca_port_manage: TEST_CODE. " 5277 "inbuf to small."); 5278 5279 ret = FC_BADCMD; 5280 break; 5281 } 5282 5283 cmd = (uint32_t *)pm->pm_cmd_buf; 5284 5285 ret = emlxs_test(hba, cmd[0], 5286 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]); 5287 5288 break; 5289 } 5290 5291 case EMLXS_BAR_IO: 5292 { 5293 uint32_t *cmd; 5294 uint32_t *datap; 5295 FCIO_Q_STAT_t *qp; 5296 clock_t time; 5297 uint32_t offset; 5298 caddr_t addr; 5299 uint32_t i; 5300 uint32_t tx_cnt; 5301 uint32_t chip_cnt; 5302 5303 cmd = (uint32_t *)pm->pm_cmd_buf; 5304 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5305 "fca_port_manage: BAR_IO %x %x %x", 5306 cmd[0], cmd[1], cmd[2]); 5307 5308 offset = cmd[1]; 5309 5310 ret = FC_SUCCESS; 5311 5312 switch (cmd[0]) { 5313 case 2: /* bar1read */ 5314 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5315 return (FC_BADCMD); 5316 } 5317 5318 /* Registers in this range are invalid */ 5319 if ((offset >= 0x4C00) && (offset < 0x5000)) { 5320 return (FC_BADCMD); 5321 } 5322 if ((offset >= 0x5800) || (offset & 0x3)) { 5323 return (FC_BADCMD); 5324 } 5325 datap = (uint32_t *)pm->pm_stat_buf; 5326 5327 for (i = 0; i < pm->pm_stat_len; 
5328 i += sizeof (uint32_t)) { 5329 if ((offset >= 0x4C00) && 5330 (offset < 0x5000)) { 5331 pm->pm_stat_len = i; 5332 break; 5333 } 5334 if (offset >= 0x5800) { 5335 pm->pm_stat_len = i; 5336 break; 5337 } 5338 addr = hba->sli.sli4.bar1_addr + offset; 5339 *datap = READ_BAR1_REG(hba, addr); 5340 datap++; 5341 offset += sizeof (uint32_t); 5342 } 5343 #ifdef FMA_SUPPORT 5344 /* Access handle validation */ 5345 EMLXS_CHK_ACC_HANDLE(hba, 5346 hba->sli.sli4.bar1_acc_handle); 5347 #endif /* FMA_SUPPORT */ 5348 break; 5349 case 3: /* bar2read */ 5350 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5351 return (FC_BADCMD); 5352 } 5353 if ((offset >= 0x1000) || (offset & 0x3)) { 5354 return (FC_BADCMD); 5355 } 5356 datap = (uint32_t *)pm->pm_stat_buf; 5357 5358 for (i = 0; i < pm->pm_stat_len; 5359 i += sizeof (uint32_t)) { 5360 *datap = READ_BAR2_REG(hba, 5361 hba->sli.sli4.bar2_addr + offset); 5362 datap++; 5363 offset += sizeof (uint32_t); 5364 } 5365 #ifdef FMA_SUPPORT 5366 /* Access handle validation */ 5367 EMLXS_CHK_ACC_HANDLE(hba, 5368 hba->sli.sli4.bar2_acc_handle); 5369 #endif /* FMA_SUPPORT */ 5370 break; 5371 case 4: /* bar1write */ 5372 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5373 return (FC_BADCMD); 5374 } 5375 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr + 5376 offset, cmd[2]); 5377 #ifdef FMA_SUPPORT 5378 /* Access handle validation */ 5379 EMLXS_CHK_ACC_HANDLE(hba, 5380 hba->sli.sli4.bar1_acc_handle); 5381 #endif /* FMA_SUPPORT */ 5382 break; 5383 case 5: /* bar2write */ 5384 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5385 return (FC_BADCMD); 5386 } 5387 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr + 5388 offset, cmd[2]); 5389 #ifdef FMA_SUPPORT 5390 /* Access handle validation */ 5391 EMLXS_CHK_ACC_HANDLE(hba, 5392 hba->sli.sli4.bar2_acc_handle); 5393 #endif /* FMA_SUPPORT */ 5394 break; 5395 case 6: /* dumpbsmbox */ 5396 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5397 return (FC_BADCMD); 5398 } 5399 if (offset != 0) { 5400 return (FC_BADCMD); 5401 } 5402 
5403 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt, 5404 (caddr_t)pm->pm_stat_buf, 256); 5405 break; 5406 case 7: /* pciread */ 5407 if ((offset >= 0x200) || (offset & 0x3)) { 5408 return (FC_BADCMD); 5409 } 5410 datap = (uint32_t *)pm->pm_stat_buf; 5411 for (i = 0; i < pm->pm_stat_len; 5412 i += sizeof (uint32_t)) { 5413 *datap = ddi_get32(hba->pci_acc_handle, 5414 (uint32_t *)(hba->pci_addr + 5415 offset)); 5416 datap++; 5417 offset += sizeof (uint32_t); 5418 } 5419 #ifdef FMA_SUPPORT 5420 /* Access handle validation */ 5421 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle); 5422 #endif /* FMA_SUPPORT */ 5423 break; 5424 case 8: /* abortall */ 5425 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5426 return (FC_BADCMD); 5427 } 5428 emlxs_abort_all(hba, &tx_cnt, &chip_cnt); 5429 datap = (uint32_t *)pm->pm_stat_buf; 5430 *datap++ = tx_cnt; 5431 *datap = chip_cnt; 5432 break; 5433 case 9: /* get_q_info */ 5434 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5435 return (FC_BADCMD); 5436 } 5437 qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf; 5438 for (i = 0; i < FCIO_MAX_EQS; i++) { 5439 addr = hba->sli.sli4.eq[i].addr.virt; 5440 qp->eq[i].host_index = 5441 hba->sli.sli4.eq[i].host_index; 5442 qp->eq[i].max_index = 5443 hba->sli.sli4.eq[i].max_index; 5444 qp->eq[i].qid = 5445 hba->sli.sli4.eq[i].qid; 5446 qp->eq[i].msix_vector = 5447 hba->sli.sli4.eq[i].msix_vector; 5448 qp->eq[i].phys = 5449 hba->sli.sli4.eq[i].addr.phys; 5450 qp->eq[i].virt = PADDR_LO( 5451 (uintptr_t)addr); 5452 qp->eq[i].virt_hi = PADDR_HI( 5453 (uintptr_t)addr); 5454 qp->eq[i].max_proc = 5455 hba->sli.sli4.eq[i].max_proc; 5456 qp->eq[i].isr_count = 5457 hba->sli.sli4.eq[i].isr_count; 5458 qp->eq[i].num_proc = 5459 hba->sli.sli4.eq[i].num_proc; 5460 } 5461 for (i = 0; i < FCIO_MAX_CQS; i++) { 5462 addr = hba->sli.sli4.cq[i].addr.virt; 5463 qp->cq[i].host_index = 5464 hba->sli.sli4.cq[i].host_index; 5465 qp->cq[i].max_index = 5466 hba->sli.sli4.cq[i].max_index; 5467 qp->cq[i].qid = 5468 hba->sli.sli4.cq[i].qid; 5469 
qp->cq[i].eqid = 5470 hba->sli.sli4.cq[i].eqid; 5471 qp->cq[i].type = 5472 hba->sli.sli4.cq[i].type; 5473 qp->cq[i].phys = 5474 hba->sli.sli4.cq[i].addr.phys; 5475 qp->cq[i].virt = PADDR_LO( 5476 (uintptr_t)addr); 5477 qp->cq[i].virt_hi = PADDR_HI( 5478 (uintptr_t)addr); 5479 qp->cq[i].max_proc = 5480 hba->sli.sli4.cq[i].max_proc; 5481 qp->cq[i].isr_count = 5482 hba->sli.sli4.cq[i].isr_count; 5483 qp->cq[i].num_proc = 5484 hba->sli.sli4.cq[i].num_proc; 5485 } 5486 for (i = 0; i < FCIO_MAX_WQS; i++) { 5487 addr = hba->sli.sli4.wq[i].addr.virt; 5488 qp->wq[i].host_index = 5489 hba->sli.sli4.wq[i].host_index; 5490 qp->wq[i].max_index = 5491 hba->sli.sli4.wq[i].max_index; 5492 qp->wq[i].port_index = 5493 hba->sli.sli4.wq[i].port_index; 5494 qp->wq[i].release_depth = 5495 hba->sli.sli4.wq[i].release_depth; 5496 qp->wq[i].qid = 5497 hba->sli.sli4.wq[i].qid; 5498 qp->wq[i].cqid = 5499 hba->sli.sli4.wq[i].cqid; 5500 qp->wq[i].phys = 5501 hba->sli.sli4.wq[i].addr.phys; 5502 qp->wq[i].virt = PADDR_LO( 5503 (uintptr_t)addr); 5504 qp->wq[i].virt_hi = PADDR_HI( 5505 (uintptr_t)addr); 5506 qp->wq[i].num_proc = 5507 hba->sli.sli4.wq[i].num_proc; 5508 qp->wq[i].num_busy = 5509 hba->sli.sli4.wq[i].num_busy; 5510 } 5511 for (i = 0; i < FCIO_MAX_RQS; i++) { 5512 addr = hba->sli.sli4.rq[i].addr.virt; 5513 qp->rq[i].qid = 5514 hba->sli.sli4.rq[i].qid; 5515 qp->rq[i].cqid = 5516 hba->sli.sli4.rq[i].cqid; 5517 qp->rq[i].host_index = 5518 hba->sli.sli4.rq[i].host_index; 5519 qp->rq[i].max_index = 5520 hba->sli.sli4.rq[i].max_index; 5521 qp->rq[i].phys = 5522 hba->sli.sli4.rq[i].addr.phys; 5523 qp->rq[i].virt = PADDR_LO( 5524 (uintptr_t)addr); 5525 qp->rq[i].virt_hi = PADDR_HI( 5526 (uintptr_t)addr); 5527 qp->rq[i].num_proc = 5528 hba->sli.sli4.rq[i].num_proc; 5529 } 5530 qp->que_start_timer = 5531 hba->sli.sli4.que_stat_timer; 5532 (void) drv_getparm(LBOLT, &time); 5533 qp->que_current_timer = (uint32_t)time; 5534 qp->intr_count = hba->intr_count; 5535 break; 5536 case 10: /* zero_q_stat 
*/ 5537 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) { 5538 return (FC_BADCMD); 5539 } 5540 emlxs_sli4_zero_queue_stat(hba); 5541 break; 5542 default: 5543 ret = FC_BADCMD; 5544 break; 5545 } 5546 break; 5547 } 5548 5549 default: 5550 5551 ret = FC_INVALID_REQUEST; 5552 break; 5553 } 5554 5555 break; 5556 5557 } 5558 5559 case FC_PORT_INITIALIZE: 5560 if (!(hba->flag & FC_ONLINE_MODE)) { 5561 return (FC_OFFLINE); 5562 } 5563 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5564 "fca_port_manage: FC_PORT_INITIALIZE"); 5565 break; 5566 5567 case FC_PORT_LOOPBACK: 5568 if (!(hba->flag & FC_ONLINE_MODE)) { 5569 return (FC_OFFLINE); 5570 } 5571 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5572 "fca_port_manage: FC_PORT_LOOPBACK"); 5573 break; 5574 5575 case FC_PORT_BYPASS: 5576 if (!(hba->flag & FC_ONLINE_MODE)) { 5577 return (FC_OFFLINE); 5578 } 5579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5580 "fca_port_manage: FC_PORT_BYPASS"); 5581 ret = FC_INVALID_REQUEST; 5582 break; 5583 5584 case FC_PORT_UNBYPASS: 5585 if (!(hba->flag & FC_ONLINE_MODE)) { 5586 return (FC_OFFLINE); 5587 } 5588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5589 "fca_port_manage: FC_PORT_UNBYPASS"); 5590 ret = FC_INVALID_REQUEST; 5591 break; 5592 5593 case FC_PORT_GET_NODE_ID: 5594 { 5595 fc_rnid_t *rnid; 5596 5597 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5598 "fca_port_manage: FC_PORT_GET_NODE_ID"); 5599 5600 bzero(pm->pm_data_buf, pm->pm_data_len); 5601 5602 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 5603 ret = FC_NOMEM; 5604 break; 5605 } 5606 5607 rnid = (fc_rnid_t *)pm->pm_data_buf; 5608 5609 (void) snprintf((char *)rnid->global_id, 5610 (sizeof (rnid->global_id)-1), 5611 "%01x%01x%02x%02x%02x%02x%02x%02x%02x", 5612 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, 5613 hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0], 5614 hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3], 5615 hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]); 5616 5617 rnid->unit_type = RNID_HBA; 5618 rnid->port_id = port->did; 
5619 rnid->ip_version = RNID_IPV4; 5620 5621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5622 "GET_NODE_ID: wwpn: %s", rnid->global_id); 5623 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5624 "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5626 "GET_NODE_ID: port_id: 0x%x", rnid->port_id); 5627 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5628 "GET_NODE_ID: num_attach: %d", rnid->num_attached); 5629 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5630 "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5632 "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port); 5633 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5634 "GET_NODE_ID: ip_addr: %s", rnid->ip_addr); 5635 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5636 "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv); 5637 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5638 "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags); 5639 5640 ret = FC_SUCCESS; 5641 break; 5642 } 5643 5644 case FC_PORT_SET_NODE_ID: 5645 { 5646 fc_rnid_t *rnid; 5647 5648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5649 "fca_port_manage: FC_PORT_SET_NODE_ID"); 5650 5651 if (pm->pm_data_len < sizeof (fc_rnid_t)) { 5652 ret = FC_NOMEM; 5653 break; 5654 } 5655 5656 rnid = (fc_rnid_t *)pm->pm_data_buf; 5657 5658 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5659 "SET_NODE_ID: wwpn: %s", rnid->global_id); 5660 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5661 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type); 5662 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5663 "SET_NODE_ID: port_id: 0x%x", rnid->port_id); 5664 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5665 "SET_NODE_ID: num_attach: %d", rnid->num_attached); 5666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5667 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version); 5668 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 5669 "SET_NODE_ID: udp_port: 0x%x", 
rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

		ret = FC_SUCCESS;
		break;
	}

#ifdef S11
	case FC_PORT_GET_P2P_INFO:
	{
		fc_fca_p2p_info_t *p2p_info;
		NODELIST *ndlp;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_P2P_INFO");

		bzero(pm->pm_data_buf, pm->pm_data_len);

		/* Caller's buffer must be large enough for the reply */
		if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
			ret = FC_NOMEM;
			break;
		}

		p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;

		/*
		 * P2P info is only meaningful when the link is up in
		 * point-to-point topology; otherwise fall through to
		 * FC_FAILURE below.
		 */
		if (hba->state >= FC_LINK_UP) {
			if ((hba->topology == TOPOLOGY_PT_PT) &&
			    (hba->flag & FC_PT_TO_PT)) {
				p2p_info->fca_d_id = port->did;
				p2p_info->d_id = port->rdid;

				ndlp = emlxs_node_find_did(port,
				    port->rdid, 1);

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
				    "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
				    "d_id: 0x%x, ndlp: 0x%p", port->did,
				    port->rdid, ndlp);
				if (ndlp) {
					bcopy(&ndlp->nlp_portname,
					    (caddr_t)&p2p_info->pwwn,
					    sizeof (la_wwn_t));
					bcopy(&ndlp->nlp_nodename,
					    (caddr_t)&p2p_info->nwwn,
					    sizeof (la_wwn_t));

					ret = FC_SUCCESS;
					break;

				}
			}
		}

		ret = FC_FAILURE;
		break;
	}
#endif /* S11 */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: code=%x", pm->pm_cmd_code);
		ret = FC_INVALID_REQUEST;
		break;

	}

	return (ret);

} /* emlxs_fca_port_manage() */


/*
 * Driver test hook.  Dispatches on test_code; currently only test 1
 * (SCSI underrun injection, TEST_SUPPORT builds) is implemented.
 * Returns 0 on success or FC_INVALID_REQUEST for unknown codes.
 */
/*ARGSUSED*/
static uint32_t
emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
    uint32_t *arg)
{
	uint32_t rval = 0;
	emlxs_port_t *port = &PPORT;

	switch (test_code) {
#ifdef TEST_SUPPORT
	case 1: /* SCSI underrun */
	{
		/* arg[0] = number of underruns to simulate (default 1) */
		hba->underrun_counter = (args)? arg[0]:1;
		break;
	}
#endif /* TEST_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "test: Unsupported test code. (0x%x)", test_code);
		rval = FC_INVALID_REQUEST;
	}

	return (rval);

} /* emlxs_test() */


/*
 * Given the device number, return the devinfo pointer or the ddiinst number.
 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
 * before attach.
 *
 * Translate "dev_t" to a pointer to the associated "dev_info_t".
 */
/*ARGSUSED*/
static int
emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	emlxs_hba_t *hba;
	int32_t ddiinst;

	/* Minor number encodes the driver instance */
	ddiinst = getminor((dev_t)arg);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		/* NULL result (instance not attached) still returns SUCCESS */
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
		if (hba)
			*result = hba->dip;
		else
			*result = NULL;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)((unsigned long)ddiinst);
		break;

	default:
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_info() */


/*
 * power(9E) entry point.  Only the EMLXS_PM_ADAPTER component is
 * supported; transitions between EMLXS_PM_ADAPTER_UP/DOWN suspend or
 * resume the HBA.  pm_level changes are serialized by EMLXS_PM_LOCK.
 */
static int32_t
emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_power: comp=%x level=%x", comp, level);

	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
		return (DDI_FAILURE);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	/* If we are already at the proper level then return success */
	if (hba->pm_level == level) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}

	switch (level) {
	case EMLXS_PM_ADAPTER_UP:

		/*
		 * If we are already in emlxs_attach,
		 * let emlxs_hba_attach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;
			break;
		}

		/* Check if adapter is suspended */
		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;

			/* Try to resume the port */
			rval = emlxs_hba_resume(dip);

			/* On failure, fall back to the DOWN level */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			}
			break;
		}

		/* Set adapter up */
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		break;

	case EMLXS_PM_ADAPTER_DOWN:


		/*
		 * If we are already in emlxs_detach,
		 * let emlxs_hba_detach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			break;
		}

		/* Check if adapter is not suspended */
		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;

			/* Try to suspend the port */
			rval = emlxs_hba_suspend(dip);

			/* On failure, remain at the UP level */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_UP;
			}

			break;
		}

		/* Set adapter down */
		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
		break;

	default:
		rval = DDI_FAILURE;
		break;

	}

	mutex_exit(&EMLXS_PM_LOCK);

	return (rval);

} /* emlxs_power() */


#ifdef EMLXS_I386
#ifdef S11
/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, this function must not be blocked.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
emlxs_quiesce(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	if (hba == NULL || port == NULL) {
		return (DDI_FAILURE);
	}

	/* The fourth arg 1 indicates the call is from quiesce */
	if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
		return (rval);
	} else {
		return (DDI_FAILURE);
	}

} /* emlxs_quiesce */
#endif /* S11 */
#endif /* EMLXS_I386 */


/*
 * open(9E) entry point for the driver control node (character device).
 * Requires privileged credentials; honors FEXCL exclusive opens via the
 * EMLXS_OPEN/EMLXS_OPEN_EXCLUSIVE flags guarded by EMLXS_IOCTL_LOCK.
 */
static int
emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = getminor(*dev_p);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	/* Refuse opens while the driver is power-suspended */
	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "open failed: Driver suspended.");
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	/* Only privileged callers may open the control node */
	if (drv_priv(cred_p)) {
		return (EPERM);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	/* An existing exclusive open blocks everyone */
	if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (EBUSY);
	}

	if (flag & FEXCL) {
		/* Exclusive open requires no prior opens at all */
		if (hba->ioctl_flags & EMLXS_OPEN) {
			mutex_exit(&EMLXS_IOCTL_LOCK);
			return (EBUSY);
		}

		hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
	}

	hba->ioctl_flags |= EMLXS_OPEN;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_open() */


/*
 * close(9E) entry point.  Clears both the open and exclusive-open flags.
 */
/*ARGSUSED*/
static int
emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
{
	emlxs_hba_t *hba;
	int ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	if (otype != OTYP_CHR) {
		return (EINVAL);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);

	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENODEV);
	}

	hba->ioctl_flags &= ~EMLXS_OPEN;
	hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;

	mutex_exit(&EMLXS_IOCTL_LOCK);

	return (0);

} /* emlxs_close() */


/*
 * ioctl(9E) entry point.  Only EMLXS_DFC_COMMAND is supported; it is
 * forwarded to emlxs_dfc_manage().  Fails with ENXIO when the driver is
 * suspended or the device is not open.
 */
/*ARGSUSED*/
static int
emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
    cred_t *cred_p, int32_t *rval_p)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int rval = 0;	/* return code */
	int ddiinst;

	ddiinst = getminor(dev);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

	if (hba == NULL) {
		return (ENXIO);
	}

	port = &PPORT;

	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl failed: Driver suspended.");

		return (ENXIO);
	}

	mutex_enter(&EMLXS_IOCTL_LOCK);
	if (!(hba->ioctl_flags & EMLXS_OPEN)) {
		mutex_exit(&EMLXS_IOCTL_LOCK);
		return (ENXIO);
	}
	mutex_exit(&EMLXS_IOCTL_LOCK);

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif	/* IDLE_TIMER */

	switch (cmd) {
	case EMLXS_DFC_COMMAND:
		rval = emlxs_dfc_manage(hba, (void *)arg, mode);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
		    "ioctl: Invalid command received. cmd=%x", cmd);
		rval = EINVAL;
	}

done:	/* single exit point; label currently has no goto referencing it */
	return (rval);

} /* emlxs_ioctl() */



/*
 *
 *	Device Driver Common Routines
 *
 */

/* EMLXS_PM_LOCK must be held for this call */
static int
emlxs_hba_resume(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);

	/* Nothing to do if not currently suspended */
	if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
		return (DDI_SUCCESS);
	}

	hba->pm_state &= ~EMLXS_PM_SUSPENDED;

	/* Re-enable the physical port on this HBA */
	port->flag |= EMLXS_PORT_ENABLED;

	/* Take the adapter online */
	if (emlxs_power_up(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
		    "Unable to take adapter online.");

		/* Restore the suspended state on failure */
		hba->pm_state |= EMLXS_PM_SUSPENDED;

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_resume() */


/* EMLXS_PM_LOCK must be held for this call */
static int
emlxs_hba_suspend(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);

	/* Nothing to do if already suspended */
	if (hba->pm_state & EMLXS_PM_SUSPENDED) {
		return (DDI_SUCCESS);
	}

	hba->pm_state |= EMLXS_PM_SUSPENDED;

	/* Take the adapter offline */
	if (emlxs_power_down(hba)) {
		/* Clear the suspended state on failure */
		hba->pm_state &= ~EMLXS_PM_SUSPENDED;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
		    "Unable to take adapter offline.");

		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);

} /* emlxs_hba_suspend() */



static void
emlxs_lock_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	/*
	 * Create all per-HBA mutexes, condition variables and rwlocks.
	 * All mutexes are interrupt-aware (initialized with the HBA's
	 * interrupt priority) since they may be taken from interrupt
	 * context.  Destroyed by emlxs_lock_destroy().
	 */

	/* Initialize the power management */
	mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);

	mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);

	mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);

	mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	for (i = 0; i < MAX_RINGS; i++) {
		mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
	}


	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
	}

	mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

#ifdef DUMP_SUPPORT
	mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));
#endif /* DUMP_SUPPORT */

	mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(hba->intr_arg));

	/*
	 * Create per port locks.
	 *
	 * Note: an earlier revision special-cased port 0 (i == 0) here,
	 * but both branches had become token-for-token identical, so the
	 * duplicated branches were collapsed into one sequence.  The
	 * EMLXS_PKT_LOCK/EMLXS_PKT_CV/EMLXS_UB_LOCK macros resolve
	 * through the 'port' pointer set just above.
	 */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);

		mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));

		cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);

		mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
	}

	return;

} /* emlxs_lock_init() */



/*
 * Destroy every lock and condition variable created by
 * emlxs_lock_init(), including the per-port locks.
 */
static void
emlxs_lock_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	mutex_destroy(&EMLXS_TIMER_LOCK);
	cv_destroy(&hba->timer_lock_cv);

	mutex_destroy(&EMLXS_PORT_LOCK);

	cv_destroy(&EMLXS_MBOX_CV);
	cv_destroy(&EMLXS_LINKUP_CV);

	mutex_destroy(&EMLXS_LINKUP_LOCK);
	mutex_destroy(&EMLXS_MBOX_LOCK);

	mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);

	for (i = 0; i < MAX_RINGS; i++) {
		mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
	}

	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		mutex_destroy(&EMLXS_QUE_LOCK(i));
	}

	mutex_destroy(&EMLXS_MSIID_LOCK);

	mutex_destroy(&EMLXS_FCTAB_LOCK);
	mutex_destroy(&EMLXS_MEMGET_LOCK);
	mutex_destroy(&EMLXS_MEMPUT_LOCK);
	mutex_destroy(&EMLXS_IOCTL_LOCK);
	mutex_destroy(&EMLXS_SPAWN_LOCK);
	mutex_destroy(&EMLXS_PM_LOCK);

#ifdef DUMP_SUPPORT
	mutex_destroy(&EMLXS_DUMP_LOCK);
#endif /* DUMP_SUPPORT */

	/* Destroy per port locks */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);
		rw_destroy(&port->node_rwlock);
		mutex_destroy(&EMLXS_PKT_LOCK);
		cv_destroy(&EMLXS_PKT_CV);
		mutex_destroy(&EMLXS_UB_LOCK);
	}

	return;

} /* emlxs_lock_destroy() */

/* init_flag values */
#define	ATTACH_SOFT_STATE	0x00000001
#define	ATTACH_FCA_TRAN		0x00000002
#define	ATTACH_HBA		0x00000004
#define	ATTACH_LOG		0x00000008
#define	ATTACH_MAP_BUS		0x00000010
#define	ATTACH_INTR_INIT	0x00000020
#define	ATTACH_PROP		0x00000040
#define	ATTACH_LOCK		0x00000080
#define	ATTACH_THREAD		0x00000100
#define	ATTACH_INTR_ADD		0x00000200
#define	ATTACH_ONLINE		0x00000400
#define	ATTACH_NODE		0x00000800
#define	ATTACH_FCT		0x00001000
#define	ATTACH_FCA		0x00002000
#define	ATTACH_KSTAT		0x00004000
#define	ATTACH_DHCHAP		0x00008000
#define	ATTACH_FM		0x00010000
#define	ATTACH_MAP_SLI		0x00020000
#define	ATTACH_SPAWN		0x00040000
#define	ATTACH_EVENTS		0x00080000

/*
 * Undo whichever attach steps (recorded as ATTACH_* bits in init_flag)
 * have completed, roughly in reverse order of initialization.  Used both
 * by detach and by attach failure cleanup.  When 'failed' is set, the
 * device slot is poisoned with -1 so future attaches of this instance
 * are refused.
 */
static void
emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
{
	emlxs_hba_t *hba = NULL;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);

	if (init_flag & ATTACH_HBA) {
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

		if (init_flag & ATTACH_SPAWN) {
			emlxs_thread_spawn_destroy(hba);
		}

		if (init_flag & ATTACH_EVENTS) {
			(void) emlxs_event_queue_destroy(hba);
		}

		if (init_flag & ATTACH_ONLINE) {
			(void) emlxs_offline(hba, 1);
		}

		if (init_flag & ATTACH_INTR_ADD) {
			(void) EMLXS_INTR_REMOVE(hba);
		}
#ifdef SFCT_SUPPORT
		if (init_flag & ATTACH_FCT) {
			emlxs_fct_detach(hba);
			emlxs_fct_modclose();
		}
#endif /* SFCT_SUPPORT */

#ifdef DHCHAP_SUPPORT
		if (init_flag & ATTACH_DHCHAP) {
			emlxs_dhc_detach(hba);
		}
#endif /* DHCHAP_SUPPORT */

		if (init_flag & ATTACH_KSTAT) {
			kstat_delete(hba->kstat);
		}

		if (init_flag & ATTACH_FCA) {
			emlxs_fca_detach(hba);
		}

		if (init_flag & ATTACH_NODE) {
			(void) ddi_remove_minor_node(hba->dip, "devctl");
		}

		if (init_flag & ATTACH_THREAD) {
			emlxs_thread_destroy(&hba->iodone_thread);
		}

		if (init_flag & ATTACH_PROP) {
			(void) ddi_prop_remove_all(hba->dip);
		}

		if (init_flag & ATTACH_LOCK) {
			emlxs_lock_destroy(hba);
		}

		if (init_flag & ATTACH_INTR_INIT) {
			(void) EMLXS_INTR_UNINIT(hba);
		}

		if (init_flag & ATTACH_MAP_BUS) {
			emlxs_unmap_bus(hba);
		}

		if (init_flag & ATTACH_MAP_SLI) {
			EMLXS_SLI_UNMAP_HDW(hba);
		}

#ifdef FMA_SUPPORT
		if (init_flag & ATTACH_FM) {
			emlxs_fm_fini(hba);
		}
#endif	/* FMA_SUPPORT */

		if (init_flag & ATTACH_LOG) {
			emlxs_msg_log_destroy(hba);
		}

		if (init_flag & ATTACH_FCA_TRAN) {
			(void) ddi_set_driver_private(hba->dip, NULL);
			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
			hba->fca_tran = NULL;
		}

		/* ATTACH_HBA is already known true here (outer check) */
		if (init_flag & ATTACH_HBA) {
			emlxs_device.log[hba->emlxinst] = 0;
			/* -1 marks a failed instance; 0 frees the slot */
			emlxs_device.hba[hba->emlxinst] =
			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
#ifdef DUMP_SUPPORT
			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
#endif /* DUMP_SUPPORT */

		}
	}

	if (init_flag & ATTACH_SOFT_STATE) {
		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
	}

	return;

} /* emlxs_driver_remove() */


/* This determines which ports will be initiator mode */
static uint32_t
emlxs_fca_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;

	/* Check if SFS present */
	if (((void *)MODSYM(fc_fca_init) == NULL) ||
	    ((void *)MODSYM(fc_fca_attach) == NULL)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS not present.");
		return (1);
	}

	/* Check if our SFS driver interface matches the current SFS stack */
	if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "SFS/FCA version mismatch. FCA=0x%x",
		    hba->fca_tran->fca_version);
		return (1);
	}

	return (0);

} /* emlxs_fca_init() */


/* This determines which ports will be initiator or target mode */
static void
emlxs_mode_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t mode_mask;

	/* Initialize mode masks */
	(void) emlxs_mode_init_masks(hba);

	if (!(port->mode_mask & MODE_INITIATOR)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode not enabled.");

#ifdef SFCT_SUPPORT
		/* Disable dynamic target mode */
		cfg[CFG_DTM_ENABLE].current = 0;
#endif /* SFCT_SUPPORT */

		goto done1;
	}

	/* Try to initialize fca interface */
	if (emlxs_fca_init(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode disabled.");

		/* Disable initiator mode */
		port->mode_mask &= ~MODE_INITIATOR;

#ifdef SFCT_SUPPORT
		/* Disable dynamic target mode */
		cfg[CFG_DTM_ENABLE].current = 0;
#endif /* SFCT_SUPPORT */

		goto done1;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Initiator mode enabled.");

done1:

#ifdef SFCT_SUPPORT
	if (!(port->mode_mask & MODE_TARGET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode not enabled.");

		/* Disable target modes */
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	/* Try to open the COMSTAR module */
	if (emlxs_fct_modopen() != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode disabled.");

		/* Disable target modes */
		port->mode_mask &= ~MODE_TARGET;
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	/* Try to initialize fct interface */
	if (emlxs_fct_init(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode disabled.");

		/* Disable target modes */
		port->mode_mask &= ~MODE_TARGET;
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Target mode enabled.");

done2:
	/* Adjust target mode parameter flags */
	if (cfg[CFG_DTM_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Dynamic target mode enabled.");

		cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Dynamic target mode disabled.");

		cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
	}
#endif /* SFCT_SUPPORT */

	/* Now set port flags */
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Set flags for physical port */
	if (port->mode_mask & MODE_INITIATOR) {
		port->flag |= EMLXS_INI_ENABLED;
	} else {
		port->flag &= ~EMLXS_INI_ENABLED;
	}

	if (port->mode_mask & MODE_TARGET) {
		port->flag |= EMLXS_TGT_ENABLED;
	} else {
		port->flag &= ~EMLXS_TGT_ENABLED;
	}

	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		/* Physical port mask has only allowable bits */
		mode_mask = vport->mode_mask & port->mode_mask;

		/* Set flags for virtual port */
		if (mode_mask & MODE_INITIATOR) {
			vport->flag |= EMLXS_INI_ENABLED;
		} else {
			vport->flag &= ~EMLXS_INI_ENABLED;
		}

		if (mode_mask & MODE_TARGET) {
			vport->flag |= EMLXS_TGT_ENABLED;
		} else {
			vport->flag &= ~EMLXS_TGT_ENABLED;
		}
	}

	/* Set initial driver mode */
	emlxs_mode_set(hba);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Recheck possible mode dependent parameters */
	/* in case conditions have changed. */
	if (port->mode != MODE_NONE) {
		for (i = 0; i < NUM_CFG_PARAM; i++) {
			cfg = &hba->config[i];
			cfg->current = emlxs_check_parm(hba, i, cfg->current);
		}
	}

	return;

} /* emlxs_mode_init() */


/* This must be called while holding the EMLXS_PORT_LOCK */
extern void
emlxs_mode_set(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg = &CFG;
#endif /* SFCT_SUPPORT */
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t cfg_tgt_mode = 0;

	/* mutex_enter(&EMLXS_PORT_LOCK); */

#ifdef SFCT_SUPPORT
	cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
#endif /* SFCT_SUPPORT */

	/*
	 * Note: the "} else {" below lives inside the #ifdef so the braces
	 * stay balanced whether or not SFCT_SUPPORT is compiled in.
	 */
	/* Initiator mode requested */
	if (!cfg_tgt_mode) {
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
			    MODE_INITIATOR:MODE_NONE;
		}
#ifdef SFCT_SUPPORT
	/* Target mode requested */
	} else {
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
			    MODE_TARGET:MODE_NONE;
		}
#endif /* SFCT_SUPPORT */
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "MODE: %s", emlxs_mode_xlate(port->mode));

	/* mutex_exit(&EMLXS_PORT_LOCK); */

	return;

} /* emlxs_mode_set() */


/*
 * Compute the allowed initiator/target mode mask for the physical port
 * and every virtual port, from the target-mode / dynamic-target-mode /
 * vport-mode-mask config parameters (SFCT builds) or initiator-only
 * otherwise.  Per-vport overrides come from "<drv><inst>-vport<N>-mode-mask"
 * properties.
 */
static void
emlxs_mode_init_masks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;

#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg = &CFG;
	uint32_t vport_mode_mask;
	uint32_t cfg_vport_mode_mask;
	uint32_t mode_mask;
	char string[256];

	port->mode_mask = 0;

	if (!cfg[CFG_TARGET_MODE].current ||
	    cfg[CFG_DTM_ENABLE].current) {
		port->mode_mask |= MODE_INITIATOR;
	}

	if (cfg[CFG_TARGET_MODE].current ||
	    cfg[CFG_DTM_ENABLE].current) {
		port->mode_mask |= MODE_TARGET;
	}

	/* Physical port mask has only allowable bits */
	vport_mode_mask = port->mode_mask;
	cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;

	/* Check dynamic target mode value for virtual ports */
	if (cfg[CFG_DTM_ENABLE].current == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "%s = 0: Virtual target ports are not supported.",
		    cfg[CFG_DTM_ENABLE].string);

		vport_mode_mask &= ~MODE_TARGET;
	}

	cfg_vport_mode_mask &= vport_mode_mask;

	if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "%s: Changing 0x%x --> 0x%x",
		    cfg[CFG_VPORT_MODE_MASK].string,
		    cfg[CFG_VPORT_MODE_MASK].current,
		    cfg_vport_mode_mask);

		cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "vport-mode-mask: %s",
	    emlxs_mode_xlate(cfg_vport_mode_mask));

	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		/* Per-vport override property, e.g. "emlxs0-vport1-mode-mask" */
		(void) snprintf(string, sizeof (string),
		    "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);

		mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS, string,
		    cfg_vport_mode_mask);

		/* Restrict to what the physical port allows */
		vport->mode_mask = mode_mask & vport_mode_mask;

		if (vport->mode_mask != cfg_vport_mode_mask) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "vport%d-mode-mask: %s",
			    i, emlxs_mode_xlate(vport->mode_mask));
		}
	}
#else
	/* Without SFCT support everything is initiator-only */
	port->mode_mask = MODE_INITIATOR;
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->mode_mask = MODE_INITIATOR;
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_mode_init_masks() */


/*
 * Finish populating the FCA transport structure (interrupt cookie,
 * command throttle, permanent port WWN) and reset each port's unsolicited
 * buffer bookkeeping before registering with the SFS/Leadville stack.
 */
static void
emlxs_fca_attach(emlxs_hba_t *hba)
{
	emlxs_port_t *port;
	uint32_t i;

	/* Update our transport structure */
	hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
	hba->fca_tran->fca_cmd_max = hba->io_throttle;

	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);
		port->ub_count = EMLXS_UB_TOKEN_OFFSET;
		port->ub_pool = NULL;
	}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
	    sizeof (NAME_TYPE));
#endif /* >= EMLXS_MODREV5 */

	return;

} /* emlxs_fca_attach() */


/*
 * Unregister from the SFS/Leadville stack (if initiator mode was ever
 * enabled) and clear the INI-enabled flag on every port.
 */
static void
emlxs_fca_detach(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	emlxs_port_t *vport;

	if (!(port->flag & EMLXS_INI_ENABLED)) {
		return;
	}

	if ((void *)MODSYM(fc_fca_detach) != NULL) {
		MODSYM(fc_fca_detach)(hba->dip);
	}

	/* Disable INI mode for all ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->flag &= ~EMLXS_INI_ENABLED;
	}

	return;

} /* emlxs_fca_detach() */


/*
 * Log the attach-time banner: copyright/firmware library (once per
 * system), driver revision, adapter model/IDs, firmware versions,
 * SLI/MSI/NPIV configuration and the WWNs of every configured port.
 */
static void
emlxs_drv_banner(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	char sli_mode[16];
	char msi_mode[16];
	char npiv_mode[16];
	emlxs_vpd_t *vpd = &VPD;
	uint8_t *wwpn;
	uint8_t *wwnn;
	uint32_t fw_show = 0;

	/* Display firmware library one time for all driver instances */
	mutex_enter(&emlxs_device.lock);
	if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
		emlxs_instance_flag |= EMLXS_FW_SHOW;
		fw_show = 1;
	}
	mutex_exit(&emlxs_device.lock);

	if (fw_show) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
		    emlxs_copyright);
		emlxs_fw_show(hba);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
	    emlxs_revision);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "%s Ven_id:%x Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
	    hba->model_info.vendor_id, hba->model_info.device_id,
	    hba->model_info.ssdid, hba->model_info.id);

#ifdef EMLXS_I386

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
	    vpd->boot_version);

#else	/* EMLXS_SPARC */

	/* SPARC additionally carries an FCode version */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);

#endif	/* EMLXS_I386 */

	if (hba->sli_mode > 3) {
		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
		    hba->sli_mode,
		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
	} else {
		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
		    hba->sli_mode);
	}

	(void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
			break;

		case DDI_INTR_TYPE_MSI:
			(void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
			    hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
			    hba->intr_count);
			break;
		}
	}
#endif /* MSI_SUPPORT */

	(void) strlcpy(npiv_mode, "", sizeof (npiv_mode));

	if (hba->flag & FC_NPIV_ENABLED) {
		(void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
		    hba->vpi_max+1);
	} else {
		(void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
		    sli_mode, msi_mode, npiv_mode,
		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
		    ((SLI4_FCOE_MODE)? " FCoE":" FC"));
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
		    sli_mode, msi_mode, npiv_mode,
		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
		    ((port->flag & EMLXS_TGT_ENABLED)?
 */
	(void) ddi_report_dev(hba->dip);

	return;

} /* emlxs_drv_banner() */


/*
 * Read the "fcode-version" property from the device node into
 * vpd->fcode_version; falls back to the string "none" when the property
 * is absent.
 */
extern void
emlxs_get_fcode_version(emlxs_hba_t *hba)
{
	emlxs_vpd_t *vpd = &VPD;
	char *prop_str;
	int status;

	/* Setup fcode version property */
	prop_str = NULL;
	status =
	    ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
	    "fcode-version", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/*
		 * NOTE(review): the copy length is strlen(prop_str), not
		 * bounded by sizeof (vpd->fcode_version); an oversized
		 * property string would overflow the destination buffer.
		 * Confirm property length guarantees, or consider strlcpy.
		 */
		bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
		(void) ddi_prop_free((void *)prop_str);
	} else {
		/* size-1 bound leaves the final NUL from the zeroed vpd */
		(void) strncpy(vpd->fcode_version, "none",
		    (sizeof (vpd->fcode_version)-1));
	}

	return;

} /* emlxs_get_fcode_version() */


/*
 * attach(9E) worker for DDI_ATTACH.  Allocates per-instance soft state,
 * builds the FCA transport structure and per-HBA DMA attributes, runs the
 * driver integrity check, loads the config table and creates the message
 * log.  Progress is tracked in init_flag (ATTACH_* bits) so that
 * emlxs_driver_remove() can unwind a partial attach on failure.
 */
static int
emlxs_hba_attach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_config_t *cfg;
	char *prop_str;
	int ddiinst;
	int32_t emlxinst;
	int status;
	uint32_t rval;
	uint32_t init_flag = 0;
	char local_pm_components[32];
	uint32_t i;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_add_instance(ddiinst);

	if (emlxinst >= MAX_FC_BRDS) {
		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
		    "inst=%x", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* -1 marks an instance whose earlier attach failed permanently */
	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
		return (DDI_FAILURE);
	}

	/* Already attached */
	if (emlxs_device.hba[emlxinst]) {
		return (DDI_SUCCESS);
	}

	/* An adapter can accidentally be plugged into a slave-only PCI slot */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
		    DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* Allocate emlxs_dev_ctl structure. */
	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
		    "state.", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}
	init_flag |= ATTACH_SOFT_STATE;

	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
	    ddiinst)) == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
		    DRIVER_NAME, ddiinst);
		goto failed;
	}
	/* NOTE: redundant with zalloc above, but kept for safety */
	bzero((char *)hba, sizeof (emlxs_hba_t));

	emlxs_device.hba[emlxinst] = hba;
	emlxs_device.log[emlxinst] = &hba->log;

#ifdef DUMP_SUPPORT
	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
#endif /* DUMP_SUPPORT */

	hba->dip = dip;
	hba->emlxinst = emlxinst;
	hba->ddiinst = ddiinst;

	init_flag |= ATTACH_HBA;

	/* Enable the physical port on this HBA */
	port = &PPORT;
	port->hba = hba;
	port->vpi = 0;
	port->flag |= EMLXS_PORT_ENABLED;

	/* Allocate a transport structure */
	hba->fca_tran =
	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
	if (hba->fca_tran == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
		    "memory.", DRIVER_NAME, ddiinst);
		goto failed;
	}
	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
	    sizeof (fc_fca_tran_t));

	/*
	 * Copy the global ddi_dma_attr to the local hba fields
	 */
	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));

	/* Reset the fca_tran dma_attr fields to the per-hba copies */
	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;

	/* Set the transport structure pointer in our dip */
	/* SFS may panic if we are in target only mode */
	/* We will update the transport structure later */
	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
	init_flag |= ATTACH_FCA_TRAN;

	/* Perform driver integrity check */
	rval = emlxs_integrity_check(hba);
	if (rval) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Driver integrity check "
		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
		goto failed;
	}

	cfg = &CFG;

	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
	/*
	 * Gen7 chips respond with unknown command, so we disable heartbeat;
	 * it can be re-enabled in emlxs.conf
	 */
	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_6)
		cfg[CFG_HEARTBEAT_ENABLE].current = 0;

#ifdef MSI_SUPPORT
	if ((void *)&ddi_intr_get_supported_types != NULL) {
		hba->intr_flags |= EMLXS_MSI_ENABLED;
	}
#endif	/* MSI_SUPPORT */


	/* Create the msg log file */
	if (emlxs_msg_log_create(hba) == 0) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to create message "
		    "log", DRIVER_NAME, ddiinst);
		goto failed;

	}
	init_flag |= ATTACH_LOG;

	/* We can begin to use EMLXS_MSGF from this point on */

	/*
	 * Find the I/O bus type If it is not a SBUS card,
	 * then it is a PCI card. Default is PCI_FC (0).
7156 */ 7157 prop_str = NULL; 7158 status = ddi_prop_lookup_string(DDI_DEV_T_ANY, 7159 (dev_info_t *)dip, 0, "name", (char **)&prop_str); 7160 7161 if (status == DDI_PROP_SUCCESS) { 7162 if (strncmp(prop_str, "lpfs", 4) == 0) { 7163 hba->bus_type = SBUS_FC; 7164 } 7165 7166 (void) ddi_prop_free((void *)prop_str); 7167 } 7168 7169 /* 7170 * Copy DDS from the config method and update configuration parameters 7171 */ 7172 (void) emlxs_get_props(hba); 7173 7174 #ifdef FMA_SUPPORT 7175 hba->fm_caps = cfg[CFG_FM_CAPS].current; 7176 7177 emlxs_fm_init(hba); 7178 7179 init_flag |= ATTACH_FM; 7180 #endif /* FMA_SUPPORT */ 7181 7182 if (emlxs_map_bus(hba)) { 7183 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7184 "Unable to map memory"); 7185 goto failed; 7186 7187 } 7188 init_flag |= ATTACH_MAP_BUS; 7189 7190 /* Attempt to identify the adapter */ 7191 rval = emlxs_init_adapter_info(hba); 7192 7193 if (rval == 0) { 7194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7195 "Unable to get adapter info. Id:%d Vendor id:0x%x " 7196 "Device id:0x%x Model:%s", hba->model_info.id, 7197 hba->model_info.vendor_id, hba->model_info.device_id, 7198 hba->model_info.model); 7199 goto failed; 7200 } 7201 #define FILTER_ORACLE_BRANDED 7202 #ifdef FILTER_ORACLE_BRANDED 7203 7204 /* Oracle branded adapters are not supported in this driver */ 7205 if (hba->model_info.flags & EMLXS_ORACLE_BRANDED) { 7206 hba->model_info.flags |= EMLXS_NOT_SUPPORTED; 7207 } 7208 #endif /* FILTER_ORACLE_BRANDED */ 7209 7210 /* Check if adapter is not supported */ 7211 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) { 7212 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7213 "Unsupported adapter found. 
Id:%d Vendor id:0x%x " 7214 "Device id:0x%x SSDID:0x%x Model:%s", hba->model_info.id, 7215 hba->model_info.vendor_id, hba->model_info.device_id, 7216 hba->model_info.ssdid, hba->model_info.model); 7217 goto failed; 7218 } 7219 7220 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) { 7221 hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE; 7222 7223 #ifdef EMLXS_I386 7224 /* 7225 * TigerShark has 64K limit for SG element size 7226 * Do this for x86 alone. For SPARC, the driver 7227 * breaks up the single SGE later on. 7228 */ 7229 hba->dma_attr_ro.dma_attr_count_max = 0xffff; 7230 7231 i = cfg[CFG_MAX_XFER_SIZE].current; 7232 /* Update SGL size based on max_xfer_size */ 7233 if (i > 516096) { 7234 /* 516096 = (((2048 / 16) - 2) * 4096) */ 7235 hba->sli.sli4.mem_sgl_size = 4096; 7236 } else if (i > 253952) { 7237 /* 253952 = (((1024 / 16) - 2) * 4096) */ 7238 hba->sli.sli4.mem_sgl_size = 2048; 7239 } else { 7240 hba->sli.sli4.mem_sgl_size = 1024; 7241 } 7242 #endif /* EMLXS_I386 */ 7243 7244 i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size); 7245 } else { 7246 hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE; 7247 7248 #ifdef EMLXS_I386 7249 i = cfg[CFG_MAX_XFER_SIZE].current; 7250 /* Update BPL size based on max_xfer_size */ 7251 if (i > 688128) { 7252 /* 688128 = (((2048 / 12) - 2) * 4096) */ 7253 hba->sli.sli3.mem_bpl_size = 4096; 7254 } else if (i > 339968) { 7255 /* 339968 = (((1024 / 12) - 2) * 4096) */ 7256 hba->sli.sli3.mem_bpl_size = 2048; 7257 } else { 7258 hba->sli.sli3.mem_bpl_size = 1024; 7259 } 7260 #endif /* EMLXS_I386 */ 7261 7262 i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size); 7263 } 7264 7265 /* Update dma_attr_sgllen based on true SGL length */ 7266 hba->dma_attr.dma_attr_sgllen = i; 7267 hba->dma_attr_ro.dma_attr_sgllen = i; 7268 hba->dma_attr_fcip_rsp.dma_attr_sgllen = i; 7269 7270 if (EMLXS_SLI_MAP_HDW(hba)) { 7271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7272 "Unable to map memory"); 7273 goto failed; 7274 7275 } 7276 init_flag |= ATTACH_MAP_SLI; 7277 7278 
/* Initialize the interrupts. But don't add them yet */ 7279 status = EMLXS_INTR_INIT(hba, 0); 7280 if (status != DDI_SUCCESS) { 7281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7282 "Unable to initalize interrupt(s)."); 7283 goto failed; 7284 7285 } 7286 init_flag |= ATTACH_INTR_INIT; 7287 7288 /* Initialize LOCKs */ 7289 emlxs_msg_lock_reinit(hba); 7290 emlxs_lock_init(hba); 7291 init_flag |= ATTACH_LOCK; 7292 7293 /* Create the event queue */ 7294 if (emlxs_event_queue_create(hba) == 0) { 7295 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7296 "Unable to create event queue"); 7297 7298 goto failed; 7299 7300 } 7301 init_flag |= ATTACH_EVENTS; 7302 7303 /* Initialize the power management */ 7304 mutex_enter(&EMLXS_PM_LOCK); 7305 hba->pm_state = EMLXS_PM_IN_ATTACH; 7306 hba->pm_level = EMLXS_PM_ADAPTER_DOWN; 7307 hba->pm_busy = 0; 7308 #ifdef IDLE_TIMER 7309 hba->pm_active = 1; 7310 hba->pm_idle_timer = 0; 7311 #endif /* IDLE_TIMER */ 7312 mutex_exit(&EMLXS_PM_LOCK); 7313 7314 /* Set the pm component name */ 7315 (void) snprintf(local_pm_components, sizeof (local_pm_components), 7316 "NAME=%s%d", DRIVER_NAME, ddiinst); 7317 emlxs_pm_components[0] = local_pm_components; 7318 7319 /* Check if power management support is enabled */ 7320 if (cfg[CFG_PM_SUPPORT].current) { 7321 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip, 7322 "pm-components", emlxs_pm_components, 7323 sizeof (emlxs_pm_components) / 7324 sizeof (emlxs_pm_components[0])) != 7325 DDI_PROP_SUCCESS) { 7326 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7327 "Unable to create pm components."); 7328 goto failed; 7329 } 7330 } 7331 7332 /* Needed for suspend and resume support */ 7333 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state", 7334 "needs-suspend-resume"); 7335 init_flag |= ATTACH_PROP; 7336 7337 emlxs_thread_spawn_create(hba); 7338 init_flag |= ATTACH_SPAWN; 7339 7340 emlxs_thread_create(hba, &hba->iodone_thread); 7341 7342 init_flag |= ATTACH_THREAD; 
7343 7344 retry: 7345 /* Setup initiator / target ports */ 7346 emlxs_mode_init(hba); 7347 7348 /* If driver did not attach to either stack, */ 7349 /* then driver attach fails */ 7350 if (port->mode == MODE_NONE) { 7351 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7352 "Driver interfaces not enabled."); 7353 goto failed; 7354 } 7355 7356 /* 7357 * Initialize HBA 7358 */ 7359 7360 /* Set initial state */ 7361 mutex_enter(&EMLXS_PORT_LOCK); 7362 hba->flag |= FC_OFFLINE_MODE; 7363 hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE); 7364 mutex_exit(&EMLXS_PORT_LOCK); 7365 7366 if (status = emlxs_online(hba)) { 7367 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7368 "Unable to initialize adapter."); 7369 7370 if (status == EAGAIN) { 7371 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7372 "Retrying adapter initialization ..."); 7373 goto retry; 7374 } 7375 goto failed; 7376 } 7377 init_flag |= ATTACH_ONLINE; 7378 7379 /* This is to ensure that the model property is properly set */ 7380 (void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model", 7381 hba->model_info.model); 7382 7383 /* Create the device node. 
*/ 7384 if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) == 7385 DDI_FAILURE) { 7386 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg, 7387 "Unable to create device node."); 7388 goto failed; 7389 } 7390 init_flag |= ATTACH_NODE; 7391 7392 /* Attach initiator now */ 7393 /* This must come after emlxs_online() */ 7394 emlxs_fca_attach(hba); 7395 init_flag |= ATTACH_FCA; 7396 7397 /* Initialize kstat information */ 7398 hba->kstat = kstat_create(DRIVER_NAME, 7399 ddiinst, "statistics", "controller", 7400 KSTAT_TYPE_RAW, sizeof (emlxs_stats_t), 7401 KSTAT_FLAG_VIRTUAL); 7402 7403 if (hba->kstat == NULL) { 7404 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 7405 "kstat_create failed."); 7406 } else { 7407 hba->kstat->ks_data = (void *)&hba->stats; 7408 kstat_install(hba->kstat); 7409 init_flag |= ATTACH_KSTAT; 7410 } 7411 7412 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 7413 /* Setup virtual port properties */ 7414 emlxs_read_vport_prop(hba); 7415 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 7416 7417 7418 #ifdef DHCHAP_SUPPORT 7419 emlxs_dhc_attach(hba); 7420 init_flag |= ATTACH_DHCHAP; 7421 #endif /* DHCHAP_SUPPORT */ 7422 7423 /* Display the driver banner now */ 7424 emlxs_drv_banner(hba); 7425 7426 /* Raise the power level */ 7427 7428 /* 7429 * This will not execute emlxs_hba_resume because 7430 * EMLXS_PM_IN_ATTACH is set 7431 */ 7432 if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) { 7433 /* Set power up anyway. This should not happen! 
*/ 7434 mutex_enter(&EMLXS_PM_LOCK); 7435 hba->pm_level = EMLXS_PM_ADAPTER_UP; 7436 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 7437 mutex_exit(&EMLXS_PM_LOCK); 7438 } else { 7439 mutex_enter(&EMLXS_PM_LOCK); 7440 hba->pm_state &= ~EMLXS_PM_IN_ATTACH; 7441 mutex_exit(&EMLXS_PM_LOCK); 7442 } 7443 7444 #ifdef SFCT_SUPPORT 7445 if (port->flag & EMLXS_TGT_ENABLED) { 7446 /* Do this last */ 7447 emlxs_fct_attach(hba); 7448 init_flag |= ATTACH_FCT; 7449 } 7450 #endif /* SFCT_SUPPORT */ 7451 7452 return (DDI_SUCCESS); 7453 7454 failed: 7455 7456 emlxs_driver_remove(dip, init_flag, 1); 7457 7458 return (DDI_FAILURE); 7459 7460 } /* emlxs_hba_attach() */ 7461 7462 7463 static int 7464 emlxs_hba_detach(dev_info_t *dip) 7465 { 7466 emlxs_hba_t *hba; 7467 emlxs_port_t *port; 7468 int ddiinst; 7469 int count; 7470 uint32_t init_flag = (uint32_t)-1; 7471 7472 ddiinst = ddi_get_instance(dip); 7473 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst); 7474 port = &PPORT; 7475 7476 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL); 7477 7478 mutex_enter(&EMLXS_PM_LOCK); 7479 hba->pm_state |= EMLXS_PM_IN_DETACH; 7480 mutex_exit(&EMLXS_PM_LOCK); 7481 7482 /* Lower the power level */ 7483 /* 7484 * This will not suspend the driver since the 7485 * EMLXS_PM_IN_DETACH has been set 7486 */ 7487 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) { 7488 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 7489 "Unable to lower power."); 7490 7491 mutex_enter(&EMLXS_PM_LOCK); 7492 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 7493 mutex_exit(&EMLXS_PM_LOCK); 7494 7495 return (DDI_FAILURE); 7496 } 7497 7498 /* Take the adapter offline first, if not already */ 7499 if (emlxs_offline(hba, 1) != 0) { 7500 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg, 7501 "Unable to take adapter offline."); 7502 7503 mutex_enter(&EMLXS_PM_LOCK); 7504 hba->pm_state &= ~EMLXS_PM_IN_DETACH; 7505 mutex_exit(&EMLXS_PM_LOCK); 7506 7507 (void) emlxs_pm_raise_power(dip); 7508 7509 return (DDI_FAILURE); 7510 } 7511 /* Check ub 
buffer pools */ 7512 if (port->ub_pool) { 7513 mutex_enter(&EMLXS_UB_LOCK); 7514 7515 /* Wait up to 10 seconds for all ub pools to be freed */ 7516 count = 10 * 2; 7517 while (port->ub_pool && count) { 7518 mutex_exit(&EMLXS_UB_LOCK); 7519 delay(drv_usectohz(500000)); /* half second wait */ 7520 count--; 7521 mutex_enter(&EMLXS_UB_LOCK); 7522 } 7523 7524 if (port->ub_pool) { 7525 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7526 "fca_unbind_port: Unsolicited buffers still " 7527 "active. port=%p. Destroying...", port); 7528 7529 /* Destroy all pools */ 7530 while (port->ub_pool) { 7531 emlxs_ub_destroy(port, port->ub_pool); 7532 } 7533 } 7534 7535 mutex_exit(&EMLXS_UB_LOCK); 7536 } 7537 init_flag &= ~ATTACH_ONLINE; 7538 7539 /* Remove the driver instance */ 7540 emlxs_driver_remove(dip, init_flag, 0); 7541 7542 return (DDI_SUCCESS); 7543 7544 } /* emlxs_hba_detach() */ 7545 7546 7547 extern int 7548 emlxs_map_bus(emlxs_hba_t *hba) 7549 { 7550 emlxs_port_t *port = &PPORT; 7551 dev_info_t *dip; 7552 ddi_device_acc_attr_t dev_attr; 7553 int status; 7554 7555 dip = (dev_info_t *)hba->dip; 7556 dev_attr = emlxs_dev_acc_attr; 7557 7558 if (hba->bus_type == SBUS_FC) { 7559 if (hba->pci_acc_handle == 0) { 7560 status = ddi_regs_map_setup(dip, 7561 SBUS_DFLY_PCI_CFG_RINDEX, 7562 (caddr_t *)&hba->pci_addr, 7563 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 7564 if (status != DDI_SUCCESS) { 7565 EMLXS_MSGF(EMLXS_CONTEXT, 7566 &emlxs_attach_failed_msg, 7567 "(SBUS) ddi_regs_map_setup PCI failed. " 7568 "status=%x", status); 7569 goto failed; 7570 } 7571 } 7572 7573 if (hba->sbus_pci_handle == 0) { 7574 status = ddi_regs_map_setup(dip, 7575 SBUS_TITAN_PCI_CFG_RINDEX, 7576 (caddr_t *)&hba->sbus_pci_addr, 7577 0, 0, &dev_attr, &hba->sbus_pci_handle); 7578 if (status != DDI_SUCCESS) { 7579 EMLXS_MSGF(EMLXS_CONTEXT, 7580 &emlxs_attach_failed_msg, 7581 "(SBUS) ddi_regs_map_setup TITAN PCI " 7582 "failed. 
status=%x", status); 7583 goto failed; 7584 } 7585 } 7586 7587 } else { /* ****** PCI ****** */ 7588 7589 if (hba->pci_acc_handle == 0) { 7590 status = ddi_regs_map_setup(dip, 7591 PCI_CFG_RINDEX, 7592 (caddr_t *)&hba->pci_addr, 7593 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle); 7594 if (status != DDI_SUCCESS) { 7595 EMLXS_MSGF(EMLXS_CONTEXT, 7596 &emlxs_attach_failed_msg, 7597 "(PCI) ddi_regs_map_setup PCI failed. " 7598 "status=%x", status); 7599 goto failed; 7600 } 7601 } 7602 #ifdef EMLXS_I386 7603 /* Setting up PCI configure space */ 7604 (void) ddi_put16(hba->pci_acc_handle, 7605 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER), 7606 CMD_CFG_VALUE | CMD_IO_ENBL); 7607 7608 #ifdef FMA_SUPPORT 7609 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle) 7610 != DDI_FM_OK) { 7611 EMLXS_MSGF(EMLXS_CONTEXT, 7612 &emlxs_invalid_access_handle_msg, NULL); 7613 goto failed; 7614 } 7615 #endif /* FMA_SUPPORT */ 7616 7617 #endif /* EMLXS_I386 */ 7618 7619 } 7620 return (0); 7621 7622 failed: 7623 7624 emlxs_unmap_bus(hba); 7625 return (ENOMEM); 7626 7627 } /* emlxs_map_bus() */ 7628 7629 7630 extern void 7631 emlxs_unmap_bus(emlxs_hba_t *hba) 7632 { 7633 if (hba->pci_acc_handle) { 7634 (void) ddi_regs_map_free(&hba->pci_acc_handle); 7635 hba->pci_acc_handle = 0; 7636 } 7637 7638 if (hba->sbus_pci_handle) { 7639 (void) ddi_regs_map_free(&hba->sbus_pci_handle); 7640 hba->sbus_pci_handle = 0; 7641 } 7642 7643 return; 7644 7645 } /* emlxs_unmap_bus() */ 7646 7647 7648 static int 7649 emlxs_get_props(emlxs_hba_t *hba) 7650 { 7651 emlxs_config_t *cfg; 7652 uint32_t i; 7653 char string[256]; 7654 uint32_t new_value; 7655 7656 /* Initialize each parameter */ 7657 for (i = 0; i < NUM_CFG_PARAM; i++) { 7658 cfg = &hba->config[i]; 7659 7660 /* Ensure strings are terminated */ 7661 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0; 7662 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0; 7663 7664 /* Set the current value to the default value */ 7665 new_value = cfg->def; 7666 7667 /* First check 
for the global setting */ 7668 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 7669 (void *)hba->dip, DDI_PROP_DONTPASS, 7670 cfg->string, new_value); 7671 7672 /* Now check for the per adapter ddiinst setting */ 7673 (void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME, 7674 hba->ddiinst, cfg->string); 7675 7676 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, 7677 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value); 7678 7679 /* Now check the parameter */ 7680 cfg->current = emlxs_check_parm(hba, i, new_value); 7681 } 7682 7683 return (0); 7684 7685 } /* emlxs_get_props() */ 7686 7687 7688 extern uint32_t 7689 emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 7690 { 7691 emlxs_port_t *port = &PPORT; 7692 uint32_t i; 7693 emlxs_config_t *cfg; 7694 emlxs_vpd_t *vpd = &VPD; 7695 7696 if (index >= NUM_CFG_PARAM) { 7697 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7698 "check_parm failed. Invalid index = %d", index); 7699 7700 return (new_value); 7701 } 7702 7703 cfg = &hba->config[index]; 7704 7705 if (new_value > cfg->hi) { 7706 new_value = cfg->def; 7707 } else if (new_value < cfg->low) { 7708 new_value = cfg->def; 7709 } 7710 7711 /* Perform additional checks */ 7712 switch (index) { 7713 #ifdef SFCT_SUPPORT 7714 case CFG_NPIV_ENABLE: 7715 if (hba->config[CFG_TARGET_MODE].current && 7716 hba->config[CFG_DTM_ENABLE].current == 0) { 7717 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7718 "enable-npiv: Not supported in pure target mode. 
" 7719 "Disabling."); 7720 7721 new_value = 0; 7722 } 7723 break; 7724 #endif /* SFCT_SUPPORT */ 7725 7726 7727 case CFG_NUM_NODES: 7728 switch (new_value) { 7729 case 1: 7730 case 2: 7731 /* Must have at least 3 if not 0 */ 7732 return (3); 7733 7734 default: 7735 break; 7736 } 7737 break; 7738 7739 case CFG_FW_CHECK: 7740 /* The 0x2 bit implies the 0x1 bit will also be set */ 7741 if (new_value & 0x2) { 7742 new_value |= 0x1; 7743 } 7744 7745 /* The 0x4 bit should not be set if 0x1 or 0x2 is not set */ 7746 if (!(new_value & 0x3) && (new_value & 0x4)) { 7747 new_value &= ~0x4; 7748 } 7749 break; 7750 7751 case CFG_LINK_SPEED: 7752 if ((new_value > 8) && 7753 (hba->config[CFG_TOPOLOGY].current == 4)) { 7754 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7755 "link-speed: %dGb not supported in loop topology. " 7756 "Switching to auto detect.", 7757 new_value); 7758 7759 new_value = 0; 7760 break; 7761 } 7762 7763 if (vpd->link_speed) { 7764 switch (new_value) { 7765 case 0: 7766 break; 7767 7768 case 1: 7769 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 7770 new_value = 0; 7771 7772 EMLXS_MSGF(EMLXS_CONTEXT, 7773 &emlxs_init_msg, 7774 "link-speed: 1Gb not supported " 7775 "by adapter. Switching to auto " 7776 "detect."); 7777 } 7778 break; 7779 7780 case 2: 7781 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 7782 new_value = 0; 7783 7784 EMLXS_MSGF(EMLXS_CONTEXT, 7785 &emlxs_init_msg, 7786 "link-speed: 2Gb not supported " 7787 "by adapter. Switching to auto " 7788 "detect."); 7789 } 7790 break; 7791 7792 case 4: 7793 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 7794 new_value = 0; 7795 7796 EMLXS_MSGF(EMLXS_CONTEXT, 7797 &emlxs_init_msg, 7798 "link-speed: 4Gb not supported " 7799 "by adapter. Switching to auto " 7800 "detect."); 7801 } 7802 break; 7803 7804 case 8: 7805 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 7806 new_value = 0; 7807 7808 EMLXS_MSGF(EMLXS_CONTEXT, 7809 &emlxs_init_msg, 7810 "link-speed: 8Gb not supported " 7811 "by adapter. 
Switching to auto " 7812 "detect."); 7813 } 7814 break; 7815 7816 case 16: 7817 if (!(vpd->link_speed & LMT_16GB_CAPABLE)) { 7818 new_value = 0; 7819 7820 EMLXS_MSGF(EMLXS_CONTEXT, 7821 &emlxs_init_msg, 7822 "link-speed: 16Gb not supported " 7823 "by adapter. Switching to auto " 7824 "detect."); 7825 } 7826 break; 7827 7828 case 32: 7829 if (!(vpd->link_speed & LMT_32GB_CAPABLE)) { 7830 new_value = 0; 7831 7832 EMLXS_MSGF(EMLXS_CONTEXT, 7833 &emlxs_init_msg, 7834 "link-speed: 32Gb not supported " 7835 "by adapter. Switching to auto " 7836 "detect."); 7837 } 7838 break; 7839 7840 default: 7841 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7842 "link-speed: Invalid value=%d provided. " 7843 "Switching to auto detect.", 7844 new_value); 7845 7846 new_value = 0; 7847 } 7848 } else { /* Perform basic validity check */ 7849 7850 /* Perform additional check on link speed */ 7851 switch (new_value) { 7852 case 0: 7853 case 1: 7854 case 2: 7855 case 4: 7856 case 8: 7857 case 16: 7858 /* link-speed is a valid choice */ 7859 break; 7860 7861 default: 7862 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7863 "link-speed: Invalid value=%d provided. " 7864 "Switching to auto detect.", 7865 new_value); 7866 7867 new_value = 0; 7868 } 7869 } 7870 break; 7871 7872 case CFG_TOPOLOGY: 7873 if ((new_value == 4) && 7874 (hba->config[CFG_LINK_SPEED].current > 8)) { 7875 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7876 "topology: Loop topology not supported " 7877 "with link speeds greater than 8Gb. " 7878 "Switching to auto detect."); 7879 7880 new_value = 0; 7881 break; 7882 } 7883 7884 /* Perform additional check on topology */ 7885 switch (new_value) { 7886 case 0: 7887 case 2: 7888 case 4: 7889 case 6: 7890 /* topology is a valid choice */ 7891 break; 7892 7893 default: 7894 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg, 7895 "topology: Invalid value=%d provided. 
" 7896 "Switching to auto detect.", 7897 new_value); 7898 7899 new_value = 0; 7900 break; 7901 } 7902 break; 7903 7904 #ifdef DHCHAP_SUPPORT 7905 case CFG_AUTH_TYPE: 7906 { 7907 uint32_t shift; 7908 uint32_t mask; 7909 7910 /* Perform additional check on auth type */ 7911 shift = 12; 7912 mask = 0xF000; 7913 for (i = 0; i < 4; i++) { 7914 if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) { 7915 return (cfg->def); 7916 } 7917 7918 shift -= 4; 7919 mask >>= 4; 7920 } 7921 break; 7922 } 7923 7924 case CFG_AUTH_HASH: 7925 { 7926 uint32_t shift; 7927 uint32_t mask; 7928 7929 /* Perform additional check on auth hash */ 7930 shift = 12; 7931 mask = 0xF000; 7932 for (i = 0; i < 4; i++) { 7933 if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) { 7934 return (cfg->def); 7935 } 7936 7937 shift -= 4; 7938 mask >>= 4; 7939 } 7940 break; 7941 } 7942 7943 case CFG_AUTH_GROUP: 7944 { 7945 uint32_t shift; 7946 uint32_t mask; 7947 7948 /* Perform additional check on auth group */ 7949 shift = 28; 7950 mask = 0xF0000000; 7951 for (i = 0; i < 8; i++) { 7952 if (((new_value & mask) >> shift) > 7953 DFC_AUTH_GROUP_MAX) { 7954 return (cfg->def); 7955 } 7956 7957 shift -= 4; 7958 mask >>= 4; 7959 } 7960 break; 7961 } 7962 7963 case CFG_AUTH_INTERVAL: 7964 if (new_value < 10) { 7965 return (10); 7966 } 7967 break; 7968 7969 7970 #endif /* DHCHAP_SUPPORT */ 7971 7972 } /* switch */ 7973 7974 return (new_value); 7975 7976 } /* emlxs_check_parm() */ 7977 7978 7979 extern uint32_t 7980 emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value) 7981 { 7982 emlxs_port_t *port = &PPORT; 7983 emlxs_port_t *vport; 7984 uint32_t vpi; 7985 emlxs_config_t *cfg; 7986 uint32_t old_value; 7987 7988 if (index >= NUM_CFG_PARAM) { 7989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7990 "set_parm failed. 
Invalid index = %d", index); 7991 7992 return ((uint32_t)FC_FAILURE); 7993 } 7994 7995 cfg = &hba->config[index]; 7996 7997 if (!(cfg->flags & PARM_DYNAMIC)) { 7998 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 7999 "set_parm failed. %s is not dynamic.", cfg->string); 8000 8001 return ((uint32_t)FC_FAILURE); 8002 } 8003 8004 /* Check new value */ 8005 old_value = new_value; 8006 new_value = emlxs_check_parm(hba, index, new_value); 8007 8008 if (old_value != new_value) { 8009 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 8010 "set_parm: %s invalid. 0x%x --> 0x%x", 8011 cfg->string, old_value, new_value); 8012 } 8013 8014 /* Return now if no actual change */ 8015 if (new_value == cfg->current) { 8016 return (FC_SUCCESS); 8017 } 8018 8019 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 8020 "set_parm: %s changing. 0x%x --> 0x%x", 8021 cfg->string, cfg->current, new_value); 8022 8023 old_value = cfg->current; 8024 cfg->current = new_value; 8025 8026 /* React to change if needed */ 8027 switch (index) { 8028 8029 case CFG_PCI_MAX_READ: 8030 /* Update MXR */ 8031 emlxs_pcix_mxr_update(hba, 1); 8032 break; 8033 8034 #ifdef SFCT_SUPPORT 8035 case CFG_TARGET_MODE: 8036 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 8037 break; 8038 #endif /* SFCT_SUPPORT */ 8039 8040 case CFG_SLI_MODE: 8041 /* Check SLI mode */ 8042 if ((hba->sli_mode == 3) && (new_value == 2)) { 8043 /* All vports must be disabled first */ 8044 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 8045 vport = &VPORT(vpi); 8046 8047 if (vport->flag & EMLXS_PORT_ENABLED) { 8048 /* Reset current value */ 8049 cfg->current = old_value; 8050 8051 EMLXS_MSGF(EMLXS_CONTEXT, 8052 &emlxs_sfs_debug_msg, 8053 "set_parm failed. %s: vpi=%d " 8054 "still enabled. 
Value restored to " 8055 "0x%x.", cfg->string, vpi, 8056 old_value); 8057 8058 return (2); 8059 } 8060 } 8061 } 8062 8063 if ((hba->sli_mode >= 4) && (new_value < 4)) { 8064 /* 8065 * Not allow to set to SLI 2 or 3 if HBA supports SLI4 8066 */ 8067 cfg->current = old_value; 8068 return ((uint32_t)FC_FAILURE); 8069 } 8070 8071 break; 8072 8073 case CFG_NPIV_ENABLE: 8074 /* Check if NPIV is being disabled */ 8075 if ((old_value == 1) && (new_value == 0)) { 8076 /* All vports must be disabled first */ 8077 for (vpi = 1; vpi < MAX_VPORTS; vpi++) { 8078 vport = &VPORT(vpi); 8079 8080 if (vport->flag & EMLXS_PORT_ENABLED) { 8081 /* Reset current value */ 8082 cfg->current = old_value; 8083 8084 EMLXS_MSGF(EMLXS_CONTEXT, 8085 &emlxs_sfs_debug_msg, 8086 "set_parm failed. %s: vpi=%d " 8087 "still enabled. Value restored to " 8088 "0x%x.", cfg->string, vpi, 8089 old_value); 8090 8091 return (2); 8092 } 8093 } 8094 } 8095 8096 /* Trigger adapter reset */ 8097 /* (void) emlxs_reset(port, FC_FCA_RESET); */ 8098 8099 break; 8100 8101 8102 case CFG_VPORT_RESTRICTED: 8103 for (vpi = 0; vpi < MAX_VPORTS; vpi++) { 8104 vport = &VPORT(vpi); 8105 8106 if (!(vport->flag & EMLXS_PORT_CONFIG)) { 8107 continue; 8108 } 8109 8110 if (vport->options & EMLXS_OPT_RESTRICT_MASK) { 8111 continue; 8112 } 8113 8114 if (new_value) { 8115 vport->flag |= EMLXS_PORT_RESTRICTED; 8116 } else { 8117 vport->flag &= ~EMLXS_PORT_RESTRICTED; 8118 } 8119 } 8120 8121 break; 8122 8123 #ifdef DHCHAP_SUPPORT 8124 case CFG_AUTH_ENABLE: 8125 (void) emlxs_reset(port, FC_FCA_LINK_RESET); 8126 break; 8127 8128 case CFG_AUTH_TMO: 8129 hba->auth_cfg.authentication_timeout = cfg->current; 8130 break; 8131 8132 case CFG_AUTH_MODE: 8133 hba->auth_cfg.authentication_mode = cfg->current; 8134 break; 8135 8136 case CFG_AUTH_BIDIR: 8137 hba->auth_cfg.bidirectional = cfg->current; 8138 break; 8139 8140 case CFG_AUTH_TYPE: 8141 hba->auth_cfg.authentication_type_priority[0] = 8142 (cfg->current & 0xF000) >> 12; 8143 
hba->auth_cfg.authentication_type_priority[1] = 8144 (cfg->current & 0x0F00) >> 8; 8145 hba->auth_cfg.authentication_type_priority[2] = 8146 (cfg->current & 0x00F0) >> 4; 8147 hba->auth_cfg.authentication_type_priority[3] = 8148 (cfg->current & 0x000F); 8149 break; 8150 8151 case CFG_AUTH_HASH: 8152 hba->auth_cfg.hash_priority[0] = 8153 (cfg->current & 0xF000) >> 12; 8154 hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8; 8155 hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4; 8156 hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F); 8157 break; 8158 8159 case CFG_AUTH_GROUP: 8160 hba->auth_cfg.dh_group_priority[0] = 8161 (cfg->current & 0xF0000000) >> 28; 8162 hba->auth_cfg.dh_group_priority[1] = 8163 (cfg->current & 0x0F000000) >> 24; 8164 hba->auth_cfg.dh_group_priority[2] = 8165 (cfg->current & 0x00F00000) >> 20; 8166 hba->auth_cfg.dh_group_priority[3] = 8167 (cfg->current & 0x000F0000) >> 16; 8168 hba->auth_cfg.dh_group_priority[4] = 8169 (cfg->current & 0x0000F000) >> 12; 8170 hba->auth_cfg.dh_group_priority[5] = 8171 (cfg->current & 0x00000F00) >> 8; 8172 hba->auth_cfg.dh_group_priority[6] = 8173 (cfg->current & 0x000000F0) >> 4; 8174 hba->auth_cfg.dh_group_priority[7] = 8175 (cfg->current & 0x0000000F); 8176 break; 8177 8178 case CFG_AUTH_INTERVAL: 8179 hba->auth_cfg.reauthenticate_time_interval = cfg->current; 8180 break; 8181 #endif /* DHCHAP_SUPPORT */ 8182 8183 } 8184 8185 return (FC_SUCCESS); 8186 8187 } /* emlxs_set_parm() */ 8188 8189 8190 /* 8191 * emlxs_mem_alloc OS specific routine for memory allocation / mapping 8192 * 8193 * The buf_info->flags field describes the memory operation requested. 
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
 * Virtual address is supplied in buf_info->virt
 * DMA mapping flag is in buf_info->align
 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
 * The mapped physical address is returned buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
 * if FC_MBUF_DMA is set the memory is also mapped for DMA
 * The byte alignment of the memory request is supplied in buf_info->align
 * The byte size of the memory request is supplied in buf_info->size
 * The virtual address is returned buf_info->virt
 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
 */
extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	emlxs_port_t *port = &PPORT;
	ddi_dma_attr_t dma_attr;
	ddi_device_acc_attr_t dev_attr;
	uint_t cookie_count;
	size_t dma_reallen;
	ddi_dma_cookie_t dma_cookie;
	uint_t dma_flag;
	int status;

	/* Start from the single-SGE template; we require one cookie below */
	dma_attr = hba->dma_attr_1sg;
	dev_attr = emlxs_data_acc_attr;

	if (buf_info->flags & FC_MBUF_SNGLSG) {
		dma_attr.dma_attr_sgllen = 1;
	}

	if (buf_info->flags & FC_MBUF_PHYSONLY) {

		/* Caller must supply the virtual address to be mapped */
		if (buf_info->virt == NULL) {
			goto done;
		}

		/*
		 * Allocate the DMA handle for this DMA object
		 */
		status = ddi_dma_alloc_handle((void *)hba->dip,
		    &dma_attr, DDI_DMA_DONTWAIT,
		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_alloc_handle failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		/*
		 * In PHYSONLY mode, buf_info->align carries the requested
		 * DMA direction, not a byte alignment.
		 */
		switch (buf_info->align) {
		case DMA_READ_WRITE:
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		case DMA_READ_ONLY:
			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
			break;
		case DMA_WRITE_ONLY:
			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
			break;
		default:
			/*
			 * NOTE(review): on an invalid direction flag this
			 * returns virt (non-NULL) after freeing the handle,
			 * so the caller cannot distinguish this failure.
			 * Matches existing behavior; callers may rely on it.
			 */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "Invalid DMA flag");
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			return ((uint8_t *)buf_info->virt);
		}

		/* Map this page of memory */
		status = ddi_dma_addr_bind_handle(
		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
		    &cookie_count);

		/* A single cookie is required; reject partial mappings */
		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%x flags=%x", status, cookie_count,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		if (hba->bus_type == SBUS_FC) {

			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			/* Enable 64-bit sbus transfers; failure is non-fatal */
			ret_burst = ddi_dma_burstsizes(
			    buf_info->dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/*
		 * Just to be sure, let's add this
		 */
		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

	} else if (buf_info->flags & FC_MBUF_DMA) {

		/* Here buf_info->align is a true byte alignment request */
		dma_attr.dma_attr_align = buf_info->align;

		/*
		 * Allocate the DMA handle for this DMA object
		 */
		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    (ddi_dma_handle_t *)&buf_info->dma_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_alloc_handle failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		status = ddi_dma_mem_alloc(
		    (ddi_dma_handle_t)buf_info->dma_handle,
		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);

		/* dma_reallen may exceed the request; short is a failure */
		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_mem_alloc failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		/* Map this page of memory */
		status = ddi_dma_addr_bind_handle(
		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &dma_cookie, &cookie_count);

		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%d size=%x align=%x flags=%x", status,
			    cookie_count, buf_info->size, buf_info->align,
			    buf_info->flags);

			/* Tear down in reverse order of acquisition */
			(void) ddi_dma_mem_free(
			    (ddi_acc_handle_t *)&buf_info->data_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			buf_info->data_handle = 0;
			goto done;
		}

		if (hba->bus_type == SBUS_FC) {
			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			ret_burst =
			    ddi_dma_burstsizes(buf_info->
			    dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/* Just to be sure, let's add this */
		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

	} else {	/* allocate virtual memory */

		/* No DMA requested: plain zeroed kernel memory */
		buf_info->virt =
		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
		buf_info->phys = 0;
		buf_info->data_handle = 0;
		buf_info->dma_handle = 0;

		if (buf_info->virt == (uint32_t *)0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "size=%x flags=%x", buf_info->size,
			    buf_info->flags);
		}

	}

done:

	/* NULL here means the requested allocation/mapping failed */
	return ((uint8_t *)buf_info->virt);

} /* emlxs_mem_alloc() */



/*
 * emlxs_mem_free:
 *
 * OS specific routine for memory de-allocation / unmapping
 *
 * The buf_info->flags field describes the memory operation requested.
 *
 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
 * for DMA, but not freed. The mapped physical address to be unmapped is in
 * buf_info->phys
 *
 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
 * buf_info->phys.
 * The virtual address to be freed is in buf_info->virt
 */
/*ARGSUSED*/
extern void
emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	if (buf_info->flags & FC_MBUF_PHYSONLY) {

		/* Unbind and release the DMA handle only; memory not freed */
		if (buf_info->dma_handle) {
			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->dma_handle = NULL;
		}

	} else if (buf_info->flags & FC_MBUF_DMA) {

		/* Release in reverse order: unbind, mem_free, free handle */
		if (buf_info->dma_handle) {
			(void) ddi_dma_unbind_handle(buf_info->dma_handle);
			if (buf_info->data_handle) {
				(void) ddi_dma_mem_free(
				    (ddi_acc_handle_t *)&buf_info->data_handle);
			}
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->dma_handle = NULL;
			buf_info->data_handle = NULL;
		}

	} else {	/* allocate virtual memory */

		/* Plain kmem allocation; buf_info->size must match alloc */
		if (buf_info->virt) {
			kmem_free(buf_info->virt, (size_t)buf_info->size);
			buf_info->virt = NULL;
		}
	}

} /* emlxs_mem_free() */


/*
 * emlxs_select_fcp_channel:
 *
 * Select the IO channel for an outbound FCP command. FCP2 devices and
 * device resets always use the dedicated FCP channel; otherwise a channel
 * is chosen round-robin through the MSI id mapping, falling back to the
 * FCP channel if the chosen channel is closed.
 */
static int
emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
{
	int channel;
	int msi_id;


	/* IO to FCP2 device or a device reset always use fcp channel */
	if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
		return (hba->channel_fcp);
	}


	msi_id = emlxs_select_msiid(hba);
	channel = emlxs_msiid_to_chan(hba, msi_id);



	/* If channel is closed, then try fcp channel */
	if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
		channel = hba->channel_fcp;
	}
	return (channel);

} /* emlxs_select_fcp_channel() */


static int32_t
emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rc;

	/*
	 * This
 * routine provides an alternative target reset processing
 * method. Instead of sending an actual target reset to the
 * NPort, we will first unreg the login to that NPort. This
 * will cause all the outstanding IOs to quickly complete with
 * a NO RPI local error. Next we will force the ULP to relogin
 * to the NPort by sending an RSCN (for that NPort) to the
 * upper layer. This method should result in a fast target
 * reset, as far as IOs completing; however, since an actual
 * target reset is not sent to the NPort, it is not 100%
 * compatible. Things like reservations will not be broken.
 * By default this option is DISABLED, and its only enabled thru
 * a hidden configuration parameter (fast-tgt-reset).
 */
	rc = FC_TRAN_BUSY;
	pkt = PRIV2PKT(sbp);
	cfg = &CFG;

	/* If no mailbox is available, return BUSY so the caller retries */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		/* issue the mbox cmd to the sli */
		mb = (MAILBOX *) mbq->mbox;
		bzero((void *) mb, MAILBOX_CMD_BSIZE);
		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
#ifdef SLI3_SUPPORT
		mb->un.varUnregLogin.vpi = port->vpi;
#endif /* SLI3_SUPPORT */
		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
		    cfg[CFG_FAST_TGT_RESET_TMR].current);

		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
		    == MBX_SUCCESS) {

			/* Login is gone; outstanding IO completes NO RPI */
			ndlp->nlp_Rpi = 0;

			mutex_enter(&sbp->mtx);
			sbp->node = (void *)ndlp;
			sbp->did = ndlp->nlp_DID;
			mutex_exit(&sbp->mtx);

			if (pkt->pkt_rsplen) {
				bzero((uint8_t *)pkt->pkt_resp,
				    pkt->pkt_rsplen);
			}
			/* Schedule the forced RSCN that triggers ULP relogin */
			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
				ndlp->nlp_force_rscn = hba->timer_tics +
				    cfg[CFG_FAST_TGT_RESET_TMR].current;
			}

			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
		}

		/*
		 * NOTE(review): rc is set to FC_SUCCESS whenever a mailbox
		 * was obtained, even if the mailbox command itself failed.
		 * Matches existing behavior; confirm before changing.
		 */
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		rc = FC_SUCCESS;
	}
	return (rc);
} /* emlxs_fast_target_reset() */

/*
 * emlxs_send_fcp_cmd:
 *
 * Issue an FCP command packet. Snoops the FCP_CMND task management flags
 * for target/LUN reset first (those always use the FCP channel and get
 * priority); normal IO is spread across channels via MSI id round-robin.
 * Returns FC_SUCCESS, FC_BADPACKET or FC_TRAN_BUSY.
 */
static int32_t
emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	char *cmd;
	uint16_t lun;
	FCP_CMND *fcp_cmd;
	uint32_t did;
	uint32_t reset = 0;
	int channel;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Find target node object */
	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* When the fcp channel is closed we stop accepting any FCP cmd */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Snoop for target or lun reset first */
	/* We always use FCP channel to send out target/lun reset fcp cmds */
	/* interrupt affinity only applies to non tgt lun reset fcp cmd */

	/* First 16 bits of the FCP_CMND payload carry the LUN */
	cmd = (char *)pkt->pkt_cmd;
	lun = *((uint16_t *)cmd);
	lun = LE_SWAP16(lun);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	iocbq->node = (void *) ndlp;

	/* Check for target reset */
	/* cmd[10] holds the FCP task management flags (0x20 = tgt reset) */
	if (cmd[10] & 0x20) {
		/* prepare iocb */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    hba->channel_fcp)) != FC_SUCCESS) {

			/* 0xff means prep already completed the pkt */
			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}

		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
		sbp->pkt_flags |= PACKET_POLLED;
		*pkt_flags = sbp->pkt_flags;
		mutex_exit(&sbp->mtx);

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
		    (HBA_WWN *)&ndlp->nlp_portname, -1);
#endif /* SAN_DIAG_SUPPORT */

		iocbq->flag |= IOCB_PRIORITY;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Target Reset: did=%x", did);

		/* Optional fast path: unreg login instead of real reset */
		cfg = &CFG;
		if (cfg[CFG_FAST_TGT_RESET].current) {
			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
			    FC_SUCCESS) {
				return (FC_SUCCESS);
			}
		}

		/* Close the node for any further normal IO */
		emlxs_node_close(port, ndlp, hba->channel_fcp,
		    pkt->pkt_timeout);

		/* Flush the IO's on the tx queues */
		(void) emlxs_tx_node_flush(port, ndlp,
		    &hba->chan[hba->channel_fcp], 0, sbp);

		/* This is the target reset fcp cmd */
		reset = 1;
	}

	/* Check for lun reset */
	/* 0x10 = LUN reset task management flag */
	else if (cmd[10] & 0x10) {
		/* prepare iocb */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    hba->channel_fcp)) != FC_SUCCESS) {

			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}

		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
		sbp->pkt_flags |= PACKET_POLLED;
		*pkt_flags = sbp->pkt_flags;
		mutex_exit(&sbp->mtx);

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
		    (HBA_WWN *)&ndlp->nlp_portname, lun);
#endif /* SAN_DIAG_SUPPORT */

		iocbq->flag |= IOCB_PRIORITY;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
		    cmd[0], cmd[1]);

		/* Flush the IO's on the tx queues for this lun */
		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);

		/* This is the lun reset fcp cmd */
		reset = 1;
	}

	channel = emlxs_select_fcp_channel(hba, ndlp, reset);

#ifdef SAN_DIAG_SUPPORT
	sbp->sd_start_time = gethrtime();
#endif /* SAN_DIAG_SUPPORT */
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_fcp_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;

	/* Firmware requires a real queue type; promote UNTAGGED to SIMPLE */
	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
	}

	if (reset == 0) {
		/*
		 * tgt lun reset fcp cmd has been prepared
		 * separately in the beginning
		 */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    channel)) != FC_SUCCESS) {

			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}
	}

	cp = &hba->chan[channel];
	cp->ulpSendCmd++;

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	/* Short timeouts get a 10-tick pad for the watchdog */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = lun;
	sbp->class = iocb->ULPCLASS;
	sbp->did = ndlp->nlp_DID;
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Sync outbound data before the hardware reads it */
	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.FcpIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	return (FC_SUCCESS);

} /* emlxs_send_fcp_cmd() */




/*
 * We have to consider this setup works for INTX, MSI, and MSIX
 * For INTX, intr_count is always 1
 * For MSI, intr_count is always 2 by default
 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
 */
extern int
emlxs_select_msiid(emlxs_hba_t *hba)
{
	int msiid = 0;

	/* We use round-robin */
	mutex_enter(&EMLXS_MSIID_LOCK);
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		msiid = hba->last_msiid;
		hba->last_msiid ++;
		if (hba->last_msiid >= hba->intr_count) {
			hba->last_msiid = 0;
		}
	} else {
		/* This should work for INTX and MSI also */
		/* For SLI3 the chan_count is always 4 */
		/* For SLI3 the msiid is limited to chan_count */
		msiid = hba->last_msiid;
		hba->last_msiid ++;
		/* Wrap at whichever limit is smaller: intrs or channels */
		if (hba->intr_count > hba->chan_count) {
			if (hba->last_msiid >= hba->chan_count) {
				hba->last_msiid = 0;
			}
		} else {
			if (hba->last_msiid >= hba->intr_count) {
				hba->last_msiid = 0;
			}
		}
	}
	mutex_exit(&EMLXS_MSIID_LOCK);

	return (msiid);
} /* emlxs_select_msiid */


/*
 * A channel has an association with a msi id.
 * One msi id could be associated with multiple channels.
 */
extern int
emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
{
	emlxs_config_t *cfg = &CFG;
	EQ_DESC_t *eqp;
	int chan;
	int num_wq;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		/* For SLI4 round robin all WQs associated with the msi_id */
		eqp = &hba->sli.sli4.eq[msi_id];

		mutex_enter(&eqp->lastwq_lock);
		chan = eqp->lastwq;
		eqp->lastwq++;
		num_wq = cfg[CFG_NUM_WQ].current;
		/* lastwq cycles within [msi_id*num_wq, (msi_id+1)*num_wq) */
		if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
			eqp->lastwq -= num_wq;
		}
		mutex_exit(&eqp->lastwq_lock);

		return (chan);
	} else {
		/* This is for SLI3 mode */
		return (hba->msi2chan[msi_id]);
	}

} /* emlxs_msiid_to_chan */


#ifdef SFCT_SUPPORT
/*
 * emlxs_send_fct_status:
 *
 * Issue an FCP target-mode response (FCP_TRSP64) for an existing exchange.
 * The node, did and channel were saved on the sbp when the command arrived.
 */
static int32_t
emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	CHANNEL *cp;
	uint32_t did;

	did = sbp->did;
	ndlp = sbp->node;
	cp = (CHANNEL *)sbp->channel;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Make sure node is still active */
	if (!ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "*Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* If gate is closed */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
	    IOERR_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	HBASTATS.FcpIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_status() */


/*
 * emlxs_send_fct_abort:
 *
 * Issue an ABORT_XRI for a target-mode exchange. Unlike fct_status above,
 * ndlp may legitimately be NULL here and is checked explicitly.
 */
static int32_t
emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	CHANNEL *cp;
	uint32_t did;

	did = sbp->did;
	ndlp = sbp->node;
	cp = (CHANNEL *)sbp->channel;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Make sure node is still active */
	if ((ndlp == NULL) || (!ndlp->nlp_active)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "*Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* If gate is closed */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
	if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
	    IOERR_SUCCESS) {
		return (FC_TRAN_BUSY);
	}

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_fct_abort() */

#endif /* SFCT_SUPPORT */


/*
 * emlxs_send_ip:
 *
 * Issue an IP-over-FC packet on the IP channel. Requires an active node
 * and an existing exchange (XRI), except for broadcast frames.
 * Returns FC_SUCCESS, FC_BADPACKET or FC_TRAN_BUSY.
 */
static int32_t
emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint32_t i;
	NODELIST *ndlp;
	uint32_t did;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[hba->channel_ip];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Check if node exists */
	/* Broadcast did is always a success */
	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	/* Check if gate is temporarily closed */
	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Check if an exchange has been created */
	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
		/* No exchange. Try creating one */
		/* Kick off XRI creation, then tell ULP to retry later */
		(void) emlxs_create_xri(port, cp, ndlp);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Exchange not found. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
	/* on BROADCAST commands */
	if (pkt->pkt_cmdlen == 0) {
		/* Set the pkt_cmdlen to the cookie size */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
		}
#else
		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
#endif /* >= EMLXS_MODREV3 */

	}

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means prep already completed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp->ulpSendCmd++;

	/* Initialize sbp */
	mutex_enter(&sbp->mtx);
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ip() */


/*
 * emlxs_send_els:
 *
 * Issue an outbound ELS command. Intercepts FLOGI/FDISC (NPIV/vpi
 * handling, duplicate-FLOGI link reset), PLOGI (self-login guard,
 * pt-to-pt fixups) and ADISC (auto-reject when no node exists), and
 * patches the latest service parameters into login-class payloads.
 */
static int32_t
emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_port_t *vport;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	CHANNEL *cp;
	SERV_PARM *sp;
	uint32_t cmd;
	int i;
	ELS_PKT *els_pkt;
	NODELIST *ndlp;
	uint32_t did;
	char fcsp_msg[32];
	int rc;
	int32_t rval;
	emlxs_config_t *cfg = &CFG;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Point of no return, except for ADISC & PLOGI */

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {

			/* SLI4: fabric login is driven by the VPI state */
			if (emlxs_vpi_logi_notify(port, sbp)) {
				pkt->pkt_state = FC_PKT_LOCAL_RJT;
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */
				return (FC_FAILURE);
			}
		} else {
			/*
			 * If FLOGI is already complete, then we
			 * should not be receiving another FLOGI.
			 * Reset the link to recover.
			 */
			if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
				pkt->pkt_state = FC_PKT_LOCAL_RJT;
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

				(void) emlxs_reset(port, FC_FCA_LINK_RESET);
				return (FC_FAILURE);
			}

			/* Virtual ports log into the fabric with FDISC */
			if (port->vpi > 0) {
				*((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
			}
		}

		/* Command may have been changed */
		cmd = *((uint32_t *)pkt->pkt_cmd);
		cmd &= ELS_CMD_MASK;

		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

		ndlp = NULL;

		/* We will process these cmds at the bottom of this routine */
		break;

	case ELS_CMD_PLOGI:
		/* Make sure we don't log into ourself */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (!(vport->flag & EMLXS_INI_BOUND)) {
				continue;
			}

			if (did == vport->did) {
				pkt->pkt_state = FC_PKT_NPORT_RJT;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
				emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

				return (FC_FAILURE);
			}
		}

		ndlp = NULL;

		if (hba->flag & FC_PT_TO_PT) {
			MAILBOXQ *mbox;

			/* ULP
bug fix */
			/* ULP may hand us a zero s_id in pt-to-pt mode */
			if (pkt->pkt_cmd_fhdr.s_id == 0) {
				pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
				    "PLOGI: P2P Fix. sid=0-->%x did=%x",
				    pkt->pkt_cmd_fhdr.s_id,
				    pkt->pkt_cmd_fhdr.d_id);
			}

			mutex_enter(&EMLXS_PORT_LOCK);
			port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
			port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
			mutex_exit(&EMLXS_PORT_LOCK);

			if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
				/* Update our service parms */
				if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
				    MEM_MBOX))) {
					emlxs_mb_config_link(hba, mbox);

					rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
					    mbox, MBX_NOWAIT, 0);
					/* On BUSY/SUCCESS the SLI layer owns */
					/* the mailbox; otherwise free it */
					if ((rc != MBX_BUSY) &&
					    (rc != MBX_SUCCESS)) {
						emlxs_mem_put(hba, MEM_MBOX,
						    (void *)mbox);
					}
				}
			}
		}

		/* We will process these cmds at the bottom of this routine */
		break;

	default:
		ndlp = emlxs_node_find_did(port, did, 1);

		/* If an ADISC is being sent and we have no node, */
		/* then we must fail the ADISC now */
		if (!ndlp && (cmd == ELS_CMD_ADISC) &&
		    (port->mode == MODE_INITIATOR)) {

			/* Build the LS_RJT response */
			/* elsCode 0x01 = LS_RJT */
			els_pkt = (ELS_PKT *)pkt->pkt_resp;
			els_pkt->elsCode = 0x01;
			els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
			    LSRJT_LOGICAL_ERR;
			els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
			    LSEXP_NOTHING_MORE;
			els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "ADISC Rejected. Node not found. did=0x%x", did);

			if (sbp->channel == NULL) {
				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					sbp->channel =
					    &hba->chan[hba->channel_els];
				} else {
					sbp->channel =
					    &hba->chan[FC_ELS_RING];
				}
			}

			/* Return this as rejected by the target */
			emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);

			return (FC_SUCCESS);
		}
	}

	/* DID == BCAST_DID is special case to indicate that */
	/* RPI is being passed in seq_id field */
	/* This is used by emlxs_send_logo() for target mode */

	/* Initialize iocbq */
	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means prep already completed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_els];
	cp->ulpSendCmd++;
	sp = (SERV_PARM *)&els_pkt->un.logi;

	/* Check cmd */
	switch (cmd) {
	case ELS_CMD_PRLI:
		/*
		 * if our firmware version is 3.20 or later,
		 * set the following bits for FC-TAPE support.
		 */
		if ((port->mode == MODE_INITIATOR) &&
		    (hba->vpd.feaLevelHigh >= 0x02) &&
		    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
			els_pkt->un.prli.ConfmComplAllowed = 1;
			els_pkt->un.prli.Retry = 1;
			els_pkt->un.prli.TaskRetryIdReq = 1;
		} else {
			els_pkt->un.prli.ConfmComplAllowed = 0;
			els_pkt->un.prli.Retry = 0;
			els_pkt->un.prli.TaskRetryIdReq = 0;
		}

		break;

		/* This is a patch for the ULP stack. */

		/*
		 * ULP only reads our service parameters once during bind_port,
		 * but the service parameters change due to topology.
 */
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PLOGI:
	case ELS_CMD_PDISC:
		/* Copy latest service parameters to payload */
		bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));

		if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {

			/* Clear support for virtual fabrics */
			/* randomOffset bit controls this for FLOGI */
			sp->cmn.randomOffset = 0;

			/* Set R_A_TOV to current value */
			sp->cmn.w2.r_a_tov =
			    LE_SWAP32((hba->fc_ratov * 1000));
		}

		/* Advertise NPIV support via Emulex vendor version bits */
		if ((hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED) &&
		    (cmd == ELS_CMD_PLOGI)) {
			emlxs_vvl_fmt_t *vvl;

			sp->VALID_VENDOR_VERSION = 1;
			vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
			vvl->un0.w0.oui = 0x0000C9;
			vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
			vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
			vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
		}

#ifdef DHCHAP_SUPPORT
		emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */

		break;
	}

	/* Initialize the sbp */
	mutex_enter(&sbp->mtx);
	/* Short timeouts get a 10-tick pad for the watchdog */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
	    emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);

	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	/* Check node */
	switch (cmd) {
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (port->mode == MODE_INITIATOR) {
			/* Make sure fabric node is destroyed */
			/* It should already have been destroyed at link down */
			if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
				ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
				if (ndlp) {
					if (EMLXS_SLI_UNREG_NODE(port, ndlp,
					    NULL, NULL, iocbq) == 0) {
						/* Deferring iocb tx until */
						/* completion of unreg */
						return (FC_SUCCESS);
					}
				}
			}
		}
		break;

	case ELS_CMD_PLOGI:

		ndlp = emlxs_node_find_did(port, did, 1);

		/* Quiesce the node before (re)login */
		if (ndlp && ndlp->nlp_active) {
			/* Close the node for any further normal IO */
			emlxs_node_close(port, ndlp, hba->channel_fcp,
			    pkt->pkt_timeout + 10);
			emlxs_node_close(port, ndlp, hba->channel_ip,
			    pkt->pkt_timeout + 10);

			/* Flush tx queues */
			(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

			/* Flush chip queues */
			(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
		}

		break;

	case ELS_CMD_PRLI:

		ndlp = emlxs_node_find_did(port, did, 1);

		if (ndlp && ndlp->nlp_active) {
			/*
			 * Close the node for any further FCP IO;
			 * Flush all outstanding I/O only if
			 * "Establish Image Pair" bit is set.
9380 */ 9381 emlxs_node_close(port, ndlp, hba->channel_fcp, 9382 pkt->pkt_timeout + 10); 9383 9384 if (els_pkt->un.prli.estabImagePair) { 9385 /* Flush tx queues */ 9386 (void) emlxs_tx_node_flush(port, ndlp, 9387 &hba->chan[hba->channel_fcp], 0, 0); 9388 9389 /* Flush chip queues */ 9390 (void) emlxs_chipq_node_flush(port, 9391 &hba->chan[hba->channel_fcp], ndlp, 0); 9392 } 9393 } 9394 9395 break; 9396 9397 } 9398 9399 HBASTATS.ElsCmdIssued++; 9400 9401 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9402 9403 return (FC_SUCCESS); 9404 9405 } /* emlxs_send_els() */ 9406 9407 9408 9409 9410 static int32_t 9411 emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 9412 { 9413 emlxs_hba_t *hba = HBA; 9414 emlxs_config_t *cfg = &CFG; 9415 fc_packet_t *pkt; 9416 IOCBQ *iocbq; 9417 IOCB *iocb; 9418 NODELIST *ndlp; 9419 CHANNEL *cp; 9420 int i; 9421 uint32_t cmd; 9422 uint32_t ucmd; 9423 ELS_PKT *els_pkt; 9424 fc_unsol_buf_t *ubp; 9425 emlxs_ub_priv_t *ub_priv; 9426 uint32_t did; 9427 char fcsp_msg[32]; 9428 uint8_t *ub_buffer; 9429 int32_t rval; 9430 9431 fcsp_msg[0] = 0; 9432 pkt = PRIV2PKT(sbp); 9433 els_pkt = (ELS_PKT *)pkt->pkt_cmd; 9434 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9435 9436 iocbq = &sbp->iocbq; 9437 iocb = &iocbq->iocb; 9438 9439 /* Acquire the unsolicited command this pkt is replying to */ 9440 if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) { 9441 /* This is for auto replies when no ub's are used */ 9442 ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT; 9443 ubp = NULL; 9444 ub_priv = NULL; 9445 ub_buffer = NULL; 9446 9447 #ifdef SFCT_SUPPORT 9448 if (sbp->fct_cmd) { 9449 fct_els_t *els = 9450 (fct_els_t *)sbp->fct_cmd->cmd_specific; 9451 ub_buffer = (uint8_t *)els->els_req_payload; 9452 } 9453 #endif /* SFCT_SUPPORT */ 9454 9455 } else { 9456 /* Find the ub buffer that goes with this reply */ 9457 if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) { 9458 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg, 9459 "ELS reply: Invalid 
oxid=%x", 9460 pkt->pkt_cmd_fhdr.ox_id); 9461 return (FC_BADPACKET); 9462 } 9463 9464 ub_buffer = (uint8_t *)ubp->ub_buffer; 9465 ub_priv = ubp->ub_fca_private; 9466 ucmd = ub_priv->cmd; 9467 9468 ub_priv->flags |= EMLXS_UB_REPLY; 9469 9470 /* Reset oxid to ELS command */ 9471 /* We do this because the ub is only valid */ 9472 /* until we return from this thread */ 9473 pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff; 9474 } 9475 9476 /* Save the result */ 9477 sbp->ucmd = ucmd; 9478 9479 if (sbp->channel == NULL) { 9480 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) { 9481 sbp->channel = &hba->chan[hba->channel_els]; 9482 } else { 9483 sbp->channel = &hba->chan[FC_ELS_RING]; 9484 } 9485 } 9486 9487 /* Check for interceptions */ 9488 switch (ucmd) { 9489 9490 #ifdef ULP_PATCH2 9491 case ELS_CMD_LOGO: 9492 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) { 9493 break; 9494 } 9495 9496 /* Check if this was generated by ULP and not us */ 9497 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 9498 9499 /* 9500 * Since we replied to this already, 9501 * we won't need to send this now 9502 */ 9503 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 9504 9505 return (FC_SUCCESS); 9506 } 9507 9508 break; 9509 #endif /* ULP_PATCH2 */ 9510 9511 #ifdef ULP_PATCH3 9512 case ELS_CMD_PRLI: 9513 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) { 9514 break; 9515 } 9516 9517 /* Check if this was generated by ULP and not us */ 9518 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 9519 9520 /* 9521 * Since we replied to this already, 9522 * we won't need to send this now 9523 */ 9524 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 9525 9526 return (FC_SUCCESS); 9527 } 9528 9529 break; 9530 #endif /* ULP_PATCH3 */ 9531 9532 9533 #ifdef ULP_PATCH4 9534 case ELS_CMD_PRLO: 9535 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) { 9536 break; 9537 } 9538 9539 /* Check if this was generated by ULP and not us */ 9540 if (!(sbp->pkt_flags & PACKET_ALLOCATED)) { 9541 /* 9542 * Since we replied to 
this already, 9543 * we won't need to send this now 9544 */ 9545 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 9546 9547 return (FC_SUCCESS); 9548 } 9549 9550 break; 9551 #endif /* ULP_PATCH4 */ 9552 9553 #ifdef ULP_PATCH6 9554 case ELS_CMD_RSCN: 9555 if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) { 9556 break; 9557 } 9558 9559 /* Check if this RSCN was generated by us */ 9560 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 9561 cmd = *((uint32_t *)pkt->pkt_cmd); 9562 cmd = LE_SWAP32(cmd); 9563 cmd &= ELS_CMD_MASK; 9564 9565 /* 9566 * If ULP is accepting this, 9567 * then close affected node 9568 */ 9569 if ((port->mode == MODE_INITIATOR) && ub_buffer && 9570 cmd == ELS_CMD_ACC) { 9571 fc_rscn_t *rscn; 9572 uint32_t count; 9573 uint32_t *lp; 9574 9575 /* 9576 * Only the Leadville code path will 9577 * come thru here. The RSCN data is NOT 9578 * swapped properly for the Comstar code 9579 * path. 9580 */ 9581 lp = (uint32_t *)ub_buffer; 9582 rscn = (fc_rscn_t *)lp++; 9583 count = 9584 ((rscn->rscn_payload_len - 4) / 4); 9585 9586 /* Close affected ports */ 9587 for (i = 0; i < count; i++, lp++) { 9588 (void) emlxs_port_offline(port, 9589 *lp); 9590 } 9591 } 9592 9593 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 9594 "RSCN %s: did=%x oxid=%x rxid=%x. " 9595 "Intercepted.", emlxs_elscmd_xlate(cmd), 9596 did, pkt->pkt_cmd_fhdr.ox_id, 9597 pkt->pkt_cmd_fhdr.rx_id); 9598 9599 /* 9600 * Since we generated this RSCN, 9601 * we won't need to send this reply 9602 */ 9603 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 9604 9605 return (FC_SUCCESS); 9606 } 9607 9608 break; 9609 #endif /* ULP_PATCH6 */ 9610 9611 case ELS_CMD_PLOGI: 9612 /* Check if this PLOGI was generated by us */ 9613 if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) { 9614 cmd = *((uint32_t *)pkt->pkt_cmd); 9615 cmd = LE_SWAP32(cmd); 9616 cmd &= ELS_CMD_MASK; 9617 9618 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 9619 "PLOGI %s: did=%x oxid=%x rxid=%x. 
" 9620 "Intercepted.", emlxs_elscmd_xlate(cmd), 9621 did, pkt->pkt_cmd_fhdr.ox_id, 9622 pkt->pkt_cmd_fhdr.rx_id); 9623 9624 /* 9625 * Since we generated this PLOGI, 9626 * we won't need to send this reply 9627 */ 9628 emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1); 9629 9630 return (FC_SUCCESS); 9631 } 9632 9633 break; 9634 } 9635 9636 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9637 emlxs_swap_els_pkt(sbp); 9638 #endif /* EMLXS_MODREV2X */ 9639 9640 9641 cmd = *((uint32_t *)pkt->pkt_cmd); 9642 cmd &= ELS_CMD_MASK; 9643 9644 /* Check if modifications are needed */ 9645 switch (ucmd) { 9646 case (ELS_CMD_PRLI): 9647 9648 if (cmd == ELS_CMD_ACC) { 9649 /* This is a patch for the ULP stack. */ 9650 /* ULP does not keep track of FCP2 support */ 9651 if ((port->mode == MODE_INITIATOR) && 9652 (hba->vpd.feaLevelHigh >= 0x02) && 9653 (cfg[CFG_ADISC_SUPPORT].current != 0)) { 9654 els_pkt->un.prli.ConfmComplAllowed = 1; 9655 els_pkt->un.prli.Retry = 1; 9656 els_pkt->un.prli.TaskRetryIdReq = 1; 9657 } else { 9658 els_pkt->un.prli.ConfmComplAllowed = 0; 9659 els_pkt->un.prli.Retry = 0; 9660 els_pkt->un.prli.TaskRetryIdReq = 0; 9661 } 9662 } 9663 9664 break; 9665 9666 case ELS_CMD_FLOGI: 9667 case ELS_CMD_FDISC: 9668 if (cmd == ELS_CMD_ACC) { 9669 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi; 9670 9671 /* This is a patch for the ULP stack. */ 9672 9673 /* 9674 * ULP only reads our service parameters 9675 * once during bind_port, but the service 9676 * parameters change due to topology. 9677 */ 9678 9679 /* Copy latest service parameters to payload */ 9680 bcopy((void *)&port->sparam, 9681 (void *)sp, sizeof (SERV_PARM)); 9682 9683 /* We are in pt-to-pt mode. 
Set R_A_TOV to default */ 9684 sp->cmn.w2.r_a_tov = 9685 LE_SWAP32((FF_DEF_RATOV * 1000)); 9686 9687 /* Clear support for virtual fabrics */ 9688 /* randomOffset bit controls this for FLOGI */ 9689 sp->cmn.randomOffset = 0; 9690 #ifdef DHCHAP_SUPPORT 9691 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg); 9692 #endif /* DHCHAP_SUPPORT */ 9693 } 9694 break; 9695 9696 case ELS_CMD_PLOGI: 9697 case ELS_CMD_PDISC: 9698 if (cmd == ELS_CMD_ACC) { 9699 SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi; 9700 9701 /* This is a patch for the ULP stack. */ 9702 9703 /* 9704 * ULP only reads our service parameters 9705 * once during bind_port, but the service 9706 * parameters change due to topology. 9707 */ 9708 9709 /* Copy latest service parameters to payload */ 9710 bcopy((void *)&port->sparam, 9711 (void *)sp, sizeof (SERV_PARM)); 9712 9713 #ifdef DHCHAP_SUPPORT 9714 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg); 9715 #endif /* DHCHAP_SUPPORT */ 9716 } 9717 break; 9718 9719 } 9720 9721 /* Initalize iocbq */ 9722 iocbq->node = (void *)NULL; 9723 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) { 9724 9725 if (rval == 0xff) { 9726 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9727 rval = FC_SUCCESS; 9728 } 9729 9730 return (rval); 9731 } 9732 9733 cp = &hba->chan[hba->channel_els]; 9734 cp->ulpSendCmd++; 9735 9736 /* Initalize sbp */ 9737 mutex_enter(&sbp->mtx); 9738 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9739 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 9740 sbp->node = (void *) NULL; 9741 sbp->lun = EMLXS_LUN_NONE; 9742 sbp->class = iocb->ULPCLASS; 9743 sbp->did = did; 9744 mutex_exit(&sbp->mtx); 9745 9746 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg, 9747 "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd), 9748 emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id, 9749 pkt->pkt_cmd_fhdr.rx_id, fcsp_msg); 9750 9751 /* Process nodes */ 9752 switch (ucmd) { 9753 case ELS_CMD_RSCN: 9754 if ((port->mode == MODE_INITIATOR) && ub_buffer && 9755 cmd == ELS_CMD_ACC) { 9756 fc_rscn_t *rscn; 9757 uint32_t count; 9758 uint32_t *lp = NULL; 9759 9760 /* 9761 * Only the Leadville code path will come thru 9762 * here. The RSCN data is NOT swapped properly 9763 * for the Comstar code path. 9764 */ 9765 lp = (uint32_t *)ub_buffer; 9766 rscn = (fc_rscn_t *)lp++; 9767 count = ((rscn->rscn_payload_len - 4) / 4); 9768 9769 /* Close affected ports */ 9770 for (i = 0; i < count; i++, lp++) { 9771 (void) emlxs_port_offline(port, *lp); 9772 } 9773 } 9774 break; 9775 9776 case ELS_CMD_PLOGI: 9777 if (cmd == ELS_CMD_ACC) { 9778 ndlp = emlxs_node_find_did(port, did, 1); 9779 9780 if (ndlp && ndlp->nlp_active) { 9781 /* Close the node for any further normal IO */ 9782 emlxs_node_close(port, ndlp, hba->channel_fcp, 9783 pkt->pkt_timeout + 10); 9784 emlxs_node_close(port, ndlp, hba->channel_ip, 9785 pkt->pkt_timeout + 10); 9786 9787 /* Flush tx queue */ 9788 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 9789 9790 /* Flush chip queue */ 9791 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 9792 } 9793 } 9794 break; 9795 9796 case ELS_CMD_PRLI: 9797 if (cmd == ELS_CMD_ACC) { 9798 ndlp = emlxs_node_find_did(port, did, 1); 9799 9800 if (ndlp && ndlp->nlp_active) { 9801 /* Close the node for any further normal IO */ 9802 emlxs_node_close(port, ndlp, hba->channel_fcp, 9803 pkt->pkt_timeout + 10); 9804 9805 /* Flush tx queues */ 9806 (void) emlxs_tx_node_flush(port, ndlp, 9807 &hba->chan[hba->channel_fcp], 0, 0); 9808 9809 /* 
Flush chip queues */ 9810 (void) emlxs_chipq_node_flush(port, 9811 &hba->chan[hba->channel_fcp], ndlp, 0); 9812 } 9813 } 9814 break; 9815 9816 case ELS_CMD_PRLO: 9817 if (cmd == ELS_CMD_ACC) { 9818 ndlp = emlxs_node_find_did(port, did, 1); 9819 9820 if (ndlp && ndlp->nlp_active) { 9821 /* Close the node for any further normal IO */ 9822 emlxs_node_close(port, ndlp, 9823 hba->channel_fcp, 60); 9824 9825 /* Flush tx queues */ 9826 (void) emlxs_tx_node_flush(port, ndlp, 9827 &hba->chan[hba->channel_fcp], 0, 0); 9828 9829 /* Flush chip queues */ 9830 (void) emlxs_chipq_node_flush(port, 9831 &hba->chan[hba->channel_fcp], ndlp, 0); 9832 } 9833 } 9834 9835 break; 9836 9837 case ELS_CMD_LOGO: 9838 if (cmd == ELS_CMD_ACC) { 9839 ndlp = emlxs_node_find_did(port, did, 1); 9840 9841 if (ndlp && ndlp->nlp_active) { 9842 /* Close the node for any further normal IO */ 9843 emlxs_node_close(port, ndlp, 9844 hba->channel_fcp, 60); 9845 emlxs_node_close(port, ndlp, 9846 hba->channel_ip, 60); 9847 9848 /* Flush tx queues */ 9849 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0); 9850 9851 /* Flush chip queues */ 9852 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0); 9853 } 9854 } 9855 9856 break; 9857 } 9858 9859 if (pkt->pkt_cmdlen) { 9860 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9861 DDI_DMA_SYNC_FORDEV); 9862 } 9863 9864 HBASTATS.ElsRspIssued++; 9865 9866 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9867 9868 return (FC_SUCCESS); 9869 9870 } /* emlxs_send_els_rsp() */ 9871 9872 9873 #ifdef MENLO_SUPPORT 9874 static int32_t 9875 emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp) 9876 { 9877 emlxs_hba_t *hba = HBA; 9878 fc_packet_t *pkt; 9879 IOCBQ *iocbq; 9880 IOCB *iocb; 9881 CHANNEL *cp; 9882 NODELIST *ndlp; 9883 uint32_t did; 9884 uint32_t *lp; 9885 int32_t rval; 9886 9887 pkt = PRIV2PKT(sbp); 9888 did = EMLXS_MENLO_DID; 9889 lp = (uint32_t *)pkt->pkt_cmd; 9890 9891 iocbq = &sbp->iocbq; 9892 iocb = &iocbq->iocb; 9893 9894 ndlp = emlxs_node_find_did(port, did, 1); 
9895 9896 if (!ndlp || !ndlp->nlp_active) { 9897 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 9898 "Node not found. did=0x%x", did); 9899 9900 return (FC_BADPACKET); 9901 } 9902 9903 iocbq->node = (void *) ndlp; 9904 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 9905 9906 if (rval == 0xff) { 9907 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 9908 rval = FC_SUCCESS; 9909 } 9910 9911 return (rval); 9912 } 9913 9914 cp = &hba->chan[hba->channel_ct]; 9915 cp->ulpSendCmd++; 9916 9917 if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) { 9918 /* Cmd phase */ 9919 9920 /* Initalize iocb */ 9921 iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id; 9922 iocb->ULPCONTEXT = 0; 9923 iocb->ULPPU = 3; 9924 9925 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9926 "%s: [%08x,%08x,%08x,%08x]", 9927 emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]), 9928 BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4])); 9929 9930 } else { /* FC_PKT_OUTBOUND */ 9931 9932 /* MENLO_CMD_FW_DOWNLOAD Data Phase */ 9933 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX; 9934 9935 /* Initalize iocb */ 9936 iocb->un.genreq64.param = 0; 9937 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id; 9938 iocb->ULPPU = 1; 9939 9940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 9941 "%s: Data: rxid=0x%x size=%d", 9942 emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD), 9943 pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen); 9944 } 9945 9946 /* Initalize sbp */ 9947 mutex_enter(&sbp->mtx); 9948 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 9949 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 9950 sbp->node = (void *) ndlp; 9951 sbp->lun = EMLXS_LUN_NONE; 9952 sbp->class = iocb->ULPCLASS; 9953 sbp->did = did; 9954 mutex_exit(&sbp->mtx); 9955 9956 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 9957 DDI_DMA_SYNC_FORDEV); 9958 9959 HBASTATS.CtCmdIssued++; 9960 9961 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 9962 9963 return (FC_SUCCESS); 9964 9965 } /* emlxs_send_menlo() */ 9966 #endif /* MENLO_SUPPORT */ 9967 9968 9969 static int32_t 9970 emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp) 9971 { 9972 emlxs_hba_t *hba = HBA; 9973 fc_packet_t *pkt; 9974 IOCBQ *iocbq; 9975 IOCB *iocb; 9976 NODELIST *ndlp; 9977 uint32_t did; 9978 CHANNEL *cp; 9979 int32_t rval; 9980 9981 pkt = PRIV2PKT(sbp); 9982 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 9983 9984 iocbq = &sbp->iocbq; 9985 iocb = &iocbq->iocb; 9986 9987 ndlp = emlxs_node_find_did(port, did, 1); 9988 9989 if (!ndlp || !ndlp->nlp_active) { 9990 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg, 9991 "Node not found. did=0x%x", did); 9992 9993 return (FC_BADPACKET); 9994 } 9995 9996 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 9997 emlxs_swap_ct_pkt(sbp); 9998 #endif /* EMLXS_MODREV2X */ 9999 10000 iocbq->node = (void *)ndlp; 10001 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 10002 10003 if (rval == 0xff) { 10004 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 10005 rval = FC_SUCCESS; 10006 } 10007 10008 return (rval); 10009 } 10010 10011 cp = &hba->chan[hba->channel_ct]; 10012 cp->ulpSendCmd++; 10013 10014 /* Initalize sbp */ 10015 mutex_enter(&sbp->mtx); 10016 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 10017 ((pkt->pkt_timeout > 0xff) ? 
0 : 10); 10018 sbp->node = (void *)ndlp; 10019 sbp->lun = EMLXS_LUN_NONE; 10020 sbp->class = iocb->ULPCLASS; 10021 sbp->did = did; 10022 mutex_exit(&sbp->mtx); 10023 10024 if (did == NAMESERVER_DID) { 10025 SLI_CT_REQUEST *CtCmd; 10026 uint32_t *lp0; 10027 10028 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 10029 lp0 = (uint32_t *)pkt->pkt_cmd; 10030 10031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 10032 "%s: did=%x [%08x,%08x]", 10033 emlxs_ctcmd_xlate( 10034 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 10035 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 10036 10037 if (hba->flag & FC_NPIV_DELAY_REQUIRED) { 10038 sbp->pkt_flags |= PACKET_DELAY_REQUIRED; 10039 } 10040 10041 } else if (did == FDMI_DID) { 10042 SLI_CT_REQUEST *CtCmd; 10043 uint32_t *lp0; 10044 10045 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 10046 lp0 = (uint32_t *)pkt->pkt_cmd; 10047 10048 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 10049 "%s: did=%x [%08x,%08x]", 10050 emlxs_mscmd_xlate( 10051 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 10052 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 10053 } else { 10054 SLI_CT_REQUEST *CtCmd; 10055 uint32_t *lp0; 10056 10057 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 10058 lp0 = (uint32_t *)pkt->pkt_cmd; 10059 10060 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg, 10061 "%s: did=%x [%08x,%08x]", 10062 emlxs_rmcmd_xlate( 10063 LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)), 10064 did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5])); 10065 } 10066 10067 if (pkt->pkt_cmdlen) { 10068 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 10069 DDI_DMA_SYNC_FORDEV); 10070 } 10071 10072 HBASTATS.CtCmdIssued++; 10073 10074 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 10075 10076 return (FC_SUCCESS); 10077 10078 } /* emlxs_send_ct() */ 10079 10080 10081 static int32_t 10082 emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp) 10083 { 10084 emlxs_hba_t *hba = HBA; 10085 fc_packet_t *pkt; 10086 CHANNEL *cp; 10087 IOCBQ *iocbq; 10088 IOCB *iocb; 10089 uint32_t *cmd; 10090 SLI_CT_REQUEST 
*CtCmd; 10091 int32_t rval; 10092 10093 pkt = PRIV2PKT(sbp); 10094 CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd; 10095 cmd = (uint32_t *)pkt->pkt_cmd; 10096 10097 iocbq = &sbp->iocbq; 10098 iocb = &iocbq->iocb; 10099 10100 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 10101 emlxs_swap_ct_pkt(sbp); 10102 #endif /* EMLXS_MODREV2X */ 10103 10104 if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) { 10105 10106 if (rval == 0xff) { 10107 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1); 10108 rval = FC_SUCCESS; 10109 } 10110 10111 return (rval); 10112 } 10113 10114 cp = &hba->chan[hba->channel_ct]; 10115 cp->ulpSendCmd++; 10116 10117 /* Initalize sbp */ 10118 mutex_enter(&sbp->mtx); 10119 sbp->ticks = hba->timer_tics + pkt->pkt_timeout + 10120 ((pkt->pkt_timeout > 0xff) ? 0 : 10); 10121 sbp->node = NULL; 10122 sbp->lun = EMLXS_LUN_NONE; 10123 sbp->class = iocb->ULPCLASS; 10124 mutex_exit(&sbp->mtx); 10125 10126 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg, 10127 "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ", 10128 emlxs_rmcmd_xlate(LE_SWAP16( 10129 CtCmd->CommandResponse.bits.CmdRsp)), 10130 CtCmd->ReasonCode, CtCmd->Explanation, 10131 LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]), 10132 pkt->pkt_cmd_fhdr.rx_id); 10133 10134 if (pkt->pkt_cmdlen) { 10135 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen, 10136 DDI_DMA_SYNC_FORDEV); 10137 } 10138 10139 HBASTATS.CtRspIssued++; 10140 10141 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq); 10142 10143 return (FC_SUCCESS); 10144 10145 } /* emlxs_send_ct_rsp() */ 10146 10147 10148 /* 10149 * emlxs_get_instance() 10150 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst. 
 */
extern uint32_t
emlxs_get_instance(int32_t ddiinst)
{
	uint32_t i;
	uint32_t inst;

	mutex_enter(&emlxs_device.lock);

	/* Linear search of the instance map; defaults to MAX_FC_BRDS */
	/* (an out-of-range index) when the ddiinst is not registered. */
	inst = MAX_FC_BRDS;
	for (i = 0; i < emlxs_instance_count; i++) {
		if (emlxs_instance[i] == ddiinst) {
			inst = i;
			break;
		}
	}

	mutex_exit(&emlxs_device.lock);

	return (inst);

} /* emlxs_get_instance() */


/*
 * emlxs_add_instance()
 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst.
 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0.
 */
static uint32_t
emlxs_add_instance(int32_t ddiinst)
{
	uint32_t i;

	mutex_enter(&emlxs_device.lock);

	/* First see if the ddiinst already exists */
	for (i = 0; i < emlxs_instance_count; i++) {
		if (emlxs_instance[i] == ddiinst) {
			break;
		}
	}

	/* If it doesn't already exist, add it */
	/* NOTE(review): if the table is full (i == MAX_FC_BRDS) the */
	/* ddiinst is silently not added and i is returned unchanged. */
	if (i >= emlxs_instance_count) {
		if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
			emlxs_instance[i] = ddiinst;
			emlxs_instance_count++;
			emlxs_device.hba_count = emlxs_instance_count;
		}
	}

	mutex_exit(&emlxs_device.lock);

	return (i);

} /* emlxs_add_instance() */


/*
 * Central I/O completion entry point.  Records the completion status on
 * the sbp and routes the pkt to the proper completion path (polled
 * wakeup, immediate iodone, or the doneq serviced by the iodone thread).
 */
/*ARGSUSED*/
extern void
emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t doneq)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_buf_t *fpkt;

	port = sbp->port;

	if (!port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
		    "NULL port found. 
sbp=%p flags=%x", sbp, sbp->pkt_flags); 10224 10225 return; 10226 } 10227 10228 hba = HBA; 10229 10230 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) && 10231 (sbp->iotag)) { 10232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg, 10233 "WARNING: Completing IO with iotag. sbp=%p iotag=%d " 10234 "xri_flags=%x", 10235 sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0)); 10236 10237 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1); 10238 } 10239 10240 mutex_enter(&sbp->mtx); 10241 10242 /* Check for error conditions */ 10243 if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED | 10244 PACKET_IN_DONEQ | PACKET_IN_COMPLETION | 10245 PACKET_IN_TXQ | PACKET_IN_CHIPQ)) { 10246 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 10247 EMLXS_MSGF(EMLXS_CONTEXT, 10248 &emlxs_pkt_completion_error_msg, 10249 "Packet already returned. sbp=%p flags=%x", sbp, 10250 sbp->pkt_flags); 10251 } 10252 10253 else if (sbp->pkt_flags & PACKET_COMPLETED) { 10254 EMLXS_MSGF(EMLXS_CONTEXT, 10255 &emlxs_pkt_completion_error_msg, 10256 "Packet already completed. sbp=%p flags=%x", sbp, 10257 sbp->pkt_flags); 10258 } 10259 10260 else if (sbp->pkt_flags & PACKET_IN_DONEQ) { 10261 EMLXS_MSGF(EMLXS_CONTEXT, 10262 &emlxs_pkt_completion_error_msg, 10263 "Pkt already on done queue. sbp=%p flags=%x", sbp, 10264 sbp->pkt_flags); 10265 } 10266 10267 else if (sbp->pkt_flags & PACKET_IN_COMPLETION) { 10268 EMLXS_MSGF(EMLXS_CONTEXT, 10269 &emlxs_pkt_completion_error_msg, 10270 "Packet already in completion. sbp=%p flags=%x", 10271 sbp, sbp->pkt_flags); 10272 } 10273 10274 else if (sbp->pkt_flags & PACKET_IN_CHIPQ) { 10275 EMLXS_MSGF(EMLXS_CONTEXT, 10276 &emlxs_pkt_completion_error_msg, 10277 "Packet still on chip queue. sbp=%p flags=%x", 10278 sbp, sbp->pkt_flags); 10279 } 10280 10281 else if (sbp->pkt_flags & PACKET_IN_TXQ) { 10282 EMLXS_MSGF(EMLXS_CONTEXT, 10283 &emlxs_pkt_completion_error_msg, 10284 "Packet still on tx queue. 
sbp=%p flags=%x", sbp, 10285 sbp->pkt_flags); 10286 } 10287 10288 mutex_exit(&sbp->mtx); 10289 return; 10290 } 10291 10292 /* Packet is now in completion */ 10293 sbp->pkt_flags |= PACKET_IN_COMPLETION; 10294 10295 /* Set the state if not already set */ 10296 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 10297 emlxs_set_pkt_state(sbp, iostat, localstat, 0); 10298 } 10299 10300 /* Check for parent flush packet */ 10301 /* If pkt has a parent flush packet then adjust its count now */ 10302 fpkt = sbp->fpkt; 10303 if (fpkt) { 10304 /* 10305 * We will try to NULL sbp->fpkt inside the 10306 * fpkt's mutex if possible 10307 */ 10308 10309 if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) { 10310 mutex_enter(&fpkt->mtx); 10311 if (fpkt->flush_count) { 10312 fpkt->flush_count--; 10313 } 10314 sbp->fpkt = NULL; 10315 mutex_exit(&fpkt->mtx); 10316 } else { /* fpkt has been returned already */ 10317 10318 sbp->fpkt = NULL; 10319 } 10320 } 10321 10322 /* If pkt is polled, then wake up sleeping thread */ 10323 if (sbp->pkt_flags & PACKET_POLLED) { 10324 /* Don't set the PACKET_ULP_OWNED flag here */ 10325 /* because the polling thread will do it */ 10326 sbp->pkt_flags |= PACKET_COMPLETED; 10327 mutex_exit(&sbp->mtx); 10328 10329 /* Wake up sleeping thread */ 10330 mutex_enter(&EMLXS_PKT_LOCK); 10331 cv_broadcast(&EMLXS_PKT_CV); 10332 mutex_exit(&EMLXS_PKT_LOCK); 10333 } 10334 10335 /* If packet was generated by our driver, */ 10336 /* then complete it immediately */ 10337 else if (sbp->pkt_flags & PACKET_ALLOCATED) { 10338 mutex_exit(&sbp->mtx); 10339 10340 emlxs_iodone(sbp); 10341 } 10342 10343 /* Put the pkt on the done queue for callback */ 10344 /* completion in another thread */ 10345 else { 10346 sbp->pkt_flags |= PACKET_IN_DONEQ; 10347 sbp->next = NULL; 10348 mutex_exit(&sbp->mtx); 10349 10350 /* Put pkt on doneq, so I/O's will be completed in order */ 10351 mutex_enter(&EMLXS_PORT_LOCK); 10352 if (hba->iodone_tail == NULL) { 10353 hba->iodone_list = sbp; 10354 
hba->iodone_count = 1; 10355 } else { 10356 hba->iodone_tail->next = sbp; 10357 hba->iodone_count++; 10358 } 10359 hba->iodone_tail = sbp; 10360 mutex_exit(&EMLXS_PORT_LOCK); 10361 10362 /* Trigger a thread to service the doneq */ 10363 emlxs_thread_trigger1(&hba->iodone_thread, 10364 emlxs_iodone_server); 10365 } 10366 10367 return; 10368 10369 } /* emlxs_pkt_complete() */ 10370 10371 10372 #ifdef SAN_DIAG_SUPPORT 10373 /* 10374 * This routine is called with EMLXS_PORT_LOCK held so we can just increment 10375 * normally. Don't have to use atomic operations. 10376 */ 10377 extern void 10378 emlxs_update_sd_bucket(emlxs_buf_t *sbp) 10379 { 10380 emlxs_port_t *vport; 10381 fc_packet_t *pkt; 10382 uint32_t did; 10383 hrtime_t t; 10384 hrtime_t delta_time; 10385 int i; 10386 NODELIST *ndlp; 10387 10388 vport = sbp->port; 10389 10390 if ((emlxs_sd_bucket.search_type == 0) || 10391 (vport->sd_io_latency_state != SD_COLLECTING)) { 10392 return; 10393 } 10394 10395 /* Compute the iolatency time in microseconds */ 10396 t = gethrtime(); 10397 delta_time = t - sbp->sd_start_time; 10398 pkt = PRIV2PKT(sbp); 10399 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id); 10400 ndlp = emlxs_node_find_did(vport, did, 1); 10401 10402 if (!ndlp) { 10403 return; 10404 } 10405 10406 if (delta_time >= 10407 emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) { 10408 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1]. 
		    count++;
	} else if (delta_time <= emlxs_sd_bucket.values[0]) {
		ndlp->sd_dev_bucket[0].count++;
	} else {
		/* Find the latency bucket whose range brackets delta_time */
		for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
			if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
			    (delta_time <= emlxs_sd_bucket.values[i])) {
				ndlp->sd_dev_bucket[i].count++;
				break;
			}
		}
	}

	return;

} /* emlxs_update_sd_bucket() */
#endif /* SAN_DIAG_SUPPORT */

/*
 * Doneq service thread (triggered from emlxs_pkt_complete).  Drains
 * hba->iodone_list one pkt at a time, dropping EMLXS_PORT_LOCK around
 * each emlxs_iodone() call so completions never run under the lock.
 */
/*ARGSUSED*/
static void
emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
	emlxs_buf_t *sbp;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Remove one pkt from the doneq head and complete it */
	while ((sbp = hba->iodone_list) != NULL) {
		if ((hba->iodone_list = sbp->next) == NULL) {
			hba->iodone_tail = NULL;
			hba->iodone_count = 0;
		} else {
			hba->iodone_count--;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Prepare the pkt for completion */
		mutex_enter(&sbp->mtx);
		sbp->next = NULL;
		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
		mutex_exit(&sbp->mtx);

		/* Complete the IO now */
		emlxs_iodone(sbp);

		/* Reacquire lock and check if more work is to be done */
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	/* A DMA error was detected during completion; restart the HBA */
	if (hba->flag & FC_DMA_CHECK_ERROR) {
		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    NULL, NULL);
	}
#endif /* FMA_SUPPORT */

	return;

} /* End emlxs_iodone_server */


/*
 * Final completion step: marks the pkt as returned to the ULP and
 * invokes the ULP's pkt_comp callback, if one was registered.
 */
static void
emlxs_iodone(emlxs_buf_t *sbp)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = sbp->port;
	emlxs_hba_t *hba = port->hba;
#endif /* FMA_SUPPORT */

	fc_packet_t *pkt;
	CHANNEL *cp;

	pkt = PRIV2PKT(sbp);

	/* Check one more time that the pkt has 
not already been returned */ 10488 if (sbp->pkt_flags & PACKET_ULP_OWNED) { 10489 return; 10490 } 10491 10492 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 10493 emlxs_unswap_pkt(sbp); 10494 #endif /* EMLXS_MODREV2X */ 10495 10496 mutex_enter(&sbp->mtx); 10497 sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED); 10498 mutex_exit(&sbp->mtx); 10499 10500 if (pkt->pkt_comp) { 10501 #ifdef FMA_SUPPORT 10502 emlxs_check_dma(hba, sbp); 10503 #endif /* FMA_SUPPORT */ 10504 10505 if (sbp->channel) { 10506 cp = (CHANNEL *)sbp->channel; 10507 cp->ulpCmplCmd++; 10508 } 10509 10510 (*pkt->pkt_comp) (pkt); 10511 } 10512 10513 return; 10514 10515 } /* emlxs_iodone() */ 10516 10517 10518 10519 extern fc_unsol_buf_t * 10520 emlxs_ub_find(emlxs_port_t *port, uint32_t token) 10521 { 10522 emlxs_unsol_buf_t *pool; 10523 fc_unsol_buf_t *ubp; 10524 emlxs_ub_priv_t *ub_priv; 10525 10526 /* Check if this is a valid ub token */ 10527 if (token < EMLXS_UB_TOKEN_OFFSET) { 10528 return (NULL); 10529 } 10530 10531 mutex_enter(&EMLXS_UB_LOCK); 10532 10533 pool = port->ub_pool; 10534 while (pool) { 10535 /* Find a pool with the proper token range */ 10536 if (token >= pool->pool_first_token && 10537 token <= pool->pool_last_token) { 10538 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token - 10539 pool->pool_first_token)]; 10540 ub_priv = ubp->ub_fca_private; 10541 10542 if (ub_priv->token != token) { 10543 EMLXS_MSGF(EMLXS_CONTEXT, 10544 &emlxs_sfs_debug_msg, 10545 "ub_find: Invalid token=%x", ubp, token, 10546 ub_priv->token); 10547 10548 ubp = NULL; 10549 } 10550 10551 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) { 10552 EMLXS_MSGF(EMLXS_CONTEXT, 10553 &emlxs_sfs_debug_msg, 10554 "ub_find: Buffer not in use. 
buffer=%p " 10555 "token=%x", ubp, token); 10556 10557 ubp = NULL; 10558 } 10559 10560 mutex_exit(&EMLXS_UB_LOCK); 10561 10562 return (ubp); 10563 } 10564 10565 pool = pool->pool_next; 10566 } 10567 10568 mutex_exit(&EMLXS_UB_LOCK); 10569 10570 return (NULL); 10571 10572 } /* emlxs_ub_find() */ 10573 10574 10575 10576 extern fc_unsol_buf_t * 10577 emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type, 10578 uint32_t reserve) 10579 { 10580 emlxs_hba_t *hba = HBA; 10581 emlxs_unsol_buf_t *pool; 10582 fc_unsol_buf_t *ubp; 10583 emlxs_ub_priv_t *ub_priv; 10584 uint32_t i; 10585 uint32_t resv_flag; 10586 uint32_t pool_free; 10587 uint32_t pool_free_resv; 10588 10589 mutex_enter(&EMLXS_UB_LOCK); 10590 10591 pool = port->ub_pool; 10592 while (pool) { 10593 /* Find a pool of the appropriate type and size */ 10594 if ((pool->pool_available == 0) || 10595 (pool->pool_type != type) || 10596 (pool->pool_buf_size < size)) { 10597 goto next_pool; 10598 } 10599 10600 10601 /* Adjust free counts based on availablity */ 10602 /* The free reserve count gets first priority */ 10603 pool_free_resv = 10604 min(pool->pool_free_resv, pool->pool_available); 10605 pool_free = 10606 min(pool->pool_free, 10607 (pool->pool_available - pool_free_resv)); 10608 10609 /* Initialize reserve flag */ 10610 resv_flag = reserve; 10611 10612 if (resv_flag) { 10613 if (pool_free_resv == 0) { 10614 if (pool_free == 0) { 10615 goto next_pool; 10616 } 10617 resv_flag = 0; 10618 } 10619 } else if (pool_free == 0) { 10620 goto next_pool; 10621 } 10622 10623 /* Find next available free buffer in this pool */ 10624 for (i = 0; i < pool->pool_nentries; i++) { 10625 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i]; 10626 ub_priv = ubp->ub_fca_private; 10627 10628 if (!ub_priv->available || 10629 ub_priv->flags != EMLXS_UB_FREE) { 10630 continue; 10631 } 10632 10633 ub_priv->time = hba->timer_tics; 10634 10635 /* Timeout in 5 minutes */ 10636 ub_priv->timeout = (5 * 60); 10637 10638 ub_priv->flags = 
EMLXS_UB_IN_USE; 10639 10640 /* Alloc the buffer from the pool */ 10641 if (resv_flag) { 10642 ub_priv->flags |= EMLXS_UB_RESV; 10643 pool->pool_free_resv--; 10644 } else { 10645 pool->pool_free--; 10646 } 10647 10648 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg, 10649 "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp, 10650 ub_priv->token, pool->pool_nentries, 10651 pool->pool_available, pool->pool_free, 10652 pool->pool_free_resv); 10653 10654 mutex_exit(&EMLXS_UB_LOCK); 10655 10656 return (ubp); 10657 } 10658 next_pool: 10659 10660 pool = pool->pool_next; 10661 } 10662 10663 mutex_exit(&EMLXS_UB_LOCK); 10664 10665 return (NULL); 10666 10667 } /* emlxs_ub_get() */ 10668 10669 10670 10671 extern void 10672 emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat, 10673 uint32_t lock) 10674 { 10675 fc_packet_t *pkt; 10676 fcp_rsp_t *fcp_rsp; 10677 uint32_t i; 10678 emlxs_xlat_err_t *tptr; 10679 emlxs_xlat_err_t *entry; 10680 10681 10682 pkt = PRIV2PKT(sbp); 10683 10684 /* Warning: Some FCT sbp's don't have */ 10685 /* fc_packet objects, so just return */ 10686 if (!pkt) { 10687 return; 10688 } 10689 10690 if (lock) { 10691 mutex_enter(&sbp->mtx); 10692 } 10693 10694 if (!(sbp->pkt_flags & PACKET_STATE_VALID)) { 10695 sbp->pkt_flags |= PACKET_STATE_VALID; 10696 10697 /* Perform table lookup */ 10698 entry = NULL; 10699 if (iostat != IOSTAT_LOCAL_REJECT) { 10700 tptr = emlxs_iostat_tbl; 10701 for (i = 0; i < IOSTAT_MAX; i++, tptr++) { 10702 if (iostat == tptr->emlxs_status) { 10703 entry = tptr; 10704 break; 10705 } 10706 } 10707 } else { /* iostate == IOSTAT_LOCAL_REJECT */ 10708 10709 tptr = emlxs_ioerr_tbl; 10710 for (i = 0; i < IOERR_MAX; i++, tptr++) { 10711 if (localstat == tptr->emlxs_status) { 10712 entry = tptr; 10713 break; 10714 } 10715 } 10716 } 10717 10718 if (entry) { 10719 pkt->pkt_state = entry->pkt_state; 10720 pkt->pkt_reason = entry->pkt_reason; 10721 pkt->pkt_expln = entry->pkt_expln; 10722 pkt->pkt_action = entry->pkt_action; 10723 
} else { 10724 /* Set defaults */ 10725 pkt->pkt_state = FC_PKT_TRAN_ERROR; 10726 pkt->pkt_reason = FC_REASON_ABORTED; 10727 pkt->pkt_expln = FC_EXPLN_NONE; 10728 pkt->pkt_action = FC_ACTION_RETRYABLE; 10729 } 10730 10731 10732 /* Set the residual counts and response frame */ 10733 /* Check if response frame was received from the chip */ 10734 /* If so, then the residual counts will already be set */ 10735 if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID | 10736 PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) { 10737 /* We have to create the response frame */ 10738 if (iostat == IOSTAT_SUCCESS) { 10739 pkt->pkt_resp_resid = 0; 10740 pkt->pkt_data_resid = 0; 10741 10742 if ((pkt->pkt_cmd_fhdr.type == 10743 FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen && 10744 pkt->pkt_resp) { 10745 fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp; 10746 10747 fcp_rsp->fcp_u.fcp_status. 10748 rsp_len_set = 1; 10749 fcp_rsp->fcp_response_len = 8; 10750 } 10751 } else { 10752 /* Otherwise assume no data */ 10753 /* and no response received */ 10754 pkt->pkt_data_resid = pkt->pkt_datalen; 10755 pkt->pkt_resp_resid = pkt->pkt_rsplen; 10756 } 10757 } 10758 } 10759 10760 if (lock) { 10761 mutex_exit(&sbp->mtx); 10762 } 10763 10764 return; 10765 10766 } /* emlxs_set_pkt_state() */ 10767 10768 10769 #if (EMLXS_MODREVX == EMLXS_MODREV2X) 10770 10771 extern void 10772 emlxs_swap_service_params(SERV_PARM *sp) 10773 { 10774 uint16_t *p; 10775 int size; 10776 int i; 10777 10778 size = (sizeof (CSP) - 4) / 2; 10779 p = (uint16_t *)&sp->cmn; 10780 for (i = 0; i < size; i++) { 10781 p[i] = LE_SWAP16(p[i]); 10782 } 10783 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov); 10784 10785 size = sizeof (CLASS_PARMS) / 2; 10786 p = (uint16_t *)&sp->cls1; 10787 for (i = 0; i < size; i++, p++) { 10788 *p = LE_SWAP16(*p); 10789 } 10790 10791 size = sizeof (CLASS_PARMS) / 2; 10792 p = (uint16_t *)&sp->cls2; 10793 for (i = 0; i < size; i++, p++) { 10794 *p = LE_SWAP16(*p); 10795 } 10796 10797 size = sizeof (CLASS_PARMS) / 2; 10798 p = 
(uint16_t *)&sp->cls3; 10799 for (i = 0; i < size; i++, p++) { 10800 *p = LE_SWAP16(*p); 10801 } 10802 10803 size = sizeof (CLASS_PARMS) / 2; 10804 p = (uint16_t *)&sp->cls4; 10805 for (i = 0; i < size; i++, p++) { 10806 *p = LE_SWAP16(*p); 10807 } 10808 10809 return; 10810 10811 } /* emlxs_swap_service_params() */ 10812 10813 extern void 10814 emlxs_unswap_pkt(emlxs_buf_t *sbp) 10815 { 10816 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 10817 emlxs_swap_fcp_pkt(sbp); 10818 } 10819 10820 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 10821 emlxs_swap_els_pkt(sbp); 10822 } 10823 10824 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 10825 emlxs_swap_ct_pkt(sbp); 10826 } 10827 10828 } /* emlxs_unswap_pkt() */ 10829 10830 10831 extern void 10832 emlxs_swap_fcp_pkt(emlxs_buf_t *sbp) 10833 { 10834 fc_packet_t *pkt; 10835 FCP_CMND *cmd; 10836 fcp_rsp_t *rsp; 10837 uint16_t *lunp; 10838 uint32_t i; 10839 10840 mutex_enter(&sbp->mtx); 10841 10842 if (sbp->pkt_flags & PACKET_ALLOCATED) { 10843 mutex_exit(&sbp->mtx); 10844 return; 10845 } 10846 10847 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) { 10848 sbp->pkt_flags &= ~PACKET_FCP_SWAPPED; 10849 } else { 10850 sbp->pkt_flags |= PACKET_FCP_SWAPPED; 10851 } 10852 10853 mutex_exit(&sbp->mtx); 10854 10855 pkt = PRIV2PKT(sbp); 10856 10857 cmd = (FCP_CMND *)pkt->pkt_cmd; 10858 rsp = (pkt->pkt_rsplen && 10859 (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ? 10860 (fcp_rsp_t *)pkt->pkt_resp : NULL; 10861 10862 /* The size of data buffer needs to be swapped. */ 10863 cmd->fcpDl = LE_SWAP32(cmd->fcpDl); 10864 10865 /* 10866 * Swap first 2 words of FCP CMND payload. 
10867 */ 10868 lunp = (uint16_t *)&cmd->fcpLunMsl; 10869 for (i = 0; i < 4; i++) { 10870 lunp[i] = LE_SWAP16(lunp[i]); 10871 } 10872 10873 if (rsp) { 10874 rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid); 10875 rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len); 10876 rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len); 10877 } 10878 10879 return; 10880 10881 } /* emlxs_swap_fcp_pkt() */ 10882 10883 10884 extern void 10885 emlxs_swap_els_pkt(emlxs_buf_t *sbp) 10886 { 10887 fc_packet_t *pkt; 10888 uint32_t *cmd; 10889 uint32_t *rsp; 10890 uint32_t command; 10891 uint16_t *c; 10892 uint32_t i; 10893 uint32_t swapped; 10894 10895 mutex_enter(&sbp->mtx); 10896 10897 if (sbp->pkt_flags & PACKET_ALLOCATED) { 10898 mutex_exit(&sbp->mtx); 10899 return; 10900 } 10901 10902 if (sbp->pkt_flags & PACKET_ELS_SWAPPED) { 10903 sbp->pkt_flags &= ~PACKET_ELS_SWAPPED; 10904 swapped = 1; 10905 } else { 10906 sbp->pkt_flags |= PACKET_ELS_SWAPPED; 10907 swapped = 0; 10908 } 10909 10910 mutex_exit(&sbp->mtx); 10911 10912 pkt = PRIV2PKT(sbp); 10913 10914 cmd = (uint32_t *)pkt->pkt_cmd; 10915 rsp = (pkt->pkt_rsplen && 10916 (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ? 
10917 (uint32_t *)pkt->pkt_resp : NULL; 10918 10919 if (!swapped) { 10920 cmd[0] = LE_SWAP32(cmd[0]); 10921 command = cmd[0] & ELS_CMD_MASK; 10922 } else { 10923 command = cmd[0] & ELS_CMD_MASK; 10924 cmd[0] = LE_SWAP32(cmd[0]); 10925 } 10926 10927 if (rsp) { 10928 rsp[0] = LE_SWAP32(rsp[0]); 10929 } 10930 10931 switch (command) { 10932 case ELS_CMD_ACC: 10933 if (sbp->ucmd == ELS_CMD_ADISC) { 10934 /* Hard address of originator */ 10935 cmd[1] = LE_SWAP32(cmd[1]); 10936 10937 /* N_Port ID of originator */ 10938 cmd[6] = LE_SWAP32(cmd[6]); 10939 } 10940 break; 10941 10942 case ELS_CMD_PLOGI: 10943 case ELS_CMD_FLOGI: 10944 case ELS_CMD_FDISC: 10945 if (rsp) { 10946 emlxs_swap_service_params((SERV_PARM *) & rsp[1]); 10947 } 10948 break; 10949 10950 case ELS_CMD_LOGO: 10951 cmd[1] = LE_SWAP32(cmd[1]); /* N_Port ID */ 10952 break; 10953 10954 case ELS_CMD_RLS: 10955 cmd[1] = LE_SWAP32(cmd[1]); 10956 10957 if (rsp) { 10958 for (i = 0; i < 6; i++) { 10959 rsp[1 + i] = LE_SWAP32(rsp[1 + i]); 10960 } 10961 } 10962 break; 10963 10964 case ELS_CMD_ADISC: 10965 cmd[1] = LE_SWAP32(cmd[1]); /* Hard address of originator */ 10966 cmd[6] = LE_SWAP32(cmd[6]); /* N_Port ID of originator */ 10967 break; 10968 10969 case ELS_CMD_PRLI: 10970 c = (uint16_t *)&cmd[1]; 10971 c[1] = LE_SWAP16(c[1]); 10972 10973 cmd[4] = LE_SWAP32(cmd[4]); 10974 10975 if (rsp) { 10976 rsp[4] = LE_SWAP32(rsp[4]); 10977 } 10978 break; 10979 10980 case ELS_CMD_SCR: 10981 cmd[1] = LE_SWAP32(cmd[1]); 10982 break; 10983 10984 case ELS_CMD_LINIT: 10985 if (rsp) { 10986 rsp[1] = LE_SWAP32(rsp[1]); 10987 } 10988 break; 10989 10990 default: 10991 break; 10992 } 10993 10994 return; 10995 10996 } /* emlxs_swap_els_pkt() */ 10997 10998 10999 extern void 11000 emlxs_swap_ct_pkt(emlxs_buf_t *sbp) 11001 { 11002 fc_packet_t *pkt; 11003 uint32_t *cmd; 11004 uint32_t *rsp; 11005 uint32_t command; 11006 uint32_t i; 11007 uint32_t swapped; 11008 11009 mutex_enter(&sbp->mtx); 11010 11011 if (sbp->pkt_flags & PACKET_ALLOCATED) 
{ 11012 mutex_exit(&sbp->mtx); 11013 return; 11014 } 11015 11016 if (sbp->pkt_flags & PACKET_CT_SWAPPED) { 11017 sbp->pkt_flags &= ~PACKET_CT_SWAPPED; 11018 swapped = 1; 11019 } else { 11020 sbp->pkt_flags |= PACKET_CT_SWAPPED; 11021 swapped = 0; 11022 } 11023 11024 mutex_exit(&sbp->mtx); 11025 11026 pkt = PRIV2PKT(sbp); 11027 11028 cmd = (uint32_t *)pkt->pkt_cmd; 11029 rsp = (pkt->pkt_rsplen && 11030 (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ? 11031 (uint32_t *)pkt->pkt_resp : NULL; 11032 11033 if (!swapped) { 11034 cmd[0] = 0x01000000; 11035 command = cmd[2]; 11036 } 11037 11038 cmd[0] = LE_SWAP32(cmd[0]); 11039 cmd[1] = LE_SWAP32(cmd[1]); 11040 cmd[2] = LE_SWAP32(cmd[2]); 11041 cmd[3] = LE_SWAP32(cmd[3]); 11042 11043 if (swapped) { 11044 command = cmd[2]; 11045 } 11046 11047 switch ((command >> 16)) { 11048 case SLI_CTNS_GA_NXT: 11049 cmd[4] = LE_SWAP32(cmd[4]); 11050 break; 11051 11052 case SLI_CTNS_GPN_ID: 11053 case SLI_CTNS_GNN_ID: 11054 case SLI_CTNS_RPN_ID: 11055 case SLI_CTNS_RNN_ID: 11056 case SLI_CTNS_RSPN_ID: 11057 cmd[4] = LE_SWAP32(cmd[4]); 11058 break; 11059 11060 case SLI_CTNS_RCS_ID: 11061 case SLI_CTNS_RPT_ID: 11062 cmd[4] = LE_SWAP32(cmd[4]); 11063 cmd[5] = LE_SWAP32(cmd[5]); 11064 break; 11065 11066 case SLI_CTNS_RFT_ID: 11067 cmd[4] = LE_SWAP32(cmd[4]); 11068 11069 /* Swap FC4 types */ 11070 for (i = 0; i < 8; i++) { 11071 cmd[5 + i] = LE_SWAP32(cmd[5 + i]); 11072 } 11073 break; 11074 11075 case SLI_CTNS_GFT_ID: 11076 if (rsp) { 11077 /* Swap FC4 types */ 11078 for (i = 0; i < 8; i++) { 11079 rsp[4 + i] = LE_SWAP32(rsp[4 + i]); 11080 } 11081 } 11082 break; 11083 11084 case SLI_CTNS_GCS_ID: 11085 case SLI_CTNS_GSPN_ID: 11086 case SLI_CTNS_GSNN_NN: 11087 case SLI_CTNS_GIP_NN: 11088 case SLI_CTNS_GIPA_NN: 11089 11090 case SLI_CTNS_GPT_ID: 11091 case SLI_CTNS_GID_NN: 11092 case SLI_CTNS_GNN_IP: 11093 case SLI_CTNS_GIPA_IP: 11094 case SLI_CTNS_GID_FT: 11095 case SLI_CTNS_GID_PT: 11096 case SLI_CTNS_GID_PN: 11097 case SLI_CTNS_RIP_NN: 11098 case 
SLI_CTNS_RIPA_NN: 11099 case SLI_CTNS_RSNN_NN: 11100 case SLI_CTNS_DA_ID: 11101 case SLI_CT_RESPONSE_FS_RJT: 11102 case SLI_CT_RESPONSE_FS_ACC: 11103 11104 default: 11105 break; 11106 } 11107 return; 11108 11109 } /* emlxs_swap_ct_pkt() */ 11110 11111 11112 extern void 11113 emlxs_swap_els_ub(fc_unsol_buf_t *ubp) 11114 { 11115 emlxs_ub_priv_t *ub_priv; 11116 fc_rscn_t *rscn; 11117 uint32_t count; 11118 uint32_t i; 11119 uint32_t *lp; 11120 la_els_logi_t *logi; 11121 11122 ub_priv = ubp->ub_fca_private; 11123 11124 switch (ub_priv->cmd) { 11125 case ELS_CMD_RSCN: 11126 rscn = (fc_rscn_t *)ubp->ub_buffer; 11127 11128 rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len); 11129 11130 count = ((rscn->rscn_payload_len - 4) / 4); 11131 lp = (uint32_t *)ubp->ub_buffer + 1; 11132 for (i = 0; i < count; i++, lp++) { 11133 *lp = LE_SWAP32(*lp); 11134 } 11135 11136 break; 11137 11138 case ELS_CMD_FLOGI: 11139 case ELS_CMD_PLOGI: 11140 case ELS_CMD_FDISC: 11141 case ELS_CMD_PDISC: 11142 logi = (la_els_logi_t *)ubp->ub_buffer; 11143 emlxs_swap_service_params( 11144 (SERV_PARM *)&logi->common_service); 11145 break; 11146 11147 /* ULP handles this */ 11148 case ELS_CMD_LOGO: 11149 case ELS_CMD_PRLI: 11150 case ELS_CMD_PRLO: 11151 case ELS_CMD_ADISC: 11152 default: 11153 break; 11154 } 11155 11156 return; 11157 11158 } /* emlxs_swap_els_ub() */ 11159 11160 11161 #endif /* EMLXS_MODREV2X */ 11162 11163 11164 extern char * 11165 emlxs_mode_xlate(uint32_t mode) 11166 { 11167 static char buffer[32]; 11168 uint32_t i; 11169 uint32_t count; 11170 11171 count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t); 11172 for (i = 0; i < count; i++) { 11173 if (mode == emlxs_mode_table[i].code) { 11174 return (emlxs_mode_table[i].string); 11175 } 11176 } 11177 11178 (void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode); 11179 return (buffer); 11180 11181 } /* emlxs_mode_xlate() */ 11182 11183 11184 extern char * 11185 emlxs_elscmd_xlate(uint32_t elscmd) 11186 { 11187 static 
char buffer[32]; 11188 uint32_t i; 11189 uint32_t count; 11190 11191 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t); 11192 for (i = 0; i < count; i++) { 11193 if (elscmd == emlxs_elscmd_table[i].code) { 11194 return (emlxs_elscmd_table[i].string); 11195 } 11196 } 11197 11198 (void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd); 11199 return (buffer); 11200 11201 } /* emlxs_elscmd_xlate() */ 11202 11203 11204 extern char * 11205 emlxs_ctcmd_xlate(uint32_t ctcmd) 11206 { 11207 static char buffer[32]; 11208 uint32_t i; 11209 uint32_t count; 11210 11211 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t); 11212 for (i = 0; i < count; i++) { 11213 if (ctcmd == emlxs_ctcmd_table[i].code) { 11214 return (emlxs_ctcmd_table[i].string); 11215 } 11216 } 11217 11218 (void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd); 11219 return (buffer); 11220 11221 } /* emlxs_ctcmd_xlate() */ 11222 11223 11224 #ifdef MENLO_SUPPORT 11225 extern char * 11226 emlxs_menlo_cmd_xlate(uint32_t cmd) 11227 { 11228 static char buffer[32]; 11229 uint32_t i; 11230 uint32_t count; 11231 11232 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t); 11233 for (i = 0; i < count; i++) { 11234 if (cmd == emlxs_menlo_cmd_table[i].code) { 11235 return (emlxs_menlo_cmd_table[i].string); 11236 } 11237 } 11238 11239 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd); 11240 return (buffer); 11241 11242 } /* emlxs_menlo_cmd_xlate() */ 11243 11244 extern char * 11245 emlxs_menlo_rsp_xlate(uint32_t rsp) 11246 { 11247 static char buffer[32]; 11248 uint32_t i; 11249 uint32_t count; 11250 11251 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t); 11252 for (i = 0; i < count; i++) { 11253 if (rsp == emlxs_menlo_rsp_table[i].code) { 11254 return (emlxs_menlo_rsp_table[i].string); 11255 } 11256 } 11257 11258 (void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp); 11259 return (buffer); 11260 11261 } /* emlxs_menlo_rsp_xlate() */ 11262 11263 #endif /* 
MENLO_SUPPORT */ 11264 11265 11266 extern char * 11267 emlxs_rmcmd_xlate(uint32_t rmcmd) 11268 { 11269 static char buffer[32]; 11270 uint32_t i; 11271 uint32_t count; 11272 11273 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t); 11274 for (i = 0; i < count; i++) { 11275 if (rmcmd == emlxs_rmcmd_table[i].code) { 11276 return (emlxs_rmcmd_table[i].string); 11277 } 11278 } 11279 11280 (void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd); 11281 return (buffer); 11282 11283 } /* emlxs_rmcmd_xlate() */ 11284 11285 11286 11287 extern char * 11288 emlxs_mscmd_xlate(uint16_t mscmd) 11289 { 11290 static char buffer[32]; 11291 uint32_t i; 11292 uint32_t count; 11293 11294 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t); 11295 for (i = 0; i < count; i++) { 11296 if (mscmd == emlxs_mscmd_table[i].code) { 11297 return (emlxs_mscmd_table[i].string); 11298 } 11299 } 11300 11301 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd); 11302 return (buffer); 11303 11304 } /* emlxs_mscmd_xlate() */ 11305 11306 11307 extern char * 11308 emlxs_state_xlate(uint8_t state) 11309 { 11310 static char buffer[32]; 11311 uint32_t i; 11312 uint32_t count; 11313 11314 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t); 11315 for (i = 0; i < count; i++) { 11316 if (state == emlxs_state_table[i].code) { 11317 return (emlxs_state_table[i].string); 11318 } 11319 } 11320 11321 (void) snprintf(buffer, sizeof (buffer), "State=0x%x", state); 11322 return (buffer); 11323 11324 } /* emlxs_state_xlate() */ 11325 11326 11327 extern char * 11328 emlxs_error_xlate(uint8_t errno) 11329 { 11330 static char buffer[32]; 11331 uint32_t i; 11332 uint32_t count; 11333 11334 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t); 11335 for (i = 0; i < count; i++) { 11336 if (errno == emlxs_error_table[i].code) { 11337 return (emlxs_error_table[i].string); 11338 } 11339 } 11340 11341 (void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno); 11342 return (buffer); 
11343 11344 } /* emlxs_error_xlate() */ 11345 11346 11347 static int 11348 emlxs_pm_lower_power(dev_info_t *dip) 11349 { 11350 int ddiinst; 11351 int emlxinst; 11352 emlxs_config_t *cfg; 11353 int32_t rval; 11354 emlxs_hba_t *hba; 11355 11356 ddiinst = ddi_get_instance(dip); 11357 emlxinst = emlxs_get_instance(ddiinst); 11358 hba = emlxs_device.hba[emlxinst]; 11359 cfg = &CFG; 11360 11361 rval = DDI_SUCCESS; 11362 11363 /* Lower the power level */ 11364 if (cfg[CFG_PM_SUPPORT].current) { 11365 rval = 11366 pm_lower_power(dip, EMLXS_PM_ADAPTER, 11367 EMLXS_PM_ADAPTER_DOWN); 11368 } else { 11369 /* We do not have kernel support of power management enabled */ 11370 /* therefore, call our power management routine directly */ 11371 rval = 11372 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN); 11373 } 11374 11375 return (rval); 11376 11377 } /* emlxs_pm_lower_power() */ 11378 11379 11380 static int 11381 emlxs_pm_raise_power(dev_info_t *dip) 11382 { 11383 int ddiinst; 11384 int emlxinst; 11385 emlxs_config_t *cfg; 11386 int32_t rval; 11387 emlxs_hba_t *hba; 11388 11389 ddiinst = ddi_get_instance(dip); 11390 emlxinst = emlxs_get_instance(ddiinst); 11391 hba = emlxs_device.hba[emlxinst]; 11392 cfg = &CFG; 11393 11394 /* Raise the power level */ 11395 if (cfg[CFG_PM_SUPPORT].current) { 11396 rval = 11397 pm_raise_power(dip, EMLXS_PM_ADAPTER, 11398 EMLXS_PM_ADAPTER_UP); 11399 } else { 11400 /* We do not have kernel support of power management enabled */ 11401 /* therefore, call our power management routine directly */ 11402 rval = 11403 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP); 11404 } 11405 11406 return (rval); 11407 11408 } /* emlxs_pm_raise_power() */ 11409 11410 11411 #ifdef IDLE_TIMER 11412 11413 extern int 11414 emlxs_pm_busy_component(emlxs_hba_t *hba) 11415 { 11416 emlxs_config_t *cfg = &CFG; 11417 int rval; 11418 11419 hba->pm_active = 1; 11420 11421 if (hba->pm_busy) { 11422 return (DDI_SUCCESS); 11423 } 11424 11425 
mutex_enter(&EMLXS_PM_LOCK); 11426 11427 if (hba->pm_busy) { 11428 mutex_exit(&EMLXS_PM_LOCK); 11429 return (DDI_SUCCESS); 11430 } 11431 hba->pm_busy = 1; 11432 11433 mutex_exit(&EMLXS_PM_LOCK); 11434 11435 /* Attempt to notify system that we are busy */ 11436 if (cfg[CFG_PM_SUPPORT].current) { 11437 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 11438 "pm_busy_component."); 11439 11440 rval = pm_busy_component(dip, EMLXS_PM_ADAPTER); 11441 11442 if (rval != DDI_SUCCESS) { 11443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 11444 "pm_busy_component failed. ret=%d", rval); 11445 11446 /* If this attempt failed then clear our flags */ 11447 mutex_enter(&EMLXS_PM_LOCK); 11448 hba->pm_busy = 0; 11449 mutex_exit(&EMLXS_PM_LOCK); 11450 11451 return (rval); 11452 } 11453 } 11454 11455 return (DDI_SUCCESS); 11456 11457 } /* emlxs_pm_busy_component() */ 11458 11459 11460 extern int 11461 emlxs_pm_idle_component(emlxs_hba_t *hba) 11462 { 11463 emlxs_config_t *cfg = &CFG; 11464 int rval; 11465 11466 if (!hba->pm_busy) { 11467 return (DDI_SUCCESS); 11468 } 11469 11470 mutex_enter(&EMLXS_PM_LOCK); 11471 11472 if (!hba->pm_busy) { 11473 mutex_exit(&EMLXS_PM_LOCK); 11474 return (DDI_SUCCESS); 11475 } 11476 hba->pm_busy = 0; 11477 11478 mutex_exit(&EMLXS_PM_LOCK); 11479 11480 if (cfg[CFG_PM_SUPPORT].current) { 11481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 11482 "pm_idle_component."); 11483 11484 rval = pm_idle_component(dip, EMLXS_PM_ADAPTER); 11485 11486 if (rval != DDI_SUCCESS) { 11487 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, 11488 "pm_idle_component failed. 
ret=%d", rval); 11489 11490 /* If this attempt failed then */ 11491 /* reset our flags for another attempt */ 11492 mutex_enter(&EMLXS_PM_LOCK); 11493 hba->pm_busy = 1; 11494 mutex_exit(&EMLXS_PM_LOCK); 11495 11496 return (rval); 11497 } 11498 } 11499 11500 return (DDI_SUCCESS); 11501 11502 } /* emlxs_pm_idle_component() */ 11503 11504 11505 extern void 11506 emlxs_pm_idle_timer(emlxs_hba_t *hba) 11507 { 11508 emlxs_config_t *cfg = &CFG; 11509 11510 if (hba->pm_active) { 11511 /* Clear active flag and reset idle timer */ 11512 mutex_enter(&EMLXS_PM_LOCK); 11513 hba->pm_active = 0; 11514 hba->pm_idle_timer = 11515 hba->timer_tics + cfg[CFG_PM_IDLE].current; 11516 mutex_exit(&EMLXS_PM_LOCK); 11517 } 11518 11519 /* Check for idle timeout */ 11520 else if (hba->timer_tics >= hba->pm_idle_timer) { 11521 if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) { 11522 mutex_enter(&EMLXS_PM_LOCK); 11523 hba->pm_idle_timer = 11524 hba->timer_tics + cfg[CFG_PM_IDLE].current; 11525 mutex_exit(&EMLXS_PM_LOCK); 11526 } 11527 } 11528 11529 return; 11530 11531 } /* emlxs_pm_idle_timer() */ 11532 11533 #endif /* IDLE_TIMER */ 11534 11535 11536 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4) 11537 static void 11538 emlxs_read_vport_prop(emlxs_hba_t *hba) 11539 { 11540 emlxs_port_t *port = &PPORT; 11541 emlxs_config_t *cfg = &CFG; 11542 char **arrayp; 11543 uint8_t *s; 11544 uint8_t *np; 11545 NAME_TYPE pwwpn; 11546 NAME_TYPE wwnn; 11547 NAME_TYPE wwpn; 11548 uint32_t vpi; 11549 uint32_t cnt; 11550 uint32_t rval; 11551 uint32_t i; 11552 uint32_t j; 11553 uint32_t c1; 11554 uint32_t sum; 11555 uint32_t errors; 11556 char buffer[64]; 11557 11558 /* Check for the per adapter vport setting */ 11559 (void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME, 11560 hba->ddiinst); 11561 cnt = 0; 11562 arrayp = NULL; 11563 rval = 11564 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 11565 (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt); 11566 11567 if ((rval != 
DDI_PROP_SUCCESS) || !cnt || !arrayp) { 11568 /* Check for the global vport setting */ 11569 cnt = 0; 11570 arrayp = NULL; 11571 rval = 11572 ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip, 11573 (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt); 11574 } 11575 11576 if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) { 11577 return; 11578 } 11579 11580 for (i = 0; i < cnt; i++) { 11581 errors = 0; 11582 s = (uint8_t *)arrayp[i]; 11583 11584 if (!s) { 11585 break; 11586 } 11587 11588 np = (uint8_t *)&pwwpn; 11589 for (j = 0; j < sizeof (NAME_TYPE); j++) { 11590 c1 = *s++; 11591 if ((c1 >= '0') && (c1 <= '9')) { 11592 sum = ((c1 - '0') << 4); 11593 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11594 sum = ((c1 - 'a' + 10) << 4); 11595 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11596 sum = ((c1 - 'A' + 10) << 4); 11597 } else { 11598 EMLXS_MSGF(EMLXS_CONTEXT, 11599 &emlxs_attach_debug_msg, 11600 "Config error: Invalid PWWPN found. " 11601 "entry=%d byte=%d hi_nibble=%c", 11602 i, j, c1); 11603 errors++; 11604 } 11605 11606 c1 = *s++; 11607 if ((c1 >= '0') && (c1 <= '9')) { 11608 sum |= (c1 - '0'); 11609 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11610 sum |= (c1 - 'a' + 10); 11611 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11612 sum |= (c1 - 'A' + 10); 11613 } else { 11614 EMLXS_MSGF(EMLXS_CONTEXT, 11615 &emlxs_attach_debug_msg, 11616 "Config error: Invalid PWWPN found. " 11617 "entry=%d byte=%d lo_nibble=%c", 11618 i, j, c1); 11619 errors++; 11620 } 11621 11622 *np++ = (uint8_t)sum; 11623 } 11624 11625 if (*s++ != ':') { 11626 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 11627 "Config error: Invalid delimiter after PWWPN. 
" 11628 "entry=%d", i); 11629 goto out; 11630 } 11631 11632 np = (uint8_t *)&wwnn; 11633 for (j = 0; j < sizeof (NAME_TYPE); j++) { 11634 c1 = *s++; 11635 if ((c1 >= '0') && (c1 <= '9')) { 11636 sum = ((c1 - '0') << 4); 11637 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11638 sum = ((c1 - 'a' + 10) << 4); 11639 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11640 sum = ((c1 - 'A' + 10) << 4); 11641 } else { 11642 EMLXS_MSGF(EMLXS_CONTEXT, 11643 &emlxs_attach_debug_msg, 11644 "Config error: Invalid WWNN found. " 11645 "entry=%d byte=%d hi_nibble=%c", 11646 i, j, c1); 11647 errors++; 11648 } 11649 11650 c1 = *s++; 11651 if ((c1 >= '0') && (c1 <= '9')) { 11652 sum |= (c1 - '0'); 11653 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11654 sum |= (c1 - 'a' + 10); 11655 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11656 sum |= (c1 - 'A' + 10); 11657 } else { 11658 EMLXS_MSGF(EMLXS_CONTEXT, 11659 &emlxs_attach_debug_msg, 11660 "Config error: Invalid WWNN found. " 11661 "entry=%d byte=%d lo_nibble=%c", 11662 i, j, c1); 11663 errors++; 11664 } 11665 11666 *np++ = (uint8_t)sum; 11667 } 11668 11669 if (*s++ != ':') { 11670 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 11671 "Config error: Invalid delimiter after WWNN. " 11672 "entry=%d", i); 11673 goto out; 11674 } 11675 11676 np = (uint8_t *)&wwpn; 11677 for (j = 0; j < sizeof (NAME_TYPE); j++) { 11678 c1 = *s++; 11679 if ((c1 >= '0') && (c1 <= '9')) { 11680 sum = ((c1 - '0') << 4); 11681 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11682 sum = ((c1 - 'a' + 10) << 4); 11683 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11684 sum = ((c1 - 'A' + 10) << 4); 11685 } else { 11686 EMLXS_MSGF(EMLXS_CONTEXT, 11687 &emlxs_attach_debug_msg, 11688 "Config error: Invalid WWPN found. 
" 11689 "entry=%d byte=%d hi_nibble=%c", 11690 i, j, c1); 11691 11692 errors++; 11693 } 11694 11695 c1 = *s++; 11696 if ((c1 >= '0') && (c1 <= '9')) { 11697 sum |= (c1 - '0'); 11698 } else if ((c1 >= 'a') && (c1 <= 'f')) { 11699 sum |= (c1 - 'a' + 10); 11700 } else if ((c1 >= 'A') && (c1 <= 'F')) { 11701 sum |= (c1 - 'A' + 10); 11702 } else { 11703 EMLXS_MSGF(EMLXS_CONTEXT, 11704 &emlxs_attach_debug_msg, 11705 "Config error: Invalid WWPN found. " 11706 "entry=%d byte=%d lo_nibble=%c", 11707 i, j, c1); 11708 11709 errors++; 11710 } 11711 11712 *np++ = (uint8_t)sum; 11713 } 11714 11715 if (*s++ != ':') { 11716 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg, 11717 "Config error: Invalid delimiter after WWPN. " 11718 "entry=%d", i); 11719 11720 goto out; 11721 } 11722 11723 sum = 0; 11724 do { 11725 c1 = *s++; 11726 if ((c1 < '0') || (c1 > '9')) { 11727 EMLXS_MSGF(EMLXS_CONTEXT, 11728 &emlxs_attach_debug_msg, 11729 "Config error: Invalid VPI found. " 11730 "entry=%d c=%c vpi=%d", i, c1, sum); 11731 11732 goto out; 11733 } 11734 11735 sum = (sum * 10) + (c1 - '0'); 11736 11737 } while (*s != 0); 11738 11739 vpi = sum; 11740 11741 if (errors) { 11742 continue; 11743 } 11744 11745 /* Entry has been read */ 11746 11747 /* Check if the physical port wwpn */ 11748 /* matches our physical port wwpn */ 11749 if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) { 11750 continue; 11751 } 11752 11753 /* Check vpi range */ 11754 if ((vpi == 0) || (vpi >= MAX_VPORTS)) { 11755 continue; 11756 } 11757 11758 /* Check if port has already been configured */ 11759 if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) { 11760 continue; 11761 } 11762 11763 /* Set the highest configured vpi */ 11764 if (vpi > hba->vpi_high) { 11765 hba->vpi_high = vpi; 11766 } 11767 11768 bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn, 11769 sizeof (NAME_TYPE)); 11770 bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn, 11771 sizeof (NAME_TYPE)); 11772 11773 if (hba->port[vpi].snn[0] == 0) { 11774 (void) 
strncpy((caddr_t)hba->port[vpi].snn, 11775 (caddr_t)hba->snn, 11776 (sizeof (hba->port[vpi].snn)-1)); 11777 } 11778 11779 if (hba->port[vpi].spn[0] == 0) { 11780 (void) snprintf((caddr_t)hba->port[vpi].spn, 11781 sizeof (hba->port[vpi].spn), 11782 "%s VPort-%d", 11783 (caddr_t)hba->spn, vpi); 11784 } 11785 11786 hba->port[vpi].flag |= 11787 (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED); 11788 11789 if (cfg[CFG_VPORT_RESTRICTED].current) { 11790 hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED; 11791 } 11792 } 11793 11794 out: 11795 11796 (void) ddi_prop_free((void *) arrayp); 11797 return; 11798 11799 } /* emlxs_read_vport_prop() */ 11800 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */ 11801 11802 11803 extern char * 11804 emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn) 11805 { 11806 (void) snprintf(buffer, len, "%02x%02x%02x%02x%02x%02x%02x%02x", 11807 wwn[0] & 0xff, wwn[1] & 0xff, wwn[2] & 0xff, wwn[3] & 0xff, 11808 wwn[4] & 0xff, wwn[5] & 0xff, wwn[6] & 0xff, wwn[7] & 0xff); 11809 11810 return (buffer); 11811 11812 } /* emlxs_wwn_xlate() */ 11813 11814 11815 extern int32_t 11816 emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2) 11817 { 11818 uint32_t i; 11819 11820 for (i = 0; i < 8; i ++, wwn1 ++, wwn2 ++) { 11821 if (*wwn1 > *wwn2) { 11822 return (1); 11823 } 11824 if (*wwn1 < *wwn2) { 11825 return (-1); 11826 } 11827 } 11828 11829 return (0); 11830 11831 } /* emlxs_wwn_cmp() */ 11832 11833 11834 /* This is called at port online and offline */ 11835 extern void 11836 emlxs_ub_flush(emlxs_port_t *port) 11837 { 11838 emlxs_hba_t *hba = HBA; 11839 fc_unsol_buf_t *ubp; 11840 emlxs_ub_priv_t *ub_priv; 11841 emlxs_ub_priv_t *next; 11842 11843 /* Return if nothing to do */ 11844 if (!port->ub_wait_head) { 11845 return; 11846 } 11847 11848 mutex_enter(&EMLXS_PORT_LOCK); 11849 ub_priv = port->ub_wait_head; 11850 port->ub_wait_head = NULL; 11851 port->ub_wait_tail = NULL; 11852 mutex_exit(&EMLXS_PORT_LOCK); 11853 11854 while (ub_priv) { 11855 next = ub_priv->next; 11856 ubp = 
ub_priv->ubp; 11857 11858 /* Check if ULP is online and we have a callback function */ 11859 if (port->ulp_statec != FC_STATE_OFFLINE) { 11860 /* Send ULP the ub buffer */ 11861 emlxs_ulp_unsol_cb(port, ubp); 11862 } else { /* Drop the buffer */ 11863 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token); 11864 } 11865 11866 ub_priv = next; 11867 11868 } /* while () */ 11869 11870 return; 11871 11872 } /* emlxs_ub_flush() */ 11873 11874 11875 extern void 11876 emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp) 11877 { 11878 emlxs_hba_t *hba = HBA; 11879 emlxs_ub_priv_t *ub_priv; 11880 11881 ub_priv = ubp->ub_fca_private; 11882 11883 /* Check if ULP is online */ 11884 if (port->ulp_statec != FC_STATE_OFFLINE) { 11885 emlxs_ulp_unsol_cb(port, ubp); 11886 11887 } else { /* ULP offline */ 11888 11889 if (hba->state >= FC_LINK_UP) { 11890 /* Add buffer to queue tail */ 11891 mutex_enter(&EMLXS_PORT_LOCK); 11892 11893 if (port->ub_wait_tail) { 11894 port->ub_wait_tail->next = ub_priv; 11895 } 11896 port->ub_wait_tail = ub_priv; 11897 11898 if (!port->ub_wait_head) { 11899 port->ub_wait_head = ub_priv; 11900 } 11901 11902 mutex_exit(&EMLXS_PORT_LOCK); 11903 } else { 11904 (void) emlxs_fca_ub_release(port, 1, &ubp->ub_token); 11905 } 11906 } 11907 11908 return; 11909 11910 } /* emlxs_ub_callback() */ 11911 11912 11913 extern void 11914 emlxs_fca_link_up(emlxs_port_t *port) 11915 { 11916 emlxs_ulp_statec_cb(port, port->ulp_statec); 11917 return; 11918 11919 } /* emlxs_fca_link_up() */ 11920 11921 11922 extern void 11923 emlxs_fca_link_down(emlxs_port_t *port) 11924 { 11925 emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE); 11926 return; 11927 11928 } /* emlxs_fca_link_down() */ 11929 11930 11931 static uint32_t 11932 emlxs_integrity_check(emlxs_hba_t *hba) 11933 { 11934 uint32_t size; 11935 uint32_t errors = 0; 11936 int ddiinst = hba->ddiinst; 11937 11938 size = 16; 11939 if (sizeof (ULP_BDL) != size) { 11940 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. 
%d != 16", 11941 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL)); 11942 11943 errors++; 11944 } 11945 size = 8; 11946 if (sizeof (ULP_BDE) != size) { 11947 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8", 11948 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE)); 11949 11950 errors++; 11951 } 11952 size = 12; 11953 if (sizeof (ULP_BDE64) != size) { 11954 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12", 11955 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64)); 11956 11957 errors++; 11958 } 11959 size = 16; 11960 if (sizeof (HBQE_t) != size) { 11961 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16", 11962 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t)); 11963 11964 errors++; 11965 } 11966 size = 8; 11967 if (sizeof (HGP) != size) { 11968 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8", 11969 DRIVER_NAME, ddiinst, (int)sizeof (HGP)); 11970 11971 errors++; 11972 } 11973 if (sizeof (PGP) != size) { 11974 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8", 11975 DRIVER_NAME, ddiinst, (int)sizeof (PGP)); 11976 11977 errors++; 11978 } 11979 size = 4; 11980 if (sizeof (WORD5) != size) { 11981 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4", 11982 DRIVER_NAME, ddiinst, (int)sizeof (WORD5)); 11983 11984 errors++; 11985 } 11986 size = 124; 11987 if (sizeof (MAILVARIANTS) != size) { 11988 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. " 11989 "%d != 124", DRIVER_NAME, ddiinst, 11990 (int)sizeof (MAILVARIANTS)); 11991 11992 errors++; 11993 } 11994 size = 128; 11995 if (sizeof (SLI1_DESC) != size) { 11996 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128", 11997 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC)); 11998 11999 errors++; 12000 } 12001 if (sizeof (SLI2_DESC) != size) { 12002 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. 
%d != 128", 12003 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC)); 12004 12005 errors++; 12006 } 12007 size = MBOX_SIZE; 12008 if (sizeof (MAILBOX) != size) { 12009 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d", 12010 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE); 12011 12012 errors++; 12013 } 12014 size = PCB_SIZE; 12015 if (sizeof (PCB) != size) { 12016 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d", 12017 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE); 12018 12019 errors++; 12020 } 12021 size = 260; 12022 if (sizeof (ATTRIBUTE_ENTRY) != size) { 12023 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. " 12024 "%d != 260", DRIVER_NAME, ddiinst, 12025 (int)sizeof (ATTRIBUTE_ENTRY)); 12026 12027 errors++; 12028 } 12029 size = SLI_SLIM1_SIZE; 12030 if (sizeof (SLIM1) != size) { 12031 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d", 12032 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE); 12033 12034 errors++; 12035 } 12036 size = SLI3_IOCB_CMD_SIZE; 12037 if (sizeof (IOCB) != size) { 12038 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d", 12039 DRIVER_NAME, ddiinst, (int)sizeof (IOCB), 12040 SLI3_IOCB_CMD_SIZE); 12041 12042 errors++; 12043 } 12044 12045 size = SLI_SLIM2_SIZE; 12046 if (sizeof (SLIM2) != size) { 12047 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. 
%d != %d", 12048 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2), 12049 SLI_SLIM2_SIZE); 12050 12051 errors++; 12052 } 12053 return (errors); 12054 12055 } /* emlxs_integrity_check() */ 12056 12057 12058 #ifdef FMA_SUPPORT 12059 /* 12060 * FMA support 12061 */ 12062 12063 extern void 12064 emlxs_fm_init(emlxs_hba_t *hba) 12065 { 12066 ddi_iblock_cookie_t iblk; 12067 12068 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 12069 return; 12070 } 12071 12072 if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 12073 emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 12074 emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 12075 } 12076 12077 if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) { 12078 hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR; 12079 hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR; 12080 hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR; 12081 hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR; 12082 } else { 12083 hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12084 hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12085 hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12086 hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR; 12087 } 12088 12089 ddi_fm_init(hba->dip, &hba->fm_caps, &iblk); 12090 12091 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 12092 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 12093 pci_ereport_setup(hba->dip); 12094 } 12095 12096 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 12097 ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb, 12098 (void *)hba); 12099 } 12100 12101 } /* emlxs_fm_init() */ 12102 12103 12104 extern void 12105 emlxs_fm_fini(emlxs_hba_t *hba) 12106 { 12107 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) { 12108 return; 12109 } 12110 12111 if (DDI_FM_EREPORT_CAP(hba->fm_caps) || 12112 DDI_FM_ERRCB_CAP(hba->fm_caps)) { 12113 pci_ereport_teardown(hba->dip); 12114 } 12115 12116 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) { 12117 ddi_fm_handler_unregister(hba->dip); 12118 } 12119 12120 (void) ddi_fm_fini(hba->dip); 12121 12122 } /* emlxs_fm_fini() */ 
12123 12124 12125 extern int 12126 emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle) 12127 { 12128 ddi_fm_error_t err; 12129 12130 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 12131 return (DDI_FM_OK); 12132 } 12133 12134 /* Some S10 versions do not define the ahi_err structure */ 12135 if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) { 12136 return (DDI_FM_OK); 12137 } 12138 12139 err.fme_status = DDI_FM_OK; 12140 (void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION); 12141 12142 /* Some S10 versions do not define the ddi_fm_acc_err_clear function */ 12143 if ((void *)&ddi_fm_acc_err_clear != NULL) { 12144 (void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 12145 } 12146 12147 return (err.fme_status); 12148 12149 } /* emlxs_fm_check_acc_handle() */ 12150 12151 12152 extern int 12153 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle) 12154 { 12155 ddi_fm_error_t err; 12156 12157 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) { 12158 return (DDI_FM_OK); 12159 } 12160 12161 err.fme_status = DDI_FM_OK; 12162 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION); 12163 12164 return (err.fme_status); 12165 12166 } /* emlxs_fm_check_dma_handle() */ 12167 12168 12169 extern void 12170 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail) 12171 { 12172 uint64_t ena; 12173 char buf[FM_MAX_CLASS]; 12174 12175 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 12176 return; 12177 } 12178 12179 if (detail == NULL) { 12180 return; 12181 } 12182 12183 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 12184 ena = fm_ena_generate(0, FM_ENA_FMT1); 12185 12186 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP, 12187 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 12188 12189 } /* emlxs_fm_ereport() */ 12190 12191 12192 extern void 12193 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact) 12194 { 12195 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) { 12196 return; 12197 } 12198 12199 if (impact == 0) { 12200 return; 12201 } 12202 12203 if 
((hba->pm_state & EMLXS_PM_IN_DETACH) &&
	    (impact == DDI_SERVICE_DEGRADED)) {
		/* While detaching, soften DEGRADED to UNAFFECTED */
		impact = DDI_SERVICE_UNAFFECTED;
	}

	ddi_fm_service_impact(hba->dip, impact);

	return;

} /* emlxs_fm_service_impact() */


/*
 * The I/O fault service error handling callback function
 */
/*ARGSUSED*/
extern int
emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data)
{
	/*
	 * as the driver can always deal with an error
	 * in any dma or access handle, we can just return
	 * the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);

} /* emlxs_fm_error_cb() */

/*
 * Check the SLIM2 DMA handle and the packet's cmd/resp/data DMA handles
 * for faults.  On any fault, mark the packet FC_PKT_TRAN_ERROR /
 * FC_REASON_DMA_ERROR (retryable); a SLIM2 fault additionally latches
 * FC_DMA_CHECK_ERROR in hba->flag under EMLXS_PORT_LOCK.
 */
extern void
emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
{
	emlxs_port_t *port = sbp->port;
	fc_packet_t *pkt = PRIV2PKT(sbp);

	/* SLIM2 lives in different sli union members per SLI mode */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli4.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "slim2: hdl=%p",
			    hba->sli.sli4.slim2.dma_handle);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DMA_CHECK_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	} else {
		if (emlxs_fm_check_dma_handle(hba,
		    hba->sli.sli3.slim2.dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "slim2: hdl=%p",
			    hba->sli.sli3.slim2.dma_handle);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->flag |= FC_DMA_CHECK_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);
		}
	}

	/* A SLIM2 fault fails the packet immediately */
	if (hba->flag & FC_DMA_CHECK_ERROR) {
		pkt->pkt_state = FC_PKT_TRAN_ERROR;
		pkt->pkt_reason = FC_REASON_DMA_ERROR;
		pkt->pkt_expln = FC_EXPLN_NONE;
		pkt->pkt_action = FC_ACTION_RETRYABLE;
		return;
	}

	if (pkt->pkt_cmdlen) {
		if
(emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_cmd_dma: hdl=%p",
			    pkt->pkt_cmd_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	/* Response buffer DMA handle, if a response is expected */
	if (pkt->pkt_rsplen) {
		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_resp_dma: hdl=%p",
			    pkt->pkt_resp_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	/* Data buffer DMA handle, if the packet carries payload */
	if (pkt->pkt_datalen) {
		if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "pkt_data_dma: hdl=%p",
			    pkt->pkt_data_dma);

			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_DMA_ERROR;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;

			return;
		}
	}

	return;

} /* emlxs_check_dma() */
#endif /* FMA_SUPPORT */


/*
 * Byte-swap a buffer of 32-bit words in place.  size is in bytes and
 * must be a multiple of 4 (VERIFY'd).
 */
extern void
emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
{
	uint32_t word;
	uint32_t *wptr;
	uint32_t i;

	/* Buffer must be a whole number of 32-bit words */
	VERIFY((size % 4) == 0);

	wptr = (uint32_t *)buffer;

	for (i = 0; i < size / 4; i++) {
		word = *wptr;
		*wptr++ = SWAP32(word);
	}

	return;

} /* emlxs_swap32_buffer() */


/*
 * Copy size bytes from src to dst, byte-swapping each 32-bit word.
 * size must be a multiple of 4 (VERIFY'd).  src is not modified.
 */
extern void
emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
{
	uint32_t word;
	uint32_t *sptr;
	uint32_t *dptr;
	uint32_t i;

	VERIFY((size % 4)
== 0);

	sptr = (uint32_t *)src;
	dptr = (uint32_t *)dst;

	for (i = 0; i < size / 4; i++) {
		word = *sptr++;
		*dptr++ = SWAP32(word);
	}

	return;

} /* emlxs_swap32_bcopy() */


/*
 * Upper-case an ASCII string in place.  Returns str for convenience.
 */
extern char *
emlxs_strtoupper(char *str)
{
	char *cptr = str;

	while (*cptr) {
		if ((*cptr >= 'a') && (*cptr <= 'z')) {
			*cptr -= ('a' - 'A');
		}
		cptr++;
	}

	return (str);

} /* emlxs_strtoupper() */


/*
 * Deliver a state-change notification to the bound ULP.  The ulp_busy
 * reference count, taken under EMLXS_PORT_LOCK, keeps the port bound
 * for the duration of the callback.
 */
extern void
emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
{
	emlxs_hba_t *hba = HBA;

	/* This routine coordinates protection with emlxs_fca_unbind_port() */

	mutex_enter(&EMLXS_PORT_LOCK);
	if (!(port->flag & EMLXS_INI_BOUND)) {
		/* No ULP bound; drop the notification */
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	port->ulp_busy++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Callback is made without holding EMLXS_PORT_LOCK */
	port->ulp_statec_cb(port->ulp_handle, statec);

	mutex_enter(&EMLXS_PORT_LOCK);
	port->ulp_busy--;
	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_ulp_statec_cb() */


/*
 * Deliver an unsolicited buffer to the bound ULP.  Uses the same
 * ulp_busy reference counting as emlxs_ulp_statec_cb() above.
 */
extern void
emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;

	/* This routine coordinates protection with emlxs_fca_unbind_port() */

	mutex_enter(&EMLXS_PORT_LOCK);
	if (!(port->flag & EMLXS_INI_BOUND)) {
		/* No ULP bound; drop the buffer notification */
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	port->ulp_busy++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Callback is made without holding EMLXS_PORT_LOCK */
	port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);

	mutex_enter(&EMLXS_PORT_LOCK);
	port->ulp_busy--;
	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_ulp_unsol_cb() */