/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Emulex. All rights reserved.
 * Use is subject to License terms.
 */


#include "emlxs.h"

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_MBOX_C);

/*
 * File-scope forward declarations; the definitions appear later in
 * this file.
 */
static void emlxs_mb_part_slim(emlxs_hba_t *hba, MAILBOX *mb,
	uint32_t hbainit);
static void emlxs_mb_set_mask(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mask,
	uint32_t ringno);
static void emlxs_mb_set_debug(emlxs_hba_t *hba, MAILBOX *mb, uint32_t word0,
	uint32_t word1, uint32_t word2);
static int32_t emlxs_mb_handle_cmd(emlxs_hba_t *hba, MAILBOX *mb);
static void emlxs_mb_write_nv(emlxs_hba_t *hba, MAILBOX *mb);

static void emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag,
	uint32_t tmo);
static void emlxs_mb_retry(emlxs_hba_t *hba, MAILBOX *mb);


/*
 * Table mapping each MBX_* mailbox command code to a printable name.
 */
emlxs_table_t emlxs_mb_cmd_table[] =
{
	{MBX_SHUTDOWN, "SHUTDOWN"},
	{MBX_LOAD_SM, "LOAD_SM"},
	{MBX_READ_NV, "READ_NV"},
	{MBX_WRITE_NV, "WRITE_NV"},
	{MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"},
	{MBX_INIT_LINK, "INIT_LINK"},
	{MBX_DOWN_LINK, "DOWN_LINK"},
	{MBX_CONFIG_LINK, "CONFIG_LINK"},
	{MBX_PART_SLIM, "PART_SLIM"},
	{MBX_CONFIG_RING, "CONFIG_RING"},
	{MBX_RESET_RING, "RESET_RING"},
	{MBX_READ_CONFIG, "READ_CONFIG"},
	{MBX_READ_RCONFIG, "READ_RCONFIG"},
	{MBX_READ_SPARM, "READ_SPARM"},
	{MBX_READ_STATUS, "READ_STATUS"},
	{MBX_READ_RPI, "READ_RPI"},
	{MBX_READ_XRI, "READ_XRI"},
	{MBX_READ_REV, "READ_REV"},
	{MBX_READ_LNK_STAT, "READ_LNK_STAT"},
	{MBX_REG_LOGIN, "REG_LOGIN"},
	{MBX_UNREG_LOGIN, "UNREG_LOGIN"},
	{MBX_READ_LA, "READ_LA"},
	{MBX_CLEAR_LA, "CLEAR_LA"},
	{MBX_DUMP_MEMORY, "DUMP_MEMORY"},
	{MBX_DUMP_CONTEXT, "DUMP_CONTEXT"},
	{MBX_RUN_DIAGS, "RUN_DIAGS"},
	{MBX_RESTART, "RESTART"},
	{MBX_UPDATE_CFG, "UPDATE_CFG"},
	{MBX_DOWN_LOAD, "DOWN_LOAD"},
	{MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"},
	{MBX_RUN_PROGRAM, "RUN_PROGRAM"},
	{MBX_SET_MASK, "SET_MASK"},
	{MBX_SET_VARIABLE, "SET_VARIABLE"},
	{MBX_UNREG_D_ID, "UNREG_D_ID"},
	{MBX_KILL_BOARD, "KILL_BOARD"},
	{MBX_CONFIG_FARP, "CONFIG_FARP"},
	{MBX_LOAD_AREA, "LOAD_AREA"},
	{MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"},
	{MBX_CONFIG_PORT, "CONFIG_PORT"},
	{MBX_READ_SPARM64, "READ_SPARM64"},
	{MBX_READ_RPI64, "READ_RPI64"},
	{MBX_CONFIG_MSI, "CONFIG_MSI"},
	{MBX_CONFIG_MSIX, "CONFIG_MSIX"},
	{MBX_REG_LOGIN64, "REG_LOGIN64"},
	{MBX_READ_LA64, "READ_LA64"},
	{MBX_FLASH_WR_ULA, "FLASH_WR_ULA"},
	{MBX_SET_DEBUG, "SET_DEBUG"},
	{MBX_GET_DEBUG, "GET_DEBUG"},
	{MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"},
	{MBX_BEACON, "BEACON"},
	{MBX_CONFIG_HBQ, "CONFIG_HBQ"},	/* SLI3 */
	{MBX_REG_VPI, "REG_VPI"},	/* NPIV */
	{MBX_ASYNC_EVENT, "ASYNC_EVENT"},
	{MBX_HEARTBEAT, "HEARTBEAT"},
	{MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"},
	{MBX_READ_EVENT_LOG, "READ_EVENT_LOG"},
	{MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"},
	{MBX_NV_LOG, "NV_LOG"}

};	/* emlxs_mb_cmd_table */


/*
 * emlxs_mb_async_event
 *
 * Build an ASYNC_EVENT mailbox command in *mb (zeroed first).
 * The hba argument is unused.
 */
/* ARGSUSED */
extern void
emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_ASYNC_EVENT;
	mb->mbxOwner = OWN_HOST;
	/* Async events are delivered on the ELS ring */
	mb->un.varWords[0] = FC_ELS_RING;

	return;

}	/* emlxs_mb_async_event() */


/*
 * emlxs_mb_heartbeat
 *
 * Build a HEARTBEAT mailbox command in *mb (zeroed first).
 */
/* ARGSUSED */
extern void
emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_HEARTBEAT;
	mb->mbxOwner = OWN_HOST;

	return;

}	/* emlxs_mb_heartbeat() */


#ifdef MSI_SUPPORT

/*
 * emlxs_mb_config_msi
 *
 * Build a CONFIG_MSI mailbox command.  intr_map[1..intr_count-1] gives
 * the HA_* attention-condition bits routed to each MSI message; message
 * 0 is the default catch-all.  The messageNumberByHA[] byte indices are
 * chosen per host endianness (LE indices account for the half-word swap
 * noted below).
 */
/* ARGSUSED */
extern void
emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOX *mb, uint32_t *intr_map,
	uint32_t intr_count)
{
	uint32_t i;
	uint32_t mask;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_CONFIG_MSI;

	/* Set the default message id to zero */
	mb->un.varCfgMSI.defaultPresent = 1;
	mb->un.varCfgMSI.defaultMessageNumber = 0;

	/* Message 0 is the default, so start at message 1 */
	for (i = 1; i < intr_count; i++) {
		mask = intr_map[i];

		mb->un.varCfgMSI.attConditions |= mask;

#ifdef EMLXS_BIG_ENDIAN
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSI.messageNumberByHA[3] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSI.messageNumberByHA[7] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSI.messageNumberByHA[11] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSI.messageNumberByHA[15] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSI.messageNumberByHA[29] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSI.messageNumberByHA[30] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSI.messageNumberByHA[31] = i;
		}
#endif	/* EMLXS_BIG_ENDIAN */

#ifdef EMLXS_LITTLE_ENDIAN
		/* Accounts for half word swap of LE architecture */
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSI.messageNumberByHA[2] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSI.messageNumberByHA[6] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSI.messageNumberByHA[10] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSI.messageNumberByHA[14] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSI.messageNumberByHA[28] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSI.messageNumberByHA[31] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSI.messageNumberByHA[30] = i;
		}
#endif	/* EMLXS_LITTLE_ENDIAN */
	}

	mb->mbxOwner = OWN_HOST;

	return;

}	/* emlxs_mb_config_msi() */


/*
 * emlxs_mb_config_msix
 *
 * Build a CONFIG_MSIX mailbox command; same mapping scheme as
 * emlxs_mb_config_msi, but the LE indices below reflect a full word
 * swap rather than a half-word swap.
 */
/* ARGSUSED */
extern void
emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOX *mb, uint32_t *intr_map,
	uint32_t intr_count)
{
	uint32_t i;
	uint32_t mask;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_CONFIG_MSIX;

	/* Set the default message id to zero */
	mb->un.varCfgMSIX.defaultPresent = 1;
	mb->un.varCfgMSIX.defaultMessageNumber = 0;

	for (i = 1; i < intr_count; i++) {
		mask = intr_map[i];

		mb->un.varCfgMSIX.attConditions1 |= mask;

#ifdef EMLXS_BIG_ENDIAN
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[3] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[7] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[11] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[15] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSIX.messageNumberByHA[31] = i;
		}
#endif	/* EMLXS_BIG_ENDIAN */

#ifdef EMLXS_LITTLE_ENDIAN
		/* Accounts for word swap of LE architecture */
		if (mask & HA_R0ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[0] = i;
		}
		if (mask & HA_R1ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[4] = i;
		}
		if (mask & HA_R2ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[8] = i;
		}
		if (mask & HA_R3ATT) {
			mb->un.varCfgMSIX.messageNumberByHA[12] = i;
		}
		if (mask & HA_LATT) {
			mb->un.varCfgMSIX.messageNumberByHA[30] = i;
		}
		if (mask & HA_MBATT) {
			mb->un.varCfgMSIX.messageNumberByHA[29] = i;
		}
		if (mask & HA_ERATT) {
			mb->un.varCfgMSIX.messageNumberByHA[28] = i;
		}
#endif	/* EMLXS_LITTLE_ENDIAN */
	}

	mb->mbxOwner = OWN_HOST;

	return;

}	/* emlxs_mb_config_msix() */


#endif	/* MSI_SUPPORT */

/*
 * emlxs_mb_reset_ring
 *
 * Build a RESET_RING mailbox command for ring ringno.
 */
/* ARGSUSED */
extern void
emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOX *mb, uint32_t ringno)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_RESET_RING;
	mb->un.varRstRing.ring_no = ringno;
	mb->mbxOwner = OWN_HOST;

	return;

}	/* emlxs_mb_reset_ring() */



/*
 * emlxs_mb_dump_vpd Issue a DUMP MEMORY
 * mailbox command
 *
 * Dumps DMP_VPD_DUMP_WCOUNT words of the VPD region starting at the
 * given word offset; the response is returned inline in the mailbox.
 */
/* ARGSUSED */
extern void
emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOX *mb, uint32_t offset)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	/*
	 * Setup to dump VPD region
	 */
	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.cv = 1;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.entry_index = offset;
	mb->un.varDmp.region_id = DMP_VPD_REGION;
	mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;	/* limited by */
							/* mailbox size */

	mb->un.varDmp.co = 0;
	mb->un.varDmp.resp_offset = 0;
	mb->mbxOwner = OWN_HOST;
}	/* emlxs_mb_dump_vpd() */


/*
 * emlxs_mb_read_nv Issue a READ NVPARAM
 * mailbox command
 */
/* ARGSUSED */
extern void
emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_NV;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_read_nv */


/*
 * emlxs_mb_read_rev Issue a READ REV
 * mailbox command
 *
 * A nonzero v3 additionally requests the cv3 (extended) revision data.
 */
/* ARGSUSED */
extern void
emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOX *mb, uint32_t v3)
{
	bzero((void *) mb,
	    MAILBOX_CMD_BSIZE);

	mb->un.varRdRev.cv = 1;

	if (v3) {
		mb->un.varRdRev.cv3 = 1;
	}

	mb->mbxCommand = MBX_READ_REV;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_read_rev */


/*
 * emlxs_mb_run_biu_diag Issue a RUN_BIU_DIAG
 * mailbox command
 *
 * out/in are the physical addresses of the transmit and receive
 * loopback buffers (MEM_ELSBUF_SIZE bytes each).  Always returns 0.
 */
/* ARGSUSED */
extern uint32_t
emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOX *mb, uint64_t out,
	uint64_t in)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_RUN_BIU_DIAG64;
	mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
	mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
	    (uint32_t)putPaddrHigh(out);
	mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
	    (uint32_t)putPaddrLow(out);
	mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
	mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
	    (uint32_t)putPaddrHigh(in);
	mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = (uint32_t)putPaddrLow(in);
	mb->mbxOwner = OWN_HOST;

	return (0);

}	/* End emlxs_mb_run_biu_diag */


/*
 * emlxs_mb_read_la Issue a READ LA
 * mailbox command
 *
 * Allocates a MEM_BUF to receive the 128-byte loop position (LILP) map.
 * Returns 0 on success; returns 1 if the buffer allocation fails (the
 * command code is still set so the caller can log it).
 */
extern uint32_t
emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOX *mb)
{
	MATCHMAP *mp;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	if ((mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BUF)) == 0) {
		mb->mbxCommand = MBX_READ_LA64;

		return (1);
	}
	mb->mbxCommand = MBX_READ_LA64;
	mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
	mb->un.varReadLA.un.lilpBde64.addrHigh =
	    (uint32_t)putPaddrHigh(mp->phys);
	mb->un.varReadLA.un.lilpBde64.addrLow =
	    (uint32_t)putPaddrLow(mp->phys);
	mb->mbxOwner = OWN_HOST;

	/*
	 * save address for completion
	 * (mb is the first member of the enclosing MAILBOXQ, hence the cast)
	 */
	((MAILBOXQ *)mb)->bp = (uint8_t *)mp;

	return (0);

}	/* emlxs_mb_read_la() */


/*
 * emlxs_mb_clear_la Issue a CLEAR LA
 * mailbox command
 */
extern void
emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOX *mb)
{
#ifdef FC_RPI_CHECK
	emlxs_rpi_check(hba);
#endif	/* FC_RPI_CHECK */

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	/* Echo the last link event tag back to the adapter */
	mb->un.varClearLA.eventTag = hba->link_event_tag;
	mb->mbxCommand = MBX_CLEAR_LA;
	mb->mbxOwner = OWN_HOST;

	return;

}	/* End emlxs_mb_clear_la */


/*
 * emlxs_mb_read_status Issue a READ STATUS
 * mailbox command
 */
/* ARGSUSED */
extern void
emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_STATUS;
	mb->mbxOwner = OWN_HOST;

}	/* End fc_read_status */


/*
 * emlxs_mb_read_lnk_stat Issue a LINK STATUS
 * mailbox command
 */
/* ARGSUSED */
extern void
emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_LNK_STAT;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_read_lnk_stat */


/*
 * emlxs_mb_write_nv Issue a WRITE NVPARAM
 * mailbox command
 *
 * Writes the adapter WWNN/WWPN and the configured hard AL_PA back to
 * non-volatile parameters; reserved words are set to all-ones.
 *
 * NOTE(review): the definition name below (emlxs_emb_mb_write_nv) does
 * not match the static prototype emlxs_mb_write_nv declared at the top
 * of this file -- confirm which identifier unseen code references
 * before renaming either.
 */
static void
emlxs_emb_mb_write_nv(emlxs_hba_t *hba, MAILBOX *mb)
{
	int32_t i;
	emlxs_config_t *cfg = &CFG;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	bcopy((void *) &hba->wwnn,
	    (void *) mb->un.varWTnvp.nodename,
	    sizeof (NAME_TYPE));

	bcopy((void *) &hba->wwpn,
	    (void *) mb->un.varWTnvp.portname,
	    sizeof (NAME_TYPE));

	mb->un.varWTnvp.pref_DID = 0;
	mb->un.varWTnvp.hardAL_PA = (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
	mb->un.varWTnvp.rsvd1[0] = 0xffffffff;
	mb->un.varWTnvp.rsvd1[1] = 0xffffffff;
	mb->un.varWTnvp.rsvd1[2] = 0xffffffff;
	for (i = 0; i < 21; i++) {
		mb->un.varWTnvp.rsvd3[i] = 0xffffffff;
	}

	mb->mbxCommand = MBX_WRITE_NV;
	mb->mbxOwner = OWN_HOST;
}	/* End emlxs_mb_write_nv */


/*
 * emlxs_mb_part_slim Issue a PARTITION SLIM
 *
mailbox command 534 */ 535 static void 536 emlxs_mb_part_slim(emlxs_hba_t *hba, MAILBOX *mb, uint32_t hbainit) 537 { 538 bzero((void *) mb, MAILBOX_CMD_BSIZE); 539 540 541 mb->un.varSlim.numRing = hba->ring_count; 542 mb->un.varSlim.hbainit = hbainit; 543 mb->mbxCommand = MBX_PART_SLIM; 544 mb->mbxOwner = OWN_HOST; 545 546 } /* End emlxs_mb_part_slim */ 547 548 549 /* 550 * emlxs_mb_config_ring Issue a CONFIG RING 551 * mailbox command 552 */ 553 extern void 554 emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOX *mb) 555 { 556 int32_t i; 557 int32_t j; 558 559 bzero((void *) mb, MAILBOX_CMD_BSIZE); 560 561 j = 0; 562 for (i = 0; i < ring; i++) { 563 j += hba->ring_masks[i]; 564 } 565 566 for (i = 0; i < hba->ring_masks[ring]; i++) { 567 if ((j + i) >= 6) { 568 break; 569 } 570 mb->un.varCfgRing.rrRegs[i].rval = hba->ring_rval[j + i]; 571 mb->un.varCfgRing.rrRegs[i].rmask = hba->ring_rmask[j + i]; 572 573 mb->un.varCfgRing.rrRegs[i].tval = hba->ring_tval[j + i]; 574 mb->un.varCfgRing.rrRegs[i].tmask = hba->ring_tmask[j + i]; 575 } 576 577 mb->un.varCfgRing.ring = ring; 578 mb->un.varCfgRing.profile = 0; 579 mb->un.varCfgRing.maxOrigXchg = 0; 580 mb->un.varCfgRing.maxRespXchg = 0; 581 mb->un.varCfgRing.recvNotify = 1; 582 mb->un.varCfgRing.numMask = hba->ring_masks[ring]; 583 mb->mbxCommand = MBX_CONFIG_RING; 584 mb->mbxOwner = OWN_HOST; 585 586 return; 587 588 } /* End emlxs_mb_config_ring */ 589 590 591 /* 592 * emlxs_mb_config_link Issue a CONFIG LINK 593 * mailbox command 594 */ 595 extern void 596 emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOX *mb) 597 { 598 emlxs_port_t *port = &PPORT; 599 emlxs_config_t *cfg = &CFG; 600 601 bzero((void *) mb, MAILBOX_CMD_BSIZE); 602 603 /* 604 * NEW_FEATURE SLI-2, Coalescing Response Feature. 
605 */ 606 if (cfg[CFG_CR_DELAY].current) { 607 mb->un.varCfgLnk.cr = 1; 608 mb->un.varCfgLnk.ci = 1; 609 mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current; 610 mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current; 611 } 612 if (cfg[CFG_ACK0].current) 613 mb->un.varCfgLnk.ack0_enable = 1; 614 615 mb->un.varCfgLnk.myId = port->did; 616 mb->un.varCfgLnk.edtov = hba->fc_edtov; 617 mb->un.varCfgLnk.arbtov = hba->fc_arbtov; 618 mb->un.varCfgLnk.ratov = hba->fc_ratov; 619 mb->un.varCfgLnk.rttov = hba->fc_rttov; 620 mb->un.varCfgLnk.altov = hba->fc_altov; 621 mb->un.varCfgLnk.crtov = hba->fc_crtov; 622 mb->un.varCfgLnk.citov = hba->fc_citov; 623 mb->mbxCommand = MBX_CONFIG_LINK; 624 mb->mbxOwner = OWN_HOST; 625 626 return; 627 628 } /* emlxs_mb_config_link() */ 629 630 631 /* 632 * emlxs_mb_init_link Issue an INIT LINK 633 * mailbox command 634 */ 635 extern void 636 emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOX *mb, uint32_t topology, 637 uint32_t linkspeed) 638 { 639 emlxs_vpd_t *vpd = &VPD; 640 emlxs_config_t *cfg = &CFG; 641 642 bzero((void *) mb, MAILBOX_CMD_BSIZE); 643 644 switch (topology) { 645 case FLAGS_LOCAL_LB: 646 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; 647 mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB; 648 break; 649 case FLAGS_TOPOLOGY_MODE_LOOP_PT: 650 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; 651 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; 652 break; 653 case FLAGS_TOPOLOGY_MODE_PT_PT: 654 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; 655 break; 656 case FLAGS_TOPOLOGY_MODE_LOOP: 657 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; 658 break; 659 case FLAGS_TOPOLOGY_MODE_PT_LOOP: 660 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; 661 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; 662 break; 663 } 664 665 if (cfg[CFG_LILP_ENABLE].current == 0) { 666 /* Disable LIRP/LILP support */ 667 mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP; 668 } 669 /* 670 * Setting up the 
link speed 671 */ 672 switch (linkspeed) { 673 case 0: 674 break; 675 676 case 1: 677 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) { 678 linkspeed = 0; 679 } 680 break; 681 682 case 2: 683 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) { 684 linkspeed = 0; 685 } 686 break; 687 688 case 4: 689 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) { 690 linkspeed = 0; 691 } 692 break; 693 694 case 8: 695 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) { 696 linkspeed = 0; 697 } 698 break; 699 700 case 10: 701 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) { 702 linkspeed = 0; 703 } 704 break; 705 706 default: 707 linkspeed = 0; 708 break; 709 710 } 711 712 if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) { 713 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 714 mb->un.varInitLnk.link_speed = linkspeed; 715 } 716 mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN; 717 718 mb->un.varInitLnk.fabric_AL_PA = (uint8_t)cfg[CFG_ASSIGN_ALPA].current; 719 mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK; 720 mb->mbxOwner = OWN_HOST; 721 722 723 return; 724 725 } /* emlxs_mb_init_link() */ 726 727 728 /* 729 * emlxs_mb_down_link Issue a DOWN LINK 730 * mailbox command 731 */ 732 /* ARGSUSED */ 733 extern void 734 emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOX *mb) 735 { 736 bzero((void *) mb, MAILBOX_CMD_BSIZE); 737 738 mb->mbxCommand = MBX_DOWN_LINK; 739 mb->mbxOwner = OWN_HOST; 740 741 return; 742 743 } /* emlxs_mb_down_link() */ 744 745 746 /* 747 * emlxs_mb_read_sparam Issue a READ SPARAM 748 * mailbox command 749 */ 750 extern uint32_t 751 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOX *mb) 752 { 753 MATCHMAP *mp; 754 755 bzero((void *) mb, MAILBOX_CMD_BSIZE); 756 757 if ((mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BUF)) == 0) { 758 mb->mbxCommand = MBX_READ_SPARM64; 759 760 return (1); 761 } 762 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM); 763 mb->un.varRdSparm.un.sp64.addrHigh = (uint32_t)putPaddrHigh(mp->phys); 764 mb->un.varRdSparm.un.sp64.addrLow = 
	    (uint32_t)putPaddrLow(mp->phys);
	mb->mbxCommand = MBX_READ_SPARM64;
	mb->mbxOwner = OWN_HOST;

	/*
	 * save address for completion
	 * (mb is the first member of the enclosing MAILBOXQ, hence the cast)
	 */
	((MAILBOXQ *)mb)->bp = (uint8_t *)mp;

	return (0);

}	/* emlxs_mb_read_sparam() */


/*
 * emlxs_mb_read_rpi Issue a READ RPI
 * mailbox command
 *
 * Always returns 0.
 */
/* ARGSUSED */
extern uint32_t
emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOX *mb, uint32_t flag)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	/*
	 * Set flag to issue action on cmpl
	 */
	mb->un.varWords[30] = flag;
	mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi;
	mb->mbxCommand = MBX_READ_RPI64;
	mb->mbxOwner = OWN_HOST;

	return (0);

}	/* End emlxs_mb_read_rpi */


/*
 * emlxs_mb_read_xri Issue a READ XRI
 * mailbox command
 *
 * Always returns 0.
 */
/* ARGSUSED */
extern uint32_t
emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOX *mb, uint32_t flag)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	/*
	 * Set flag to issue action on cmpl
	 */
	mb->un.varWords[30] = flag;
	mb->un.varRdXRI.reqXri = (volatile uint16_t) xri;
	mb->mbxCommand = MBX_READ_XRI;
	mb->mbxOwner = OWN_HOST;

	return (0);

}	/* End emlxs_mb_read_xri */


/*
 * emlxs_mb_check_sparm
 *
 * Validate (and minimally repair) a remote node's service parameters.
 * Returns 0 if acceptable (fabric ports are accepted unchecked),
 * 1 for an all-zero port name, 2 for an all-zero node name.
 * A zero class 2/3 receive data size is patched up from the common
 * service parameters.
 */
/* ARGSUSED */
extern int32_t
emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
{
	uint32_t nsp_value;
	uint32_t *iptr;

	if (nsp->cmn.fPort) {
		return (0);
	}
	/* Validate the service parameters */
	iptr = (uint32_t *)&nsp->portName;
	if (iptr[0] == 0 && iptr[1] == 0) {
		return (1);
	}
	iptr = (uint32_t *)&nsp->nodeName;
	if (iptr[0] == 0 && iptr[1] == 0) {
		return (2);
	}
	if (nsp->cls2.classValid) {
		nsp_value = ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) |
		    nsp->cls2.rcvDataSizeLsb;

		/*
		 * If the receive data length is zero then set it to the CSP
		 * value
		 */
		if (!nsp_value) {
			nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
			nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
			return (0);
		}
	}
	if (nsp->cls3.classValid) {
		nsp_value = ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) |
		    nsp->cls3.rcvDataSizeLsb;

		/*
		 * If the receive data length is zero then set it to the CSP
		 * value
		 */
		/* This prevents a Emulex adapter bug from occurring */
		if (!nsp_value) {
			nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
			nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
			return (0);
		}
	}
	return (0);

}	/* emlxs_mb_check_sparm() */


/*
 * emlxs_mb_reg_did Issue a REG_LOGIN
 * mailbox command
 *
 * Validates the DID and service parameters, then builds and issues a
 * REG_LOGIN64 (MBX_NOWAIT).  Returns 1 on any validation/allocation
 * failure, 0 once the command has been handed to the mailbox layer.
 */
extern uint32_t
emlxs_mb_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
	emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	MATCHMAP *mp;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rval;

	/* Check for invalid node ids to register */
	if (did == 0 || (did & 0xff000000)) {
		return (1);
	}
	if ((rval = emlxs_mb_check_sparm(hba, param))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Invalid service parameters. did=%06x rval=%d", did, rval);

		return (1);
	}
	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did, port->node_count);

		return (1);
	}
	if (!(mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		return (1);
	}
	/* Build login request */
	if ((mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BUF | MEM_PRI)) == 0) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
		return (1);
	}
	bcopy((void *) param, (void *) mp->virt, sizeof (SERV_PARM));

	mb = (MAILBOX *) mbq->mbox;
	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
	mb->un.varRegLogin.un.sp64.addrHigh = (uint32_t)putPaddrHigh(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = (uint32_t)putPaddrLow(mp->phys);
	mb->un.varRegLogin.rpi = 0;
	mb->un.varRegLogin.did = did;
	mb->un.varWords[30] = 0;	/* flags */
	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->mbxOwner = OWN_HOST;

#ifdef SLI3_SUPPORT
	mb->un.varRegLogin.vpi = port->vpi;
#endif	/* SLI3_SUPPORT */

	mbq->sbp = (uint8_t *)sbp;
	mbq->ubp = (uint8_t *)ubp;
	mbq->iocbq = (uint8_t *)iocbq;
	mbq->bp = (uint8_t *)mp;

	/* If not queued busy, the mbq was consumed (or failed); release it */
	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}
	return (0);

}	/* emlxs_mb_reg_did() */

/*
 * emlxs_mb_unreg_rpi Issue a UNREG_LOGIN
 * mailbox command
 *
 * rpi == 0xffff unregisters (and destroys) all nodes; otherwise the
 * node owning rpi is removed first.  Returns 1 if the node is unknown
 * or no mailbox could be allocated, 0 otherwise.
 */
extern uint32_t
emlxs_mb_unreg_rpi(emlxs_port_t *port, uint32_t rpi, emlxs_buf_t *sbp,
	fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	NODELIST *ndlp;

	if (rpi != 0xffff) {
		/* Make sure the node does already exist */
		ndlp = emlxs_node_find_rpi(port, rpi);


		if (ndlp) {
			/*
			 * If we just unregistered the host node then clear
			 * the host DID
			 */
			if (ndlp->nlp_DID == port->did) {
				port->did = 0;
			}
			/* remove it */
			emlxs_node_rm(port, ndlp);

		} else {
			return (1);
		}
	} else {	/* Unreg all */
		emlxs_node_destroy_all(port);
	}

	if (!(mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		return (1);
	}
	mb = (MAILBOX *) mbq->mbox;
	mb->un.varUnregLogin.rpi = (uint16_t)rpi;

#ifdef SLI3_SUPPORT
	mb->un.varUnregLogin.vpi = port->vpi;
#endif	/* SLI3_SUPPORT */

	mb->mbxCommand = MBX_UNREG_LOGIN;
	mb->mbxOwner = OWN_HOST;
	mbq->sbp = (uint8_t *)sbp;
	mbq->ubp = (uint8_t *)ubp;
	mbq->iocbq = (uint8_t *)iocbq;

	/* If not queued busy, the mbq was consumed (or failed); release it */
	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}
	return (0);
}	/* emlxs_mb_unreg_rpi() */

/*
 * emlxs_mb_unreg_did Issue a UNREG_DID
 * mailbox command
 *
 * did == 0xffffffff unregisters all default RPIs; did == 0 means the
 * host node itself (resolved through port->prev_did after a link
 * bounce); Bcast_DID only flushes the base node queues.  Returns 1 on
 * the early-out paths described below, 0 once the command is issued.
 */
extern uint32_t
emlxs_mb_unreg_did(emlxs_port_t *port, uint32_t did, emlxs_buf_t *sbp,
	fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	NODELIST *ndlp;
	MAILBOXQ *mbq;
	MAILBOX *mb;

	/*
	 * Unregister all default RPIs if did == 0xffffffff
	 */
	if (did != 0xffffffff) {
		/* Check for base node */
		if (did == Bcast_DID) {
			/* just flush base node */
			(void) emlxs_tx_node_flush(port, &port->node_base,
			    0, 0, 0);
			(void) emlxs_chipq_node_flush(port, 0, &port->node_base,
			    0);

			/* Return now */
			return (1);
		}
		/*
		 * A zero DID means that we are trying to unreg the host node
		 * after a link bounce
		 */

		/*
		 * If the prev_did == 0 then the adapter has been reset and
		 * there is no need in unregistering
		 */

		/*
		 * If the prev_did != 0 then we can look for the hosts last
		 * known DID node
		 */

		if (did == 0) {
			if (port->prev_did == 0) {
				return (1);
			}
			did = port->prev_did;
		}
		/* Make sure the node does already exist */
		ndlp = emlxs_node_find_did(port, did);


		if (ndlp) {
			/* remove it */
			emlxs_node_rm(port, ndlp);

			/*
			 * If we just unregistered the host node then clear
			 * the host DID
			 */
			if (did == port->did) {
				port->did = 0;
			}
		} else {
			return (1);
		}
	}
	if (!(mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		return (1);
	}
	mb = (MAILBOX *) mbq->mbox;
	mb->un.varUnregDID.did = did;

#ifdef SLI3_SUPPORT
	mb->un.varUnregDID.vpi = port->vpi;
#endif	/* SLI3_SUPPORT */

	mb->mbxCommand = MBX_UNREG_D_ID;
	mb->mbxOwner = OWN_HOST;
	mbq->sbp = (uint8_t *)sbp;
	mbq->ubp = (uint8_t *)ubp;
	mbq->iocbq = (uint8_t *)iocbq;

	/* If not queued busy, the mbq was consumed (or failed); release it */
	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}
	return (0);

}	/* End emlxs_mb_unreg_did */


/*
 * emlxs_mb_set_mask Issue a SET MASK
 * mailbox command
 */
/* ARGSUSED */
static void
emlxs_mb_set_mask(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mask,
	uint32_t ringno)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->un.varWords[0] = 0x11223344;	/* set passwd */
	mb->un.varWords[1] = mask;	/* set mask */
	mb->un.varWords[2] = ringno;	/* set ringno */
	mb->mbxCommand = MBX_SET_MASK;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_set_mask */


/*
 * emlxs_mb_set_debug Issue a special debug
 * mailbox command
 */
/* ARGSUSED */
static void
emlxs_mb_set_debug(emlxs_hba_t *hba, MAILBOX *mb, uint32_t word0,
	uint32_t word1, uint32_t word2)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->un.varWords[0] = word0;
	mb->un.varWords[1] = word1;
	mb->un.varWords[2] = word2;
	mb->mbxCommand = MBX_SET_DEBUG;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_set_debug */


/*
 * emlxs_mb_set_var Issue a special debug mbox
 * command to write slim
 */
/* ARGSUSED */
extern void
emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOX *mb, uint32_t addr, uint32_t value)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	/* addr = 0x090597 is AUTO ABTS disable for ELS commands */
	/* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
	/* addr = 0x100506 is for setting PCI MAX READ value */

	/*
	 * Always turn on DELAYED ABTS for ELS timeouts
	 */
	if ((addr == 0x052198) && (value == 0)) {
		value = 1;
	}
	mb->un.varWords[0] = addr;
	mb->un.varWords[1] = value;
	mb->mbxCommand = MBX_SET_VARIABLE;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_mb_set_var */


/*
 * Disable Traffic Cop
 * (SET_VARIABLE at address 0x50797 with mask 0xfffffffe)
 */
/* ARGSUSED */
extern void
emlxs_disable_tc(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->un.varWords[0] = 0x50797;
	mb->un.varWords[1] = 0;
	mb->un.varWords[2] = 0xfffffffe;
	mb->mbxCommand = MBX_SET_VARIABLE;
	mb->mbxOwner = OWN_HOST;

}	/* End emlxs_disable_tc */


/*
 * emlxs_mb_config_port Issue a CONFIG_PORT
 * mailbox command
 */
extern uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOX *mb, uint32_t sli_mode,
	uint32_t hbainit)
{
	emlxs_vpd_t *vpd = &VPD;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	RING *rp;
	uint64_t pcb;
	uint64_t mbx;
	uint64_t hgp;
	uint64_t pgp;
	uint64_t rgp;
	MAILBOX *mbox;
	SLIM2 *slim;
	SLI2_RDSC *rdsc;
	uint64_t offset;
	uint32_t Laddr;
	uint32_t i;

	cfg = &CFG;
	bzero((void *) mb, MAILBOX_CMD_BSIZE);
	/* mbox/slim stay NULL; they are used only for offsetof-style math */
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);

#ifdef SLI3_SUPPORT
	mb->un.varCfgPort.hbainit[0] = hbainit;
#else	/* SLI3_SUPPORT */
	mb->un.varCfgPort.hbainit =
hbainit;
#endif	/* SLI3_SUPPORT */

	/* Physical address of the PCB within the SLIM2 DMA area */
	pcb = hba->slim2.phys + (uint64_t)(unsigned long)& (slim->pcb);
	mb->un.varCfgPort.pcbLow = (uint32_t)putPaddrLow(pcb);
	mb->un.varCfgPort.pcbHigh = (uint32_t)putPaddrHigh(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then */
	hba->sli_mode = 2;
	hba->vpi_max = 1;
	hba->flag &= ~FC_NPIV_ENABLED;

#ifdef SLI3_SUPPORT
	if (sli_mode >= 3) {
		mb->un.varCfgPort.sli_mode = 3;
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

#ifdef NPIV_SUPPORT
		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				/* Newer chips support more virtual ports */
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
#endif	/* NPIV_SUPPORT */
	}
#endif	/* SLI3_SUPPORT */

	/*
	 * Now setup pcb
	 */
	((SLIM2 *) hba->slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *) hba->slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *) hba->slim2.virt)->pcb.maxRing = (hba->ring_count - 1);
	((SLIM2 *) hba->slim2.virt)->pcb.mailBoxSize = sizeof (MAILBOX) +
	    MBOX_EXTENSION_SIZE;

	mbx = hba->slim2.phys + (uint64_t)(unsigned long)& (slim->mbx);
	((SLIM2 *)hba->slim2.virt)->pcb.mbAddrHigh =
	    (uint32_t)putPaddrHigh(mbx);
	((SLIM2 *)hba->slim2.virt)->pcb.mbAddrLow = (uint32_t)putPaddrLow(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put   - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get                     0xc4              0x84
	 * CR1Put                     0xc8              0x88
	 * RR1Get                     0xcc              0x8c
	 * CR2Put                     0xd0              0x90
	 * RR2Get                     0xd4              0x94
	 * CR3Put                     0xd8              0x98
	 * RR3Get                     0xdc              0x9c
	 *
	 * Reserved                   0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr  0xc0
	 * HBQ 1 Put ptr  0xc4
	 * HBQ 2 Put ptr  0xc8
	 * ......
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

#ifdef SLI3_SUPPORT
	if (sli_mode >= 3) {
		/* ERBM is enabled */
		hba->hgp_ring_offset = 0x80;
		hba->hgp_hbq_offset = 0xC0;

		hba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else	/* SLI2 */
#endif	/* SLI3_SUPPORT */
	{
		/* ERBM is disabled */
		hba->hgp_ring_offset = 0xC0;
		hba->hgp_hbq_offset = 0;

		hba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		hgp = hba->slim2.phys +
		    (uint64_t)(unsigned long)& (mbox->us.s2.host);
		((SLIM2 *)hba->slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)putPaddrHigh(hgp);
		((SLIM2 *)hba->slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)putPaddrLow(hgp);
	} else {
		((SLIM2 *)hba->slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		Laddr &= ~0x4;
		((SLIM2 *)hba->slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->hgp_ring_offset);

	}

	pgp = hba->slim2.phys + (uint64_t)(unsigned long)& (mbox->us.s2.port);
	((SLIM2 *)hba->slim2.virt)->pcb.pgpAddrHigh =
	    (uint32_t)putPaddrHigh(pgp);
	((SLIM2 *)hba->slim2.virt)->pcb.pgpAddrLow = (uint32_t)putPaddrLow(pgp);

	/* Lay out each ring's command and response IOCB areas */
	/* back-to-back within the SLIM2 IOCB region */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->ring[i];
		rdsc = &((SLIM2 *) hba->slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->slim2.phys +
		    (uint64_t)(unsigned long)& (slim->IOCBs[offset]);
		rdsc->cmdAddrHigh = (uint32_t)putPaddrHigh(rgp);
		rdsc->cmdAddrLow = (uint32_t)putPaddrLow(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr = (void *) &((SLIM2 *) hba->slim2.virt)->
		    IOCBs[offset];
		offset += rdsc->cmdEntries * hba->iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->slim2.phys +
		    (uint64_t)(unsigned long)& (slim->IOCBs[offset]);
		rdsc->rspAddrHigh = (uint32_t)putPaddrHigh(rgp);
		rdsc->rspAddrLow = (uint32_t)putPaddrLow(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr = (void *) &((SLIM2 *) hba->slim2.virt)->
		    IOCBs[offset];
		offset += rdsc->rspEntries * hba->iocb_rsp_size;
	}

	/* NOTE(review): source and destination are the same address; this */
	/* looks like an in-place endian conversion of the PCB via */
	/* emlxs_pcimem_bcopy -- confirm that is the intent */
	emlxs_pcimem_bcopy((uint32_t *)(&((SLIM2 *) hba->slim2.virt)->pcb),
	    (uint32_t *)(&((SLIM2 *) hba->slim2.virt)->pcb), sizeof (PCB));

	/* Flush the PCB out to the device before CONFIG_PORT is issued */
	offset =
	    ((uint64_t)(unsigned long)& (((SLIM2 *) hba->slim2.virt)->pcb) -
	    (uint64_t)(unsigned long)hba->slim2.virt);
	emlxs_mpdata_sync(hba->slim2.dma_handle, (off_t)offset, sizeof (PCB),
	    DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */


#ifdef SLI3_SUPPORT
/*
 * Stage a CONFIG_HBQ mailbox command in 'mb' for the host buffer
 * queue described by hba->hbq_table[hbq_id]. (SLI3 only)
 */
extern void
emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOX *mb, int hbq_id)
{
	HBQ_INIT_t *hbq;
	int i;

	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	hbq = &hba->hbq_table[hbq_id];

	mb->un.varCfgHbq.hbqId = hbq_id;
	mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
	mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
	mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
	mb->un.varCfgHbq.profile = hbq->HBQ_profile;
	mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
	mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
	mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
	mb->un.varCfgHbq.hbqaddrLow = putPaddrLow(hbq->HBQ_host_buf.phys);
	mb->un.varCfgHbq.hbqaddrHigh = putPaddrHigh(hbq->HBQ_host_buf.phys);
	mb->mbxCommand = MBX_CONFIG_HBQ;
	mb->mbxOwner = OWN_HOST;

	/* Copy info for profiles 2,3,5.
Other profiles this area is reserved */
	if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
	    (hbq->HBQ_profile == 5)) {
		bcopy(&hbq->profiles.allprofiles,
		    &mb->un.varCfgHbq.profiles.allprofiles,
		    sizeof (hbq->profiles));
	}
	/* Return if no rctl / type masks for this HBQ */
	if (!hbq->HBQ_num_mask) {
		return;
	}
	/* Otherwise we setup specific rctl / type masks for this HBQ */
	for (i = 0; i < hbq->HBQ_num_mask; i++) {
		mb->un.varCfgHbq.hbqMasks[i].tmatch = hbq->HBQ_Masks[i].tmatch;
		mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
		mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
		    hbq->HBQ_Masks[i].rctlmatch;
		mb->un.varCfgHbq.hbqMasks[i].rctlmask =
		    hbq->HBQ_Masks[i].rctlmask;
	}

	return;

} /* emlxs_mb_config_hbq() */

#endif	/* SLI3_SUPPORT */


/*
 * Allocate and issue a REG_VPI mailbox command for this port (NPIV).
 * Returns 0 on success or when NPIV is disabled; 1 when the port is
 * not ready, has no port id, or no mailbox could be allocated.
 */
extern uint32_t
emlxs_mb_reg_vpi(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;

	if (!(hba->flag & FC_NPIV_ENABLED)) {
		return (0);
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Can't reg vpi until ClearLA is sent */
	if (hba->state != FC_READY) {
		mutex_exit(&EMLXS_PORT_LOCK);

		return (1);
	}
	/* Must have port id */
	if (!port->did) {
		mutex_exit(&EMLXS_PORT_LOCK);

		return (1);
	}
	if (!(mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		return (1);
	}
	port->flag |= EMLXS_PORT_REGISTERED;

	mutex_exit(&EMLXS_PORT_LOCK);

	mb = (MAILBOX *) mbq->mbox;
	bzero((void *) mb, MAILBOX_CMD_BSIZE);
	mb->un.varRegVpi.vpi = port->vpi;
	mb->un.varRegVpi.sid = port->did;
	mb->mbxCommand = MBX_REG_VPI;
	mb->mbxOwner = OWN_HOST;

	/* If the command was not queued for later, free the mailbox now */
	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}
	return (0);

} /* emlxs_mb_reg_vpi() */


/*
 * Allocate and issue an UNREG_VPI mailbox command for this port (NPIV).
 * Returns 0 on success or when the port was never registered; 1 when
 * no mailbox could be allocated.
 */
extern uint32_t
emlxs_mb_unreg_vpi(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX *mb;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (!(port->flag & EMLXS_PORT_REGISTERED)) {
		mutex_exit(&EMLXS_PORT_LOCK);

		return (0);
	}
	if (!(mbq = (MAILBOXQ *) emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		return (1);
	}
	port->flag &= ~EMLXS_PORT_REGISTERED;

	mutex_exit(&EMLXS_PORT_LOCK);

	mb = (MAILBOX *) mbq->mbox;
	bzero((void *) mb, MAILBOX_CMD_BSIZE);
	mb->un.varUnregVpi.vpi = port->vpi;
	mb->mbxCommand = MBX_UNREG_VPI;
	mb->mbxOwner = OWN_HOST;

	/* If the command was not queued for later, free the mailbox now */
	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}
	return (0);

} /* emlxs_mb_unreg_vpi() */


/*
 * emlxs_mb_config_farp  Issue a CONFIG FARP
 * mailbox command
 *
 * Enables FARP filtering on both the port name and node name,
 * using the adapter WWPN for both name fields.
 */
extern void
emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	bcopy((uint8_t *)& hba->wwpn,
	    (uint8_t *)& mb->un.varCfgFarp.portname,
	    sizeof (NAME_TYPE));

	bcopy((uint8_t *)& hba->wwpn,
	    (uint8_t *)& mb->un.varCfgFarp.nodename,
	    sizeof (NAME_TYPE));

	mb->un.varCfgFarp.filterEnable = 1;
	mb->un.varCfgFarp.portName = 1;
	mb->un.varCfgFarp.nodeName = 1;
	mb->mbxCommand = MBX_CONFIG_FARP;
	mb->mbxOwner = OWN_HOST;
} /* emlxs_mb_config_farp() */


/*
 * emlxs_mb_read_config  Issue a READ CONFIG
 * mailbox command
 */
/* ARGSUSED */
extern void
emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOX *mb)
{
	bzero((void *) mb, MAILBOX_CMD_BSIZE);

	mb->mbxCommand = MBX_READ_CONFIG;
	mb->mbxOwner = OWN_HOST;

} /* emlxs_mb_read_config() */



/*
 *
NAME: emlxs_mb_put
 *
 * FUNCTION: put mailbox cmd onto the mailbox queue.
 *
 * EXECUTION ENVIRONMENT: process and interrupt level.
 *
 * NOTES:
 *
 * CALLED FROM: emlxs_mb_issue_cmd
 *
 * INPUT: hba - pointer to the device info area
 *        mbq - pointer to mailbox queue entry of mailbox cmd
 *
 * RETURNS: NULL - command queued
 */
extern void
emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
{

	mutex_enter(&EMLXS_MBOX_LOCK);

	if (hba->mbox_queue.q_first) {

		/*
		 * queue command to end of list
		 */
		((MAILBOXQ *) hba->mbox_queue.q_last)->next = mbq;
		hba->mbox_queue.q_last = (uint8_t *)mbq;
		hba->mbox_queue.q_cnt++;
	} else {

		/*
		 * add command to empty list
		 */
		hba->mbox_queue.q_first = (uint8_t *)mbq;
		hba->mbox_queue.q_last = (uint8_t *)mbq;
		hba->mbox_queue.q_cnt = 1;
	}

	mbq->next = NULL;

	mutex_exit(&EMLXS_MBOX_LOCK);
} /* emlxs_mb_put() */


/*
 * NAME: emlxs_mb_get
 *
 * FUNCTION: get a mailbox command from mailbox command queue
 *
 * EXECUTION ENVIRONMENT: interrupt level.
 *
 * NOTES:
 *
 * CALLED FROM: emlxs_handle_mb_event
 *
 * INPUT: hba - pointer to the device info area
 *
 * RETURNS: NULL - no match found
 *          mb pointer - pointer to a mailbox command
 */
extern MAILBOXQ *
emlxs_mb_get(emlxs_hba_t *hba)
{
	MAILBOXQ *p_first = NULL;

	mutex_enter(&EMLXS_MBOX_LOCK);

	if (hba->mbox_queue.q_first) {
		/* Unlink the head entry and fix up queue accounting */
		p_first = (MAILBOXQ *) hba->mbox_queue.q_first;
		hba->mbox_queue.q_first = (uint8_t *)p_first->next;

		if (hba->mbox_queue.q_first == NULL) {
			hba->mbox_queue.q_last = NULL;
			hba->mbox_queue.q_cnt = 0;
		} else {
			hba->mbox_queue.q_cnt--;
		}

		p_first->next = NULL;
	}
	mutex_exit(&EMLXS_MBOX_LOCK);

	return (p_first);

} /* emlxs_mb_get() */



/*
 * Stage an outgoing mailbox command as the hba's active mailbox:
 * start the timeout, record the queue flag, move any attached
 * resources (data buffer, pkt, unsolicited buffer, iocb, extension
 * buffer) from the MAILBOXQ to the hba for completion-time handling,
 * and DMA-sync the data buffer toward the device.
 * EMLXS_PORT_LOCK must be held when calling this
 */
static void
emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
{
	MATCHMAP *mp;

	HBASTATS.MboxIssued++;
	hba->mbox_queue_flag = flag;

	/* Set the Mailbox timer */
	hba->mbox_timer = hba->timer_tics + tmo;

	/* Initialize mailbox */
	mbq->flag &= MBQ_INIT_MASK;
	hba->mbox_mbqflag = mbq->flag;

	mbq->next = 0;

	mutex_enter(&EMLXS_MBOX_LOCK);
	/* Only waiters (sleep/poll) need the MAILBOXQ saved for wakeup */
	if (flag == MBX_NOWAIT) {
		hba->mbox_mbq = 0;
	} else {
		hba->mbox_mbq = (uint8_t *)mbq;
	}
	mutex_exit(&EMLXS_MBOX_LOCK);

	if (mbq->bp) {
		mp = (MATCHMAP *) mbq->bp;
		emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		hba->mbox_bp = mbq->bp;
		mbq->bp = 0;
	}
	if (mbq->sbp) {
		hba->mbox_sbp = mbq->sbp;
		mbq->sbp = 0;
	}
	if (mbq->ubp) {
		hba->mbox_ubp = mbq->ubp;
		mbq->ubp = 0;
	}
	if (mbq->iocbq) {
		hba->mbox_iocbq = mbq->iocbq;
		mbq->iocbq = 0;
	}
#ifdef MBOX_EXT_SUPPORT
	if (mbq->extbuf && mbq->extsize) {
		hba->mbox_ext = mbq->extbuf;
		hba->mbox_ext_size = mbq->extsize;
	}
#endif	/* MBOX_EXT_SUPPORT */

	return;

} /* emlxs_mb_init() */


/*
 * Complete the active mailbox command: update stats, detach all saved
 * resources from the hba under EMLXS_PORT_LOCK, copy the result back
 * into a waiting caller's mailbox and wake it, then run the deferred
 * completions (buffer free, pkt completion, unsolicited buffer
 * callback, deferred iocb transmit).
 */
extern void
emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *mbox_bp;
	emlxs_buf_t *mbox_sbp;
	fc_unsol_buf_t *mbox_ubp;
	IOCBQ *mbox_iocbq;
	MAILBOXQ *mbox_mbq;
	MAILBOX *mbox;
	uint32_t mbox_queue_flag;
	emlxs_ub_priv_t *ub_priv;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->mbox_queue_flag) {
		HBASTATS.MboxCompleted++;

		if (mbxStatus != MBX_SUCCESS) {
			HBASTATS.MboxError++;
		} else {
			HBASTATS.MboxGood++;
		}
	}
	/* Snapshot the saved mailbox state locally, then clear it on */
	/* the hba while still holding the lock */
	mbox_bp = (MATCHMAP *) hba->mbox_bp;
	mbox_sbp = (emlxs_buf_t *)hba->mbox_sbp;
	mbox_ubp = (fc_unsol_buf_t *)hba->mbox_ubp;
	mbox_iocbq = (IOCBQ *) hba->mbox_iocbq;
	mbox_mbq = (MAILBOXQ *) hba->mbox_mbq;
	mbox_queue_flag = hba->mbox_queue_flag;

#ifdef MBOX_EXT_SUPPORT
	hba->mbox_ext = 0;
	hba->mbox_ext_size = 0;
#endif	/* MBOX_EXT_SUPPORT */

	hba->mbox_bp = 0;
	hba->mbox_sbp = 0;
	hba->mbox_ubp = 0;
	hba->mbox_iocbq = 0;
	hba->mbox_mbqflag = 0;
	hba->mbox_mbq = 0;
	hba->mbox_timer = 0;
	hba->mbox_queue_flag = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	if (mbox_mbq) {
		if (mb) {
			/*
			 * Copy the local mailbox provided back into the
			 * original mailbox
			 */
			bcopy((uint32_t *)mb, (uint32_t *)mbox_mbq,
			    MAILBOX_CMD_BSIZE);
		}
		mbox = (MAILBOX *) mbox_mbq;
		mbox->mbxStatus = mbxStatus;

		/* Mark mailbox complete */
		mbox_mbq->flag |= MBQ_COMPLETED;

		/* Wake up the sleeping thread */
		if (mbox_queue_flag == MBX_SLEEP) {
			mutex_enter(&EMLXS_MBOX_LOCK);
			cv_broadcast(&EMLXS_MBOX_CV);
			mutex_exit(&EMLXS_MBOX_LOCK);
		}
	}
	/* Check for deferred MBUF cleanup */
	if (mbox_bp && (mbox_queue_flag == MBX_NOWAIT)) {
		(void) emlxs_mem_put(hba, MEM_BUF, (uint8_t *)mbox_bp);
	}
#ifdef SFCT_SUPPORT
	/* NOTE(review): 'mb' is dereferenced here without a NULL check, */
	/* while the mbox_mbq path above guards 'if (mb)' -- confirm all */
	/* SFCT callers pass a valid mb */
	if (mbox_sbp && mbox_sbp->fct_cmd) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
		    "FCT mailbox: %s: status=%x",
		    emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}
#endif	/* SFCT_SUPPORT */

	/* Check for deferred pkt completion */
	if (mbox_sbp) {
		if (mbxStatus != MBX_SUCCESS) {
			/* Set error status */
			mbox_sbp->pkt_flags &= ~PACKET_STATE_VALID;
			emlxs_set_pkt_state(mbox_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_RESOURCES, 1);
		}
		emlxs_pkt_complete(mbox_sbp, -1, 0, 1);
	}
	/* Check for deferred ub completion */
	if (mbox_ubp) {
		ub_priv = mbox_ubp->ub_fca_private;
		port = ub_priv->port;

		emlxs_ub_callback(port, mbox_ubp);
	}
	/* Check for deferred iocb tx */
	if (mbox_iocbq) {
		emlxs_issue_iocb_cmd(hba, mbox_iocbq->ring, mbox_iocbq);
	}
	return;

} /* emlxs_mb_fini() */



/*
 * Move the active mailbox's saved resources back onto the given
 * mailbox so the command can be reissued, and clear the active state.
 * This should only be called with active MBX_NOWAIT mailboxes
 */
static void
emlxs_mb_retry(emlxs_hba_t *hba, MAILBOX *mb)
{
	MAILBOXQ *mbq;

	mutex_enter(&EMLXS_PORT_LOCK);

	HBASTATS.MboxCompleted++;

	if (mb->mbxStatus != 0) {
		HBASTATS.MboxError++;
	} else {
		HBASTATS.MboxGood++;
	}

	/* NOTE(review): MAILBOX is cast to MAILBOXQ -- this assumes the */
	/* MAILBOX is embedded at the start of a MAILBOXQ; confirm */
	mbq = (MAILBOXQ *) mb;
	mbq->bp = (uint8_t *)hba->mbox_bp;
	mbq->sbp = (uint8_t *)hba->mbox_sbp;
	mbq->ubp = (uint8_t *)hba->mbox_ubp;
	mbq->iocbq = (uint8_t *)hba->mbox_iocbq;

	hba->mbox_bp = 0;
	hba->mbox_sbp = 0;
	hba->mbox_ubp = 0;
	hba->mbox_iocbq = 0;
	hba->mbox_mbq = 0;
	hba->mbox_mbqflag = 0;
	hba->mbox_queue_flag = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_mb_retry() */



/*
 * emlxs_handle_mb_event
 *
 * 
Description: Process a Mailbox Attention. 1865 * Called from host_interrupt to process MBATT 1866 * 1867 * Returns: 1868 * 1869 */ 1870 extern uint32_t 1871 emlxs_handle_mb_event(emlxs_hba_t *hba) 1872 { 1873 emlxs_port_t *port = &PPORT; 1874 MAILBOX *mb; 1875 MAILBOX *swpmb; 1876 MAILBOX *mbox; 1877 MAILBOXQ *mbq; 1878 emlxs_config_t *cfg; 1879 uint32_t control; 1880 volatile uint32_t word0; 1881 MATCHMAP *mbox_bp; 1882 uint32_t la_enable; 1883 off_t offset; 1884 uint32_t i; 1885 MAILBOXQ mailbox; 1886 1887 cfg = &CFG; 1888 swpmb = (MAILBOX *) & word0; 1889 mb = (MAILBOX *) & mailbox; 1890 1891 switch (hba->mbox_queue_flag) { 1892 case 0: 1893 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg, 1894 "No mailbox active."); 1895 return (0); 1896 1897 case MBX_POLL: 1898 1899 /* 1900 * Mark mailbox complete, this should wake up any polling 1901 * threads 1902 */ 1903 /* 1904 * This can happen if interrupts are enabled while a polled 1905 * mailbox command is outstanding 1906 */ 1907 /* 1908 * If we don't set MBQ_COMPLETED here, the polling thread may 1909 * wait until timeout error occurs 1910 */ 1911 1912 mutex_enter(&EMLXS_MBOX_LOCK); 1913 mbq = (MAILBOXQ *) hba->mbox_mbq; 1914 if (mbq) { 1915 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 1916 "Mailbox event. 
Completing Polled command."); 1917 mbq->flag |= MBQ_COMPLETED; 1918 } 1919 mutex_exit(&EMLXS_MBOX_LOCK); 1920 1921 return (0); 1922 1923 case MBX_SLEEP: 1924 case MBX_NOWAIT: 1925 break; 1926 1927 default: 1928 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg, 1929 "Invalid Mailbox flag (%x)."); 1930 return (0); 1931 } 1932 1933 /* Get first word of mailbox */ 1934 if (hba->flag & FC_SLIM2_MODE) { 1935 mbox = FC_SLIM2_MAILBOX(hba); 1936 offset = (off_t)((uint64_t)(unsigned long)mbox - 1937 (uint64_t)(unsigned long)hba->slim2.virt); 1938 1939 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 1940 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 1941 word0 = *((volatile uint32_t *) mbox); 1942 word0 = PCIMEM_LONG(word0); 1943 } else { 1944 mbox = FC_SLIM1_MAILBOX(hba); 1945 word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *) mbox)); 1946 } 1947 1948 i = 0; 1949 while (swpmb->mbxOwner == OWN_CHIP) { 1950 if (i++ > 10000) { 1951 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg, 1952 "OWN_CHIP: %s: status=%x", 1953 emlxs_mb_cmd_xlate(swpmb->mbxCommand), 1954 swpmb->mbxStatus); 1955 1956 return (1); 1957 } 1958 /* Get first word of mailbox */ 1959 if (hba->flag & FC_SLIM2_MODE) { 1960 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 1961 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL); 1962 word0 = *((volatile uint32_t *) mbox); 1963 word0 = PCIMEM_LONG(word0); 1964 } else { 1965 word0 = READ_SLIM_ADDR(hba, 1966 ((volatile uint32_t *) mbox)); 1967 } 1968 } 1969 1970 /* Now that we are the owner, DMA Sync entire mailbox if needed */ 1971 if (hba->flag & FC_SLIM2_MODE) { 1972 emlxs_mpdata_sync(hba->slim2.dma_handle, offset, 1973 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL); 1974 emlxs_pcimem_bcopy((uint32_t *)mbox, (uint32_t *)mb, 1975 MAILBOX_CMD_BSIZE); 1976 } else { 1977 READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox, 1978 MAILBOX_CMD_WSIZE); 1979 } 1980 1981 #ifdef MBOX_EXT_SUPPORT 1982 if (hba->mbox_ext) { 1983 uint32_t *mbox_ext = (uint32_t *)((uint8_t *)mbox + 
1984 MBOX_EXTENSION_OFFSET); 1985 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET; 1986 1987 if (hba->flag & FC_SLIM2_MODE) { 1988 emlxs_mpdata_sync(hba->slim2.dma_handle, offset_ext, 1989 hba->mbox_ext_size, DDI_DMA_SYNC_FORKERNEL); 1990 emlxs_pcimem_bcopy(mbox_ext, (uint32_t *)hba->mbox_ext, 1991 hba->mbox_ext_size); 1992 } else { 1993 READ_SLIM_COPY(hba, (uint32_t *)hba->mbox_ext, mbox_ext, 1994 (hba->mbox_ext_size / 4)); 1995 } 1996 } 1997 #endif /* MBOX_EXT_SUPPORT */ 1998 1999 /* Now sync the memory buffer if one was used */ 2000 if (hba->mbox_bp) { 2001 mbox_bp = (MATCHMAP *) hba->mbox_bp; 2002 emlxs_mpdata_sync(mbox_bp->dma_handle, 0, mbox_bp->size, 2003 DDI_DMA_SYNC_FORKERNEL); 2004 } 2005 /* Mailbox has been completely received at this point */ 2006 2007 if (mb->mbxCommand == MBX_HEARTBEAT) { 2008 hba->heartbeat_active = 0; 2009 goto done; 2010 } 2011 if (hba->mbox_queue_flag == MBX_SLEEP) { 2012 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 2013 "Received. %s: status=%x Sleep.", 2014 emlxs_mb_cmd_xlate(swpmb->mbxCommand), swpmb->mbxStatus); 2015 } else { 2016 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg, 2017 "Completed. %s: status=%x", 2018 emlxs_mb_cmd_xlate(swpmb->mbxCommand), swpmb->mbxStatus); 2019 } 2020 2021 /* Filter out passthru mailbox */ 2022 if (hba->mbox_mbqflag & MBQ_PASSTHRU) { 2023 goto done; 2024 } 2025 /* If succesful, process the result */ 2026 if (mb->mbxStatus == 0) { 2027 (void) emlxs_mb_handle_cmd(hba, mb); 2028 goto done; 2029 } 2030 /* ERROR RETURNED */ 2031 2032 /* Check for no resources */ 2033 if ((mb->mbxStatus == MBXERR_NO_RESOURCES) && 2034 (hba->mbox_queue_flag == MBX_NOWAIT)) { 2035 /* Retry only MBX_NOWAIT requests */ 2036 2037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg, 2038 "Retrying. 
%s: status=%x", 2039 emlxs_mb_cmd_xlate(mb->mbxCommand), 2040 (uint32_t)mb->mbxStatus); 2041 2042 if ((mbox = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX))) { 2043 bcopy((uint8_t *)mb, (uint8_t *)mbox, 2044 MAILBOX_CMD_BSIZE); 2045 2046 switch (mbox->mbxCommand) { 2047 case MBX_READ_SPARM: 2048 control = mbox->un.varRdSparm.un.sp.bdeSize; 2049 if (control == 0) { 2050 (void) emlxs_mb_read_sparam(hba, mbox); 2051 } 2052 break; 2053 2054 case MBX_READ_SPARM64: 2055 control = mbox->un.varRdSparm.un.sp64.tus.f. 2056 bdeSize; 2057 if (control == 0) { 2058 (void) emlxs_mb_read_sparam(hba, mbox); 2059 } 2060 break; 2061 2062 case MBX_REG_LOGIN: 2063 control = mbox->un.varRegLogin.un.sp.bdeSize; 2064 if (control == 0) { 2065 #ifdef NPIV_SUPPORT 2066 /* Special handle for vport PLOGI */ 2067 if (hba->mbox_iocbq == (uint8_t *)1) { 2068 hba->mbox_iocbq = NULL; 2069 } 2070 #endif /* NPIV_SUPPORT */ 2071 goto done; 2072 } 2073 break; 2074 2075 case MBX_REG_LOGIN64: 2076 control = mbox->un.varRegLogin.un.sp64.tus.f. 2077 bdeSize; 2078 if (control == 0) { 2079 #ifdef NPIV_SUPPORT 2080 /* Special handle for vport PLOGI */ 2081 if (hba->mbox_iocbq == (uint8_t *)1) { 2082 hba->mbox_iocbq = NULL; 2083 } 2084 #endif /* NPIV_SUPPORT */ 2085 goto done; 2086 } 2087 break; 2088 2089 case MBX_READ_LA: 2090 control = mbox->un.varReadLA.un.lilpBde.bdeSize; 2091 if (control == 0) { 2092 (void) emlxs_mb_read_la(hba, mbox); 2093 } 2094 break; 2095 2096 case MBX_READ_LA64: 2097 control = mbox->un.varReadLA.un.lilpBde64.tus.f. 
2098 bdeSize; 2099 if (control == 0) { 2100 (void) emlxs_mb_read_la(hba, mbox); 2101 } 2102 break; 2103 } 2104 2105 mbox->mbxOwner = OWN_HOST; 2106 mbox->mbxStatus = 0; 2107 2108 /* Refresh the mailbox area */ 2109 emlxs_mb_retry(hba, mbox); 2110 2111 if (emlxs_mb_issue_cmd(hba, mbox, MBX_NOWAIT, 0) != 2112 MBX_BUSY) { 2113 (void) emlxs_mem_put(hba, MEM_MBOX, 2114 (uint8_t *)mbox); 2115 } 2116 return (0); 2117 } 2118 } 2119 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg, 2120 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand), 2121 (uint32_t)mb->mbxStatus); 2122 2123 /* 2124 * ERROR: process mailbox command error 2125 */ 2126 switch (mb->mbxCommand) { 2127 case MBX_REG_LOGIN: 2128 case MBX_REG_LOGIN64: 2129 2130 if (mb->mbxStatus == MBXERR_RPI_FULL) { 2131 #ifdef SLI3_SUPPORT 2132 port = &VPORT(mb->un.varRegLogin.vpi); 2133 #endif /* SLI3_SUPPORT */ 2134 2135 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg, 2136 "Limit reached. count=%d", port->node_count); 2137 } 2138 #ifdef NPIV_SUPPORT 2139 /* Special handle for vport PLOGI */ 2140 if (hba->mbox_iocbq == (uint8_t *)1) { 2141 hba->mbox_iocbq = NULL; 2142 } 2143 #endif /* NPIV_SUPPORT */ 2144 break; 2145 2146 case MBX_READ_LA: 2147 case MBX_READ_LA64: 2148 2149 /* Enable Link Attention interrupts */ 2150 mutex_enter(&EMLXS_PORT_LOCK); 2151 2152 if (!(hba->hc_copy & HC_LAINT_ENA)) { 2153 /* 2154 * hba->hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba, 2155 * hba->csr_addr)); 2156 */ 2157 hba->hc_copy |= HC_LAINT_ENA; 2158 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), 2159 hba->hc_copy); 2160 } 2161 mutex_exit(&EMLXS_PORT_LOCK); 2162 2163 break; 2164 2165 2166 case MBX_CLEAR_LA: 2167 2168 la_enable = 1; 2169 2170 if (mb->mbxStatus == 0x1601) { 2171 /* 2172 * Get a buffer which will be used for mailbox 2173 * commands 2174 */ 2175 if ((mbox = (MAILBOX *) emlxs_mem_get(hba, MEM_MBOX | 2176 MEM_PRI))) { 2177 /* Get link attention message */ 2178 if (emlxs_mb_read_la(hba, mbox) == 0) { 2179 if 
(emlxs_mb_issue_cmd(hba, mbox, 2180 MBX_NOWAIT, 0) != MBX_BUSY) { 2181 (void) emlxs_mem_put(hba, 2182 MEM_MBOX, (uint8_t *)mbox); 2183 } 2184 la_enable = 0; 2185 } else { 2186 (void) emlxs_mem_put(hba, MEM_MBOX, 2187 (uint8_t *)mbox); 2188 } 2189 } 2190 } 2191 mutex_enter(&EMLXS_PORT_LOCK); 2192 if (la_enable) { 2193 if (!(hba->hc_copy & HC_LAINT_ENA)) { 2194 /* Enable Link Attention interrupts */ 2195 /* 2196 * hba->hc_copy = READ_CSR_REG(hba, 2197 * FC_HC_REG(hba, hba->csr_addr)); 2198 */ 2199 hba->hc_copy |= HC_LAINT_ENA; 2200 WRITE_CSR_REG(hba, 2201 FC_HC_REG(hba, hba->csr_addr), 2202 hba->hc_copy); 2203 } 2204 } else { 2205 if (hba->hc_copy & HC_LAINT_ENA) { 2206 /* Disable Link Attention interrupts */ 2207 /* 2208 * hba->hc_copy = READ_CSR_REG(hba, 2209 * FC_HC_REG(hba, hba->csr_addr)); 2210 */ 2211 hba->hc_copy &= ~HC_LAINT_ENA; 2212 WRITE_CSR_REG(hba, 2213 FC_HC_REG(hba, hba->csr_addr), 2214 hba->hc_copy); 2215 } 2216 } 2217 mutex_exit(&EMLXS_PORT_LOCK); 2218 2219 break; 2220 2221 case MBX_INIT_LINK: 2222 if ((hba->flag & FC_SLIM2_MODE) && 2223 (hba->mbox_queue_flag == MBX_NOWAIT)) { 2224 /* Retry only MBX_NOWAIT requests */ 2225 2226 if ((cfg[CFG_LINK_SPEED].current > 0) && 2227 ((mb->mbxStatus == 0x0011) || 2228 (mb->mbxStatus == 0x0500))) { 2229 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg, 2230 "Retrying. %s: status=%x. 
Auto-speed set.", 2231 emlxs_mb_cmd_xlate(mb->mbxCommand), 2232 (uint32_t)mb->mbxStatus); 2233 2234 if ((mbox = (MAILBOX *) emlxs_mem_get(hba, 2235 MEM_MBOX))) { 2236 bcopy((uint8_t *)mb, (uint8_t *)mbox, 2237 MAILBOX_CMD_BSIZE); 2238 2239 mbox->un.varInitLnk.link_flags &= 2240 ~FLAGS_LINK_SPEED; 2241 mbox->un.varInitLnk.link_speed = 0; 2242 mbox->mbxOwner = OWN_HOST; 2243 mbox->mbxStatus = 0; 2244 2245 /* Refresh the mailbox area */ 2246 emlxs_mb_retry(hba, mbox); 2247 2248 if (emlxs_mb_issue_cmd(hba, mbox, 2249 MBX_NOWAIT, 0) != MBX_BUSY) { 2250 (void) emlxs_mem_put(hba, 2251 MEM_MBOX, (uint8_t *)mbox); 2252 } 2253 return (0); 2254 } 2255 } 2256 } 2257 break; 2258 } 2259 2260 done: 2261 2262 /* Clean up the mailbox area */ 2263 emlxs_mb_fini(hba, mb, mb->mbxStatus); 2264 2265 /* Attempt to send pending mailboxes */ 2266 if ((mbox = (MAILBOX *) emlxs_mb_get(hba))) { 2267 if (emlxs_mb_issue_cmd(hba, mbox, MBX_NOWAIT, 0) != MBX_BUSY) { 2268 (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox); 2269 } 2270 } 2271 return (0); 2272 2273 } /* emlxs_handle_mb_event() */ 2274 2275 2276 2277 /* 2278 * emlxs_mb_handle_cmd 2279 * 2280 * Description: Process a Mailbox Command. 
2281 * Called from host_interrupt to process MBATT 2282 * 2283 * Returns: 2284 * 2285 */ 2286 static int 2287 emlxs_mb_handle_cmd(emlxs_hba_t *hba, MAILBOX *mb) 2288 { 2289 emlxs_port_t *port = &PPORT; 2290 emlxs_port_t *vport; 2291 MAILBOXQ *mbox; 2292 NODELIST *ndlp; 2293 volatile SERV_PARM *sp; 2294 int32_t i; 2295 uint32_t ldata; 2296 uint32_t ldid; 2297 uint16_t lrpi; 2298 uint16_t lvpi; 2299 MATCHMAP *mp; 2300 uint8_t *wwn; 2301 READ_LA_VAR la; 2302 2303 if (mb->mbxStatus != 0) { 2304 return (1); 2305 } 2306 mp = (MATCHMAP *) hba->mbox_bp; 2307 2308 /* 2309 * Mailbox command completed successfully, process completion 2310 */ 2311 switch (mb->mbxCommand) { 2312 case MBX_SHUTDOWN: 2313 case MBX_LOAD_SM: 2314 case MBX_READ_NV: 2315 case MBX_WRITE_NV: 2316 case MBX_RUN_BIU_DIAG: 2317 case MBX_RUN_BIU_DIAG64: 2318 case MBX_INIT_LINK: 2319 case MBX_DOWN_LINK: 2320 case MBX_CONFIG_LINK: 2321 case MBX_PART_SLIM: 2322 case MBX_CONFIG_RING: 2323 case MBX_RESET_RING: 2324 case MBX_READ_CONFIG: 2325 case MBX_READ_RCONFIG: 2326 case MBX_READ_STATUS: 2327 case MBX_READ_XRI: 2328 case MBX_READ_REV: 2329 case MBX_READ_LNK_STAT: 2330 case MBX_UNREG_LOGIN: 2331 case MBX_DUMP_MEMORY: 2332 case MBX_DUMP_CONTEXT: 2333 case MBX_RUN_DIAGS: 2334 case MBX_RESTART: 2335 case MBX_UPDATE_CFG: 2336 case MBX_DOWN_LOAD: 2337 case MBX_DEL_LD_ENTRY: 2338 case MBX_RUN_PROGRAM: 2339 case MBX_SET_MASK: 2340 case MBX_SET_VARIABLE: 2341 case MBX_UNREG_D_ID: 2342 case MBX_KILL_BOARD: 2343 case MBX_CONFIG_FARP: 2344 case MBX_LOAD_AREA: 2345 case MBX_CONFIG_PORT: 2346 case MBX_CONFIG_MSI: 2347 case MBX_FLASH_WR_ULA: 2348 case MBX_SET_DEBUG: 2349 case MBX_GET_DEBUG: 2350 case MBX_LOAD_EXP_ROM: 2351 case MBX_BEACON: 2352 case MBX_READ_RPI: 2353 case MBX_READ_RPI64: 2354 case MBX_REG_VPI: 2355 case MBX_UNREG_VPI: 2356 case MBX_CONFIG_HBQ: 2357 case MBX_ASYNC_EVENT: 2358 case MBX_HEARTBEAT: 2359 break; 2360 2361 case MBX_CONFIG_MSIX: 2362 break; 2363 2364 case MBX_READ_SPARM: /* a READ SPARAM command 
completed */ 2365 case MBX_READ_SPARM64: /* a READ SPARAM command completed */ 2366 { 2367 if (mp) { 2368 bcopy((caddr_t)mp->virt, (caddr_t)& hba->sparam, 2369 sizeof (SERV_PARM)); 2370 2371 bcopy((caddr_t)& hba->sparam.nodeName, 2372 (caddr_t)& hba->wwnn, 2373 sizeof (NAME_TYPE)); 2374 2375 bcopy((caddr_t)& hba->sparam.portName, 2376 (caddr_t)& hba->wwpn, 2377 sizeof (NAME_TYPE)); 2378 2379 /* Initialize the physical port */ 2380 bcopy((caddr_t)& hba->sparam, 2381 (caddr_t)& port->sparam, 2382 sizeof (SERV_PARM)); 2383 bcopy((caddr_t)& hba->wwpn, 2384 (caddr_t)& port->wwpn, sizeof (NAME_TYPE)); 2385 bcopy((caddr_t)& hba->wwnn, 2386 (caddr_t)& port->wwnn, sizeof (NAME_TYPE)); 2387 2388 /* Initialize the virtual ports */ 2389 for (i = 1; i < MAX_VPORTS; i++) { 2390 vport = &VPORT(i); 2391 if (vport->flag & EMLXS_PORT_BOUND) { 2392 continue; 2393 } 2394 bcopy((caddr_t)& hba->sparam, 2395 (caddr_t)& vport->sparam, 2396 sizeof (SERV_PARM)); 2397 2398 bcopy((caddr_t)& vport->wwnn, 2399 (caddr_t)& vport->sparam.nodeName, 2400 sizeof (NAME_TYPE)); 2401 2402 bcopy((caddr_t)& vport->wwpn, 2403 (caddr_t)& vport->sparam.portName, 2404 sizeof (NAME_TYPE)); 2405 } 2406 2407 } 2408 break; 2409 } 2410 2411 2412 case MBX_REG_LOGIN: 2413 case MBX_REG_LOGIN64: 2414 2415 if (!mp) { 2416 break; 2417 } 2418 #ifdef SLI3_SUPPORT 2419 ldata = mb->un.varWords[5]; 2420 lvpi = ldata & 0xffff; 2421 port = &VPORT(lvpi); 2422 #endif /* SLI3_SUPPORT */ 2423 2424 /* First copy command data */ 2425 ldata = mb->un.varWords[0]; /* get rpi */ 2426 lrpi = ldata & 0xffff; 2427 2428 ldata = mb->un.varWords[1]; /* get did */ 2429 ldid = ldata & Mask_DID; 2430 2431 sp = (volatile SERV_PARM *) mp->virt; 2432 ndlp = emlxs_node_find_did(port, ldid); 2433 2434 if (!ndlp) { 2435 /* Attempt to create a node */ 2436 if ((ndlp = (NODELIST *) emlxs_mem_get(hba, MEM_NLP))) { 2437 ndlp->nlp_Rpi = lrpi; 2438 ndlp->nlp_DID = ldid; 2439 2440 bcopy((uint8_t *)sp, 2441 (uint8_t *)& ndlp->sparm, 2442 sizeof (SERV_PARM)); 
2443 2444 bcopy((uint8_t *)& sp->nodeName, 2445 (uint8_t *)& ndlp->nlp_nodename, 2446 sizeof (NAME_TYPE)); 2447 2448 bcopy((uint8_t *)& sp->portName, 2449 (uint8_t *)& ndlp->nlp_portname, 2450 sizeof (NAME_TYPE)); 2451 2452 ndlp->nlp_active = 1; 2453 ndlp->nlp_flag[FC_CT_RING] |= NLP_CLOSED; 2454 ndlp->nlp_flag[FC_ELS_RING] |= NLP_CLOSED; 2455 ndlp->nlp_flag[FC_FCP_RING] |= NLP_CLOSED; 2456 ndlp->nlp_flag[FC_IP_RING] |= NLP_CLOSED; 2457 2458 /* Add the node */ 2459 emlxs_node_add(port, ndlp); 2460 2461 /* Open the node */ 2462 emlxs_node_open(port, ndlp, FC_CT_RING); 2463 emlxs_node_open(port, ndlp, FC_ELS_RING); 2464 emlxs_node_open(port, ndlp, FC_IP_RING); 2465 emlxs_node_open(port, ndlp, FC_FCP_RING); 2466 } else { 2467 wwn = (uint8_t *)& sp->portName; 2468 EMLXS_MSGF(EMLXS_CONTEXT, 2469 &emlxs_node_create_failed_msg, 2470 "Unable to allocate node. did=%06x rpi=%x " 2471 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x", 2472 ldid, lrpi, wwn[0], wwn[1], wwn[2], wwn[3], 2473 wwn[4], wwn[5], wwn[6], wwn[7]); 2474 2475 break; 2476 } 2477 } else { 2478 mutex_enter(&EMLXS_PORT_LOCK); 2479 2480 ndlp->nlp_Rpi = lrpi; 2481 ndlp->nlp_DID = ldid; 2482 2483 bcopy((uint8_t *)sp, 2484 (uint8_t *)& ndlp->sparm, 2485 sizeof (SERV_PARM)); 2486 2487 bcopy((uint8_t *)& sp->nodeName, 2488 (uint8_t *)& ndlp->nlp_nodename, 2489 sizeof (NAME_TYPE)); 2490 2491 bcopy((uint8_t *)& sp->portName, 2492 (uint8_t *)& ndlp->nlp_portname, 2493 sizeof (NAME_TYPE)); 2494 2495 wwn = (uint8_t *)& ndlp->nlp_portname; 2496 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_update_msg, 2497 "node=%p did=%06x rpi=%x wwpn=" 2498 "%02x%02x%02x%02x%02x%02x%02x%02x", 2499 ndlp, ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0], wwn[1], 2500 wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]); 2501 2502 mutex_exit(&EMLXS_PORT_LOCK); 2503 2504 /* Open the node */ 2505 emlxs_node_open(port, ndlp, FC_CT_RING); 2506 emlxs_node_open(port, ndlp, FC_ELS_RING); 2507 emlxs_node_open(port, ndlp, FC_IP_RING); 2508 emlxs_node_open(port, ndlp, FC_FCP_RING); 
2509 } 2510 2511 /* If this was a fabric login */ 2512 if (ndlp->nlp_DID == Fabric_DID) { 2513 /* 2514 * If CLEAR_LA has been sent, then attempt to 2515 * register the vpi now 2516 */ 2517 if (hba->state == FC_READY) { 2518 (void) emlxs_mb_reg_vpi(port); 2519 } 2520 #ifdef SLI3_SUPPORT 2521 /* 2522 * If NPIV Fabric support has just been established 2523 * on the physical port, then notify the vports of 2524 * the link up 2525 */ 2526 if ((lvpi == 0) && 2527 (hba->flag & FC_NPIV_ENABLED) && 2528 (hba->flag & FC_NPIV_SUPPORTED)) { 2529 /* Skip the physical port */ 2530 for (i = 1; i < MAX_VPORTS; i++) { 2531 vport = &VPORT(i); 2532 2533 if (!(vport->flag & EMLXS_PORT_BOUND) || 2534 !(vport->flag & 2535 EMLXS_PORT_ENABLE)) { 2536 continue; 2537 } 2538 emlxs_port_online(vport); 2539 } 2540 } 2541 #endif /* SLI3_SUPPORT */ 2542 2543 } 2544 #ifdef NPIV_SUPPORT 2545 if (hba->mbox_iocbq == (uint8_t *)1) { 2546 hba->mbox_iocbq = NULL; 2547 (void) emlxs_mb_unreg_did(port, ldid, NULL, NULL, NULL); 2548 } 2549 #endif /* NPIV_SUPPORT */ 2550 2551 #ifdef DHCHAP_SUPPORT 2552 if (hba->mbox_sbp || hba->mbox_ubp) { 2553 if (emlxs_dhc_auth_start(port, ndlp, hba->mbox_sbp, 2554 hba->mbox_ubp) == 0) { 2555 /* 2556 * Auth started - auth completion will handle 2557 * sbp and ubp now 2558 */ 2559 hba->mbox_sbp = NULL; 2560 hba->mbox_ubp = NULL; 2561 } 2562 } 2563 #endif /* DHCHAP_SUPPORT */ 2564 2565 #ifdef SFCT_SUPPORT 2566 if (hba->mbox_sbp && ((emlxs_buf_t *)hba->mbox_sbp)->fct_cmd) { 2567 emlxs_buf_t *cmd_sbp = (emlxs_buf_t *)hba->mbox_sbp; 2568 2569 if (cmd_sbp->fct_state == EMLXS_FCT_REG_PENDING) { 2570 hba->mbox_sbp = NULL; 2571 2572 mutex_enter(&EMLXS_PKT_LOCK); 2573 cmd_sbp->node = ndlp; 2574 cmd_sbp->fct_state = EMLXS_FCT_REG_COMPLETE; 2575 cv_broadcast(&EMLXS_PKT_CV); 2576 mutex_exit(&EMLXS_PKT_LOCK); 2577 } 2578 } 2579 #endif /* SFCT_SUPPORT */ 2580 2581 break; 2582 2583 case MBX_READ_LA: 2584 case MBX_READ_LA64: 2585 bcopy((uint32_t *)((char *)mb + sizeof (uint32_t)), 2586 
(uint32_t *)& la, sizeof (READ_LA_VAR)); 2587 2588 if (mp) { 2589 bcopy((caddr_t)mp->virt, 2590 (caddr_t)port->alpa_map, 128); 2591 } else { 2592 bzero((caddr_t)port->alpa_map, 128); 2593 } 2594 2595 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_atten_msg, 2596 "type=%s tag=%d -> %d ALPA=%x", 2597 ((la.attType == AT_LINK_UP) ? 2598 "LinkUp" : "LinkDown"), 2599 (uint32_t)hba->link_event_tag, 2600 (uint32_t)la.eventTag, (uint32_t)la.granted_AL_PA); 2601 2602 if (la.pb) { 2603 hba->flag |= FC_BYPASSED_MODE; 2604 } else { 2605 hba->flag &= ~FC_BYPASSED_MODE; 2606 } 2607 2608 if (hba->link_event_tag == la.eventTag) { 2609 HBASTATS.LinkMultiEvent++; 2610 } else if (hba->link_event_tag + 1 < la.eventTag) { 2611 HBASTATS.LinkMultiEvent++; 2612 2613 if (hba->state > FC_LINK_DOWN) { 2614 /* Declare link down here */ 2615 emlxs_linkdown(hba); 2616 } 2617 } 2618 hba->link_event_tag = la.eventTag; 2619 port->lip_type = 0; 2620 2621 /* If link not already up then declare it up now */ 2622 if ((la.attType == AT_LINK_UP) && 2623 (hba->state < FC_LINK_UP)) { 2624 2625 /* Save the linkspeed */ 2626 hba->linkspeed = la.UlnkSpeed; 2627 2628 /* 2629 * Check for old model adapters that only 2630 * supported 1Gb 2631 */ 2632 if ((hba->linkspeed == 0) && 2633 (hba->model_info.chip & 2634 EMLXS_DRAGONFLY_CHIP)) { 2635 hba->linkspeed = LA_1GHZ_LINK; 2636 } 2637 if ((hba->topology = la.topology) == 2638 TOPOLOGY_LOOP) { 2639 port->did = la.granted_AL_PA; 2640 port->lip_type = la.lipType; 2641 2642 if (hba->flag & FC_SLIM2_MODE) { 2643 i = la.un.lilpBde64.tus.f. 
2644 bdeSize; 2645 } else { 2646 i = la.un.lilpBde.bdeSize; 2647 } 2648 2649 if (i == 0) { 2650 port->alpa_map[0] = 0; 2651 } else { 2652 uint8_t *alpa_map; 2653 uint32_t j; 2654 2655 /* 2656 * Check number of devices in 2657 * map 2658 */ 2659 if (port->alpa_map[0] > 127) { 2660 port->alpa_map[0] = 127; 2661 } 2662 alpa_map = (uint8_t *)port->alpa_map; 2663 2664 EMLXS_MSGF(EMLXS_CONTEXT, 2665 &emlxs_link_atten_msg, 2666 "alpa_map: %d device(s): %02x " 2667 "%02x %02x %02x %02x %02x %02x", 2668 alpa_map[0], alpa_map[1], 2669 alpa_map[2], alpa_map[3], 2670 alpa_map[4], alpa_map[5], 2671 alpa_map[6], alpa_map[7]); 2672 2673 for (j = 8; j <= alpa_map[0]; j += 8) { 2674 EMLXS_MSGF(EMLXS_CONTEXT, 2675 &emlxs_link_atten_msg, 2676 "alpa_map: %02x %02x %02x " 2677 "%02x %02x %02x %02x %02x", 2678 alpa_map[j], 2679 alpa_map[j + 1], 2680 alpa_map[j + 2], 2681 alpa_map[j + 3], 2682 alpa_map[j + 4], 2683 alpa_map[j + 5], 2684 alpa_map[j + 6], 2685 alpa_map[j + 7]); 2686 } 2687 } 2688 } 2689 #ifdef MENLO_SUPPORT 2690 /* Check if Menlo maintenance mode is enabled */ 2691 if (hba->model_info.device_id == 2692 PCI_DEVICE_ID_LP21000_M) { 2693 if (la.mm == 1) { 2694 EMLXS_MSGF(EMLXS_CONTEXT, 2695 &emlxs_link_atten_msg, 2696 "Maintenance Mode enabled."); 2697 2698 mutex_enter(&EMLXS_PORT_LOCK); 2699 hba->flag |= FC_MENLO_MODE; 2700 mutex_exit(&EMLXS_PORT_LOCK); 2701 2702 mutex_enter(&EMLXS_LINKUP_LOCK); 2703 cv_broadcast(&EMLXS_LINKUP_CV); 2704 mutex_exit(&EMLXS_LINKUP_LOCK); 2705 } else { 2706 EMLXS_MSGF(EMLXS_CONTEXT, 2707 &emlxs_link_atten_msg, 2708 "Maintenance Mode disabled."); 2709 } 2710 2711 /* Check FCoE attention bit */ 2712 if (la.fa == 1) { 2713 (void) thread_create(NULL, 0, 2714 emlxs_fcoe_attention_thread, 2715 (char *)hba, 0, 2716 &p0, TS_RUN, 2717 v.v_maxsyspri - 2); 2718 } 2719 } 2720 #endif /* MENLO_SUPPORT */ 2721 2722 if ((mbox = (MAILBOXQ *) emlxs_mem_get(hba, 2723 MEM_MBOX | MEM_PRI))) { 2724 /* 2725 * This should turn on DELAYED ABTS 2726 * for ELS timeouts 
2727 */ 2728 emlxs_mb_set_var(hba, (MAILBOX *) mbox, 2729 0x00052198, 0x1); 2730 2731 emlxs_mb_put(hba, mbox); 2732 } 2733 if ((mbox = (MAILBOXQ *) emlxs_mem_get(hba, 2734 MEM_MBOX | MEM_PRI))) { 2735 /* 2736 * If link not already down then 2737 * declare it down now 2738 */ 2739 if (emlxs_mb_read_sparam(hba, 2740 (MAILBOX *) mbox) == 0) { 2741 emlxs_mb_put(hba, mbox); 2742 } else { 2743 (void) emlxs_mem_put(hba, MEM_MBOX, 2744 (uint8_t *)mbox); 2745 } 2746 } 2747 if ((mbox = (MAILBOXQ *) emlxs_mem_get(hba, 2748 MEM_MBOX | MEM_PRI))) { 2749 emlxs_mb_config_link(hba, 2750 (MAILBOX *) mbox); 2751 2752 emlxs_mb_put(hba, mbox); 2753 } 2754 /* Declare the linkup here */ 2755 emlxs_linkup(hba); 2756 } 2757 /* If link not already down then declare it down now */ 2758 else if ((la.attType == AT_LINK_DOWN) && 2759 (hba->state > FC_LINK_DOWN)) { 2760 /* Declare link down here */ 2761 emlxs_linkdown(hba); 2762 } 2763 /* Enable Link attention interrupt */ 2764 mutex_enter(&EMLXS_PORT_LOCK); 2765 2766 if (!(hba->hc_copy & HC_LAINT_ENA)) { 2767 /* 2768 * hba->hc_copy = READ_CSR_REG(hba, 2769 * FC_HC_REG(hba, hba->csr_addr)); 2770 */ 2771 hba->hc_copy |= HC_LAINT_ENA; 2772 WRITE_CSR_REG(hba, FC_HC_REG(hba, 2773 hba->csr_addr), hba->hc_copy); 2774 } 2775 mutex_exit(&EMLXS_PORT_LOCK); 2776 2777 /* Log the link event */ 2778 emlxs_log_link_event(port); 2779 2780 break; 2781 2782 case MBX_CLEAR_LA: 2783 /* Enable on Link Attention interrupts */ 2784 mutex_enter(&EMLXS_PORT_LOCK); 2785 2786 if (!(hba->hc_copy & HC_LAINT_ENA)) { 2787 /* 2788 * hba->hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba, 2789 * hba->csr_addr)); 2790 */ 2791 hba->hc_copy |= HC_LAINT_ENA; 2792 WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), 2793 hba->hc_copy); 2794 } 2795 if (hba->state >= FC_LINK_UP) { 2796 emlxs_ffstate_change_locked(hba, FC_READY); 2797 } 2798 mutex_exit(&EMLXS_PORT_LOCK); 2799 2800 /* Adapter is now ready for FCP traffic */ 2801 if (hba->state == FC_READY) { 2802 /* Register vpi's for all ports 
			 * that have did's */
			for (i = 0; i < MAX_VPORTS; i++) {
				vport = &VPORT(i);

				/* Skip unbound ports and ports without a did */
				if (!(vport->flag & EMLXS_PORT_BOUND) ||
				    !(vport->did)) {
					continue;
				}
				(void) emlxs_mb_reg_vpi(vport);
			}

			/* Attempt to send any pending IO */
			emlxs_issue_iocb_cmd(hba, &hba->ring[FC_FCP_RING], 0);
		}
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "Unknown mailbox cmd: 0x%x", mb->mbxCommand);
		HBASTATS.MboxInvalid++;
		break;
	}

	return (0);

} /* emlxs_mb_handle_cmd() */


/*
 * emlxs_mb_issue_cmd
 *
 * Issue a mailbox command to the adapter and (optionally) wait for its
 * completion.
 *
 *   hba  - adapter instance
 *   mb   - mailbox command; this is really the embedded MAILBOX of a
 *          MAILBOXQ (it is cast back to MAILBOXQ below for queueing and
 *          completion-flag checks)
 *   flag - MBX_NOWAIT, or any wait flag; a wait flag is converted to
 *          MBX_SLEEP when the mailbox interrupt is enabled, else MBX_POLL
 *   tmo  - timeout in seconds; raised to a per-command minimum below
 *
 * Returns:
 *   MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR
 *   MBX_WAIT   - returns MBX_TIMEOUT or mailbox_status
 *   MBX_SLEEP  - returns MBX_TIMEOUT or mailbox_status
 *   MBX_POLL   - returns MBX_TIMEOUT or mailbox_status
 */
extern uint32_t
emlxs_mb_issue_cmd(emlxs_hba_t *hba, MAILBOX *mb, int32_t flag, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mbox;
	MAILBOXQ *mbq;
	volatile uint32_t word0;
	volatile uint32_t ldata;
	uint32_t ha_copy;
	off_t offset;
	MATCHMAP *mbox_bp;
	uint32_t tmo_local;
	MAILBOX *swpmb;

	mbq = (MAILBOXQ *) mb;

	/*
	 * swpmb overlays word0 so the mbxOwner/mbxStatus bits of the first
	 * mailbox word can be examined while polling, without copying the
	 * whole mailbox back each iteration.
	 */
	swpmb = (MAILBOX *) & word0;

	mb->mbxStatus = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
		/* Mailbox commands that erase/write flash get a longer */
		/* minimum (300s); everything else gets at least 30s */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* If interrupt is enabled, use sleep, otherwise poll */
		if (hba->hc_copy & HC_MBINT_ENA) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for hardware error; refuse the command immediately */
	if (hba->flag & FC_HARDWARE_ERROR) {
		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox command is outstanding */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Convert tmo seconds to 50 millisecond tics */
		tmo_local = tmo * 20;

		/* Wait for the previous command to release the mailbox */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}
			DELAYMS(50);
			mutex_enter(&EMLXS_PORT_LOCK);
		}
	}

	/* Initialize mailbox area (marks the mailbox busy for 'tmo' secs) */
	emlxs_mb_init(hba, mbq, flag, tmo);

	switch (flag) {
	case MBX_NOWAIT:

		/* Heartbeats are too frequent to be worth logging */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		}
		break;

	case MBX_SLEEP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Sending. %s: mb=%p Sleep.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

		break;

	case MBX_POLL:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Sending. %s: mb=%p Polled.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		break;
	}

	mb->mbxOwner = OWN_CHIP;

	/* Clear the attention bit */
	WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), HA_MBATT);

	/*
	 * Hand the command to the adapter.  SLIM2 mode uses a host-memory
	 * (DMA) mailbox; otherwise the command is written directly into
	 * the adapter's SLIM registers.
	 */
	if (hba->flag & FC_SLIM2_MODE) {
		/* First copy command data */
		mbox = FC_SLIM2_MAILBOX(hba);
		offset = (off_t)((uint64_t)(unsigned long)mbox -
		    (uint64_t)(unsigned long)hba->slim2.virt);

#ifdef MBOX_EXT_SUPPORT
		/* Copy the extension data region, if one was provided */
		if (hba->mbox_ext) {
			uint32_t *mbox_ext = (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;

			emlxs_pcimem_bcopy((uint32_t *)hba->mbox_ext, mbox_ext,
			    hba->mbox_ext_size);
			emlxs_mpdata_sync(hba->slim2.dma_handle, offset_ext,
			    hba->mbox_ext_size, DDI_DMA_SYNC_FORDEV);
		}
#endif	/* MBOX_EXT_SUPPORT */

		emlxs_pcimem_bcopy((uint32_t *)mb, (uint32_t *)mbox,
		    MAILBOX_CMD_BSIZE);
		emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
	}
	/* Check for config port command */
	else if (mb->mbxCommand == MBX_CONFIG_PORT) {
		/* copy command data into host mbox for cmpl */
		mbox = FC_SLIM2_MAILBOX(hba);
		offset = (off_t)((uint64_t)(unsigned long)mbox -
		    (uint64_t)(unsigned long)hba->slim2.virt);

		emlxs_pcimem_bcopy((uint32_t *)mb, (uint32_t *)mbox,
		    MAILBOX_CMD_BSIZE);
		emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

		/* First copy command data */
		mbox = FC_SLIM1_MAILBOX(hba);
		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
		    (MAILBOX_CMD_WSIZE - 1));

		/*
		 * copy over last word, with mbxOwner set; writing the first
		 * word last keeps the chip from seeing a partial command
		 */
		ldata = *((volatile uint32_t *) mb);
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mbox), ldata);

		/* switch over to host mailbox */
		/*
		 * hba->mbox_queueaddr = (uint32_t *)&((SLIM2 *)
		 * hba->slim2.virt)->mbx;
		 */
		hba->flag |= FC_SLIM2_MODE;
	} else {	/* SLIM 1 */
		mbox = FC_SLIM1_MAILBOX(hba);

#ifdef MBOX_EXT_SUPPORT
		/* Copy the extension data region, if one was provided */
		if (hba->mbox_ext) {
			uint32_t *mbox_ext = (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			WRITE_SLIM_COPY(hba, (uint32_t *)hba->mbox_ext,
			    mbox_ext, (hba->mbox_ext_size / 4));
		}
#endif	/* MBOX_EXT_SUPPORT */

		/* First copy command data */
		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
		    (MAILBOX_CMD_WSIZE - 1));

		/*
		 * copy over last word, with mbxOwner set; writing the first
		 * word last keeps the chip from seeing a partial command
		 */
		ldata = *((volatile uint32_t *) mb);
		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *) mbox), ldata);
	}

	/* Interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba, hba->csr_addr), CA_MBATT);

	mutex_exit(&EMLXS_PORT_LOCK);

	switch (flag) {
	case MBX_NOWAIT:
		/* Completion will be handled by the interrupt path */
		return (MBX_SUCCESS);

	case MBX_SLEEP:

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */
		/* emlxs_mb_fini() will be called externally. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout. %s: mb=%p tmo=%d. Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: mb=%p status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    mb->mbxStatus);
		}

		break;

	case MBX_POLL:

		/* Convert tmo seconds to 500 usec tics */
		tmo_local = tmo * 2000;

		/*
		 * Self-timing (tmo_local) is only used when the driver
		 * timer is not running (!hba->timer_id); otherwise the
		 * timer thread is responsible for timing out the mailbox.
		 */
		if (hba->state >= FC_INIT_START) {
			ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba,
			    hba->csr_addr));

			/* Wait for command to complete */
			while (!(ha_copy & HA_MBATT) &&
			    !(mbq->flag & MBQ_COMPLETED)) {
				if (!hba->timer_id && (tmo_local-- == 0)) {
					/* self time */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_hardware_error_msg,
					    "Mailbox Timeout: %s: mb=%p Polled",
					    emlxs_mb_cmd_xlate(mb->mbxCommand),
					    mb);

					hba->flag |= FC_MBOX_TIMEOUT;
					emlxs_ffstate_change(hba, FC_ERROR);
					emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

					break;
				}
				DELAYUS(500);
				ha_copy = READ_CSR_REG(hba,
				    FC_HA_REG(hba, hba->csr_addr));
			}

			if (mb->mbxStatus == MBX_TIMEOUT) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d. Polled.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				break;
			}
		}

		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mbox = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)(unsigned long)mbox -
			    (uint64_t)(unsigned long)hba->slim2.virt);

			emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
			    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *) mbox);
			word0 = PCIMEM_LONG(word0);
		} else {
			mbox = FC_SLIM1_MAILBOX(hba);
			word0 = READ_SLIM_ADDR(hba,
			    ((volatile uint32_t *) mbox));
		}

		/*
		 * Wait for command to complete: the chip clears mbxOwner
		 * (read back through the swpmb/word0 overlay) when done
		 */
		while ((swpmb->mbxOwner == OWN_CHIP) &&
		    !(mbq->flag & MBQ_COMPLETED)) {
			if (!hba->timer_id && (tmo_local-- == 0)) {
				/* self time */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Mailbox Timeout: %s: mb=%p Polled.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

				hba->flag |= FC_MBOX_TIMEOUT;
				emlxs_ffstate_change(hba, FC_ERROR);
				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

				break;
			}
			DELAYUS(500);

			/* Get first word of mailbox */
			if (hba->flag & FC_SLIM2_MODE) {
				emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
				word0 = *((volatile uint32_t *) mbox);
				word0 = PCIMEM_LONG(word0);
			} else {
				word0 = READ_SLIM_ADDR(hba,
				    ((volatile uint32_t *) mbox));
			}

		}	/* while */

		if (mb->mbxStatus == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
			    "Timeout. %s: mb=%p tmo=%d. Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);

			break;
		}

		/* copy results back to user */
		if (hba->flag & FC_SLIM2_MODE) {
			emlxs_mpdata_sync(hba->slim2.dma_handle, offset,
			    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
			emlxs_pcimem_bcopy((uint32_t *)mbox, (uint32_t *)mb,
			    MAILBOX_CMD_BSIZE);
		} else {
			READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
			    MAILBOX_CMD_WSIZE);
		}

#ifdef MBOX_EXT_SUPPORT
		/* Copy the extension data region back, if one was provided */
		if (hba->mbox_ext) {
			uint32_t *mbox_ext = (uint32_t *)((uint8_t *)mbox +
			    MBOX_EXTENSION_OFFSET);
			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;

			if (hba->flag & FC_SLIM2_MODE) {
				emlxs_mpdata_sync(hba->slim2.dma_handle,
				    offset_ext, hba->mbox_ext_size,
				    DDI_DMA_SYNC_FORKERNEL);
				emlxs_pcimem_bcopy(mbox_ext,
				    (uint32_t *)hba->mbox_ext,
				    hba->mbox_ext_size);
			} else {
				READ_SLIM_COPY(hba, (uint32_t *)hba->mbox_ext,
				    mbox_ext, (hba->mbox_ext_size / 4));
			}
		}
#endif	/* MBOX_EXT_SUPPORT */

		/* Sync the memory buffer */
		if (hba->mbox_bp) {
			mbox_bp = (MATCHMAP *) hba->mbox_bp;
			emlxs_mpdata_sync(mbox_bp->dma_handle, 0, mbox_bp->size,
			    DDI_DMA_SYNC_FORKERNEL);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Completed. %s: mb=%p status=%x Polled.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, mb->mbxStatus);

		/* Process the result (pass-through commands skip this) */
		if (!(mbq->flag & MBQ_PASSTHRU)) {
			(void) emlxs_mb_handle_cmd(hba, mb);
		}

		/* Clear the attention bit */
		WRITE_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr), HA_MBATT);

		/* Clean up the mailbox area */
		emlxs_mb_fini(hba, NULL, mb->mbxStatus);

		break;

	}	/* switch (flag) */

	return (mb->mbxStatus);

} /* emlxs_mb_issue_cmd() */



/*
 * emlxs_mb_cmd_xlate
 *
 * Translate a mailbox command code into a printable name by scanning
 * emlxs_mb_cmd_table.  Unknown codes are formatted as "Cmd=0x%x" into a
 * single static buffer.
 *
 * NOTE(review): the static buffer means concurrent callers translating
 * two different unknown codes could race — presumably acceptable since
 * the result is only used in log messages; confirm before reusing.
 */
extern char *
emlxs_mb_cmd_xlate(uint8_t cmd)
{
	static char buffer[32];
	uint32_t i;
	uint32_t count;

	count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
	for (i = 0; i < count; i++) {
		if (cmd == emlxs_mb_cmd_table[i].code) {
			return (emlxs_mb_cmd_table[i].string);
		}
	}

	(void) sprintf(buffer, "Cmd=0x%x", cmd);
	return (buffer);

} /* emlxs_mb_cmd_xlate() */