1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* Copyright 2010 QLogic Corporation */ 23 24 /* 25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 26 */ 27 28 /* 29 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file. 
30 * 31 * *********************************************************************** 32 * * ** 33 * * NOTICE ** 34 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION ** 35 * * ALL RIGHTS RESERVED ** 36 * * ** 37 * *********************************************************************** 38 * 39 */ 40 41 #include <ql_apps.h> 42 #include <ql_api.h> 43 #include <ql_debug.h> 44 #include <ql_init.h> 45 #include <ql_iocb.h> 46 #include <ql_isr.h> 47 #include <ql_mbx.h> 48 #include <ql_nx.h> 49 #include <ql_xioctl.h> 50 51 /* 52 * Local data 53 */ 54 55 /* 56 * Local prototypes 57 */ 58 static uint16_t ql_nvram_request(ql_adapter_state_t *, uint32_t); 59 static int ql_nvram_24xx_config(ql_adapter_state_t *); 60 static void ql_23_properties(ql_adapter_state_t *, nvram_t *); 61 static void ql_24xx_properties(ql_adapter_state_t *, nvram_24xx_t *); 62 static int ql_check_isp_firmware(ql_adapter_state_t *); 63 static int ql_chip_diag(ql_adapter_state_t *); 64 static int ql_load_flash_fw(ql_adapter_state_t *); 65 static int ql_configure_loop(ql_adapter_state_t *); 66 static int ql_configure_hba(ql_adapter_state_t *); 67 static int ql_configure_fabric(ql_adapter_state_t *); 68 static int ql_configure_device_d_id(ql_adapter_state_t *); 69 static void ql_set_max_read_req(ql_adapter_state_t *); 70 static void ql_configure_n_port_info(ql_adapter_state_t *); 71 static void ql_clear_mcp(ql_adapter_state_t *); 72 static void ql_mps_reset(ql_adapter_state_t *); 73 74 /* 75 * ql_initialize_adapter 76 * Initialize board. 77 * 78 * Input: 79 * ha = adapter state pointer. 80 * 81 * Returns: 82 * ql local function return status code. 83 * 84 * Context: 85 * Kernel context. 86 */ 87 int 88 ql_initialize_adapter(ql_adapter_state_t *ha) 89 { 90 int rval; 91 class_svc_param_t *class3_param; 92 caddr_t msg; 93 la_els_logi_t *els = &ha->loginparams; 94 int retries = 5; 95 96 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 97 98 do { 99 /* Clear adapter flags. 
*/ 100 TASK_DAEMON_LOCK(ha); 101 ha->task_daemon_flags &= TASK_DAEMON_STOP_FLG | 102 TASK_DAEMON_SLEEPING_FLG | TASK_DAEMON_ALIVE_FLG | 103 TASK_DAEMON_IDLE_CHK_FLG; 104 ha->task_daemon_flags |= LOOP_DOWN; 105 TASK_DAEMON_UNLOCK(ha); 106 107 ha->loop_down_timer = LOOP_DOWN_TIMER_OFF; 108 ADAPTER_STATE_LOCK(ha); 109 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO; 110 ha->flags &= ~ONLINE; 111 ADAPTER_STATE_UNLOCK(ha); 112 113 ha->state = FC_STATE_OFFLINE; 114 msg = "Loop OFFLINE"; 115 116 rval = ql_pci_sbus_config(ha); 117 if (rval != QL_SUCCESS) { 118 TASK_DAEMON_LOCK(ha); 119 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) { 120 EL(ha, "ql_pci_sbus_cfg, isp_abort_needed\n"); 121 ha->task_daemon_flags |= ISP_ABORT_NEEDED; 122 } 123 TASK_DAEMON_UNLOCK(ha); 124 continue; 125 } 126 127 (void) ql_setup_fcache(ha); 128 129 /* Reset ISP chip. */ 130 ql_reset_chip(ha); 131 132 /* Get NVRAM configuration if needed. */ 133 if (ha->init_ctrl_blk.cb.version == 0) { 134 (void) ql_nvram_config(ha); 135 } 136 137 /* Set login parameters. */ 138 if (CFG_IST(ha, CFG_CTRL_24258081)) { 139 els->common_service.rx_bufsize = CHAR_TO_SHORT( 140 ha->init_ctrl_blk.cb24.max_frame_length[0], 141 ha->init_ctrl_blk.cb24.max_frame_length[1]); 142 bcopy((void *)&ha->init_ctrl_blk.cb24.port_name[0], 143 (void *)&els->nport_ww_name.raw_wwn[0], 8); 144 bcopy((void *)&ha->init_ctrl_blk.cb24.node_name[0], 145 (void *)&els->node_ww_name.raw_wwn[0], 8); 146 } else { 147 els->common_service.rx_bufsize = CHAR_TO_SHORT( 148 ha->init_ctrl_blk.cb.max_frame_length[0], 149 ha->init_ctrl_blk.cb.max_frame_length[1]); 150 bcopy((void *)&ha->init_ctrl_blk.cb.port_name[0], 151 (void *)&els->nport_ww_name.raw_wwn[0], 8); 152 bcopy((void *)&ha->init_ctrl_blk.cb.node_name[0], 153 (void *)&els->node_ww_name.raw_wwn[0], 8); 154 } 155 bcopy(QL_VERSION, ha->adapter_stats->revlvl.qlddv, 156 strlen(QL_VERSION)); 157 158 /* Determine which RISC code to use. 
*/ 159 if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) { 160 if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) { 161 rval = ql_load_isp_firmware(ha); 162 } 163 } 164 165 if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) == 166 QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS) { 167 168 (void) ql_fw_ready(ha, ha->fwwait); 169 170 if (!(ha->task_daemon_flags & QL_SUSPENDED) && 171 ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) { 172 if (ha->topology & QL_LOOP_CONNECTION) { 173 ha->state = ha->state | FC_STATE_LOOP; 174 msg = "Loop ONLINE"; 175 ha->task_daemon_flags |= STATE_ONLINE; 176 } else if (ha->topology & QL_P2P_CONNECTION) { 177 ha->state = ha->state | 178 FC_STATE_ONLINE; 179 msg = "Link ONLINE"; 180 ha->task_daemon_flags |= STATE_ONLINE; 181 } else { 182 msg = "Unknown Link state"; 183 } 184 } 185 } else { 186 TASK_DAEMON_LOCK(ha); 187 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) { 188 EL(ha, "failed, isp_abort_needed\n"); 189 ha->task_daemon_flags |= ISP_ABORT_NEEDED | 190 LOOP_DOWN; 191 } 192 TASK_DAEMON_UNLOCK(ha); 193 } 194 195 } while (retries-- != 0 && ha->task_daemon_flags & ISP_ABORT_NEEDED); 196 197 cmn_err(CE_NOTE, "!Qlogic %s(%d): %s", QL_NAME, ha->instance, msg); 198 199 /* Enable ISP interrupts and login parameters. */ 200 if (CFG_IST(ha, CFG_CTRL_8021)) { 201 ql_8021_enable_intrs(ha); 202 } else if (CFG_IST(ha, CFG_CTRL_242581)) { 203 WRT32_IO_REG(ha, ictrl, ISP_EN_RISC); 204 } else { 205 WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC); 206 } 207 208 ADAPTER_STATE_LOCK(ha); 209 ha->flags |= (INTERRUPTS_ENABLED | ONLINE); 210 ADAPTER_STATE_UNLOCK(ha); 211 212 ha->task_daemon_flags &= ~(FC_STATE_CHANGE | RESET_MARKER_NEEDED | 213 COMMAND_WAIT_NEEDED); 214 215 /* 216 * Setup login parameters. 
217 */ 218 els->common_service.fcph_version = 0x2006; 219 els->common_service.btob_credit = 3; 220 els->common_service.cmn_features = 0x8800; 221 els->common_service.conc_sequences = 0xff; 222 els->common_service.relative_offset = 3; 223 els->common_service.e_d_tov = 0x07d0; 224 225 class3_param = (class_svc_param_t *)&els->class_3; 226 class3_param->class_valid_svc_opt = 0x8800; 227 class3_param->rcv_data_size = els->common_service.rx_bufsize; 228 class3_param->conc_sequences = 0xff; 229 230 if (rval != QL_SUCCESS) { 231 EL(ha, "failed, rval = %xh\n", rval); 232 } else { 233 /*EMPTY*/ 234 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 235 } 236 return (rval); 237 } 238 239 /* 240 * ql_pci_sbus_config 241 * Setup device PCI/SBUS configuration registers. 242 * 243 * Input: 244 * ha = adapter state pointer. 245 * 246 * Returns: 247 * ql local function return status code. 248 * 249 * Context: 250 * Kernel context. 251 */ 252 int 253 ql_pci_sbus_config(ql_adapter_state_t *ha) 254 { 255 uint32_t timer; 256 uint16_t cmd, w16; 257 258 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 259 260 if (CFG_IST(ha, CFG_SBUS_CARD)) { 261 w16 = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle, 262 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_REVISION)); 263 EL(ha, "FPGA rev is %d.%d", (w16 & 0xf0) >> 4, 264 w16 & 0xf); 265 } else { 266 /* 267 * we want to respect framework's setting of PCI 268 * configuration space command register and also 269 * want to make sure that all bits of interest to us 270 * are properly set in command register. 271 */ 272 cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM); 273 cmd = (uint16_t)(cmd | PCI_COMM_IO | PCI_COMM_MAE | 274 PCI_COMM_ME | PCI_COMM_MEMWR_INVAL | 275 PCI_COMM_PARITY_DETECT | PCI_COMM_SERR_ENABLE); 276 277 /* 278 * If this is a 2300 card and not 2312, reset the 279 * MEMWR_INVAL due to a bug in the 2300. 
Unfortunately, the 280 * 2310 also reports itself as a 2300 so we need to get the 281 * fb revision level -- a 6 indicates it really is a 2300 and 282 * not a 2310. 283 */ 284 285 if (ha->device_id == 0x2300) { 286 /* Pause RISC. */ 287 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC); 288 for (timer = 0; timer < 30000; timer++) { 289 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 290 0) { 291 break; 292 } else { 293 drv_usecwait(MILLISEC); 294 } 295 } 296 297 /* Select FPM registers. */ 298 WRT16_IO_REG(ha, ctrl_status, 0x20); 299 300 /* Get the fb rev level */ 301 if (RD16_IO_REG(ha, fb_cmd) == 6) { 302 cmd = (uint16_t)(cmd & ~PCI_COMM_MEMWR_INVAL); 303 } 304 305 /* Deselect FPM registers. */ 306 WRT16_IO_REG(ha, ctrl_status, 0x0); 307 308 /* Release RISC module. */ 309 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC); 310 for (timer = 0; timer < 30000; timer++) { 311 if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 312 0) { 313 break; 314 } else { 315 drv_usecwait(MILLISEC); 316 } 317 } 318 } else if (ha->device_id == 0x2312) { 319 /* 320 * cPCI ISP2312 specific code to service function 1 321 * hot-swap registers. 322 */ 323 if ((RD16_IO_REG(ha, ctrl_status) & ISP_FUNC_NUM_MASK) 324 != 0) { 325 ql_pci_config_put8(ha, 0x66, 0xc2); 326 } 327 } 328 329 if (!(CFG_IST(ha, CFG_CTRL_8021)) && 330 ha->pci_max_read_req != 0) { 331 ql_set_max_read_req(ha); 332 } 333 334 ql_pci_config_put16(ha, PCI_CONF_COMM, cmd); 335 336 /* Set cache line register. */ 337 ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ, 0x10); 338 339 /* Set latency register. */ 340 ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER, 0x40); 341 342 /* Reset expansion ROM address decode enable. */ 343 w16 = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_ROM); 344 w16 = (uint16_t)(w16 & ~BIT_0); 345 ql_pci_config_put16(ha, PCI_CONF_ROM, w16); 346 } 347 348 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 349 350 return (QL_SUCCESS); 351 } 352 353 /* 354 * Set the PCI max read request value. 
355 * 356 * Input: 357 * ha: adapter state pointer. 358 * 359 * Output: 360 * none. 361 * 362 * Returns: 363 * 364 * Context: 365 * Kernel context. 366 */ 367 368 static void 369 ql_set_max_read_req(ql_adapter_state_t *ha) 370 { 371 uint16_t read_req, w16; 372 uint16_t tmp = ha->pci_max_read_req; 373 374 if ((ha->device_id == 0x2422) || 375 ((ha->device_id & 0xff00) == 0x2300)) { 376 /* check for vaild override value */ 377 if (tmp == 512 || tmp == 1024 || tmp == 2048 || 378 tmp == 4096) { 379 /* shift away the don't cares */ 380 tmp = (uint16_t)(tmp >> 10); 381 /* convert bit pos to request value */ 382 for (read_req = 0; tmp != 0; read_req++) { 383 tmp = (uint16_t)(tmp >> 1); 384 } 385 w16 = (uint16_t)ql_pci_config_get16(ha, 0x4e); 386 w16 = (uint16_t)(w16 & ~(BIT_3 & BIT_2)); 387 w16 = (uint16_t)(w16 | (read_req << 2)); 388 ql_pci_config_put16(ha, 0x4e, w16); 389 } else { 390 EL(ha, "invalid parameter value for " 391 "'pci-max-read-request': %d; using system " 392 "default\n", tmp); 393 } 394 } else if ((ha->device_id == 0x2432) || ((ha->device_id & 0xff00) == 395 0x2500) || (ha->device_id == 0x8432)) { 396 /* check for vaild override value */ 397 if (tmp == 128 || tmp == 256 || tmp == 512 || 398 tmp == 1024 || tmp == 2048 || tmp == 4096) { 399 /* shift away the don't cares */ 400 tmp = (uint16_t)(tmp >> 8); 401 /* convert bit pos to request value */ 402 for (read_req = 0; tmp != 0; read_req++) { 403 tmp = (uint16_t)(tmp >> 1); 404 } 405 w16 = (uint16_t)ql_pci_config_get16(ha, 0x54); 406 w16 = (uint16_t)(w16 & ~(BIT_14 | BIT_13 | 407 BIT_12)); 408 w16 = (uint16_t)(w16 | (read_req << 12)); 409 ql_pci_config_put16(ha, 0x54, w16); 410 } else { 411 EL(ha, "invalid parameter value for " 412 "'pci-max-read-request': %d; using system " 413 "default\n", tmp); 414 } 415 } 416 } 417 418 /* 419 * NVRAM configuration. 420 * 421 * Input: 422 * ha: adapter state pointer. 
423 * ha->hba_buf = request and response rings 424 * 425 * Output: 426 * ha->init_ctrl_blk = initialization control block 427 * host adapters parameters in host adapter block 428 * 429 * Returns: 430 * ql local function return status code. 431 * 432 * Context: 433 * Kernel context. 434 */ 435 int 436 ql_nvram_config(ql_adapter_state_t *ha) 437 { 438 uint32_t cnt; 439 caddr_t dptr1, dptr2; 440 ql_init_cb_t *icb = &ha->init_ctrl_blk.cb; 441 ql_ip_init_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb; 442 nvram_t *nv = (nvram_t *)ha->request_ring_bp; 443 uint16_t *wptr = (uint16_t *)ha->request_ring_bp; 444 uint8_t chksum = 0; 445 int rval; 446 int idpromlen; 447 char idprombuf[32]; 448 uint32_t start_addr; 449 450 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 451 452 if (CFG_IST(ha, CFG_CTRL_24258081)) { 453 return (ql_nvram_24xx_config(ha)); 454 } 455 456 start_addr = 0; 457 if ((rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA)) == 458 QL_SUCCESS) { 459 /* Verify valid NVRAM checksum. */ 460 for (cnt = 0; cnt < sizeof (nvram_t)/2; cnt++) { 461 *wptr = (uint16_t)ql_get_nvram_word(ha, 462 (uint32_t)(cnt + start_addr)); 463 chksum = (uint8_t)(chksum + (uint8_t)*wptr); 464 chksum = (uint8_t)(chksum + (uint8_t)(*wptr >> 8)); 465 wptr++; 466 } 467 ql_release_nvram(ha); 468 } 469 470 /* Bad NVRAM data, set defaults parameters. 
*/ 471 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' || 472 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || 473 nv->nvram_version < 1) { 474 475 EL(ha, "failed, rval=%xh, checksum=%xh, " 476 "id=%02x%02x%02x%02xh, flsz=%xh, pciconfvid=%xh, " 477 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1], 478 nv->id[2], nv->id[3], ha->xioctl->fdesc.flash_size, 479 ha->subven_id, nv->nvram_version); 480 481 /* Don't print nvram message if it's an on-board 2200 */ 482 if (!((CFG_IST(ha, CFG_CTRL_2200)) && 483 (ha->xioctl->fdesc.flash_size == 0))) { 484 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed," 485 " using driver defaults.", QL_NAME, ha->instance); 486 } 487 488 /* Reset NVRAM data. */ 489 bzero((void *)nv, sizeof (nvram_t)); 490 491 /* 492 * Set default initialization control block. 493 */ 494 nv->parameter_block_version = ICB_VERSION; 495 nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1; 496 nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2; 497 498 nv->max_frame_length[1] = 4; 499 500 /* 501 * Allow 2048 byte frames for 2300 502 */ 503 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) { 504 nv->max_frame_length[1] = 8; 505 } 506 nv->max_iocb_allocation[1] = 1; 507 nv->execution_throttle[0] = 16; 508 nv->login_retry_count = 8; 509 510 idpromlen = 32; 511 512 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/ 513 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip, 514 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf, 515 &idpromlen) != DDI_PROP_SUCCESS) { 516 517 QL_PRINT_3(CE_CONT, "(%d): Unable to read idprom " 518 "property\n", ha->instance); 519 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom " 520 "property", QL_NAME, ha->instance); 521 522 nv->port_name[2] = 33; 523 nv->port_name[3] = 224; 524 nv->port_name[4] = 139; 525 nv->port_name[7] = (uint8_t) 526 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance); 527 } else { 528 529 nv->port_name[2] = idprombuf[2]; 530 nv->port_name[3] = idprombuf[3]; 531 nv->port_name[4] = idprombuf[4]; 532 
nv->port_name[5] = idprombuf[5]; 533 nv->port_name[6] = idprombuf[6]; 534 nv->port_name[7] = idprombuf[7]; 535 nv->port_name[0] = (uint8_t) 536 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance); 537 } 538 539 /* Don't print nvram message if it's an on-board 2200 */ 540 if (!(CFG_IST(ha, CFG_CTRL_2200)) && 541 (ha->xioctl->fdesc.flash_size == 0)) { 542 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using" 543 " default HBA parameters and temporary WWPN:" 544 " %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME, 545 ha->instance, nv->port_name[0], nv->port_name[1], 546 nv->port_name[2], nv->port_name[3], 547 nv->port_name[4], nv->port_name[5], 548 nv->port_name[6], nv->port_name[7]); 549 } 550 551 nv->login_timeout = 4; 552 553 /* Set default connection options for the 23xx to 2 */ 554 if (!(CFG_IST(ha, CFG_CTRL_2200))) { 555 nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] | 556 BIT_5); 557 } 558 559 /* 560 * Set default host adapter parameters 561 */ 562 nv->host_p[0] = BIT_1; 563 nv->host_p[1] = BIT_2; 564 nv->reset_delay = 5; 565 nv->port_down_retry_count = 8; 566 nv->maximum_luns_per_target[0] = 8; 567 568 rval = QL_FUNCTION_FAILED; 569 } 570 571 /* Check for adapter node name (big endian). */ 572 for (cnt = 0; cnt < 8; cnt++) { 573 if (nv->node_name[cnt] != 0) { 574 break; 575 } 576 } 577 578 /* Copy port name if no node name (big endian). */ 579 if (cnt == 8) { 580 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8); 581 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0); 582 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0); 583 } 584 585 /* Reset initialization control blocks. */ 586 bzero((void *)icb, sizeof (ql_init_cb_t)); 587 588 /* Get driver properties. 
*/ 589 ql_23_properties(ha, nv); 590 591 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x" 592 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n", 593 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1], 594 nv->port_name[2], nv->port_name[3], nv->port_name[4], 595 nv->port_name[5], nv->port_name[6], nv->port_name[7], 596 nv->node_name[0], nv->node_name[1], nv->node_name[2], 597 nv->node_name[3], nv->node_name[4], nv->node_name[5], 598 nv->node_name[6], nv->node_name[7]); 599 600 /* 601 * Copy over NVRAM RISC parameter block 602 * to initialization control block. 603 */ 604 dptr1 = (caddr_t)icb; 605 dptr2 = (caddr_t)&nv->parameter_block_version; 606 cnt = (uint32_t)((uintptr_t)&icb->request_q_outpointer[0] - 607 (uintptr_t)&icb->version); 608 while (cnt-- != 0) { 609 *dptr1++ = *dptr2++; 610 } 611 612 /* Copy 2nd half. */ 613 dptr1 = (caddr_t)&icb->add_fw_opt[0]; 614 cnt = (uint32_t)((uintptr_t)&icb->reserved_3[0] - 615 (uintptr_t)&icb->add_fw_opt[0]); 616 617 while (cnt-- != 0) { 618 *dptr1++ = *dptr2++; 619 } 620 621 /* 622 * Setup driver firmware options. 623 */ 624 icb->firmware_options[0] = (uint8_t) 625 (icb->firmware_options[0] | BIT_6 | BIT_1); 626 627 /* 628 * There is no use enabling fast post for SBUS or 2300 629 * Always enable 64bit addressing, except SBUS cards. 630 */ 631 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING; 632 if (CFG_IST(ha, (CFG_SBUS_CARD | CFG_CTRL_2300 | CFG_CTRL_6322))) { 633 icb->firmware_options[0] = (uint8_t) 634 (icb->firmware_options[0] & ~BIT_3); 635 if (CFG_IST(ha, CFG_SBUS_CARD)) { 636 icb->special_options[0] = (uint8_t) 637 (icb->special_options[0] | BIT_5); 638 ha->cfg_flags &= ~CFG_ENABLE_64BIT_ADDRESSING; 639 } 640 } else { 641 icb->firmware_options[0] = (uint8_t) 642 (icb->firmware_options[0] | BIT_3); 643 } 644 /* RIO and ZIO not supported. 
*/ 645 icb->add_fw_opt[0] = (uint8_t)(icb->add_fw_opt[0] & 646 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 647 648 icb->firmware_options[1] = (uint8_t)(icb->firmware_options[1] | 649 BIT_7 | BIT_6 | BIT_5 | BIT_2 | BIT_0); 650 icb->firmware_options[0] = (uint8_t) 651 (icb->firmware_options[0] & ~(BIT_5 | BIT_4)); 652 icb->firmware_options[1] = (uint8_t) 653 (icb->firmware_options[1] & ~BIT_4); 654 655 icb->add_fw_opt[1] = (uint8_t)(icb->add_fw_opt[1] & ~(BIT_5 | BIT_4)); 656 icb->special_options[0] = (uint8_t)(icb->special_options[0] | BIT_1); 657 658 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) { 659 if ((icb->special_options[1] & 0x20) == 0) { 660 EL(ha, "50 ohm is not set\n"); 661 } 662 } 663 icb->execution_throttle[0] = 0xff; 664 icb->execution_throttle[1] = 0xff; 665 666 if (CFG_IST(ha, CFG_ENABLE_FCP_2_SUPPORT)) { 667 icb->firmware_options[1] = (uint8_t) 668 (icb->firmware_options[1] | BIT_7 | BIT_6); 669 icb->add_fw_opt[1] = (uint8_t) 670 (icb->add_fw_opt[1] | BIT_5 | BIT_4); 671 } 672 673 /* 674 * Set host adapter parameters 675 */ 676 ADAPTER_STATE_LOCK(ha); 677 ha->nvram_version = nv->nvram_version; 678 ha->adapter_features = CHAR_TO_SHORT(nv->adapter_features[0], 679 nv->adapter_features[1]); 680 681 nv->host_p[0] & BIT_4 ? (ha->cfg_flags |= CFG_DISABLE_RISC_CODE_LOAD) : 682 (ha->cfg_flags &= ~CFG_DISABLE_RISC_CODE_LOAD); 683 nv->host_p[0] & BIT_5 ? (ha->cfg_flags |= CFG_SET_CACHE_LINE_SIZE_1) : 684 (ha->cfg_flags &= ~CFG_SET_CACHE_LINE_SIZE_1); 685 686 nv->host_p[1] & BIT_1 ? (ha->cfg_flags |= CFG_ENABLE_LIP_RESET) : 687 (ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET); 688 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) : 689 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN); 690 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) : 691 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET); 692 693 nv->adapter_features[0] & BIT_3 ? 
694 (ha->cfg_flags |= CFG_MULTI_CHIP_ADAPTER) : 695 (ha->cfg_flags &= ~CFG_MULTI_CHIP_ADAPTER); 696 697 ADAPTER_STATE_UNLOCK(ha); 698 699 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0], 700 nv->execution_throttle[1]); 701 ha->loop_reset_delay = nv->reset_delay; 702 ha->port_down_retry_count = nv->port_down_retry_count; 703 ha->r_a_tov = (uint16_t)(icb->login_timeout < R_A_TOV_DEFAULT ? 704 R_A_TOV_DEFAULT : icb->login_timeout); 705 ha->maximum_luns_per_target = CHAR_TO_SHORT( 706 nv->maximum_luns_per_target[0], nv->maximum_luns_per_target[1]); 707 if (ha->maximum_luns_per_target == 0) { 708 ha->maximum_luns_per_target++; 709 } 710 711 /* 712 * Setup ring parameters in initialization control block 713 */ 714 cnt = REQUEST_ENTRY_CNT; 715 icb->request_q_length[0] = LSB(cnt); 716 icb->request_q_length[1] = MSB(cnt); 717 cnt = RESPONSE_ENTRY_CNT; 718 icb->response_q_length[0] = LSB(cnt); 719 icb->response_q_length[1] = MSB(cnt); 720 721 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma))); 722 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma))); 723 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma))); 724 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma))); 725 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma))); 726 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma))); 727 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma))); 728 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma))); 729 730 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma))); 731 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma))); 732 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma))); 733 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma))); 734 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma))); 735 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma))); 736 icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma))); 737 icb->response_q_address[7] = 
MSB(MSW(MSD(ha->response_dvma))); 738 739 /* 740 * Setup IP initialization control block 741 */ 742 ip_icb->version = IP_ICB_VERSION; 743 744 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) { 745 ip_icb->ip_firmware_options[0] = (uint8_t) 746 (ip_icb->ip_firmware_options[0] | BIT_2 | BIT_0); 747 } else { 748 ip_icb->ip_firmware_options[0] = (uint8_t) 749 (ip_icb->ip_firmware_options[0] | BIT_2); 750 } 751 752 cnt = RCVBUF_CONTAINER_CNT; 753 ip_icb->queue_size[0] = LSB(cnt); 754 ip_icb->queue_size[1] = MSB(cnt); 755 756 ip_icb->queue_address[0] = LSB(LSW(LSD(ha->rcvbuf_dvma))); 757 ip_icb->queue_address[1] = MSB(LSW(LSD(ha->rcvbuf_dvma))); 758 ip_icb->queue_address[2] = LSB(MSW(LSD(ha->rcvbuf_dvma))); 759 ip_icb->queue_address[3] = MSB(MSW(LSD(ha->rcvbuf_dvma))); 760 ip_icb->queue_address[4] = LSB(LSW(MSD(ha->rcvbuf_dvma))); 761 ip_icb->queue_address[5] = MSB(LSW(MSD(ha->rcvbuf_dvma))); 762 ip_icb->queue_address[6] = LSB(MSW(MSD(ha->rcvbuf_dvma))); 763 ip_icb->queue_address[7] = MSB(MSW(MSD(ha->rcvbuf_dvma))); 764 765 if (rval != QL_SUCCESS) { 766 EL(ha, "failed, rval = %xh\n", rval); 767 } else { 768 /*EMPTY*/ 769 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 770 } 771 return (rval); 772 } 773 774 /* 775 * Get NVRAM data word 776 * Calculates word position in NVRAM and calls request routine to 777 * get the word from NVRAM. 778 * 779 * Input: 780 * ha = adapter state pointer. 781 * address = NVRAM word address. 782 * 783 * Returns: 784 * data word. 785 * 786 * Context: 787 * Kernel context. 
788 */ 789 uint16_t 790 ql_get_nvram_word(ql_adapter_state_t *ha, uint32_t address) 791 { 792 uint32_t nv_cmd; 793 uint16_t rval; 794 795 QL_PRINT_4(CE_CONT, "(%d): started\n", ha->instance); 796 797 nv_cmd = address << 16; 798 nv_cmd = nv_cmd | NV_READ_OP; 799 800 rval = (uint16_t)ql_nvram_request(ha, nv_cmd); 801 802 QL_PRINT_4(CE_CONT, "(%d): NVRAM data = %xh\n", ha->instance, rval); 803 804 return (rval); 805 } 806 807 /* 808 * NVRAM request 809 * Sends read command to NVRAM and gets data from NVRAM. 810 * 811 * Input: 812 * ha = adapter state pointer. 813 * nv_cmd = Bit 26= start bit 814 * Bit 25, 24 = opcode 815 * Bit 23-16 = address 816 * Bit 15-0 = write data 817 * 818 * Returns: 819 * data word. 820 * 821 * Context: 822 * Kernel context. 823 */ 824 static uint16_t 825 ql_nvram_request(ql_adapter_state_t *ha, uint32_t nv_cmd) 826 { 827 uint8_t cnt; 828 uint16_t reg_data; 829 uint16_t data = 0; 830 831 /* Send command to NVRAM. */ 832 833 nv_cmd <<= 5; 834 for (cnt = 0; cnt < 11; cnt++) { 835 if (nv_cmd & BIT_31) { 836 ql_nv_write(ha, NV_DATA_OUT); 837 } else { 838 ql_nv_write(ha, 0); 839 } 840 nv_cmd <<= 1; 841 } 842 843 /* Read data from NVRAM. */ 844 845 for (cnt = 0; cnt < 16; cnt++) { 846 WRT16_IO_REG(ha, nvram, NV_SELECT+NV_CLOCK); 847 ql_nv_delay(); 848 data <<= 1; 849 reg_data = RD16_IO_REG(ha, nvram); 850 if (reg_data & NV_DATA_IN) { 851 data = (uint16_t)(data | BIT_0); 852 } 853 WRT16_IO_REG(ha, nvram, NV_SELECT); 854 ql_nv_delay(); 855 } 856 857 /* Deselect chip. 
*/ 858 859 WRT16_IO_REG(ha, nvram, NV_DESELECT); 860 ql_nv_delay(); 861 862 return (data); 863 } 864 865 void 866 ql_nv_write(ql_adapter_state_t *ha, uint16_t data) 867 { 868 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT)); 869 ql_nv_delay(); 870 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT | NV_CLOCK)); 871 ql_nv_delay(); 872 WRT16_IO_REG(ha, nvram, (uint16_t)(data | NV_SELECT)); 873 ql_nv_delay(); 874 } 875 876 void 877 ql_nv_delay(void) 878 { 879 drv_usecwait(NV_DELAY_COUNT); 880 } 881 882 /* 883 * ql_nvram_24xx_config 884 * ISP2400 nvram. 885 * 886 * Input: 887 * ha: adapter state pointer. 888 * ha->hba_buf = request and response rings 889 * 890 * Output: 891 * ha->init_ctrl_blk = initialization control block 892 * host adapters parameters in host adapter block 893 * 894 * Returns: 895 * ql local function return status code. 896 * 897 * Context: 898 * Kernel context. 899 */ 900 int 901 ql_nvram_24xx_config(ql_adapter_state_t *ha) 902 { 903 uint32_t index, addr, chksum, saved_chksum; 904 uint32_t *longptr; 905 nvram_24xx_t nvram; 906 int idpromlen; 907 char idprombuf[32]; 908 caddr_t src, dst; 909 uint16_t w1; 910 int rval; 911 nvram_24xx_t *nv = (nvram_24xx_t *)&nvram; 912 ql_init_24xx_cb_t *icb = 913 (ql_init_24xx_cb_t *)&ha->init_ctrl_blk.cb24; 914 ql_ip_init_24xx_cb_t *ip_icb = &ha->ip_init_ctrl_blk.cb24; 915 916 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 917 918 if ((rval = ql_lock_nvram(ha, &addr, LNF_NVRAM_DATA)) == QL_SUCCESS) { 919 920 /* Get NVRAM data and calculate checksum. 
*/ 921 longptr = (uint32_t *)nv; 922 chksum = saved_chksum = 0; 923 for (index = 0; index < sizeof (nvram_24xx_t) / 4; index++) { 924 rval = ql_24xx_read_flash(ha, addr++, longptr); 925 if (rval != QL_SUCCESS) { 926 EL(ha, "24xx_read_flash failed=%xh\n", rval); 927 break; 928 } 929 saved_chksum = chksum; 930 chksum += *longptr; 931 LITTLE_ENDIAN_32(longptr); 932 longptr++; 933 } 934 935 ql_release_nvram(ha); 936 } 937 938 /* Bad NVRAM data, set defaults parameters. */ 939 if (rval != QL_SUCCESS || chksum || nv->id[0] != 'I' || 940 nv->id[1] != 'S' || nv->id[2] != 'P' || nv->id[3] != ' ' || 941 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) { 942 943 cmn_err(CE_WARN, "%s(%d): NVRAM configuration failed, using " 944 "driver defaults.", QL_NAME, ha->instance); 945 946 EL(ha, "failed, rval=%xh, checksum=%xh, id=%c%c%c%c, " 947 "nvram_version=%x\n", rval, chksum, nv->id[0], nv->id[1], 948 nv->id[2], nv->id[3], CHAR_TO_SHORT(nv->nvram_version[0], 949 nv->nvram_version[1])); 950 951 saved_chksum = ~saved_chksum + 1; 952 953 (void) ql_flash_errlog(ha, FLASH_ERRLOG_NVRAM_CHKSUM_ERR, 0, 954 MSW(saved_chksum), LSW(saved_chksum)); 955 956 /* Reset NVRAM data. */ 957 bzero((void *)nv, sizeof (nvram_24xx_t)); 958 959 /* 960 * Set default initialization control block. 
961 */ 962 nv->nvram_version[0] = LSB(ICB_24XX_VERSION); 963 nv->nvram_version[1] = MSB(ICB_24XX_VERSION); 964 965 nv->version[0] = 1; 966 nv->max_frame_length[1] = 8; 967 nv->execution_throttle[0] = 16; 968 nv->exchange_count[0] = 128; 969 nv->max_luns_per_target[0] = 8; 970 971 idpromlen = 32; 972 973 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/ 974 if (rval = ddi_getlongprop_buf(DDI_DEV_T_ANY, ha->dip, 975 DDI_PROP_CANSLEEP, "idprom", (caddr_t)idprombuf, 976 &idpromlen) != DDI_PROP_SUCCESS) { 977 978 cmn_err(CE_WARN, "%s(%d) : Unable to read idprom " 979 "property, rval=%x", QL_NAME, ha->instance, rval); 980 981 nv->port_name[0] = 33; 982 nv->port_name[3] = 224; 983 nv->port_name[4] = 139; 984 nv->port_name[7] = (uint8_t) 985 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance); 986 } else { 987 nv->port_name[2] = idprombuf[2]; 988 nv->port_name[3] = idprombuf[3]; 989 nv->port_name[4] = idprombuf[4]; 990 nv->port_name[5] = idprombuf[5]; 991 nv->port_name[6] = idprombuf[6]; 992 nv->port_name[7] = idprombuf[7]; 993 nv->port_name[0] = (uint8_t) 994 (NAA_ID_IEEE_EXTENDED << 4 | ha->instance); 995 } 996 997 cmn_err(CE_WARN, "%s(%d): Unreliable HBA NVRAM, using default " 998 "HBA parameters and temporary " 999 "WWPN: %02x%02x%02x%02x%02x%02x%02x%02x", QL_NAME, 1000 ha->instance, nv->port_name[0], nv->port_name[1], 1001 nv->port_name[2], nv->port_name[3], nv->port_name[4], 1002 nv->port_name[5], nv->port_name[6], nv->port_name[7]); 1003 1004 nv->login_retry_count[0] = 8; 1005 1006 nv->firmware_options_1[0] = BIT_2 | BIT_1; 1007 nv->firmware_options_1[1] = BIT_5; 1008 nv->firmware_options_2[0] = BIT_5; 1009 nv->firmware_options_2[1] = BIT_4; 1010 nv->firmware_options_3[1] = BIT_6; 1011 1012 /* 1013 * Set default host adapter parameters 1014 */ 1015 nv->host_p[0] = BIT_4 | BIT_1; 1016 nv->host_p[1] = BIT_3 | BIT_2; 1017 nv->reset_delay = 5; 1018 nv->max_luns_per_target[0] = 128; 1019 nv->port_down_retry_count[0] = 30; 1020 nv->link_down_timeout[0] = 30; 1021 1022 if 
(CFG_IST(ha, CFG_CTRL_8081)) { 1023 nv->firmware_options_3[2] = BIT_4; 1024 nv->feature_mask_l[0] = 9; 1025 nv->ext_blk.version[0] = 1; 1026 nv->ext_blk.fcf_vlan_match = 1; 1027 nv->ext_blk.fcf_vlan_id[0] = LSB(1002); 1028 nv->ext_blk.fcf_vlan_id[1] = MSB(1002); 1029 nv->fw.isp8001.e_node_mac_addr[1] = 2; 1030 nv->fw.isp8001.e_node_mac_addr[2] = 3; 1031 nv->fw.isp8001.e_node_mac_addr[3] = 4; 1032 nv->fw.isp8001.e_node_mac_addr[4] = MSB(ha->instance); 1033 nv->fw.isp8001.e_node_mac_addr[5] = LSB(ha->instance); 1034 } 1035 1036 rval = QL_FUNCTION_FAILED; 1037 } 1038 1039 /* Check for adapter node name (big endian). */ 1040 for (index = 0; index < 8; index++) { 1041 if (nv->node_name[index] != 0) { 1042 break; 1043 } 1044 } 1045 1046 /* Copy port name if no node name (big endian). */ 1047 if (index == 8) { 1048 bcopy((void *)&nv->port_name[0], (void *)&nv->node_name[0], 8); 1049 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0); 1050 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0); 1051 } 1052 1053 /* Reset initialization control blocks. */ 1054 bzero((void *)icb, sizeof (ql_init_24xx_cb_t)); 1055 1056 /* Get driver properties. */ 1057 ql_24xx_properties(ha, nv); 1058 1059 cmn_err(CE_CONT, "!Qlogic %s(%d) WWPN=%02x%02x%02x%02x" 1060 "%02x%02x%02x%02x : WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n", 1061 QL_NAME, ha->instance, nv->port_name[0], nv->port_name[1], 1062 nv->port_name[2], nv->port_name[3], nv->port_name[4], 1063 nv->port_name[5], nv->port_name[6], nv->port_name[7], 1064 nv->node_name[0], nv->node_name[1], nv->node_name[2], 1065 nv->node_name[3], nv->node_name[4], nv->node_name[5], 1066 nv->node_name[6], nv->node_name[7]); 1067 1068 /* 1069 * Copy over NVRAM Firmware Initialization Control Block. 
1070 */ 1071 dst = (caddr_t)icb; 1072 src = (caddr_t)&nv->version; 1073 index = (uint32_t)((uintptr_t)&icb->response_q_inpointer[0] - 1074 (uintptr_t)icb); 1075 while (index--) { 1076 *dst++ = *src++; 1077 } 1078 icb->login_retry_count[0] = nv->login_retry_count[0]; 1079 icb->login_retry_count[1] = nv->login_retry_count[1]; 1080 icb->link_down_on_nos[0] = nv->link_down_on_nos[0]; 1081 icb->link_down_on_nos[1] = nv->link_down_on_nos[1]; 1082 1083 dst = (caddr_t)&icb->interrupt_delay_timer; 1084 src = (caddr_t)&nv->interrupt_delay_timer; 1085 index = (uint32_t)((uintptr_t)&icb->qos - 1086 (uintptr_t)&icb->interrupt_delay_timer); 1087 while (index--) { 1088 *dst++ = *src++; 1089 } 1090 1091 /* 1092 * Setup driver firmware options. 1093 */ 1094 if (CFG_IST(ha, CFG_CTRL_8081)) { 1095 dst = (caddr_t)icb->enode_mac_addr; 1096 src = (caddr_t)nv->fw.isp8001.e_node_mac_addr; 1097 index = sizeof (nv->fw.isp8001.e_node_mac_addr); 1098 while (index--) { 1099 *dst++ = *src++; 1100 } 1101 dst = (caddr_t)&icb->ext_blk; 1102 src = (caddr_t)&nv->ext_blk; 1103 index = sizeof (ql_ext_icb_8100_t); 1104 while (index--) { 1105 *dst++ = *src++; 1106 } 1107 EL(ha, "e_node_mac_addr=%02x-%02x-%02x-%02x-%02x-%02x\n", 1108 icb->enode_mac_addr[0], icb->enode_mac_addr[1], 1109 icb->enode_mac_addr[2], icb->enode_mac_addr[3], 1110 icb->enode_mac_addr[4], icb->enode_mac_addr[5]); 1111 } else { 1112 icb->firmware_options_1[0] = (uint8_t) 1113 (icb->firmware_options_1[0] | BIT_1); 1114 icb->firmware_options_1[1] = (uint8_t) 1115 (icb->firmware_options_1[1] | BIT_5 | BIT_2); 1116 icb->firmware_options_3[0] = (uint8_t) 1117 (icb->firmware_options_3[0] | BIT_1); 1118 } 1119 icb->firmware_options_1[0] = (uint8_t)(icb->firmware_options_1[0] & 1120 ~(BIT_5 | BIT_4)); 1121 icb->firmware_options_1[1] = (uint8_t)(icb->firmware_options_1[1] | 1122 BIT_6); 1123 icb->firmware_options_2[0] = (uint8_t)(icb->firmware_options_2[0] & 1124 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 1125 if (CFG_IST(ha, 
CFG_ENABLE_FCP_2_SUPPORT)) { 1126 icb->firmware_options_2[1] = (uint8_t) 1127 (icb->firmware_options_2[1] | BIT_4); 1128 } else { 1129 icb->firmware_options_2[1] = (uint8_t) 1130 (icb->firmware_options_2[1] & ~BIT_4); 1131 } 1132 1133 icb->firmware_options_3[0] = (uint8_t)(icb->firmware_options_3[0] & 1134 ~BIT_7); 1135 1136 /* enable special N port 2 N port login behaviour */ 1137 if (CFG_IST(ha, CFG_CTRL_2425)) { 1138 icb->firmware_options_3[1] = 1139 (uint8_t)(icb->firmware_options_3[1] | BIT_0); 1140 } 1141 1142 icb->execution_throttle[0] = 0xff; 1143 icb->execution_throttle[1] = 0xff; 1144 1145 /* 1146 * Set host adapter parameters 1147 */ 1148 ADAPTER_STATE_LOCK(ha); 1149 ha->nvram_version = CHAR_TO_SHORT(nv->nvram_version[0], 1150 nv->nvram_version[1]); 1151 nv->host_p[1] & BIT_2 ? (ha->cfg_flags |= CFG_ENABLE_FULL_LIP_LOGIN) : 1152 (ha->cfg_flags &= ~CFG_ENABLE_FULL_LIP_LOGIN); 1153 nv->host_p[1] & BIT_3 ? (ha->cfg_flags |= CFG_ENABLE_TARGET_RESET) : 1154 (ha->cfg_flags &= ~CFG_ENABLE_TARGET_RESET); 1155 ha->cfg_flags &= ~(CFG_DISABLE_RISC_CODE_LOAD | CFG_LR_SUPPORT | 1156 CFG_SET_CACHE_LINE_SIZE_1 | CFG_MULTI_CHIP_ADAPTER); 1157 ha->cfg_flags |= CFG_ENABLE_64BIT_ADDRESSING; 1158 if (CFG_IST(ha, CFG_CTRL_81XX) && nv->enhanced_features[0] & BIT_0) { 1159 ha->cfg_flags |= CFG_LR_SUPPORT; 1160 } 1161 ADAPTER_STATE_UNLOCK(ha); 1162 1163 ha->execution_throttle = CHAR_TO_SHORT(nv->execution_throttle[0], 1164 nv->execution_throttle[1]); 1165 ha->loop_reset_delay = nv->reset_delay; 1166 ha->port_down_retry_count = CHAR_TO_SHORT(nv->port_down_retry_count[0], 1167 nv->port_down_retry_count[1]); 1168 w1 = CHAR_TO_SHORT(icb->login_timeout[0], icb->login_timeout[1]); 1169 ha->r_a_tov = (uint16_t)(w1 < R_A_TOV_DEFAULT ? 
R_A_TOV_DEFAULT : w1); 1170 ha->maximum_luns_per_target = CHAR_TO_SHORT( 1171 nv->max_luns_per_target[0], nv->max_luns_per_target[1]); 1172 if (ha->maximum_luns_per_target == 0) { 1173 ha->maximum_luns_per_target++; 1174 } 1175 1176 /* ISP2422 Serial Link Control */ 1177 if (CFG_IST(ha, CFG_CTRL_2422)) { 1178 ha->serdes_param[0] = CHAR_TO_SHORT(nv->fw.isp2400.swing_opt[0], 1179 nv->fw.isp2400.swing_opt[1]); 1180 ha->serdes_param[1] = CHAR_TO_SHORT(nv->fw.isp2400.swing_1g[0], 1181 nv->fw.isp2400.swing_1g[1]); 1182 ha->serdes_param[2] = CHAR_TO_SHORT(nv->fw.isp2400.swing_2g[0], 1183 nv->fw.isp2400.swing_2g[1]); 1184 ha->serdes_param[3] = CHAR_TO_SHORT(nv->fw.isp2400.swing_4g[0], 1185 nv->fw.isp2400.swing_4g[1]); 1186 } 1187 1188 /* 1189 * Setup ring parameters in initialization control block 1190 */ 1191 w1 = REQUEST_ENTRY_CNT; 1192 icb->request_q_length[0] = LSB(w1); 1193 icb->request_q_length[1] = MSB(w1); 1194 w1 = RESPONSE_ENTRY_CNT; 1195 icb->response_q_length[0] = LSB(w1); 1196 icb->response_q_length[1] = MSB(w1); 1197 1198 icb->request_q_address[0] = LSB(LSW(LSD(ha->request_dvma))); 1199 icb->request_q_address[1] = MSB(LSW(LSD(ha->request_dvma))); 1200 icb->request_q_address[2] = LSB(MSW(LSD(ha->request_dvma))); 1201 icb->request_q_address[3] = MSB(MSW(LSD(ha->request_dvma))); 1202 icb->request_q_address[4] = LSB(LSW(MSD(ha->request_dvma))); 1203 icb->request_q_address[5] = MSB(LSW(MSD(ha->request_dvma))); 1204 icb->request_q_address[6] = LSB(MSW(MSD(ha->request_dvma))); 1205 icb->request_q_address[7] = MSB(MSW(MSD(ha->request_dvma))); 1206 1207 icb->response_q_address[0] = LSB(LSW(LSD(ha->response_dvma))); 1208 icb->response_q_address[1] = MSB(LSW(LSD(ha->response_dvma))); 1209 icb->response_q_address[2] = LSB(MSW(LSD(ha->response_dvma))); 1210 icb->response_q_address[3] = MSB(MSW(LSD(ha->response_dvma))); 1211 icb->response_q_address[4] = LSB(LSW(MSD(ha->response_dvma))); 1212 icb->response_q_address[5] = MSB(LSW(MSD(ha->response_dvma))); 1213 
	icb->response_q_address[6] = LSB(MSW(MSD(ha->response_dvma)));
	icb->response_q_address[7] = MSB(MSW(MSD(ha->response_dvma)));

	/*
	 * Setup IP initialization control block
	 */
	ip_icb->version = IP_ICB_24XX_VERSION;

	/* NOTE(review): BIT_2 meaning comes from the IP ICB spec; not */
	/* visible here -- confirm against ql_api.h definitions. */
	ip_icb->ip_firmware_options[0] = (uint8_t)
	    (ip_icb->ip_firmware_options[0] | BIT_2);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_lock_nvram
 *	Locks NVRAM access and returns starting address of NVRAM.
 *
 *	LNF_NVRAM_DATA and LNF_VPD_DATA are mutually exclusive; passing
 *	both (or neither where one is required) fails without taking any
 *	lock.  On QL_SUCCESS the caller owns the NVRAM lock and must
 *	release it with ql_release_nvram().
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	pointer for start address.
 *	flags:	Are mutually exclusive:
 *		LNF_NVRAM_DATA --> get nvram
 *		LNF_VPD_DATA --> get vpd data (24/25xx only).
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_lock_nvram(ql_adapter_state_t *ha, uint32_t *addr, uint32_t flags)
{
	int	i;

	if ((flags & LNF_NVRAM_DATA) && (flags & LNF_VPD_DATA)) {
		EL(ha, "invalid options for function");
		return (QL_FUNCTION_FAILED);
	}

	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
		/* 2312/2322 only support NVRAM data access. */
		if ((flags & LNF_NVRAM_DATA) == 0) {
			EL(ha, "invalid 2312/2322 option for HBA");
			return (QL_FUNCTION_FAILED);
		}

		/* if function number is non-zero, then adjust offset */
		*addr = ha->flash_nvram_addr;

		/*
		 * Try to get resource lock by polling the nvram busy bit
		 * and then claiming the host-to-host semaphore register.
		 * NOTE(review): comment historically said "10 seconds max",
		 * but with up to two 1 ms waits per pass and 10000 passes
		 * the worst case is closer to 20 s -- confirm intent.
		 */
		for (i = 0; i < 10000; i++) {
			/* if nvram busy bit is reset, acquire sema */
			if ((RD16_IO_REG(ha, nvram) & 0x8000) == 0) {
				WRT16_IO_REG(ha, host_to_host_sema, 1);
				drv_usecwait(MILLISEC);
				/* Re-read to verify we actually won it. */
				if (RD16_IO_REG(ha, host_to_host_sema) & 1) {
					break;
				}
			}
			drv_usecwait(MILLISEC);
		}
		if ((RD16_IO_REG(ha, host_to_host_sema) & 1) == 0) {
			cmn_err(CE_WARN, "%s(%d): unable to get NVRAM lock",
			    QL_NAME, ha->instance);
			return (QL_FUNCTION_FAILED);
		}
	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		if (flags & LNF_VPD_DATA) {
			*addr = NVRAM_DATA_ADDR | ha->flash_vpd_addr;
		} else if (flags & LNF_NVRAM_DATA) {
			*addr = NVRAM_DATA_ADDR | ha->flash_nvram_addr;
		} else {
			EL(ha, "invalid 2422 option for HBA");
			return (QL_FUNCTION_FAILED);
		}

		GLOBAL_HW_LOCK();
	} else if (CFG_IST(ha, CFG_CTRL_258081)) {
		if (flags & LNF_VPD_DATA) {
			*addr = ha->flash_data_addr | ha->flash_vpd_addr;
		} else if (flags & LNF_NVRAM_DATA) {
			*addr = ha->flash_data_addr | ha->flash_nvram_addr;
		} else {
			EL(ha, "invalid 2581 option for HBA");
			return (QL_FUNCTION_FAILED);
		}

		GLOBAL_HW_LOCK();
	} else {
		/* Legacy controllers: NVRAM only, address base 0. */
		if ((flags & LNF_NVRAM_DATA) == 0) {
			EL(ha, "invalid option for HBA");
			return (QL_FUNCTION_FAILED);
		}
		*addr = 0;
		GLOBAL_HW_LOCK();
	}

	return (QL_SUCCESS);
}

/*
 * ql_release_nvram
 *	Releases NVRAM access.  Must pair with a successful
 *	ql_lock_nvram() call.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_release_nvram(ql_adapter_state_t *ha)
{
	if (ha->device_id == 0x2312 || ha->device_id == 0x2322) {
		/* Release resource lock */
		WRT16_IO_REG(ha, host_to_host_sema, 0);
	} else {
		GLOBAL_HW_UNLOCK();
	}
}

/*
 * ql_23_properties
 *	Copies driver properties to NVRAM or adapter structure.
 *
 *	Driver properties are by design global variables and hidden
 *	completely from administrators. Knowledgeable folks can
 *	override the default values using driver.conf
 *
 *	Each property is looked up via ql_get_prop(); 0xffffffff means
 *	"not configured" and selects the coded default.  Out-of-range
 *	values are logged and the existing nvram value is kept.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	nv:	NVRAM structure pointer.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_23_properties(ql_adapter_state_t *ha, nvram_t *nv)
{
	uint32_t	data, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get frame payload size. */
	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
		data = 2048;
	}
	if (data == 512 || data == 1024 || data == 2048) {
		nv->max_frame_length[0] = LSB(data);
		nv->max_frame_length[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'max-frame-length': "
		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
		    nv->max_frame_length[0], nv->max_frame_length[1]));
	}

	/* Get max IOCB allocation (fixed at 256; not configurable). */
	nv->max_iocb_allocation[0] = 0;
	nv->max_iocb_allocation[1] = 1;

	/* Get execution throttle. */
	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
		data = 32;
	}
	if (data != 0 && data < 65536) {
		nv->execution_throttle[0] = LSB(data);
		nv->execution_throttle[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'execution-throttle': "
		    "%d; using nvram default of %d\n", data, CHAR_TO_SHORT(
		    nv->execution_throttle[0], nv->execution_throttle[1]));
	}

	/* Get Login timeout. */
	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
		data = 3;
	}
	if (data < 256) {
		nv->login_timeout = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'login-timeout': "
		    "%d; using nvram value of %d\n", data, nv->login_timeout);
	}

	/* Get retry count. */
	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
		data = 4;
	}
	if (data < 256) {
		nv->login_retry_count = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'login-retry-count': "
		    "%d; using nvram value of %d\n", data,
		    nv->login_retry_count);
	}

	/* Get adapter hard loop ID enable (no coded default; 0xffffffff */
	/* leaves the nvram setting untouched). */
	data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
	if (data == 0) {
		nv->firmware_options[0] =
		    (uint8_t)(nv->firmware_options[0] & ~BIT_0);
	} else if (data == 1) {
		nv->firmware_options[0] =
		    (uint8_t)(nv->firmware_options[0] | BIT_0);
	} else if (data != 0xffffffff) {
		EL(ha, "invalid parameter value for "
		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
		    "of %d\n", data, nv->firmware_options[0] & BIT_0 ? 1 : 0);
	}

	/* Get adapter hard loop ID. */
	data = ql_get_prop(ha, "adapter-hard-loop-ID");
	if (data < 126) {
		nv->hard_address[0] = (uint8_t)data;
	} else if (data != 0xffffffff) {
		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID': "
		    "%d; using nvram value of %d\n",
		    data, nv->hard_address[0]);
	}

	/* Get LIP reset. */
	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
	    0xffffffff) {
		data = 0;
	}
	if (data == 0) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_1);
	} else if (data == 1) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_1);
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-LIP-reset-on-bus-reset': %d; using nvram value "
		    "of %d\n", data, nv->host_p[1] & BIT_1 ? 1 : 0);
	}

	/* Get LIP full login. */
	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
	    0xffffffff) {
		data = 1;
	}
	if (data == 0) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
	} else if (data == 1) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
	}

	/* Get target reset. */
	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
	    0xffffffff) {
		data = 0;
	}
	if (data == 0) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
	} else if (data == 1) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-target-reset-on-bus-reset': %d; using nvram "
		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
	}

	/* Get reset delay. */
	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
		data = 5;
	}
	if (data != 0 && data < 256) {
		nv->reset_delay = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'reset-delay': %d; "
		    "using nvram value of %d", data, nv->reset_delay);
	}

	/* Get port down retry count. */
	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
		data = 8;
	}
	if (data < 256) {
		nv->port_down_retry_count = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'port-down-retry-count':"
		    " %d; using nvram value of %d\n", data,
		    nv->port_down_retry_count);
	}

	/* Get connection mode setting; 2200 allows one extra mode. */
	if ((data = ql_get_prop(ha, "connection-options")) == 0xffffffff) {
		data = 2;
	}
	cnt = CFG_IST(ha, CFG_CTRL_2200) ? 3 : 2;
	if (data <= cnt) {
		/* Connection mode lives in add_fw_opt[0] bits 6:4. */
		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] &
		    ~(BIT_6 | BIT_5 | BIT_4));
		nv->add_fw_opt[0] = (uint8_t)(nv->add_fw_opt[0] |
		    (uint8_t)(data << 4));
	} else {
		/*
		 * NOTE(review): field is 3 bits (6:4) but log masks with
		 * 0x3; the logged nvram value may be truncated -- confirm.
		 */
		EL(ha, "invalid parameter value for 'connection-options': "
		    "%d; using nvram value of %d\n", data,
		    (nv->add_fw_opt[0] >> 4) & 0x3);
	}

	/* Get data rate setting (not applicable to 2200). */
	if ((CFG_IST(ha, CFG_CTRL_2200)) == 0) {
		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
			data = 2;
		}
		if (data < 3) {
			/* Data rate lives in special_options[1] bits 7:6. */
			nv->special_options[1] = (uint8_t)
			    (nv->special_options[1] & 0x3f);
			nv->special_options[1] = (uint8_t)
			    (nv->special_options[1] | (uint8_t)(data << 6));
		} else {
			EL(ha, "invalid parameter value for 'fc-data-rate': "
			    "%d; using nvram value of %d\n", data,
			    (nv->special_options[1] >> 6) & 0x3);
		}
	}

	/* Get adapter id string for Sun branded 23xx only */
	if ((CFG_IST(ha, CFG_CTRL_2300)) && nv->adapInfo[0] != 0) {
		(void) snprintf((int8_t *)ha->adapInfo, 16, "%s",
		    nv->adapInfo);
	}

	/* Get IP FW container count. */
	ha->ip_init_ctrl_blk.cb.cc[0] = LSB(ql_ip_buffer_count);
	ha->ip_init_ctrl_blk.cb.cc[1] = MSB(ql_ip_buffer_count);

	/* Get IP low water mark. */
	ha->ip_init_ctrl_blk.cb.low_water_mark[0] = LSB(ql_ip_low_water);
	ha->ip_init_ctrl_blk.cb.low_water_mark[1] = MSB(ql_ip_low_water);

	/* Get IP fast register post count. */
	ha->ip_init_ctrl_blk.cb.fast_post_reg_count[0] =
	    ql_ip_fast_post_count;

	ADAPTER_STATE_LOCK(ha);

	ql_common_properties(ha);

	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_common_properties
 *	Driver properties adapter structure.
 *
 *	Driver properties are by design global variables and hidden
 *	completely from administrators.
Knowledgeable folks can
 *	override the default values using driver.conf
 *
 *	Handles properties shared by all controller families; family
 *	specific callers (ql_23_properties / ql_24xx_properties) invoke
 *	this after their own settings.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_common_properties(ql_adapter_state_t *ha)
{
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get extended logging trace buffer size (units of KB). */
	if ((data = ql_get_prop(ha, "set-ext-log-buffer-size")) !=
	    0xffffffff && data != 0) {
		char		*new_trace;
		uint32_t	new_size;

		/* Only resize if a trace buffer already exists. */
		if (ha->el_trace_desc->trace_buffer != NULL) {
			new_size = 1024 * data;
			new_trace = (char *)kmem_zalloc(new_size, KM_SLEEP);

			/*
			 * NOTE(review): kmem_zalloc(KM_SLEEP) sleeps until
			 * memory is available and does not return NULL, so
			 * this branch is defensive/unreachable.
			 */
			if (new_trace == NULL) {
				cmn_err(CE_WARN, "%s(%d): can't get new"
				    " trace buffer",
				    QL_NAME, ha->instance);
			} else {
				/* free the previous */
				kmem_free(ha->el_trace_desc->trace_buffer,
				    ha->el_trace_desc->trace_buffer_size);
				/* Use the new one */
				ha->el_trace_desc->trace_buffer = new_trace;
				ha->el_trace_desc->trace_buffer_size = new_size;
			}
		}

	}

	/* Get extended logging enable. */
	if ((data = ql_get_prop(ha, "extended-logging")) == 0xffffffff ||
	    data == 0) {
		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
	} else if (data == 1) {
		ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
	} else {
		EL(ha, "invalid parameter value for 'extended-logging': %d;"
		    " using default value of 0\n", data);
		ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING;
	}

	/* Get extended logging trace disable. */
	if ((data = ql_get_prop(ha, "disable-extended-logging-trace")) ==
	    0xffffffff || data == 0) {
		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
	} else if (data == 1) {
		ha->cfg_flags |= CFG_DISABLE_EXTENDED_LOGGING_TRACE;
	} else {
		EL(ha, "invalid parameter value for "
		    "'disable-extended-logging-trace': %d;"
		    " using default value of 0\n", data);
		ha->cfg_flags &= ~CFG_DISABLE_EXTENDED_LOGGING_TRACE;
	}

	/* Get FCP 2 Error Recovery (enabled by default). */
	if ((data = ql_get_prop(ha, "enable-FCP-2-error-recovery")) ==
	    0xffffffff || data == 1) {
		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
	} else if (data == 0) {
		ha->cfg_flags &= ~CFG_ENABLE_FCP_2_SUPPORT;
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-FCP-2-error-recovery': %d; using nvram value of "
		    "1\n", data);
		ha->cfg_flags |= CFG_ENABLE_FCP_2_SUPPORT;
	}

#ifdef QL_DEBUG_LEVEL_2
	/* Debug builds force extended logging on. */
	ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING;
#endif

	/* Get port down retry delay. */
	if ((data = ql_get_prop(ha, "port-down-retry-delay")) == 0xffffffff) {
		ha->port_down_retry_delay = PORT_RETRY_TIME;
	} else if (data < 256) {
		ha->port_down_retry_delay = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'port-down-retry-delay':"
		    " %d; using default value of %d", data, PORT_RETRY_TIME);
		ha->port_down_retry_delay = PORT_RETRY_TIME;
	}

	/* Get queue full retry count. */
	if ((data = ql_get_prop(ha, "queue-full-retry-count")) == 0xffffffff) {
		ha->qfull_retry_count = 16;
	} else if (data < 256) {
		ha->qfull_retry_count = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'queue-full-retry-count':"
		    " %d; using default value of 16", data);
		ha->qfull_retry_count = 16;
	}

	/* Get queue full retry delay. */
	if ((data = ql_get_prop(ha, "queue-full-retry-delay")) == 0xffffffff) {
		ha->qfull_retry_delay = PORT_RETRY_TIME;
	} else if (data < 256) {
		ha->qfull_retry_delay = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'queue-full-retry-delay':"
		    " %d; using default value of %d", data, PORT_RETRY_TIME);
		ha->qfull_retry_delay = PORT_RETRY_TIME;
	}

	/* Get loop down timeout. */
	if ((data = ql_get_prop(ha, "link-down-timeout")) == 0xffffffff) {
		data = 0;
	} else if (data > 255) {
		EL(ha, "invalid parameter value for 'link-down-timeout': %d;"
		    " using nvram value of 0\n", data);
		data = 0;
	}
	/* Clamp the abort time into (LOOP_DOWN_TIMER_END, START). */
	ha->loop_down_abort_time = (uint8_t)(LOOP_DOWN_TIMER_START - data);
	if (ha->loop_down_abort_time == LOOP_DOWN_TIMER_START) {
		ha->loop_down_abort_time--;
	} else if (ha->loop_down_abort_time <= LOOP_DOWN_TIMER_END) {
		ha->loop_down_abort_time = LOOP_DOWN_TIMER_END + 1;
	}

	/*
	 * Get link down error enable.
	 * NOTE(review): the invalid-value branch logs "using default value
	 * of 1" but leaves the flag untouched -- confirm cfg_flags has the
	 * documented default at this point.
	 */
	if ((data = ql_get_prop(ha, "enable-link-down-error")) == 0xffffffff ||
	    data == 1) {
		ha->cfg_flags |= CFG_ENABLE_LINK_DOWN_REPORTING;
	} else if (data == 0) {
		ha->cfg_flags &= ~CFG_ENABLE_LINK_DOWN_REPORTING;
	} else {
		EL(ha, "invalid parameter value for 'link-down-error': %d;"
		    " using default value of 1\n", data);
	}

	/*
	 * Get firmware dump flags.
	 *	TAKE_FW_DUMP_ON_MAILBOX_TIMEOUT		BIT_0
	 *	TAKE_FW_DUMP_ON_ISP_SYSTEM_ERROR	BIT_1
	 *	TAKE_FW_DUMP_ON_DRIVER_COMMAND_TIMEOUT	BIT_2
	 *	TAKE_FW_DUMP_ON_LOOP_OFFLINE_TIMEOUT	BIT_3
	 */
	ha->cfg_flags &= ~(CFG_DUMP_MAILBOX_TIMEOUT |
	    CFG_DUMP_ISP_SYSTEM_ERROR | CFG_DUMP_DRIVER_COMMAND_TIMEOUT |
	    CFG_DUMP_LOOP_OFFLINE_TIMEOUT);
	if ((data = ql_get_prop(ha, "firmware-dump-flags")) != 0xffffffff) {
		if (data & BIT_0) {
			ha->cfg_flags |= CFG_DUMP_MAILBOX_TIMEOUT;
		}
		if (data & BIT_1) {
			ha->cfg_flags |= CFG_DUMP_ISP_SYSTEM_ERROR;
		}
		if (data & BIT_2) {
			ha->cfg_flags |= CFG_DUMP_DRIVER_COMMAND_TIMEOUT;
		}
		if (data & BIT_3) {
			ha->cfg_flags |= CFG_DUMP_LOOP_OFFLINE_TIMEOUT;
		}
	}

	/* Get the PCI max read request size override (0 = no override). */
	ha->pci_max_read_req = 0;
	if ((data = ql_get_prop(ha, "pci-max-read-request")) != 0xffffffff &&
	    data != 0) {
		ha->pci_max_read_req = (uint16_t)(data);
	}

	/*
	 * Set default fw wait, adjusted for slow FCF's.
	 * Revisit when FCF's as fast as FC switches.
	 */
	ha->fwwait = (uint8_t)(CFG_IST(ha, CFG_CTRL_8081) ? 45 : 10);
	/* Get the attach fw_ready override value (seconds, 1-240). */
	if ((data = ql_get_prop(ha, "init-loop-sync-wait")) != 0xffffffff) {
		if (data > 0 && data <= 240) {
			ha->fwwait = (uint8_t)data;
		} else {
			EL(ha, "invalid parameter value for "
			    "'init-loop-sync-wait': %d; using default "
			    "value of %d\n", data, ha->fwwait);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_24xx_properties
 *	Copies driver properties to NVRAM or adapter structure.
 *
 *	Driver properties are by design global variables and hidden
 *	completely from administrators. Knowledgeable folks can
 *	override the default values using /etc/system.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	nv:	NVRAM structure pointer.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_24xx_properties(ql_adapter_state_t *ha, nvram_24xx_t *nv)
{
	uint32_t	data;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get frame size (24xx additionally allows 2112). */
	if ((data = ql_get_prop(ha, "max-frame-length")) == 0xffffffff) {
		data = 2048;
	}
	if (data == 512 || data == 1024 || data == 2048 || data == 2112) {
		nv->max_frame_length[0] = LSB(data);
		nv->max_frame_length[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'max-frame-length': %d;"
		    " using nvram default of %d\n", data, CHAR_TO_SHORT(
		    nv->max_frame_length[0], nv->max_frame_length[1]));
	}

	/* Get execution throttle. */
	if ((data = ql_get_prop(ha, "execution-throttle")) == 0xffffffff) {
		data = 32;
	}
	if (data != 0 && data < 65536) {
		nv->execution_throttle[0] = LSB(data);
		nv->execution_throttle[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'execution-throttle':"
		    " %d; using nvram default of %d\n", data, CHAR_TO_SHORT(
		    nv->execution_throttle[0], nv->execution_throttle[1]));
	}

	/* Get Login timeout (16-bit on 24xx, unlike the 23xx 8-bit field). */
	if ((data = ql_get_prop(ha, "login-timeout")) == 0xffffffff) {
		data = 3;
	}
	if (data < 65536) {
		nv->login_timeout[0] = LSB(data);
		nv->login_timeout[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'login-timeout': %d; "
		    "using nvram value of %d\n", data, CHAR_TO_SHORT(
		    nv->login_timeout[0], nv->login_timeout[1]));
	}

	/* Get retry count. */
	if ((data = ql_get_prop(ha, "login-retry-count")) == 0xffffffff) {
		data = 4;
	}
	if (data < 65536) {
		nv->login_retry_count[0] = LSB(data);
		nv->login_retry_count[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'login-retry-count': "
		    "%d; using nvram value of %d\n", data, CHAR_TO_SHORT(
		    nv->login_retry_count[0], nv->login_retry_count[1]));
	}

	/* Get adapter hard loop ID enable (0xffffffff = leave nvram). */
	data = ql_get_prop(ha, "enable-adapter-hard-loop-ID");
	if (data == 0) {
		nv->firmware_options_1[0] =
		    (uint8_t)(nv->firmware_options_1[0] & ~BIT_0);
	} else if (data == 1) {
		nv->firmware_options_1[0] =
		    (uint8_t)(nv->firmware_options_1[0] | BIT_0);
	} else if (data != 0xffffffff) {
		EL(ha, "invalid parameter value for "
		    "'enable-adapter-hard-loop-ID': %d; using nvram value "
		    "of %d\n", data,
		    nv->firmware_options_1[0] & BIT_0 ? 1 : 0);
	}

	/* Get adapter hard loop ID. */
	data = ql_get_prop(ha, "adapter-hard-loop-ID");
	if (data < 126) {
		nv->hard_address[0] = LSB(data);
		nv->hard_address[1] = MSB(data);
	} else if (data != 0xffffffff) {
		EL(ha, "invalid parameter value for 'adapter-hard-loop-ID':"
		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
		    nv->hard_address[0], nv->hard_address[1]));
	}

	/* Get LIP reset (kept in cfg_flags on 24xx, not nvram). */
	if ((data = ql_get_prop(ha, "enable-LIP-reset-on-bus-reset")) ==
	    0xffffffff) {
		data = 0;
	}
	if (data == 0) {
		ha->cfg_flags &= ~CFG_ENABLE_LIP_RESET;
	} else if (data == 1) {
		ha->cfg_flags |= CFG_ENABLE_LIP_RESET;
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-LIP-reset-on-bus-reset': %d; using value of 0\n",
		    data);
	}

	/* Get LIP full login. */
	if ((data = ql_get_prop(ha, "enable-LIP-full-login-on-bus-reset")) ==
	    0xffffffff) {
		data = 1;
	}
	if (data == 0) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_2);
	} else if (data == 1) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_2);
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-LIP-full-login-on-bus-reset': %d; using nvram "
		    "value of %d\n", data, nv->host_p[1] & BIT_2 ? 1 : 0);
	}

	/* Get target reset. */
	if ((data = ql_get_prop(ha, "enable-target-reset-on-bus-reset")) ==
	    0xffffffff) {
		data = 0;
	}
	if (data == 0) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] & ~BIT_3);
	} else if (data == 1) {
		nv->host_p[1] = (uint8_t)(nv->host_p[1] | BIT_3);
	} else {
		EL(ha, "invalid parameter value for "
		    "'enable-target-reset-on-bus-reset': %d; using nvram "
		    "value of %d", data, nv->host_p[1] & BIT_3 ? 1 : 0);
	}

	/* Get reset delay. */
	if ((data = ql_get_prop(ha, "reset-delay")) == 0xffffffff) {
		data = 5;
	}
	if (data != 0 && data < 256) {
		nv->reset_delay = (uint8_t)data;
	} else {
		EL(ha, "invalid parameter value for 'reset-delay': %d; "
		    "using nvram value of %d", data, nv->reset_delay);
	}

	/* Get port down retry count. */
	if ((data = ql_get_prop(ha, "port-down-retry-count")) == 0xffffffff) {
		data = 8;
	}
	if (data < 256) {
		nv->port_down_retry_count[0] = LSB(data);
		nv->port_down_retry_count[1] = MSB(data);
	} else {
		EL(ha, "invalid parameter value for 'port-down-retry-count':"
		    " %d; using nvram value of %d\n", data, CHAR_TO_SHORT(
		    nv->port_down_retry_count[0],
		    nv->port_down_retry_count[1]));
	}

	/* Connection mode and data rate do not apply to 81xx/8081 FCoE. */
	if (!(CFG_IST(ha, CFG_CTRL_8081))) {
		/* Get connection mode setting. */
		if ((data = ql_get_prop(ha, "connection-options")) ==
		    0xffffffff) {
			data = 2;
		}
		if (data <= 2) {
			/* Connection mode: firmware_options_2[0] bits 6:4. */
			nv->firmware_options_2[0] = (uint8_t)
			    (nv->firmware_options_2[0] &
			    ~(BIT_6 | BIT_5 | BIT_4));
			nv->firmware_options_2[0] = (uint8_t)
			    (nv->firmware_options_2[0] | (uint8_t)(data << 4));
		} else {
			EL(ha, "invalid parameter value for 'connection-"
			    "options': %d; using nvram value of %d\n", data,
			    (nv->firmware_options_2[0] >> 4) & 0x3);
		}

		/* Get data rate setting (range varies per family). */
		if ((data = ql_get_prop(ha, "fc-data-rate")) == 0xffffffff) {
			data = 2;
		}
		if ((CFG_IST(ha, CFG_CTRL_2422) && data < 4) ||
		    (CFG_IST(ha, CFG_CTRL_258081) && data < 5)) {
			/* Data rate: firmware_options_3[1] bits 7:5. */
			nv->firmware_options_3[1] = (uint8_t)
			    (nv->firmware_options_3[1] & 0x1f);
			nv->firmware_options_3[1] = (uint8_t)
			    (nv->firmware_options_3[1] | (uint8_t)(data << 5));
		} else {
			EL(ha, "invalid parameter value for 'fc-data-rate': "
			    "%d; using nvram value of %d\n", data,
			    (nv->firmware_options_3[1] >> 5) & 0x7);
		}
	}

	/* Get IP FW container count. */
	ha->ip_init_ctrl_blk.cb24.cc[0] = LSB(ql_ip_buffer_count);
	ha->ip_init_ctrl_blk.cb24.cc[1] = MSB(ql_ip_buffer_count);

	/* Get IP low water mark. */
	ha->ip_init_ctrl_blk.cb24.low_water_mark[0] = LSB(ql_ip_low_water);
	ha->ip_init_ctrl_blk.cb24.low_water_mark[1] = MSB(ql_ip_low_water);

	ADAPTER_STATE_LOCK(ha);

	/* Get enable flash load. */
	if ((data = ql_get_prop(ha, "enable-flash-load")) == 0xffffffff ||
	    data == 0) {
		ha->cfg_flags &= ~CFG_LOAD_FLASH_FW;
	} else if (data == 1) {
		ha->cfg_flags |= CFG_LOAD_FLASH_FW;
	} else {
		EL(ha, "invalid parameter value for 'enable-flash-load': "
		    "%d; using default value of 0\n", data);
	}

	/* Enable firmware extended tracing */
	if ((data = ql_get_prop(ha, "enable-fwexttrace")) != 0xffffffff) {
		if (data != 0) {
			ha->cfg_flags |= CFG_ENABLE_FWEXTTRACE;
		}
	}

	/* Enable firmware fc tracing; property value is the trace option. */
	if ((data = ql_get_prop(ha, "enable-fwfcetrace")) != 0xffffffff) {
		ha->cfg_flags |= CFG_ENABLE_FWFCETRACE;
		ha->fwfcetraceopt = data;
	}

	/* Enable fast timeout */
	if ((data = ql_get_prop(ha, "enable-fasttimeout")) != 0xffffffff) {
		if (data != 0) {
			ha->cfg_flags |= CFG_FAST_TIMEOUT;
		}
	}

	ql_common_properties(ha);

	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_get_prop
 *	Get property value from configuration file.
 *
 *	Searches three scopes, most specific first:
 *	  1. "hba<inst>-vp<vp>-<name>"  (NPIV port, 24xx+ only)
 *	  2. "hba<inst>-<name>"         (adapter instance)
 *	  3. "<name>"                   (global default)
 *
 * Input:
 *	ha= adapter state pointer.
 *	string = property string pointer.
 *
 * Returns:
 *	0xFFFFFFFF = no property else property value.
 *
 * Context:
 *	Kernel context.
 */
uint32_t
ql_get_prop(ql_adapter_state_t *ha, char *string)
{
	char		buf[256];
	uint32_t	data = 0xffffffff;

	/*
	 * Look for a adapter instance NPIV (virtual port) specific parameter
	 */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		(void) sprintf(buf, "hba%d-vp%d-%s", ha->instance,
		    ha->vp_index, string);
		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
		    buf, (int)0xffffffff);
	}

	/*
	 * Get adapter instance parameter if a vp specific one isn't found.
	 */
	if (data == 0xffffffff) {
		(void) sprintf(buf, "hba%d-%s", ha->instance, string);
		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip,
		    0, buf, (int)0xffffffff);
	}

	/* Adapter instance parameter found? */
	if (data == 0xffffffff) {
		/* No, get default parameter. */
		/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
		data = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, ha->dip, 0,
		    string, (int)0xffffffff);
	}

	return (data);
}

/*
 * ql_check_isp_firmware
 *	Checks if using already loaded RISC code or drivers copy.
 *	If using already loaded code, save a copy of it.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_check_isp_firmware(ql_adapter_state_t *ha)
{
	int		rval;
	uint16_t	word_count;
	uint32_t	byte_count;
	uint32_t	fw_size, *lptr;
	caddr_t		bufp;
	uint16_t	risc_address = (uint16_t)ha->risc_fw[0].addr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Test for firmware running. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* 8021: firmware state is managed via the IDC handshake. */
		if (ql_8021_idc_handler(ha) != NX_DEV_READY) {
			rval = QL_FUNCTION_FAILED;
		} else {
			rval = ql_start_firmware(ha);
		}
	} else if (CFG_IST(ha, CFG_DISABLE_RISC_CODE_LOAD)) {
		/* Discard any previously saved RISC code copy. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		/* Get RISC code length (word at fw base + 3, in words). */
		rval = ql_rd_risc_ram(ha, risc_address + 3, ha->request_dvma,
		    1);
		if (rval == QL_SUCCESS) {
			lptr = (uint32_t *)ha->request_ring_bp;
			fw_size = *lptr << 1;

			/* KM_SLEEP cannot fail; NULL check is defensive. */
			if ((bufp = kmem_alloc(fw_size, KM_SLEEP)) != NULL) {
				ha->risc_code_size = fw_size;
				ha->risc_code = bufp;
				ha->fw_transfer_size = 128;

				/* Dump RISC code. */
				do {
					if (fw_size > ha->fw_transfer_size) {
						byte_count =
						    ha->fw_transfer_size;
					} else {
						byte_count = fw_size;
					}

					word_count =
					    (uint16_t)(byte_count >> 1);

					/* Read chunk via the request ring */
					/* DMA buffer, then copy it out. */
					rval = ql_rd_risc_ram(ha, risc_address,
					    ha->request_dvma, word_count);
					if (rval != QL_SUCCESS) {
						kmem_free(ha->risc_code,
						    ha->risc_code_size);
						ha->risc_code = NULL;
						ha->risc_code_size = 0;
						break;
					}

					(void) ddi_dma_sync(
					    ha->hba_buf.dma_handle,
					    REQUEST_Q_BUFFER_OFFSET,
					    byte_count,
					    DDI_DMA_SYNC_FORKERNEL);
					ddi_rep_get16(ha->hba_buf.acc_handle,
					    (uint16_t *)bufp,
					    (uint16_t *)ha->request_ring_bp,
					    word_count, DDI_DEV_AUTOINCR);

					risc_address += word_count;
					fw_size -= byte_count;
					bufp += byte_count;
				} while (fw_size != 0);
			}
			/*
			 * NOTE(review): rval is forced to failure even after
			 * a successful dump, so the caller always proceeds to
			 * load firmware; the saved copy is retained in
			 * ha->risc_code.  Appears deliberate -- confirm.
			 */
			rval = QL_FUNCTION_FAILED;
		}
	} else {
		rval = QL_FUNCTION_FAILED;
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "Load RISC code\n");
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * Chip diagnostics
 *	Test chip for proper operation.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
2188 */ 2189 static int 2190 ql_chip_diag(ql_adapter_state_t *ha) 2191 { 2192 ql_mbx_data_t mr; 2193 int rval; 2194 int32_t retries = 4; 2195 uint16_t id; 2196 2197 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2198 2199 do { 2200 /* Reset ISP chip. */ 2201 TASK_DAEMON_LOCK(ha); 2202 ha->task_daemon_flags &= ~ISP_ABORT_NEEDED; 2203 TASK_DAEMON_UNLOCK(ha); 2204 2205 /* For ISP2200A reduce firmware load size. */ 2206 if (CFG_IST(ha, CFG_CTRL_2200) && 2207 RD16_IO_REG(ha, mailbox_out[7]) == 4) { 2208 ha->fw_transfer_size = 128; 2209 } else { 2210 ha->fw_transfer_size = REQUEST_QUEUE_SIZE; 2211 } 2212 2213 rval = QL_SUCCESS; 2214 if (!(CFG_IST(ha, CFG_CTRL_8021))) { 2215 ql_reset_chip(ha); 2216 2217 /* Check product ID of chip */ 2218 mr.mb[1] = RD16_IO_REG(ha, mailbox_out[1]); 2219 mr.mb[2] = RD16_IO_REG(ha, mailbox_out[2]); 2220 mr.mb[3] = RD16_IO_REG(ha, mailbox_out[3]); 2221 2222 if (ha->device_id == 0x5432 || 2223 ha->device_id == 0x8432) { 2224 id = 0x2432; 2225 } else if (ha->device_id == 0x5422 || 2226 ha->device_id == 0x8422) { 2227 id = 0x2422; 2228 } else { 2229 id = ha->device_id; 2230 } 2231 2232 if (mr.mb[1] == PROD_ID_1 && 2233 (mr.mb[2] == PROD_ID_2 || mr.mb[2] == PROD_ID_2a) && 2234 (mr.mb[3] == PROD_ID_3 || mr.mb[3] == id)) { 2235 ha->adapter_stats->revlvl.isp2200 = 2236 RD16_IO_REG(ha, mailbox_out[4]); 2237 ha->adapter_stats->revlvl.risc = 2238 RD16_IO_REG(ha, mailbox_out[5]); 2239 ha->adapter_stats->revlvl.frmbfr = 2240 RD16_IO_REG(ha, mailbox_out[6]); 2241 ha->adapter_stats->revlvl.riscrom = 2242 RD16_IO_REG(ha, mailbox_out[7]); 2243 } else { 2244 cmn_err(CE_WARN, "%s(%d) - prod id failed!, " 2245 "mb1=%xh, mb2=%xh, mb3=%xh", QL_NAME, 2246 ha->instance, mr.mb[1], mr.mb[2], mr.mb[3]); 2247 rval = QL_FUNCTION_FAILED; 2248 } 2249 } else if (!(ha->task_daemon_flags & FIRMWARE_LOADED)) { 2250 break; 2251 } 2252 2253 if (rval == QL_SUCCESS) { 2254 /* Wrap Incoming Mailboxes Test. 
*/ 2255 mr.mb[1] = 0xAAAA; 2256 mr.mb[2] = 0x5555; 2257 mr.mb[3] = 0xAA55; 2258 mr.mb[4] = 0x55AA; 2259 mr.mb[5] = 0xA5A5; 2260 mr.mb[6] = 0x5A5A; 2261 mr.mb[7] = 0x2525; 2262 rval = ql_mbx_wrap_test(ha, &mr); 2263 if (rval == QL_SUCCESS) { 2264 if (mr.mb[1] != 0xAAAA || 2265 mr.mb[2] != 0x5555 || 2266 mr.mb[3] != 0xAA55 || 2267 mr.mb[4] != 0x55AA || 2268 mr.mb[5] != 0xA5A5 || 2269 mr.mb[6] != 0x5A5A || 2270 mr.mb[7] != 0x2525) { 2271 rval = QL_FUNCTION_FAILED; 2272 (void) ql_flash_errlog(ha, 2273 FLASH_ERRLOG_ISP_ERR, 0, 2274 RD16_IO_REG(ha, hccr), 2275 RD16_IO_REG(ha, istatus)); 2276 } 2277 } else { 2278 cmn_err(CE_WARN, "%s(%d) - reg test failed=" 2279 "%xh!", QL_NAME, ha->instance, rval); 2280 } 2281 } 2282 } while ((retries-- != 0) && (rval != QL_SUCCESS)); 2283 2284 if (rval != QL_SUCCESS) { 2285 EL(ha, "failed, rval = %xh\n", rval); 2286 } else { 2287 /*EMPTY*/ 2288 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2289 } 2290 return (rval); 2291 } 2292 2293 /* 2294 * ql_load_isp_firmware 2295 * Load and start RISC firmware. 2296 * Uses request ring for DMA buffer. 2297 * 2298 * Input: 2299 * ha = adapter state pointer. 2300 * 2301 * Returns: 2302 * ql local function return status code. 2303 * 2304 * Context: 2305 * Kernel context. 
2306 */ 2307 int 2308 ql_load_isp_firmware(ql_adapter_state_t *vha) 2309 { 2310 caddr_t risc_code_address; 2311 uint32_t risc_address, risc_code_size; 2312 int rval; 2313 uint32_t word_count, cnt; 2314 size_t byte_count; 2315 ql_adapter_state_t *ha = vha->pha; 2316 2317 if (CFG_IST(ha, CFG_CTRL_8021)) { 2318 rval = ql_8021_load_risc(ha); 2319 } else { 2320 if (CFG_IST(ha, CFG_CTRL_81XX)) { 2321 ql_mps_reset(ha); 2322 } 2323 2324 if (CFG_IST(ha, CFG_LOAD_FLASH_FW)) { 2325 return (ql_load_flash_fw(ha)); 2326 } 2327 2328 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2329 2330 /* Load firmware segments */ 2331 for (cnt = 0; cnt < MAX_RISC_CODE_SEGMENTS && 2332 ha->risc_fw[cnt].code != NULL; cnt++) { 2333 2334 risc_code_address = ha->risc_fw[cnt].code; 2335 risc_address = ha->risc_fw[cnt].addr; 2336 risc_code_size = ha->risc_fw[cnt].length; 2337 2338 while (risc_code_size) { 2339 if (CFG_IST(ha, CFG_CTRL_242581)) { 2340 word_count = ha->fw_transfer_size >> 2; 2341 if (word_count > risc_code_size) { 2342 word_count = risc_code_size; 2343 } 2344 byte_count = word_count << 2; 2345 2346 ddi_rep_put32(ha->hba_buf.acc_handle, 2347 (uint32_t *)risc_code_address, 2348 (uint32_t *)ha->request_ring_bp, 2349 word_count, DDI_DEV_AUTOINCR); 2350 } else { 2351 word_count = ha->fw_transfer_size >> 1; 2352 if (word_count > risc_code_size) { 2353 word_count = risc_code_size; 2354 } 2355 byte_count = word_count << 1; 2356 2357 ddi_rep_put16(ha->hba_buf.acc_handle, 2358 (uint16_t *)risc_code_address, 2359 (uint16_t *)ha->request_ring_bp, 2360 word_count, DDI_DEV_AUTOINCR); 2361 } 2362 2363 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 2364 REQUEST_Q_BUFFER_OFFSET, byte_count, 2365 DDI_DMA_SYNC_FORDEV); 2366 2367 rval = ql_wrt_risc_ram(ha, risc_address, 2368 ha->request_dvma, word_count); 2369 if (rval != QL_SUCCESS) { 2370 EL(ha, "failed, load=%xh\n", rval); 2371 cnt = MAX_RISC_CODE_SEGMENTS; 2372 break; 2373 } 2374 2375 risc_address += word_count; 2376 risc_code_size -= word_count; 
2377 risc_code_address += byte_count; 2378 } 2379 } 2380 } 2381 2382 /* Start firmware. */ 2383 if (rval == QL_SUCCESS) { 2384 rval = ql_start_firmware(ha); 2385 } 2386 2387 if (rval != QL_SUCCESS) { 2388 EL(ha, "failed, rval = %xh\n", rval); 2389 } else { 2390 /*EMPTY*/ 2391 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2392 } 2393 2394 return (rval); 2395 } 2396 2397 /* 2398 * ql_load_flash_fw 2399 * Gets ISP24xx firmware from flash and loads ISP. 2400 * 2401 * Input: 2402 * ha: adapter state pointer. 2403 * 2404 * Returns: 2405 * ql local function return status code. 2406 */ 2407 static int 2408 ql_load_flash_fw(ql_adapter_state_t *ha) 2409 { 2410 int rval; 2411 uint8_t seg_cnt; 2412 uint32_t risc_address, xfer_size, count, *bp, faddr; 2413 uint32_t risc_code_size = 0; 2414 2415 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2416 2417 faddr = ha->flash_data_addr | ha->flash_fw_addr; 2418 2419 for (seg_cnt = 0; seg_cnt < 2; seg_cnt++) { 2420 xfer_size = ha->fw_transfer_size >> 2; 2421 do { 2422 GLOBAL_HW_LOCK(); 2423 2424 /* Read data from flash. 
*/ 2425 bp = (uint32_t *)ha->request_ring_bp; 2426 for (count = 0; count < xfer_size; count++) { 2427 rval = ql_24xx_read_flash(ha, faddr++, bp); 2428 if (rval != QL_SUCCESS) { 2429 break; 2430 } 2431 ql_chg_endian((uint8_t *)bp++, 4); 2432 } 2433 2434 GLOBAL_HW_UNLOCK(); 2435 2436 if (rval != QL_SUCCESS) { 2437 EL(ha, "24xx_read_flash failed=%xh\n", rval); 2438 break; 2439 } 2440 2441 if (risc_code_size == 0) { 2442 bp = (uint32_t *)ha->request_ring_bp; 2443 risc_address = bp[2]; 2444 risc_code_size = bp[3]; 2445 ha->risc_fw[seg_cnt].addr = risc_address; 2446 } 2447 2448 if (risc_code_size < xfer_size) { 2449 faddr -= xfer_size - risc_code_size; 2450 xfer_size = risc_code_size; 2451 } 2452 2453 (void) ddi_dma_sync(ha->hba_buf.dma_handle, 2454 REQUEST_Q_BUFFER_OFFSET, xfer_size << 2, 2455 DDI_DMA_SYNC_FORDEV); 2456 2457 rval = ql_wrt_risc_ram(ha, risc_address, 2458 ha->request_dvma, xfer_size); 2459 if (rval != QL_SUCCESS) { 2460 EL(ha, "ql_wrt_risc_ram failed=%xh\n", rval); 2461 break; 2462 } 2463 2464 risc_address += xfer_size; 2465 risc_code_size -= xfer_size; 2466 } while (risc_code_size); 2467 2468 if (rval != QL_SUCCESS) { 2469 break; 2470 } 2471 } 2472 2473 /* Start firmware. */ 2474 if (rval == QL_SUCCESS) { 2475 rval = ql_start_firmware(ha); 2476 } 2477 2478 if (rval != QL_SUCCESS) { 2479 EL(ha, "failed, rval = %xh\n", rval); 2480 } else { 2481 /*EMPTY*/ 2482 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2483 } 2484 return (rval); 2485 } 2486 2487 /* 2488 * ql_start_firmware 2489 * Starts RISC code. 2490 * 2491 * Input: 2492 * ha = adapter state pointer. 2493 * 2494 * Returns: 2495 * ql local function return status code. 2496 * 2497 * Context: 2498 * Kernel context. 
2499 */ 2500 int 2501 ql_start_firmware(ql_adapter_state_t *vha) 2502 { 2503 int rval, rval2; 2504 uint32_t data; 2505 ql_mbx_data_t mr; 2506 ql_adapter_state_t *ha = vha->pha; 2507 2508 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2509 2510 if (CFG_IST(ha, CFG_CTRL_8021)) { 2511 /* Save firmware version. */ 2512 rval = ql_get_fw_version(ha, &mr, MAILBOX_TOV); 2513 ha->fw_major_version = mr.mb[1]; 2514 ha->fw_minor_version = mr.mb[2]; 2515 ha->fw_subminor_version = mr.mb[3]; 2516 ha->fw_attributes = mr.mb[6]; 2517 } else if ((rval = ql_verify_checksum(ha)) == QL_SUCCESS) { 2518 /* Verify checksum of loaded RISC code. */ 2519 /* Start firmware execution. */ 2520 (void) ql_execute_fw(ha); 2521 2522 /* Save firmware version. */ 2523 (void) ql_get_fw_version(ha, &mr, MAILBOX_TOV); 2524 ha->fw_major_version = mr.mb[1]; 2525 ha->fw_minor_version = mr.mb[2]; 2526 ha->fw_subminor_version = mr.mb[3]; 2527 ha->fw_ext_memory_size = ((SHORT_TO_LONG(mr.mb[4], mr.mb[5]) - 2528 0x100000) + 1) * 4; 2529 ha->fw_attributes = mr.mb[6]; 2530 2531 if (CFG_IST(ha, CFG_CTRL_81XX)) { 2532 ha->phy_fw_major_version = LSB(mr.mb[8]); 2533 ha->phy_fw_minor_version = MSB(mr.mb[9]); 2534 ha->phy_fw_subminor_version = LSB(mr.mb[9]); 2535 ha->mpi_fw_major_version = LSB(mr.mb[10]); 2536 ha->mpi_fw_minor_version = MSB(mr.mb[11]); 2537 ha->mpi_fw_subminor_version = LSB(mr.mb[11]); 2538 ha->mpi_capability_list = SHORT_TO_LONG(mr.mb[13], 2539 mr.mb[12]); 2540 if ((rval2 = ql_flash_access(ha, FAC_GET_SECTOR_SIZE, 2541 0, 0, &data)) == QL_SUCCESS) { 2542 ha->xioctl->fdesc.block_size = data << 2; 2543 QL_PRINT_10(CE_CONT, "(%d): fdesc.block_size=" 2544 "%xh\n", ha->instance, 2545 ha->xioctl->fdesc.block_size); 2546 } else { 2547 EL(ha, "flash_access status=%xh\n", rval2); 2548 } 2549 } 2550 2551 /* Set Serdes Transmit Parameters. 
*/ 2552 if (CFG_IST(ha, CFG_CTRL_2422) && ha->serdes_param[0] & BIT_0) { 2553 mr.mb[1] = ha->serdes_param[0]; 2554 mr.mb[2] = ha->serdes_param[1]; 2555 mr.mb[3] = ha->serdes_param[2]; 2556 mr.mb[4] = ha->serdes_param[3]; 2557 (void) ql_serdes_param(ha, &mr); 2558 } 2559 } 2560 /* ETS workaround */ 2561 if (CFG_IST(ha, CFG_CTRL_81XX) && ql_enable_ets) { 2562 if (ql_get_firmware_option(ha, &mr) == QL_SUCCESS) { 2563 mr.mb[2] = (uint16_t) 2564 (mr.mb[2] | FO2_FCOE_512_MAX_MEM_WR_BURST); 2565 (void) ql_set_firmware_option(ha, &mr); 2566 } 2567 } 2568 if (rval != QL_SUCCESS) { 2569 ha->task_daemon_flags &= ~FIRMWARE_LOADED; 2570 EL(ha, "failed, rval = %xh\n", rval); 2571 } else { 2572 ha->task_daemon_flags |= FIRMWARE_LOADED; 2573 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2574 } 2575 return (rval); 2576 } 2577 2578 /* 2579 * ql_set_cache_line 2580 * Sets PCI cache line parameter. 2581 * 2582 * Input: 2583 * ha = adapter state pointer. 2584 * 2585 * Returns: 2586 * ql local function return status code. 2587 * 2588 * Context: 2589 * Kernel context. 2590 */ 2591 int 2592 ql_set_cache_line(ql_adapter_state_t *ha) 2593 { 2594 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2595 2596 /* Set the cache line. */ 2597 if (CFG_IST(ha->pha, CFG_SET_CACHE_LINE_SIZE_1)) { 2598 /* Set cache line register. */ 2599 ql_pci_config_put8(ha->pha, PCI_CONF_CACHE_LINESZ, 1); 2600 } 2601 2602 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2603 2604 return (QL_SUCCESS); 2605 } 2606 2607 /* 2608 * ql_init_rings 2609 * Initializes firmware and ring pointers. 2610 * 2611 * Beginning of response ring has initialization control block 2612 * already built by nvram config routine. 2613 * 2614 * Input: 2615 * ha = adapter state pointer. 2616 * ha->hba_buf = request and response rings 2617 * ha->init_ctrl_blk = initialization control block 2618 * 2619 * Returns: 2620 * ql local function return status code. 2621 * 2622 * Context: 2623 * Kernel context. 
2624 */ 2625 int 2626 ql_init_rings(ql_adapter_state_t *vha2) 2627 { 2628 int rval, rval2; 2629 uint16_t index; 2630 ql_mbx_data_t mr; 2631 ql_adapter_state_t *ha = vha2->pha; 2632 2633 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2634 2635 /* Clear outstanding commands array. */ 2636 for (index = 0; index < MAX_OUTSTANDING_COMMANDS; index++) { 2637 ha->outstanding_cmds[index] = NULL; 2638 } 2639 ha->osc_index = 1; 2640 2641 ha->pending_cmds.first = NULL; 2642 ha->pending_cmds.last = NULL; 2643 2644 /* Initialize firmware. */ 2645 ha->request_ring_ptr = ha->request_ring_bp; 2646 ha->req_ring_index = 0; 2647 ha->req_q_cnt = REQUEST_ENTRY_CNT - 1; 2648 ha->response_ring_ptr = ha->response_ring_bp; 2649 ha->rsp_ring_index = 0; 2650 2651 if (ha->flags & VP_ENABLED) { 2652 ql_adapter_state_t *vha; 2653 uint16_t cnt; 2654 uint32_t max_vports; 2655 ql_init_24xx_cb_t *icb = &ha->init_ctrl_blk.cb24; 2656 2657 max_vports = (CFG_IST(ha, CFG_CTRL_2422) ? 2658 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS); 2659 bzero(icb->vp_count, 2660 ((uintptr_t)icb + sizeof (ql_init_24xx_cb_t)) - 2661 (uintptr_t)icb->vp_count); 2662 icb->vp_count[0] = (uint8_t)max_vports; 2663 2664 /* Allow connection option 2. 
*/ 2665 icb->global_vp_option[0] = BIT_1; 2666 2667 for (cnt = 0, vha = ha->vp_next; cnt < max_vports && 2668 vha != NULL; vha = vha->vp_next, cnt++) { 2669 2670 index = (uint8_t)(vha->vp_index - 1); 2671 bcopy(vha->loginparams.node_ww_name.raw_wwn, 2672 icb->vpc[index].node_name, 8); 2673 bcopy(vha->loginparams.nport_ww_name.raw_wwn, 2674 icb->vpc[index].port_name, 8); 2675 2676 icb->vpc[index].options = VPO_TARGET_MODE_DISABLED | 2677 VPO_INITIATOR_MODE_ENABLED; 2678 if (vha->flags & VP_ENABLED) { 2679 icb->vpc[index].options = (uint8_t) 2680 (icb->vpc[index].options | VPO_ENABLED); 2681 } 2682 } 2683 } 2684 2685 for (index = 0; index < 2; index++) { 2686 rval = ql_init_firmware(ha); 2687 if (rval == QL_COMMAND_ERROR) { 2688 EL(ha, "stopping firmware\n"); 2689 (void) ql_stop_firmware(ha); 2690 } else { 2691 break; 2692 } 2693 } 2694 2695 if (rval == QL_SUCCESS && (CFG_IST(ha, CFG_CTRL_24258081)) == 0) { 2696 /* Tell firmware to enable MBA_PORT_BYPASS_CHANGED event */ 2697 rval = ql_get_firmware_option(ha, &mr); 2698 if (rval == QL_SUCCESS) { 2699 mr.mb[1] = (uint16_t)(mr.mb[1] | BIT_9); 2700 mr.mb[2] = 0; 2701 mr.mb[3] = BIT_10; 2702 rval = ql_set_firmware_option(ha, &mr); 2703 } 2704 } 2705 2706 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWFCETRACE))) { 2707 /* Firmware Fibre Channel Event Trace Buffer */ 2708 if ((rval2 = ql_get_dma_mem(ha, &ha->fwfcetracebuf, FWFCESIZE, 2709 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != QL_SUCCESS) { 2710 EL(ha, "fcetrace buffer alloc failed: %xh\n", rval2); 2711 } else { 2712 if ((rval2 = ql_fw_etrace(ha, &ha->fwfcetracebuf, 2713 FTO_FCE_TRACE_ENABLE)) != QL_SUCCESS) { 2714 EL(ha, "fcetrace enable failed: %xh\n", rval2); 2715 ql_free_phys(ha, &ha->fwfcetracebuf); 2716 } 2717 } 2718 } 2719 2720 if ((rval == QL_SUCCESS) && (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE))) { 2721 /* Firmware Extended Trace Buffer */ 2722 if ((rval2 = ql_get_dma_mem(ha, &ha->fwexttracebuf, FWEXTSIZE, 2723 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN)) != 
QL_SUCCESS) { 2724 EL(ha, "exttrace buffer alloc failed: %xh\n", rval2); 2725 } else { 2726 if ((rval2 = ql_fw_etrace(ha, &ha->fwexttracebuf, 2727 FTO_EXT_TRACE_ENABLE)) != QL_SUCCESS) { 2728 EL(ha, "exttrace enable failed: %xh\n", rval2); 2729 ql_free_phys(ha, &ha->fwexttracebuf); 2730 } 2731 } 2732 } 2733 2734 if (rval == QL_SUCCESS && CFG_IST(ha, CFG_CTRL_MENLO)) { 2735 ql_mbx_iocb_t *pkt; 2736 clock_t timer; 2737 2738 /* Wait for firmware login of menlo. */ 2739 for (timer = 3000; timer; timer--) { 2740 if (ha->flags & MENLO_LOGIN_OPERATIONAL) { 2741 break; 2742 } 2743 2744 if (!(ha->flags & INTERRUPTS_ENABLED) || 2745 ddi_in_panic()) { 2746 if (INTERRUPT_PENDING(ha)) { 2747 (void) ql_isr((caddr_t)ha); 2748 INTR_LOCK(ha); 2749 ha->intr_claimed = B_TRUE; 2750 INTR_UNLOCK(ha); 2751 } 2752 } 2753 2754 /* Delay for 1 tick (10 milliseconds). */ 2755 ql_delay(ha, 10000); 2756 } 2757 2758 if (timer == 0) { 2759 rval = QL_FUNCTION_TIMEOUT; 2760 } else { 2761 pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP); 2762 if (pkt == NULL) { 2763 EL(ha, "failed, kmem_zalloc\n"); 2764 rval = QL_MEMORY_ALLOC_FAILED; 2765 } else { 2766 pkt->mvfy.entry_type = VERIFY_MENLO_TYPE; 2767 pkt->mvfy.entry_count = 1; 2768 pkt->mvfy.options_status = 2769 LE_16(VMF_DO_NOT_UPDATE_FW); 2770 2771 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, 2772 sizeof (ql_mbx_iocb_t)); 2773 LITTLE_ENDIAN_16(&pkt->mvfy.options_status); 2774 LITTLE_ENDIAN_16(&pkt->mvfy.failure_code); 2775 2776 if (rval != QL_SUCCESS || 2777 (pkt->mvfy.entry_status & 0x3c) != 0 || 2778 pkt->mvfy.options_status != CS_COMPLETE) { 2779 EL(ha, "failed, status=%xh, es=%xh, " 2780 "cs=%xh, fc=%xh\n", rval, 2781 pkt->mvfy.entry_status & 0x3c, 2782 pkt->mvfy.options_status, 2783 pkt->mvfy.failure_code); 2784 if (rval == QL_SUCCESS) { 2785 rval = QL_FUNCTION_FAILED; 2786 } 2787 } 2788 2789 kmem_free(pkt, sizeof (ql_mbx_iocb_t)); 2790 } 2791 } 2792 } 2793 2794 if (rval != QL_SUCCESS) { 2795 TASK_DAEMON_LOCK(ha); 2796 ha->task_daemon_flags 
&= ~FIRMWARE_UP; 2797 TASK_DAEMON_UNLOCK(ha); 2798 EL(ha, "failed, rval = %xh\n", rval); 2799 } else { 2800 TASK_DAEMON_LOCK(ha); 2801 ha->task_daemon_flags |= FIRMWARE_UP; 2802 TASK_DAEMON_UNLOCK(ha); 2803 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2804 } 2805 return (rval); 2806 } 2807 2808 /* 2809 * ql_fw_ready 2810 * Waits for firmware ready. If firmware becomes ready 2811 * device queues and RISC code are synchronized. 2812 * 2813 * Input: 2814 * ha = adapter state pointer. 2815 * secs = max wait time, in seconds (0-255). 2816 * 2817 * Returns: 2818 * ql local function return status code. 2819 * 2820 * Context: 2821 * Kernel context. 2822 */ 2823 int 2824 ql_fw_ready(ql_adapter_state_t *ha, uint8_t secs) 2825 { 2826 ql_mbx_data_t mr; 2827 clock_t timer; 2828 clock_t dly = 250000; 2829 clock_t sec_delay = MICROSEC / dly; 2830 clock_t wait = secs * sec_delay; 2831 int rval = QL_FUNCTION_FAILED; 2832 uint16_t state = 0xffff; 2833 2834 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2835 2836 timer = ha->r_a_tov < secs ? secs : ha->r_a_tov; 2837 timer = (timer + 2) * sec_delay; 2838 2839 /* Wait for ISP to finish LIP */ 2840 while (timer != 0 && wait != 0 && 2841 !(ha->task_daemon_flags & ISP_ABORT_NEEDED)) { 2842 2843 rval = ql_get_firmware_state(ha, &mr); 2844 if (rval == QL_SUCCESS) { 2845 if (ha->task_daemon_flags & (ISP_ABORT_NEEDED | 2846 LOOP_DOWN)) { 2847 wait--; 2848 } else if (mr.mb[1] != FSTATE_READY) { 2849 if (mr.mb[1] != FSTATE_WAIT_LOGIN) { 2850 wait--; 2851 } 2852 rval = QL_FUNCTION_FAILED; 2853 } else { 2854 /* Firmware is ready. Get 2 * R_A_TOV. */ 2855 rval = ql_get_timeout_parameters(ha, 2856 &ha->r_a_tov); 2857 if (rval != QL_SUCCESS) { 2858 EL(ha, "failed, get_timeout_param" 2859 "=%xh\n", rval); 2860 } 2861 2862 /* Configure loop. 
*/ 2863 rval = ql_configure_loop(ha); 2864 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL); 2865 2866 if (ha->task_daemon_flags & 2867 LOOP_RESYNC_NEEDED) { 2868 wait--; 2869 EL(ha, "loop trans; tdf=%xh\n", 2870 ha->task_daemon_flags); 2871 } else { 2872 break; 2873 } 2874 } 2875 } else { 2876 wait--; 2877 } 2878 2879 if (state != mr.mb[1]) { 2880 EL(ha, "mailbox_reg[1] = %xh\n", mr.mb[1]); 2881 state = mr.mb[1]; 2882 } 2883 2884 /* Delay for a tick if waiting. */ 2885 if (timer-- != 0 && wait != 0) { 2886 if (timer % 4 == 0) { 2887 delay(drv_usectohz(dly)); 2888 } else { 2889 drv_usecwait(dly); 2890 } 2891 } else { 2892 rval = QL_FUNCTION_TIMEOUT; 2893 } 2894 } 2895 2896 if (rval != QL_SUCCESS) { 2897 EL(ha, "failed, rval = %xh\n", rval); 2898 } else { 2899 /*EMPTY*/ 2900 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2901 } 2902 return (rval); 2903 } 2904 2905 /* 2906 * ql_configure_loop 2907 * Setup configurations based on loop. 2908 * 2909 * Input: 2910 * ha = adapter state pointer. 2911 * 2912 * Returns: 2913 * ql local function return status code. 2914 * 2915 * Context: 2916 * Kernel context. 
2917 */ 2918 static int 2919 ql_configure_loop(ql_adapter_state_t *ha) 2920 { 2921 int rval; 2922 ql_adapter_state_t *vha; 2923 2924 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance); 2925 2926 for (vha = ha; vha != NULL; vha = vha->vp_next) { 2927 TASK_DAEMON_LOCK(ha); 2928 if (!(vha->task_daemon_flags & LOOP_RESYNC_NEEDED) && 2929 vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) { 2930 TASK_DAEMON_UNLOCK(ha); 2931 continue; 2932 } 2933 vha->task_daemon_flags &= ~LOOP_RESYNC_NEEDED; 2934 TASK_DAEMON_UNLOCK(ha); 2935 2936 rval = ql_configure_hba(vha); 2937 if (rval == QL_SUCCESS && !(ha->task_daemon_flags & 2938 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) { 2939 rval = ql_configure_device_d_id(vha); 2940 if (rval == QL_SUCCESS && !(ha->task_daemon_flags & 2941 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) { 2942 (void) ql_configure_fabric(vha); 2943 } 2944 } 2945 } 2946 2947 if (rval != QL_SUCCESS) { 2948 EL(ha, "failed, rval = %xh\n", rval); 2949 } else { 2950 /*EMPTY*/ 2951 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance); 2952 } 2953 return (rval); 2954 } 2955 2956 /* 2957 * ql_configure_n_port_info 2958 * Setup configurations based on N port 2 N port topology. 2959 * 2960 * Input: 2961 * ha = adapter state pointer. 2962 * 2963 * Returns: 2964 * ql local function return status code. 2965 * 2966 * Context: 2967 * Kernel context. 2968 */ 2969 static void 2970 ql_configure_n_port_info(ql_adapter_state_t *ha) 2971 { 2972 ql_tgt_t tmp_tq; 2973 ql_tgt_t *tq; 2974 uint8_t *cb_port_name; 2975 ql_link_t *link; 2976 int index, rval; 2977 2978 tq = &tmp_tq; 2979 2980 /* Free existing target queues. */ 2981 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) { 2982 link = ha->dev[index].first; 2983 while (link != NULL) { 2984 tq = link->base_address; 2985 link = link->next; 2986 ql_remove_link(&ha->dev[index], &tq->device); 2987 ql_dev_free(ha, tq); 2988 } 2989 } 2990 2991 /* 2992 * If the N_Port's WWPN is larger than our's then it has the 2993 * N_Port login initiative. 
It will have determined that and 2994 * logged in with the firmware. This results in a device 2995 * database entry. In this situation we will later send up a PLOGI 2996 * by proxy for the N_Port to get things going. 2997 * 2998 * If the N_Ports WWPN is smaller then the firmware has the 2999 * N_Port login initiative and does a FLOGI in order to obtain the 3000 * N_Ports WWNN and WWPN. These names are required later 3001 * during Leadvilles FLOGI. No PLOGI is done by the firmware in 3002 * anticipation of a PLOGI via the driver from the upper layers. 3003 * Upon reciept of said PLOGI the driver issues an ELS PLOGI 3004 * pass-through command and the firmware assumes the s_id 3005 * and the N_Port assumes the d_id and Bob's your uncle. 3006 */ 3007 3008 /* 3009 * In N port 2 N port topology the FW provides a port database entry at 3010 * loop_id 0x7fe which allows us to acquire the Ports WWPN. 3011 */ 3012 tq->d_id.b.al_pa = 0; 3013 tq->d_id.b.area = 0; 3014 tq->d_id.b.domain = 0; 3015 tq->loop_id = 0x7fe; 3016 3017 rval = ql_get_port_database(ha, tq, PDF_NONE); 3018 if (rval == QL_SUCCESS || rval == QL_NOT_LOGGED_IN) { 3019 ql_dev_id_list_t *list; 3020 uint32_t list_size; 3021 ql_mbx_data_t mr; 3022 port_id_t d_id = {0, 0, 0, 0}; 3023 uint16_t loop_id = 0; 3024 3025 cb_port_name = (uint8_t *)(CFG_IST(ha, CFG_CTRL_24258081) ? 
3026 &ha->init_ctrl_blk.cb24.port_name[0] : 3027 &ha->init_ctrl_blk.cb.port_name[0]); 3028 3029 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0], 3030 (la_wwn_t *)cb_port_name) == 1)) { 3031 EL(ha, "target port has N_Port login initiative\n"); 3032 } else { 3033 EL(ha, "host port has N_Port login initiative\n"); 3034 } 3035 3036 /* Capture the N Ports WWPN */ 3037 3038 bcopy((void *)&tq->port_name[0], 3039 (void *)&ha->n_port->port_name[0], 8); 3040 bcopy((void *)&tq->node_name[0], 3041 (void *)&ha->n_port->node_name[0], 8); 3042 3043 /* Resolve an n_port_handle */ 3044 ha->n_port->n_port_handle = 0x7fe; 3045 3046 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES; 3047 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP); 3048 3049 if (list != NULL && 3050 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) == 3051 QL_SUCCESS) { 3052 if (mr.mb[1]) { 3053 EL(ha, "id list entries = %d\n", mr.mb[1]); 3054 for (index = 0; index < mr.mb[1]; index++) { 3055 ql_dev_list(ha, list, index, 3056 &d_id, &loop_id); 3057 ha->n_port->n_port_handle = loop_id; 3058 } 3059 } else { 3060 for (index = 0; index <= LAST_LOCAL_LOOP_ID; 3061 index++) { 3062 /* resuse tq */ 3063 tq->loop_id = (uint16_t)index; 3064 rval = ql_get_port_database(ha, tq, 3065 PDF_NONE); 3066 if (rval == QL_NOT_LOGGED_IN) { 3067 if (tq->master_state == 3068 PD_STATE_PLOGI_PENDING) { 3069 ha->n_port-> 3070 n_port_handle = 3071 tq->loop_id; 3072 break; 3073 } 3074 } else { 3075 ha->n_port->n_port_handle = 3076 tq->loop_id; 3077 break; 3078 } 3079 } 3080 } 3081 } else { 3082 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh", 3083 QL_NAME, ha->instance, d_id.b24); 3084 } 3085 if (list != NULL) { 3086 kmem_free(list, list_size); 3087 } 3088 } 3089 } 3090 3091 3092 /* 3093 * ql_configure_hba 3094 * Setup adapter context. 3095 * 3096 * Input: 3097 * ha = adapter state pointer. 3098 * 3099 * Returns: 3100 * ql local function return status code. 3101 * 3102 * Context: 3103 * Kernel context. 
 */
static int
ql_configure_hba(ql_adapter_state_t *ha)
{
	uint8_t		*bp;
	int		rval;
	uint32_t	state;
	ql_mbx_data_t	mr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get host addresses. */
	rval = ql_get_adapter_id(ha, &mr);
	if (rval == QL_SUCCESS) {
		/* Clear all topology bits before re-deriving them below. */
		ha->topology = (uint8_t)(ha->topology &
		    ~(QL_N_PORT | QL_NL_PORT | QL_F_PORT | QL_FL_PORT));

		/* Save Host d_id, alpa, loop ID. */
		ha->loop_id = mr.mb[1];
		ha->d_id.b.al_pa = LSB(mr.mb[2]);
		ha->d_id.b.area = MSB(mr.mb[2]);
		ha->d_id.b.domain = LSB(mr.mb[3]);

		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~FDISC_ENABLED;

		/* Get loop topology (connection type reported in mb[6]). */
		switch (mr.mb[6]) {
		case CNX_LOOP_NO_FABRIC:
			ha->topology = (uint8_t)(ha->topology | QL_NL_PORT);
			break;
		case CNX_FLPORT_IN_LOOP:
			ha->topology = (uint8_t)(ha->topology | QL_FL_PORT);
			break;
		case CNX_NPORT_2_NPORT_P2P:
		case CNX_NPORT_2_NPORT_NO_TGT_RSP:
			ha->flags |= POINT_TO_POINT;
			ha->topology = (uint8_t)(ha->topology | QL_N_PORT);
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				ql_configure_n_port_info(ha);
			}
			break;
		case CNX_FLPORT_P2P:
			ha->flags |= POINT_TO_POINT;
			ha->topology = (uint8_t)(ha->topology | QL_F_PORT);

			/* Get supported option. */
			if (CFG_IST(ha, CFG_CTRL_24258081) &&
			    mr.mb[7] & GID_FP_NPIV_SUPPORT) {
				ha->flags |= FDISC_ENABLED;
			}
			/* Get VLAN ID, mac address */
			if (CFG_IST(ha, CFG_CTRL_8081)) {
				ha->fabric_params = mr.mb[7];
				/* Low 12 bits of mb[9] carry the VLAN ID. */
				ha->fcoe_vlan_id = (uint16_t)(mr.mb[9] & 0xfff);
				ha->fcoe_fcf_idx = mr.mb[10];
				/* VN_Port MAC arrives packed two bytes per
				 * mailbox register, MSB first. */
				ha->fcoe_vnport_mac[0] = MSB(mr.mb[11]);
				ha->fcoe_vnport_mac[1] = LSB(mr.mb[11]);
				ha->fcoe_vnport_mac[2] = MSB(mr.mb[12]);
				ha->fcoe_vnport_mac[3] = LSB(mr.mb[12]);
				ha->fcoe_vnport_mac[4] = MSB(mr.mb[13]);
				ha->fcoe_vnport_mac[5] = LSB(mr.mb[13]);
			}
			break;
		default:
			QL_PRINT_2(CE_CONT, "(%d,%d): UNKNOWN topology=%xh, "
			    "d_id=%xh\n", ha->instance, ha->vp_index, mr.mb[6],
			    ha->d_id.b24);
			rval = QL_FUNCTION_FAILED;
			break;
		}
		ADAPTER_STATE_UNLOCK(ha);

		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
		    CFG_CTRL_24258081))) {
			mr.mb[1] = 0;
			mr.mb[2] = 0;
			rval = ql_data_rate(ha, &mr);
			if (rval != QL_SUCCESS) {
				EL(ha, "data_rate status=%xh\n", rval);
				state = FC_STATE_FULL_SPEED;
			} else {
				/* Map firmware iidma rate to FC link speed. */
				ha->iidma_rate = mr.mb[1];
				if (mr.mb[1] == IIDMA_RATE_1GB) {
					state = FC_STATE_1GBIT_SPEED;
				} else if (mr.mb[1] == IIDMA_RATE_2GB) {
					state = FC_STATE_2GBIT_SPEED;
				} else if (mr.mb[1] == IIDMA_RATE_4GB) {
					state = FC_STATE_4GBIT_SPEED;
				} else if (mr.mb[1] == IIDMA_RATE_8GB) {
					state = FC_STATE_8GBIT_SPEED;
				} else if (mr.mb[1] == IIDMA_RATE_10GB) {
					state = FC_STATE_10GBIT_SPEED;
				} else {
					state = 0;
				}
			}
		} else {
			/* Older chips: assume 1Gb, report full speed. */
			ha->iidma_rate = IIDMA_RATE_1GB;
			state = FC_STATE_FULL_SPEED;
		}
		/* Preserve port-state bits, replace only the speed bits. */
		ha->state = FC_PORT_STATE_MASK(ha->state) | state;
	} else if (rval == MBS_COMMAND_ERROR) {
		EL(ha, "mbox cmd error, rval = %xh, mr.mb[1]=%hx\n",
		    rval, mr.mb[1]);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		bp = ha->loginparams.nport_ww_name.raw_wwn;
		EL(ha, "topology=%xh, d_id=%xh, "
		    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n",
		    ha->topology, ha->d_id.b24, bp[0], bp[1],
		    bp[2], bp[3], bp[4], bp[5], bp[6], bp[7]);
	}
	return (rval);
}

/*
 * ql_configure_device_d_id
 *	Updates device loop ID.
 *	Also adds to device queue any new devices found on private loop.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_configure_device_d_id(ql_adapter_state_t *ha)
{
	port_id_t		d_id;
	ql_link_t		*link;
	int			rval;
	int			loop;
	ql_tgt_t		*tq;
	ql_dev_id_list_t	*list;
	uint32_t		list_size;
	uint16_t		index, loop_id;
	ql_mbx_data_t		mr;
	uint8_t			retries = MAX_DEVICE_LOST_RETRY;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
	list = kmem_zalloc(list_size, KM_SLEEP);
	/*
	 * NOTE(review): kmem_zalloc() with KM_SLEEP sleeps until memory is
	 * available and never returns NULL, so this branch is dead code kept
	 * only as a defensive check.
	 */
	if (list == NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
		EL(ha, "failed, rval = %xh\n", rval);
		return (rval);
	}

	do {
		/*
		 * Get data from RISC code d_id list to init each device queue.
		 */
		rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
		if (rval != QL_SUCCESS) {
			kmem_free(list, list_size);
			EL(ha, "failed, rval = %xh\n", rval);
			return (rval);
		}

		/* Acquire adapter state lock. */
		ADAPTER_STATE_LOCK(ha);

		/*
		 * Mark all queues as unusable.  PORT_LOST_ID is ORed into
		 * loop_id here and stripped again below for every device the
		 * firmware still reports; whatever stays marked was lost.
		 */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = ha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;
				DEVICE_QUEUE_LOCK(tq);
				if (!(tq->flags & TQF_PLOGI_PROGRS) &&
				    !(ha->topology & QL_N_PORT)) {
					tq->loop_id = (uint16_t)
					    (tq->loop_id | PORT_LOST_ID);
				}
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}

		/* If device not in queues add new queue. */
		for (index = 0; index < mr.mb[1]; index++) {
			ql_dev_list(ha, list, index, &d_id, &loop_id);

			if (VALID_DEVICE_ID(ha, loop_id)) {
				tq = ql_dev_init(ha, d_id, loop_id);
				if (tq != NULL) {
					tq->loop_id = loop_id;

					/* Test for fabric device. */
					if (d_id.b.domain !=
					    ha->d_id.b.domain ||
					    d_id.b.area != ha->d_id.b.area) {
						tq->flags |= TQF_FABRIC_DEVICE;
					}

					/*
					 * Drop the adapter lock around the
					 * mailbox call; it can block.
					 */
					ADAPTER_STATE_UNLOCK(ha);
					if (ql_get_port_database(ha, tq,
					    PDF_NONE) == QL_SUCCESS) {
						ADAPTER_STATE_LOCK(ha);
						tq->loop_id = (uint16_t)
						    (tq->loop_id &
						    ~PORT_LOST_ID);
					} else {
						ADAPTER_STATE_LOCK(ha);
					}
				}
			}
		}

		/* 24xx does not report switch devices in ID list. */
		if ((CFG_IST(ha, CFG_CTRL_24258081)) &&
		    ha->topology & (QL_F_PORT | QL_FL_PORT)) {
			/* 0xfffffe: FC well-known F_Port address. */
			d_id.b24 = 0xfffffe;
			tq = ql_dev_init(ha, d_id, FL_PORT_24XX_HDL);
			if (tq != NULL) {
				tq->flags |= TQF_FABRIC_DEVICE;
				ADAPTER_STATE_UNLOCK(ha);
				(void) ql_get_port_database(ha, tq, PDF_NONE);
				ADAPTER_STATE_LOCK(ha);
			}
			/* 0xfffffc: FC well-known name-server address. */
			d_id.b24 = 0xfffffc;
			tq = ql_dev_init(ha, d_id, SNS_24XX_HDL);
			if (tq != NULL) {
				tq->flags |= TQF_FABRIC_DEVICE;
				ADAPTER_STATE_UNLOCK(ha);
				if (ha->vp_index != 0) {
					(void) ql_login_fport(ha, tq,
					    SNS_24XX_HDL, LFF_NONE, NULL);
				}
				(void) ql_get_port_database(ha, tq, PDF_NONE);
				ADAPTER_STATE_LOCK(ha);
			}
		}

		/* If F_port exists, allocate queue for FL_Port. */
		index = ql_alpa_to_index[0xfe];
		d_id.b24 = 0;
		if (ha->dev[index].first != NULL) {
			tq = ql_dev_init(ha, d_id, (uint16_t)
			    (CFG_IST(ha, CFG_CTRL_24258081) ?
			    FL_PORT_24XX_HDL : FL_PORT_LOOP_ID));
			if (tq != NULL) {
				tq->flags |= TQF_FABRIC_DEVICE;
				ADAPTER_STATE_UNLOCK(ha);
				(void) ql_get_port_database(ha, tq, PDF_NONE);
				ADAPTER_STATE_LOCK(ha);
			}
		}

		/* Allocate queue for broadcast. */
		d_id.b24 = 0xffffff;
		(void) ql_dev_init(ha, d_id, (uint16_t)
		    (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
		    IP_BROADCAST_LOOP_ID));

		/* Check for any devices lost. */
		loop = FALSE;
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			for (link = ha->dev[index].first; link != NULL;
			    link = link->next) {
				tq = link->base_address;

				if ((tq->loop_id & PORT_LOST_ID) &&
				    !(tq->flags & (TQF_INITIATOR_DEVICE |
				    TQF_FABRIC_DEVICE))) {
					loop = TRUE;
				}
			}
		}

		/* Release adapter state lock. */
		ADAPTER_STATE_UNLOCK(ha);

		/* Give devices time to recover (1 second busy-wait). */
		if (loop == TRUE) {
			drv_usecwait(1000000);
		}
	} while (retries-- && loop == TRUE &&
	    !(ha->pha->task_daemon_flags & LOOP_RESYNC_NEEDED));

	kmem_free(list, list_size);

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}

/*
 * ql_dev_list
 *	Gets device d_id and loop ID from firmware device list.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	list	device list pointer.
 *	index:	list index of device data.
 *	d_id:	pointer for d_id data.
 *	id:	pointer for loop ID.
 *
 * Context:
 *	Kernel context.
 */
void
ql_dev_list(ql_adapter_state_t *ha, union ql_dev_id_list *list,
    uint32_t index, port_id_t *d_id, uint16_t *id)
{
	/*
	 * The firmware ID-list entry layout differs by chip family; pick the
	 * matching overlay of the union and decode d_id plus loop/port handle.
	 */
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		struct ql_24_dev_id	*list24 = (struct ql_24_dev_id *)list;

		d_id->b.al_pa = list24[index].al_pa;
		d_id->b.area = list24[index].area;
		d_id->b.domain = list24[index].domain;
		*id = CHAR_TO_SHORT(list24[index].n_port_hdl_l,
		    list24[index].n_port_hdl_h);

	} else if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		struct ql_ex_dev_id	*list23 = (struct ql_ex_dev_id *)list;

		d_id->b.al_pa = list23[index].al_pa;
		d_id->b.area = list23[index].area;
		d_id->b.domain = list23[index].domain;
		*id = CHAR_TO_SHORT(list23[index].loop_id_l,
		    list23[index].loop_id_h);

	} else {
		struct ql_dev_id	*list22 = (struct ql_dev_id *)list;

		d_id->b.al_pa = list22[index].al_pa;
		d_id->b.area = list22[index].area;
		d_id->b.domain = list22[index].domain;
		*id = (uint16_t)list22[index].loop_id;
	}
}

/*
 * ql_configure_fabric
 *	Setup fabric context.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_configure_fabric(ql_adapter_state_t *ha)
{
	port_id_t	d_id;
	ql_tgt_t	*tq;
	int		rval = QL_FUNCTION_FAILED;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	ha->topology = (uint8_t)(ha->topology & ~QL_SNS_CONNECTION);

	/* Test switch fabric controller present. */
	d_id.b24 = FS_FABRIC_F_PORT;
	tq = ql_d_id_to_queue(ha, d_id);
	if (tq != NULL) {
		/* Get port/node names of F_Port. */
		(void) ql_get_port_database(ha, tq, PDF_NONE);

		/* Only a reachable name server proves an SNS connection. */
		d_id.b24 = FS_NAME_SERVER;
		tq = ql_d_id_to_queue(ha, d_id);
		if (tq != NULL) {
			(void) ql_get_port_database(ha, tq, PDF_NONE);
			ha->topology = (uint8_t)
			    (ha->topology | QL_SNS_CONNECTION);
			rval = QL_SUCCESS;
		}
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_reset_chip
 *	Reset ISP chip.
 *
 * Input:
 *	ha = adapter block pointer.
 *	All activity on chip must be already stopped.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_reset_chip(ql_adapter_state_t *vha)
{
	uint32_t		cnt;
	uint16_t		cmd;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical port */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * accessing pci space while not powered can cause panic's
	 * on some platforms (i.e. Sunblade 1000's)
	 */
	if (ha->power_level == PM_LEVEL_D3) {
		QL_PRINT_2(CE_CONT, "(%d): Low Power exit\n", ha->instance);
		return;
	}

	/* Reset all outbound mailbox registers */
	for (cnt = 0; cnt < ha->reg_off->mbox_cnt; cnt++) {
		WRT16_IO_REG(ha, mailbox_in[cnt], (uint16_t)0);
	}

	/* 8021 has its own dedicated reset path. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		ha->timeout_cnt = 0;
		ql_8021_reset_chip(ha);
		QL_PRINT_3(CE_CONT, "(%d): 8021 exit\n", ha->instance);
		return;
	}

	/* Disable ISP interrupts.
	 */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* 24xx-family chips reset via their own sequence. */
	if (CFG_IST(ha, CFG_CTRL_242581)) {
		RD32_IO_REG(ha, ictrl);
		ql_reset_24xx_chip(ha);
		QL_PRINT_3(CE_CONT, "(%d): 24xx exit\n", ha->instance);
		return;
	}

	/*
	 * We are going to reset the chip in case of 2300. That might cause
	 * a PBM ERR if a DMA transaction is in progress. One way of
	 * avoiding it is to disable Bus Master operation before we start
	 * the reset activity.
	 */
	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	cmd = (uint16_t)(cmd & ~PCI_COMM_ME);
	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);

	/* Pause RISC (poll up to ~30s for pause acknowledge). */
	WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
			break;
		}
		drv_usecwait(MILLISEC);
	}

	/*
	 * A call to ql_isr() can still happen through
	 * ql_mailbox_command(). So Mark that we are/(will-be)
	 * running from rom code now.
	 */
	TASK_DAEMON_LOCK(ha);
	ha->task_daemon_flags &= ~(FIRMWARE_UP | FIRMWARE_LOADED);
	TASK_DAEMON_UNLOCK(ha);

	/* Select FPM registers. */
	WRT16_IO_REG(ha, ctrl_status, 0x20);

	/* FPM Soft Reset. */
	WRT16_IO_REG(ha, fpm_diag_config, 0x100);

	/* Toggle FPM reset for 2300 */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		WRT16_IO_REG(ha, fpm_diag_config, 0);
	}

	/* Select frame buffer registers. */
	WRT16_IO_REG(ha, ctrl_status, 0x10);

	/* Reset frame buffer FIFOs. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		WRT16_IO_REG(ha, fb_cmd, 0x00fc);
		/* read back fb_cmd until zero or 3 seconds max */
		for (cnt = 0; cnt < 300000; cnt++) {
			if ((RD16_IO_REG(ha, fb_cmd) & 0xff) == 0) {
				break;
			}
			drv_usecwait(10);
		}
	} else {
		WRT16_IO_REG(ha, fb_cmd, 0xa000);
	}

	/* Select RISC module registers. */
	WRT16_IO_REG(ha, ctrl_status, 0);

	/* Reset RISC module. */
	WRT16_IO_REG(ha, hccr, HC_RESET_RISC);

	/* Reset ISP semaphore. */
	WRT16_IO_REG(ha, semaphore, 0);

	/* Release RISC module. */
	WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);

	/* Insure mailbox registers are free. */
	WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
	WRT16_IO_REG(ha, hccr, HC_CLR_HOST_INT);

	/* clear the mailbox command pointer. */
	ql_clear_mcp(ha);

	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));

	/* Bus Master is disabled so chip reset is safe. */
	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		WRT16_IO_REG(ha, ctrl_status, ISP_RESET);
		drv_usecwait(MILLISEC);

		/* Wait for reset to finish. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD16_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
				break;
			}
			drv_usecwait(MILLISEC);
		}
	}

	/* Wait for RISC to recover from reset. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if (RD16_IO_REG(ha, mailbox_out[0]) != MBS_BUSY) {
			break;
		}
		drv_usecwait(MILLISEC);
	}

	/* restore bus master */
	cmd = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
	cmd = (uint16_t)(cmd | PCI_COMM_ME);
	ql_pci_config_put16(ha, PCI_CONF_COMM, cmd);

	/* Disable RISC pause on FPM parity error.
	 */
	WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);

	/* Initialize probe registers */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/* Pause RISC. */
		WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) != 0) {
				break;
			} else {
				drv_usecwait(MILLISEC);
			}
		}

		/* Select FPM registers. */
		WRT16_IO_REG(ha, ctrl_status, 0x30);

		/* Set probe register */
		WRT16_IO_REG(ha, mailbox_in[23], 0x204c);

		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);

		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_reset_24xx_chip
 *	Reset ISP24xx chip.
 *
 * Input:
 *	ha = adapter block pointer.
 *	All activity on chip must be already stopped.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_reset_24xx_chip(ql_adapter_state_t *ha)
{
	uint32_t	timer, stat;

	/* Shutdown DMA. */
	WRT32_IO_REG(ha, ctrl_status, DMA_SHUTDOWN | MWB_4096_BYTES);

	/* Wait for DMA to stop (~3s max at 100us per poll). */
	for (timer = 0; timer < 30000; timer++) {
		if ((RD32_IO_REG(ha, ctrl_status) & DMA_ACTIVE) == 0) {
			break;
		}
		drv_usecwait(100);
	}

	/* Stop the firmware via a raw mailbox handshake. */
	WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
	WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
	WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
	for (timer = 0; timer < 30000; timer++) {
		stat = RD32_IO_REG(ha, risc2host);
		if (stat & BIT_15) {
			/*
			 * Status codes below 0x12 are mailbox completions;
			 * anything else is acknowledged and polling continues.
			 */
			if ((stat & 0xff) < 0x12) {
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
				break;
			}
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		}
		drv_usecwait(100);
	}

	/* Reset the chip. */
	WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
	    MWB_4096_BYTES);
	drv_usecwait(100);

	/* Wait for idle status from ROM firmware. */
	for (timer = 0; timer < 30000; timer++) {
		if (RD16_IO_REG(ha, mailbox_out[0]) == 0) {
			break;
		}
		drv_usecwait(100);
	}

	/* Wait for reset to finish. */
	for (timer = 0; timer < 30000; timer++) {
		if ((RD32_IO_REG(ha, ctrl_status) & ISP_RESET) == 0) {
			break;
		}
		drv_usecwait(100);
	}

	/* clear the mailbox command pointer. */
	ql_clear_mcp(ha);

	/* Insure mailbox registers are free. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags &
	    ~(MBX_BUSY_FLG | MBX_WANT_FLG | MBX_ABORT | MBX_INTERRUPT));

	/* Restart the management processor interface if requested. */
	if (ha->flags & MPI_RESET_NEEDED) {
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_RESTART_MPI);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~MPI_RESET_NEEDED;
		ADAPTER_STATE_UNLOCK(ha);
	}

	/*
	 * Set flash write-protection.
	 */
	if ((ha->flags & ONLINE) == 0) {
		ql_24xx_protect_flash(ha);
	}
}

/*
 * ql_clear_mcp
 *	Carefully clear the mailbox command pointer in the ha struct.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */

static void
ql_clear_mcp(ql_adapter_state_t *ha)
{
	uint32_t	cnt;

	/* Don't null ha->mcp without the lock, but don't hang either.
	 */
	if (MBX_REGISTER_LOCK_OWNER(ha) == curthread) {
		/* Caller already holds the mailbox lock; clear directly. */
		ha->mcp = NULL;
	} else {
		/* Try for up to ~3s (300000 * 10us), then give up quietly. */
		for (cnt = 0; cnt < 300000; cnt++) {
			if (TRY_MBX_REGISTER_LOCK(ha) != 0) {
				ha->mcp = NULL;
				MBX_REGISTER_UNLOCK(ha);
				break;
			} else {
				drv_usecwait(10);
			}
		}
	}
}


/*
 * ql_abort_isp
 *	Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *	ha = adapter state pointer.
 *	DEVICE_QUEUE_LOCK must be released.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_abort_isp(ql_adapter_state_t *vha)
{
	ql_link_t		*link, *link2;
	ddi_devstate_t		state;
	uint16_t		index;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	ql_srb_t		*sp;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_2(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	TASK_DAEMON_LOCK(ha);
	ha->task_daemon_flags &= ~ISP_ABORT_NEEDED;
	/*
	 * If an abort is already running, or the adapter is offline or
	 * suspended, there is nothing to do; returning QL_SUCCESS here is
	 * an intentional no-op.
	 */
	if (ha->task_daemon_flags & ABORT_ISP_ACTIVE ||
	    (ha->flags & ONLINE) == 0 || ha->flags & ADAPTER_SUSPENDED) {
		TASK_DAEMON_UNLOCK(ha);
		return (rval);
	}

	ha->task_daemon_flags |= ABORT_ISP_ACTIVE;
	ha->task_daemon_flags &= ~(RESET_MARKER_NEEDED | FIRMWARE_UP |
	    FIRMWARE_LOADED);
	/* Mark every virtual port's loop down. */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		vha->task_daemon_flags |= LOOP_DOWN;
		vha->task_daemon_flags &= ~(COMMAND_WAIT_NEEDED |
		    LOOP_RESYNC_NEEDED);
	}

	TASK_DAEMON_UNLOCK(ha);

	if (ha->mailbox_flags & MBX_BUSY_FLG) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);

		/* Wake up mailbox box routine. */
		ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_ABORT);
		cv_broadcast(&ha->cv_mbx_intr);

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);

		/* Wait for mailbox (up to 100 * 50ms = 5s). */
		for (index = 100; index &&
		    ha->mailbox_flags & MBX_ABORT; index--) {
			drv_usecwait(50000);
		}
	}

	/* Wait for commands to end gracefully if not in panic. */
	if (ha->flags & PARITY_ERROR) {
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);
	} else if (ddi_in_panic() == 0) {
		ql_cmd_wait(ha);
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Reset the chip. */
	ql_reset_chip(ha);

	/*
	 * Even though we have waited for outstanding commands to complete,
	 * except for ones marked SRB_COMMAND_TIMEOUT, and reset the ISP,
	 * there could still be an interrupt thread active.  The interrupt
	 * lock will prevent us from getting an sp from the outstanding
	 * cmds array that the ISR may be using.
	 */

	/* Place all commands in outstanding cmd list on device queue. */
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		REQUEST_RING_LOCK(ha);
		INTR_LOCK(ha);
		if ((link = ha->pending_cmds.first) != NULL) {
			/*
			 * Drain the pending list first; resetting index to 0
			 * keeps the for-loop re-entering this branch until
			 * the pending list is empty.
			 */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);

			REQUEST_RING_UNLOCK(ha);
			index = 0;
		} else {
			REQUEST_RING_UNLOCK(ha);
			if ((sp = ha->outstanding_cmds[index]) == NULL) {
				INTR_UNLOCK(ha);
				continue;
			}
		}

		/*
		 * It's not obvious but the index for commands pulled from
		 * pending will be zero and that entry in the outstanding array
		 * is not used so nulling it is "no harm, no foul".
		 */

		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;

		INTR_UNLOCK(ha);

		/* If command timeout. */
		if (sp->flags & SRB_COMMAND_TIMEOUT) {
			sp->pkt->pkt_reason = CS_TIMEOUT;
			sp->flags &= ~SRB_RETRY;
			sp->flags |= SRB_ISP_COMPLETED;

			/* Call done routine to handle completion. */
			ql_done(&sp->cmd);
			continue;
		}

		/* Acquire target queue lock. */
		lq = sp->lun_queue;
		tq = lq->target_queue;
		DEVICE_QUEUE_LOCK(tq);

		/* Reset watchdog time. */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/* Place request back on top of device queue. */
		sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED |
		    SRB_RETRY);

		ql_add_link_t(&lq->cmd, &sp->cmd);
		sp->flags |= SRB_IN_DEVICE_QUEUE;

		/* Release target queue lock. */
		DEVICE_QUEUE_UNLOCK(tq);
	}

	/*
	 * Clear per LUN active count, because there should not be
	 * any IO outstanding at this time.
	 */
	for (vha = ha; vha != NULL; vha = vha->vp_next) {
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = vha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				link = link->next;
				DEVICE_QUEUE_LOCK(tq);
				tq->outcnt = 0;
				tq->flags &= ~TQF_QUEUE_SUSPENDED;
				for (link2 = tq->lun_queues.first;
				    link2 != NULL; link2 = link2->next) {
					lq = link2->base_address;
					lq->lun_outcnt = 0;
					lq->flags &= ~LQF_UNTAGGED_PENDING;
				}
				DEVICE_QUEUE_UNLOCK(tq);
			}
		}
	}

	/* Reload firmware if it is not already resident and healthy. */
	if ((rval = ql_check_isp_firmware(ha)) != QL_SUCCESS) {
		if ((rval = ql_chip_diag(ha)) == QL_SUCCESS) {
			rval = ql_load_isp_firmware(ha);
		}
	}

	if (rval == QL_SUCCESS && (rval = ql_set_cache_line(ha)) ==
	    QL_SUCCESS && (rval = ql_init_rings(ha)) == QL_SUCCESS &&
	    (rval = ql_fw_ready(ha, 10)) == QL_SUCCESS) {

		/* If reset abort needed that may have been set. */
		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE);
		TASK_DAEMON_UNLOCK(ha);

		/* Enable ISP interrupts. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_enable_intrs(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
		} else {
			WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
		}

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		/* Set loop online, if it really is. */
		ql_loop_online(ha);

		state = ddi_get_devstate(ha->dip);
		if (state != DDI_DEVSTATE_UP) {
			/*EMPTY*/
			ddi_dev_report_fault(ha->dip, DDI_SERVICE_RESTORED,
			    DDI_DEVICE_FAULT, "Device reset succeeded");
		}
	} else {
		/* Enable ISP interrupts. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_enable_intrs(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
		} else {
			WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
		}

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);

		TASK_DAEMON_LOCK(ha);
		ha->task_daemon_flags &= ~(ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE);
		ha->task_daemon_flags |= LOOP_DOWN;
		TASK_DAEMON_UNLOCK(ha);

		ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_2(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_vport_control
 *	Issue Virtual Port Control command.
 *
 * Input:
 *	ha = virtual adapter state pointer.
 *	cmd = control command.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
4079 */ 4080 int 4081 ql_vport_control(ql_adapter_state_t *ha, uint8_t cmd) 4082 { 4083 ql_mbx_iocb_t *pkt; 4084 uint8_t bit; 4085 int rval; 4086 uint32_t pkt_size; 4087 4088 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index); 4089 4090 if (ha->vp_index != 0) { 4091 pkt_size = sizeof (ql_mbx_iocb_t); 4092 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 4093 if (pkt == NULL) { 4094 EL(ha, "failed, kmem_zalloc\n"); 4095 return (QL_MEMORY_ALLOC_FAILED); 4096 } 4097 4098 pkt->vpc.entry_type = VP_CONTROL_TYPE; 4099 pkt->vpc.entry_count = 1; 4100 pkt->vpc.command = cmd; 4101 pkt->vpc.vp_count = 1; 4102 bit = (uint8_t)(ha->vp_index - 1); 4103 pkt->vpc.vp_index[bit / 8] = (uint8_t) 4104 (pkt->vpc.vp_index[bit / 8] | BIT_0 << bit % 8); 4105 4106 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); 4107 if (rval == QL_SUCCESS && pkt->vpc.status != 0) { 4108 rval = QL_COMMAND_ERROR; 4109 } 4110 4111 kmem_free(pkt, pkt_size); 4112 } else { 4113 rval = QL_SUCCESS; 4114 } 4115 4116 if (rval != QL_SUCCESS) { 4117 EL(ha, "failed, rval = %xh\n", rval); 4118 } else { 4119 /*EMPTY*/ 4120 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, 4121 ha->vp_index); 4122 } 4123 return (rval); 4124 } 4125 4126 /* 4127 * ql_vport_modify 4128 * Issue of Modify Virtual Port command. 4129 * 4130 * Input: 4131 * ha = virtual adapter state pointer. 4132 * cmd = command. 4133 * opt = option. 4134 * 4135 * Context: 4136 * Interrupt or Kernel context, no mailbox commands allowed. 
4137 */ 4138 int 4139 ql_vport_modify(ql_adapter_state_t *ha, uint8_t cmd, uint8_t opt) 4140 { 4141 ql_mbx_iocb_t *pkt; 4142 int rval; 4143 uint32_t pkt_size; 4144 4145 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index); 4146 4147 pkt_size = sizeof (ql_mbx_iocb_t); 4148 pkt = kmem_zalloc(pkt_size, KM_SLEEP); 4149 if (pkt == NULL) { 4150 EL(ha, "failed, kmem_zalloc\n"); 4151 return (QL_MEMORY_ALLOC_FAILED); 4152 } 4153 4154 pkt->vpm.entry_type = VP_MODIFY_TYPE; 4155 pkt->vpm.entry_count = 1; 4156 pkt->vpm.command = cmd; 4157 pkt->vpm.vp_count = 1; 4158 pkt->vpm.first_vp_index = ha->vp_index; 4159 pkt->vpm.first_options = opt; 4160 bcopy(ha->loginparams.nport_ww_name.raw_wwn, pkt->vpm.first_port_name, 4161 8); 4162 bcopy(ha->loginparams.node_ww_name.raw_wwn, pkt->vpm.first_node_name, 4163 8); 4164 4165 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); 4166 if (rval == QL_SUCCESS && pkt->vpm.status != 0) { 4167 EL(ha, "failed, ql_issue_mbx_iocb=%xh, status=%xh\n", rval, 4168 pkt->vpm.status); 4169 rval = QL_COMMAND_ERROR; 4170 } 4171 4172 kmem_free(pkt, pkt_size); 4173 4174 if (rval != QL_SUCCESS) { 4175 EL(ha, "failed, rval = %xh\n", rval); 4176 } else { 4177 /*EMPTY*/ 4178 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, 4179 ha->vp_index); 4180 } 4181 return (rval); 4182 } 4183 4184 /* 4185 * ql_vport_enable 4186 * Enable virtual port. 4187 * 4188 * Input: 4189 * ha = virtual adapter state pointer. 4190 * 4191 * Context: 4192 * Kernel context. 
 */
int
ql_vport_enable(ql_adapter_state_t *ha)
{
	int	timer;

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Force the port offline/loop-down before asking firmware. */
	ha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
	TASK_DAEMON_LOCK(ha);
	ha->task_daemon_flags |= LOOP_DOWN;
	ha->task_daemon_flags &= ~(FC_STATE_CHANGE | STATE_ONLINE);
	TASK_DAEMON_UNLOCK(ha);

	ADAPTER_STATE_LOCK(ha);
	ha->flags |= VP_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	if (ql_vport_modify(ha, VPM_MODIFY_ENABLE, VPO_TARGET_MODE_DISABLED |
	    VPO_INITIATOR_MODE_ENABLED | VPO_ENABLED) != QL_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): failed to enable virtual port=%d\n",
		    ha->instance, ha->vp_index);
		return (QL_FUNCTION_FAILED);
	}
	if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
		/* Wait for loop to come up (up to 3000 ticks). */
		for (timer = 0; timer < 3000 &&
		    !(ha->task_daemon_flags & STATE_ONLINE);
		    timer++) {
			delay(1);
		}
	}

	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (QL_SUCCESS);
}

/*
 * ql_vport_create
 *	Create virtual port context.
 *
 * Input:
 *	ha:	parent adapter state pointer.
 *	index:	virtual port index number.
 *
 * Context:
 *	Kernel context.
 */
ql_adapter_state_t *
ql_vport_create(ql_adapter_state_t *ha, uint8_t index)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Inherit the parents data. */
	vha = kmem_alloc(sizeof (ql_adapter_state_t), KM_SLEEP);

	ADAPTER_STATE_LOCK(ha);
	bcopy(ha, vha, sizeof (ql_adapter_state_t));
	/* Clear the fields that must not be shared with the parent. */
	vha->pi_attrs = NULL;
	vha->ub_outcnt = 0;
	vha->ub_allocated = 0;
	vha->flags = 0;
	vha->task_daemon_flags = 0;
	/*
	 * NOTE(review): this links the new vport directly after the parent;
	 * vha inherited the parent's old vp_next via the bcopy above, so the
	 * chain remains intact.
	 */
	ha->vp_next = vha;
	vha->pha = ha;
	vha->vp_index = index;
	ADAPTER_STATE_UNLOCK(ha);

	vha->hba.next = NULL;
	vha->hba.prev = NULL;
	vha->hba.base_address = vha;
	vha->state = FC_PORT_SPEED_MASK(ha->state) | FC_STATE_OFFLINE;
	/* Fresh per-port device and unsolicited-buffer arrays. */
	vha->dev = kmem_zalloc(sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE,
	    KM_SLEEP);
	vha->ub_array = kmem_zalloc(sizeof (*vha->ub_array) * QL_UB_LIMIT,
	    KM_SLEEP);

	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (vha);
}

/*
 * ql_vport_destroy
 *	Destroys virtual port context.
 *
 * Input:
 *	ha = virtual adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_vport_destroy(ql_adapter_state_t *ha)
{
	ql_adapter_state_t	*vha;

	QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Remove port from list. */
	ADAPTER_STATE_LOCK(ha);
	for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
		if (vha->vp_next == ha) {
			vha->vp_next = ha->vp_next;
			break;
		}
	}
	ADAPTER_STATE_UNLOCK(ha);

	if (ha->ub_array != NULL) {
		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);
	}
	if (ha->dev != NULL) {
		/* sizeof operand is unevaluated, so vha is only a type here */
		kmem_free(ha->dev, sizeof (*vha->dev) * DEVICE_HEAD_LIST_SIZE);
	}
	kmem_free(ha, sizeof (ql_adapter_state_t));

	QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
}

/*
 * ql_mps_reset
 *	Reset MPS for FCoE functions.
 *
 * Input:
 *	ha = virtual adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_mps_reset(ql_adapter_state_t *ha)
{
	/* dctl doubles as a retry counter first, then a PCI config value. */
	uint32_t	data, dctl = 1000;

	/*
	 * Acquire the MPS semaphore by writing 1 to RISC RAM 0x7c00 and
	 * polling until bit 0 reads back set; give up after 1000 tries or
	 * on any RAM access error.  NOTE(review): the 0x7c00/0x7A15 RISC
	 * RAM addresses are chip-specific magic — confirm against the
	 * firmware interface spec before changing.
	 */
	do {
		if (dctl-- == 0 || ql_wrt_risc_ram_word(ha, 0x7c00, 1) !=
		    QL_SUCCESS) {
			return;
		}
		if (ql_rd_risc_ram_word(ha, 0x7c00, &data) != QL_SUCCESS) {
			/* Release the semaphore before bailing out. */
			(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
			return;
		}
	} while (!(data & BIT_0));

	/* Sync bits 5-7 of RAM word 0x7A15 with PCI config offset 0x54. */
	if (ql_rd_risc_ram_word(ha, 0x7A15, &data) == QL_SUCCESS) {
		dctl = (uint16_t)ql_pci_config_get16(ha, 0x54);
		if ((data & 0xe0) != (dctl & 0xe0)) {
			data &= 0xff1f;
			data |= dctl & 0xe0;
			(void) ql_wrt_risc_ram_word(ha, 0x7A15, data);
		}
	}
	/* Release the MPS semaphore. */
	(void) ql_wrt_risc_ram_word(ha, 0x7c00, 0);
}