1 /*- 2 * Copyright (c) 2018 Microsemi Corporation. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 /* $FreeBSD$ */ 28 29 #include "smartpqi_includes.h" 30 31 /* 32 * Request the adapter to get PQI capabilities supported. 
33 */ 34 static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs) 35 { 36 int ret = PQI_STATUS_SUCCESS; 37 38 DBG_FUNC("IN\n"); 39 40 gen_adm_req_iu_t admin_req; 41 gen_adm_resp_iu_t admin_resp; 42 dma_mem_t pqi_cap_dma_buf; 43 pqi_dev_cap_t *capability = NULL; 44 pqi_iu_layer_desc_t *iu_layer_desc = NULL; 45 46 /* Allocate Non DMA memory */ 47 capability = os_mem_alloc(softs, sizeof(*capability)); 48 if (!capability) { 49 DBG_ERR("Failed to allocate memory for capability\n"); 50 ret = PQI_STATUS_FAILURE; 51 goto err_out; 52 } 53 54 memset(&admin_req, 0, sizeof(admin_req)); 55 memset(&admin_resp, 0, sizeof(admin_resp)); 56 57 memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem)); 58 pqi_cap_dma_buf.tag = "pqi_cap_buf"; 59 pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE; 60 pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN; 61 62 ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf); 63 if (ret) { 64 DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret); 65 goto err_dma_alloc; 66 } 67 68 admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP; 69 admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size; 70 admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size; 71 admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr; 72 admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK; 73 74 ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp); 75 if( PQI_STATUS_SUCCESS == ret) { 76 memcpy(capability, 77 pqi_cap_dma_buf.virt_addr, 78 pqi_cap_dma_buf.size); 79 } else { 80 DBG_ERR("Failed to send admin req report pqi device capability\n"); 81 goto err_admin_req; 82 83 } 84 85 softs->pqi_dev_cap.max_iqs = capability->max_iqs; 86 softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements; 87 softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len; 88 softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len; 89 softs->pqi_dev_cap.max_oqs = capability->max_oqs; 90 
softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements; 91 softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len; 92 softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity; 93 94 iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP]; 95 softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len; 96 softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported; 97 softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported; 98 99 DBG_INFO("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs); 100 DBG_INFO("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements); 101 DBG_INFO("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len); 102 DBG_INFO("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len); 103 DBG_INFO("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs); 104 DBG_INFO("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements); 105 DBG_INFO("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len); 106 DBG_INFO("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity); 107 DBG_INFO("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw); 108 DBG_INFO("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported); 109 DBG_INFO("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported); 110 111 112 os_mem_free(softs, (void *)capability, 113 REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); 114 os_dma_mem_free(softs, &pqi_cap_dma_buf); 115 116 DBG_FUNC("OUT\n"); 117 return ret; 118 119 err_admin_req: 120 os_dma_mem_free(softs, &pqi_cap_dma_buf); 121 err_dma_alloc: 122 if (capability) 123 os_mem_free(softs, (void *)capability, 124 REPORT_PQI_DEV_CAP_DATA_BUF_SIZE); 125 err_out: 126 DBG_FUNC("failed OUT\n"); 127 return PQI_STATUS_FAILURE; 128 } 129 130 /* 131 * Function used to deallocate 
the used rcb. 132 */ 133 void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count) 134 { 135 136 uint32_t num_req; 137 size_t size; 138 int i; 139 140 DBG_FUNC("IN\n"); 141 num_req = softs->max_outstanding_io + 1; 142 size = num_req * sizeof(rcb_t); 143 for (i = 1; i < req_count; i++) 144 os_dma_mem_free(softs, &softs->sg_dma_desc[i]); 145 os_mem_free(softs, (void *)softs->rcb, size); 146 softs->rcb = NULL; 147 DBG_FUNC("OUT\n"); 148 } 149 150 151 /* 152 * Allocate memory for rcb and SG descriptors. 153 */ 154 static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs) 155 { 156 int ret = PQI_STATUS_SUCCESS; 157 int i = 0; 158 uint32_t num_req = 0; 159 uint32_t sg_buf_size = 0; 160 uint64_t alloc_size = 0; 161 rcb_t *rcb = NULL; 162 rcb_t *prcb = NULL; 163 DBG_FUNC("IN\n"); 164 165 /* Set maximum outstanding requests */ 166 /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io 167 * The rcb will be accessed by using the tag as index 168 * As 0 tag index is not used, we need to allocate one extra. 
169 */ 170 softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io; 171 num_req = softs->max_outstanding_io + 1; 172 DBG_INFO("Max Outstanding IO reset to %d\n", num_req); 173 174 alloc_size = num_req * sizeof(rcb_t); 175 176 /* Allocate Non DMA memory */ 177 rcb = os_mem_alloc(softs, alloc_size); 178 if (!rcb) { 179 DBG_ERR("Failed to allocate memory for rcb\n"); 180 ret = PQI_STATUS_FAILURE; 181 goto err_out; 182 } 183 softs->rcb = rcb; 184 185 /* Allocate sg dma memory for sg chain */ 186 sg_buf_size = softs->pqi_cap.max_sg_elem * 187 sizeof(sgt_t); 188 189 prcb = &softs->rcb[1]; 190 /* Initialize rcb */ 191 for(i=1; i < num_req; i++) { 192 char tag[15]; 193 sprintf(tag, "sg_dma_buf%d", i); 194 softs->sg_dma_desc[i].tag = tag; 195 softs->sg_dma_desc[i].size = sg_buf_size; 196 softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN; 197 198 ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]); 199 if (ret) { 200 DBG_ERR("Failed to Allocate sg desc %d\n", ret); 201 ret = PQI_STATUS_FAILURE; 202 goto error; 203 } 204 prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr); 205 prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr); 206 prcb ++; 207 } 208 209 DBG_FUNC("OUT\n"); 210 return ret; 211 error: 212 pqisrc_free_rcb(softs, i); 213 err_out: 214 DBG_FUNC("failed OUT\n"); 215 return ret; 216 } 217 218 /* 219 * Function used to decide the operational queue configuration params 220 * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support 221 */ 222 void pqisrc_decide_opq_config(pqisrc_softstate_t *softs) 223 { 224 uint16_t total_iq_elements; 225 226 DBG_FUNC("IN\n"); 227 228 DBG_INFO("softs->intr_count : %d softs->num_cpus_online : %d", 229 softs->intr_count, softs->num_cpus_online); 230 231 if (softs->intr_count == 1 || softs->num_cpus_online == 1) { 232 /* Share the event and Operational queue. 
*/ 233 softs->num_op_obq = 1; 234 softs->share_opq_and_eventq = true; 235 } 236 else { 237 /* Note : One OBQ (OBQ0) reserved for event queue */ 238 softs->num_op_obq = MIN(softs->num_cpus_online, 239 softs->intr_count) - 1; 240 softs->num_op_obq = softs->intr_count - 1; 241 softs->share_opq_and_eventq = false; 242 } 243 244 #ifdef MULTIPLE_MSIX 245 /* 246 * softs->num_cpus_online is set as number of physical CPUs, 247 * So we can have more queues/interrupts . 248 */ 249 if (softs->intr_count > 1) 250 softs->share_opq_and_eventq = false; 251 #endif 252 253 DBG_INFO("softs->num_op_obq : %d\n",softs->num_op_obq); 254 255 softs->num_op_raid_ibq = softs->num_op_obq; 256 softs->num_op_aio_ibq = softs->num_op_raid_ibq; 257 softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16; 258 softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16; 259 if (softs->max_ib_iu_length_per_fw == 256 && 260 softs->ob_spanning_supported) { 261 /* older f/w that doesn't actually support spanning. */ 262 softs->max_ib_iu_length = softs->ibq_elem_size; 263 } else { 264 /* max. inbound IU length is an multiple of our inbound element size. */ 265 softs->max_ib_iu_length = 266 (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) * 267 softs->ibq_elem_size; 268 269 } 270 /* If Max. Outstanding IO came with Max. 
Spanning element count then, 271 needed elements per IO are multiplication of 272 Max.Outstanding IO and Max.Spanning element */ 273 total_iq_elements = (softs->max_outstanding_io * 274 (softs->max_ib_iu_length / softs->ibq_elem_size)); 275 276 softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq; 277 softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq, 278 softs->pqi_dev_cap.max_iq_elements); 279 280 softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq; 281 softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq, 282 softs->pqi_dev_cap.max_oq_elements); 283 284 softs->max_sg_per_iu = ((softs->max_ib_iu_length - 285 softs->ibq_elem_size) / 286 sizeof(sgt_t)) + 287 MAX_EMBEDDED_SG_IN_FIRST_IU; 288 289 DBG_INFO("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length); 290 DBG_INFO("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq); 291 DBG_INFO("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq); 292 DBG_INFO("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu); 293 294 DBG_FUNC("OUT\n"); 295 } 296 297 /* 298 * Configure the operational queue parameters. 299 */ 300 int pqisrc_configure_op_queues(pqisrc_softstate_t *softs) 301 { 302 int ret = PQI_STATUS_SUCCESS; 303 304 /* Get the PQI capability, 305 REPORT PQI DEVICE CAPABILITY request */ 306 ret = pqisrc_report_pqi_capability(softs); 307 if (ret) { 308 DBG_ERR("Failed to send report pqi dev capability request : %d\n", 309 ret); 310 goto err_out; 311 } 312 313 /* Reserve required no of slots for internal requests */ 314 softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT; 315 316 /* Decide the Op queue configuration */ 317 pqisrc_decide_opq_config(softs); 318 319 DBG_FUNC("OUT\n"); 320 return ret; 321 322 err_out: 323 DBG_FUNC("OUT failed\n"); 324 return ret; 325 } 326 327 /* 328 * Validate the PQI mode of adapter. 
329 */ 330 int pqisrc_check_pqimode(pqisrc_softstate_t *softs) 331 { 332 int ret = PQI_STATUS_FAILURE; 333 int tmo = 0; 334 uint64_t signature = 0; 335 336 DBG_FUNC("IN\n"); 337 338 /* Check the PQI device signature */ 339 tmo = PQISRC_PQIMODE_READY_TIMEOUT; 340 do { 341 signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE)); 342 343 if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE, 344 sizeof(uint64_t)) == 0) { 345 ret = PQI_STATUS_SUCCESS; 346 break; 347 } 348 OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL); 349 } while (tmo--); 350 351 PRINT_PQI_SIGNATURE(signature); 352 353 if (tmo <= 0) { 354 DBG_ERR("PQI Signature is invalid\n"); 355 ret = PQI_STATUS_TIMEOUT; 356 goto err_out; 357 } 358 359 tmo = PQISRC_PQIMODE_READY_TIMEOUT; 360 /* Check function and status code for the device */ 361 COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config, 362 PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo); 363 if (!tmo) { 364 DBG_ERR("PQI device is not in IDLE state\n"); 365 ret = PQI_STATUS_TIMEOUT; 366 goto err_out; 367 } 368 369 370 tmo = PQISRC_PQIMODE_READY_TIMEOUT; 371 /* Check the PQI device status register */ 372 COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) & 373 PQI_DEV_STATE_AT_INIT, tmo); 374 if (!tmo) { 375 DBG_ERR("PQI Registers are not ready\n"); 376 ret = PQI_STATUS_TIMEOUT; 377 goto err_out; 378 } 379 380 DBG_FUNC("OUT\n"); 381 return ret; 382 err_out: 383 DBG_FUNC("OUT failed\n"); 384 return ret; 385 } 386 387 /* 388 * Get the PQI configuration table parameters. 389 * Currently using for heart-beat counter scratch-pad register. 
390 */ 391 int pqisrc_process_config_table(pqisrc_softstate_t *softs) 392 { 393 int ret = PQI_STATUS_FAILURE; 394 uint32_t config_table_size; 395 uint32_t section_off; 396 uint8_t *config_table_abs_addr; 397 struct pqi_conf_table *conf_table; 398 struct pqi_conf_table_section_header *section_hdr; 399 400 config_table_size = softs->pqi_cap.conf_tab_sz; 401 402 if (config_table_size < sizeof(*conf_table) || 403 config_table_size > PQI_CONF_TABLE_MAX_LEN) { 404 DBG_ERR("Invalid PQI conf table length of %u\n", 405 config_table_size); 406 return ret; 407 } 408 409 conf_table = os_mem_alloc(softs, config_table_size); 410 if (!conf_table) { 411 DBG_ERR("Failed to allocate memory for PQI conf table\n"); 412 return ret; 413 } 414 415 config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr + 416 softs->pqi_cap.conf_tab_off); 417 418 PCI_MEM_GET_BUF(softs, config_table_abs_addr, 419 softs->pqi_cap.conf_tab_off, 420 (uint8_t*)conf_table, config_table_size); 421 422 423 if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE, 424 sizeof(conf_table->sign)) != 0) { 425 DBG_ERR("Invalid PQI config signature\n"); 426 goto out; 427 } 428 429 section_off = LE_32(conf_table->first_section_off); 430 431 while (section_off) { 432 433 if (section_off+ sizeof(*section_hdr) >= config_table_size) { 434 DBG_ERR("PQI config table section offset (%u) beyond \ 435 end of config table (config table length: %u)\n", 436 section_off, config_table_size); 437 break; 438 } 439 440 section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off); 441 442 switch (LE_16(section_hdr->section_id)) { 443 case PQI_CONF_TABLE_SECTION_GENERAL_INFO: 444 case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES: 445 case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA: 446 case PQI_CONF_TABLE_SECTION_DEBUG: 447 break; 448 case PQI_CONF_TABLE_SECTION_HEARTBEAT: 449 softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off + 450 section_off + 451 offsetof(struct pqi_conf_table_heartbeat, 452 
heartbeat_counter); 453 softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr + 454 softs->heartbeat_counter_off); 455 ret = PQI_STATUS_SUCCESS; 456 break; 457 default: 458 DBG_ERR("unrecognized PQI config table section ID: 0x%x\n", 459 LE_16(section_hdr->section_id)); 460 break; 461 } 462 section_off = LE_16(section_hdr->next_section_off); 463 } 464 out: 465 os_mem_free(softs, (void *)conf_table,config_table_size); 466 return ret; 467 } 468 469 /* Wait for PQI reset completion for the adapter*/ 470 int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs) 471 { 472 int ret = PQI_STATUS_SUCCESS; 473 pqi_reset_reg_t reset_reg; 474 int pqi_reset_timeout = 0; 475 uint64_t val = 0; 476 uint32_t max_timeout = 0; 477 478 val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP); 479 480 max_timeout = (val & 0xFFFF00000000) >> 32; 481 482 DBG_INFO("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout); 483 484 while(1) { 485 if (pqi_reset_timeout++ == max_timeout) { 486 return PQI_STATUS_TIMEOUT; 487 } 488 OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */ 489 reset_reg.all_bits = PCI_MEM_GET32(softs, 490 &softs->pqi_reg->dev_reset, PQI_DEV_RESET); 491 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 492 break; 493 } 494 495 return ret; 496 } 497 498 /* 499 * Function used to perform PQI hard reset. 
 */
/*
 * Perform a PQI hard reset: optionally quiesce via the SIS doorbell,
 * write RESET_TYPE_HARD_RESET/RESET_ACTION_RESET to the device reset
 * register, and wait for completion. Clears ctrl_in_pqi_mode on success.
 */
int pqi_reset(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	uint32_t val = 0;
	pqi_reset_reg_t pqi_reset_reg;

	DBG_FUNC("IN\n");

	if (true == softs->ctrl_in_pqi_mode) {

		/* If supported, ask the firmware to quiesce before reset. */
		if (softs->pqi_reset_quiesce_allowed) {
			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR);
			val |= SIS_PQI_RESET_QUIESCE;
			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
					LEGACY_SIS_IDBR, LE_32(val));
			/* The controller clears the doorbell bit when done. */
			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
			if (ret) {
				DBG_ERR("failed with error %d during quiesce\n", ret);
				return ret;
			}
		}

		pqi_reset_reg.all_bits = 0;
		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
			LE_32(pqi_reset_reg.all_bits));

		ret = pqisrc_wait_for_pqi_reset_completion(softs);
		if (ret) {
			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
			return ret;
		}
	}
	softs->ctrl_in_pqi_mode = false;
	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * Initialize the adapter with supported PQI configuration:
 * verify PQI mode, gather CPU/interrupt resources from the OS,
 * create the admin queue, allocate rcb/taglist resources, and
 * create the operational queues. Unwinds in reverse order on error.
 */
int pqisrc_pqi_init(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");

	/* Check the PQI signature */
	ret = pqisrc_check_pqimode(softs);
	if(ret) {
		DBG_ERR("failed to switch to pqi\n");
		goto err_out;
	}

	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
	softs->ctrl_in_pqi_mode = true;

	/* Get the No. of Online CPUs,NUMA/Processor config from OS */
	ret = os_get_processor_config(softs);
	if (ret) {
		DBG_ERR("Failed to get processor config from OS %d\n",
			ret);
		goto err_out;
	}

	/* Get the interrupt count, type, priority available from OS */
	ret = os_get_intr_config(softs);
	if (ret) {
		DBG_ERR("Failed to get interrupt config from OS %d\n",
			ret);
		goto err_out;
	}

	/* Create Admin Queue pair*/
	ret = pqisrc_create_admin_queue(softs);
	if(ret) {
		DBG_ERR("Failed to configure admin queue\n");
		goto err_admin_queue;
	}

	/* For creating event and IO operational queues we have to submit
	   admin IU requests.So Allocate resources for submitting IUs */

	/* Allocate the request container block (rcb) */
	ret = pqisrc_allocate_rcb(softs);
	if (ret == PQI_STATUS_FAILURE) {
		DBG_ERR("Failed to allocate rcb \n");
		goto err_rcb;
	}

	/* Allocate & initialize request id queue */
	ret = pqisrc_init_taglist(softs,&softs->taglist,
				softs->max_outstanding_io);
	if (ret) {
		DBG_ERR("Failed to allocate memory for request id q : %d\n",
			ret);
		goto err_taglist;
	}

	ret = pqisrc_configure_op_queues(softs);
	if (ret) {
		DBG_ERR("Failed to configure op queue\n");
		goto err_config_opq;
	}

	/* Create Operational queues */
	ret = pqisrc_create_op_queues(softs);
	if(ret) {
		DBG_ERR("Failed to create op queue\n");
		ret = PQI_STATUS_FAILURE;
		goto err_create_opq;
	}

	softs->ctrl_online = true;

	DBG_FUNC("OUT\n");
	return ret;

	/* Error unwind: each label releases what was acquired before it. */
err_create_opq:
err_config_opq:
	pqisrc_destroy_taglist(softs,&softs->taglist);
err_taglist:
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
err_rcb:
	pqisrc_destroy_admin_queue(softs);
err_admin_queue:
	os_free_intr_config(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return PQI_STATUS_FAILURE;
}

/* Force the controller back into SIS mode (resetting PQI if needed). */
int
pqisrc_force_sis(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;

	if (SIS_IS_KERNEL_PANIC(softs)) {
		DBG_INFO("Controller FW is not runnning");
		return PQI_STATUS_FAILURE;
	}

	/* Already in SIS mode — nothing to do. */
	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
		return ret;
	}

	/* Firmware kernel is up: just record the mode change. */
	if (SIS_IS_KERNEL_UP(softs)) {
		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
		return ret;
	}
	/* Disable interrupts ? */
	sis_disable_msix(softs);

	/* reset pqi, this will delete queues */
	ret = pqi_reset(softs);
	if (ret) {
		return ret;
	}
	/* Re enable SIS */
	ret = pqisrc_reenable_sis(softs);
	if (ret) {
		return ret;
	}

	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);

	return ret;
}

/*
 * Uninitialize the resources used during PQI initialization:
 * destroy locks, free queue DMA memory, complete outstanding
 * commands, release rcb/taglist, and switch back to SIS mode.
 */
void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
{
	int i;
	DBG_FUNC("IN\n");

	if(softs->devlist_lockcreated==true){
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}

	for (i = 0; i < softs->num_op_raid_ibq; i++) {
		/* OP RAID IB Q */
		if(softs->op_raid_ib_q[i].lockcreated==true){
			OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
			softs->op_raid_ib_q[i].lockcreated = false;
		}

		/* OP AIO IB Q */
		if(softs->op_aio_ib_q[i].lockcreated==true){
			OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
			softs->op_aio_ib_q[i].lockcreated = false;
		}
	}

	/* Free Op queues */
	os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
	os_dma_mem_free(softs, &softs->op_obq_dma_mem);
	os_dma_mem_free(softs, &softs->event_q_dma_mem);

	/* Complete all pending commands. */
	os_complete_outstanding_cmds_nodevice(softs);

	/* Free rcb */
	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);

	/* Free request id lists */
	pqisrc_destroy_taglist(softs,&softs->taglist);

	if(softs->admin_ib_queue.lockcreated==true){
		OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
		softs->admin_ib_queue.lockcreated = false;
	}

	/* Free Admin Queue */
	os_dma_mem_free(softs, &softs->admin_queue_dma_mem);

	/* Switch back to SIS mode */
	if (pqisrc_force_sis(softs)) {
		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
	}

	DBG_FUNC("OUT\n");
}

/*
 * Top-level adapter initialization: SIS init, PQI init, interrupt
 * setup, event configuration, firmware version query, host wellness
 * update, lock/semaphore creation, and optional config-table parsing
 * for the heartbeat counter. Unwinds in reverse order on error.
 */
int pqisrc_init(pqisrc_softstate_t *softs)
{
	int ret = 0;
	int i = 0, j = 0;

	DBG_FUNC("IN\n");

	check_struct_sizes();

	/* Init the Sync interface */
	ret = pqisrc_sis_init(softs);
	if (ret) {
		DBG_ERR("SIS Init failed with error %d\n", ret);
		goto err_out;
	}

	/* Init the PQI interface */
	ret = pqisrc_pqi_init(softs);
	if (ret) {
		DBG_ERR("PQI Init failed with error %d\n", ret);
		goto err_pqi;
	}

	/* Setup interrupt */
	ret = os_setup_intr(softs);
	if (ret) {
		DBG_ERR("Interrupt setup failed with error %d\n", ret);
		goto err_intr;
	}

	/* Report event configuration */
	ret = pqisrc_report_event_config(softs);
	if(ret){
		DBG_ERR(" Failed to configure Report events\n");
		goto err_event;
	}

	/* Set event configuration*/
	ret = pqisrc_set_event_config(softs);
	if(ret){
		DBG_ERR(" Failed to configure Set events\n");
		goto err_event;
	}

	/* Check for PQI spanning */
	ret = pqisrc_get_ctrl_fw_version(softs);
	if(ret){
		DBG_ERR(" Failed to get ctrl fw version\n");
		goto err_fw_version;
	}

	/* update driver version in to FW */
	ret = pqisrc_write_driver_version_to_host_wellness(softs);
	if (ret) {
		DBG_ERR(" Failed to update driver version in to FW");
		goto err_host_wellness;
	}


	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
	if(ret){
		DBG_ERR(" Failed to initialize devlist_lock\n");
		softs->devlist_lockcreated=false;
		goto err_lock;
	}
	softs->devlist_lockcreated = true;

	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
	if(ret != PQI_STATUS_SUCCESS){
		DBG_ERR(" Failed to initialize scan lock\n");
		goto err_scan_lock;
	}

	OS_ATOMIC64_SET(softs, num_intrs, 0);
	softs->prev_num_intrs = softs->num_intrs;


	/* Get the PQI configuration table to read heart-beat counter*/
	if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
		ret = pqisrc_process_config_table(softs);
		if (ret) {
			DBG_ERR("Failed to process PQI configuration table %d\n", ret);
			goto err_config_tab;
		}
	}

	/* Prime the stale-heartbeat detector one interval in the past. */
	if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
		softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;

	/* Init device list */
	for(i = 0; i < PQI_MAX_DEVICES; i++)
		for(j = 0; j < PQI_MAX_MULTILUN; j++)
			softs->device_list[i][j] = NULL;

	DBG_FUNC("OUT\n");
	return ret;

	/* Error unwind: each label releases what was acquired before it. */
err_config_tab:
	os_destroy_semaphore(&softs->scan_lock);
err_scan_lock:
	if(softs->devlist_lockcreated==true){
		os_uninit_spinlock(&softs->devlist_lock);
		softs->devlist_lockcreated = false;
	}
err_lock:
err_fw_version:
err_event:
err_host_wellness:
	os_destroy_intr(softs);
err_intr:
	pqisrc_pqi_uninit(softs);
err_pqi:
	pqisrc_sis_uninit(softs);
err_out:
	DBG_FUNC("OUT failed\n");
	return ret;
}

/*
 * Write all data in the adapter's battery-backed cache to
 * storage.
855 */ 856 int pqisrc_flush_cache( pqisrc_softstate_t *softs, 857 enum pqisrc_flush_cache_event_type event_type) 858 { 859 int rval = PQI_STATUS_SUCCESS; 860 pqisrc_raid_req_t request; 861 pqisrc_bmic_flush_cache_t *flush_buff = NULL; 862 863 DBG_FUNC("IN\n"); 864 865 if (pqisrc_ctrl_offline(softs)) 866 return PQI_STATUS_FAILURE; 867 868 flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t)); 869 if (!flush_buff) { 870 DBG_ERR("Failed to allocate memory for flush cache params\n"); 871 rval = PQI_STATUS_FAILURE; 872 return rval; 873 } 874 875 flush_buff->halt_event = event_type; 876 877 memset(&request, 0, sizeof(request)); 878 879 rval = pqisrc_build_send_raid_request(softs, &request, flush_buff, 880 sizeof(*flush_buff), SA_CACHE_FLUSH, 0, 881 (uint8_t *)RAID_CTLR_LUNID, NULL); 882 if (rval) { 883 DBG_ERR("error in build send raid req ret=%d\n", rval); 884 } 885 886 if (flush_buff) 887 os_mem_free(softs, (void *)flush_buff, 888 sizeof(pqisrc_bmic_flush_cache_t)); 889 890 DBG_FUNC("OUT\n"); 891 892 return rval; 893 } 894 895 /* 896 * Uninitialize the adapter. 897 */ 898 void pqisrc_uninit(pqisrc_softstate_t *softs) 899 { 900 DBG_FUNC("IN\n"); 901 902 os_destroy_intr(softs); 903 904 os_destroy_semaphore(&softs->scan_lock); 905 906 pqisrc_pqi_uninit(softs); 907 908 pqisrc_sis_uninit(softs); 909 910 pqisrc_cleanup_devices(softs); 911 912 DBG_FUNC("OUT\n"); 913 } 914