/*-
 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "smartpqi_includes.h"

#define MAX_RETRIES 3
#define PQISRC_INQUIRY_TIMEOUT 30

/* Validate the scsi sense response code */
static inline boolean_t
pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
{
	DBG_FUNC("IN\n");

	if (!sshdr)
		return false;

	DBG_FUNC("OUT\n");

	return (sshdr->response_code & 0x70) == 0x70;
}

/*
 * Initialize the target ID pool for HBA physical devices (PDs).
 */
void
pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
{
	int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;

	for (i = 0; i < PQI_MAX_PHYSICALS; i++) {
		softs->tid_pool.tid[i] = tid--;
	}
	softs->tid_pool.index = i - 1;
}

int
pqisrc_alloc_tid(pqisrc_softstate_t *softs)
{
	if (softs->tid_pool.index <= -1) {
		DBG_ERR("Target ID exhausted\n");
		return INVALID_ELEM;
	}

	return softs->tid_pool.tid[softs->tid_pool.index--];
}

void
pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
{
	if (softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) {
		DBG_ERR("Target ID queue is full\n");
		return;
	}

	softs->tid_pool.index++;
	softs->tid_pool.tid[softs->tid_pool.index] = tid;
}

/* Update scsi sense info to a local buffer */
boolean_t
pqisrc_update_scsi_sense(const uint8_t *buff, int len,
	struct sense_header_scsi *header)
{
	DBG_FUNC("IN\n");

	if (!buff || !len)
		return false;

	memset(header, 0, sizeof(struct sense_header_scsi));

	header->response_code = (buff[0] & 0x7f);

	if (!pqisrc_scsi_sense_valid(header))
		return false;

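	/*
	 * SPC defines two sense-data layouts, distinguished by the response
	 * code: descriptor format (0x72/0x73) carries the sense key, ASC and
	 * ASCQ in bytes 1-3, while fixed format (0x70/0x71) carries the
	 * sense key in byte 2, the additional length in byte 7 and ASC/ASCQ
	 * in bytes 12-13. Parse whichever layout the device returned.
	 */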
	if (header->response_code >= 0x72) {
		/* descriptor format */
		if (len > 1)
			header->sense_key = (buff[1] & 0xf);
		if (len > 2)
			header->asc = buff[2];
		if (len > 3)
			header->ascq = buff[3];
		if (len > 7)
			header->additional_length = buff[7];
	} else {
		/* fixed format */
		if (len > 2)
			header->sense_key = (buff[2] & 0xf);
		if (len > 7) {
			len = (len < (buff[7] + 8)) ?
				len : (buff[7] + 8);
			if (len > 12)
				header->asc = buff[12];
			if (len > 13)
				header->ascq = buff[13];
		}
	}

	DBG_FUNC("OUT\n");

	return true;
}

/*
 * Function used to build the internal raid request and analyze the response
 */
int
pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
	void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
	raid_path_error_info_elem_t *error_info)
{
	uint8_t *cdb;
	int ret = PQI_STATUS_SUCCESS;
	uint32_t tag = 0;
	struct dma_mem device_mem;
	sgt_t *sgd;

	ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];

	rcb_t *rcb = NULL;

	DBG_FUNC("IN\n");

	memset(&device_mem, 0, sizeof(struct dma_mem));

	/* for TUR datasize: 0 buff: NULL */
	if (datasize) {
		device_mem.tag = "device_mem";
		device_mem.size = datasize;
		device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;

		ret = os_dma_mem_alloc(softs, &device_mem);

		if (ret) {
			DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
			return ret;
		}

		sgd = (sgt_t *)&request->sg_descriptors[0];

		sgd->addr = device_mem.dma_addr;
		sgd->len = datasize;
		sgd->flags = SG_FLAG_LAST;
	}

	/* Build raid path request */
	request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;

	request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
	request->buffer_length = LE_32(datasize);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

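	/*
	 * Build the command-specific CDB below. Transfer lengths are encoded
	 * big-endian: the report LUNs style commands carry the allocation
	 * length in cdb[8] (MSB) and cdb[9] (LSB), e.g. datasize 0x0408
	 * gives cdb[8] = 0x04, cdb[9] = 0x08; the BMIC read/write commands
	 * carry it in cdb[7] (MSB) and cdb[8] (LSB).
	 */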
	switch (cmd) {
	case SA_INQUIRY:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = SA_INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (uint8_t)vpd_page;
		}
		cdb[4] = (uint8_t)datasize;
		if (softs->timeout_in_passthrough) {
			request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
		}
		break;
	case SA_REPORT_LOG:
	case SA_REPORT_PHYS:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = cmd;
		if (cmd == SA_REPORT_PHYS)
			cdb[1] = SA_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = SA_REPORT_LOG_EXTENDED;
		cdb[8] = (uint8_t)(datasize >> 8);
		cdb[9] = (uint8_t)datasize;
		break;
	case PQI_LOG_EXT_QUEUE_ENABLE:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = SA_REPORT_LOG;
		cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
		cdb[8] = (uint8_t)(datasize >> 8);
		cdb[9] = (uint8_t)datasize;
		break;
	case TEST_UNIT_READY:
		request->data_direction = SOP_DATA_DIR_NONE;
		break;
	case SA_GET_RAID_MAP:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = SA_CISS_READ;
		cdb[1] = cmd;
		cdb[8] = (uint8_t)(datasize >> 8);
		cdb[9] = (uint8_t)datasize;
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
		memcpy(device_mem.virt_addr, buff, datasize);
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		/* BMIC transfer length, big-endian in cdb[7..8] */
		cdb[7] = (uint8_t)(datasize >> 8);
		cdb[8] = (uint8_t)datasize;
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		cdb[7] = (uint8_t)(datasize >> 8);
		cdb[8] = (uint8_t)datasize;
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
		memcpy(device_mem.virt_addr, buff, datasize);
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		cdb[7] = (uint8_t)(datasize >> 8);
		cdb[8] = (uint8_t)datasize;
		break;
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_DATA_DIR_TO_DEVICE;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		cdb[7] = (uint8_t)(datasize >> 8);
		cdb[8] = (uint8_t)datasize;
		break;
	default:
		DBG_ERR("unknown command 0x%x\n", cmd);
		ret = PQI_STATUS_FAILURE;
		/* Free any DMA buffer allocated above before bailing out. */
		goto err_notag;
	}

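	/*
	 * Internal requests execute synchronously: reserve a tag and its
	 * request control block (rcb), hook up the internal completion
	 * callbacks, post the IU to the inbound queue, then wait until the
	 * response handler clears rcb->req_pending or PQISRC_CMD_TIMEOUT
	 * expires.
	 */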
	tag = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == tag) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_notag;
	}

	((pqisrc_raid_req_t *)request)->request_id = tag;
	((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
	((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
	rcb = &softs->rcb[tag];
	rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
	rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;

	rcb->req_pending = true;
	rcb->tag = tag;
	/* Submit Command */
	ret = pqisrc_submit_cmnd(softs, ib_q, request);

	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Internal RAID request timed out: cmd = 0x%x\n", cmd);
		goto err_out;
	}

	if (datasize) {
		if (buff) {
			memcpy(buff, device_mem.virt_addr, datasize);
		}
		os_dma_mem_free(softs, &device_mem);
	}

	ret = rcb->status;
	if (ret) {
		if (error_info) {
			memcpy(error_info,
				rcb->error_info,
				sizeof(*error_info));

			if (error_info->data_out_result ==
					PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
				ret = PQI_STATUS_SUCCESS;
			} else {
				DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x, "
					"Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
					BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
					cmd, ret);
				ret = PQI_STATUS_FAILURE;
			}
		}
	} else {
		if (error_info) {
			ret = PQI_STATUS_SUCCESS;
			memset(error_info, 0, sizeof(*error_info));
		}
	}

	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
	DBG_FUNC("OUT\n");
	return ret;

err_out:
	DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
		BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
		cmd, ret);
	os_reset_rcb(rcb);
	pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
err_notag:
	if (datasize)
		os_dma_mem_free(softs, &device_mem);
	DBG_FUNC("FAILED\n");
	return ret;
}

/* Common function used to send report physical and logical LUNs commands */
static int
pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
	void *buff, size_t buf_len)
{
	int ret;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	ret = pqisrc_build_send_raid_request(softs, &request, buff,
		buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);

	DBG_FUNC("OUT\n");

	return ret;
}

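/*
 * The report LUNs commands are issued in two phases: first with a
 * header-sized buffer to learn how long the LUN list is, then with a buffer
 * sized for the full list. If the list grew between the two calls (for
 * example, a device was hot-added), the second response advertises a larger
 * length and the buffer is re-allocated and the command retried.
 */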
/* Subroutine used to get the physical and logical LUNs of the device */
int
pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
	reportlun_data_ext_t **buff, size_t *data_length)
{
	int ret;
	size_t list_len;
	size_t data_len;
	size_t new_lun_list_length;
	reportlun_data_ext_t *lun_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
		sizeof(report_lun_header));

	if (ret) {
		DBG_ERR("failed return code: %d\n", ret);
		return ret;
	}
	list_len = BE_32(report_lun_header.list_length);

retry:
	data_len = sizeof(reportlun_header_t) + list_len;
	*data_length = data_len;

	lun_data = os_mem_alloc(softs, data_len);

	if (!lun_data) {
		DBG_ERR("failed to allocate memory for lun_data\n");
		return PQI_STATUS_FAILURE;
	}

	if (list_len == 0) {
		DBG_DISC("list_len is 0\n");
		memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
		goto out;
	}

	ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);

	if (ret) {
		DBG_ERR("error\n");
		goto error;
	}

	new_lun_list_length = BE_32(lun_data->header.list_length);

	if (new_lun_list_length > list_len) {
		list_len = new_lun_list_length;
		os_mem_free(softs, (void *)lun_data, data_len);
		goto retry;
	}

out:
	*buff = lun_data;
	DBG_FUNC("OUT\n");
	return 0;

error:
	os_mem_free(softs, (void *)lun_data, data_len);
	DBG_ERR("FAILED\n");
	return ret;
}

/*
 * Function used to grab queue depth ext lun data for logical devices
 */
static int
pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd,
	reportlun_queue_depth_data_t **buff, size_t *data_length)
{
	int ret;
	size_t list_len;
	size_t data_len;
	size_t new_lun_list_length;
	reportlun_queue_depth_data_t *lun_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
		sizeof(report_lun_header));

	if (ret) {
		DBG_ERR("failed return code: %d\n", ret);
		return ret;
	}
	list_len = BE_32(report_lun_header.list_length);
retry:
	data_len = sizeof(reportlun_header_t) + list_len;
	*data_length = data_len;
	lun_data = os_mem_alloc(softs, data_len);

	if (!lun_data) {
		DBG_ERR("failed to allocate memory for lun_data\n");
		return PQI_STATUS_FAILURE;
	}

	if (list_len == 0) {
		DBG_INFO("list_len is 0\n");
		memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
		goto out;
	}
	ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);

	if (ret) {
		DBG_ERR("error\n");
		goto error;
	}
	new_lun_list_length = BE_32(lun_data->header.list_length);

	if (new_lun_list_length > list_len) {
		list_len = new_lun_list_length;
		os_mem_free(softs, (void *)lun_data, data_len);
		goto retry;
	}

out:
	*buff = lun_data;
	DBG_FUNC("OUT\n");
	return 0;

error:
	os_mem_free(softs, (void *)lun_data, data_len);
	DBG_ERR("FAILED\n");
	return ret;
}

/*
 * Function used to get physical and logical device list
 */
static int
pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
	reportlun_data_ext_t **physical_dev_list,
	reportlun_data_ext_t **logical_dev_list,
	reportlun_queue_depth_data_t **queue_dev_list,
	size_t *queue_data_length,
	size_t *phys_data_length,
	size_t *log_data_length)
{
	int ret = PQI_STATUS_SUCCESS;
	size_t logical_list_length;
	size_t logdev_data_length;
	size_t data_length;
	reportlun_data_ext_t *local_logdev_list;
	reportlun_data_ext_t *logdev_data;
	reportlun_header_t report_lun_header;

	DBG_FUNC("IN\n");

	ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
	if (ret) {
		DBG_ERR("report physical LUNs failed");
		return ret;
	}

	ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
	if (ret) {
		DBG_ERR("report logical LUNs failed");
		return ret;
	}

	ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
	if (ret) {
		DBG_ERR("report queue depth LUNs failed");
		return ret;
	}

	logdev_data = *logical_dev_list;

	if (logdev_data) {
		logical_list_length =
			BE_32(logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(reportlun_data_ext_t *)&report_lun_header;
		logical_list_length = 0;
	}

	logdev_data_length = sizeof(reportlun_header_t) +
		logical_list_length;

	/* Add a LOGICAL device entry for the controller itself */
	local_logdev_list = os_mem_alloc(softs,
		logdev_data_length + sizeof(reportlun_ext_entry_t));
	if (!local_logdev_list) {
		data_length = *log_data_length;
		os_mem_free(softs, (char *)*logical_dev_list, data_length);
		*logical_dev_list = NULL;
		return PQI_STATUS_FAILURE;
	}

	memcpy(local_logdev_list, logdev_data, logdev_data_length);
	memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
		sizeof(reportlun_ext_entry_t));
	local_logdev_list->header.list_length = BE_32(logical_list_length +
		sizeof(reportlun_ext_entry_t));
	data_length = *log_data_length;
	os_mem_free(softs, (char *)*logical_dev_list, data_length);
	*log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
	*logical_dev_list = local_logdev_list;

	DBG_FUNC("OUT\n");

	return ret;
}

/* Subroutine used to set Bus-Target-Lun for the requested device */
static inline void
pqisrc_set_btl(pqi_scsi_dev_t *device,
	int bus, int target, int lun)
{
	DBG_FUNC("IN\n");

	device->bus = bus;
	device->target = target;
	device->lun = lun;

	DBG_FUNC("OUT\n");
}

inline boolean_t
pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
{
	return device->is_external_raid_device;
}

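/*
 * LUN address interpretation: byte 2 of the 8-byte LUN address is nonzero
 * only for volumes behind an external RAID controller. pqisrc_assign_btl()
 * decodes the little-endian lunid accordingly: external RAID volumes use
 * target = bits 16-29 and lun = the low byte, while internal logical
 * volumes sit at target 0 with lun = (lunid & 0x3fff) + 1.
 */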
static inline boolean_t
pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
{
	return scsi3addr[2] != 0;
}

/* Function used to assign Bus-Target-Lun for the requested device */
static void
pqisrc_assign_btl(pqi_scsi_dev_t *device)
{
	uint8_t *scsi3addr;
	uint32_t lunid;
	uint32_t bus;
	uint32_t target;
	uint32_t lun;

	DBG_FUNC("IN\n");

	scsi3addr = device->scsi3addr;
	lunid = GET_LE32(scsi3addr);

	if (pqisrc_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1);
		device->target_lun_valid = true;
		return;
	}

	if (pqisrc_is_logical_device(device)) {
		if (pqisrc_is_external_raid_device(device)) {
			DBG_DISC("External RAID device!\n");
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			lun = (lunid & 0x3fff) + 1;
			target = 0;
		}
		pqisrc_set_btl(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	DBG_FUNC("OUT\n");
}

/* Build and send the internal INQUIRY command to a particular device */
int
pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
	uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
{
	int ret = PQI_STATUS_SUCCESS;
	pqisrc_raid_req_t request;
	raid_path_error_info_elem_t error_info;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
		SA_INQUIRY, vpd_page, scsi3addr, &error_info);

	DBG_FUNC("OUT\n");
	return ret;
}

#if 0
/* Function used to parse the sense information from response */
static void
pqisrc_fetch_sense_info(const uint8_t *sense_data,
	unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
{
	struct sense_header_scsi header;

	DBG_FUNC("IN\n");

	*sense_key = 0;
	*ascq = 0;
	*asc = 0;

	if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
		*sense_key = header.sense_key;
		*asc = header.asc;
		*ascq = header.ascq;
	}

	DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);

	DBG_FUNC("OUT\n");
}
#endif

/* Determine the logical volume status from the VPD buffer. */
static void
pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device)
{
	int ret;
	uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
	uint8_t vpd_size = sizeof(vpd_volume_status);
	uint8_t offline = true;
	size_t page_length;
	vpd_volume_status *vpd;

	DBG_FUNC("IN\n");

	vpd = os_mem_alloc(softs, vpd_size);
	if (vpd == NULL)
		goto out;

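	/*
	 * The logical volume status VPD page reports the volume state
	 * (online, offline, undergoing expansion and so on) plus a flags
	 * byte; if SA_LV_FLAGS_NO_HOST_IO is set the volume cannot accept
	 * host I/O and is treated as offline.
	 */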
	/* Fetch the VPD page. */
	ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
		(uint8_t *)vpd, vpd_size);

	if (ret) {
		DBG_WARN("Inquiry returned failed status\n");
		goto out;
	}

	if (vpd->page_code != SA_VPD_LV_STATUS) {
		DBG_WARN("Returned invalid buffer\n");
		goto out;
	}

	page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length;
	if (page_length < vpd_size)
		goto out;

	status = vpd->volume_status;
	offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO) != 0;

out:
	device->volume_offline = offline;
	device->volume_status = status;

	if (vpd)
		os_mem_free(softs, (char *)vpd, vpd_size);

	DBG_FUNC("OUT\n");

	return;
}

/* Validate the RAID map parameters */
static int
pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
{
	char *error_msg;
	uint32_t raidmap_size;
	uint32_t r5or6_blocks_per_row;

	DBG_FUNC("IN\n");

	raidmap_size = LE_32(raid_map->structure_size);
	if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
		error_msg = "RAID map too small\n";
		goto error;
	}

#if 0
	phys_dev_num = LE_16(raid_map->layout_map_count) *
		(LE_16(raid_map->data_disks_per_row) +
		LE_16(raid_map->metadata_disks_per_row));
#endif

	if (device->raid_level == SA_RAID_1) {
		if (LE_16(raid_map->layout_map_count) != 2) {
			error_msg = "invalid RAID-1 map\n";
			goto error;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (LE_16(raid_map->layout_map_count) != 3) {
			error_msg = "invalid RAID-1(triple) map\n";
			goto error;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		LE_16(raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			LE_16(raid_map->strip_size) *
			LE_16(raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			error_msg = "invalid RAID-5 or RAID-6 map\n";
			goto error;
		}
	}

	DBG_FUNC("OUT\n");

	return 0;

error:
	DBG_NOTE("%s\n", error_msg);
	return PQI_STATUS_FAILURE;
}

/* Get the RAID map for the requested device */
static int
pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	int raidmap_size;

	pqisrc_raid_req_t request;
	pqisrc_raid_map_t *raid_map;

	DBG_FUNC("IN\n");

	raid_map = os_mem_alloc(softs, sizeof(*raid_map));
	if (!raid_map)
		return PQI_STATUS_FAILURE;

	memset(&request, 0, sizeof(request));
	ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
		SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);

	if (ret) {
		DBG_ERR("error in build send raid req ret=%d\n", ret);
		goto err_out;
	}

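	/*
	 * The first fetch uses a default-sized map buffer. The firmware
	 * reports the real size of the map in structure_size; if the map is
	 * too big for the default buffer, re-allocate at the reported size
	 * and fetch the whole map again.
	 */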
	raidmap_size = LE_32(raid_map->structure_size);
	if (raidmap_size > sizeof(*raid_map)) {
		DBG_NOTE("Raid map is larger than 1024 entries, requesting once again\n");
		os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));

		raid_map = os_mem_alloc(softs, raidmap_size);
		if (!raid_map)
			return PQI_STATUS_FAILURE;
		memset(&request, 0, sizeof(request));

		ret = pqisrc_build_send_raid_request(softs, &request, raid_map, raidmap_size,
			SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
		if (ret) {
			DBG_ERR("error in build send raid req ret=%d\n", ret);
			goto err_out;
		}

		if (LE_32(raid_map->structure_size) != raidmap_size) {
			DBG_WARN("Expected raid map size %d bytes and got %d bytes\n",
				raidmap_size, LE_32(raid_map->structure_size));
			ret = PQI_STATUS_FAILURE;
			goto err_out;
		}
	}

	ret = pqisrc_raid_map_validation(softs, device, raid_map);
	if (ret) {
		DBG_NOTE("error in raid map validation ret=%d\n", ret);
		goto err_out;
	}

	device->raid_map = raid_map;
	DBG_FUNC("OUT\n");
	return 0;

err_out:
	os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
	DBG_FUNC("FAILED \n");
	return ret;
}

/* Get device ioaccel_status to validate the type of device */
static void
pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	uint8_t *buff;
	uint8_t ioaccel_status;

	DBG_FUNC("IN\n");

	buff = os_mem_alloc(softs, 64);
	if (!buff)
		return;

	ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
		VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
	if (ret) {
		DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
		goto err_out;
	}

	ioaccel_status = buff[IOACCEL_STATUS_BYTE];
	device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);

	if (device->offload_config) {
		device->offload_enabled_pending =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (pqisrc_get_device_raidmap(softs, device))
			device->offload_enabled_pending = false;
	}

	DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
		device->offload_config, device->offload_enabled_pending);

err_out:
	os_mem_free(softs, (char *)buff, 64);
	DBG_FUNC("OUT\n");
}

/* Get the RAID level of the requested device */
static void
pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	uint8_t raid_level;
	uint8_t *buff;

	DBG_FUNC("IN\n");

	raid_level = SA_RAID_UNKNOWN;

	buff = os_mem_alloc(softs, 64);
	if (buff) {
		int ret;
		ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
			VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
		if (ret == 0) {
			raid_level = buff[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		os_mem_free(softs, (char *)buff, 64);
	}

	device->raid_level = raid_level;
	DBG_DISC("RAID LEVEL: %x \n", raid_level);
	DBG_FUNC("OUT\n");
}

/* Parse the inquiry response and determine the type of device */
static int
pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	uint8_t *inq_buff;
	int retry = MAX_RETRIES;

	DBG_FUNC("IN\n");

	inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
	if (!inq_buff)
		return PQI_STATUS_FAILURE;

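	/*
	 * Standard INQUIRY data layout (SPC): byte 0 carries the peripheral
	 * device type in its low 5 bits, bytes 8-15 the vendor id and bytes
	 * 16-31 the product id; the parsing below relies on these offsets.
	 */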
	while (retry--) {
		/* Send an inquiry to the device to see what it is. */
		ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
			OBDR_TAPE_INQ_SIZE);
		if (!ret)
			break;
		DBG_WARN("Retrying inquiry !!!\n");
	}
	if (ret)
		goto err_out;

	pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
	pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);

	device->devtype = inq_buff[0] & 0x1f;
	memcpy(device->vendor, &inq_buff[8],
		sizeof(device->vendor));
	memcpy(device->model, &inq_buff[16],
		sizeof(device->model));
	DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n", device->devtype, device->vendor, device->model);

	if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
		if (pqisrc_is_external_raid_device(device)) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = SA_LV_OK;
			device->volume_offline = false;
		} else {
			pqisrc_get_dev_raid_level(softs, device);
			pqisrc_get_dev_ioaccel_status(softs, device);
			pqisrc_get_dev_vol_status(softs, device);
		}
	}

	/*
	 * Check if this is a One-Button-Disaster-Recovery device
	 * by looking for "$DR-10" at offset 43 in the inquiry data.
	 */
	device->is_obdr_device = (device->devtype == ROM_DEVICE &&
		memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
			OBDR_SIG_LEN) == 0);
err_out:
	os_mem_free(softs, (char *)inq_buff, OBDR_TAPE_INQ_SIZE);

	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * BMIC (Basic Management And Interface Commands) command
 * to get the controller identify params
 */
static int
pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff)
{
	int ret = PQI_STATUS_SUCCESS;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
		BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
	DBG_FUNC("OUT\n");

	return ret;
}

/* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
int
pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
{
	int ret = PQI_STATUS_SUCCESS;
	bmic_ident_ctrl_t *identify_ctrl;

	DBG_FUNC("IN\n");

	identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
	if (!identify_ctrl) {
		DBG_ERR("failed to allocate memory for identify_ctrl\n");
		return PQI_STATUS_FAILURE;
	}

	memset(identify_ctrl, 0, sizeof(*identify_ctrl));

	ret = pqisrc_identify_ctrl(softs, identify_ctrl);
	if (ret)
		goto out;

	softs->fw_build_number = identify_ctrl->fw_build_number;
	memcpy(softs->fw_version, identify_ctrl->fw_version,
		sizeof(identify_ctrl->fw_version));
	softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
	snprintf(softs->fw_version +
			strlen(softs->fw_version),
		sizeof(softs->fw_version) - strlen(softs->fw_version),
		"-%u", identify_ctrl->fw_build_number);
out:
	os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
	DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
	DBG_FUNC("OUT\n");
	return ret;
}

/* BMIC command to determine the scsi device identify params */
static int
pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device,
	bmic_ident_physdev_t *buff,
	int buf_len)
{
	int ret = PQI_STATUS_SUCCESS;
	uint16_t bmic_device_index;
	pqisrc_raid_req_t request;

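	/*
	 * The BMIC drive number addresses the physical device: the low byte
	 * is placed in cdb[2] and the high byte in cdb[9]; the remaining CDB
	 * fields are filled in by pqisrc_build_send_raid_request().
	 */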
	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (uint8_t)bmic_device_index;
	request.cdb[9] = (uint8_t)(bmic_device_index >> 8);

	ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
	DBG_FUNC("OUT\n");
	return ret;
}

/*
 * Function used to get the scsi device information using the BMIC
 * command BMIC_IDENTIFY_PHYSICAL_DEVICE
 */
static void
pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device,
	bmic_ident_physdev_t *id_phys)
{
	int ret = PQI_STATUS_SUCCESS;

	DBG_FUNC("IN\n");
	memset(id_phys, 0, sizeof(*id_phys));

	ret = pqisrc_identify_physical_disk(softs, device,
		id_phys, sizeof(*id_phys));
	if (ret) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		LE_16(id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n", device->device_type, device->queue_depth);
	DBG_FUNC("OUT\n");
}

/* Function used to find the entry of the device in a list */
static device_status_t
pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
{
	pqi_scsi_dev_t *device;
	int i, j;

	DBG_FUNC("IN\n");
	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
					device->scsi3addr)) {
				*same_device = device;
				if (pqisrc_device_equal(device_to_find, device)) {
					if (device_to_find->volume_offline)
						return DEVICE_CHANGED;
					return DEVICE_UNCHANGED;
				}
				return DEVICE_CHANGED;
			}
		}
	}
	DBG_FUNC("OUT\n");

	return DEVICE_NOT_FOUND;
}

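/*
 * When rediscovery matches an existing entry, the entry is refreshed in
 * place (keeping the OS-visible device object stable) rather than being
 * removed and re-added. Note that the raid_map pointer is moved, not
 * copied: ownership transfers to the existing entry and the pointer in the
 * new entry is cleared so the map is not freed twice.
 */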
/* Refresh an existing device entry with newly discovered data */
static void
pqisrc_exist_device_update(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
{
	DBG_FUNC("IN\n");
	device_exist->expose_device = new_device->expose_device;
	memcpy(device_exist->vendor, new_device->vendor,
		sizeof(device_exist->vendor));
	memcpy(device_exist->model, new_device->model,
		sizeof(device_exist->model));
	device_exist->is_physical_device = new_device->is_physical_device;
	device_exist->is_external_raid_device =
		new_device->is_external_raid_device;

	if ((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION ||
		device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == SA_LV_OK) {
		device_exist->scsi_rescan = true;
	}

	device_exist->sas_address = new_device->sas_address;
	device_exist->raid_level = new_device->raid_level;
	device_exist->queue_depth = new_device->queue_depth;
	device_exist->ioaccel_handle = new_device->ioaccel_handle;
	device_exist->volume_status = new_device->volume_status;
	device_exist->active_path_index = new_device->active_path_index;
	device_exist->path_map = new_device->path_map;
	device_exist->bay = new_device->bay;
	memcpy(device_exist->box, new_device->box,
		sizeof(device_exist->box));
	memcpy(device_exist->phys_connector, new_device->phys_connector,
		sizeof(device_exist->phys_connector));
	device_exist->offload_config = new_device->offload_config;
	device_exist->offload_enabled_pending =
		new_device->offload_enabled_pending;
	device_exist->offload_to_mirror = 0;
	if (device_exist->raid_map)
		os_mem_free(softs,
			(char *)device_exist->raid_map,
			sizeof(*device_exist->raid_map));
	device_exist->raid_map = new_device->raid_map;
	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
	DBG_FUNC("OUT\n");
}

/* Validate the ioaccel_handle for a newly added device */
static pqi_scsi_dev_t *
pqisrc_identify_device_via_ioaccel(
	pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
{
	pqi_scsi_dev_t *device;
	int i, j;

	DBG_FUNC("IN\n");
	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			if (device->devtype != DISK_DEVICE)
				continue;
			if (pqisrc_is_logical_device(device))
				continue;
			if (device->ioaccel_handle == ioaccel_handle)
				return device;
		}
	}
	DBG_FUNC("OUT\n");

	return NULL;
}

/* Get the scsi device queue depth */
static void
pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
{
	unsigned i;
	unsigned phys_dev_num;
	unsigned num_raidmap_entries;
	unsigned queue_depth;
	pqisrc_raid_map_t *raid_map;
	pqi_scsi_dev_t *device;
	raidmap_data_t *dev_data;
	pqi_scsi_dev_t *phys_disk;
	unsigned j;
	unsigned k;

	DBG_FUNC("IN\n");

	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			if (device->devtype != DISK_DEVICE)
				continue;
			if (!pqisrc_is_logical_device(device))
				continue;
			if (pqisrc_is_external_raid_device(device))
				continue;
			device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
			raid_map = device->raid_map;
			if (!raid_map)
				return;
			dev_data = raid_map->dev_data;
			phys_dev_num = LE_16(raid_map->layout_map_count) *
				(LE_16(raid_map->data_disks_per_row) +
				LE_16(raid_map->metadata_disks_per_row));
			num_raidmap_entries = phys_dev_num *
				LE_16(raid_map->row_cnt);

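			/*
			 * Derive the logical volume's queue depth from its
			 * members: walk every RAID map entry, resolve the
			 * ioaccel handle back to a physical disk, and sum the
			 * member disks' queue depths. If any handle cannot be
			 * resolved the map is stale, so offload is disabled
			 * and the map is dropped.
			 */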
			queue_depth = 0;
			for (k = 0; k < num_raidmap_entries; k++) {
				phys_disk = pqisrc_identify_device_via_ioaccel(softs,
					dev_data[k].ioaccel_handle);

				if (!phys_disk) {
					DBG_WARN(
						"Failed to find physical disk handle for logical drive %016llx\n",
						(unsigned long long)BE_64(device->scsi3addr[0]));
					device->offload_enabled = false;
					device->offload_enabled_pending = false;
					if (raid_map)
						os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
					device->raid_map = NULL;
					return;
				}

				queue_depth += phys_disk->queue_depth;
			}

			device->queue_depth = queue_depth;
		} /* end inner loop */
	} /* end outer loop */
	DBG_FUNC("OUT\n");
}

/* Function used to add a scsi device to the OS scsi subsystem */
static int
pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
		device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);

	device->invalid = false;

	if (device->expose_device) {
		pqisrc_init_device_active_io(softs, device);
		/* TBD: Call OS upper layer function to add the device entry */
		os_add_device(softs, device);
	}
	DBG_FUNC("OUT\n");
	return PQI_STATUS_SUCCESS;
}

/* Function used to remove a scsi device from the OS scsi subsystem */
void
pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
		device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);

	device->invalid = true;
	if (device->expose_device == false) {
		/*
		 * Masked physical devices are not exposed to the storage
		 * stack, so free the masked device resources (device
		 * memory, target ID, etc.) here.
		 */
		DBG_NOTE("Deallocated Masked Device Resources.\n");
		pqisrc_free_device(softs, device);
		return;
	}
	/* Wait for the device's outstanding I/Os to complete */
	pqisrc_wait_for_device_commands_to_complete(softs, device);
	/* Call OS upper layer function to remove the exposed device entry */
	os_remove_device(softs, device);
	DBG_FUNC("OUT\n");
}

/*
 * When exposing a new device to the OS fails, adjust the list according to
 * the mid-layer scsi list.
 */
static void
pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");

	if (!device) {
		DBG_ERR("softs = %p: device is NULL !!!\n", softs);
		return;
	}

	OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
	softs->device_list[device->target][device->lun] = NULL;
	OS_RELEASE_SPINLOCK(&softs->devlist_lock);
	pqisrc_device_mem_free(softs, device);

	DBG_FUNC("OUT\n");
}

/* Debug routine used to display the RAID volume status of the device */
static void
pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	char *status;

	DBG_FUNC("IN\n");
	switch (device->volume_status) {
	case SA_LV_OK:
		status = "Volume is online.";
		break;
	case SA_LV_UNDERGOING_ERASE:
		status = "Volume is undergoing background erase process.";
		break;
	case SA_LV_NOT_AVAILABLE:
		status = "Volume is waiting for transforming volume.";
		break;
	case SA_LV_UNDERGOING_RPI:
		status = "Volume is undergoing rapid parity initialization process.";
		break;
	case SA_LV_PENDING_RPI:
		status = "Volume is queued for rapid parity initialization process.";
		break;
	case SA_LV_ENCRYPTED_NO_KEY:
		status = "Volume is encrypted and cannot be accessed because key is not present.";
		break;
	case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
		break;
	case SA_LV_UNDERGOING_ENCRYPTION:
		status = "Volume is undergoing encryption process.";
		break;
	case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume is undergoing encryption re-keying process.";
		break;
	case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
		break;
	case SA_LV_PENDING_ENCRYPTION:
		status = "Volume is pending migration to encrypted state, but process has not started.";
		break;
	case SA_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume is encrypted and is pending encryption rekeying.";
		break;
	case SA_LV_STATUS_VPD_UNSUPPORTED:
		status = "Volume status is not available through vital product data pages.";
		break;
	case SA_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case SA_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case SA_LV_EJECTED:
		status = "Volume ejected";
		break;
	case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Volume has wrong physical drive replaced";
		break;
	case SA_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled scsi id conflict";
		break;
	case SA_LV_HARDWARE_HAS_OVERHEATED:
		status = "Volume hardware has over heated";
		break;
	case SA_LV_HARDWARE_OVERHEATING:
		status = "Volume hardware over heating";
		break;
	case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "Volume physical drive connection problem";
		break;
	default:
		status = "Volume is in an unknown state.";
		break;
	}

	DBG_DISC("scsi BTL %d:%d:%d %s\n",
		device->bus, device->target, device->lun, status);
	DBG_FUNC("OUT\n");
}

void
pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	DBG_FUNC("IN\n");
	if (!device)
		return;
	if (device->raid_map) {
		os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
	}
	os_mem_free(softs, (char *)device, sizeof(*device));
	DBG_FUNC("OUT\n");
}

/* OS should call this function to free the scsi device */
void
pqisrc_free_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
{
	rcb_t *rcb;
	int i;

	/*
	 * Clear the "device" field in the rcb: a response arriving after
	 * device removal must not access this field.
	 */
	for (i = 1; i <= softs->max_outstanding_io; i++) {
		rcb = &softs->rcb[i];
		if (rcb->dvp == device) {
			DBG_WARN("Pending requests for the removing device\n");
			rcb->dvp = NULL;
		}
	}

	OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);

	if (!pqisrc_is_logical_device(device)) {
		pqisrc_free_tid(softs, device->target);
	}

	softs->device_list[device->target][device->lun] = NULL;

	pqisrc_device_mem_free(softs, device);

	OS_RELEASE_SPINLOCK(&softs->devlist_lock);
}

/* Update the newly added devices to the device list */
static void
pqisrc_update_device_list(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
	int ret;
	int i;
	device_status_t dev_status;
	pqi_scsi_dev_t *device;
	pqi_scsi_dev_t *same_device;
	pqi_scsi_dev_t **added = NULL;
	pqi_scsi_dev_t **removed = NULL;
	int nadded = 0, nremoved = 0;
	int j;
	int tid = 0;
	boolean_t driver_queue_depth_flag = false;

	DBG_FUNC("IN\n");

	added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
	removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);

	if (!added || !removed) {
		DBG_WARN("Out of memory \n");
		goto free_and_out;
	}

	OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);

	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			device->device_gone = true;
		}
	}
	DBG_IO("Device list used an array\n");
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		dev_status = pqisrc_scsi_find_entry(softs, device,
			&same_device);

		switch (dev_status) {
		case DEVICE_UNCHANGED:
			/* New device is present in the existing device list */
			device->new_device = false;
			same_device->device_gone = false;
			pqisrc_exist_device_update(softs, same_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/* Device not found in the existing list */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/* The actual device is gone; the device needs to be added to the list */
			device->new_device = true;
			break;
		default:
			break;
		}
	}
	/* Process all devices that have gone away. */
	for (i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			if (device->device_gone) {
				softs->device_list[device->target][device->lun] = NULL;
				removed[nremoved] = device;
				nremoved++;
			}
		}
	}

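	/*
	 * Reconciliation is mark-and-sweep: every entry in the current table
	 * was marked device_gone above, matching entries from the new scan
	 * cleared the mark, and the loop above swept the still-marked
	 * leftovers into removed[]. The loop below inserts the genuinely new
	 * devices, allocating a target ID from the pool for physical ones.
	 */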
	/* Process all new devices. */
	for (i = 0, nadded = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;

		/* physical device */
		if (!pqisrc_is_logical_device(device)) {
			tid = pqisrc_alloc_tid(softs);
			if (INVALID_ELEM != tid)
				pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
		}

		/*
		 * This is not expected; we may lose the reference to the old
		 * device entry. If the target and lun IDs are the same, it
		 * should have been detected as an existing device, not as a
		 * new one.
		 */
		if (softs->device_list[device->target][device->lun] != NULL) {
			DBG_WARN("Overwriting T : %d L :%d\n", device->target, device->lun);
		}

		softs->device_list[device->target][device->lun] = device;

		DBG_DISC("Added device %p at B : %d T : %d L : %d\n", device,
			device->bus, device->target, device->lun);
		/* To prevent this entry from being freed later. */
		new_device_list[i] = NULL;
		added[nadded] = device;
		nadded++;
	}

	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			device->offload_enabled = device->offload_enabled_pending;
		}
	}

	OS_RELEASE_SPINLOCK(&softs->devlist_lock);

	for (i = 0; i < nremoved; i++) {
		device = removed[i];
		if (device == NULL)
			continue;
		pqisrc_display_device_info(softs, "removed", device);
		pqisrc_remove_device(softs, device);
	}

	for (i = 0; i < PQI_MAX_DEVICES; i++) {
		for (j = 0; j < PQI_MAX_MULTILUN; j++) {
			if (softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			/*
			 * Notify the OS upper layer if the queue depth of any
			 * existing device has changed.
			 */
			if (device->queue_depth !=
					device->advertised_queue_depth) {
				device->advertised_queue_depth = device->queue_depth;
				/* TBD: Call OS upper layer function to change the device Q depth */
			}
			if (device->firmware_queue_depth_set == false)
				driver_queue_depth_flag = true;
			if (device->scsi_rescan)
				os_rescan_target(softs, device);
		}
	}
	/*
	 * If the firmware queue depth is corrupt or not working,
	 * use the driver method to re-calculate the queue depth
	 * for all logical devices.
	 */
	if (driver_queue_depth_flag)
		pqisrc_update_log_dev_qdepth(softs);

	for (i = 0; i < nadded; i++) {
		device = added[i];
		if (device->expose_device) {
			ret = pqisrc_add_device(softs, device);
			if (ret) {
				DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
					device->bus, device->target,
					device->lun);
				pqisrc_adjust_list(softs, device);
				continue;
			}
		}

		pqisrc_display_device_info(softs, "added", device);
	}

	/* Process all volumes that are offline. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device)
			continue;
		if (!device->new_device)
			continue;
		if (device->volume_offline) {
			pqisrc_display_volume_status(softs, device);
			pqisrc_display_device_info(softs, "offline", device);
		}
	}

free_and_out:
	if (added)
		os_mem_free(softs, (char *)added,
			sizeof(*added) * PQI_MAX_DEVICES);
	if (removed)
		os_mem_free(softs, (char *)removed,
			sizeof(*removed) * PQI_MAX_DEVICES);

	DBG_FUNC("OUT\n");
}

/*
 * Let the adapter know the driver version using the BMIC command
 * BMIC_WRITE_HOST_WELLNESS
 */
int
pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
{
	int rval = PQI_STATUS_SUCCESS;
	struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
	size_t data_length;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	data_length = sizeof(*host_wellness_driver_ver);

	host_wellness_driver_ver = os_mem_alloc(softs, data_length);
	if (!host_wellness_driver_ver) {
		DBG_ERR("failed to allocate memory for host wellness driver_version\n");
		return PQI_STATUS_FAILURE;
	}

	host_wellness_driver_ver->start_tag[0] = '<';
	host_wellness_driver_ver->start_tag[1] = 'H';
	host_wellness_driver_ver->start_tag[2] = 'W';
	host_wellness_driver_ver->start_tag[3] = '>';
	host_wellness_driver_ver->driver_version_tag[0] = 'D';
	host_wellness_driver_ver->driver_version_tag[1] = 'V';
	host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
	strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
		sizeof(host_wellness_driver_ver->driver_version));
	if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version)) {
		strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
			sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
	} else {
		DBG_DISC("OS name length(%lu) is longer than the driver_version buffer\n",
			strlen(softs->os_name));
	}
	host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
	host_wellness_driver_ver->end_tag[0] = 'Z';
	host_wellness_driver_ver->end_tag[1] = 'Z';

	rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver, data_length,
		BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);

	os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);

	DBG_FUNC("OUT\n");
	return rval;
}

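/*
 * Host wellness buffers are framed with ASCII tags: "<HW>" opens the
 * buffer, a two-byte type tag with a length field follows ('D','V' for the
 * driver version above, 'T','D' for the time below), and "ZZ" terminates
 * the buffer; the time variant also carries a "DW" (don't write) tag ahead
 * of the terminator.
 */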
/*
 * Write the current RTC time from the host to the adapter using
 * BMIC_WRITE_HOST_WELLNESS
 */
int
pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
{
	int rval = PQI_STATUS_SUCCESS;
	struct bmic_host_wellness_time *host_wellness_time;
	size_t data_length;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	data_length = sizeof(*host_wellness_time);

	host_wellness_time = os_mem_alloc(softs, data_length);
	if (!host_wellness_time) {
		DBG_ERR("failed to allocate memory for host wellness time structure\n");
		return PQI_STATUS_FAILURE;
	}

	host_wellness_time->start_tag[0] = '<';
	host_wellness_time->start_tag[1] = 'H';
	host_wellness_time->start_tag[2] = 'W';
	host_wellness_time->start_tag[3] = '>';
	host_wellness_time->time_tag[0] = 'T';
	host_wellness_time->time_tag[1] = 'D';
	host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
		offsetof(struct bmic_host_wellness_time, century));

	os_get_time(host_wellness_time);

	host_wellness_time->dont_write_tag[0] = 'D';
	host_wellness_time->dont_write_tag[1] = 'W';
	host_wellness_time->end_tag[0] = 'Z';
	host_wellness_time->end_tag[1] = 'Z';

	rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time, data_length,
		BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);

	os_mem_free(softs, (char *)host_wellness_time, data_length);

	DBG_FUNC("OUT\n");
	return rval;
}

/*
 * Function used to perform a rescan of scsi devices
 * for any config change events
 */
int
pqisrc_scan_devices(pqisrc_softstate_t *softs)
{
	boolean_t is_physical_device;
	int ret = PQI_STATUS_FAILURE;
	int i;
	int new_dev_cnt;
	int phy_log_dev_cnt;
	size_t queue_log_data_length;
	uint8_t *scsi3addr;
	uint8_t multiplier;
	uint16_t qdepth;
	uint32_t physical_cnt;
	uint32_t logical_cnt;
	uint32_t logical_queue_cnt;
	uint32_t ndev_allocated = 0;
	size_t phys_data_length, log_data_length;
	reportlun_data_ext_t *physical_dev_list = NULL;
	reportlun_data_ext_t *logical_dev_list = NULL;
	reportlun_ext_entry_t *lun_ext_entry = NULL;
	reportlun_queue_depth_data_t *logical_queue_dev_list = NULL;
	bmic_ident_physdev_t *bmic_phy_info = NULL;
	pqi_scsi_dev_t **new_device_list = NULL;
	pqi_scsi_dev_t *device = NULL;

	DBG_FUNC("IN\n");

	ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
		&logical_queue_dev_list, &queue_log_data_length,
		&phys_data_length, &log_data_length);

	if (ret)
		goto err_out;

	physical_cnt = BE_32(physical_dev_list->header.list_length)
		/ sizeof(physical_dev_list->lun_entries[0]);

	logical_cnt = BE_32(logical_dev_list->header.list_length)
		/ sizeof(logical_dev_list->lun_entries[0]);

	logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length)
		/ sizeof(logical_queue_dev_list->lun_entries[0]);

	DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt);

	if (physical_cnt) {
		bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
		if (bmic_phy_info == NULL) {
			ret = PQI_STATUS_FAILURE;
			DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
			goto err_out;
		}
	}
	phy_log_dev_cnt = physical_cnt + logical_cnt;
	new_device_list = os_mem_alloc(softs,
		sizeof(*new_device_list) * phy_log_dev_cnt);

	if (new_device_list == NULL) {
		ret = PQI_STATUS_FAILURE;
		DBG_ERR("failed to allocate memory for device list : %d\n", ret);
		goto err_out;
	}

	for (i = 0; i < phy_log_dev_cnt; i++) {
		new_device_list[i] = os_mem_alloc(softs,
			sizeof(*new_device_list[i]));
		if (new_device_list[i] == NULL) {
			ret = PQI_STATUS_FAILURE;
			DBG_ERR("failed to allocate memory for device list : %d\n", ret);
			ndev_allocated = i;
			goto err_out;
		}
	}

	ndev_allocated = phy_log_dev_cnt;

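	/*
	 * Walk the combined list: entries [0, physical_cnt) come from the
	 * physical LUN report, the rest from the logical LUN report. For
	 * each candidate: record SAS addresses of external RAID controllers,
	 * skip masked non-disk physical devices, decode the firmware queue
	 * depth hint, send INQUIRY to classify the device, assign
	 * bus/target/lun, and decide whether it is exposed to the OS.
	 */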
	new_dev_cnt = 0;
	for (i = 0; i < phy_log_dev_cnt; i++) {

		if (i < physical_cnt) {
			is_physical_device = true;
			lun_ext_entry = &physical_dev_list->lun_entries[i];
		} else {
			is_physical_device = false;
			lun_ext_entry =
				&logical_dev_list->lun_entries[i - physical_cnt];
		}

		scsi3addr = lun_ext_entry->lunid;

		/* Save the target SAS address for external RAID devices */
		if (lun_ext_entry->device_type == CONTROLLER_DEVICE) {
			int target = lun_ext_entry->lunid[3] & 0x3f;
			softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
		}

		/* Skip masked physical non-disk devices. */
		if (MASKED_DEVICE(scsi3addr) && is_physical_device
			&& (lun_ext_entry->ioaccel_handle == 0))
			continue;

		device = new_device_list[new_dev_cnt];
		memset(device, 0, sizeof(*device));
		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->wwid = lun_ext_entry->wwid;
		device->is_physical_device = is_physical_device;
		if (!is_physical_device && logical_queue_cnt--) {
			device->is_external_raid_device =
				pqisrc_is_external_raid_addr(scsi3addr);
			/*
			 * The multiplier scales the reported queue depth
			 * value to the actual queue depth: if the multiplier
			 * is 1, multiply by 256; if it is 0, multiply by 16.
			 */
			multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
			qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
			if (multiplier) {
				device->firmware_queue_depth_set = true;
				device->queue_depth = qdepth * 256;
			} else {
				device->firmware_queue_depth_set = true;
				device->queue_depth = qdepth * 16;
			}
			if (device->queue_depth > softs->adapterQDepth) {
				device->firmware_queue_depth_set = true;
				device->queue_depth = softs->adapterQDepth;
			}
			if ((multiplier == 1) &&
				(qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH))
				device->firmware_queue_depth_set = false;
			if ((multiplier == 0) &&
				(qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH))
				device->firmware_queue_depth_set = false;
		}

		/* Get device type, vendor, model, device ID. */
		ret = pqisrc_get_dev_data(softs, device);
		if (ret) {
			DBG_WARN("Inquiry failed, skipping device %016llx\n",
				(unsigned long long)BE_64(device->scsi3addr[0]));
			DBG_DISC("INQUIRY FAILED \n");
			continue;
		}
		/* For the controller device itself, use the adapter-wide queue depth. */
		if (device->devtype == RAID_DEVICE) {
			device->firmware_queue_depth_set = true;
			device->queue_depth = softs->adapterQDepth;
		}
		pqisrc_assign_btl(device);

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (device->is_physical_device &&
			MASKED_DEVICE(scsi3addr))
			device->expose_device = false;
		else
			device->expose_device = true;

		if (device->is_physical_device &&
			(lun_ext_entry->device_flags &
			REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
			lun_ext_entry->ioaccel_handle) {
			device->aio_enabled = true;
		}
		switch (device->devtype) {
		case ROM_DEVICE:
			/*
			 * We don't *really* support actual CD-ROM devices,
			 * but we do support the HP "One Button Disaster
			 * Recovery" tape drive which temporarily pretends to
			 * be a CD-ROM drive.
			 */
1948 */ 1949 if (device->is_obdr_device) 1950 new_dev_cnt++; 1951 break; 1952 case DISK_DEVICE: 1953 case ZBC_DEVICE: 1954 if (device->is_physical_device) { 1955 device->ioaccel_handle = 1956 lun_ext_entry->ioaccel_handle; 1957 device->sas_address = BE_64(lun_ext_entry->wwid); 1958 pqisrc_get_physical_device_info(softs, device, 1959 bmic_phy_info); 1960 } 1961 new_dev_cnt++; 1962 break; 1963 case ENCLOSURE_DEVICE: 1964 if (device->is_physical_device) { 1965 device->sas_address = BE_64(lun_ext_entry->wwid); 1966 } 1967 new_dev_cnt++; 1968 break; 1969 case TAPE_DEVICE: 1970 case MEDIUM_CHANGER_DEVICE: 1971 new_dev_cnt++; 1972 break; 1973 case RAID_DEVICE: 1974 /* 1975 * Only present the HBA controller itself as a RAID 1976 * controller. If it's a RAID controller other than 1977 * the HBA itself (an external RAID controller, MSA500 1978 * or similar), don't present it. 1979 */ 1980 if (pqisrc_is_hba_lunid(scsi3addr)) 1981 new_dev_cnt++; 1982 break; 1983 case SES_DEVICE: 1984 case CONTROLLER_DEVICE: 1985 default: 1986 break; 1987 } 1988 } 1989 DBG_DISC("new_dev_cnt %d\n", new_dev_cnt); 1990 1991 pqisrc_update_device_list(softs, new_device_list, new_dev_cnt); 1992 1993 err_out: 1994 if (new_device_list) { 1995 for (i = 0; i < ndev_allocated; i++) { 1996 if (new_device_list[i]) { 1997 if(new_device_list[i]->raid_map) 1998 os_mem_free(softs, (char *)new_device_list[i]->raid_map, 1999 sizeof(pqisrc_raid_map_t)); 2000 os_mem_free(softs, (char*)new_device_list[i], 2001 sizeof(*new_device_list[i])); 2002 } 2003 } 2004 os_mem_free(softs, (char *)new_device_list, 2005 sizeof(*new_device_list) * ndev_allocated); 2006 } 2007 if(physical_dev_list) 2008 os_mem_free(softs, (char *)physical_dev_list, phys_data_length); 2009 if(logical_dev_list) 2010 os_mem_free(softs, (char *)logical_dev_list, log_data_length); 2011 if(logical_queue_dev_list) 2012 os_mem_free(softs, (char*)logical_queue_dev_list, 2013 queue_log_data_length); 2014 if (bmic_phy_info) 2015 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info)); 2016 2017 DBG_FUNC("OUT \n"); 2018 2019 return ret; 2020 } 2021 2022 /* 2023 * Clean up memory allocated for devices. 2024 */ 2025 void 2026 pqisrc_cleanup_devices(pqisrc_softstate_t *softs) 2027 { 2028 2029 int i = 0,j = 0; 2030 pqi_scsi_dev_t *dvp = NULL; 2031 DBG_FUNC("IN\n"); 2032 2033 for(i = 0; i < PQI_MAX_DEVICES; i++) { 2034 for(j = 0; j < PQI_MAX_MULTILUN; j++) { 2035 if (softs->device_list[i][j] == NULL) 2036 continue; 2037 dvp = softs->device_list[i][j]; 2038 pqisrc_device_mem_free(softs, dvp); 2039 } 2040 } 2041 DBG_FUNC("OUT\n"); 2042 } 2043