1 // SPDX-License-Identifier: GPL-2.0
3 * driver for Microchip PQI-based storage controllers
4 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
5 * Copyright (c) 2016-2018 Microsemi Corporation
6 * Copyright (c) 2016 PMC-Sierra, Inc.
22 #include <linux/blk-mq-pci.h>
36 #define DRIVER_VERSION "2.1.30-031"
51 #define PQI_NO_COMPLETION ((void *)-1)
194 "RAID-0",
195 "RAID-4",
196 "RAID-1(1+0)",
197 "RAID-5",
198 "RAID-5+1",
199 "RAID-6",
200 "RAID-1(Triple)",
229 sdev->no_write_same = 1; in pqi_disable_write_same()
239 return !device->is_physical_device; in pqi_is_logical_device()
249 return !ctrl_info->controller_online; in pqi_ctrl_offline()
254 if (ctrl_info->controller_online) in pqi_check_ctrl_health()
308 ctrl_info->scan_blocked = true; in pqi_ctrl_block_scan()
309 mutex_lock(&ctrl_info->scan_mutex); in pqi_ctrl_block_scan()
314 ctrl_info->scan_blocked = false; in pqi_ctrl_unblock_scan()
315 mutex_unlock(&ctrl_info->scan_mutex); in pqi_ctrl_unblock_scan()
320 return ctrl_info->scan_blocked; in pqi_ctrl_scan_blocked()
325 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_block_device_reset()
330 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_ctrl_unblock_device_reset()
339 shost = ctrl_info->scsi_host; in pqi_scsi_block_requests()
355 scsi_unblock_requests(ctrl_info->scsi_host); in pqi_scsi_unblock_requests()
360 atomic_inc(&ctrl_info->num_busy_threads); in pqi_ctrl_busy()
365 atomic_dec(&ctrl_info->num_busy_threads); in pqi_ctrl_unbusy()
370 return ctrl_info->block_requests; in pqi_ctrl_blocked()
375 ctrl_info->block_requests = true; in pqi_ctrl_block_requests()
380 ctrl_info->block_requests = false; in pqi_ctrl_unblock_requests()
381 wake_up_all(&ctrl_info->block_requests_wait); in pqi_ctrl_unblock_requests()
389 atomic_inc(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
390 wait_event(ctrl_info->block_requests_wait, in pqi_wait_if_ctrl_blocked()
392 atomic_dec(&ctrl_info->num_blocked_threads); in pqi_wait_if_ctrl_blocked()
407 while (atomic_read(&ctrl_info->num_busy_threads) > in pqi_ctrl_wait_until_quiesced()
408 atomic_read(&ctrl_info->num_blocked_threads)) { in pqi_ctrl_wait_until_quiesced()
410 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
412 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_ctrl_wait_until_quiesced()
420 dev_warn(&ctrl_info->pci_dev->dev, in pqi_ctrl_wait_until_quiesced()
422 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_ctrl_wait_until_quiesced()
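The counters above implement a quiesce handshake: request threads bump num_busy_threads on entry, and a busy thread that parks on the block gate also bumps num_blocked_threads, so the controller is quiesced once every remaining busy thread is accounted for as blocked. A minimal userspace sketch of the same idea, using C11 atomics (names and the omitted sleep are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_busy;     /* threads currently inside the driver */
static atomic_int num_blocked;  /* busy threads parked on the block gate */

static void worker_enter(void) { atomic_fetch_add(&num_busy, 1); }
static void worker_exit(void)  { atomic_fetch_sub(&num_busy, 1); }

/* A busy thread that hits the gate counts itself as blocked, so the
 * quiescer can subtract it from the busy total. */
static void worker_wait_if_blocked(bool blocked)
{
        if (!blocked)
                return;
        atomic_fetch_add(&num_blocked, 1);
        /* ... sleep here until unblocked (wait_event in the driver) ... */
        atomic_fetch_sub(&num_blocked, 1);
}

/* Quiesced once every remaining busy thread is one of the blocked ones. */
static bool quiesced(void)
{
        return atomic_load(&num_busy) <= atomic_load(&num_blocked);
}

int main(void)
{
        worker_enter();
        worker_wait_if_blocked(false);
        worker_exit();
        printf("quiesced: %d\n", quiesced());   /* 1 */
        return 0;
}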
427 return device->device_offline; in pqi_device_offline()
432 mutex_lock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_start()
437 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_ctrl_ofa_done()
442 mutex_lock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
443 mutex_unlock(&ctrl_info->ofa_mutex); in pqi_wait_until_ofa_finished()
448 return mutex_is_locked(&ctrl_info->ofa_mutex); in pqi_ofa_in_progress()
453 device->in_remove = true; in pqi_device_remove_start()
458 return device->in_remove; in pqi_device_in_remove()
463 device->in_reset[lun] = true; in pqi_device_reset_start()
468 device->in_reset[lun] = false; in pqi_device_reset_done()
473 return device->in_reset[lun]; in pqi_device_in_reset()
484 return -1; in pqi_event_type_to_event_index()
489 return pqi_event_type_to_event_index(event_type) != -1; in pqi_is_supported_event()
498 schedule_delayed_work(&ctrl_info->rescan_work, delay); in pqi_schedule_rescan_worker_with_delay()
515 cancel_delayed_work_sync(&ctrl_info->rescan_work); in pqi_cancel_rescan_worker()
520 if (!ctrl_info->heartbeat_counter) in pqi_read_heartbeat_counter()
523 return readl(ctrl_info->heartbeat_counter); in pqi_read_heartbeat_counter()
528 return readb(ctrl_info->soft_reset_status); in pqi_read_soft_reset_status()
537 writeb(status, ctrl_info->soft_reset_status); in pqi_clear_soft_reset_status()
547 if (device->ncq_prio_enable) { in pqi_is_io_high_priority()
552 switch (scmd->cmnd[0]) { in pqi_is_io_high_priority()
579 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, in pqi_map_single()
581 if (dma_mapping_error(&pci_dev->dev, bus_address)) in pqi_map_single()
582 return -ENOMEM; in pqi_map_single()
584 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); in pqi_map_single()
585 put_unaligned_le32(buffer_length, &sg_descriptor->length); in pqi_map_single()
586 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); in pqi_map_single()
601 dma_unmap_single(&pci_dev->dev, in pqi_pci_unmap()
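pqi_map_single() pairs a streaming DMA mapping with a one-element CISS scatter-gather descriptor, storing bus address, length, and a last-element flag in little-endian byte order, and pqi_pci_unmap() undoes the mapping after completion. A self-contained model of the descriptor fill; the struct layout and the CISS_SG_LAST value here are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define CISS_SG_LAST 0x40000000u        /* illustrative flag value */

struct sg_descriptor {                  /* little-endian on the wire */
        uint8_t address[8];
        uint8_t length[4];
        uint8_t flags[4];
};

static void put_unaligned_le64(uint64_t v, void *p)
{
        for (int i = 0; i < 8; i++)
                ((uint8_t *)p)[i] = v >> (8 * i);
}

static void put_unaligned_le32(uint32_t v, void *p)
{
        for (int i = 0; i < 4; i++)
                ((uint8_t *)p)[i] = v >> (8 * i);
}

int main(void)
{
        struct sg_descriptor sg;
        uint64_t bus_address = 0x123456789abcULL; /* would come from dma_map_single() */

        put_unaligned_le64(bus_address, sg.address);
        put_unaligned_le32(512, sg.length);
        put_unaligned_le32(CISS_SG_LAST, sg.flags);  /* single-element list */

        printf("addr LSB=0x%02x len bytes=%02x %02x\n",
               sg.address[0], sg.length[0], sg.length[1]);
        return 0;
}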
617 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; in pqi_build_raid_path_request()
619 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, in pqi_build_raid_path_request()
620 &request->header.iu_length); in pqi_build_raid_path_request()
621 put_unaligned_le32(buffer_length, &request->buffer_length); in pqi_build_raid_path_request()
622 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); in pqi_build_raid_path_request()
623 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_build_raid_path_request()
624 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; in pqi_build_raid_path_request()
626 cdb = request->cdb; in pqi_build_raid_path_request()
630 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
640 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
643 if (ctrl_info->rpl_extended_format_4_5_supported) in pqi_build_raid_path_request()
648 cdb[1] = ctrl_info->ciss_report_log_flags; in pqi_build_raid_path_request()
653 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
659 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; in pqi_build_raid_path_request()
660 request->data_direction = SOP_WRITE_FLAG; in pqi_build_raid_path_request()
672 request->data_direction = SOP_READ_FLAG; in pqi_build_raid_path_request()
681 request->data_direction = SOP_WRITE_FLAG; in pqi_build_raid_path_request()
687 request->data_direction = SOP_BIDIRECTIONAL; in pqi_build_raid_path_request()
694 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); in pqi_build_raid_path_request()
698 switch (request->data_direction) { in pqi_build_raid_path_request()
713 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], in pqi_build_raid_path_request()
719 io_request->scmd = NULL; in pqi_reinit_io_request()
720 io_request->status = 0; in pqi_reinit_io_request()
721 io_request->error_info = NULL; in pqi_reinit_io_request()
722 io_request->raid_bypass = false; in pqi_reinit_io_request()
734 io_request = &ctrl_info->io_request_pool[i]; in pqi_alloc_io_request()
735 if (atomic_inc_return(&io_request->refcount) > 1) { in pqi_alloc_io_request()
736 atomic_dec(&io_request->refcount); in pqi_alloc_io_request()
741 * benignly racy - may have to wait for an open slot. in pqi_alloc_io_request()
742 * command slot range is scsi_ml_can_queue - in pqi_alloc_io_request()
743 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] in pqi_alloc_io_request()
747 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; in pqi_alloc_io_request()
748 if (atomic_inc_return(&io_request->refcount) == 1) in pqi_alloc_io_request()
750 atomic_dec(&io_request->refcount); in pqi_alloc_io_request()
763 atomic_dec(&io_request->refcount); in pqi_free_io_request()
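pqi_alloc_io_request() claims a slot when atomic_inc_return() observes the 0 -> 1 transition, and pqi_free_io_request() releases it with a bare decrement; per the comment above, the pool is split into a tagged region (the block layer picks the index) and PQI_RESERVED_IO_SLOTS scanned linearly for driver-internal commands. A minimal sketch of that claim protocol, with C11 atomics and illustrative sizes:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct io_req { atomic_int refcount; };

#define QUEUE_SLOTS    64  /* tagged region: the block layer picks the index */
#define RESERVED_SLOTS  8  /* driver-internal region: scanned for a free slot */

static struct io_req pool[QUEUE_SLOTS + RESERVED_SLOTS];

static struct io_req *alloc_reserved(void)
{
        for (;;) {              /* benignly racy: retry until a slot frees up */
                for (size_t i = 0; i < RESERVED_SLOTS; i++) {
                        struct io_req *r = &pool[QUEUE_SLOTS + i];

                        /* fetch_add returning 0 == atomic_inc_return() == 1 */
                        if (atomic_fetch_add(&r->refcount, 1) == 0)
                                return r;       /* 0 -> 1: slot is ours */
                        atomic_fetch_sub(&r->refcount, 1);  /* lost the race */
                }
        }
}

static void free_req(struct io_req *r)
{
        atomic_fetch_sub(&r->refcount, 1);
}

int main(void)
{
        struct io_req *r = alloc_reserved();

        printf("claimed slot %td\n", r - pool); /* 64: first reserved slot */
        free_req(r);
        return 0;
}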
781 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_send_scsi_raid_request()
840 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); in pqi_identify_physical_device()
846 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_identify_physical_device()
879 max_write_raid_1_10_3drive) - \
891 return -ENOMEM; in pqi_get_advanced_raid_bypass_config()
903 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); in pqi_get_advanced_raid_bypass_config()
908 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || in pqi_get_advanced_raid_bypass_config()
909 buffer->header.subpage_code != in pqi_get_advanced_raid_bypass_config()
911 get_unaligned_le16(&buffer->header.buffer_length) < in pqi_get_advanced_raid_bypass_config()
913 buffer->aio_subpage.header.page_code != in pqi_get_advanced_raid_bypass_config()
915 buffer->aio_subpage.header.subpage_code != in pqi_get_advanced_raid_bypass_config()
917 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < in pqi_get_advanced_raid_bypass_config()
922 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_get_advanced_raid_bypass_config()
924 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); in pqi_get_advanced_raid_bypass_config()
926 ctrl_info->max_transfer_encrypted_nvme = in pqi_get_advanced_raid_bypass_config()
928 &buffer->aio_subpage.max_transfer_encrypted_nvme); in pqi_get_advanced_raid_bypass_config()
930 ctrl_info->max_write_raid_5_6 = in pqi_get_advanced_raid_bypass_config()
932 &buffer->aio_subpage.max_write_raid_5_6); in pqi_get_advanced_raid_bypass_config()
934 ctrl_info->max_write_raid_1_10_2drive = in pqi_get_advanced_raid_bypass_config()
936 &buffer->aio_subpage.max_write_raid_1_10_2drive); in pqi_get_advanced_raid_bypass_config()
938 ctrl_info->max_write_raid_1_10_3drive = in pqi_get_advanced_raid_bypass_config()
940 &buffer->aio_subpage.max_write_raid_1_10_3drive); in pqi_get_advanced_raid_bypass_config()
956 return -ENOMEM; in pqi_flush_cache()
958 flush_cache->shutdown_event = shutdown_event; in pqi_flush_cache()
985 return -ENOMEM; in pqi_set_diag_rescan()
992 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); in pqi_set_diag_rescan()
1034 return -ENOMEM; in pqi_write_driver_version_to_host_wellness()
1036 buffer->start_tag[0] = '<'; in pqi_write_driver_version_to_host_wellness()
1037 buffer->start_tag[1] = 'H'; in pqi_write_driver_version_to_host_wellness()
1038 buffer->start_tag[2] = 'W'; in pqi_write_driver_version_to_host_wellness()
1039 buffer->start_tag[3] = '>'; in pqi_write_driver_version_to_host_wellness()
1040 buffer->driver_version_tag[0] = 'D'; in pqi_write_driver_version_to_host_wellness()
1041 buffer->driver_version_tag[1] = 'V'; in pqi_write_driver_version_to_host_wellness()
1042 put_unaligned_le16(sizeof(buffer->driver_version), in pqi_write_driver_version_to_host_wellness()
1043 &buffer->driver_version_length); in pqi_write_driver_version_to_host_wellness()
1044 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, in pqi_write_driver_version_to_host_wellness()
1045 sizeof(buffer->driver_version)); in pqi_write_driver_version_to_host_wellness()
1046 buffer->dont_write_tag[0] = 'D'; in pqi_write_driver_version_to_host_wellness()
1047 buffer->dont_write_tag[1] = 'W'; in pqi_write_driver_version_to_host_wellness()
1048 buffer->end_tag[0] = 'Z'; in pqi_write_driver_version_to_host_wellness()
1049 buffer->end_tag[1] = 'Z'; in pqi_write_driver_version_to_host_wellness()
1085 return -ENOMEM; in pqi_write_current_time_to_host_wellness()
1087 buffer->start_tag[0] = '<'; in pqi_write_current_time_to_host_wellness()
1088 buffer->start_tag[1] = 'H'; in pqi_write_current_time_to_host_wellness()
1089 buffer->start_tag[2] = 'W'; in pqi_write_current_time_to_host_wellness()
1090 buffer->start_tag[3] = '>'; in pqi_write_current_time_to_host_wellness()
1091 buffer->time_tag[0] = 'T'; in pqi_write_current_time_to_host_wellness()
1092 buffer->time_tag[1] = 'D'; in pqi_write_current_time_to_host_wellness()
1093 put_unaligned_le16(sizeof(buffer->time), in pqi_write_current_time_to_host_wellness()
1094 &buffer->time_length); in pqi_write_current_time_to_host_wellness()
1097 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); in pqi_write_current_time_to_host_wellness()
1100 buffer->time[0] = bin2bcd(tm.tm_hour); in pqi_write_current_time_to_host_wellness()
1101 buffer->time[1] = bin2bcd(tm.tm_min); in pqi_write_current_time_to_host_wellness()
1102 buffer->time[2] = bin2bcd(tm.tm_sec); in pqi_write_current_time_to_host_wellness()
1103 buffer->time[3] = 0; in pqi_write_current_time_to_host_wellness()
1104 buffer->time[4] = bin2bcd(tm.tm_mon + 1); in pqi_write_current_time_to_host_wellness()
1105 buffer->time[5] = bin2bcd(tm.tm_mday); in pqi_write_current_time_to_host_wellness()
1106 buffer->time[6] = bin2bcd(year / 100); in pqi_write_current_time_to_host_wellness()
1107 buffer->time[7] = bin2bcd(year % 100); in pqi_write_current_time_to_host_wellness()
1109 buffer->dont_write_tag[0] = 'D'; in pqi_write_current_time_to_host_wellness()
1110 buffer->dont_write_tag[1] = 'W'; in pqi_write_current_time_to_host_wellness()
1111 buffer->end_tag[0] = 'Z'; in pqi_write_current_time_to_host_wellness()
1112 buffer->end_tag[1] = 'Z'; in pqi_write_current_time_to_host_wellness()
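bin2bcd() stores one decimal digit per nibble, which is why the year is split into century and two-digit year before conversion and why each time field fits in a single byte. A standalone illustration, with bin2bcd reimplemented to match the kernel helper's behavior:

#include <stdio.h>

static unsigned char bin2bcd(unsigned int val)  /* one decimal digit per nibble */
{
        return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
        /* 2024-06-05 14:30:59 laid out as the wellness buffer does */
        printf("%02x %02x %02x\n", bin2bcd(14), bin2bcd(30), bin2bcd(59)); /* 14 30 59 */
        printf("%02x %02x\n", bin2bcd(6), bin2bcd(5));                     /* 06 05 */
        printf("%02x %02x\n", bin2bcd(2024 / 100), bin2bcd(2024 % 100));   /* 20 24 */
        return 0;
}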
1133 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_time_worker()
1136 schedule_delayed_work(&ctrl_info->update_time_work, in pqi_update_time_worker()
1142 schedule_delayed_work(&ctrl_info->update_time_work, 0); in pqi_schedule_update_time_worker()
1147 cancel_delayed_work_sync(&ctrl_info->update_time_work); in pqi_cancel_update_time_worker()
1167 rc = -ENOMEM; in pqi_report_phys_logical_luns()
1175 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); in pqi_report_phys_logical_luns()
1182 rc = -ENOMEM; in pqi_report_phys_logical_luns()
1196 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); in pqi_report_phys_logical_luns()
1232 if (ctrl_info->rpl_extended_format_4_5_supported) { in pqi_report_phys_luns()
1234 rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; in pqi_report_phys_luns()
1239 dev_err(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1242 return -EINVAL; in pqi_report_phys_luns()
1244 dev_warn(&ctrl_info->pci_dev->dev, in pqi_report_phys_luns()
1250 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); in pqi_report_phys_luns()
1255 return -ENOMEM; in pqi_report_phys_luns()
1258 &rpl_16byte_wwid_list->header.list_length); in pqi_report_phys_luns()
1259 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; in pqi_report_phys_luns()
1262 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); in pqi_report_phys_luns()
1263 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); in pqi_report_phys_luns()
1264 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); in pqi_report_phys_luns()
1265 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; in pqi_report_phys_luns()
1266 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; in pqi_report_phys_luns()
1267 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; in pqi_report_phys_luns()
1268 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; in pqi_report_phys_luns()
1269 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; in pqi_report_phys_luns()
1296 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1301 dev_err(&ctrl_info->pci_dev->dev, in pqi_get_device_lists()
1313 get_unaligned_be32(&logdev_data->header.list_length); in pqi_get_device_lists()
1329 return -ENOMEM; in pqi_get_device_lists()
1337 &internal_logdev_list->header.list_length); in pqi_get_device_lists()
1348 device->bus = bus; in pqi_set_bus_target_lun()
1349 device->target = target; in pqi_set_bus_target_lun()
1350 device->lun = lun; in pqi_set_bus_target_lun()
1361 scsi3addr = device->scsi3addr; in pqi_assign_bus_target_lun()
1367 device->target_lun_valid = true; in pqi_assign_bus_target_lun()
1372 if (device->is_external_raid_device) { in pqi_assign_bus_target_lun()
1382 device->target_lun_valid = true; in pqi_assign_bus_target_lun()
1387 * Defer target and LUN assignment for non-controller physical devices in pqi_assign_bus_target_lun()
1404 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_level()
1414 device->raid_level = raid_level; in pqi_get_raid_level()
1424 raid_map_size = get_unaligned_le32(&raid_map->structure_size); in pqi_validate_raid_map()
1431 if (device->raid_level == SA_RAID_1) { in pqi_validate_raid_map()
1432 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { in pqi_validate_raid_map()
1433 err_msg = "invalid RAID-1 map"; in pqi_validate_raid_map()
1436 } else if (device->raid_level == SA_RAID_TRIPLE) { in pqi_validate_raid_map()
1437 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { in pqi_validate_raid_map()
1438 err_msg = "invalid RAID-1(Triple) map"; in pqi_validate_raid_map()
1441 } else if ((device->raid_level == SA_RAID_5 || in pqi_validate_raid_map()
1442 device->raid_level == SA_RAID_6) && in pqi_validate_raid_map()
1443 get_unaligned_le16(&raid_map->layout_map_count) > 1) { in pqi_validate_raid_map()
1446 get_unaligned_le16(&raid_map->strip_size) * in pqi_validate_raid_map()
1447 get_unaligned_le16(&raid_map->data_disks_per_row); in pqi_validate_raid_map()
1449 err_msg = "invalid RAID-5 or RAID-6 map"; in pqi_validate_raid_map()
1457 dev_warn(&ctrl_info->pci_dev->dev, in pqi_validate_raid_map()
1459 *((u32 *)&device->scsi3addr), in pqi_validate_raid_map()
1460 *((u32 *)&device->scsi3addr[4]), err_msg); in pqi_validate_raid_map()
1462 return -EINVAL; in pqi_validate_raid_map()
1474 return -ENOMEM; in pqi_get_raid_map()
1477 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); in pqi_get_raid_map()
1481 raid_map_size = get_unaligned_le32(&raid_map->structure_size); in pqi_get_raid_map()
1489 return -ENOMEM; in pqi_get_raid_map()
1492 device->scsi3addr, raid_map, raid_map_size, 0, NULL); in pqi_get_raid_map()
1496 if (get_unaligned_le32(&raid_map->structure_size) in pqi_get_raid_map()
1498 dev_warn(&ctrl_info->pci_dev->dev, in pqi_get_raid_map()
1501 get_unaligned_le32(&raid_map->structure_size)); in pqi_get_raid_map()
1502 rc = -EINVAL; in pqi_get_raid_map()
1511 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); in pqi_get_raid_map()
1512 if (!device->raid_io_stats) { in pqi_get_raid_map()
1513 rc = -ENOMEM; in pqi_get_raid_map()
1517 device->raid_map = raid_map; in pqi_get_raid_map()
1530 if (!ctrl_info->lv_drive_type_mix_valid) { in pqi_set_max_transfer_encrypted()
1531 device->max_transfer_encrypted = ~0; in pqi_set_max_transfer_encrypted()
1535 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { in pqi_set_max_transfer_encrypted()
1543 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1544 ctrl_info->max_transfer_encrypted_sas_sata; in pqi_set_max_transfer_encrypted()
1547 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1548 ctrl_info->max_transfer_encrypted_nvme; in pqi_set_max_transfer_encrypted()
1553 device->max_transfer_encrypted = in pqi_set_max_transfer_encrypted()
1554 min(ctrl_info->max_transfer_encrypted_sas_sata, in pqi_set_max_transfer_encrypted()
1555 ctrl_info->max_transfer_encrypted_nvme); in pqi_set_max_transfer_encrypted()
1571 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_raid_bypass_status()
1581 device->raid_bypass_configured = in pqi_get_raid_bypass_status()
1583 if (device->raid_bypass_configured && in pqi_get_raid_bypass_status()
1586 device->raid_bypass_enabled = true; in pqi_get_raid_bypass_status()
1587 if (get_unaligned_le16(&device->raid_map->flags) & in pqi_get_raid_bypass_status()
1597 * Use vendor-specific VPD to determine online/offline status of a volume.
1614 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, in pqi_get_volume_status()
1619 if (vpd->page_code != CISS_VPD_LV_STATUS) in pqi_get_volume_status()
1623 volume_status) + vpd->page_length; in pqi_get_volume_status()
1627 volume_status = vpd->volume_status; in pqi_get_volume_status()
1628 volume_flags = get_unaligned_be32(&vpd->flags); in pqi_get_volume_status()
1634 device->volume_status = volume_status; in pqi_get_volume_status()
1635 device->volume_offline = volume_offline; in pqi_get_volume_status()
1653 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; in pqi_get_physical_device_info()
1657 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); in pqi_get_physical_device_info()
1658 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); in pqi_get_physical_device_info()
1660 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); in pqi_get_physical_device_info()
1661 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); in pqi_get_physical_device_info()
1663 device->box_index = id_phys->box_index; in pqi_get_physical_device_info()
1664 device->phys_box_on_bus = id_phys->phys_box_on_bus; in pqi_get_physical_device_info()
1665 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; in pqi_get_physical_device_info()
1666 device->queue_depth = in pqi_get_physical_device_info()
1667 get_unaligned_le16(&id_phys->current_queue_depth_limit); in pqi_get_physical_device_info()
1668 device->active_path_index = id_phys->active_path_number; in pqi_get_physical_device_info()
1669 device->path_map = id_phys->redundant_path_present_map; in pqi_get_physical_device_info()
1670 memcpy(&device->box, in pqi_get_physical_device_info()
1671 &id_phys->alternate_paths_phys_box_on_port, in pqi_get_physical_device_info()
1672 sizeof(device->box)); in pqi_get_physical_device_info()
1673 memcpy(&device->phys_connector, in pqi_get_physical_device_info()
1674 &id_phys->alternate_paths_phys_connector, in pqi_get_physical_device_info()
1675 sizeof(device->phys_connector)); in pqi_get_physical_device_info()
1676 device->bay = id_phys->phys_bay_in_box; in pqi_get_physical_device_info()
1677 device->lun_count = id_phys->multi_lun_device_lun_count; in pqi_get_physical_device_info()
1678 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && in pqi_get_physical_device_info()
1679 id_phys->phy_count) in pqi_get_physical_device_info()
1680 device->phy_id = in pqi_get_physical_device_info()
1681 id_phys->phy_to_phy_map[device->active_path_index]; in pqi_get_physical_device_info()
1683 device->phy_id = 0xFF; in pqi_get_physical_device_info()
1685 device->ncq_prio_support = in pqi_get_physical_device_info()
1686 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & in pqi_get_physical_device_info()
1689 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); in pqi_get_physical_device_info()
1702 return -ENOMEM; in pqi_get_logical_device_info()
1705 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); in pqi_get_logical_device_info()
1712 device->devtype = buffer[0] & 0x1f; in pqi_get_logical_device_info()
1713 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); in pqi_get_logical_device_info()
1714 memcpy(device->model, &buffer[16], sizeof(device->model)); in pqi_get_logical_device_info()
1716 if (device->devtype == TYPE_DISK) { in pqi_get_logical_device_info()
1717 if (device->is_external_raid_device) { in pqi_get_logical_device_info()
1718 device->raid_level = SA_RAID_UNKNOWN; in pqi_get_logical_device_info()
1719 device->volume_status = CISS_LV_OK; in pqi_get_logical_device_info()
1720 device->volume_offline = false; in pqi_get_logical_device_info()
1741 * Note: devices that have completed sanitize must be re-enabled
1746 return device->erase_in_progress; in pqi_keep_device_offline()
1755 if (device->is_expander_smp_device) in pqi_get_device_info_phys_logical()
1774 if (rc == 0 && device->lun_count == 0) in pqi_get_device_info()
1775 device->lun_count = 1; in pqi_get_device_info()
1788 switch (device->volume_status) { in pqi_show_volume_status()
1847 status = "Encrypted volume inaccessible - key not present"; in pqi_show_volume_status()
1853 status = "Volume undergoing encryption re-keying process"; in pqi_show_volume_status()
1872 unknown_state_str, device->volume_status); in pqi_show_volume_status()
1877 dev_info(&ctrl_info->pci_dev->dev, in pqi_show_volume_status()
1879 ctrl_info->scsi_host->host_no, in pqi_show_volume_status()
1880 device->bus, device->target, device->lun, status); in pqi_show_volume_status()
1899 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, in pqi_add_device()
1900 device->target, device->lun); in pqi_add_device()
1902 rc = pqi_add_sas_device(ctrl_info->sas_host, device); in pqi_add_device()
1914 for (lun = 0; lun < device->lun_count; lun++) { in pqi_remove_device()
1918 dev_err(&ctrl_info->pci_dev->dev, in pqi_remove_device()
1920 ctrl_info->scsi_host->host_no, device->bus, in pqi_remove_device()
1921 device->target, lun, in pqi_remove_device()
1922 atomic_read(&device->scsi_cmds_outstanding[lun])); in pqi_remove_device()
1926 scsi_remove_device(device->sdev); in pqi_remove_device()
1940 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_find_scsi_dev()
1941 if (device->bus == bus && device->target == target && device->lun == lun) in pqi_find_scsi_dev()
1949 if (dev1->is_physical_device != dev2->is_physical_device) in pqi_device_equal()
1952 if (dev1->is_physical_device) in pqi_device_equal()
1953 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; in pqi_device_equal()
1955 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; in pqi_device_equal()
1969 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_scsi_find_entry()
1970 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { in pqi_scsi_find_entry()
1973 if (device_to_find->volume_offline) in pqi_scsi_find_entry()
1986 if (device->is_expander_smp_device) in pqi_device_type()
1989 return scsi_device_type(device->devtype); in pqi_device_type()
2001 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); in pqi_dev_info()
2003 if (device->target_lun_valid) in pqi_dev_info()
2005 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2007 device->target, in pqi_dev_info()
2008 device->lun); in pqi_dev_info()
2011 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2012 "-:-"); in pqi_dev_info()
2016 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2018 *((u32 *)&device->scsi3addr), in pqi_dev_info()
2019 *((u32 *)&device->scsi3addr[4])); in pqi_dev_info()
2022 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2024 get_unaligned_be64(&device->wwid[0]), in pqi_dev_info()
2025 get_unaligned_be64(&device->wwid[8])); in pqi_dev_info()
2027 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2030 device->vendor, in pqi_dev_info()
2031 device->model); in pqi_dev_info()
2034 if (device->devtype == TYPE_DISK) in pqi_dev_info()
2036 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2037 "SSDSmartPathCap%c En%c %-12s", in pqi_dev_info()
2038 device->raid_bypass_configured ? '+' : '-', in pqi_dev_info()
2039 device->raid_bypass_enabled ? '+' : '-', in pqi_dev_info()
2040 pqi_raid_level_to_string(device->raid_level)); in pqi_dev_info()
2043 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2044 "AIO%c", device->aio_enabled ? '+' : '-'); in pqi_dev_info()
2045 if (device->devtype == TYPE_DISK || in pqi_dev_info()
2046 device->devtype == TYPE_ZBC) in pqi_dev_info()
2048 PQI_DEV_INFO_BUFFER_LENGTH - count, in pqi_dev_info()
2049 " qd=%-6d", device->queue_depth); in pqi_dev_info()
2052 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); in pqi_dev_info()
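pqi_dev_info() assembles its one-line summary with the count += scnprintf(buffer + count, remaining, ...) idiom; unlike snprintf, scnprintf returns the number of bytes actually stored, so the running offset can never step past the buffer. A userspace approximation of that contract:

#include <stdarg.h>
#include <stdio.h>

/* snprintf returns what it *would* have written; clamp to what was stored. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int n;

        if (size == 0)
                return 0;
        va_start(args, fmt);
        n = vsnprintf(buf, size, fmt, args);
        va_end(args);
        if (n < 0)
                return 0;
        return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
        char buf[32];
        int count = 0;

        count += scnprintf_like(buf + count, sizeof(buf) - count, "%d:%d:", 0, 1);
        count += scnprintf_like(buf + count, sizeof(buf) - count, "%d:%d", 2, 0);
        puts(buf);      /* "0:1:2:0" */
        return 0;
}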
2063 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); in pqi_raid_maps_equal()
2064 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); in pqi_raid_maps_equal()
2077 existing_device->device_type = new_device->device_type; in pqi_scsi_update_device()
2078 existing_device->bus = new_device->bus; in pqi_scsi_update_device()
2079 if (new_device->target_lun_valid) { in pqi_scsi_update_device()
2080 existing_device->target = new_device->target; in pqi_scsi_update_device()
2081 existing_device->lun = new_device->lun; in pqi_scsi_update_device()
2082 existing_device->target_lun_valid = true; in pqi_scsi_update_device()
2087 existing_device->is_physical_device = new_device->is_physical_device; in pqi_scsi_update_device()
2088 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); in pqi_scsi_update_device()
2089 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); in pqi_scsi_update_device()
2090 existing_device->sas_address = new_device->sas_address; in pqi_scsi_update_device()
2091 existing_device->queue_depth = new_device->queue_depth; in pqi_scsi_update_device()
2092 existing_device->device_offline = false; in pqi_scsi_update_device()
2093 existing_device->lun_count = new_device->lun_count; in pqi_scsi_update_device()
2096 existing_device->is_external_raid_device = new_device->is_external_raid_device; in pqi_scsi_update_device()
2098 if (existing_device->devtype == TYPE_DISK) { in pqi_scsi_update_device()
2099 existing_device->raid_level = new_device->raid_level; in pqi_scsi_update_device()
2100 existing_device->volume_status = new_device->volume_status; in pqi_scsi_update_device()
2101 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); in pqi_scsi_update_device()
2102 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { in pqi_scsi_update_device()
2103 kfree(existing_device->raid_map); in pqi_scsi_update_device()
2104 existing_device->raid_map = new_device->raid_map; in pqi_scsi_update_device()
2106 new_device->raid_map = NULL; in pqi_scsi_update_device()
2108 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { in pqi_scsi_update_device()
2109 existing_device->raid_io_stats = new_device->raid_io_stats; in pqi_scsi_update_device()
2110 new_device->raid_io_stats = NULL; in pqi_scsi_update_device()
2112 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; in pqi_scsi_update_device()
2113 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; in pqi_scsi_update_device()
2116 existing_device->aio_enabled = new_device->aio_enabled; in pqi_scsi_update_device()
2117 existing_device->aio_handle = new_device->aio_handle; in pqi_scsi_update_device()
2118 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; in pqi_scsi_update_device()
2119 existing_device->active_path_index = new_device->active_path_index; in pqi_scsi_update_device()
2120 existing_device->phy_id = new_device->phy_id; in pqi_scsi_update_device()
2121 existing_device->path_map = new_device->path_map; in pqi_scsi_update_device()
2122 existing_device->bay = new_device->bay; in pqi_scsi_update_device()
2123 existing_device->box_index = new_device->box_index; in pqi_scsi_update_device()
2124 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; in pqi_scsi_update_device()
2125 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; in pqi_scsi_update_device()
2126 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); in pqi_scsi_update_device()
2127 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); in pqi_scsi_update_device()
2134 free_percpu(device->raid_io_stats); in pqi_free_device()
2135 kfree(device->raid_map); in pqi_free_device()
2141 * Called when exposing a new device to the OS fails in order to re-adjust
2150 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2151 list_del(&device->scsi_device_list_entry); in pqi_fixup_botched_add()
2152 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_fixup_botched_add()
2155 device->keep_device = false; in pqi_fixup_botched_add()
2160 if (device->is_expander_smp_device) in pqi_is_device_added()
2161 return device->sas_port != NULL; in pqi_is_device_added()
2163 return device->sdev != NULL; in pqi_is_device_added()
2171 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) in pqi_init_device_tmf_work()
2172 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); in pqi_init_device_tmf_work()
2180 if (device->sdev == NULL) in pqi_volume_rescan_needed()
2183 if (!scsi_device_online(device->sdev)) in pqi_volume_rescan_needed()
2186 return device->rescan; in pqi_volume_rescan_needed()
2209 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2212 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_update_device_list()
2213 device->device_gone = true; in pqi_update_device_list()
2227 device->new_device = false; in pqi_update_device_list()
2228 matching_device->device_gone = false; in pqi_update_device_list()
2236 device->new_device = true; in pqi_update_device_list()
2243 device->new_device = true; in pqi_update_device_list()
2249 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, in pqi_update_device_list()
2251 if (device->device_gone) { in pqi_update_device_list()
2252 list_del(&device->scsi_device_list_entry); in pqi_update_device_list()
2253 list_add_tail(&device->delete_list_entry, &delete_list); in pqi_update_device_list()
2260 if (!device->new_device) in pqi_update_device_list()
2262 if (device->volume_offline) in pqi_update_device_list()
2264 list_add_tail(&device->scsi_device_list_entry, in pqi_update_device_list()
2265 &ctrl_info->scsi_device_list); in pqi_update_device_list()
2266 list_add_tail(&device->add_list_entry, &add_list); in pqi_update_device_list()
2268 device->keep_device = true; in pqi_update_device_list()
2272 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2289 if (device->volume_offline) { in pqi_update_device_list()
2297 list_del(&device->delete_list_entry); in pqi_update_device_list()
2305 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_update_device_list()
2309 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { in pqi_update_device_list()
2310 device->advertised_queue_depth = device->queue_depth; in pqi_update_device_list()
2311 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); in pqi_update_device_list()
2313 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2318 device->rescan = false; in pqi_update_device_list()
2319 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2320 scsi_rescan_device(device->sdev); in pqi_update_device_list()
2322 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_update_device_list()
2333 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_device_list()
2335 ctrl_info->scsi_host->host_no, in pqi_update_device_list()
2336 device->bus, device->target, in pqi_update_device_list()
2337 device->lun); in pqi_update_device_list()
2353 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && in pqi_is_supported_device()
2354 !pqi_is_hba_lunid(device->scsi3addr)) in pqi_is_supported_device()
2376 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); in pqi_expose_device()
2409 get_unaligned_be32(&physdev_list->header.list_length) in pqi_update_scsi_devices()
2410 / sizeof(physdev_list->lun_entries[0]); in pqi_update_scsi_devices()
2416 get_unaligned_be32(&logdev_list->header.list_length) in pqi_update_scsi_devices()
2417 / sizeof(logdev_list->lun_entries[0]); in pqi_update_scsi_devices()
2430 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2432 rc = -ENOMEM; in pqi_update_scsi_devices()
2437 for (i = num_physicals - 1; i >= 0; i--) { in pqi_update_scsi_devices()
2438 phys_lun = &physdev_list->lun_entries[i]; in pqi_update_scsi_devices()
2439 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { in pqi_update_scsi_devices()
2440 pqi_mask_device(phys_lun->lunid); in pqi_update_scsi_devices()
2448 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) in pqi_update_scsi_devices()
2449 ctrl_info->lv_drive_type_mix_valid = true; in pqi_update_scsi_devices()
2457 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); in pqi_update_scsi_devices()
2458 rc = -ENOMEM; in pqi_update_scsi_devices()
2465 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2467 rc = -ENOMEM; in pqi_update_scsi_devices()
2470 list_add_tail(&device->new_device_list_entry, in pqi_update_scsi_devices()
2484 phys_lun = &physdev_list->lun_entries[physical_index++]; in pqi_update_scsi_devices()
2486 scsi3addr = phys_lun->lunid; in pqi_update_scsi_devices()
2490 log_lun = &logdev_list->lun_entries[logical_index++]; in pqi_update_scsi_devices()
2491 scsi3addr = log_lun->lunid; in pqi_update_scsi_devices()
2503 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); in pqi_update_scsi_devices()
2504 device->is_physical_device = is_physical_device; in pqi_update_scsi_devices()
2506 device->device_type = phys_lun->device_type; in pqi_update_scsi_devices()
2507 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) in pqi_update_scsi_devices()
2508 device->is_expander_smp_device = true; in pqi_update_scsi_devices()
2510 device->is_external_raid_device = in pqi_update_scsi_devices()
2519 if (rc == -ENOMEM) { in pqi_update_scsi_devices()
2520 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", in pqi_update_scsi_devices()
2525 if (device->is_physical_device) in pqi_update_scsi_devices()
2526 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2528 get_unaligned_be64(&phys_lun->wwid[0]), in pqi_update_scsi_devices()
2529 get_unaligned_be64(&phys_lun->wwid[8])); in pqi_update_scsi_devices()
2531 dev_warn(&ctrl_info->pci_dev->dev, in pqi_update_scsi_devices()
2533 *((u32 *)&device->scsi3addr), in pqi_update_scsi_devices()
2534 *((u32 *)&device->scsi3addr[4])); in pqi_update_scsi_devices()
2545 if (device->is_physical_device) { in pqi_update_scsi_devices()
2546 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); in pqi_update_scsi_devices()
2547 if ((phys_lun->device_flags & in pqi_update_scsi_devices()
2549 phys_lun->aio_handle) { in pqi_update_scsi_devices()
2550 device->aio_enabled = true; in pqi_update_scsi_devices()
2551 device->aio_handle = in pqi_update_scsi_devices()
2552 phys_lun->aio_handle; in pqi_update_scsi_devices()
2555 memcpy(device->volume_id, log_lun->volume_id, in pqi_update_scsi_devices()
2556 sizeof(device->volume_id)); in pqi_update_scsi_devices()
2559 device->sas_address = get_unaligned_be64(&device->wwid[0]); in pqi_update_scsi_devices()
2569 if (device->keep_device) in pqi_update_scsi_devices()
2571 list_del(&device->new_device_list_entry); in pqi_update_scsi_devices()
2589 return -ENXIO; in pqi_scan_scsi_devices()
2591 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2595 return -EBUSY; in pqi_scan_scsi_devices()
2597 return -EINPROGRESS; in pqi_scan_scsi_devices()
2604 mutex_unlock(&ctrl_info->scan_mutex); in pqi_scan_scsi_devices()
2627 return !mutex_is_locked(&ctrl_info->scan_mutex); in pqi_scan_finished()
2640 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); in pqi_set_encryption_info()
2644 encryption_info->data_encryption_key_index = in pqi_set_encryption_info()
2645 get_unaligned_le16(&raid_map->data_encryption_key_index); in pqi_set_encryption_info()
2646 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); in pqi_set_encryption_info()
2647 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); in pqi_set_encryption_info()
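The encryption tweak is derived from the starting LBA: per the driver's comment (not among the matches above), the tweak equals the LBA for 512-byte volume blocks and is scaled by volume_blk_size / 512 otherwise, then split into 32-bit halves exactly as lower_32_bits()/upper_32_bits() do. A worked example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t first_block = 100;       /* starting LBA of the request */
        uint32_t volume_blk_size = 4096;  /* from the RAID map */

        if (volume_blk_size != 512)
                first_block = first_block * volume_blk_size / 512; /* 100 * 8 = 800 */

        printf("tweak_lower=%u tweak_upper=%u\n",
               (uint32_t)first_block,           /* lower_32_bits() */
               (uint32_t)(first_block >> 32));  /* upper_32_bits() */
        return 0;
}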
2659 switch (rmd->raid_level) { in pqi_aio_raid_level_supported()
2663 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2664 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) in pqi_aio_raid_level_supported()
2668 if (rmd->is_write && (!ctrl_info->enable_r1_writes || in pqi_aio_raid_level_supported()
2669 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) in pqi_aio_raid_level_supported()
2673 if (rmd->is_write && (!ctrl_info->enable_r5_writes || in pqi_aio_raid_level_supported()
2674 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2678 if (rmd->is_write && (!ctrl_info->enable_r6_writes || in pqi_aio_raid_level_supported()
2679 rmd->data_length > ctrl_info->max_write_raid_5_6)) in pqi_aio_raid_level_supported()
2696 switch (scmd->cmnd[0]) { in pqi_get_aio_lba_and_block_count()
2698 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2701 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | in pqi_get_aio_lba_and_block_count()
2702 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); in pqi_get_aio_lba_and_block_count()
2703 rmd->block_cnt = (u32)scmd->cmnd[4]; in pqi_get_aio_lba_and_block_count()
2704 if (rmd->block_cnt == 0) in pqi_get_aio_lba_and_block_count()
2705 rmd->block_cnt = 256; in pqi_get_aio_lba_and_block_count()
2708 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2711 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2712 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); in pqi_get_aio_lba_and_block_count()
2715 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2718 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2719 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); in pqi_get_aio_lba_and_block_count()
2722 rmd->is_write = true; in pqi_get_aio_lba_and_block_count()
2725 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); in pqi_get_aio_lba_and_block_count()
2726 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); in pqi_get_aio_lba_and_block_count()
2733 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); in pqi_get_aio_lba_and_block_count()
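The switch above pulls the LBA and block count straight out of the CDB with big-endian accessors; the 6-byte READ/WRITE case is the outlier, packing a 21-bit LBA into bytes 1-3 and treating a transfer length of 0 as 256 blocks. A standalone decode of a READ(6) CDB:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t cdb[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };  /* READ(6) */

        uint64_t lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
        uint32_t block_cnt = cdb[4] ? cdb[4] : 256; /* length 0 means 256 blocks */

        printf("lba=%llu blocks=%u\n", (unsigned long long)lba, block_cnt);
        /* prints: lba=74565 blocks=256 */
        return 0;
}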
2745 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; in pci_get_aio_common_raid_map_values()
2748 if (rmd->last_block >= in pci_get_aio_common_raid_map_values()
2749 get_unaligned_le64(&raid_map->volume_blk_cnt) || in pci_get_aio_common_raid_map_values()
2750 rmd->last_block < rmd->first_block) in pci_get_aio_common_raid_map_values()
2753 rmd->data_disks_per_row = in pci_get_aio_common_raid_map_values()
2754 get_unaligned_le16(&raid_map->data_disks_per_row); in pci_get_aio_common_raid_map_values()
2755 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); in pci_get_aio_common_raid_map_values()
2756 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); in pci_get_aio_common_raid_map_values()
2759 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; in pci_get_aio_common_raid_map_values()
2760 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pci_get_aio_common_raid_map_values()
2763 tmpdiv = rmd->first_block; in pci_get_aio_common_raid_map_values()
2764 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2765 rmd->first_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2766 tmpdiv = rmd->last_block; in pci_get_aio_common_raid_map_values()
2767 do_div(tmpdiv, rmd->blocks_per_row); in pci_get_aio_common_raid_map_values()
2768 rmd->last_row = tmpdiv; in pci_get_aio_common_raid_map_values()
2769 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2770 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2771 tmpdiv = rmd->first_row_offset; in pci_get_aio_common_raid_map_values()
2772 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2773 rmd->first_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2774 tmpdiv = rmd->last_row_offset; in pci_get_aio_common_raid_map_values()
2775 do_div(tmpdiv, rmd->strip_size); in pci_get_aio_common_raid_map_values()
2776 rmd->last_column = tmpdiv; in pci_get_aio_common_raid_map_values()
2778 rmd->first_row = rmd->first_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2779 rmd->last_row = rmd->last_block / rmd->blocks_per_row; in pci_get_aio_common_raid_map_values()
2780 rmd->first_row_offset = (u32)(rmd->first_block - in pci_get_aio_common_raid_map_values()
2781 (rmd->first_row * rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2782 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * in pci_get_aio_common_raid_map_values()
2783 rmd->blocks_per_row)); in pci_get_aio_common_raid_map_values()
2784 rmd->first_column = rmd->first_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2785 rmd->last_column = rmd->last_row_offset / rmd->strip_size; in pci_get_aio_common_raid_map_values()
2789 if (rmd->first_row != rmd->last_row || in pci_get_aio_common_raid_map_values()
2790 rmd->first_column != rmd->last_column) in pci_get_aio_common_raid_map_values()
2794 rmd->total_disks_per_row = rmd->data_disks_per_row + in pci_get_aio_common_raid_map_values()
2795 get_unaligned_le16(&raid_map->metadata_disks_per_row); in pci_get_aio_common_raid_map_values()
2796 rmd->map_row = ((u32)(rmd->first_row >> in pci_get_aio_common_raid_map_values()
2797 raid_map->parity_rotation_shift)) % in pci_get_aio_common_raid_map_values()
2798 get_unaligned_le16(&raid_map->row_cnt); in pci_get_aio_common_raid_map_values()
2799 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + in pci_get_aio_common_raid_map_values()
2800 rmd->first_column; in pci_get_aio_common_raid_map_values()
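The arithmetic above locates an LBA inside the RAID map: blocks_per_row = data_disks_per_row * strip_size, the row is the LBA divided by that, and the column is the offset within the row divided by the strip size; bypass is only attempted when first and last block land in the same row and column. A worked example with small, made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t data_disks_per_row = 3;
        uint16_t strip_size = 128;      /* blocks per strip */
        uint64_t first_block = 1000;

        uint32_t blocks_per_row = data_disks_per_row * strip_size;  /* 384 */
        uint64_t first_row = first_block / blocks_per_row;          /* 2 */
        uint32_t first_row_offset =
                (uint32_t)(first_block - first_row * blocks_per_row); /* 232 */
        uint32_t first_column = first_row_offset / strip_size;      /* 1 */

        /* map_index = (rotated row % row_cnt) * total_disks_per_row + column,
         * which then indexes raid_map->disk_data[] to find the backing drive. */
        printf("row=%llu offset=%u column=%u\n",
               (unsigned long long)first_row, first_row_offset, first_column);
        return 0;
}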
2812 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ in pqi_calc_aio_r5_or_r6()
2817 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; in pqi_calc_aio_r5_or_r6()
2819 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2820 rmd->first_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2821 tmpdiv = rmd->first_group; in pqi_calc_aio_r5_or_r6()
2822 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2823 rmd->first_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2824 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2825 rmd->last_group = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2826 tmpdiv = rmd->last_group; in pqi_calc_aio_r5_or_r6()
2827 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2828 rmd->last_group = tmpdiv; in pqi_calc_aio_r5_or_r6()
2830 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2831 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
2833 if (rmd->first_group != rmd->last_group) in pqi_calc_aio_r5_or_r6()
2838 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2839 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2840 rmd->first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2841 rmd->r5or6_first_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2842 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2843 do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2844 rmd->r5or6_last_row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2846 rmd->first_row = rmd->r5or6_first_row = in pqi_calc_aio_r5_or_r6()
2847 rmd->first_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2848 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; in pqi_calc_aio_r5_or_r6()
2850 if (rmd->r5or6_first_row != rmd->r5or6_last_row) in pqi_calc_aio_r5_or_r6()
2855 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2856 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2857 tmpdiv = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2858 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2859 rmd->r5or6_first_row_offset = rmd->first_row_offset; in pqi_calc_aio_r5_or_r6()
2860 tmpdiv = rmd->last_block; in pqi_calc_aio_r5_or_r6()
2861 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); in pqi_calc_aio_r5_or_r6()
2862 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2863 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2864 tmpdiv = rmd->r5or6_first_row_offset; in pqi_calc_aio_r5_or_r6()
2865 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2866 rmd->first_column = rmd->r5or6_first_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2867 tmpdiv = rmd->r5or6_last_row_offset; in pqi_calc_aio_r5_or_r6()
2868 do_div(tmpdiv, rmd->strip_size); in pqi_calc_aio_r5_or_r6()
2869 rmd->r5or6_last_column = tmpdiv; in pqi_calc_aio_r5_or_r6()
2871 rmd->first_row_offset = rmd->r5or6_first_row_offset = in pqi_calc_aio_r5_or_r6()
2872 (u32)((rmd->first_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2873 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2875 rmd->r5or6_last_row_offset = in pqi_calc_aio_r5_or_r6()
2876 (u32)((rmd->last_block % rmd->stripesize) % in pqi_calc_aio_r5_or_r6()
2877 rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2879 rmd->first_column = in pqi_calc_aio_r5_or_r6()
2880 rmd->r5or6_first_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2881 rmd->r5or6_first_column = rmd->first_column; in pqi_calc_aio_r5_or_r6()
2882 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; in pqi_calc_aio_r5_or_r6()
2884 if (rmd->r5or6_first_column != rmd->r5or6_last_column) in pqi_calc_aio_r5_or_r6()
2888 rmd->map_row = in pqi_calc_aio_r5_or_r6()
2889 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % in pqi_calc_aio_r5_or_r6()
2890 get_unaligned_le16(&raid_map->row_cnt); in pqi_calc_aio_r5_or_r6()
2892 rmd->map_index = (rmd->first_group * in pqi_calc_aio_r5_or_r6()
2893 (get_unaligned_le16(&raid_map->row_cnt) * in pqi_calc_aio_r5_or_r6()
2894 rmd->total_disks_per_row)) + in pqi_calc_aio_r5_or_r6()
2895 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; in pqi_calc_aio_r5_or_r6()
2897 if (rmd->is_write) { in pqi_calc_aio_r5_or_r6()
2909 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); in pqi_calc_aio_r5_or_r6()
2910 index *= rmd->total_disks_per_row; in pqi_calc_aio_r5_or_r6()
2911 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); in pqi_calc_aio_r5_or_r6()
2913 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r5_or_r6()
2914 if (rmd->raid_level == SA_RAID_6) { in pqi_calc_aio_r5_or_r6()
2915 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; in pqi_calc_aio_r5_or_r6()
2916 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; in pqi_calc_aio_r5_or_r6()
2919 tmpdiv = rmd->first_block; in pqi_calc_aio_r5_or_r6()
2920 do_div(tmpdiv, rmd->blocks_per_row); in pqi_calc_aio_r5_or_r6()
2921 rmd->row = tmpdiv; in pqi_calc_aio_r5_or_r6()
2923 rmd->row = rmd->first_block / rmd->blocks_per_row; in pqi_calc_aio_r5_or_r6()
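The duplicated tmpdiv arithmetic above exists because 64-bit division is not native on 32-bit kernels: do_div(n, base) divides n in place and returns the remainder, so quotient and remainder must be collected in separate steps, which is why first_group is assigned twice. A userspace model of those semantics (GNU C statement expression mirroring the kernel macro's in-place behavior; illustration only):

#include <stdio.h>
#include <stdint.h>

/* n becomes the quotient; the remainder is the macro's value.
 * (Evaluates n more than once; fine for a sketch.) */
#define do_div(n, base) ({ uint32_t rem = (n) % (base); (n) /= (base); rem; })

int main(void)
{
        uint64_t tmpdiv = 1000;
        uint32_t stripesize = 768, blocks_per_row = 384;
        uint32_t first_group;

        first_group = do_div(tmpdiv, stripesize); /* remainder within stripe: 232 */
        tmpdiv = first_group;
        do_div(tmpdiv, blocks_per_row);           /* which mirror group it hits */
        first_group = (uint32_t)tmpdiv;           /* 0 */

        printf("first_group=%u\n", first_group);
        return 0;
}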
2933 if (rmd->disk_block > 0xffffffff) { in pqi_set_aio_cdb()
2934 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16; in pqi_set_aio_cdb()
2935 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2936 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2937 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); in pqi_set_aio_cdb()
2938 rmd->cdb[14] = 0; in pqi_set_aio_cdb()
2939 rmd->cdb[15] = 0; in pqi_set_aio_cdb()
2940 rmd->cdb_length = 16; in pqi_set_aio_cdb()
2942 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; in pqi_set_aio_cdb()
2943 rmd->cdb[1] = 0; in pqi_set_aio_cdb()
2944 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); in pqi_set_aio_cdb()
2945 rmd->cdb[6] = 0; in pqi_set_aio_cdb()
2946 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); in pqi_set_aio_cdb()
2947 rmd->cdb[9] = 0; in pqi_set_aio_cdb()
2948 rmd->cdb_length = 10; in pqi_set_aio_cdb()
2958 group = rmd->map_index / rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2960 index = rmd->map_index - (group * rmd->data_disks_per_row); in pqi_calc_aio_r1_nexus()
2961 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2962 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2963 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2964 if (rmd->layout_map_count > 2) { in pqi_calc_aio_r1_nexus()
2965 index += rmd->data_disks_per_row; in pqi_calc_aio_r1_nexus()
2966 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; in pqi_calc_aio_r1_nexus()
2969 rmd->num_it_nexus_entries = rmd->layout_map_count; in pqi_calc_aio_r1_nexus()
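For RAID-1 and RAID-1(Triple), each mirror copy of a data disk sits data_disks_per_row entries further along the map's disk_data array, so the loop above gathers two or three IT nexus handles by striding through it. A standalone sketch with an illustrative handle array:

#include <stdio.h>

int main(void)
{
        unsigned int aio_handle[9]; /* 3 mirror groups x 3 data disks per row */
        unsigned int data_disks_per_row = 3, layout_map_count = 3, map_index = 7;

        for (unsigned int i = 0; i < 9; i++)
                aio_handle[i] = 100 + i;

        unsigned int group = map_index / data_disks_per_row;          /* 2 */
        unsigned int index = map_index - group * data_disks_per_row;  /* 1 */

        for (unsigned int m = 0; m < layout_map_count;
             m++, index += data_disks_per_row)
                printf("it_nexus[%u] = %u\n", m, aio_handle[index]);
        /* prints handles 101, 104, 107: the same column in each mirror group */
        return 0;
}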
2988 rmd.raid_level = device->raid_level; in pqi_raid_bypass_submit_scsi_cmd()
2996 raid_map = device->raid_map; in pqi_raid_bypass_submit_scsi_cmd()
3002 if (device->raid_level == SA_RAID_1 || in pqi_raid_bypass_submit_scsi_cmd()
3003 device->raid_level == SA_RAID_TRIPLE) { in pqi_raid_bypass_submit_scsi_cmd()
3007 group = device->next_bypass_group[rmd.map_index]; in pqi_raid_bypass_submit_scsi_cmd()
3011 device->next_bypass_group[rmd.map_index] = next_bypass_group; in pqi_raid_bypass_submit_scsi_cmd()
3014 } else if ((device->raid_level == SA_RAID_5 || in pqi_raid_bypass_submit_scsi_cmd()
3015 device->raid_level == SA_RAID_6) && in pqi_raid_bypass_submit_scsi_cmd()
3025 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; in pqi_raid_bypass_submit_scsi_cmd()
3026 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + in pqi_raid_bypass_submit_scsi_cmd()
3028 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); in pqi_raid_bypass_submit_scsi_cmd()
3032 if (raid_map->phys_blk_shift) { in pqi_raid_bypass_submit_scsi_cmd()
3033 rmd.disk_block <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3034 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; in pqi_raid_bypass_submit_scsi_cmd()
3042 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { in pqi_raid_bypass_submit_scsi_cmd()
3043 if (rmd.data_length > device->max_transfer_encrypted) in pqi_raid_bypass_submit_scsi_cmd()
3052 switch (device->raid_level) { in pqi_raid_bypass_submit_scsi_cmd()
3090 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_mode_ready()
3094 signature = readq(&pqi_registers->signature); in pqi_wait_for_pqi_mode_ready()
3099 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3101 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3107 status = readb(&pqi_registers->function_and_status_code); in pqi_wait_for_pqi_mode_ready()
3111 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3113 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3119 if (readl(&pqi_registers->device_status) == in pqi_wait_for_pqi_mode_ready()
3123 dev_err(&ctrl_info->pci_dev->dev, in pqi_wait_for_pqi_mode_ready()
3125 return -ETIMEDOUT; in pqi_wait_for_pqi_mode_ready()
3137 device = io_request->scmd->device->hostdata; in pqi_aio_path_disabled()
3138 device->raid_bypass_enabled = false; in pqi_aio_path_disabled()
3139 device->aio_enabled = false; in pqi_aio_path_disabled()
3147 device = sdev->hostdata; in pqi_take_device_offline()
3148 if (device->device_offline) in pqi_take_device_offline()
3151 device->device_offline = true; in pqi_take_device_offline()
3152 ctrl_info = shost_to_hba(sdev->host); in pqi_take_device_offline()
3154 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", in pqi_take_device_offline()
3155 path, ctrl_info->scsi_host->host_no, device->bus, in pqi_take_device_offline()
3156 device->target, device->lun); in pqi_take_device_offline()
3170 scmd = io_request->scmd; in pqi_process_raid_io_error()
3174 error_info = io_request->error_info; in pqi_process_raid_io_error()
3175 scsi_status = error_info->status; in pqi_process_raid_io_error()
3178 switch (error_info->data_out_result) { in pqi_process_raid_io_error()
3183 get_unaligned_le32(&error_info->data_out_transferred); in pqi_process_raid_io_error()
3184 residual_count = scsi_bufflen(scmd) - xfer_count; in pqi_process_raid_io_error()
3186 if (xfer_count < scmd->underflow) in pqi_process_raid_io_error()
3217 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); in pqi_process_raid_io_error()
3220 get_unaligned_le16(&error_info->response_data_length); in pqi_process_raid_io_error()
3222 if (sense_data_length > sizeof(error_info->data)) in pqi_process_raid_io_error()
3223 sense_data_length = sizeof(error_info->data); in pqi_process_raid_io_error()
3226 scsi_normalize_sense(error_info->data, in pqi_process_raid_io_error()
3230 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); in pqi_process_raid_io_error()
3231 struct pqi_scsi_dev *device = scmd->device->hostdata; in pqi_process_raid_io_error()
3237 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3238 pqi_take_device_offline(scmd->device, "RAID"); in pqi_process_raid_io_error()
3242 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ in pqi_process_raid_io_error()
3245 sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); in pqi_process_raid_io_error()
3252 memcpy(scmd->sense_buffer, error_info->data, in pqi_process_raid_io_error()
3256 if (pqi_cmd_priv(scmd)->this_residual && in pqi_process_raid_io_error()
3257 !pqi_is_logical_device(scmd->device->hostdata) && in pqi_process_raid_io_error()
3261 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && in pqi_process_raid_io_error()
3266 pqi_take_device_offline(scmd->device, "AIO"); in pqi_process_raid_io_error()
3267 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); in pqi_process_raid_io_error()
3270 scmd->result = scsi_status; in pqi_process_raid_io_error()
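/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * both error handlers clamp the device-reported sense length to the
 * destination buffer before memcpy(), so a bogus wire length can never
 * overrun host memory. Standalone userspace C; sizes are made up.
 */
#include <string.h>

#define SENSE_BUF_SIZE 96	/* hypothetical destination size */

static void copy_sense(char dst[SENSE_BUF_SIZE], const char *src,
		       size_t reported_len)
{
	size_t len = reported_len;

	if (len > SENSE_BUF_SIZE)	/* never trust the wire length */
		len = SENSE_BUF_SIZE;
	memcpy(dst, src, len);
}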
3285 scmd = io_request->scmd; in pqi_process_aio_io_error()
3286 error_info = io_request->error_info; in pqi_process_aio_io_error()
3291 switch (error_info->service_response) { in pqi_process_aio_io_error()
3293 scsi_status = error_info->status; in pqi_process_aio_io_error()
3296 switch (error_info->status) { in pqi_process_aio_io_error()
3303 &error_info->residual_count); in pqi_process_aio_io_error()
3305 xfer_count = scsi_bufflen(scmd) - residual_count; in pqi_process_aio_io_error()
3306 if (xfer_count < scmd->underflow) in pqi_process_aio_io_error()
3315 io_request->status = -EAGAIN; in pqi_process_aio_io_error()
3319 if (!io_request->raid_bypass) { in pqi_process_aio_io_error()
3321 pqi_take_device_offline(scmd->device, "AIO"); in pqi_process_aio_io_error()
3343 if (error_info->data_present) { in pqi_process_aio_io_error()
3345 get_unaligned_le16(&error_info->data_length); in pqi_process_aio_io_error()
3347 if (sense_data_length > sizeof(error_info->data)) in pqi_process_aio_io_error()
3348 sense_data_length = sizeof(error_info->data); in pqi_process_aio_io_error()
3351 memcpy(scmd->sense_buffer, error_info->data, in pqi_process_aio_io_error()
3359 scmd->result = scsi_status; in pqi_process_aio_io_error()
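/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the underflow math in the AIO error path - the firmware reports a
 * residual (bytes not moved), the driver derives the actual transfer
 * count, and anything below the command's minimum is an underrun.
 * Standalone userspace C with made-up numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bufflen = 4096;	/* bytes the host asked for */
	unsigned int residual = 512;	/* bytes the device skipped */
	unsigned int underflow = 4096;	/* minimum acceptable transfer */
	unsigned int xfer = bufflen - residual;	/* 3584 */

	if (xfer < underflow)
		printf("underrun: only %u of %u bytes moved\n", xfer, bufflen);
	return 0;
}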
3381 switch (response->response_code) { in pqi_interpret_task_management_response()
3387 rc = -EAGAIN; in pqi_interpret_task_management_response()
3390 rc = -ENODEV; in pqi_interpret_task_management_response()
3393 rc = -EIO; in pqi_interpret_task_management_response()
3398 dev_err(&ctrl_info->pci_dev->dev, in pqi_interpret_task_management_response()
3399 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); in pqi_interpret_task_management_response()
3420 oq_ci = queue_group->oq_ci_copy; in pqi_process_io_intr()
3423 oq_pi = readl(queue_group->oq_pi); in pqi_process_io_intr()
3424 if (oq_pi >= ctrl_info->num_elements_per_oq) { in pqi_process_io_intr()
3426 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3427 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", in pqi_process_io_intr()
3428 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); in pqi_process_io_intr()
3429 return -1; in pqi_process_io_intr()
3435 response = queue_group->oq_element_array + in pqi_process_io_intr()
3438 request_id = get_unaligned_le16(&response->request_id); in pqi_process_io_intr()
3439 if (request_id >= ctrl_info->max_io_slots) { in pqi_process_io_intr()
3441 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3442 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", in pqi_process_io_intr()
3443 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); in pqi_process_io_intr()
3444 return -1; in pqi_process_io_intr()
3447 io_request = &ctrl_info->io_request_pool[request_id]; in pqi_process_io_intr()
3448 if (atomic_read(&io_request->refcount) == 0) { in pqi_process_io_intr()
3450 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3453 return -1; in pqi_process_io_intr()
3456 switch (response->header.iu_type) { in pqi_process_io_intr()
3459 if (io_request->scmd) in pqi_process_io_intr()
3460 io_request->scmd->result = 0; in pqi_process_io_intr()
3465 io_request->status = in pqi_process_io_intr()
3467 &((struct pqi_vendor_general_response *)response)->status); in pqi_process_io_intr()
3470 io_request->status = pqi_interpret_task_management_response(ctrl_info, in pqi_process_io_intr()
3475 io_request->status = -EAGAIN; in pqi_process_io_intr()
3479 io_request->error_info = ctrl_info->error_buffer + in pqi_process_io_intr()
3480 (get_unaligned_le16(&response->error_index) * in pqi_process_io_intr()
3482 pqi_process_io_error(response->header.iu_type, io_request); in pqi_process_io_intr()
3486 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_io_intr()
3488 response->header.iu_type, oq_pi, oq_ci); in pqi_process_io_intr()
3489 return -1; in pqi_process_io_intr()
3492 io_request->io_complete_callback(io_request, io_request->context); in pqi_process_io_intr()
3498 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; in pqi_process_io_intr()
3502 queue_group->oq_ci_copy = oq_ci; in pqi_process_io_intr()
3503 writel(oq_ci, queue_group->oq_ci); in pqi_process_io_intr()
3515 num_elements_used = pi - ci; in pqi_num_elements_free()
3517 num_elements_used = elements_in_queue - ci + pi; in pqi_num_elements_free()
3519 return elements_in_queue - num_elements_used - 1; in pqi_num_elements_free()
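/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * pqi_num_elements_free()'s circular-queue arithmetic. One slot is
 * always kept empty so pi == ci can only mean "queue empty", never
 * "queue full". Standalone userspace C with worked examples.
 */
#include <assert.h>

static unsigned int ring_free(unsigned int pi, unsigned int ci,
			      unsigned int n)
{
	unsigned int used = (pi >= ci) ? pi - ci : n - ci + pi;

	return n - used - 1;	/* reserve one slot as the full marker */
}

int main(void)
{
	assert(ring_free(0, 0, 8) == 7);	/* empty ring */
	assert(ring_free(5, 2, 8) == 4);	/* no wrap: 3 in use */
	assert(ring_free(1, 6, 8) == 4);	/* wrapped: 3 in use */
	return 0;
}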
3531 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; in pqi_send_event_ack()
3532 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); in pqi_send_event_ack()
3535 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3537 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; in pqi_send_event_ack()
3538 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); in pqi_send_event_ack()
3541 ctrl_info->num_elements_per_iq)) in pqi_send_event_ack()
3545 &queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3551 next_element = queue_group->iq_element_array[RAID_PATH] + in pqi_send_event_ack()
3556 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; in pqi_send_event_ack()
3557 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; in pqi_send_event_ack()
3563 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); in pqi_send_event_ack()
3565 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); in pqi_send_event_ack()
3576 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, in pqi_acknowledge_event()
3578 request.event_type = event->event_type; in pqi_acknowledge_event()
3579 put_unaligned_le16(event->event_id, &request.event_id); in pqi_acknowledge_event()
3580 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); in pqi_acknowledge_event()
3608 dev_warn(&ctrl_info->pci_dev->dev, in pqi_poll_for_soft_reset_status()
3623 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3635 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3640 ctrl_info->pqi_mode_enabled = false; in pqi_process_soft_reset()
3643 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); in pqi_process_soft_reset()
3645 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3650 dev_info(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3652 if (ctrl_info->soft_reset_handshake_supported) in pqi_process_soft_reset()
3654 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); in pqi_process_soft_reset()
3661 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_soft_reset()
3664 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); in pqi_process_soft_reset()
3679 …pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info… in pqi_ofa_memory_alloc_worker()
3680 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); in pqi_ofa_memory_alloc_worker()
3690 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; in pqi_ofa_quiesce_worker()
3704 switch (event->event_id) { in pqi_ofa_process_event()
3706 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3708 schedule_work(&ctrl_info->ofa_memory_alloc_work); in pqi_ofa_process_event()
3711 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3713 schedule_work(&ctrl_info->ofa_quiesce_work); in pqi_ofa_process_event()
3717 dev_info(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3719 ctrl_info->ofa_cancel_reason); in pqi_ofa_process_event()
3720 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); in pqi_ofa_process_event()
3724 dev_err(&ctrl_info->pci_dev->dev, in pqi_ofa_process_event()
3726 event->event_id); in pqi_ofa_process_event()
3738 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3740 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { in pqi_mark_volumes_for_rescan()
3741 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) in pqi_mark_volumes_for_rescan()
3742 device->rescan = true; in pqi_mark_volumes_for_rescan()
3745 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_mark_volumes_for_rescan()
3753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3755 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) in pqi_disable_raid_bypass()
3756 if (device->raid_bypass_enabled) in pqi_disable_raid_bypass()
3757 device->raid_bypass_enabled = false; in pqi_disable_raid_bypass()
3759 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_disable_raid_bypass()
3778 event = ctrl_info->events; in pqi_event_worker()
3780 if (event->pending) { in pqi_event_worker()
3781 event->pending = false; in pqi_event_worker()
3782 if (event->event_type == PQI_EVENT_TYPE_OFA) { in pqi_event_worker()
3787 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) in pqi_event_worker()
3789 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) in pqi_event_worker()
3820 num_interrupts = atomic_read(&ctrl_info->num_interrupts); in pqi_heartbeat_timer_handler()
3823 if (num_interrupts == ctrl_info->previous_num_interrupts) { in pqi_heartbeat_timer_handler()
3824 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { in pqi_heartbeat_timer_handler()
3825 dev_err(&ctrl_info->pci_dev->dev, in pqi_heartbeat_timer_handler()
3826 "no heartbeat detected - last heartbeat count: %u\n", in pqi_heartbeat_timer_handler()
3832 ctrl_info->previous_num_interrupts = num_interrupts; in pqi_heartbeat_timer_handler()
3835 ctrl_info->previous_heartbeat_count = heartbeat_count; in pqi_heartbeat_timer_handler()
3836 mod_timer(&ctrl_info->heartbeat_timer, in pqi_heartbeat_timer_handler()
3842 if (!ctrl_info->heartbeat_counter) in pqi_start_heartbeat_timer()
3845 ctrl_info->previous_num_interrupts = in pqi_start_heartbeat_timer()
3846 atomic_read(&ctrl_info->num_interrupts); in pqi_start_heartbeat_timer()
3847 ctrl_info->previous_heartbeat_count = in pqi_start_heartbeat_timer()
3850 ctrl_info->heartbeat_timer.expires = in pqi_start_heartbeat_timer()
3852 add_timer(&ctrl_info->heartbeat_timer); in pqi_start_heartbeat_timer()
3857 del_timer_sync(&ctrl_info->heartbeat_timer); in pqi_stop_heartbeat_timer()
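/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the two-counter watchdog in pqi_heartbeat_timer_handler() - declare
 * the controller hung only when neither the interrupt count nor the
 * firmware heartbeat moved since the previous tick. Names and the
 * standalone form are hypothetical.
 */
struct watchdog_state {
	unsigned int prev_interrupts;
	unsigned int prev_heartbeat;
};

/* Returns 1 if the device made no progress since the last tick. */
static int watchdog_tick(struct watchdog_state *ws,
			 unsigned int interrupts, unsigned int heartbeat)
{
	int hung = interrupts == ws->prev_interrupts &&
		   heartbeat == ws->prev_heartbeat;

	ws->prev_interrupts = interrupts;
	ws->prev_heartbeat = heartbeat;
	return hung;
}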
3863 switch (event->event_id) { in pqi_ofa_capture_event_payload()
3865 ctrl_info->ofa_bytes_requested = in pqi_ofa_capture_event_payload()
3866 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); in pqi_ofa_capture_event_payload()
3869 ctrl_info->ofa_cancel_reason = in pqi_ofa_capture_event_payload()
3870 get_unaligned_le16(&response->data.ofa_cancelled.reason); in pqi_ofa_capture_event_payload()
3885 event_queue = &ctrl_info->event_queue; in pqi_process_event_intr()
3887 oq_ci = event_queue->oq_ci_copy; in pqi_process_event_intr()
3890 oq_pi = readl(event_queue->oq_pi); in pqi_process_event_intr()
3893 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_event_intr()
3894 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", in pqi_process_event_intr()
3895 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); in pqi_process_event_intr()
3896 return -1; in pqi_process_event_intr()
3903 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); in pqi_process_event_intr()
3905 event_index = pqi_event_type_to_event_index(response->event_type); in pqi_process_event_intr()
3907 if (event_index >= 0 && response->request_acknowledge) { in pqi_process_event_intr()
3908 event = &ctrl_info->events[event_index]; in pqi_process_event_intr()
3909 event->pending = true; in pqi_process_event_intr()
3910 event->event_type = response->event_type; in pqi_process_event_intr()
3911 event->event_id = get_unaligned_le16(&response->event_id); in pqi_process_event_intr()
3912 event->additional_event_id = in pqi_process_event_intr()
3913 get_unaligned_le32(&response->additional_event_id); in pqi_process_event_intr()
3914 if (event->event_type == PQI_EVENT_TYPE_OFA) in pqi_process_event_intr()
3922 event_queue->oq_ci_copy = oq_ci; in pqi_process_event_intr()
3923 writel(oq_ci, event_queue->oq_ci); in pqi_process_event_intr()
3924 schedule_work(&ctrl_info->event_work); in pqi_process_event_intr()
3938 pqi_registers = ctrl_info->pqi_registers; in pqi_configure_legacy_intx()
3941 register_addr = &pqi_registers->legacy_intx_mask_clear; in pqi_configure_legacy_intx()
3943 register_addr = &pqi_registers->legacy_intx_mask_set; in pqi_configure_legacy_intx()
3953 switch (ctrl_info->irq_mode) { in pqi_change_irq_mode()
3994 ctrl_info->irq_mode = new_mode; in pqi_change_irq_mode()
4004 switch (ctrl_info->irq_mode) { in pqi_is_valid_irq()
4009 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); in pqi_is_valid_irq()
4032 ctrl_info = queue_group->ctrl_info; in pqi_irq_handler()
4041 if (irq == ctrl_info->event_irq) { in pqi_irq_handler()
4050 atomic_inc(&ctrl_info->num_interrupts); in pqi_irq_handler()
4061 struct pci_dev *pci_dev = ctrl_info->pci_dev; in pqi_request_irqs()
4065 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); in pqi_request_irqs()
4067 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { in pqi_request_irqs()
4069 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); in pqi_request_irqs()
4071 dev_err(&pci_dev->dev, in pqi_request_irqs()
4076 ctrl_info->num_msix_vectors_initialized++; in pqi_request_irqs()
4086 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) in pqi_free_irqs()
4087 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), in pqi_free_irqs()
4088 &ctrl_info->queue_groups[i]); in pqi_free_irqs()
4090 ctrl_info->num_msix_vectors_initialized = 0; in pqi_free_irqs()
4101 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, in pqi_enable_msix_interrupts()
4102 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, in pqi_enable_msix_interrupts()
4105 dev_err(&ctrl_info->pci_dev->dev, in pqi_enable_msix_interrupts()
4106 "MSI-X init failed with error %d\n", in pqi_enable_msix_interrupts()
4111 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; in pqi_enable_msix_interrupts()
4112 ctrl_info->irq_mode = IRQ_MODE_MSIX; in pqi_enable_msix_interrupts()
4118 if (ctrl_info->num_msix_vectors_enabled) { in pqi_disable_msix_interrupts()
4119 pci_free_irq_vectors(ctrl_info->pci_dev); in pqi_disable_msix_interrupts()
4120 ctrl_info->num_msix_vectors_enabled = 0; in pqi_disable_msix_interrupts()
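/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the usual pci_alloc_irq_vectors() pattern behind
 * pqi_enable_msix_interrupts() - request a [min, max] vector range,
 * treat a negative return as failure, and remember the granted count,
 * since the request_irq() loop must not exceed it. Error handling is
 * abbreviated.
 */
#include <linux/pci.h>

static int enable_msix(struct pci_dev *pdev, unsigned int wanted)
{
	int n;

	n = pci_alloc_irq_vectors(pdev, 1, wanted, PCI_IRQ_MSIX);
	if (n < 0)
		return n;	/* not even one vector available */

	/* n may land anywhere in [1, wanted]; size queue usage to it. */
	return n;
}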
4140 ctrl_info->num_elements_per_iq; in pqi_alloc_operational_queues()
4143 ctrl_info->num_elements_per_oq; in pqi_alloc_operational_queues()
4144 num_inbound_queues = ctrl_info->num_queue_groups * 2; in pqi_alloc_operational_queues()
4145 num_outbound_queues = ctrl_info->num_queue_groups; in pqi_alloc_operational_queues()
4146 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; in pqi_alloc_operational_queues()
4178 ctrl_info->queue_memory_base = in pqi_alloc_operational_queues()
4179 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_operational_queues()
4180 &ctrl_info->queue_memory_base_dma_handle, in pqi_alloc_operational_queues()
4183 if (!ctrl_info->queue_memory_base) in pqi_alloc_operational_queues()
4184 return -ENOMEM; in pqi_alloc_operational_queues()
4186 ctrl_info->queue_memory_length = alloc_length; in pqi_alloc_operational_queues()
4188 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, in pqi_alloc_operational_queues()
4191 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4192 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4193 queue_group->iq_element_array[RAID_PATH] = element_array; in pqi_alloc_operational_queues()
4194 queue_group->iq_element_array_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
4195 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4196 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4200 queue_group->iq_element_array[AIO_PATH] = element_array; in pqi_alloc_operational_queues()
4201 queue_group->iq_element_array_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
4202 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4203 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4209 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4210 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4211 queue_group->oq_element_array = element_array; in pqi_alloc_operational_queues()
4212 queue_group->oq_element_array_bus_addr = in pqi_alloc_operational_queues()
4213 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4214 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4220 ctrl_info->event_queue.oq_element_array = element_array; in pqi_alloc_operational_queues()
4221 ctrl_info->event_queue.oq_element_array_bus_addr = in pqi_alloc_operational_queues()
4222 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4223 (element_array - ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4230 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_alloc_operational_queues()
4231 queue_group = &ctrl_info->queue_groups[i]; in pqi_alloc_operational_queues()
4232 queue_group->iq_ci[RAID_PATH] = next_queue_index; in pqi_alloc_operational_queues()
4233 queue_group->iq_ci_bus_addr[RAID_PATH] = in pqi_alloc_operational_queues()
4234 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4235 (next_queue_index - in pqi_alloc_operational_queues()
4236 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4240 queue_group->iq_ci[AIO_PATH] = next_queue_index; in pqi_alloc_operational_queues()
4241 queue_group->iq_ci_bus_addr[AIO_PATH] = in pqi_alloc_operational_queues()
4242 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4243 (next_queue_index - in pqi_alloc_operational_queues()
4244 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4248 queue_group->oq_pi = next_queue_index; in pqi_alloc_operational_queues()
4249 queue_group->oq_pi_bus_addr = in pqi_alloc_operational_queues()
4250 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4251 (next_queue_index - in pqi_alloc_operational_queues()
4252 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
4258 ctrl_info->event_queue.oq_pi = next_queue_index; in pqi_alloc_operational_queues()
4259 ctrl_info->event_queue.oq_pi_bus_addr = in pqi_alloc_operational_queues()
4260 ctrl_info->queue_memory_base_dma_handle + in pqi_alloc_operational_queues()
4261 (next_queue_index - in pqi_alloc_operational_queues()
4262 (void __iomem *)ctrl_info->queue_memory_base); in pqi_alloc_operational_queues()
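/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * pqi_alloc_operational_queues() carves one coherent DMA block into
 * aligned pieces and derives each piece's bus address from its byte
 * offset - the repeated base_dma + (ptr - base) arithmetic. Kernel-style
 * C; names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/types.h>

struct carved {
	void *cpu;	/* kernel virtual address of the piece */
	dma_addr_t dma;	/* matching bus address */
};

static struct carved carve(void *base, dma_addr_t base_dma,
			   size_t offset, size_t align)
{
	struct carved c;

	c.cpu = PTR_ALIGN(base + offset, align);
	c.dma = base_dma + (c.cpu - base);	/* same offset on the bus */
	return c;
}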
4277 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4278 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; in pqi_init_operational_queues()
4285 ctrl_info->event_queue.oq_id = next_oq_id++; in pqi_init_operational_queues()
4286 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4287 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; in pqi_init_operational_queues()
4288 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; in pqi_init_operational_queues()
4289 ctrl_info->queue_groups[i].oq_id = next_oq_id++; in pqi_init_operational_queues()
4293 * Assign MSI-X table entry indexes to all queues. Note that the in pqi_init_operational_queues()
4296 ctrl_info->event_queue.int_msg_num = 0; in pqi_init_operational_queues()
4297 for (i = 0; i < ctrl_info->num_queue_groups; i++) in pqi_init_operational_queues()
4298 ctrl_info->queue_groups[i].int_msg_num = i; in pqi_init_operational_queues()
4300 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_init_operational_queues()
4301 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); in pqi_init_operational_queues()
4302 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); in pqi_init_operational_queues()
4303 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); in pqi_init_operational_queues()
4304 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); in pqi_init_operational_queues()
4317 ctrl_info->admin_queue_memory_base = in pqi_alloc_admin_queues()
4318 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, in pqi_alloc_admin_queues()
4319 &ctrl_info->admin_queue_memory_base_dma_handle, in pqi_alloc_admin_queues()
4322 if (!ctrl_info->admin_queue_memory_base) in pqi_alloc_admin_queues()
4323 return -ENOMEM; in pqi_alloc_admin_queues()
4325 ctrl_info->admin_queue_memory_length = alloc_length; in pqi_alloc_admin_queues()
4327 admin_queues = &ctrl_info->admin_queues; in pqi_alloc_admin_queues()
4328 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, in pqi_alloc_admin_queues()
4330 admin_queues->iq_element_array = in pqi_alloc_admin_queues()
4331 &admin_queues_aligned->iq_element_array; in pqi_alloc_admin_queues()
4332 admin_queues->oq_element_array = in pqi_alloc_admin_queues()
4333 &admin_queues_aligned->oq_element_array; in pqi_alloc_admin_queues()
4334 admin_queues->iq_ci = in pqi_alloc_admin_queues()
4335 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; in pqi_alloc_admin_queues()
4336 admin_queues->oq_pi = in pqi_alloc_admin_queues()
4337 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; in pqi_alloc_admin_queues()
4339 admin_queues->iq_element_array_bus_addr = in pqi_alloc_admin_queues()
4340 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4341 (admin_queues->iq_element_array - in pqi_alloc_admin_queues()
4342 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4343 admin_queues->oq_element_array_bus_addr = in pqi_alloc_admin_queues()
4344 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4345 (admin_queues->oq_element_array - in pqi_alloc_admin_queues()
4346 ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4347 admin_queues->iq_ci_bus_addr = in pqi_alloc_admin_queues()
4348 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4349 ((void __iomem *)admin_queues->iq_ci - in pqi_alloc_admin_queues()
4350 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4351 admin_queues->oq_pi_bus_addr = in pqi_alloc_admin_queues()
4352 ctrl_info->admin_queue_memory_base_dma_handle + in pqi_alloc_admin_queues()
4353 ((void __iomem *)admin_queues->oq_pi - in pqi_alloc_admin_queues()
4354 (void __iomem *)ctrl_info->admin_queue_memory_base); in pqi_alloc_admin_queues()
4370 pqi_registers = ctrl_info->pqi_registers; in pqi_create_admin_queues()
4371 admin_queues = &ctrl_info->admin_queues; in pqi_create_admin_queues()
4373 writeq((u64)admin_queues->iq_element_array_bus_addr, in pqi_create_admin_queues()
4374 &pqi_registers->admin_iq_element_array_addr); in pqi_create_admin_queues()
4375 writeq((u64)admin_queues->oq_element_array_bus_addr, in pqi_create_admin_queues()
4376 &pqi_registers->admin_oq_element_array_addr); in pqi_create_admin_queues()
4377 writeq((u64)admin_queues->iq_ci_bus_addr, in pqi_create_admin_queues()
4378 &pqi_registers->admin_iq_ci_addr); in pqi_create_admin_queues()
4379 writeq((u64)admin_queues->oq_pi_bus_addr, in pqi_create_admin_queues()
4380 &pqi_registers->admin_oq_pi_addr); in pqi_create_admin_queues()
4384 (admin_queues->int_msg_num << 16); in pqi_create_admin_queues()
4385 writel(reg, &pqi_registers->admin_iq_num_elements); in pqi_create_admin_queues()
4388 &pqi_registers->function_and_status_code); in pqi_create_admin_queues()
4393 status = readb(&pqi_registers->function_and_status_code); in pqi_create_admin_queues()
4397 return -ETIMEDOUT; in pqi_create_admin_queues()
4405 admin_queues->iq_pi = ctrl_info->iomem_base + in pqi_create_admin_queues()
4407 readq(&pqi_registers->admin_iq_pi_offset); in pqi_create_admin_queues()
4408 admin_queues->oq_ci = ctrl_info->iomem_base + in pqi_create_admin_queues()
4410 readq(&pqi_registers->admin_oq_ci_offset); in pqi_create_admin_queues()
4422 admin_queues = &ctrl_info->admin_queues; in pqi_submit_admin_request()
4423 iq_pi = admin_queues->iq_pi_copy; in pqi_submit_admin_request()
4425 next_element = admin_queues->iq_element_array + in pqi_submit_admin_request()
4431 admin_queues->iq_pi_copy = iq_pi; in pqi_submit_admin_request()
4437 writel(iq_pi, admin_queues->iq_pi); in pqi_submit_admin_request()
4450 admin_queues = &ctrl_info->admin_queues; in pqi_poll_for_admin_response()
4451 oq_ci = admin_queues->oq_ci_copy; in pqi_poll_for_admin_response()
4456 oq_pi = readl(admin_queues->oq_pi); in pqi_poll_for_admin_response()
4460 dev_err(&ctrl_info->pci_dev->dev, in pqi_poll_for_admin_response()
4462 return -ETIMEDOUT; in pqi_poll_for_admin_response()
4465 return -ENXIO; in pqi_poll_for_admin_response()
4469 memcpy(response, admin_queues->oq_element_array + in pqi_poll_for_admin_response()
4473 admin_queues->oq_ci_copy = oq_ci; in pqi_poll_for_admin_response()
4474 writel(oq_ci, admin_queues->oq_ci); in pqi_poll_for_admin_response()
4494 spin_lock_irqsave(&queue_group->submit_lock[path], flags); in pqi_start_io()
4497 io_request->queue_group = queue_group; in pqi_start_io()
4498 list_add_tail(&io_request->request_list_entry, in pqi_start_io()
4499 &queue_group->request_list[path]); in pqi_start_io()
4502 iq_pi = queue_group->iq_pi_copy[path]; in pqi_start_io()
4505 &queue_group->request_list[path], request_list_entry) { in pqi_start_io()
4507 request = io_request->iu; in pqi_start_io()
4509 iu_length = get_unaligned_le16(&request->iu_length) + in pqi_start_io()
4515 iq_ci = readl(queue_group->iq_ci[path]); in pqi_start_io()
4518 ctrl_info->num_elements_per_iq)) in pqi_start_io()
4521 put_unaligned_le16(queue_group->oq_id, in pqi_start_io()
4522 &request->response_queue_id); in pqi_start_io()
4524 next_element = queue_group->iq_element_array[path] + in pqi_start_io()
4528 ctrl_info->num_elements_per_iq - iq_pi; in pqi_start_io()
4536 memcpy(queue_group->iq_element_array[path], in pqi_start_io()
4538 iu_length - copy_count); in pqi_start_io()
4542 ctrl_info->num_elements_per_iq; in pqi_start_io()
4544 list_del(&io_request->request_list_entry); in pqi_start_io()
4547 if (iq_pi != queue_group->iq_pi_copy[path]) { in pqi_start_io()
4548 queue_group->iq_pi_copy[path] = iq_pi; in pqi_start_io()
4553 writel(iq_pi, queue_group->iq_pi[path]); in pqi_start_io()
4556 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); in pqi_start_io()
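/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the two-piece copy pqi_start_io() performs when an IU spans the end
 * of the circular element array - fill to the end of the ring, then
 * wrap the remainder to element 0. Standalone userspace C; sizes are
 * made up.
 */
#include <string.h>

#define ELEM_SZ		16
#define NUM_ELEMS	64

static void ring_copy(char ring[NUM_ELEMS * ELEM_SZ], unsigned int pi,
		      const char *iu, size_t iu_len)
{
	size_t room = (size_t)(NUM_ELEMS - pi) * ELEM_SZ;

	if (iu_len <= room) {
		memcpy(&ring[pi * ELEM_SZ], iu, iu_len);	/* no wrap */
	} else {
		memcpy(&ring[pi * ELEM_SZ], iu, room);		/* tail */
		memcpy(ring, iu + room, iu_len - room);		/* wrap */
	}
}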
4575 rc = -ENXIO; in pqi_wait_for_completion_io()
4594 int rc = -EIO; in pqi_process_raid_io_error_synchronous()
4596 switch (error_info->data_out_result) { in pqi_process_raid_io_error_synchronous()
4598 if (error_info->status == SAM_STAT_GOOD) in pqi_process_raid_io_error_synchronous()
4602 if (error_info->status == SAM_STAT_GOOD || in pqi_process_raid_io_error_synchronous()
4603 error_info->status == SAM_STAT_CHECK_CONDITION) in pqi_process_raid_io_error_synchronous()
4616 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; in pqi_is_blockable_request()
4629 if (down_interruptible(&ctrl_info->sync_request_sem)) in pqi_submit_raid_request_synchronous()
4630 return -ERESTARTSYS; in pqi_submit_raid_request_synchronous()
4632 down(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4644 rc = -ENXIO; in pqi_submit_raid_request_synchronous()
4650 put_unaligned_le16(io_request->index, in pqi_submit_raid_request_synchronous()
4651 &(((struct pqi_raid_path_request *)request)->request_id)); in pqi_submit_raid_request_synchronous()
4653 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) in pqi_submit_raid_request_synchronous()
4654 ((struct pqi_raid_path_request *)request)->error_index = in pqi_submit_raid_request_synchronous()
4655 ((struct pqi_raid_path_request *)request)->request_id; in pqi_submit_raid_request_synchronous()
4657 iu_length = get_unaligned_le16(&request->iu_length) + in pqi_submit_raid_request_synchronous()
4659 memcpy(io_request->iu, request, iu_length); in pqi_submit_raid_request_synchronous()
4661 io_request->io_complete_callback = pqi_raid_synchronous_complete; in pqi_submit_raid_request_synchronous()
4662 io_request->context = &wait; in pqi_submit_raid_request_synchronous()
4664 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_submit_raid_request_synchronous()
4670 if (io_request->error_info) in pqi_submit_raid_request_synchronous()
4671 memcpy(error_info, io_request->error_info, sizeof(*error_info)); in pqi_submit_raid_request_synchronous()
4674 } else if (rc == 0 && io_request->error_info) { in pqi_submit_raid_request_synchronous()
4675 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); in pqi_submit_raid_request_synchronous()
4682 up(&ctrl_info->sync_request_sem); in pqi_submit_raid_request_synchronous()
4690 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) in pqi_validate_admin_response()
4691 return -EINVAL; in pqi_validate_admin_response()
4693 if (get_unaligned_le16(&response->header.iu_length) != in pqi_validate_admin_response()
4695 return -EINVAL; in pqi_validate_admin_response()
4697 if (response->function_code != expected_function_code) in pqi_validate_admin_response()
4698 return -EINVAL; in pqi_validate_admin_response()
4700 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) in pqi_validate_admin_response()
4701 return -EINVAL; in pqi_validate_admin_response()
4718 rc = pqi_validate_admin_response(response, request->function_code); in pqi_submit_admin_request_synchronous()
4733 return -ENOMEM; in pqi_report_device_capability()
4745 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_report_device_capability()
4754 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_report_device_capability()
4762 rc = -EIO; in pqi_report_device_capability()
4766 ctrl_info->max_inbound_queues = in pqi_report_device_capability()
4767 get_unaligned_le16(&capability->max_inbound_queues); in pqi_report_device_capability()
4768 ctrl_info->max_elements_per_iq = in pqi_report_device_capability()
4769 get_unaligned_le16(&capability->max_elements_per_iq); in pqi_report_device_capability()
4770 ctrl_info->max_iq_element_length = in pqi_report_device_capability()
4771 get_unaligned_le16(&capability->max_iq_element_length) in pqi_report_device_capability()
4773 ctrl_info->max_outbound_queues = in pqi_report_device_capability()
4774 get_unaligned_le16(&capability->max_outbound_queues); in pqi_report_device_capability()
4775 ctrl_info->max_elements_per_oq = in pqi_report_device_capability()
4776 get_unaligned_le16(&capability->max_elements_per_oq); in pqi_report_device_capability()
4777 ctrl_info->max_oq_element_length = in pqi_report_device_capability()
4778 get_unaligned_le16(&capability->max_oq_element_length) in pqi_report_device_capability()
4782 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; in pqi_report_device_capability()
4784 ctrl_info->max_inbound_iu_length_per_firmware = in pqi_report_device_capability()
4786 &sop_iu_layer_descriptor->max_inbound_iu_length); in pqi_report_device_capability()
4787 ctrl_info->inbound_spanning_supported = in pqi_report_device_capability()
4788 sop_iu_layer_descriptor->inbound_spanning_supported; in pqi_report_device_capability()
4789 ctrl_info->outbound_spanning_supported = in pqi_report_device_capability()
4790 sop_iu_layer_descriptor->outbound_spanning_supported; in pqi_report_device_capability()
4800 if (ctrl_info->max_iq_element_length < in pqi_validate_device_capability()
4802 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4804 ctrl_info->max_iq_element_length, in pqi_validate_device_capability()
4806 return -EINVAL; in pqi_validate_device_capability()
4809 if (ctrl_info->max_oq_element_length < in pqi_validate_device_capability()
4811 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4813 ctrl_info->max_oq_element_length, in pqi_validate_device_capability()
4815 return -EINVAL; in pqi_validate_device_capability()
4818 if (ctrl_info->max_inbound_iu_length_per_firmware < in pqi_validate_device_capability()
4820 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4822 ctrl_info->max_inbound_iu_length_per_firmware, in pqi_validate_device_capability()
4824 return -EINVAL; in pqi_validate_device_capability()
4827 if (!ctrl_info->inbound_spanning_supported) { in pqi_validate_device_capability()
4828 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4830 return -EINVAL; in pqi_validate_device_capability()
4833 if (ctrl_info->outbound_spanning_supported) { in pqi_validate_device_capability()
4834 dev_err(&ctrl_info->pci_dev->dev, in pqi_validate_device_capability()
4836 return -EINVAL; in pqi_validate_device_capability()
4849 event_queue = &ctrl_info->event_queue; in pqi_create_event_queue()
4852 * Create OQ (Outbound Queue - device to host queue) to dedicate in pqi_create_event_queue()
4860 put_unaligned_le16(event_queue->oq_id, in pqi_create_event_queue()
4862 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, in pqi_create_event_queue()
4864 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, in pqi_create_event_queue()
4871 put_unaligned_le16(event_queue->int_msg_num, in pqi_create_event_queue()
4879 event_queue->oq_ci = ctrl_info->iomem_base + in pqi_create_event_queue()
4895 queue_group = &ctrl_info->queue_groups[group_number]; in pqi_create_queue_group()
4898 * Create IQ (Inbound Queue - host to device queue) for in pqi_create_queue_group()
4906 put_unaligned_le16(queue_group->iq_id[RAID_PATH], in pqi_create_queue_group()
4909 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], in pqi_create_queue_group()
4911 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], in pqi_create_queue_group()
4913 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4922 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4927 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4933 * Create IQ (Inbound Queue - host to device queue) for in pqi_create_queue_group()
4941 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4943 put_unaligned_le64((u64)queue_group-> in pqi_create_queue_group()
4946 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], in pqi_create_queue_group()
4948 put_unaligned_le16(ctrl_info->num_elements_per_iq, in pqi_create_queue_group()
4957 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4962 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + in pqi_create_queue_group()
4977 put_unaligned_le16(queue_group->iq_id[AIO_PATH], in pqi_create_queue_group()
4985 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
4991 * Create OQ (Outbound Queue - device to host queue). in pqi_create_queue_group()
4998 put_unaligned_le16(queue_group->oq_id, in pqi_create_queue_group()
5000 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, in pqi_create_queue_group()
5002 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, in pqi_create_queue_group()
5004 put_unaligned_le16(ctrl_info->num_elements_per_oq, in pqi_create_queue_group()
5009 put_unaligned_le16(queue_group->int_msg_num, in pqi_create_queue_group()
5015 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queue_group()
5020 queue_group->oq_ci = ctrl_info->iomem_base + in pqi_create_queue_group()
5035 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5040 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_create_queues()
5043 dev_err(&ctrl_info->pci_dev->dev, in pqi_create_queues()
5045 i, ctrl_info->num_queue_groups); in pqi_create_queues()
5068 return -ENOMEM; in pqi_configure_events()
5074 data.report_event_configuration.sg_descriptors[1]) - in pqi_configure_events()
5079 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5088 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5095 for (i = 0; i < event_config->num_event_descriptors; i++) { in pqi_configure_events()
5096 event_descriptor = &event_config->descriptors[i]; in pqi_configure_events()
5098 pqi_is_supported_event(event_descriptor->event_type)) in pqi_configure_events()
5099 put_unaligned_le16(ctrl_info->event_queue.oq_id, in pqi_configure_events()
5100 &event_descriptor->oq_id); in pqi_configure_events()
5102 put_unaligned_le16(0, &event_descriptor->oq_id); in pqi_configure_events()
5109 data.report_event_configuration.sg_descriptors[1]) - in pqi_configure_events()
5114 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_configure_events()
5123 pqi_pci_unmap(ctrl_info->pci_dev, in pqi_configure_events()
5145 if (!ctrl_info->io_request_pool) in pqi_free_all_io_requests()
5148 dev = &ctrl_info->pci_dev->dev; in pqi_free_all_io_requests()
5149 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_free_all_io_requests()
5150 io_request = ctrl_info->io_request_pool; in pqi_free_all_io_requests()
5152 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_free_all_io_requests()
5153 kfree(io_request->iu); in pqi_free_all_io_requests()
5154 if (!io_request->sg_chain_buffer) in pqi_free_all_io_requests()
5157 io_request->sg_chain_buffer, in pqi_free_all_io_requests()
5158 io_request->sg_chain_buffer_dma_handle); in pqi_free_all_io_requests()
5162 kfree(ctrl_info->io_request_pool); in pqi_free_all_io_requests()
5163 ctrl_info->io_request_pool = NULL; in pqi_free_all_io_requests()
5168 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, in pqi_alloc_error_buffer()
5169 ctrl_info->error_buffer_length, in pqi_alloc_error_buffer()
5170 &ctrl_info->error_buffer_dma_handle, in pqi_alloc_error_buffer()
5172 if (!ctrl_info->error_buffer) in pqi_alloc_error_buffer()
5173 return -ENOMEM; in pqi_alloc_error_buffer()
5187 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, in pqi_alloc_io_resources()
5188 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); in pqi_alloc_io_resources()
5190 if (!ctrl_info->io_request_pool) { in pqi_alloc_io_resources()
5191 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5196 dev = &ctrl_info->pci_dev->dev; in pqi_alloc_io_resources()
5197 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; in pqi_alloc_io_resources()
5198 io_request = ctrl_info->io_request_pool; in pqi_alloc_io_resources()
5200 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_alloc_io_resources()
5201 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); in pqi_alloc_io_resources()
5203 if (!io_request->iu) { in pqi_alloc_io_resources()
5204 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5214 dev_err(&ctrl_info->pci_dev->dev, in pqi_alloc_io_resources()
5215 "failed to allocate PQI scatter-gather chain buffers\n"); in pqi_alloc_io_resources()
5219 io_request->index = i; in pqi_alloc_io_resources()
5220 io_request->sg_chain_buffer = sg_chain_buffer; in pqi_alloc_io_resources()
5221 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; in pqi_alloc_io_resources()
5230 return -ENOMEM; in pqi_alloc_io_resources()
5243 ctrl_info->scsi_ml_can_queue = in pqi_calculate_io_resources()
5244 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; in pqi_calculate_io_resources()
5245 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; in pqi_calculate_io_resources()
5247 ctrl_info->error_buffer_length = in pqi_calculate_io_resources()
5248 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; in pqi_calculate_io_resources()
5251 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5254 max_transfer_size = min(ctrl_info->max_transfer_size, in pqi_calculate_io_resources()
5259 /* +1 to cover when the buffer is not page-aligned. */ in pqi_calculate_io_resources()
5262 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); in pqi_calculate_io_resources()
5264 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; in pqi_calculate_io_resources()
5266 ctrl_info->sg_chain_buffer_length = in pqi_calculate_io_resources()
5269 ctrl_info->sg_tablesize = max_sg_entries; in pqi_calculate_io_resources()
5270 ctrl_info->max_sectors = max_transfer_size / 512; in pqi_calculate_io_resources()
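/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the worst-case scatter-gather sizing in pqi_calculate_io_resources() -
 * one SG entry per page plus one for a misaligned buffer, and if the
 * entry count is capped, the usable transfer size shrinks to match.
 * Standalone userspace C, 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SZ 4096u

int main(void)
{
	unsigned int max_transfer = 1024u * 1024u;		/* 1 MiB */
	unsigned int sg_needed = max_transfer / PAGE_SZ + 1;	/* 257 */
	unsigned int sg_cap = 128;	/* controller limit (made up) */

	if (sg_needed > sg_cap)
		max_transfer = (sg_cap - 1) * PAGE_SZ;	/* 508 KiB */

	printf("sg entries=%u max transfer=%u bytes\n", sg_cap, max_transfer);
	return 0;
}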
5285 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, in pqi_calculate_queue_resources()
5286 ctrl_info->max_outbound_queues - 1); in pqi_calculate_queue_resources()
5290 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); in pqi_calculate_queue_resources()
5294 ctrl_info->num_queue_groups = num_queue_groups; in pqi_calculate_queue_resources()
5300 ctrl_info->max_inbound_iu_length = in pqi_calculate_queue_resources()
5301 (ctrl_info->max_inbound_iu_length_per_firmware / in pqi_calculate_queue_resources()
5306 (ctrl_info->max_inbound_iu_length / in pqi_calculate_queue_resources()
5313 ctrl_info->max_elements_per_iq); in pqi_calculate_queue_resources()
5315 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; in pqi_calculate_queue_resources()
5317 ctrl_info->max_elements_per_oq); in pqi_calculate_queue_resources()
5319 ctrl_info->num_elements_per_iq = num_elements_per_iq; in pqi_calculate_queue_resources()
5320 ctrl_info->num_elements_per_oq = num_elements_per_oq; in pqi_calculate_queue_resources()
5322 ctrl_info->max_sg_per_iu = in pqi_calculate_queue_resources()
5323 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
5328 ctrl_info->max_sg_per_r56_iu = in pqi_calculate_queue_resources()
5329 ((ctrl_info->max_inbound_iu_length - in pqi_calculate_queue_resources()
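/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the two derivations in pqi_calculate_queue_resources() - round the
 * firmware's IU limit down to a whole number of queue elements, then
 * count how many SG descriptors fit after the fixed header. All
 * constants here are made up.
 */
#define ELEM_LEN	128u	/* queue element size */
#define IU_HEADER	64u	/* fixed part of the IU */
#define SG_DESC_LEN	16u	/* one SG descriptor */

static unsigned int round_down_to_elems(unsigned int fw_limit)
{
	return fw_limit / ELEM_LEN * ELEM_LEN;	/* e.g. 1000 -> 896 */
}

static unsigned int sg_per_iu(unsigned int iu_len)
{
	return (iu_len - IU_HEADER) / SG_DESC_LEN;	/* 896 -> 52 */
}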
5341 put_unaligned_le64(address, &sg_descriptor->address); in pqi_set_sg_descriptor()
5342 put_unaligned_le32(length, &sg_descriptor->length); in pqi_set_sg_descriptor()
5343 put_unaligned_le32(0, &sg_descriptor->flags); in pqi_set_sg_descriptor()
5356 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ in pqi_build_sg_list()
5367 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, in pqi_build_sg_list()
5368 &sg_descriptor->address); in pqi_build_sg_list()
5369 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), in pqi_build_sg_list()
5370 &sg_descriptor->length); in pqi_build_sg_list()
5371 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); in pqi_build_sg_list()
5374 sg_descriptor = io_request->sg_chain_buffer; in pqi_build_sg_list()
5379 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); in pqi_build_sg_list()
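/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * the chaining decision in pqi_build_sg_list() - when a command needs
 * more SG entries than fit inside the request, the last in-IU slot
 * becomes a CHAIN descriptor pointing at an external buffer that holds
 * the remainder. Simplified standalone C; the descriptor layout and
 * flag value are hypothetical.
 */
struct sg_desc { unsigned long long addr; unsigned int len, flags; };

#define F_CHAIN 0x80000000u	/* "this entry points at more entries" */

static unsigned int plan_sg(struct sg_desc *in_iu, unsigned int iu_cap,
			    unsigned int total,
			    unsigned long long chain_dma)
{
	if (total <= iu_cap)
		return total;	/* everything fits inline */

	/* Keep iu_cap - 1 inline; the last slot is the chain marker. */
	in_iu[iu_cap - 1].addr = chain_dma;
	in_iu[iu_cap - 1].len =
		(total - (iu_cap - 1)) * sizeof(struct sg_desc);
	in_iu[iu_cap - 1].flags = F_CHAIN;
	return iu_cap;		/* descriptors consumed inside the IU */
}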
5399 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - in pqi_build_raid_sg_list()
5406 sg_descriptor = request->sg_descriptors; in pqi_build_raid_sg_list()
5409 ctrl_info->max_sg_per_iu, &chained); in pqi_build_raid_sg_list()
5411 request->partial = chained; in pqi_build_raid_sg_list()
5415 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_raid_sg_list()
5435 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - in pqi_build_aio_r1_sg_list()
5443 sg_descriptor = request->sg_descriptors; in pqi_build_aio_r1_sg_list()
5446 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_r1_sg_list()
5448 request->partial = chained; in pqi_build_aio_r1_sg_list()
5452 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_r1_sg_list()
5453 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_r1_sg_list()
5473 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - in pqi_build_aio_r56_sg_list()
5479 sg_descriptor = request->sg_descriptors; in pqi_build_aio_r56_sg_list()
5482 ctrl_info->max_sg_per_r56_iu, &chained); in pqi_build_aio_r56_sg_list()
5484 request->partial = chained; in pqi_build_aio_r56_sg_list()
5488 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_r56_sg_list()
5489 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_r56_sg_list()
5509 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - in pqi_build_aio_sg_list()
5517 sg_descriptor = request->sg_descriptors; in pqi_build_aio_sg_list()
5520 ctrl_info->max_sg_per_iu, &chained); in pqi_build_aio_sg_list()
5522 request->partial = chained; in pqi_build_aio_sg_list()
5526 put_unaligned_le16(iu_length, &request->header.iu_length); in pqi_build_aio_sg_list()
5527 request->num_sg_descriptors = num_sg_in_iu; in pqi_build_aio_sg_list()
5537 scmd = io_request->scmd; in pqi_raid_io_complete()
5556 io_request->io_complete_callback = pqi_raid_io_complete; in pqi_raid_submit_io()
5557 io_request->scmd = scmd; in pqi_raid_submit_io()
5559 request = io_request->iu; in pqi_raid_submit_io()
5562 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; in pqi_raid_submit_io()
5563 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); in pqi_raid_submit_io()
5564 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_raid_submit_io()
5565 request->command_priority = io_high_prio; in pqi_raid_submit_io()
5566 put_unaligned_le16(io_request->index, &request->request_id); in pqi_raid_submit_io()
5567 request->error_index = request->request_id; in pqi_raid_submit_io()
5568 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); in pqi_raid_submit_io()
5569 request->ml_device_lun_number = (u8)scmd->device->lun; in pqi_raid_submit_io()
5571 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); in pqi_raid_submit_io()
5572 memcpy(request->cdb, scmd->cmnd, cdb_length); in pqi_raid_submit_io()
5579 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; in pqi_raid_submit_io()
5582 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; in pqi_raid_submit_io()
5585 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; in pqi_raid_submit_io()
5588 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; in pqi_raid_submit_io()
5592 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; in pqi_raid_submit_io()
5596 switch (scmd->sc_data_direction) { in pqi_raid_submit_io()
5598 request->data_direction = SOP_READ_FLAG; in pqi_raid_submit_io()
5601 request->data_direction = SOP_WRITE_FLAG; in pqi_raid_submit_io()
5604 request->data_direction = SOP_NO_DIRECTION_FLAG; in pqi_raid_submit_io()
5607 request->data_direction = SOP_BIDIRECTIONAL; in pqi_raid_submit_io()
5610 dev_err(&ctrl_info->pci_dev->dev, in pqi_raid_submit_io()
5612 scmd->sc_data_direction); in pqi_raid_submit_io()
5644 if (!io_request->raid_bypass) in pqi_raid_bypass_retry_needed()
5647 scmd = io_request->scmd; in pqi_raid_bypass_retry_needed()
5648 if ((scmd->result & 0xff) == SAM_STAT_GOOD) in pqi_raid_bypass_retry_needed()
5650 if (host_byte(scmd->result) == DID_NO_CONNECT) in pqi_raid_bypass_retry_needed()
5653 device = scmd->device->hostdata; in pqi_raid_bypass_retry_needed()
5657 ctrl_info = shost_to_hba(scmd->device->host); in pqi_raid_bypass_retry_needed()
5669 scmd = io_request->scmd; in pqi_aio_io_complete()
5671 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { in pqi_aio_io_complete()
5673 pqi_cmd_priv(scmd)->this_residual++; in pqi_aio_io_complete()
5688 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, in pqi_aio_submit_scsi_cmd()
5689 scmd->cmnd, scmd->cmd_len, queue_group, NULL, in pqi_aio_submit_scsi_cmd()
5707 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_io()
5708 io_request->scmd = scmd; in pqi_aio_submit_io()
5709 io_request->raid_bypass = raid_bypass; in pqi_aio_submit_io()
5711 request = io_request->iu; in pqi_aio_submit_io()
5714 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; in pqi_aio_submit_io()
5715 put_unaligned_le32(aio_handle, &request->nexus_id); in pqi_aio_submit_io()
5716 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); in pqi_aio_submit_io()
5717 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_io()
5718 request->command_priority = io_high_prio; in pqi_aio_submit_io()
5719 put_unaligned_le16(io_request->index, &request->request_id); in pqi_aio_submit_io()
5720 request->error_index = request->request_id; in pqi_aio_submit_io()
5721 if (!raid_bypass && ctrl_info->multi_lun_device_supported) in pqi_aio_submit_io()
5722 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); in pqi_aio_submit_io()
5723 if (cdb_length > sizeof(request->cdb)) in pqi_aio_submit_io()
5724 cdb_length = sizeof(request->cdb); in pqi_aio_submit_io()
5725 request->cdb_length = cdb_length; in pqi_aio_submit_io()
5726 memcpy(request->cdb, cdb, cdb_length); in pqi_aio_submit_io()
5728 switch (scmd->sc_data_direction) { in pqi_aio_submit_io()
5730 request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_io()
5733 request->data_direction = SOP_WRITE_FLAG; in pqi_aio_submit_io()
5736 request->data_direction = SOP_NO_DIRECTION_FLAG; in pqi_aio_submit_io()
5739 request->data_direction = SOP_BIDIRECTIONAL; in pqi_aio_submit_io()
5742 dev_err(&ctrl_info->pci_dev->dev, in pqi_aio_submit_io()
5744 scmd->sc_data_direction); in pqi_aio_submit_io()
5749 request->encryption_enable = true; in pqi_aio_submit_io()
5750 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_io()
5751 &request->data_encryption_key_index); in pqi_aio_submit_io()
5752 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_io()
5753 &request->encrypt_tweak_lower); in pqi_aio_submit_io()
5754 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_io()
5755 &request->encrypt_tweak_upper); in pqi_aio_submit_io()
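/*
 * Annotation - illustrative sketch, not part of the driver source above:
 * splitting a 64-bit encryption tweak into the two 32-bit request
 * fields. The driver uses put_unaligned_le32() so the bytes are
 * little-endian on any host; this standalone sketch assumes a
 * little-endian machine and a hypothetical struct.
 */
#include <stdint.h>

struct enc_fields {
	uint32_t tweak_lower;
	uint32_t tweak_upper;
};

static void set_tweak(struct enc_fields *f, uint64_t tweak)
{
	f->tweak_lower = (uint32_t)(tweak & 0xffffffffu);	/* low 32 */
	f->tweak_upper = (uint32_t)(tweak >> 32);		/* high 32 */
}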
5782 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_r1_write_io()
5783 io_request->scmd = scmd; in pqi_aio_submit_r1_write_io()
5784 io_request->raid_bypass = true; in pqi_aio_submit_r1_write_io()
5786 r1_request = io_request->iu; in pqi_aio_submit_r1_write_io()
5789 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; in pqi_aio_submit_r1_write_io()
5790 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); in pqi_aio_submit_r1_write_io()
5791 r1_request->num_drives = rmd->num_it_nexus_entries; in pqi_aio_submit_r1_write_io()
5792 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); in pqi_aio_submit_r1_write_io()
5793 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); in pqi_aio_submit_r1_write_io()
5794 if (rmd->num_it_nexus_entries == 3) in pqi_aio_submit_r1_write_io()
5795 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); in pqi_aio_submit_r1_write_io()
5797 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); in pqi_aio_submit_r1_write_io()
5798 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_r1_write_io()
5799 put_unaligned_le16(io_request->index, &r1_request->request_id); in pqi_aio_submit_r1_write_io()
5800 r1_request->error_index = r1_request->request_id; in pqi_aio_submit_r1_write_io()
5801 if (rmd->cdb_length > sizeof(r1_request->cdb)) in pqi_aio_submit_r1_write_io()
5802 rmd->cdb_length = sizeof(r1_request->cdb); in pqi_aio_submit_r1_write_io()
5803 r1_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r1_write_io()
5804 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r1_write_io()
5807 r1_request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_r1_write_io()
5810 r1_request->encryption_enable = true; in pqi_aio_submit_r1_write_io()
5811 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_r1_write_io()
5812 &r1_request->data_encryption_key_index); in pqi_aio_submit_r1_write_io()
5813 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_r1_write_io()
5814 &r1_request->encrypt_tweak_lower); in pqi_aio_submit_r1_write_io()
5815 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_r1_write_io()
5816 &r1_request->encrypt_tweak_upper); in pqi_aio_submit_r1_write_io()
5842 io_request->io_complete_callback = pqi_aio_io_complete; in pqi_aio_submit_r56_write_io()
5843 io_request->scmd = scmd; in pqi_aio_submit_r56_write_io()
5844 io_request->raid_bypass = true; in pqi_aio_submit_r56_write_io()
5846 r56_request = io_request->iu; in pqi_aio_submit_r56_write_io()
5849 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) in pqi_aio_submit_r56_write_io()
5850 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; in pqi_aio_submit_r56_write_io()
5852 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; in pqi_aio_submit_r56_write_io()
5854 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); in pqi_aio_submit_r56_write_io()
5855 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); in pqi_aio_submit_r56_write_io()
5856 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5857 if (rmd->raid_level == SA_RAID_6) { in pqi_aio_submit_r56_write_io()
5858 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); in pqi_aio_submit_r56_write_io()
5859 r56_request->xor_multiplier = rmd->xor_mult; in pqi_aio_submit_r56_write_io()
5861 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); in pqi_aio_submit_r56_write_io()
5862 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; in pqi_aio_submit_r56_write_io()
5863 put_unaligned_le64(rmd->row, &r56_request->row); in pqi_aio_submit_r56_write_io()
5865 put_unaligned_le16(io_request->index, &r56_request->request_id); in pqi_aio_submit_r56_write_io()
5866 r56_request->error_index = r56_request->request_id; in pqi_aio_submit_r56_write_io()
5868 if (rmd->cdb_length > sizeof(r56_request->cdb)) in pqi_aio_submit_r56_write_io()
5869 rmd->cdb_length = sizeof(r56_request->cdb); in pqi_aio_submit_r56_write_io()
5870 r56_request->cdb_length = rmd->cdb_length; in pqi_aio_submit_r56_write_io()
5871 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); in pqi_aio_submit_r56_write_io()
5874 r56_request->data_direction = SOP_READ_FLAG; in pqi_aio_submit_r56_write_io()
5877 r56_request->encryption_enable = true; in pqi_aio_submit_r56_write_io()
5878 put_unaligned_le16(encryption_info->data_encryption_key_index, in pqi_aio_submit_r56_write_io()
5879 &r56_request->data_encryption_key_index); in pqi_aio_submit_r56_write_io()
5880 put_unaligned_le32(encryption_info->encrypt_tweak_lower, in pqi_aio_submit_r56_write_io()
5881 &r56_request->encrypt_tweak_lower); in pqi_aio_submit_r56_write_io()
5882 put_unaligned_le32(encryption_info->encrypt_tweak_upper, in pqi_aio_submit_r56_write_io()
5883 &r56_request->encrypt_tweak_upper); in pqi_aio_submit_r56_write_io()
5911 return pqi_cmd_priv(scmd)->this_residual == 0; in pqi_is_bypass_eligible_request()
5924 if (!scmd->device) { in pqi_prep_for_scsi_done()
5929 device = scmd->device->hostdata; in pqi_prep_for_scsi_done()
5935 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); in pqi_prep_for_scsi_done()
5937 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); in pqi_prep_for_scsi_done()
5953 if (!ctrl_info->enable_stream_detection) in pqi_is_parity_write_stream()
5964 device = scmd->device->hostdata; in pqi_is_parity_write_stream()
5967 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) in pqi_is_parity_write_stream()
5972 * requests down non-AIO path. in pqi_is_parity_write_stream()
5974 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || in pqi_is_parity_write_stream()
5975 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) in pqi_is_parity_write_stream()
5981 pqi_stream_data = &device->stream_data[i]; in pqi_is_parity_write_stream()
5986 if ((pqi_stream_data->next_lba && in pqi_is_parity_write_stream()
5987 rmd.first_block >= pqi_stream_data->next_lba) && in pqi_is_parity_write_stream()
5988 rmd.first_block <= pqi_stream_data->next_lba + in pqi_is_parity_write_stream()
5990 pqi_stream_data->next_lba = rmd.first_block + in pqi_is_parity_write_stream()
5992 pqi_stream_data->last_accessed = jiffies; in pqi_is_parity_write_stream()
5993 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; in pqi_is_parity_write_stream()
5998 if (pqi_stream_data->last_accessed == 0) { in pqi_is_parity_write_stream()
6004 if (pqi_stream_data->last_accessed <= oldest_jiffies) { in pqi_is_parity_write_stream()
6005 oldest_jiffies = pqi_stream_data->last_accessed; in pqi_is_parity_write_stream()
6011 pqi_stream_data = &device->stream_data[lru_index]; in pqi_is_parity_write_stream()
6012 pqi_stream_data->last_accessed = jiffies; in pqi_is_parity_write_stream()
6013 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; in pqi_is_parity_write_stream()
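/*
 * Stream detection, in outline: each device tracks a small table of
 * recent write streams.  A write whose first LBA lands in the window
 * just past a stream's predicted next_lba is treated as sequential;
 * next_lba advances, the per-CPU write_stream_cnt is bumped, and the
 * request stays on the RAID path so the controller can coalesce
 * full-stripe parity writes.  A non-matching write recycles the
 * least-recently-used slot (oldest last_accessed jiffies) to begin
 * following a new stream.
 */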
6028 scmd->host_scribble = PQI_NO_COMPLETION; in pqi_scsi_queue_command()
6030 device = scmd->device->hostdata; in pqi_scsi_queue_command()
6038 lun = (u8)scmd->device->lun; in pqi_scsi_queue_command()
6040 atomic_inc(&device->scsi_cmds_outstanding[lun]); in pqi_scsi_queue_command()
6059 scmd->result = 0; in pqi_scsi_queue_command()
6062 queue_group = &ctrl_info->queue_groups[hw_queue]; in pqi_scsi_queue_command()
6066 if (device->raid_bypass_enabled && in pqi_scsi_queue_command()
6072 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; in pqi_scsi_queue_command()
6078 if (device->aio_enabled) in pqi_scsi_queue_command()
6086 scmd->host_scribble = NULL; in pqi_scsi_queue_command()
6087 atomic_dec(&device->scsi_cmds_outstanding[lun]); in pqi_scsi_queue_command()
6104 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_queued_io_count()
6105 queue_group = &ctrl_info->queue_groups[i]; in pqi_queued_io_count()
6107 spin_lock_irqsave(&queue_group->submit_lock[path], flags); in pqi_queued_io_count()
6108 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) in pqi_queued_io_count()
6110 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); in pqi_queued_io_count()
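/*
 * pqi_queued_io_count() walks every queue group and, under the per-path
 * submit_lock, counts requests still sitting on request_list: I/O the
 * driver has accepted but not yet posted to an inbound queue.
 */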
6128 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_nonempty_inbound_queue_count()
6129 queue_group = &ctrl_info->queue_groups[i]; in pqi_nonempty_inbound_queue_count()
6131 iq_pi = queue_group->iq_pi_copy[path]; in pqi_nonempty_inbound_queue_count()
6132 iq_ci = readl(queue_group->iq_ci[path]); in pqi_nonempty_inbound_queue_count()
6162 return -ENXIO; in pqi_wait_until_inbound_queues_empty()
6164 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6165 …"waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: … in pqi_wait_until_inbound_queues_empty()
6166 … jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); in pqi_wait_until_inbound_queues_empty()
6174 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_until_inbound_queues_empty()
6176 jiffies_to_msecs(jiffies - start_jiffies) / 1000); in pqi_wait_until_inbound_queues_empty()
6193 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_fail_io_queued_for_device()
6194 queue_group = &ctrl_info->queue_groups[i]; in pqi_fail_io_queued_for_device()
6198 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
6201 &queue_group->request_list[path], in pqi_fail_io_queued_for_device()
6204 scmd = io_request->scmd; in pqi_fail_io_queued_for_device()
6208 scsi_device = scmd->device->hostdata; in pqi_fail_io_queued_for_device()
6210 list_del(&io_request->request_list_entry); in pqi_fail_io_queued_for_device()
6211 if (scsi_device == device && (u8)scmd->device->lun == lun) in pqi_fail_io_queued_for_device()
6221 &queue_group->submit_lock[path], flags); in pqi_fail_io_queued_for_device()
6239 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { in pqi_device_wait_for_pending_io()
6240 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_device_wait_for_pending_io()
6243 return -ENXIO; in pqi_device_wait_for_pending_io()
6245 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); in pqi_device_wait_for_pending_io()
6247 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6249 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6251 return -ETIMEDOUT; in pqi_device_wait_for_pending_io()
6254 dev_warn(&ctrl_info->pci_dev->dev, in pqi_device_wait_for_pending_io()
6256 ctrl_info->scsi_host->host_no, device->bus, device->target, in pqi_device_wait_for_pending_io()
6294 rc = -ENXIO; in pqi_wait_for_lun_reset_completion()
6299 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); in pqi_wait_for_lun_reset_completion()
6300 dev_warn(&ctrl_info->pci_dev->dev, in pqi_wait_for_lun_reset_completion()
6302 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); in pqi_wait_for_lun_reset_completion()
6318 io_request->io_complete_callback = pqi_lun_reset_complete; in pqi_lun_reset()
6319 io_request->context = &wait; in pqi_lun_reset()
6321 request = io_request->iu; in pqi_lun_reset()
6324 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; in pqi_lun_reset()
6325 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, in pqi_lun_reset()
6326 &request->header.iu_length); in pqi_lun_reset()
6327 put_unaligned_le16(io_request->index, &request->request_id); in pqi_lun_reset()
6328 memcpy(request->lun_number, device->scsi3addr, in pqi_lun_reset()
6329 sizeof(request->lun_number)); in pqi_lun_reset()
6330 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) in pqi_lun_reset()
6331 request->ml_device_lun_number = lun; in pqi_lun_reset()
6332 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; in pqi_lun_reset()
6333 if (ctrl_info->tmf_iu_timeout_supported) in pqi_lun_reset()
6334 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); in pqi_lun_reset()
6336 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, in pqi_lun_reset()
6341 rc = io_request->status; in pqi_lun_reset()
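/*
 * The LUN reset above is issued as a SOP task-management IU: request_id
 * ties the response back to this io_request, the explicit LUN number is
 * filled in only for physical devices on controllers with multi-LUN
 * support, and a firmware-side timeout is requested only when the
 * controller advertises TMF IU timeout support.
 */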
6362 	if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) in pqi_lun_reset_with_retries()
6400 mutex_lock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6402 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6404 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); in pqi_device_reset_handler()
6412 dev_err(&ctrl_info->pci_dev->dev, in pqi_device_reset_handler()
6414 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, in pqi_device_reset_handler()
6417 mutex_unlock(&ctrl_info->lun_reset_mutex); in pqi_device_reset_handler()
6429 shost = scmd->device->host; in pqi_eh_device_reset_handler()
6431 device = scmd->device->hostdata; in pqi_eh_device_reset_handler()
6432 scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; in pqi_eh_device_reset_handler()
6434 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); in pqi_eh_device_reset_handler()
6443 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); in pqi_tmf_worker()
6445 	pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); in pqi_tmf_worker()
6456 shost = scmd->device->host; in pqi_eh_abort_handler()
6458 device = scmd->device->hostdata; in pqi_eh_abort_handler()
6460 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6462 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
6464 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { in pqi_eh_abort_handler()
6465 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6467 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
6468 scmd->result = DID_RESET << 16; in pqi_eh_abort_handler()
6472 tmf_work = &device->tmf_work[scmd->device->lun]; in pqi_eh_abort_handler()
6474 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { in pqi_eh_abort_handler()
6475 tmf_work->ctrl_info = ctrl_info; in pqi_eh_abort_handler()
6476 tmf_work->device = device; in pqi_eh_abort_handler()
6477 tmf_work->lun = (u8)scmd->device->lun; in pqi_eh_abort_handler()
6478 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; in pqi_eh_abort_handler()
6479 schedule_work(&tmf_work->work_struct); in pqi_eh_abort_handler()
6484 dev_err(&ctrl_info->pci_dev->dev, in pqi_eh_abort_handler()
6486 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); in pqi_eh_abort_handler()
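/*
 * Abort handling hinges on a host_scribble handshake: queuecommand
 * stores the PQI_NO_COMPLETION sentinel, pqi_prep_for_scsi_done()
 * xchg()es it away on normal completion, and the abort handler
 * cmpxchg()es a completion object into its place.  If that swap fails,
 * the command already finished; otherwise pqi_tmf_worker() services the
 * abort by escalating it to a device reset on the command's LUN while
 * the handler waits for the outcome.
 */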
6501 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_alloc()
6503 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6510 if (device->target_lun_valid) { in pqi_slave_alloc()
6511 device->ignore_device = true; in pqi_slave_alloc()
6513 device->target = sdev_id(sdev); in pqi_slave_alloc()
6514 device->lun = sdev->lun; in pqi_slave_alloc()
6515 device->target_lun_valid = true; in pqi_slave_alloc()
6520 sdev_id(sdev), sdev->lun); in pqi_slave_alloc()
6524 sdev->hostdata = device; in pqi_slave_alloc()
6525 device->sdev = sdev; in pqi_slave_alloc()
6526 if (device->queue_depth) { in pqi_slave_alloc()
6527 device->advertised_queue_depth = device->queue_depth; in pqi_slave_alloc()
6529 device->advertised_queue_depth); in pqi_slave_alloc()
6534 sdev->allow_restart = 1; in pqi_slave_alloc()
6535 if (device->device_type == SA_DEVICE_TYPE_NVME) in pqi_slave_alloc()
6540 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_alloc()
6549 if (!ctrl_info->disable_managed_interrupts) in pqi_map_queues()
6550 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], in pqi_map_queues()
6551 ctrl_info->pci_dev, 0); in pqi_map_queues()
6553 return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); in pqi_map_queues()
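/*
 * Queue mapping: when managed interrupts are in use, blk_mq hardware
 * contexts follow the PCI MSI-X affinity masks via
 * blk_mq_pci_map_queues(); otherwise the generic blk_mq_map_queues()
 * software spread is used.
 */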
6558 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; in pqi_is_tape_changer_device()
6566 device = sdev->hostdata; in pqi_slave_configure()
6567 device->devtype = sdev->type; in pqi_slave_configure()
6569 if (pqi_is_tape_changer_device(device) && device->ignore_device) { in pqi_slave_configure()
6570 rc = -ENXIO; in pqi_slave_configure()
6571 device->ignore_device = false; in pqi_slave_configure()
6584 ctrl_info = shost_to_hba(sdev->host); in pqi_slave_destroy()
6586 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6590 device = sdev->hostdata; in pqi_slave_destroy()
6592 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6596 device->lun_count--; in pqi_slave_destroy()
6597 if (device->lun_count > 0) { in pqi_slave_destroy()
6598 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6602 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6603 list_del(&device->scsi_device_list_entry); in pqi_slave_destroy()
6604 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_slave_destroy()
6606 mutex_unlock(&ctrl_info->scan_mutex); in pqi_slave_destroy()
6620 return -EINVAL; in pqi_getpciinfo_ioctl()
6622 pci_dev = ctrl_info->pci_dev; in pqi_getpciinfo_ioctl()
6624 pci_info.domain = pci_domain_nr(pci_dev->bus); in pqi_getpciinfo_ioctl()
6625 pci_info.bus = pci_dev->bus->number; in pqi_getpciinfo_ioctl()
6626 pci_info.dev_fn = pci_dev->devfn; in pqi_getpciinfo_ioctl()
6627 subsystem_vendor = pci_dev->subsystem_vendor; in pqi_getpciinfo_ioctl()
6628 subsystem_device = pci_dev->subsystem_device; in pqi_getpciinfo_ioctl()
6632 return -EFAULT; in pqi_getpciinfo_ioctl()
6642 return -EINVAL; in pqi_getdrivver_ioctl()
6648 return -EFAULT; in pqi_getdrivver_ioctl()
6665 switch (pqi_error_info->data_out_result) { in pqi_error_info_to_ciss()
6709 get_unaligned_le16(&pqi_error_info->sense_data_length); in pqi_error_info_to_ciss()
6712 get_unaligned_le16(&pqi_error_info->response_data_length); in pqi_error_info_to_ciss()
6714 if (sense_data_length > sizeof(pqi_error_info->data)) in pqi_error_info_to_ciss()
6715 sense_data_length = sizeof(pqi_error_info->data); in pqi_error_info_to_ciss()
6717 ciss_error_info->scsi_status = pqi_error_info->status; in pqi_error_info_to_ciss()
6718 ciss_error_info->command_status = ciss_cmd_status; in pqi_error_info_to_ciss()
6719 ciss_error_info->sense_data_length = sense_data_length; in pqi_error_info_to_ciss()
6734 return -ENXIO; in pqi_passthru_ioctl()
6736 return -EBUSY; in pqi_passthru_ioctl()
6738 return -EINVAL; in pqi_passthru_ioctl()
6740 return -EPERM; in pqi_passthru_ioctl()
6742 return -EFAULT; in pqi_passthru_ioctl()
6745 return -EINVAL; in pqi_passthru_ioctl()
6747 return -EINVAL; in pqi_passthru_ioctl()
6749 return -EINVAL; in pqi_passthru_ioctl()
6758 return -EINVAL; in pqi_passthru_ioctl()
6764 return -ENOMEM; in pqi_passthru_ioctl()
6768 rc = -EFAULT; in pqi_passthru_ioctl()
6779 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - in pqi_passthru_ioctl()
6806 rc = pqi_map_single(ctrl_info->pci_dev, in pqi_passthru_ioctl()
6817 if (ctrl_info->raid_iu_timeout_supported) in pqi_passthru_ioctl()
6824 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, in pqi_passthru_ioctl()
6847 rc = -EFAULT; in pqi_passthru_ioctl()
6855 rc = -EFAULT; in pqi_passthru_ioctl()
6871 ctrl_info = shost_to_hba(sdev->host); in pqi_ioctl()
6889 rc = -EINVAL; in pqi_ioctl()
6905 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); in pqi_firmware_version_show()
6917 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); in pqi_serial_number_show()
6929 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); in pqi_model_show()
6941 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); in pqi_vendor_show()
6962 count += scnprintf(buffer + count, PAGE_SIZE - count, in pqi_lockup_action_show()
6965 count += scnprintf(buffer + count, PAGE_SIZE - count, in pqi_lockup_action_show()
6969 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); in pqi_lockup_action_show()
6991 return -EINVAL; in pqi_lockup_action_store()
7001 ctrl_info->enable_stream_detection); in pqi_host_enable_stream_detection_show()
7012 return -EINVAL; in pqi_host_enable_stream_detection_store()
7017 ctrl_info->enable_stream_detection = set_stream_detection; in pqi_host_enable_stream_detection_store()
7028 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); in pqi_host_enable_r5_writes_show()
7039 return -EINVAL; in pqi_host_enable_r5_writes_store()
7044 ctrl_info->enable_r5_writes = set_r5_writes; in pqi_host_enable_r5_writes_store()
7055 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); in pqi_host_enable_r6_writes_show()
7066 return -EINVAL; in pqi_host_enable_r6_writes_store()
7071 ctrl_info->enable_r6_writes = set_r6_writes; in pqi_host_enable_r6_writes_store()
7119 ctrl_info = shost_to_hba(sdev->host); in pqi_unique_id_show()
7122 return -ENODEV; in pqi_unique_id_show()
7124 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7126 device = sdev->hostdata; in pqi_unique_id_show()
7128 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7129 return -ENODEV; in pqi_unique_id_show()
7132 if (device->is_physical_device) in pqi_unique_id_show()
7133 memcpy(unique_id, device->wwid, sizeof(device->wwid)); in pqi_unique_id_show()
7135 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); in pqi_unique_id_show()
7137 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_unique_id_show()
7158 ctrl_info = shost_to_hba(sdev->host); in pqi_lunid_show()
7161 return -ENODEV; in pqi_lunid_show()
7163 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7165 device = sdev->hostdata; in pqi_lunid_show()
7167 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7168 return -ENODEV; in pqi_lunid_show()
7171 memcpy(lunid, device->scsi3addr, sizeof(lunid)); in pqi_lunid_show()
7173 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_lunid_show()
7196 ctrl_info = shost_to_hba(sdev->host); in pqi_path_info_show()
7199 return -ENODEV; in pqi_path_info_show()
7201 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7203 device = sdev->hostdata; in pqi_path_info_show()
7205 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7206 return -ENODEV; in pqi_path_info_show()
7209 bay = device->bay; in pqi_path_info_show()
7212 if (i == device->active_path_index) in pqi_path_info_show()
7214 else if (device->path_map & path_map_index) in pqi_path_info_show()
7220 PAGE_SIZE - output_len, in pqi_path_info_show()
7222 ctrl_info->scsi_host->host_no, in pqi_path_info_show()
7223 device->bus, device->target, in pqi_path_info_show()
7224 device->lun, in pqi_path_info_show()
7225 scsi_device_type(device->devtype)); in pqi_path_info_show()
7227 if (device->devtype == TYPE_RAID || in pqi_path_info_show()
7231 memcpy(&phys_connector, &device->phys_connector[i], in pqi_path_info_show()
7239 PAGE_SIZE - output_len, in pqi_path_info_show()
7242 box = device->box[i]; in pqi_path_info_show()
7245 PAGE_SIZE - output_len, in pqi_path_info_show()
7248 if ((device->devtype == TYPE_DISK || in pqi_path_info_show()
7249 device->devtype == TYPE_ZBC) && in pqi_path_info_show()
7252 PAGE_SIZE - output_len, in pqi_path_info_show()
7257 PAGE_SIZE - output_len, in pqi_path_info_show()
7261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_path_info_show()
7276 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_address_show()
7279 return -ENODEV; in pqi_sas_address_show()
7281 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7283 device = sdev->hostdata; in pqi_sas_address_show()
7285 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7286 return -ENODEV; in pqi_sas_address_show()
7289 sas_address = device->sas_address; in pqi_sas_address_show()
7291 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_address_show()
7305 ctrl_info = shost_to_hba(sdev->host); in pqi_ssd_smart_path_enabled_show()
7308 return -ENODEV; in pqi_ssd_smart_path_enabled_show()
7310 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7312 device = sdev->hostdata; in pqi_ssd_smart_path_enabled_show()
7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7315 return -ENODEV; in pqi_ssd_smart_path_enabled_show()
7318 buffer[0] = device->raid_bypass_enabled ? '1' : '0'; in pqi_ssd_smart_path_enabled_show()
7322 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_ssd_smart_path_enabled_show()
7337 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_level_show()
7340 return -ENODEV; in pqi_raid_level_show()
7342 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7344 device = sdev->hostdata; in pqi_raid_level_show()
7346 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7347 return -ENODEV; in pqi_raid_level_show()
7350 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) in pqi_raid_level_show()
7351 raid_level = pqi_raid_level_to_string(device->raid_level); in pqi_raid_level_show()
7355 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_level_show()
7371 ctrl_info = shost_to_hba(sdev->host); in pqi_raid_bypass_cnt_show()
7374 return -ENODEV; in pqi_raid_bypass_cnt_show()
7376 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7378 device = sdev->hostdata; in pqi_raid_bypass_cnt_show()
7380 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
7381 return -ENODEV; in pqi_raid_bypass_cnt_show()
7386 if (device->raid_io_stats) { in pqi_raid_bypass_cnt_show()
7388 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; in pqi_raid_bypass_cnt_show()
7392 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_raid_bypass_cnt_show()
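/*
 * The bypass counter is per-CPU to keep the fast path cheap (see the
 * raid_bypass_cnt++ in queuecommand); this sysfs read sums the per-CPU
 * values while holding scsi_device_list_lock so the device cannot be
 * torn down mid-walk.
 */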
7407 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_show()
7410 return -ENODEV; in pqi_sas_ncq_prio_enable_show()
7412 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7414 device = sdev->hostdata; in pqi_sas_ncq_prio_enable_show()
7416 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7417 return -ENODEV; in pqi_sas_ncq_prio_enable_show()
7421 device->ncq_prio_enable); in pqi_sas_ncq_prio_enable_show()
7422 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_show()
7438 return -EINVAL; in pqi_sas_ncq_prio_enable_store()
7441 ctrl_info = shost_to_hba(sdev->host); in pqi_sas_ncq_prio_enable_store()
7443 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7445 device = sdev->hostdata; in pqi_sas_ncq_prio_enable_store()
7448 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7449 return -ENODEV; in pqi_sas_ncq_prio_enable_store()
7452 if (!device->ncq_prio_support) { in pqi_sas_ncq_prio_enable_store()
7453 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7454 return -EINVAL; in pqi_sas_ncq_prio_enable_store()
7457 device->ncq_prio_enable = ncq_prio_enable; in pqi_sas_ncq_prio_enable_store()
7459 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_sas_ncq_prio_enable_store()
7471 ctrl_info = shost_to_hba(sdev->host); in pqi_numa_node_show()
7473 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); in pqi_numa_node_show()
7487 ctrl_info = shost_to_hba(sdev->host); in pqi_write_stream_cnt_show()
7490 return -ENODEV; in pqi_write_stream_cnt_show()
7492 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); in pqi_write_stream_cnt_show()
7494 device = sdev->hostdata; in pqi_write_stream_cnt_show()
7496 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_write_stream_cnt_show()
7497 return -ENODEV; in pqi_write_stream_cnt_show()
7502 if (device->raid_io_stats) { in pqi_write_stream_cnt_show()
7504 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; in pqi_write_stream_cnt_show()
7508 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); in pqi_write_stream_cnt_show()
7548 .this_id = -1,
7568 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); in pqi_register_scsi()
7569 return -ENOMEM; in pqi_register_scsi()
7572 shost->io_port = 0; in pqi_register_scsi()
7573 shost->n_io_port = 0; in pqi_register_scsi()
7574 shost->this_id = -1; in pqi_register_scsi()
7575 shost->max_channel = PQI_MAX_BUS; in pqi_register_scsi()
7576 shost->max_cmd_len = MAX_COMMAND_SIZE; in pqi_register_scsi()
7577 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; in pqi_register_scsi()
7578 shost->max_id = ~0; in pqi_register_scsi()
7579 shost->max_sectors = ctrl_info->max_sectors; in pqi_register_scsi()
7580 shost->can_queue = ctrl_info->scsi_ml_can_queue; in pqi_register_scsi()
7581 shost->cmd_per_lun = shost->can_queue; in pqi_register_scsi()
7582 shost->sg_tablesize = ctrl_info->sg_tablesize; in pqi_register_scsi()
7583 shost->transportt = pqi_sas_transport_template; in pqi_register_scsi()
7584 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); in pqi_register_scsi()
7585 shost->unique_id = shost->irq; in pqi_register_scsi()
7586 shost->nr_hw_queues = ctrl_info->num_queue_groups; in pqi_register_scsi()
7587 shost->host_tagset = 1; in pqi_register_scsi()
7588 shost->hostdata[0] = (unsigned long)ctrl_info; in pqi_register_scsi()
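/*
 * host_tagset = 1 gives the block layer one shared tag space across all
 * nr_hw_queues hardware queues, matching the controller-wide can_queue
 * limit; cmd_per_lun mirrors can_queue so a single busy LUN may consume
 * the whole tag space.
 */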
7590 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); in pqi_register_scsi()
7592 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); in pqi_register_scsi()
7598 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); in pqi_register_scsi()
7602 ctrl_info->scsi_host = shost; in pqi_register_scsi()
7620 shost = ctrl_info->scsi_host; in pqi_unregister_scsi()
7636 pqi_registers = ctrl_info->pqi_registers; in pqi_wait_for_pqi_reset_completion()
7637 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; in pqi_wait_for_pqi_reset_completion()
7642 reset_reg.all_bits = readl(&pqi_registers->device_reset); in pqi_wait_for_pqi_reset_completion()
7646 rc = -ENXIO; in pqi_wait_for_pqi_reset_completion()
7650 rc = -ETIMEDOUT; in pqi_wait_for_pqi_reset_completion()
7663 if (ctrl_info->pqi_reset_quiesce_supported) { in pqi_reset()
7666 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7676 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); in pqi_reset()
7680 dev_err(&ctrl_info->pci_dev->dev, in pqi_reset()
7693 return -ENOMEM; in pqi_get_ctrl_serial_number()
7699 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, in pqi_get_ctrl_serial_number()
7700 sizeof(sense_info->ctrl_serial_number)); in pqi_get_ctrl_serial_number()
7701 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; in pqi_get_ctrl_serial_number()
7716 return -ENOMEM; in pqi_get_ctrl_product_details()
7722 if (get_unaligned_le32(&identify->extra_controller_flags) & in pqi_get_ctrl_product_details()
7724 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7725 identify->firmware_version_long, in pqi_get_ctrl_product_details()
7726 sizeof(identify->firmware_version_long)); in pqi_get_ctrl_product_details()
7728 memcpy(ctrl_info->firmware_version, in pqi_get_ctrl_product_details()
7729 identify->firmware_version_short, in pqi_get_ctrl_product_details()
7730 sizeof(identify->firmware_version_short)); in pqi_get_ctrl_product_details()
7731 ctrl_info->firmware_version in pqi_get_ctrl_product_details()
7732 [sizeof(identify->firmware_version_short)] = '\0'; in pqi_get_ctrl_product_details()
7733 snprintf(ctrl_info->firmware_version + in pqi_get_ctrl_product_details()
7734 strlen(ctrl_info->firmware_version), in pqi_get_ctrl_product_details()
7735 sizeof(ctrl_info->firmware_version) - in pqi_get_ctrl_product_details()
7736 sizeof(identify->firmware_version_short), in pqi_get_ctrl_product_details()
7737 "-%u", in pqi_get_ctrl_product_details()
7738 get_unaligned_le16(&identify->firmware_build_number)); in pqi_get_ctrl_product_details()
7741 memcpy(ctrl_info->model, identify->product_id, in pqi_get_ctrl_product_details()
7742 sizeof(identify->product_id)); in pqi_get_ctrl_product_details()
7743 ctrl_info->model[sizeof(identify->product_id)] = '\0'; in pqi_get_ctrl_product_details()
7745 memcpy(ctrl_info->vendor, identify->vendor_id, in pqi_get_ctrl_product_details()
7746 sizeof(identify->vendor_id)); in pqi_get_ctrl_product_details()
7747 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; in pqi_get_ctrl_product_details()
7749 dev_info(&ctrl_info->pci_dev->dev, in pqi_get_ctrl_product_details()
7750 "Firmware version: %s\n", ctrl_info->firmware_version); in pqi_get_ctrl_product_details()
7773 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) in pqi_is_firmware_feature_supported()
7776 return firmware_features->features_supported[byte_index] & in pqi_is_firmware_feature_supported()
7789 (le16_to_cpu(firmware_features->num_elements) * 2); in pqi_is_firmware_feature_enabled()
7806 le16_to_cpu(firmware_features->num_elements); in pqi_request_firmware_feature()
7808 firmware_features->features_supported[byte_index] |= in pqi_request_firmware_feature()
7820 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, in pqi_config_table_update()
7840 features_requested = firmware_features->features_supported + in pqi_enable_firmware_features()
7841 le16_to_cpu(firmware_features->num_elements); in pqi_enable_firmware_features()
7844 (features_requested - (void *)firmware_features); in pqi_enable_firmware_features()
7847 le16_to_cpu(firmware_features->num_elements)); in pqi_enable_firmware_features()
7853 (le16_to_cpu(firmware_features->num_elements) * 2) + in pqi_enable_firmware_features()
7876 if (!firmware_feature->supported) { in pqi_firmware_feature_status()
7877 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", in pqi_firmware_feature_status()
7878 firmware_feature->feature_name); in pqi_firmware_feature_status()
7882 if (firmware_feature->enabled) { in pqi_firmware_feature_status()
7883 dev_info(&ctrl_info->pci_dev->dev, in pqi_firmware_feature_status()
7884 "%s enabled\n", firmware_feature->feature_name); in pqi_firmware_feature_status()
7888 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", in pqi_firmware_feature_status()
7889 firmware_feature->feature_name); in pqi_firmware_feature_status()
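/*
 * Feature negotiation layout, as the index math in the fragments above
 * implies: the firmware-features config-table section carries three
 * parallel byte arrays of num_elements bytes each: features supported,
 * features requested by the host (at offset num_elements), and features
 * enabled by the firmware (at offset num_elements * 2).  A feature bit
 * maps to byte_index = bit / 8 with mask 1 << (bit % 8); e.g. feature
 * bit 13 lives in byte 1 under mask 0x20.  A minimal sketch of the same
 * lookup (illustrative only, not driver code):
 */
static inline bool feature_bit_set(const u8 *bytes, u16 num_elements, unsigned int bit)
{
	unsigned int byte_index = bit / 8;	/* which byte of the array */

	if (byte_index >= num_elements)		/* bit beyond the advertised table */
		return false;

	return (bytes[byte_index] & (1 << (bit % 8))) != 0;
}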
7895 switch (firmware_feature->feature_bit) { in pqi_ctrl_update_feature_flags()
7897 ctrl_info->enable_r1_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7900 ctrl_info->enable_r5_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7903 ctrl_info->enable_r6_writes = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7906 ctrl_info->soft_reset_handshake_supported = in pqi_ctrl_update_feature_flags()
7907 firmware_feature->enabled && in pqi_ctrl_update_feature_flags()
7911 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7914 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7917 ctrl_info->firmware_triage_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7918 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); in pqi_ctrl_update_feature_flags()
7921 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7924 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7927 ctrl_info->ctrl_logging_supported = firmware_feature->enabled; in pqi_ctrl_update_feature_flags()
7937 if (firmware_feature->feature_status) in pqi_firmware_feature_update()
7938 firmware_feature->feature_status(ctrl_info, firmware_feature); in pqi_firmware_feature_update()
8030 .feature_name = "Multi-LUN Target",
8051 ctrl_info = section_info->ctrl_info; in pqi_process_firmware_features()
8052 firmware_features = section_info->section; in pqi_process_firmware_features()
8053 firmware_features_iomem_addr = section_info->section_iomem_addr; in pqi_process_firmware_features()
8080 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_firmware_features()
8130 ctrl_info->heartbeat_counter = NULL; in pqi_ctrl_reset_config()
8131 ctrl_info->soft_reset_status = NULL; in pqi_ctrl_reset_config()
8132 ctrl_info->soft_reset_handshake_supported = false; in pqi_ctrl_reset_config()
8133 ctrl_info->enable_r1_writes = false; in pqi_ctrl_reset_config()
8134 ctrl_info->enable_r5_writes = false; in pqi_ctrl_reset_config()
8135 ctrl_info->enable_r6_writes = false; in pqi_ctrl_reset_config()
8136 ctrl_info->raid_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8137 ctrl_info->tmf_iu_timeout_supported = false; in pqi_ctrl_reset_config()
8138 ctrl_info->firmware_triage_supported = false; in pqi_ctrl_reset_config()
8139 ctrl_info->rpl_extended_format_4_5_supported = false; in pqi_ctrl_reset_config()
8140 ctrl_info->multi_lun_device_supported = false; in pqi_ctrl_reset_config()
8141 ctrl_info->ctrl_logging_supported = false; in pqi_ctrl_reset_config()
8155 table_length = ctrl_info->config_table_length; in pqi_process_config_table()
8161 dev_err(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8163 return -ENOMEM; in pqi_process_config_table()
8170 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; in pqi_process_config_table()
8175 section_offset = get_unaligned_le32(&config_table->first_section_offset); in pqi_process_config_table()
8184 		switch (get_unaligned_le16(&section->section_id)) { in pqi_process_config_table()
8191 dev_warn(&ctrl_info->pci_dev->dev, in pqi_process_config_table()
8194 ctrl_info->heartbeat_counter = in pqi_process_config_table()
8201 ctrl_info->soft_reset_status = in pqi_process_config_table()
8209 		section_offset = get_unaligned_le16(&section->next_section_offset); in pqi_process_config_table()
8237 dev_err(&ctrl_info->pci_dev->dev, in pqi_revert_to_sis_mode()
8238 "re-enabling SIS mode failed with error %d\n", rc); in pqi_revert_to_sis_mode()
8254 return -ENXIO; in pqi_force_sis_mode()
8317 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8331 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8338 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8344 ctrl_info->product_id = (u8)product_id; in pqi_ctrl_init()
8345 ctrl_info->product_revision = (u8)(product_id >> 8); in pqi_ctrl_init()
8348 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8350 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8353 if (ctrl_info->max_outstanding_requests > in pqi_ctrl_init()
8355 ctrl_info->max_outstanding_requests = in pqi_ctrl_init()
8363 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8375 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8380 /* Wait for the controller to complete the SIS -> PQI transition. */ in pqi_ctrl_init()
8383 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8389 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init()
8394 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8401 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8408 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8423 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { in pqi_ctrl_init()
8424 ctrl_info->max_msix_vectors = in pqi_ctrl_init()
8425 ctrl_info->num_msix_vectors_enabled; in pqi_ctrl_init()
8435 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8452 ctrl_info->controller_online = true; in pqi_ctrl_init()
8460 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init()
8463 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8467 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init()
8473 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8483 if (ctrl_info->ctrl_logging_supported && !reset_devices) { in pqi_ctrl_init()
8484 		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); in pqi_ctrl_init()
8485 		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); in pqi_ctrl_init()
8490 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8497 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8504 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8505 "error enabling multi-lun rescan\n"); in pqi_ctrl_init()
8511 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init()
8529 admin_queues = &ctrl_info->admin_queues; in pqi_reinit_queues()
8530 admin_queues->iq_pi_copy = 0; in pqi_reinit_queues()
8531 admin_queues->oq_ci_copy = 0; in pqi_reinit_queues()
8532 writel(0, admin_queues->oq_pi); in pqi_reinit_queues()
8534 for (i = 0; i < ctrl_info->num_queue_groups; i++) { in pqi_reinit_queues()
8535 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; in pqi_reinit_queues()
8536 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; in pqi_reinit_queues()
8537 ctrl_info->queue_groups[i].oq_ci_copy = 0; in pqi_reinit_queues()
8539 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); in pqi_reinit_queues()
8540 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); in pqi_reinit_queues()
8541 writel(0, ctrl_info->queue_groups[i].oq_pi); in pqi_reinit_queues()
8544 event_queue = &ctrl_info->event_queue; in pqi_reinit_queues()
8545 writel(0, event_queue->oq_pi); in pqi_reinit_queues()
8546 event_queue->oq_ci_copy = 0; in pqi_reinit_queues()
8571 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8578 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8590 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8595 /* Wait for the controller to complete the SIS -> PQI transition. */ in pqi_ctrl_init_resume()
8598 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8604 ctrl_info->pqi_mode_enabled = true; in pqi_ctrl_init_resume()
8611 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8622 ctrl_info->controller_online = true; in pqi_ctrl_init_resume()
8633 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { in pqi_ctrl_init_resume()
8636 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8640 ctrl_info->ciss_report_log_flags |= in pqi_ctrl_init_resume()
8646 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8653 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8660 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8661 "error enabling multi-lun rescan\n"); in pqi_ctrl_init_resume()
8667 dev_err(&ctrl_info->pci_dev->dev, in pqi_ctrl_init_resume()
8674 if (ctrl_info->ctrl_logging_supported) { in pqi_ctrl_init_resume()
8675 if (!ctrl_info->ctrl_log_memory.host_memory) in pqi_ctrl_init_resume()
8677 &ctrl_info->ctrl_log_memory, in pqi_ctrl_init_resume()
8681 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); in pqi_ctrl_init_resume()
8683 if (ctrl_info->ctrl_log_memory.host_memory) in pqi_ctrl_init_resume()
8685 &ctrl_info->ctrl_log_memory); in pqi_ctrl_init_resume()
8709 rc = pci_enable_device(ctrl_info->pci_dev); in pqi_pci_init()
8711 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8721 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); in pqi_pci_init()
8723 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); in pqi_pci_init()
8727 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); in pqi_pci_init()
8729 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8734 ctrl_info->iomem_base = ioremap(pci_resource_start( in pqi_pci_init()
8735 ctrl_info->pci_dev, 0), in pqi_pci_init()
8736 pci_resource_len(ctrl_info->pci_dev, 0)); in pqi_pci_init()
8737 if (!ctrl_info->iomem_base) { in pqi_pci_init()
8738 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8740 rc = -ENOMEM; in pqi_pci_init()
8747 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, in pqi_pci_init()
8750 dev_err(&ctrl_info->pci_dev->dev, in pqi_pci_init()
8756 pci_set_master(ctrl_info->pci_dev); in pqi_pci_init()
8758 ctrl_info->registers = ctrl_info->iomem_base; in pqi_pci_init()
8759 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; in pqi_pci_init()
8761 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); in pqi_pci_init()
8766 pci_release_regions(ctrl_info->pci_dev); in pqi_pci_init()
8768 pci_disable_device(ctrl_info->pci_dev); in pqi_pci_init()
8775 iounmap(ctrl_info->iomem_base); in pqi_cleanup_pci_init()
8776 pci_release_regions(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8777 if (pci_is_enabled(ctrl_info->pci_dev)) in pqi_cleanup_pci_init()
8778 pci_disable_device(ctrl_info->pci_dev); in pqi_cleanup_pci_init()
8779 pci_set_drvdata(ctrl_info->pci_dev, NULL); in pqi_cleanup_pci_init()
8791 mutex_init(&ctrl_info->scan_mutex); in pqi_alloc_ctrl_info()
8792 mutex_init(&ctrl_info->lun_reset_mutex); in pqi_alloc_ctrl_info()
8793 mutex_init(&ctrl_info->ofa_mutex); in pqi_alloc_ctrl_info()
8795 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); in pqi_alloc_ctrl_info()
8796 spin_lock_init(&ctrl_info->scsi_device_list_lock); in pqi_alloc_ctrl_info()
8798 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); in pqi_alloc_ctrl_info()
8799 atomic_set(&ctrl_info->num_interrupts, 0); in pqi_alloc_ctrl_info()
8801 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); in pqi_alloc_ctrl_info()
8802 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); in pqi_alloc_ctrl_info()
8804 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); in pqi_alloc_ctrl_info()
8805 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); in pqi_alloc_ctrl_info()
8807 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); in pqi_alloc_ctrl_info()
8808 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); in pqi_alloc_ctrl_info()
8810 sema_init(&ctrl_info->sync_request_sem, in pqi_alloc_ctrl_info()
8812 init_waitqueue_head(&ctrl_info->block_requests_wait); in pqi_alloc_ctrl_info()
8814 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; in pqi_alloc_ctrl_info()
8815 ctrl_info->irq_mode = IRQ_MODE_NONE; in pqi_alloc_ctrl_info()
8816 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; in pqi_alloc_ctrl_info()
8818 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; in pqi_alloc_ctrl_info()
8819 ctrl_info->max_transfer_encrypted_sas_sata = in pqi_alloc_ctrl_info()
8821 ctrl_info->max_transfer_encrypted_nvme = in pqi_alloc_ctrl_info()
8823 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; in pqi_alloc_ctrl_info()
8824 ctrl_info->max_write_raid_1_10_2drive = ~0; in pqi_alloc_ctrl_info()
8825 ctrl_info->max_write_raid_1_10_3drive = ~0; in pqi_alloc_ctrl_info()
8826 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; in pqi_alloc_ctrl_info()
8845 if (ctrl_info->queue_memory_base) in pqi_free_ctrl_resources()
8846 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8847 ctrl_info->queue_memory_length, in pqi_free_ctrl_resources()
8848 ctrl_info->queue_memory_base, in pqi_free_ctrl_resources()
8849 ctrl_info->queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8850 if (ctrl_info->admin_queue_memory_base) in pqi_free_ctrl_resources()
8851 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8852 ctrl_info->admin_queue_memory_length, in pqi_free_ctrl_resources()
8853 ctrl_info->admin_queue_memory_base, in pqi_free_ctrl_resources()
8854 ctrl_info->admin_queue_memory_base_dma_handle); in pqi_free_ctrl_resources()
8856 if (ctrl_info->error_buffer) in pqi_free_ctrl_resources()
8857 dma_free_coherent(&ctrl_info->pci_dev->dev, in pqi_free_ctrl_resources()
8858 ctrl_info->error_buffer_length, in pqi_free_ctrl_resources()
8859 ctrl_info->error_buffer, in pqi_free_ctrl_resources()
8860 ctrl_info->error_buffer_dma_handle); in pqi_free_ctrl_resources()
8861 if (ctrl_info->iomem_base) in pqi_free_ctrl_resources()
8868 ctrl_info->controller_online = false; in pqi_remove_ctrl()
8873 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { in pqi_remove_ctrl()
8875 ctrl_info->pqi_mode_enabled = false; in pqi_remove_ctrl()
8877 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); in pqi_remove_ctrl()
8879 if (ctrl_info->pqi_mode_enabled) in pqi_remove_ctrl()
8925 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); in pqi_host_alloc_mem()
8926 if (!host_memory_descriptor->host_chunk_virt_address) in pqi_host_alloc_mem()
8929 dev = &ctrl_info->pci_dev->dev; in pqi_host_alloc_mem()
8930 host_memory = host_memory_descriptor->host_memory; in pqi_host_alloc_mem()
8933 		host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); in pqi_host_alloc_mem()
8934 if (!host_memory_descriptor->host_chunk_virt_address[i]) in pqi_host_alloc_mem()
8936 mem_descriptor = &host_memory->sg_descriptor[i]; in pqi_host_alloc_mem()
8937 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); in pqi_host_alloc_mem()
8938 put_unaligned_le32(chunk_size, &mem_descriptor->length); in pqi_host_alloc_mem()
8941 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); in pqi_host_alloc_mem()
8942 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); in pqi_host_alloc_mem()
8943 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); in pqi_host_alloc_mem()
8948 while (--i >= 0) { in pqi_host_alloc_mem()
8949 mem_descriptor = &host_memory->sg_descriptor[i]; in pqi_host_alloc_mem()
8951 host_memory_descriptor->host_chunk_virt_address[i], in pqi_host_alloc_mem()
8952 get_unaligned_le64(&mem_descriptor->address)); in pqi_host_alloc_mem()
8954 kfree(host_memory_descriptor->host_chunk_virt_address); in pqi_host_alloc_mem()
8956 return -ENOMEM; in pqi_host_alloc_mem()
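/*
 * Host buffer allocation above is chunked: sg_count coherent DMA chunks
 * of chunk_size each are described by the sg_descriptor[] array inside
 * struct pqi_host_memory (CISS_SG_LAST flags the final entry).  On a
 * partial failure the loop unwinds, freeing every chunk already handed
 * out before the virt-address array itself is freed.
 */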
8987 return -ENOMEM; in pqi_host_alloc_buffer()
8997 dev = &ctrl_info->pci_dev->dev; in pqi_host_setup_buffer()
9000 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); in pqi_host_setup_buffer()
9004 host_memory_descriptor->host_memory = host_memory; in pqi_host_setup_buffer()
9010 host_memory_descriptor->host_memory_dma_handle); in pqi_host_setup_buffer()
9011 host_memory_descriptor->host_memory = NULL; in pqi_host_setup_buffer()
9025 host_memory = host_memory_descriptor->host_memory; in pqi_host_free_buffer()
9029 dev = &ctrl_info->pci_dev->dev; in pqi_host_free_buffer()
9031 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) in pqi_host_free_buffer()
9034 mem_descriptor = host_memory->sg_descriptor; in pqi_host_free_buffer()
9035 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); in pqi_host_free_buffer()
9040 host_memory_descriptor->host_chunk_virt_address[i], in pqi_host_free_buffer()
9043 kfree(host_memory_descriptor->host_chunk_virt_address); in pqi_host_free_buffer()
9047 host_memory_descriptor->host_memory_dma_handle); in pqi_host_free_buffer()
9048 host_memory_descriptor->host_memory = NULL; in pqi_host_free_buffer()
9062 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); in pqi_host_memory_update()
9065 host_memory = host_memory_descriptor->host_memory; in pqi_host_memory_update()
9068 		buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); in pqi_host_memory_update()
9069 		put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); in pqi_host_memory_update()
9073 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); in pqi_host_memory_update()
9074 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); in pqi_host_memory_update()
9076 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); in pqi_host_memory_update()
9077 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); in pqi_host_memory_update()
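/*
 * One vendor-general IU covers both host-memory consumers: the function
 * code selects OFA versus controller-log usage, and the buffer is
 * stamped with the matching signature/version pair so the firmware can
 * validate what it was handed.
 */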
9096 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_fail_all_outstanding_requests()
9097 io_request = &ctrl_info->io_request_pool[i]; in pqi_fail_all_outstanding_requests()
9098 if (atomic_read(&io_request->refcount) == 0) in pqi_fail_all_outstanding_requests()
9101 scmd = io_request->scmd; in pqi_fail_all_outstanding_requests()
9103 sdev = scmd->device; in pqi_fail_all_outstanding_requests()
9111 io_request->status = -ENXIO; in pqi_fail_all_outstanding_requests()
9112 io_request->error_info = in pqi_fail_all_outstanding_requests()
9116 io_request->io_complete_callback(io_request, in pqi_fail_all_outstanding_requests()
9117 io_request->context); in pqi_fail_all_outstanding_requests()
9190 if (!ctrl_info->controller_online) in pqi_take_ctrl_offline()
9193 ctrl_info->controller_online = false; in pqi_take_ctrl_offline()
9194 ctrl_info->pqi_mode_enabled = false; in pqi_take_ctrl_offline()
9198 pci_disable_device(ctrl_info->pci_dev); in pqi_take_ctrl_offline()
9199 dev_err(&ctrl_info->pci_dev->dev, in pqi_take_ctrl_offline()
9202 schedule_work(&ctrl_info->ctrl_offline_work); in pqi_take_ctrl_offline()
9210 if (id->driver_data) in pqi_print_ctrl_info()
9211 ctrl_description = (char *)id->driver_data; in pqi_print_ctrl_info()
9215 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); in pqi_print_ctrl_info()
9228 id->subvendor == PCI_ANY_ID && in pqi_pci_probe()
9229 id->subdevice == PCI_ANY_ID) { in pqi_pci_probe()
9230 dev_warn(&pci_dev->dev, in pqi_pci_probe()
9232 return -ENODEV; in pqi_pci_probe()
9235 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) in pqi_pci_probe()
9236 dev_warn(&pci_dev->dev, in pqi_pci_probe()
9239 node = dev_to_node(&pci_dev->dev); in pqi_pci_probe()
9244 set_dev_node(&pci_dev->dev, node); in pqi_pci_probe()
9249 dev_err(&pci_dev->dev, in pqi_pci_probe()
9251 return -ENOMEM; in pqi_pci_probe()
9253 ctrl_info->numa_node = node; in pqi_pci_probe()
9255 ctrl_info->pci_dev = pci_dev; in pqi_pci_probe()
9283 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); in pqi_pci_remove()
9285 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; in pqi_pci_remove()
9287 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; in pqi_pci_remove()
9289 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { in pqi_pci_remove()
9292 dev_err(&pci_dev->dev, in pqi_pci_remove()
9305 for (i = 0; i < ctrl_info->max_io_slots; i++) { in pqi_crash_if_pending_command()
9306 io_request = &ctrl_info->io_request_pool[i]; in pqi_crash_if_pending_command()
9307 if (atomic_read(&io_request->refcount) == 0) in pqi_crash_if_pending_command()
9309 scmd = io_request->scmd; in pqi_crash_if_pending_command()
9311 		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated */ in pqi_crash_if_pending_command()
9323 dev_err(&pci_dev->dev, in pqi_shutdown()
9341 * Write all data in the controller's battery-backed cache to in pqi_shutdown()
9346 dev_err(&pci_dev->dev, in pqi_shutdown()
9368 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", in pqi_process_lockup_action_param()
9381 …pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - … in pqi_process_ctrl_ready_timeout_param()
9385 		pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", in pqi_process_ctrl_ready_timeout_param()
9403 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) in pqi_get_flush_cache_shutdown_event()
9436 ctrl_info->controller_online = false; in pqi_suspend_or_freeze()
9437 ctrl_info->pqi_mode_enabled = false; in pqi_suspend_or_freeze()
9488 ctrl_info->controller_online = true; in pqi_thaw()
9489 ctrl_info->pqi_mode_enabled = true; in pqi_thaw()
10535 return -ENODEV; in pqi_init()
10728 data.create_operational_iq) != 64 - 11); in pqi_verify_structures()
10730 data.create_operational_oq) != 64 - 11); in pqi_verify_structures()
10732 data.delete_operational_queue) != 64 - 11); in pqi_verify_structures()