--- smartpqi_init.c (1a22bc4bee22b15e933ef4c51a426b6f376d336a)
+++ smartpqi_init.c (6702d2c40f31b200d90614d1b0a841f14ba22ee0)
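This diff between the two smartpqi_init.c revisions named above adds RAID 5 and RAID 6 write support to the driver's RAID bypass (AIO) path. Eligibility is gated by new per-controller enable_r5_writes/enable_r6_writes flags, the RAID-map code learns to locate the parity drives' IT nexuses, a dedicated request format with its own SG-list builder and submission routine carries the writes, and two sysfs attributes expose the flags.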
 // SPDX-License-Identifier: GPL-2.0
 /*
  * driver for Microsemi PQI-based storage controllers
  * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
  * Copyright (c) 2016-2018 Microsemi Corporation
  * Copyright (c) 2016 PMC-Sierra, Inc.
  *
  * Questions/Comments/Bugfixes to storagedev@microchip.com

--- 53 unchanged lines hidden ---

 	struct pqi_io_request *io_request);
 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_iu_header *request, unsigned int flags,
 	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 	struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+	struct pqi_scsi_dev_raid_map_data *rmd);
 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
 	u32 bytes_requested);
 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,

--- 2154 unchanged lines hidden ---

 	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
 	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
 }

 /*
  * Attempt to perform RAID bypass mapping for a logical volume I/O.
  */

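The first functional change: pqi_aio_raid_level_supported() gains the controller context so RAID 5 and RAID 6 writes can be admitted to the bypass path only when the corresponding enable flag is set. Previously SA_RAID_5 fell through to the unconditional is_write rejection under SA_RAID_6. Reads and the remaining RAID levels behave as before.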
-static bool pqi_aio_raid_level_supported(struct pqi_scsi_dev_raid_map_data *rmd)
+static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev_raid_map_data *rmd)
 {
 	bool is_supported = true;

 	switch (rmd->raid_level) {
 	case SA_RAID_0:
 		break;
 	case SA_RAID_1:
 		if (rmd->is_write)
 			is_supported = false;
 		break;
 	case SA_RAID_5:
-		fallthrough;
+		if (rmd->is_write && !ctrl_info->enable_r5_writes)
+			is_supported = false;
+		break;
 	case SA_RAID_6:
-		if (rmd->is_write)
+		if (rmd->is_write && !ctrl_info->enable_r6_writes)
 			is_supported = false;
 		break;
 	case SA_RAID_ADM:
 		if (rmd->is_write)
 			is_supported = false;
 		break;
 	default:
 		is_supported = false;

--- 258 unchanged lines hidden ---

 		((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
 		get_unaligned_le16(&raid_map->row_cnt);

 	rmd->map_index = (rmd->first_group *
 		(get_unaligned_le16(&raid_map->row_cnt) *
 		rmd->total_disks_per_row)) +
 		(rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;

+	if (rmd->is_write) {
+		u32 index;
+
+		/*
+		 * p_parity_it_nexus and q_parity_it_nexus are pointers to the
+		 * parity entries inside the device's raid_map.
+		 *
+		 * A device's RAID map is bounded by the number of RAID disks
+		 * squared.
+		 *
+		 * The device's RAID map size is checked during device
+		 * initialization.
+		 */
+		index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
+		index *= rmd->total_disks_per_row;
+		index -= get_unaligned_le16(&raid_map->metadata_disks_per_row);
+
+		rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
+		if (rmd->raid_level == SA_RAID_6) {
+			rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
+			rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
+		}
+		if (rmd->blocks_per_row == 0)
+			return PQI_RAID_BYPASS_INELIGIBLE;
+#if BITS_PER_LONG == 32
+		tmpdiv = rmd->first_block;
+		do_div(tmpdiv, rmd->blocks_per_row);
+		rmd->row = tmpdiv;
+#else
+		rmd->row = rmd->first_block / rmd->blocks_per_row;
+#endif
+	}
+
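The index arithmetic above rounds map_index up to the end of its RAID-map row, then steps back over the row's metadata entries to land on the P-parity slot; for RAID 6 the Q-parity entry occupies the next slot. The hunk also guards the rmd->row division against a zero blocks_per_row. A self-contained sketch of the index math under a hypothetical geometry (the helper name is invented for illustration):

```c
#include <assert.h>

/* Round-up division, as defined by the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Index of the P-parity entry for the RAID-map row containing
 * map_index; with RAID 6, Q parity occupies the following slot.
 * (Illustrative helper, not part of the driver.)
 */
static unsigned int p_parity_index(unsigned int map_index,
	unsigned int total_disks_per_row,
	unsigned int metadata_disks_per_row)
{
	unsigned int index;

	/* Round up to one past the end of this row... */
	index = DIV_ROUND_UP(map_index + 1, total_disks_per_row);
	index *= total_disks_per_row;
	/* ...then back up over the parity (metadata) entries. */
	return index - metadata_disks_per_row;
}

int main(void)
{
	/*
	 * Hypothetical RAID 6 geometry: 3 data + 2 parity drives per
	 * row.  Row 1 spans entries 5..9: data at 5-7, P at 8, Q at 9.
	 */
	assert(p_parity_index(7, 5, 2) == 8);
	return 0;
}
```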
 	return 0;
 }

 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
 {
 	/* Build the new CDB for the physical disk I/O. */
 	if (rmd->disk_block > 0xffffffff) {
 		rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;

--- 25 unchanged lines hidden ---

 	struct pqi_scsi_dev_raid_map_data rmd = {0};

 	rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
 	if (rc)
 		return PQI_RAID_BYPASS_INELIGIBLE;

 	rmd.raid_level = device->raid_level;

-	if (!pqi_aio_raid_level_supported(&rmd))
+	if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
 		return PQI_RAID_BYPASS_INELIGIBLE;

 	if (unlikely(rmd.block_cnt == 0))
 		return PQI_RAID_BYPASS_INELIGIBLE;

 	raid_map = device->raid_map;

 	rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
 	if (rc)
 		return PQI_RAID_BYPASS_INELIGIBLE;

 	/* RAID 1 */
 	if (device->raid_level == SA_RAID_1) {
 		if (device->offload_to_mirror)
 			rmd.map_index += rmd.data_disks_per_row;
 		device->offload_to_mirror = !device->offload_to_mirror;
 	} else if (device->raid_level == SA_RAID_ADM) {
 		rc = pqi_calc_aio_raid_adm(&rmd, device);
 	} else if ((device->raid_level == SA_RAID_5 ||
-		device->raid_level == SA_RAID_6) && rmd.layout_map_count > 1) {
+		device->raid_level == SA_RAID_6) &&
+		(rmd.layout_map_count > 1 || rmd.is_write)) {
 		rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
 		if (rc)
 			return PQI_RAID_BYPASS_INELIGIBLE;
 	}

 	if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
 		return PQI_RAID_BYPASS_INELIGIBLE;

--- 18 unchanged lines hidden ---

 		RAID_MAP_ENCRYPTION_ENABLED) {
 		pqi_set_encryption_info(&encryption_info, raid_map,
 			rmd.first_block);
 		encryption_info_ptr = &encryption_info;
 	} else {
 		encryption_info_ptr = NULL;
 	}

-	return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
-		rmd.cdb, rmd.cdb_length, queue_group,
-		encryption_info_ptr, true);
+	if (rmd.is_write) {
+		switch (device->raid_level) {
+		case SA_RAID_0:
+			return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+				rmd.cdb, rmd.cdb_length, queue_group,
+				encryption_info_ptr, true);
+		case SA_RAID_5:
+		case SA_RAID_6:
+			return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group,
+				encryption_info_ptr, device, &rmd);
+		default:
+			return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+				rmd.cdb, rmd.cdb_length, queue_group,
+				encryption_info_ptr, true);
+		}
+	} else {
+		return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
+			rmd.cdb, rmd.cdb_length, queue_group,
+			encryption_info_ptr, true);
+	}
+
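Reads, and writes on volumes that are not RAID 5/6, still go through pqi_aio_submit_io() exactly as before; only RAID 5/6 writes divert to the new pqi_aio_submit_r56_write_io(). RAID 0 gets its own case even though it matches the default, presumably to make the per-level write dispatch explicit.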
 }

 #define PQI_STATUS_IDLE				0x0

 #define PQI_CREATE_ADMIN_QUEUE_PAIR	1
 #define PQI_DELETE_ADMIN_QUEUE_PAIR	2

 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0

--- 2203 unchanged lines hidden ---

 	ctrl_info->num_elements_per_iq = num_elements_per_iq;
 	ctrl_info->num_elements_per_oq = num_elements_per_oq;

 	ctrl_info->max_sg_per_iu =
 		((ctrl_info->max_inbound_iu_length -
 		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
 		sizeof(struct pqi_sg_descriptor)) +
 		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+
+	ctrl_info->max_sg_per_r56_iu =
+		((ctrl_info->max_inbound_iu_length -
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+		sizeof(struct pqi_sg_descriptor)) +
+		PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS;
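max_sg_per_r56_iu follows the same formula as max_sg_per_iu, differing only in the count of SG descriptors embedded in the IU itself, presumably because the R5/6 request's larger fixed body leaves room for a different number of inline descriptors. With purely hypothetical values (max_inbound_iu_length = 1024, PQI_OPERATIONAL_IQ_ELEMENT_LENGTH = 128, a 16-byte pqi_sg_descriptor, and 3 embedded R5/6 descriptors), this works out to (1024 - 128) / 16 + 3 = 59 descriptors per IU.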
 }

 static inline void pqi_set_sg_descriptor(
 	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
 {
 	u64 address = (u64)sg_dma_address(sg);
 	unsigned int length = sg_dma_len(sg);

--- 71 unchanged lines hidden ---

 	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

 out:
 	put_unaligned_le16(iu_length, &request->header.iu_length);

 	return 0;
 }

+static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
+	struct pqi_io_request *io_request)
+{
+	u16 iu_length;
+	int sg_count;
+	bool chained;
+	unsigned int num_sg_in_iu;
+	struct scatterlist *sg;
+	struct pqi_sg_descriptor *sg_descriptor;
+
+	sg_count = scsi_dma_map(scmd);
+	if (sg_count < 0)
+		return sg_count;
+
+	iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) -
+		PQI_REQUEST_HEADER_LENGTH;
+	num_sg_in_iu = 0;
+
+	if (sg_count != 0) {
+		sg = scsi_sglist(scmd);
+		sg_descriptor = request->sg_descriptors;
+
+		num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request,
+			ctrl_info->max_sg_per_r56_iu, &chained);
+
+		request->partial = chained;
+		iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+	}
+
+	put_unaligned_le16(iu_length, &request->header.iu_length);
+	request->num_sg_descriptors = num_sg_in_iu;
+
+	return 0;
+}
+
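The IU length reported in the header counts everything past the common request header: the fixed body of the request plus only the SG descriptors actually populated. A standalone sketch of that computation, using an invented, simplified layout in place of the real struct pqi_aio_r56_path_request:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's types and constants. */
struct sg_descriptor {
	uint64_t address;
	uint32_t length;
	uint32_t flags;
};

struct r56_path_request_example {
	uint8_t header[4];		/* stand-in for the common IU header */
	uint8_t fixed_fields[60];	/* stand-in for the fixed request body */
	struct sg_descriptor sg_descriptors[3];
};

#define REQUEST_HEADER_LENGTH 4	/* stand-in for PQI_REQUEST_HEADER_LENGTH */

int main(void)
{
	unsigned int num_sg_in_iu = 2;	/* descriptors actually populated */
	uint16_t iu_length =
		offsetof(struct r56_path_request_example, sg_descriptors) -
		REQUEST_HEADER_LENGTH +
		num_sg_in_iu * sizeof(struct sg_descriptor);

	/* offsetof = 64, so 64 - 4 + 2 * 16 = 92 */
	printf("iu_length = %u\n", (unsigned int)iu_length);
	return 0;
}
```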
 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
 	struct pqi_io_request *io_request)
 {
 	u16 iu_length;
 	int sg_count;
 	bool chained;
 	unsigned int num_sg_in_iu;

--- 388 unchanged lines hidden ---

 		return SCSI_MLQUEUE_HOST_BUSY;
 	}

 	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

 	return 0;
 }

+static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
+	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
+	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
+	struct pqi_scsi_dev_raid_map_data *rmd)
+{
+	int rc;
+	struct pqi_io_request *io_request;
+	struct pqi_aio_r56_path_request *r56_request;
+
+	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request->io_complete_callback = pqi_aio_io_complete;
+	io_request->scmd = scmd;
+	io_request->raid_bypass = true;
+
+	r56_request = io_request->iu;
+	memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors));
+
+	if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51)
+		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO;
+	else
+		r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO;
+
+	put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id);
+	put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
+	put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
+	if (rmd->raid_level == SA_RAID_6) {
+		put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
+		r56_request->xor_multiplier = rmd->xor_mult;
+	}
+	put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length);
+	r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+	put_unaligned_le64(rmd->row, &r56_request->row);
+
+	put_unaligned_le16(io_request->index, &r56_request->request_id);
+	r56_request->error_index = r56_request->request_id;
+
+	if (rmd->cdb_length > sizeof(r56_request->cdb))
+		rmd->cdb_length = sizeof(r56_request->cdb);
+	r56_request->cdb_length = rmd->cdb_length;
+	memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
+
+	/* The direction is always write. */
+	r56_request->data_direction = SOP_READ_FLAG;
+
+	if (encryption_info) {
+		r56_request->encryption_enable = true;
+		put_unaligned_le16(encryption_info->data_encryption_key_index,
+			&r56_request->data_encryption_key_index);
+		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+			&r56_request->encrypt_tweak_lower);
+		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+			&r56_request->encrypt_tweak_upper);
+	}
+
+	rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request);
+	if (rc) {
+		pqi_free_io_request(io_request);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+	return 0;
+}
+
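The new IU carries everything the controller needs to update parity itself: the data drive's IT nexus plus the P (and, for RAID 6, Q) parity nexuses, XOR multiplier, and row computed earlier, with the CDB clamped to the IU's fixed-size CDB field. io_request->raid_bypass is set so that a failure on the accelerated path can be retried down the conventional RAID path, as with other bypass requests. Note that data_direction is SOP_READ_FLAG under a comment saying the direction is always write; the flag appears to be named from the controller's DMA perspective (a host write is a controller read from host memory), though the IU definition is not part of this diff.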
 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
 	struct scsi_cmnd *scmd)
 {
 	u16 hw_queue;

 	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
 	if (hw_queue > ctrl_info->max_hw_queue_index)
 		hw_queue = 0;

--- 951 unchanged lines hidden ---

 			pqi_lockup_action = pqi_lockup_actions[i].action;
 			return count;
 		}
 	}

 	return -EINVAL;
 }

+static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes);
+}
+
+static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
+	struct device_attribute *attr, const char *buffer, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+	u8 set_r5_writes = 0;
+
+	if (kstrtou8(buffer, 0, &set_r5_writes))
+		return -EINVAL;
+
+	if (set_r5_writes > 0)
+		set_r5_writes = 1;
+
+	ctrl_info->enable_r5_writes = set_r5_writes;
+
+	return count;
+}
+
+static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+
+	return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes);
+}
+
+static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
+	struct device_attribute *attr, const char *buffer, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
+	u8 set_r6_writes = 0;
+
+	if (kstrtou8(buffer, 0, &set_r6_writes))
+		return -EINVAL;
+
+	if (set_r6_writes > 0)
+		set_r6_writes = 1;
+
+	ctrl_info->enable_r6_writes = set_r6_writes;
+
+	return count;
+}
+
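Because these are Scsi_Host attributes, they surface under /sys/class/scsi_host/hostN/. A minimal userspace sketch for flipping the switch; the host number is hypothetical, and per the kstrtou8 handling above any non-zero value is stored as 1:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical host number; find the right one under
	 * /sys/class/scsi_host/. */
	const char *path = "/sys/class/scsi_host/host0/enable_r5_writes";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {	/* opt in to RAID 5 write bypass */
		perror("write");
		close(fd);
		return 1;
	}
	return close(fd) ? 1 : 0;
}
```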
 static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
 static DEVICE_ATTR(lockup_action, 0644,
 	pqi_lockup_action_show, pqi_lockup_action_store);
+static DEVICE_ATTR(enable_r5_writes, 0644,
+	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
+static DEVICE_ATTR(enable_r6_writes, 0644,
+	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);

 static struct device_attribute *pqi_shost_attrs[] = {
 	&dev_attr_driver_version,
 	&dev_attr_firmware_version,
 	&dev_attr_model,
 	&dev_attr_serial_number,
 	&dev_attr_vendor,
 	&dev_attr_rescan,
 	&dev_attr_lockup_action,
+	&dev_attr_enable_r5_writes,
+	&dev_attr_enable_r6_writes,
 	NULL
 };

 static ssize_t pqi_unique_id_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
 {
 	struct pqi_ctrl_info *ctrl_info;
 	struct scsi_device *sdev;

--- 2769 unchanged lines hidden ---