1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * driver for Microchip PQI-based storage controllers 4 * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries 5 * Copyright (c) 2016-2018 Microsemi Corporation 6 * Copyright (c) 2016 PMC-Sierra, Inc. 7 * 8 * Questions/Comments/Bugfixes to storagedev@microchip.com 9 * 10 */ 11 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/pci.h> 15 #include <linux/delay.h> 16 #include <linux/interrupt.h> 17 #include <linux/sched.h> 18 #include <linux/rtc.h> 19 #include <linux/bcd.h> 20 #include <linux/reboot.h> 21 #include <linux/cciss_ioctl.h> 22 #include <scsi/scsi_host.h> 23 #include <scsi/scsi_cmnd.h> 24 #include <scsi/scsi_device.h> 25 #include <scsi/scsi_eh.h> 26 #include <scsi/scsi_transport_sas.h> 27 #include <linux/unaligned.h> 28 #include "smartpqi.h" 29 #include "smartpqi_sis.h" 30 31 #if !defined(BUILD_TIMESTAMP) 32 #define BUILD_TIMESTAMP 33 #endif 34 35 #define DRIVER_VERSION "2.1.30-031" 36 #define DRIVER_MAJOR 2 37 #define DRIVER_MINOR 1 38 #define DRIVER_RELEASE 30 39 #define DRIVER_REVISION 31 40 41 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ 42 DRIVER_VERSION BUILD_TIMESTAMP ")" 43 #define DRIVER_NAME_SHORT "smartpqi" 44 45 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) 46 47 #define PQI_POST_RESET_DELAY_SECS 5 48 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 49 50 #define PQI_NO_COMPLETION ((void *)-1) 51 52 MODULE_AUTHOR("Microchip"); 53 MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " 54 DRIVER_VERSION); 55 MODULE_VERSION(DRIVER_VERSION); 56 MODULE_LICENSE("GPL"); 57 58 struct pqi_cmd_priv { 59 int this_residual; 60 }; 61 62 static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd) 63 { 64 return scsi_cmd_priv(cmd); 65 } 66 67 static void pqi_verify_structures(void); 68 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 69 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason); 70 static void pqi_ctrl_offline_worker(struct work_struct *work); 71 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); 72 static void pqi_scan_start(struct Scsi_Host *shost); 73 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 74 struct pqi_queue_group *queue_group, enum pqi_io_path path, 75 struct pqi_io_request *io_request); 76 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 77 struct pqi_iu_header *request, unsigned int flags, 78 struct pqi_raid_error_info *error_info); 79 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 80 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 81 unsigned int cdb_length, struct pqi_queue_group *queue_group, 82 struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio); 83 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 84 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 85 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 86 struct pqi_scsi_dev_raid_map_data *rmd); 87 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 88 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 89 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 90 struct pqi_scsi_dev_raid_map_data *rmd); 91 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info); 92 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info); 93 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs); 94 
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size); 95 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor); 96 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code); 97 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 98 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs); 99 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info); 100 static void pqi_tmf_worker(struct work_struct *work); 101 102 /* for flags argument to pqi_submit_raid_request_synchronous() */ 103 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 104 105 static struct scsi_transport_template *pqi_sas_transport_template; 106 107 static atomic_t pqi_controller_count = ATOMIC_INIT(0); 108 109 enum pqi_lockup_action { 110 NONE, 111 REBOOT, 112 PANIC 113 }; 114 115 static enum pqi_lockup_action pqi_lockup_action = NONE; 116 117 static struct { 118 enum pqi_lockup_action action; 119 char *name; 120 } pqi_lockup_actions[] = { 121 { 122 .action = NONE, 123 .name = "none", 124 }, 125 { 126 .action = REBOOT, 127 .name = "reboot", 128 }, 129 { 130 .action = PANIC, 131 .name = "panic", 132 }, 133 }; 134 135 static unsigned int pqi_supported_event_types[] = { 136 PQI_EVENT_TYPE_HOTPLUG, 137 PQI_EVENT_TYPE_HARDWARE, 138 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 139 PQI_EVENT_TYPE_LOGICAL_DEVICE, 140 PQI_EVENT_TYPE_OFA, 141 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 142 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 143 }; 144 145 static int pqi_disable_device_id_wildcards; 146 module_param_named(disable_device_id_wildcards, 147 pqi_disable_device_id_wildcards, int, 0644); 148 MODULE_PARM_DESC(disable_device_id_wildcards, 149 "Disable device ID wildcards."); 150 151 static int pqi_disable_heartbeat; 152 module_param_named(disable_heartbeat, 153 pqi_disable_heartbeat, int, 0644); 154 MODULE_PARM_DESC(disable_heartbeat, 155 "Disable heartbeat."); 156 157 static int pqi_disable_ctrl_shutdown; 158 module_param_named(disable_ctrl_shutdown, 159 pqi_disable_ctrl_shutdown, int, 0644); 160 MODULE_PARM_DESC(disable_ctrl_shutdown, 161 "Disable controller shutdown when controller locked up."); 162 163 static char *pqi_lockup_action_param; 164 module_param_named(lockup_action, 165 pqi_lockup_action_param, charp, 0644); 166 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" 167 "\t\tSupported: none, reboot, panic\n" 168 "\t\tDefault: none"); 169 170 static int pqi_expose_ld_first; 171 module_param_named(expose_ld_first, 172 pqi_expose_ld_first, int, 0644); 173 MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives."); 174 175 static int pqi_hide_vsep; 176 module_param_named(hide_vsep, 177 pqi_hide_vsep, int, 0644); 178 MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives."); 179 180 static int pqi_disable_managed_interrupts; 181 module_param_named(disable_managed_interrupts, 182 pqi_disable_managed_interrupts, int, 0644); 183 MODULE_PARM_DESC(disable_managed_interrupts, 184 "Disable the kernel automatically assigning SMP affinity to IRQs."); 185 186 static unsigned int pqi_ctrl_ready_timeout_secs; 187 module_param_named(ctrl_ready_timeout, 188 pqi_ctrl_ready_timeout_secs, uint, 0644); 189 MODULE_PARM_DESC(ctrl_ready_timeout, 190 "Timeout in seconds for driver to wait for controller ready."); 
191 192 static char *raid_levels[] = { 193 "RAID-0", 194 "RAID-4", 195 "RAID-1(1+0)", 196 "RAID-5", 197 "RAID-5+1", 198 "RAID-6", 199 "RAID-1(Triple)", 200 }; 201 202 static char *pqi_raid_level_to_string(u8 raid_level) 203 { 204 if (raid_level < ARRAY_SIZE(raid_levels)) 205 return raid_levels[raid_level]; 206 207 return "RAID UNKNOWN"; 208 } 209 210 #define SA_RAID_0 0 211 #define SA_RAID_4 1 212 #define SA_RAID_1 2 /* also used for RAID 10 */ 213 #define SA_RAID_5 3 /* also used for RAID 50 */ 214 #define SA_RAID_51 4 215 #define SA_RAID_6 5 /* also used for RAID 60 */ 216 #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ 217 #define SA_RAID_MAX SA_RAID_TRIPLE 218 #define SA_RAID_UNKNOWN 0xff 219 220 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 221 { 222 pqi_prep_for_scsi_done(scmd); 223 scsi_done(scmd); 224 } 225 226 static inline void pqi_disable_write_same(struct scsi_device *sdev) 227 { 228 sdev->no_write_same = 1; 229 } 230 231 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 232 { 233 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 234 } 235 236 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 237 { 238 return !device->is_physical_device; 239 } 240 241 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 242 { 243 return scsi3addr[2] != 0; 244 } 245 246 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 247 { 248 return !ctrl_info->controller_online; 249 } 250 251 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 252 { 253 if (ctrl_info->controller_online) 254 if (!sis_is_firmware_running(ctrl_info)) 255 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); 256 } 257 258 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 259 { 260 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 261 } 262 263 #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 264 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 265 266 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) 267 { 268 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? 
PQI_MODE : SIS_MODE; 269 } 270 271 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 272 enum pqi_ctrl_mode mode) 273 { 274 u32 driver_scratch; 275 276 driver_scratch = sis_read_driver_scratch(ctrl_info); 277 278 if (mode == PQI_MODE) 279 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; 280 else 281 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; 282 283 sis_write_driver_scratch(ctrl_info, driver_scratch); 284 } 285 286 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) 287 { 288 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; 289 } 290 291 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) 292 { 293 u32 driver_scratch; 294 295 driver_scratch = sis_read_driver_scratch(ctrl_info); 296 297 if (is_supported) 298 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 299 else 300 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 301 302 sis_write_driver_scratch(ctrl_info, driver_scratch); 303 } 304 305 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) 306 { 307 ctrl_info->scan_blocked = true; 308 mutex_lock(&ctrl_info->scan_mutex); 309 } 310 311 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) 312 { 313 ctrl_info->scan_blocked = false; 314 mutex_unlock(&ctrl_info->scan_mutex); 315 } 316 317 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) 318 { 319 return ctrl_info->scan_blocked; 320 } 321 322 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) 323 { 324 mutex_lock(&ctrl_info->lun_reset_mutex); 325 } 326 327 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) 328 { 329 mutex_unlock(&ctrl_info->lun_reset_mutex); 330 } 331 332 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) 333 { 334 struct Scsi_Host *shost; 335 unsigned int num_loops; 336 int msecs_sleep; 337 338 shost = ctrl_info->scsi_host; 339 340 scsi_block_requests(shost); 341 342 num_loops = 0; 343 msecs_sleep = 20; 344 while (scsi_host_busy(shost)) { 345 num_loops++; 346 if (num_loops == 10) 347 msecs_sleep = 500; 348 msleep(msecs_sleep); 349 } 350 } 351 352 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) 353 { 354 scsi_unblock_requests(ctrl_info->scsi_host); 355 } 356 357 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) 358 { 359 atomic_inc(&ctrl_info->num_busy_threads); 360 } 361 362 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) 363 { 364 atomic_dec(&ctrl_info->num_busy_threads); 365 } 366 367 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 368 { 369 return ctrl_info->block_requests; 370 } 371 372 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) 373 { 374 ctrl_info->block_requests = true; 375 } 376 377 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 378 { 379 ctrl_info->block_requests = false; 380 wake_up_all(&ctrl_info->block_requests_wait); 381 } 382 383 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 384 { 385 if (!pqi_ctrl_blocked(ctrl_info)) 386 return; 387 388 atomic_inc(&ctrl_info->num_blocked_threads); 389 wait_event(ctrl_info->block_requests_wait, 390 !pqi_ctrl_blocked(ctrl_info)); 391 atomic_dec(&ctrl_info->num_blocked_threads); 392 } 393 394 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 395 396 static inline void pqi_ctrl_wait_until_quiesced(struct 
pqi_ctrl_info *ctrl_info) 397 { 398 unsigned long start_jiffies; 399 unsigned long warning_timeout; 400 bool displayed_warning; 401 402 displayed_warning = false; 403 start_jiffies = jiffies; 404 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 405 406 while (atomic_read(&ctrl_info->num_busy_threads) > 407 atomic_read(&ctrl_info->num_blocked_threads)) { 408 if (time_after(jiffies, warning_timeout)) { 409 dev_warn(&ctrl_info->pci_dev->dev, 410 "waiting %u seconds for driver activity to quiesce\n", 411 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 412 displayed_warning = true; 413 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies; 414 } 415 usleep_range(1000, 2000); 416 } 417 418 if (displayed_warning) 419 dev_warn(&ctrl_info->pci_dev->dev, 420 "driver activity quiesced after waiting for %u seconds\n", 421 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 422 } 423 424 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 425 { 426 return device->device_offline; 427 } 428 429 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) 430 { 431 mutex_lock(&ctrl_info->ofa_mutex); 432 } 433 434 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) 435 { 436 mutex_unlock(&ctrl_info->ofa_mutex); 437 } 438 439 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 440 { 441 mutex_lock(&ctrl_info->ofa_mutex); 442 mutex_unlock(&ctrl_info->ofa_mutex); 443 } 444 445 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) 446 { 447 return mutex_is_locked(&ctrl_info->ofa_mutex); 448 } 449 450 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) 451 { 452 device->in_remove = true; 453 } 454 455 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) 456 { 457 return device->in_remove; 458 } 459 460 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) 461 { 462 device->in_reset[lun] = true; 463 } 464 465 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) 466 { 467 device->in_reset[lun] = false; 468 } 469 470 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) 471 { 472 return device->in_reset[lun]; 473 } 474 475 static inline int pqi_event_type_to_event_index(unsigned int event_type) 476 { 477 int index; 478 479 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 480 if (event_type == pqi_supported_event_types[index]) 481 return index; 482 483 return -1; 484 } 485 486 static inline bool pqi_is_supported_event(unsigned int event_type) 487 { 488 return pqi_event_type_to_event_index(event_type) != -1; 489 } 490 491 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, 492 unsigned long delay) 493 { 494 if (pqi_ctrl_offline(ctrl_info)) 495 return; 496 497 schedule_delayed_work(&ctrl_info->rescan_work, delay); 498 } 499 500 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 501 { 502 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 503 } 504 505 #define PQI_RESCAN_WORK_DELAY (10 * HZ) 506 507 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) 508 { 509 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 510 } 511 512 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 513 { 514 cancel_delayed_work_sync(&ctrl_info->rescan_work); 515 } 516 517 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info 
*ctrl_info) 518 { 519 if (!ctrl_info->heartbeat_counter) 520 return 0; 521 522 return readl(ctrl_info->heartbeat_counter); 523 } 524 525 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 526 { 527 return readb(ctrl_info->soft_reset_status); 528 } 529 530 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 531 { 532 u8 status; 533 534 status = pqi_read_soft_reset_status(ctrl_info); 535 status &= ~PQI_SOFT_RESET_ABORT; 536 writeb(status, ctrl_info->soft_reset_status); 537 } 538 539 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) 540 { 541 bool io_high_prio; 542 int priority_class; 543 544 io_high_prio = false; 545 546 if (device->ncq_prio_enable) { 547 priority_class = 548 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); 549 if (priority_class == IOPRIO_CLASS_RT) { 550 /* Set NCQ priority for read/write commands. */ 551 switch (scmd->cmnd[0]) { 552 case WRITE_16: 553 case READ_16: 554 case WRITE_12: 555 case READ_12: 556 case WRITE_10: 557 case READ_10: 558 case WRITE_6: 559 case READ_6: 560 io_high_prio = true; 561 break; 562 } 563 } 564 } 565 566 return io_high_prio; 567 } 568 569 static int pqi_map_single(struct pci_dev *pci_dev, 570 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 571 size_t buffer_length, enum dma_data_direction data_direction) 572 { 573 dma_addr_t bus_address; 574 575 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) 576 return 0; 577 578 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, 579 data_direction); 580 if (dma_mapping_error(&pci_dev->dev, bus_address)) 581 return -ENOMEM; 582 583 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 584 put_unaligned_le32(buffer_length, &sg_descriptor->length); 585 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 586 587 return 0; 588 } 589 590 static void pqi_pci_unmap(struct pci_dev *pci_dev, 591 struct pqi_sg_descriptor *descriptors, int num_descriptors, 592 enum dma_data_direction data_direction) 593 { 594 int i; 595 596 if (data_direction == DMA_NONE) 597 return; 598 599 for (i = 0; i < num_descriptors; i++) 600 dma_unmap_single(&pci_dev->dev, 601 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 602 get_unaligned_le32(&descriptors[i].length), 603 data_direction); 604 } 605 606 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 607 struct pqi_raid_path_request *request, u8 cmd, 608 u8 *scsi3addr, void *buffer, size_t buffer_length, 609 u16 vpd_page, enum dma_data_direction *dir) 610 { 611 u8 *cdb; 612 size_t cdb_length = buffer_length; 613 614 memset(request, 0, sizeof(*request)); 615 616 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 617 put_unaligned_le16(offsetof(struct pqi_raid_path_request, 618 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, 619 &request->header.iu_length); 620 put_unaligned_le32(buffer_length, &request->buffer_length); 621 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); 622 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 623 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 624 625 cdb = request->cdb; 626 627 switch (cmd) { 628 case INQUIRY: 629 request->data_direction = SOP_READ_FLAG; 630 cdb[0] = INQUIRY; 631 if (vpd_page & VPD_PAGE) { 632 cdb[1] = 0x1; 633 cdb[2] = (u8)vpd_page; 634 } 635 cdb[4] = (u8)cdb_length; 636 break; 637 case CISS_REPORT_LOG: 638 case CISS_REPORT_PHYS: 639 request->data_direction = SOP_READ_FLAG; 640 cdb[0] = cmd; 641 if (cmd == 
CISS_REPORT_PHYS) { 642 if (ctrl_info->rpl_extended_format_4_5_supported) 643 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; 644 else 645 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; 646 } else { 647 cdb[1] = ctrl_info->ciss_report_log_flags; 648 } 649 put_unaligned_be32(cdb_length, &cdb[6]); 650 break; 651 case CISS_GET_RAID_MAP: 652 request->data_direction = SOP_READ_FLAG; 653 cdb[0] = CISS_READ; 654 cdb[1] = CISS_GET_RAID_MAP; 655 put_unaligned_be32(cdb_length, &cdb[6]); 656 break; 657 case SA_FLUSH_CACHE: 658 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; 659 request->data_direction = SOP_WRITE_FLAG; 660 cdb[0] = BMIC_WRITE; 661 cdb[6] = BMIC_FLUSH_CACHE; 662 put_unaligned_be16(cdb_length, &cdb[7]); 663 break; 664 case BMIC_SENSE_DIAG_OPTIONS: 665 cdb_length = 0; 666 fallthrough; 667 case BMIC_IDENTIFY_CONTROLLER: 668 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 669 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 670 case BMIC_SENSE_FEATURE: 671 request->data_direction = SOP_READ_FLAG; 672 cdb[0] = BMIC_READ; 673 cdb[6] = cmd; 674 put_unaligned_be16(cdb_length, &cdb[7]); 675 break; 676 case BMIC_SET_DIAG_OPTIONS: 677 cdb_length = 0; 678 fallthrough; 679 case BMIC_WRITE_HOST_WELLNESS: 680 request->data_direction = SOP_WRITE_FLAG; 681 cdb[0] = BMIC_WRITE; 682 cdb[6] = cmd; 683 put_unaligned_be16(cdb_length, &cdb[7]); 684 break; 685 case BMIC_CSMI_PASSTHRU: 686 request->data_direction = SOP_BIDIRECTIONAL; 687 cdb[0] = BMIC_WRITE; 688 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; 689 cdb[6] = cmd; 690 put_unaligned_be16(cdb_length, &cdb[7]); 691 break; 692 default: 693 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); 694 break; 695 } 696 697 switch (request->data_direction) { 698 case SOP_READ_FLAG: 699 *dir = DMA_FROM_DEVICE; 700 break; 701 case SOP_WRITE_FLAG: 702 *dir = DMA_TO_DEVICE; 703 break; 704 case SOP_NO_DIRECTION_FLAG: 705 *dir = DMA_NONE; 706 break; 707 default: 708 *dir = DMA_BIDIRECTIONAL; 709 break; 710 } 711 712 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 713 buffer, buffer_length, *dir); 714 } 715 716 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 717 { 718 io_request->scmd = NULL; 719 io_request->status = 0; 720 io_request->error_info = NULL; 721 io_request->raid_bypass = false; 722 } 723 724 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) 725 { 726 struct pqi_io_request *io_request; 727 u16 i; 728 729 if (scmd) { /* SML I/O request */ 730 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 731 732 i = blk_mq_unique_tag_to_tag(blk_tag); 733 io_request = &ctrl_info->io_request_pool[i]; 734 if (atomic_inc_return(&io_request->refcount) > 1) { 735 atomic_dec(&io_request->refcount); 736 return NULL; 737 } 738 } else { /* IOCTL or driver internal request */ 739 /* 740 * benignly racy - may have to wait for an open slot. 
741 * command slot range is scsi_ml_can_queue - 742 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] 743 */ 744 i = 0; 745 while (1) { 746 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; 747 if (atomic_inc_return(&io_request->refcount) == 1) 748 break; 749 atomic_dec(&io_request->refcount); 750 i = (i + 1) % PQI_RESERVED_IO_SLOTS; 751 } 752 } 753 754 if (io_request) 755 pqi_reinit_io_request(io_request); 756 757 return io_request; 758 } 759 760 static void pqi_free_io_request(struct pqi_io_request *io_request) 761 { 762 atomic_dec(&io_request->refcount); 763 } 764 765 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, 766 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, 767 struct pqi_raid_error_info *error_info) 768 { 769 int rc; 770 struct pqi_raid_path_request request; 771 enum dma_data_direction dir; 772 773 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, 774 buffer, buffer_length, vpd_page, &dir); 775 if (rc) 776 return rc; 777 778 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); 779 780 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 781 782 return rc; 783 } 784 785 /* helper functions for pqi_send_scsi_raid_request */ 786 787 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, 788 u8 cmd, void *buffer, size_t buffer_length) 789 { 790 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 791 buffer, buffer_length, 0, NULL); 792 } 793 794 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, 795 u8 cmd, void *buffer, size_t buffer_length, 796 struct pqi_raid_error_info *error_info) 797 { 798 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 799 buffer, buffer_length, 0, error_info); 800 } 801 802 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 803 struct bmic_identify_controller *buffer) 804 { 805 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, 806 buffer, sizeof(*buffer)); 807 } 808 809 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, 810 struct bmic_sense_subsystem_info *sense_info) 811 { 812 return pqi_send_ctrl_raid_request(ctrl_info, 813 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, 814 sizeof(*sense_info)); 815 } 816 817 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 818 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 819 { 820 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, 821 buffer, buffer_length, vpd_page, NULL); 822 } 823 824 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 825 struct pqi_scsi_dev *device, 826 struct bmic_identify_physical_device *buffer, size_t buffer_length) 827 { 828 int rc; 829 enum dma_data_direction dir; 830 u16 bmic_device_index; 831 struct pqi_raid_path_request request; 832 833 rc = pqi_build_raid_path_request(ctrl_info, &request, 834 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 835 buffer_length, 0, &dir); 836 if (rc) 837 return rc; 838 839 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 840 request.cdb[2] = (u8)bmic_device_index; 841 request.cdb[9] = (u8)(bmic_device_index >> 8); 842 843 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 844 845 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 846 847 return rc; 848 } 849 850 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) 851 { 852 
u32 bytes; 853 854 bytes = get_unaligned_le16(limit); 855 if (bytes == 0) 856 bytes = ~0; 857 else 858 bytes *= 1024; 859 860 return bytes; 861 } 862 863 #pragma pack(1) 864 865 struct bmic_sense_feature_buffer { 866 struct bmic_sense_feature_buffer_header header; 867 struct bmic_sense_feature_io_page_aio_subpage aio_subpage; 868 }; 869 870 #pragma pack() 871 872 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ 873 offsetofend(struct bmic_sense_feature_buffer, \ 874 aio_subpage.max_write_raid_1_10_3drive) 875 876 #define MINIMUM_AIO_SUBPAGE_LENGTH \ 877 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ 878 max_write_raid_1_10_3drive) - \ 879 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) 880 881 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) 882 { 883 int rc; 884 enum dma_data_direction dir; 885 struct pqi_raid_path_request request; 886 struct bmic_sense_feature_buffer *buffer; 887 888 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); 889 if (!buffer) 890 return -ENOMEM; 891 892 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, 893 buffer, sizeof(*buffer), 0, &dir); 894 if (rc) 895 goto error; 896 897 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; 898 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; 899 900 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 901 902 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 903 904 if (rc) 905 goto error; 906 907 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || 908 buffer->header.subpage_code != 909 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 910 get_unaligned_le16(&buffer->header.buffer_length) < 911 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || 912 buffer->aio_subpage.header.page_code != 913 BMIC_SENSE_FEATURE_IO_PAGE || 914 buffer->aio_subpage.header.subpage_code != 915 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 916 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < 917 MINIMUM_AIO_SUBPAGE_LENGTH) { 918 goto error; 919 } 920 921 ctrl_info->max_transfer_encrypted_sas_sata = 922 pqi_aio_limit_to_bytes( 923 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); 924 925 ctrl_info->max_transfer_encrypted_nvme = 926 pqi_aio_limit_to_bytes( 927 &buffer->aio_subpage.max_transfer_encrypted_nvme); 928 929 ctrl_info->max_write_raid_5_6 = 930 pqi_aio_limit_to_bytes( 931 &buffer->aio_subpage.max_write_raid_5_6); 932 933 ctrl_info->max_write_raid_1_10_2drive = 934 pqi_aio_limit_to_bytes( 935 &buffer->aio_subpage.max_write_raid_1_10_2drive); 936 937 ctrl_info->max_write_raid_1_10_3drive = 938 pqi_aio_limit_to_bytes( 939 &buffer->aio_subpage.max_write_raid_1_10_3drive); 940 941 error: 942 kfree(buffer); 943 944 return rc; 945 } 946 947 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 948 enum bmic_flush_cache_shutdown_event shutdown_event) 949 { 950 int rc; 951 struct bmic_flush_cache *flush_cache; 952 953 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 954 if (!flush_cache) 955 return -ENOMEM; 956 957 flush_cache->shutdown_event = shutdown_event; 958 959 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, 960 sizeof(*flush_cache)); 961 962 kfree(flush_cache); 963 964 return rc; 965 } 966 967 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, 968 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, 969 struct pqi_raid_error_info *error_info) 970 { 971 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, 972 buffer, 
buffer_length, error_info); 973 } 974 975 #define PQI_FETCH_PTRAID_DATA (1 << 31) 976 977 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) 978 { 979 int rc; 980 struct bmic_diag_options *diag; 981 982 diag = kzalloc(sizeof(*diag), GFP_KERNEL); 983 if (!diag) 984 return -ENOMEM; 985 986 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, 987 diag, sizeof(*diag)); 988 if (rc) 989 goto out; 990 991 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); 992 993 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, 994 sizeof(*diag)); 995 996 out: 997 kfree(diag); 998 999 return rc; 1000 } 1001 1002 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 1003 void *buffer, size_t buffer_length) 1004 { 1005 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, 1006 buffer, buffer_length); 1007 } 1008 1009 #pragma pack(1) 1010 1011 struct bmic_host_wellness_driver_version { 1012 u8 start_tag[4]; 1013 u8 driver_version_tag[2]; 1014 __le16 driver_version_length; 1015 char driver_version[32]; 1016 u8 dont_write_tag[2]; 1017 u8 end_tag[2]; 1018 }; 1019 1020 #pragma pack() 1021 1022 static int pqi_write_driver_version_to_host_wellness( 1023 struct pqi_ctrl_info *ctrl_info) 1024 { 1025 int rc; 1026 struct bmic_host_wellness_driver_version *buffer; 1027 size_t buffer_length; 1028 1029 buffer_length = sizeof(*buffer); 1030 1031 buffer = kmalloc(buffer_length, GFP_KERNEL); 1032 if (!buffer) 1033 return -ENOMEM; 1034 1035 buffer->start_tag[0] = '<'; 1036 buffer->start_tag[1] = 'H'; 1037 buffer->start_tag[2] = 'W'; 1038 buffer->start_tag[3] = '>'; 1039 buffer->driver_version_tag[0] = 'D'; 1040 buffer->driver_version_tag[1] = 'V'; 1041 put_unaligned_le16(sizeof(buffer->driver_version), 1042 &buffer->driver_version_length); 1043 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, 1044 sizeof(buffer->driver_version)); 1045 buffer->dont_write_tag[0] = 'D'; 1046 buffer->dont_write_tag[1] = 'W'; 1047 buffer->end_tag[0] = 'Z'; 1048 buffer->end_tag[1] = 'Z'; 1049 1050 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1051 1052 kfree(buffer); 1053 1054 return rc; 1055 } 1056 1057 #pragma pack(1) 1058 1059 struct bmic_host_wellness_time { 1060 u8 start_tag[4]; 1061 u8 time_tag[2]; 1062 __le16 time_length; 1063 u8 time[8]; 1064 u8 dont_write_tag[2]; 1065 u8 end_tag[2]; 1066 }; 1067 1068 #pragma pack() 1069 1070 static int pqi_write_current_time_to_host_wellness( 1071 struct pqi_ctrl_info *ctrl_info) 1072 { 1073 int rc; 1074 struct bmic_host_wellness_time *buffer; 1075 size_t buffer_length; 1076 time64_t local_time; 1077 unsigned int year; 1078 struct tm tm; 1079 1080 buffer_length = sizeof(*buffer); 1081 1082 buffer = kmalloc(buffer_length, GFP_KERNEL); 1083 if (!buffer) 1084 return -ENOMEM; 1085 1086 buffer->start_tag[0] = '<'; 1087 buffer->start_tag[1] = 'H'; 1088 buffer->start_tag[2] = 'W'; 1089 buffer->start_tag[3] = '>'; 1090 buffer->time_tag[0] = 'T'; 1091 buffer->time_tag[1] = 'D'; 1092 put_unaligned_le16(sizeof(buffer->time), 1093 &buffer->time_length); 1094 1095 local_time = ktime_get_real_seconds(); 1096 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 1097 year = tm.tm_year + 1900; 1098 1099 buffer->time[0] = bin2bcd(tm.tm_hour); 1100 buffer->time[1] = bin2bcd(tm.tm_min); 1101 buffer->time[2] = bin2bcd(tm.tm_sec); 1102 buffer->time[3] = 0; 1103 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 1104 buffer->time[5] = bin2bcd(tm.tm_mday); 1105 buffer->time[6] = bin2bcd(year / 100); 1106 buffer->time[7] = 
bin2bcd(year % 100); 1107 1108 buffer->dont_write_tag[0] = 'D'; 1109 buffer->dont_write_tag[1] = 'W'; 1110 buffer->end_tag[0] = 'Z'; 1111 buffer->end_tag[1] = 'Z'; 1112 1113 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1114 1115 kfree(buffer); 1116 1117 return rc; 1118 } 1119 1120 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 1121 1122 static void pqi_update_time_worker(struct work_struct *work) 1123 { 1124 int rc; 1125 struct pqi_ctrl_info *ctrl_info; 1126 1127 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1128 update_time_work); 1129 1130 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 1131 if (rc) 1132 dev_warn(&ctrl_info->pci_dev->dev, 1133 "error updating time on controller\n"); 1134 1135 schedule_delayed_work(&ctrl_info->update_time_work, 1136 PQI_UPDATE_TIME_WORK_INTERVAL); 1137 } 1138 1139 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1140 { 1141 schedule_delayed_work(&ctrl_info->update_time_work, 0); 1142 } 1143 1144 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1145 { 1146 cancel_delayed_work_sync(&ctrl_info->update_time_work); 1147 } 1148 1149 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, 1150 size_t buffer_length) 1151 { 1152 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); 1153 } 1154 1155 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) 1156 { 1157 int rc; 1158 size_t lun_list_length; 1159 size_t lun_data_length; 1160 size_t new_lun_list_length; 1161 void *lun_data = NULL; 1162 struct report_lun_header *report_lun_header; 1163 1164 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 1165 if (!report_lun_header) { 1166 rc = -ENOMEM; 1167 goto out; 1168 } 1169 1170 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); 1171 if (rc) 1172 goto out; 1173 1174 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 1175 1176 again: 1177 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 1178 1179 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 1180 if (!lun_data) { 1181 rc = -ENOMEM; 1182 goto out; 1183 } 1184 1185 if (lun_list_length == 0) { 1186 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 1187 goto out; 1188 } 1189 1190 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 1191 if (rc) 1192 goto out; 1193 1194 new_lun_list_length = 1195 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); 1196 1197 if (new_lun_list_length > lun_list_length) { 1198 lun_list_length = new_lun_list_length; 1199 kfree(lun_data); 1200 goto again; 1201 } 1202 1203 out: 1204 kfree(report_lun_header); 1205 1206 if (rc) { 1207 kfree(lun_data); 1208 lun_data = NULL; 1209 } 1210 1211 *buffer = lun_data; 1212 1213 return rc; 1214 } 1215 1216 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1217 { 1218 int rc; 1219 unsigned int i; 1220 u8 rpl_response_format; 1221 u32 num_physicals; 1222 void *rpl_list; 1223 struct report_lun_header *rpl_header; 1224 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; 1225 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; 1226 1227 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); 1228 if (rc) 1229 return rc; 1230 1231 if (ctrl_info->rpl_extended_format_4_5_supported) { 1232 rpl_header = rpl_list; 1233 
rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; 1234 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { 1235 *buffer = rpl_list; 1236 return 0; 1237 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { 1238 dev_err(&ctrl_info->pci_dev->dev, 1239 "RPL returned unsupported data format %u\n", 1240 rpl_response_format); 1241 return -EINVAL; 1242 } else { 1243 dev_warn(&ctrl_info->pci_dev->dev, 1244 "RPL returned extended format 2 instead of 4\n"); 1245 } 1246 } 1247 1248 rpl_8byte_wwid_list = rpl_list; 1249 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1250 1251 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, 1252 num_physicals), GFP_KERNEL); 1253 if (!rpl_16byte_wwid_list) 1254 return -ENOMEM; 1255 1256 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), 1257 &rpl_16byte_wwid_list->header.list_length); 1258 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; 1259 1260 for (i = 0; i < num_physicals; i++) { 1261 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); 1262 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); 1263 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); 1264 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; 1265 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; 1266 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; 1267 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; 1268 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; 1269 } 1270 1271 kfree(rpl_8byte_wwid_list); 1272 *buffer = rpl_16byte_wwid_list; 1273 1274 return 0; 1275 } 1276 1277 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1278 { 1279 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 1280 } 1281 1282 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 1283 struct report_phys_lun_16byte_wwid_list **physdev_list, 1284 struct report_log_lun_list **logdev_list) 1285 { 1286 int rc; 1287 size_t logdev_list_length; 1288 size_t logdev_data_length; 1289 struct report_log_lun_list *internal_logdev_list; 1290 struct report_log_lun_list *logdev_data; 1291 struct report_lun_header report_lun_header; 1292 1293 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 1294 if (rc) 1295 dev_err(&ctrl_info->pci_dev->dev, 1296 "report physical LUNs failed\n"); 1297 1298 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 1299 if (rc) 1300 dev_err(&ctrl_info->pci_dev->dev, 1301 "report logical LUNs failed\n"); 1302 1303 /* 1304 * Tack the controller itself onto the end of the logical device list 1305 * by adding a list entry that is all zeros. 
1306 */ 1307 1308 logdev_data = *logdev_list; 1309 1310 if (logdev_data) { 1311 logdev_list_length = 1312 get_unaligned_be32(&logdev_data->header.list_length); 1313 } else { 1314 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1315 logdev_data = 1316 (struct report_log_lun_list *)&report_lun_header; 1317 logdev_list_length = 0; 1318 } 1319 1320 logdev_data_length = sizeof(struct report_lun_header) + 1321 logdev_list_length; 1322 1323 internal_logdev_list = kmalloc(logdev_data_length + 1324 sizeof(struct report_log_lun), GFP_KERNEL); 1325 if (!internal_logdev_list) { 1326 kfree(*logdev_list); 1327 *logdev_list = NULL; 1328 return -ENOMEM; 1329 } 1330 1331 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1332 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1333 sizeof(struct report_log_lun)); 1334 put_unaligned_be32(logdev_list_length + 1335 sizeof(struct report_log_lun), 1336 &internal_logdev_list->header.list_length); 1337 1338 kfree(*logdev_list); 1339 *logdev_list = internal_logdev_list; 1340 1341 return 0; 1342 } 1343 1344 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 1345 int bus, int target, int lun) 1346 { 1347 device->bus = bus; 1348 device->target = target; 1349 device->lun = lun; 1350 } 1351 1352 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 1353 { 1354 u8 *scsi3addr; 1355 u32 lunid; 1356 int bus; 1357 int target; 1358 int lun; 1359 1360 scsi3addr = device->scsi3addr; 1361 lunid = get_unaligned_le32(scsi3addr); 1362 1363 if (pqi_is_hba_lunid(scsi3addr)) { 1364 /* The specified device is the controller. */ 1365 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 1366 device->target_lun_valid = true; 1367 return; 1368 } 1369 1370 if (pqi_is_logical_device(device)) { 1371 if (device->is_external_raid_device) { 1372 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 1373 target = (lunid >> 16) & 0x3fff; 1374 lun = lunid & 0xff; 1375 } else { 1376 bus = PQI_RAID_VOLUME_BUS; 1377 target = 0; 1378 lun = lunid & 0x3fff; 1379 } 1380 pqi_set_bus_target_lun(device, bus, target, lun); 1381 device->target_lun_valid = true; 1382 return; 1383 } 1384 1385 /* 1386 * Defer target and LUN assignment for non-controller physical devices 1387 * because the SAS transport layer will make these assignments later. 
1388 */ 1389 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 1390 } 1391 1392 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 1393 struct pqi_scsi_dev *device) 1394 { 1395 int rc; 1396 u8 raid_level; 1397 u8 *buffer; 1398 1399 raid_level = SA_RAID_UNKNOWN; 1400 1401 buffer = kmalloc(64, GFP_KERNEL); 1402 if (buffer) { 1403 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1404 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1405 if (rc == 0) { 1406 raid_level = buffer[8]; 1407 if (raid_level > SA_RAID_MAX) 1408 raid_level = SA_RAID_UNKNOWN; 1409 } 1410 kfree(buffer); 1411 } 1412 1413 device->raid_level = raid_level; 1414 } 1415 1416 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1417 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1418 { 1419 char *err_msg; 1420 u32 raid_map_size; 1421 u32 r5or6_blocks_per_row; 1422 1423 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1424 1425 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1426 err_msg = "RAID map too small"; 1427 goto bad_raid_map; 1428 } 1429 1430 if (device->raid_level == SA_RAID_1) { 1431 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1432 err_msg = "invalid RAID-1 map"; 1433 goto bad_raid_map; 1434 } 1435 } else if (device->raid_level == SA_RAID_TRIPLE) { 1436 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1437 err_msg = "invalid RAID-1(Triple) map"; 1438 goto bad_raid_map; 1439 } 1440 } else if ((device->raid_level == SA_RAID_5 || 1441 device->raid_level == SA_RAID_6) && 1442 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1443 /* RAID 50/60 */ 1444 r5or6_blocks_per_row = 1445 get_unaligned_le16(&raid_map->strip_size) * 1446 get_unaligned_le16(&raid_map->data_disks_per_row); 1447 if (r5or6_blocks_per_row == 0) { 1448 err_msg = "invalid RAID-5 or RAID-6 map"; 1449 goto bad_raid_map; 1450 } 1451 } 1452 1453 return 0; 1454 1455 bad_raid_map: 1456 dev_warn(&ctrl_info->pci_dev->dev, 1457 "logical device %08x%08x %s\n", 1458 *((u32 *)&device->scsi3addr), 1459 *((u32 *)&device->scsi3addr[4]), err_msg); 1460 1461 return -EINVAL; 1462 } 1463 1464 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1465 struct pqi_scsi_dev *device) 1466 { 1467 int rc; 1468 u32 raid_map_size; 1469 struct raid_map *raid_map; 1470 1471 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1472 if (!raid_map) 1473 return -ENOMEM; 1474 1475 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1476 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); 1477 if (rc) 1478 goto error; 1479 1480 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1481 1482 if (raid_map_size > sizeof(*raid_map)) { 1483 1484 kfree(raid_map); 1485 1486 raid_map = kmalloc(raid_map_size, GFP_KERNEL); 1487 if (!raid_map) 1488 return -ENOMEM; 1489 1490 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1491 device->scsi3addr, raid_map, raid_map_size, 0, NULL); 1492 if (rc) 1493 goto error; 1494 1495 if (get_unaligned_le32(&raid_map->structure_size) 1496 != raid_map_size) { 1497 dev_warn(&ctrl_info->pci_dev->dev, 1498 "requested %u bytes, received %u bytes\n", 1499 raid_map_size, 1500 get_unaligned_le32(&raid_map->structure_size)); 1501 rc = -EINVAL; 1502 goto error; 1503 } 1504 } 1505 1506 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1507 if (rc) 1508 goto error; 1509 1510 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); 1511 if (!device->raid_io_stats) { 1512 rc = -ENOMEM; 1513 goto error; 1514 } 1515 
1516 device->raid_map = raid_map; 1517 1518 return 0; 1519 1520 error: 1521 kfree(raid_map); 1522 1523 return rc; 1524 } 1525 1526 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, 1527 struct pqi_scsi_dev *device) 1528 { 1529 if (!ctrl_info->lv_drive_type_mix_valid) { 1530 device->max_transfer_encrypted = ~0; 1531 return; 1532 } 1533 1534 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { 1535 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: 1536 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: 1537 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: 1538 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: 1539 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: 1540 case LV_DRIVE_TYPE_MIX_SAS_ONLY: 1541 case LV_DRIVE_TYPE_MIX_SATA_ONLY: 1542 device->max_transfer_encrypted = 1543 ctrl_info->max_transfer_encrypted_sas_sata; 1544 break; 1545 case LV_DRIVE_TYPE_MIX_NVME_ONLY: 1546 device->max_transfer_encrypted = 1547 ctrl_info->max_transfer_encrypted_nvme; 1548 break; 1549 case LV_DRIVE_TYPE_MIX_UNKNOWN: 1550 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: 1551 default: 1552 device->max_transfer_encrypted = 1553 min(ctrl_info->max_transfer_encrypted_sas_sata, 1554 ctrl_info->max_transfer_encrypted_nvme); 1555 break; 1556 } 1557 } 1558 1559 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1560 struct pqi_scsi_dev *device) 1561 { 1562 int rc; 1563 u8 *buffer; 1564 u8 bypass_status; 1565 1566 buffer = kmalloc(64, GFP_KERNEL); 1567 if (!buffer) 1568 return; 1569 1570 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1571 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1572 if (rc) 1573 goto out; 1574 1575 #define RAID_BYPASS_STATUS 4 1576 #define RAID_BYPASS_CONFIGURED 0x1 1577 #define RAID_BYPASS_ENABLED 0x2 1578 1579 bypass_status = buffer[RAID_BYPASS_STATUS]; 1580 device->raid_bypass_configured = 1581 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1582 if (device->raid_bypass_configured && 1583 (bypass_status & RAID_BYPASS_ENABLED) && 1584 pqi_get_raid_map(ctrl_info, device) == 0) { 1585 device->raid_bypass_enabled = true; 1586 if (get_unaligned_le16(&device->raid_map->flags) & 1587 RAID_MAP_ENCRYPTION_ENABLED) 1588 pqi_set_max_transfer_encrypted(ctrl_info, device); 1589 } 1590 1591 out: 1592 kfree(buffer); 1593 } 1594 1595 /* 1596 * Use vendor-specific VPD to determine online/offline status of a volume. 
1597 */ 1598 1599 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1600 struct pqi_scsi_dev *device) 1601 { 1602 int rc; 1603 size_t page_length; 1604 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1605 bool volume_offline = true; 1606 u32 volume_flags; 1607 struct ciss_vpd_logical_volume_status *vpd; 1608 1609 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1610 if (!vpd) 1611 goto no_buffer; 1612 1613 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1614 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1615 if (rc) 1616 goto out; 1617 1618 if (vpd->page_code != CISS_VPD_LV_STATUS) 1619 goto out; 1620 1621 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1622 volume_status) + vpd->page_length; 1623 if (page_length < sizeof(*vpd)) 1624 goto out; 1625 1626 volume_status = vpd->volume_status; 1627 volume_flags = get_unaligned_be32(&vpd->flags); 1628 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1629 1630 out: 1631 kfree(vpd); 1632 no_buffer: 1633 device->volume_status = volume_status; 1634 device->volume_offline = volume_offline; 1635 } 1636 1637 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 1638 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 1639 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 1640 1641 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, 1642 struct pqi_scsi_dev *device, 1643 struct bmic_identify_physical_device *id_phys) 1644 { 1645 int rc; 1646 1647 memset(id_phys, 0, sizeof(*id_phys)); 1648 1649 rc = pqi_identify_physical_device(ctrl_info, device, 1650 id_phys, sizeof(*id_phys)); 1651 if (rc) { 1652 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1653 return rc; 1654 } 1655 1656 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); 1657 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); 1658 1659 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); 1660 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); 1661 1662 device->box_index = id_phys->box_index; 1663 device->phys_box_on_bus = id_phys->phys_box_on_bus; 1664 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; 1665 device->queue_depth = 1666 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1667 device->active_path_index = id_phys->active_path_number; 1668 device->path_map = id_phys->redundant_path_present_map; 1669 memcpy(&device->box, 1670 &id_phys->alternate_paths_phys_box_on_port, 1671 sizeof(device->box)); 1672 memcpy(&device->phys_connector, 1673 &id_phys->alternate_paths_phys_connector, 1674 sizeof(device->phys_connector)); 1675 device->bay = id_phys->phys_bay_in_box; 1676 device->lun_count = id_phys->multi_lun_device_lun_count; 1677 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && 1678 id_phys->phy_count) 1679 device->phy_id = 1680 id_phys->phy_to_phy_map[device->active_path_index]; 1681 else 1682 device->phy_id = 0xFF; 1683 1684 device->ncq_prio_support = 1685 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & 1686 PQI_DEVICE_NCQ_PRIO_SUPPORTED); 1687 1688 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); 1689 1690 return 0; 1691 } 1692 1693 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, 1694 struct pqi_scsi_dev *device) 1695 { 1696 int rc; 1697 u8 *buffer; 1698 1699 buffer = kmalloc(64, GFP_KERNEL); 1700 if (!buffer) 1701 return -ENOMEM; 1702 1703 /* Send an inquiry to the device to see what it is. 
*/ 1704 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1705 if (rc) 1706 goto out; 1707 1708 scsi_sanitize_inquiry_string(&buffer[8], 8); 1709 scsi_sanitize_inquiry_string(&buffer[16], 16); 1710 1711 device->devtype = buffer[0] & 0x1f; 1712 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1713 memcpy(device->model, &buffer[16], sizeof(device->model)); 1714 1715 if (device->devtype == TYPE_DISK) { 1716 if (device->is_external_raid_device) { 1717 device->raid_level = SA_RAID_UNKNOWN; 1718 device->volume_status = CISS_LV_OK; 1719 device->volume_offline = false; 1720 } else { 1721 pqi_get_raid_level(ctrl_info, device); 1722 pqi_get_raid_bypass_status(ctrl_info, device); 1723 pqi_get_volume_status(ctrl_info, device); 1724 } 1725 } 1726 1727 out: 1728 kfree(buffer); 1729 1730 return rc; 1731 } 1732 1733 /* 1734 * Prevent adding drive to OS for some corner cases such as a drive 1735 * undergoing a sanitize (erase) operation. Some OSes will continue to poll 1736 * the drive until the sanitize completes, which can take hours, 1737 * resulting in long bootup delays. Commands such as TUR, READ_CAP 1738 * are allowed, but READ/WRITE cause check condition. So the OS 1739 * cannot check/read the partition table. 1740 * Note: devices that have completed sanitize must be re-enabled 1741 * using the management utility. 1742 */ 1743 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) 1744 { 1745 return device->erase_in_progress; 1746 } 1747 1748 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, 1749 struct pqi_scsi_dev *device, 1750 struct bmic_identify_physical_device *id_phys) 1751 { 1752 int rc; 1753 1754 if (device->is_expander_smp_device) 1755 return 0; 1756 1757 if (pqi_is_logical_device(device)) 1758 rc = pqi_get_logical_device_info(ctrl_info, device); 1759 else 1760 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); 1761 1762 return rc; 1763 } 1764 1765 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1766 struct pqi_scsi_dev *device, 1767 struct bmic_identify_physical_device *id_phys) 1768 { 1769 int rc; 1770 1771 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); 1772 1773 if (rc == 0 && device->lun_count == 0) 1774 device->lun_count = 1; 1775 1776 return rc; 1777 } 1778 1779 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1780 struct pqi_scsi_dev *device) 1781 { 1782 char *status; 1783 static const char unknown_state_str[] = 1784 "Volume is in an unknown state (%u)"; 1785 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1786 1787 switch (device->volume_status) { 1788 case CISS_LV_OK: 1789 status = "Volume online"; 1790 break; 1791 case CISS_LV_FAILED: 1792 status = "Volume failed"; 1793 break; 1794 case CISS_LV_NOT_CONFIGURED: 1795 status = "Volume not configured"; 1796 break; 1797 case CISS_LV_DEGRADED: 1798 status = "Volume degraded"; 1799 break; 1800 case CISS_LV_READY_FOR_RECOVERY: 1801 status = "Volume ready for recovery operation"; 1802 break; 1803 case CISS_LV_UNDERGOING_RECOVERY: 1804 status = "Volume undergoing recovery"; 1805 break; 1806 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1807 status = "Wrong physical drive was replaced"; 1808 break; 1809 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1810 status = "A physical drive not properly connected"; 1811 break; 1812 case CISS_LV_HARDWARE_OVERHEATING: 1813 status = "Hardware is overheating"; 1814 break; 1815 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1816 status = "Hardware has overheated"; 
1817 break; 1818 case CISS_LV_UNDERGOING_EXPANSION: 1819 status = "Volume undergoing expansion"; 1820 break; 1821 case CISS_LV_NOT_AVAILABLE: 1822 status = "Volume waiting for transforming volume"; 1823 break; 1824 case CISS_LV_QUEUED_FOR_EXPANSION: 1825 status = "Volume queued for expansion"; 1826 break; 1827 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1828 status = "Volume disabled due to SCSI ID conflict"; 1829 break; 1830 case CISS_LV_EJECTED: 1831 status = "Volume has been ejected"; 1832 break; 1833 case CISS_LV_UNDERGOING_ERASE: 1834 status = "Volume undergoing background erase"; 1835 break; 1836 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1837 status = "Volume ready for predictive spare rebuild"; 1838 break; 1839 case CISS_LV_UNDERGOING_RPI: 1840 status = "Volume undergoing rapid parity initialization"; 1841 break; 1842 case CISS_LV_PENDING_RPI: 1843 status = "Volume queued for rapid parity initialization"; 1844 break; 1845 case CISS_LV_ENCRYPTED_NO_KEY: 1846 status = "Encrypted volume inaccessible - key not present"; 1847 break; 1848 case CISS_LV_UNDERGOING_ENCRYPTION: 1849 status = "Volume undergoing encryption process"; 1850 break; 1851 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1852 status = "Volume undergoing encryption re-keying process"; 1853 break; 1854 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1855 status = "Volume encrypted but encryption is disabled"; 1856 break; 1857 case CISS_LV_PENDING_ENCRYPTION: 1858 status = "Volume pending migration to encrypted state"; 1859 break; 1860 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1861 status = "Volume pending encryption rekeying"; 1862 break; 1863 case CISS_LV_NOT_SUPPORTED: 1864 status = "Volume not supported on this controller"; 1865 break; 1866 case CISS_LV_STATUS_UNAVAILABLE: 1867 status = "Volume status not available"; 1868 break; 1869 default: 1870 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1871 unknown_state_str, device->volume_status); 1872 status = unknown_state_buffer; 1873 break; 1874 } 1875 1876 dev_info(&ctrl_info->pci_dev->dev, 1877 "scsi %d:%d:%d:%d %s\n", 1878 ctrl_info->scsi_host->host_no, 1879 device->bus, device->target, device->lun, status); 1880 } 1881 1882 static void pqi_rescan_worker(struct work_struct *work) 1883 { 1884 struct pqi_ctrl_info *ctrl_info; 1885 1886 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1887 rescan_work); 1888 1889 pqi_scan_scsi_devices(ctrl_info); 1890 } 1891 1892 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1893 struct pqi_scsi_dev *device) 1894 { 1895 int rc; 1896 1897 if (pqi_is_logical_device(device)) 1898 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1899 device->target, device->lun); 1900 else 1901 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1902 1903 return rc; 1904 } 1905 1906 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) 1907 1908 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) 1909 { 1910 int rc; 1911 int lun; 1912 1913 for (lun = 0; lun < device->lun_count; lun++) { 1914 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, 1915 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); 1916 if (rc) 1917 dev_err(&ctrl_info->pci_dev->dev, 1918 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", 1919 ctrl_info->scsi_host->host_no, device->bus, 1920 device->target, lun, 1921 atomic_read(&device->scsi_cmds_outstanding[lun])); 1922 } 1923 1924 if (pqi_is_logical_device(device)) 1925 scsi_remove_device(device->sdev); 
1926 else 1927 pqi_remove_sas_device(device); 1928 1929 pqi_device_remove_start(device); 1930 } 1931 1932 /* Assumes the SCSI device list lock is held. */ 1933 1934 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1935 int bus, int target, int lun) 1936 { 1937 struct pqi_scsi_dev *device; 1938 1939 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1940 if (device->bus == bus && device->target == target && device->lun == lun) 1941 return device; 1942 1943 return NULL; 1944 } 1945 1946 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) 1947 { 1948 if (dev1->is_physical_device != dev2->is_physical_device) 1949 return false; 1950 1951 if (dev1->is_physical_device) 1952 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1953 1954 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1955 } 1956 1957 enum pqi_find_result { 1958 DEVICE_NOT_FOUND, 1959 DEVICE_CHANGED, 1960 DEVICE_SAME, 1961 }; 1962 1963 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1964 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1965 { 1966 struct pqi_scsi_dev *device; 1967 1968 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1969 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1970 *matching_device = device; 1971 if (pqi_device_equal(device_to_find, device)) { 1972 if (device_to_find->volume_offline) 1973 return DEVICE_CHANGED; 1974 return DEVICE_SAME; 1975 } 1976 return DEVICE_CHANGED; 1977 } 1978 } 1979 1980 return DEVICE_NOT_FOUND; 1981 } 1982 1983 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1984 { 1985 if (device->is_expander_smp_device) 1986 return "Enclosure SMP "; 1987 1988 return scsi_device_type(device->devtype); 1989 } 1990 1991 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1992 1993 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1994 char *action, struct pqi_scsi_dev *device) 1995 { 1996 ssize_t count; 1997 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1998 1999 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 2000 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 2001 2002 if (device->target_lun_valid) 2003 count += scnprintf(buffer + count, 2004 PQI_DEV_INFO_BUFFER_LENGTH - count, 2005 "%d:%d", 2006 device->target, 2007 device->lun); 2008 else 2009 count += scnprintf(buffer + count, 2010 PQI_DEV_INFO_BUFFER_LENGTH - count, 2011 "-:-"); 2012 2013 if (pqi_is_logical_device(device)) 2014 count += scnprintf(buffer + count, 2015 PQI_DEV_INFO_BUFFER_LENGTH - count, 2016 " %08x%08x", 2017 *((u32 *)&device->scsi3addr), 2018 *((u32 *)&device->scsi3addr[4])); 2019 else 2020 count += scnprintf(buffer + count, 2021 PQI_DEV_INFO_BUFFER_LENGTH - count, 2022 " %016llx%016llx", 2023 get_unaligned_be64(&device->wwid[0]), 2024 get_unaligned_be64(&device->wwid[8])); 2025 2026 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 2027 " %s %.8s %.16s ", 2028 pqi_device_type(device), 2029 device->vendor, 2030 device->model); 2031 2032 if (pqi_is_logical_device(device)) { 2033 if (device->devtype == TYPE_DISK) 2034 count += scnprintf(buffer + count, 2035 PQI_DEV_INFO_BUFFER_LENGTH - count, 2036 "SSDSmartPathCap%c En%c %-12s", 2037 device->raid_bypass_configured ? '+' : '-', 2038 device->raid_bypass_enabled ? 
'+' : '-', 2039 pqi_raid_level_to_string(device->raid_level)); 2040 } else { 2041 count += scnprintf(buffer + count, 2042 PQI_DEV_INFO_BUFFER_LENGTH - count, 2043 "AIO%c", device->aio_enabled ? '+' : '-'); 2044 if (device->devtype == TYPE_DISK || 2045 device->devtype == TYPE_ZBC) 2046 count += scnprintf(buffer + count, 2047 PQI_DEV_INFO_BUFFER_LENGTH - count, 2048 " qd=%-6d", device->queue_depth); 2049 } 2050 2051 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 2052 } 2053 2054 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) 2055 { 2056 u32 raid_map1_size; 2057 u32 raid_map2_size; 2058 2059 if (raid_map1 == NULL || raid_map2 == NULL) 2060 return raid_map1 == raid_map2; 2061 2062 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); 2063 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); 2064 2065 if (raid_map1_size != raid_map2_size) 2066 return false; 2067 2068 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; 2069 } 2070 2071 /* Assumes the SCSI device list lock is held. */ 2072 2073 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, 2074 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) 2075 { 2076 existing_device->device_type = new_device->device_type; 2077 existing_device->bus = new_device->bus; 2078 if (new_device->target_lun_valid) { 2079 existing_device->target = new_device->target; 2080 existing_device->lun = new_device->lun; 2081 existing_device->target_lun_valid = true; 2082 } 2083 2084 /* By definition, the scsi3addr and wwid fields are already the same. */ 2085 2086 existing_device->is_physical_device = new_device->is_physical_device; 2087 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); 2088 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); 2089 existing_device->sas_address = new_device->sas_address; 2090 existing_device->queue_depth = new_device->queue_depth; 2091 existing_device->device_offline = false; 2092 existing_device->lun_count = new_device->lun_count; 2093 2094 if (pqi_is_logical_device(existing_device)) { 2095 existing_device->is_external_raid_device = new_device->is_external_raid_device; 2096 2097 if (existing_device->devtype == TYPE_DISK) { 2098 existing_device->raid_level = new_device->raid_level; 2099 existing_device->volume_status = new_device->volume_status; 2100 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); 2101 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { 2102 kfree(existing_device->raid_map); 2103 existing_device->raid_map = new_device->raid_map; 2104 /* To prevent this from being freed later. 
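 * (Otherwise pqi_free_device() on the temporary new_device would kfree()
 * the raid_map that existing_device has just taken ownership of.)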
*/ 2105 new_device->raid_map = NULL; 2106 } 2107 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { 2108 existing_device->raid_io_stats = new_device->raid_io_stats; 2109 new_device->raid_io_stats = NULL; 2110 } 2111 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; 2112 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; 2113 } 2114 } else { 2115 existing_device->aio_enabled = new_device->aio_enabled; 2116 existing_device->aio_handle = new_device->aio_handle; 2117 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; 2118 existing_device->active_path_index = new_device->active_path_index; 2119 existing_device->phy_id = new_device->phy_id; 2120 existing_device->path_map = new_device->path_map; 2121 existing_device->bay = new_device->bay; 2122 existing_device->box_index = new_device->box_index; 2123 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 2124 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; 2125 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); 2126 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); 2127 } 2128 } 2129 2130 static inline void pqi_free_device(struct pqi_scsi_dev *device) 2131 { 2132 if (device) { 2133 free_percpu(device->raid_io_stats); 2134 kfree(device->raid_map); 2135 kfree(device); 2136 } 2137 } 2138 2139 /* 2140 * Called when exposing a new device to the OS fails in order to re-adjust 2141 * our internal SCSI device list to match the SCSI ML's view. 2142 */ 2143 2144 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 2145 struct pqi_scsi_dev *device) 2146 { 2147 unsigned long flags; 2148 2149 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2150 list_del(&device->scsi_device_list_entry); 2151 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2152 2153 /* Allow the device structure to be freed later. */ 2154 device->keep_device = false; 2155 } 2156 2157 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 2158 { 2159 if (device->is_expander_smp_device) 2160 return device->sas_port != NULL; 2161 2162 return device->sdev != NULL; 2163 } 2164 2165 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) 2166 { 2167 unsigned int lun; 2168 struct pqi_tmf_work *tmf_work; 2169 2170 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) 2171 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); 2172 } 2173 2174 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) 2175 { 2176 if (pqi_device_in_remove(device)) 2177 return false; 2178 2179 if (device->sdev == NULL) 2180 return false; 2181 2182 if (!scsi_device_online(device->sdev)) 2183 return false; 2184 2185 return device->rescan; 2186 } 2187 2188 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 2189 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 2190 { 2191 int rc; 2192 unsigned int i; 2193 unsigned long flags; 2194 enum pqi_find_result find_result; 2195 struct pqi_scsi_dev *device; 2196 struct pqi_scsi_dev *next; 2197 struct pqi_scsi_dev *matching_device; 2198 LIST_HEAD(add_list); 2199 LIST_HEAD(delete_list); 2200 2201 /* 2202 * The idea here is to do as little work as possible while holding the 2203 * spinlock. 
That's why we go to great pains to defer anything other 2204 * than updating the internal device list until after we release the 2205 * spinlock. 2206 */ 2207 2208 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2209 2210 /* Assume that all devices in the existing list have gone away. */ 2211 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 2212 device->device_gone = true; 2213 2214 for (i = 0; i < num_new_devices; i++) { 2215 device = new_device_list[i]; 2216 2217 find_result = pqi_scsi_find_entry(ctrl_info, device, 2218 &matching_device); 2219 2220 switch (find_result) { 2221 case DEVICE_SAME: 2222 /* 2223 * The newly found device is already in the existing 2224 * device list. 2225 */ 2226 device->new_device = false; 2227 matching_device->device_gone = false; 2228 pqi_scsi_update_device(ctrl_info, matching_device, device); 2229 break; 2230 case DEVICE_NOT_FOUND: 2231 /* 2232 * The newly found device is NOT in the existing device 2233 * list. 2234 */ 2235 device->new_device = true; 2236 break; 2237 case DEVICE_CHANGED: 2238 /* 2239 * The original device has gone away and we need to add 2240 * the new device. 2241 */ 2242 device->new_device = true; 2243 break; 2244 } 2245 } 2246 2247 /* Process all devices that have gone away. */ 2248 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2249 scsi_device_list_entry) { 2250 if (device->device_gone) { 2251 list_del(&device->scsi_device_list_entry); 2252 list_add_tail(&device->delete_list_entry, &delete_list); 2253 } 2254 } 2255 2256 /* Process all new devices. */ 2257 for (i = 0; i < num_new_devices; i++) { 2258 device = new_device_list[i]; 2259 if (!device->new_device) 2260 continue; 2261 if (device->volume_offline) 2262 continue; 2263 list_add_tail(&device->scsi_device_list_entry, 2264 &ctrl_info->scsi_device_list); 2265 list_add_tail(&device->add_list_entry, &add_list); 2266 /* To prevent this device structure from being freed later. */ 2267 device->keep_device = true; 2268 pqi_init_device_tmf_work(device); 2269 } 2270 2271 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2272 2273 /* 2274 * If OFA is in progress and there are devices that need to be deleted, 2275 * allow any pending reset operations to continue and unblock any SCSI 2276 * requests before removal. 2277 */ 2278 if (pqi_ofa_in_progress(ctrl_info)) { 2279 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) 2280 if (pqi_is_device_added(device)) 2281 pqi_device_remove_start(device); 2282 pqi_ctrl_unblock_device_reset(ctrl_info); 2283 pqi_scsi_unblock_requests(ctrl_info); 2284 } 2285 2286 /* Remove all devices that have gone away. */ 2287 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 2288 if (device->volume_offline) { 2289 pqi_dev_info(ctrl_info, "offline", device); 2290 pqi_show_volume_status(ctrl_info, device); 2291 } else { 2292 pqi_dev_info(ctrl_info, "removed", device); 2293 } 2294 if (pqi_is_device_added(device)) 2295 pqi_remove_device(ctrl_info, device); 2296 list_del(&device->delete_list_entry); 2297 pqi_free_device(device); 2298 } 2299 2300 /* 2301 * Notify the SML of any existing device changes such as; 2302 * queue depth, device size. 2303 */ 2304 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 2305 /* 2306 * Check for queue depth change. 
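 * The depth last advertised to the SCSI midlayer is cached in
 * advertised_queue_depth, so scsi_change_queue_depth() is only called
 * when the discovered value actually differs.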
2307 */ 2308 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { 2309 device->advertised_queue_depth = device->queue_depth; 2310 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); 2311 } 2312 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2313 /* 2314 * Check for changes in the device, such as size. 2315 */ 2316 if (pqi_volume_rescan_needed(device)) { 2317 device->rescan = false; 2318 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2319 scsi_rescan_device(device->sdev); 2320 } else { 2321 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2322 } 2323 } 2324 2325 /* Expose any new devices. */ 2326 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 2327 if (!pqi_is_device_added(device)) { 2328 rc = pqi_add_device(ctrl_info, device); 2329 if (rc == 0) { 2330 pqi_dev_info(ctrl_info, "added", device); 2331 } else { 2332 dev_warn(&ctrl_info->pci_dev->dev, 2333 "scsi %d:%d:%d:%d addition failed, device not added\n", 2334 ctrl_info->scsi_host->host_no, 2335 device->bus, device->target, 2336 device->lun); 2337 pqi_fixup_botched_add(ctrl_info, device); 2338 } 2339 } 2340 } 2341 2342 } 2343 2344 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 2345 { 2346 /* 2347 * Only support the HBA controller itself as a RAID 2348 * controller. If it's a RAID controller other than 2349 * the HBA itself (an external RAID controller, for 2350 * example), we don't support it. 2351 */ 2352 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 2353 !pqi_is_hba_lunid(device->scsi3addr)) 2354 return false; 2355 2356 return true; 2357 } 2358 2359 static inline bool pqi_skip_device(u8 *scsi3addr) 2360 { 2361 /* Ignore all masked devices. */ 2362 if (MASKED_DEVICE(scsi3addr)) 2363 return true; 2364 2365 return false; 2366 } 2367 2368 static inline void pqi_mask_device(u8 *scsi3addr) 2369 { 2370 scsi3addr[3] |= 0xc0; 2371 } 2372 2373 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 2374 { 2375 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); 2376 } 2377 2378 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2379 { 2380 int i; 2381 int rc; 2382 LIST_HEAD(new_device_list_head); 2383 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2384 struct report_log_lun_list *logdev_list = NULL; 2385 struct report_phys_lun_16byte_wwid *phys_lun; 2386 struct report_log_lun *log_lun; 2387 struct bmic_identify_physical_device *id_phys = NULL; 2388 u32 num_physicals; 2389 u32 num_logicals; 2390 struct pqi_scsi_dev **new_device_list = NULL; 2391 struct pqi_scsi_dev *device; 2392 struct pqi_scsi_dev *next; 2393 unsigned int num_new_devices; 2394 unsigned int num_valid_devices; 2395 bool is_physical_device; 2396 u8 *scsi3addr; 2397 unsigned int physical_index; 2398 unsigned int logical_index; 2399 static char *out_of_memory_msg = 2400 "failed to allocate memory, device discovery stopped"; 2401 2402 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 2403 if (rc) 2404 goto out; 2405 2406 if (physdev_list) 2407 num_physicals = 2408 get_unaligned_be32(&physdev_list->header.list_length) 2409 / sizeof(physdev_list->lun_entries[0]); 2410 else 2411 num_physicals = 0; 2412 2413 if (logdev_list) 2414 num_logicals = 2415 get_unaligned_be32(&logdev_list->header.list_length) 2416 / sizeof(logdev_list->lun_entries[0]); 2417 else 2418 num_logicals = 0; 2419 2420 if (num_physicals) { 2421 /* 2422 * We need this buffer for calls to 
pqi_get_physical_disk_info() 2423 * below. We allocate it here instead of inside 2424 * pqi_get_physical_disk_info() because it's a fairly large 2425 * buffer. 2426 */ 2427 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2428 if (!id_phys) { 2429 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2430 out_of_memory_msg); 2431 rc = -ENOMEM; 2432 goto out; 2433 } 2434 2435 if (pqi_hide_vsep) { 2436 for (i = num_physicals - 1; i >= 0; i--) { 2437 phys_lun = &physdev_list->lun_entries[i]; 2438 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2439 pqi_mask_device(phys_lun->lunid); 2440 break; 2441 } 2442 } 2443 } 2444 } 2445 2446 if (num_logicals && 2447 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) 2448 ctrl_info->lv_drive_type_mix_valid = true; 2449 2450 num_new_devices = num_physicals + num_logicals; 2451 2452 new_device_list = kmalloc_array(num_new_devices, 2453 sizeof(*new_device_list), 2454 GFP_KERNEL); 2455 if (!new_device_list) { 2456 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2457 rc = -ENOMEM; 2458 goto out; 2459 } 2460 2461 for (i = 0; i < num_new_devices; i++) { 2462 device = kzalloc(sizeof(*device), GFP_KERNEL); 2463 if (!device) { 2464 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2465 out_of_memory_msg); 2466 rc = -ENOMEM; 2467 goto out; 2468 } 2469 list_add_tail(&device->new_device_list_entry, 2470 &new_device_list_head); 2471 } 2472 2473 device = NULL; 2474 num_valid_devices = 0; 2475 physical_index = 0; 2476 logical_index = 0; 2477 2478 for (i = 0; i < num_new_devices; i++) { 2479 2480 if ((!pqi_expose_ld_first && i < num_physicals) || 2481 (pqi_expose_ld_first && i >= num_logicals)) { 2482 is_physical_device = true; 2483 phys_lun = &physdev_list->lun_entries[physical_index++]; 2484 log_lun = NULL; 2485 scsi3addr = phys_lun->lunid; 2486 } else { 2487 is_physical_device = false; 2488 phys_lun = NULL; 2489 log_lun = &logdev_list->lun_entries[logical_index++]; 2490 scsi3addr = log_lun->lunid; 2491 } 2492 2493 if (is_physical_device && pqi_skip_device(scsi3addr)) 2494 continue; 2495 2496 if (device) 2497 device = list_next_entry(device, new_device_list_entry); 2498 else 2499 device = list_first_entry(&new_device_list_head, 2500 struct pqi_scsi_dev, new_device_list_entry); 2501 2502 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2503 device->is_physical_device = is_physical_device; 2504 if (is_physical_device) { 2505 device->device_type = phys_lun->device_type; 2506 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2507 device->is_expander_smp_device = true; 2508 } else { 2509 device->is_external_raid_device = 2510 pqi_is_external_raid_addr(scsi3addr); 2511 } 2512 2513 if (!pqi_is_supported_device(device)) 2514 continue; 2515 2516 /* Gather information about the device. */ 2517 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2518 if (rc == -ENOMEM) { 2519 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2520 out_of_memory_msg); 2521 goto out; 2522 } 2523 if (rc) { 2524 if (device->is_physical_device) 2525 dev_warn(&ctrl_info->pci_dev->dev, 2526 "obtaining device info failed, skipping physical device %016llx%016llx\n", 2527 get_unaligned_be64(&phys_lun->wwid[0]), 2528 get_unaligned_be64(&phys_lun->wwid[8])); 2529 else 2530 dev_warn(&ctrl_info->pci_dev->dev, 2531 "obtaining device info failed, skipping logical device %08x%08x\n", 2532 *((u32 *)&device->scsi3addr), 2533 *((u32 *)&device->scsi3addr[4])); 2534 rc = 0; 2535 continue; 2536 } 2537 2538 /* Do not present disks that the OS cannot fully probe. 
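 * (for example, a drive still performing a SANITIZE operation; see the
 * comment above pqi_keep_device_offline())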
*/ 2539 if (pqi_keep_device_offline(device)) 2540 continue; 2541 2542 pqi_assign_bus_target_lun(device); 2543 2544 if (device->is_physical_device) { 2545 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2546 if ((phys_lun->device_flags & 2547 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2548 phys_lun->aio_handle) { 2549 device->aio_enabled = true; 2550 device->aio_handle = 2551 phys_lun->aio_handle; 2552 } 2553 } else { 2554 memcpy(device->volume_id, log_lun->volume_id, 2555 sizeof(device->volume_id)); 2556 } 2557 2558 device->sas_address = get_unaligned_be64(&device->wwid[0]); 2559 2560 new_device_list[num_valid_devices++] = device; 2561 } 2562 2563 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2564 2565 out: 2566 list_for_each_entry_safe(device, next, &new_device_list_head, 2567 new_device_list_entry) { 2568 if (device->keep_device) 2569 continue; 2570 list_del(&device->new_device_list_entry); 2571 pqi_free_device(device); 2572 } 2573 2574 kfree(new_device_list); 2575 kfree(physdev_list); 2576 kfree(logdev_list); 2577 kfree(id_phys); 2578 2579 return rc; 2580 } 2581 2582 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2583 { 2584 int rc; 2585 int mutex_acquired; 2586 2587 if (pqi_ctrl_offline(ctrl_info)) 2588 return -ENXIO; 2589 2590 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 2591 2592 if (!mutex_acquired) { 2593 if (pqi_ctrl_scan_blocked(ctrl_info)) 2594 return -EBUSY; 2595 pqi_schedule_rescan_worker_delayed(ctrl_info); 2596 return -EINPROGRESS; 2597 } 2598 2599 rc = pqi_update_scsi_devices(ctrl_info); 2600 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) 2601 pqi_schedule_rescan_worker_delayed(ctrl_info); 2602 2603 mutex_unlock(&ctrl_info->scan_mutex); 2604 2605 return rc; 2606 } 2607 2608 static void pqi_scan_start(struct Scsi_Host *shost) 2609 { 2610 struct pqi_ctrl_info *ctrl_info; 2611 2612 ctrl_info = shost_to_hba(shost); 2613 2614 pqi_scan_scsi_devices(ctrl_info); 2615 } 2616 2617 /* Returns TRUE if scan is finished. */ 2618 2619 static int pqi_scan_finished(struct Scsi_Host *shost, 2620 unsigned long elapsed_time) 2621 { 2622 struct pqi_ctrl_info *ctrl_info; 2623 2624 ctrl_info = shost_priv(shost); 2625 2626 return !mutex_is_locked(&ctrl_info->scan_mutex); 2627 } 2628 2629 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, 2630 struct raid_map *raid_map, u64 first_block) 2631 { 2632 u32 volume_blk_size; 2633 2634 /* 2635 * Set the encryption tweak values based on logical block address. 2636 * If the block size is 512, the tweak value is equal to the LBA. 2637 * For other block sizes, tweak value is (LBA * block size) / 512. 2638 */ 2639 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2640 if (volume_blk_size != 512) 2641 first_block = (first_block * volume_blk_size) / 512; 2642 2643 encryption_info->data_encryption_key_index = 2644 get_unaligned_le16(&raid_map->data_encryption_key_index); 2645 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2646 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2647 } 2648 2649 /* 2650 * Attempt to perform RAID bypass mapping for a logical volume I/O. 
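 * A request is only sent straight to a member physical drive (the AIO
 * path) when it maps onto a single row/column of the RAID map and the
 * RAID-level write limits below permit it; everything else is reported
 * as PQI_RAID_BYPASS_INELIGIBLE and takes the normal RAID path.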
2651 */ 2652 2653 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, 2654 struct pqi_scsi_dev_raid_map_data *rmd) 2655 { 2656 bool is_supported = true; 2657 2658 switch (rmd->raid_level) { 2659 case SA_RAID_0: 2660 break; 2661 case SA_RAID_1: 2662 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2663 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) 2664 is_supported = false; 2665 break; 2666 case SA_RAID_TRIPLE: 2667 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2668 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) 2669 is_supported = false; 2670 break; 2671 case SA_RAID_5: 2672 if (rmd->is_write && (!ctrl_info->enable_r5_writes || 2673 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2674 is_supported = false; 2675 break; 2676 case SA_RAID_6: 2677 if (rmd->is_write && (!ctrl_info->enable_r6_writes || 2678 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2679 is_supported = false; 2680 break; 2681 default: 2682 is_supported = false; 2683 break; 2684 } 2685 2686 return is_supported; 2687 } 2688 2689 #define PQI_RAID_BYPASS_INELIGIBLE 1 2690 2691 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, 2692 struct pqi_scsi_dev_raid_map_data *rmd) 2693 { 2694 /* Check for valid opcode, get LBA and block count. */ 2695 switch (scmd->cmnd[0]) { 2696 case WRITE_6: 2697 rmd->is_write = true; 2698 fallthrough; 2699 case READ_6: 2700 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2701 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2702 rmd->block_cnt = (u32)scmd->cmnd[4]; 2703 if (rmd->block_cnt == 0) 2704 rmd->block_cnt = 256; 2705 break; 2706 case WRITE_10: 2707 rmd->is_write = true; 2708 fallthrough; 2709 case READ_10: 2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2711 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2712 break; 2713 case WRITE_12: 2714 rmd->is_write = true; 2715 fallthrough; 2716 case READ_12: 2717 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2719 break; 2720 case WRITE_16: 2721 rmd->is_write = true; 2722 fallthrough; 2723 case READ_16: 2724 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); 2725 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2726 break; 2727 default: 2728 /* Process via normal I/O path. */ 2729 return PQI_RAID_BYPASS_INELIGIBLE; 2730 } 2731 2732 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); 2733 2734 return 0; 2735 } 2736 2737 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, 2738 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) 2739 { 2740 #if BITS_PER_LONG == 32 2741 u64 tmpdiv; 2742 #endif 2743 2744 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; 2745 2746 /* Check for invalid block or wraparound. */ 2747 if (rmd->last_block >= 2748 get_unaligned_le64(&raid_map->volume_blk_cnt) || 2749 rmd->last_block < rmd->first_block) 2750 return PQI_RAID_BYPASS_INELIGIBLE; 2751 2752 rmd->data_disks_per_row = 2753 get_unaligned_le16(&raid_map->data_disks_per_row); 2754 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); 2755 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2756 2757 /* Calculate stripe information for the request. 
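 * Illustrative example (arbitrary values, not from any particular
 * controller): with data_disks_per_row = 4 and strip_size = 128,
 * blocks_per_row = 512, so first_block = 1000 falls in row 1
 * (1000 / 512), at row offset 488 (1000 - 512), i.e. column 3
 * (488 / 128).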
*/ 2758 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; 2759 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2760 return PQI_RAID_BYPASS_INELIGIBLE; 2761 #if BITS_PER_LONG == 32 2762 tmpdiv = rmd->first_block; 2763 do_div(tmpdiv, rmd->blocks_per_row); 2764 rmd->first_row = tmpdiv; 2765 tmpdiv = rmd->last_block; 2766 do_div(tmpdiv, rmd->blocks_per_row); 2767 rmd->last_row = tmpdiv; 2768 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); 2769 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); 2770 tmpdiv = rmd->first_row_offset; 2771 do_div(tmpdiv, rmd->strip_size); 2772 rmd->first_column = tmpdiv; 2773 tmpdiv = rmd->last_row_offset; 2774 do_div(tmpdiv, rmd->strip_size); 2775 rmd->last_column = tmpdiv; 2776 #else 2777 rmd->first_row = rmd->first_block / rmd->blocks_per_row; 2778 rmd->last_row = rmd->last_block / rmd->blocks_per_row; 2779 rmd->first_row_offset = (u32)(rmd->first_block - 2780 (rmd->first_row * rmd->blocks_per_row)); 2781 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * 2782 rmd->blocks_per_row)); 2783 rmd->first_column = rmd->first_row_offset / rmd->strip_size; 2784 rmd->last_column = rmd->last_row_offset / rmd->strip_size; 2785 #endif 2786 2787 /* If this isn't a single row/column then give to the controller. */ 2788 if (rmd->first_row != rmd->last_row || 2789 rmd->first_column != rmd->last_column) 2790 return PQI_RAID_BYPASS_INELIGIBLE; 2791 2792 /* Proceeding with driver mapping. */ 2793 rmd->total_disks_per_row = rmd->data_disks_per_row + 2794 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2795 rmd->map_row = ((u32)(rmd->first_row >> 2796 raid_map->parity_rotation_shift)) % 2797 get_unaligned_le16(&raid_map->row_cnt); 2798 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + 2799 rmd->first_column; 2800 2801 return 0; 2802 } 2803 2804 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, 2805 struct raid_map *raid_map) 2806 { 2807 #if BITS_PER_LONG == 32 2808 u64 tmpdiv; 2809 #endif 2810 2811 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2812 return PQI_RAID_BYPASS_INELIGIBLE; 2813 2814 /* RAID 50/60 */ 2815 /* Verify first and last block are in same RAID group. */ 2816 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; 2817 #if BITS_PER_LONG == 32 2818 tmpdiv = rmd->first_block; 2819 rmd->first_group = do_div(tmpdiv, rmd->stripesize); 2820 tmpdiv = rmd->first_group; 2821 do_div(tmpdiv, rmd->blocks_per_row); 2822 rmd->first_group = tmpdiv; 2823 tmpdiv = rmd->last_block; 2824 rmd->last_group = do_div(tmpdiv, rmd->stripesize); 2825 tmpdiv = rmd->last_group; 2826 do_div(tmpdiv, rmd->blocks_per_row); 2827 rmd->last_group = tmpdiv; 2828 #else 2829 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; 2830 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; 2831 #endif 2832 if (rmd->first_group != rmd->last_group) 2833 return PQI_RAID_BYPASS_INELIGIBLE; 2834 2835 /* Verify request is in a single row of RAID 5/6. 
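 * (a "row" at this point is measured in units of stripesize, i.e.
 * blocks_per_row * layout_map_count, so a request crossing a full
 * stripe boundary takes the normal RAID path instead)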
*/ 2836 #if BITS_PER_LONG == 32 2837 tmpdiv = rmd->first_block; 2838 do_div(tmpdiv, rmd->stripesize); 2839 rmd->first_row = tmpdiv; 2840 rmd->r5or6_first_row = tmpdiv; 2841 tmpdiv = rmd->last_block; 2842 do_div(tmpdiv, rmd->stripesize); 2843 rmd->r5or6_last_row = tmpdiv; 2844 #else 2845 rmd->first_row = rmd->r5or6_first_row = 2846 rmd->first_block / rmd->stripesize; 2847 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; 2848 #endif 2849 if (rmd->r5or6_first_row != rmd->r5or6_last_row) 2850 return PQI_RAID_BYPASS_INELIGIBLE; 2851 2852 /* Verify request is in a single column. */ 2853 #if BITS_PER_LONG == 32 2854 tmpdiv = rmd->first_block; 2855 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); 2856 tmpdiv = rmd->first_row_offset; 2857 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); 2858 rmd->r5or6_first_row_offset = rmd->first_row_offset; 2859 tmpdiv = rmd->last_block; 2860 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); 2861 tmpdiv = rmd->r5or6_last_row_offset; 2862 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); 2863 tmpdiv = rmd->r5or6_first_row_offset; 2864 do_div(tmpdiv, rmd->strip_size); 2865 rmd->first_column = rmd->r5or6_first_column = tmpdiv; 2866 tmpdiv = rmd->r5or6_last_row_offset; 2867 do_div(tmpdiv, rmd->strip_size); 2868 rmd->r5or6_last_column = tmpdiv; 2869 #else 2870 rmd->first_row_offset = rmd->r5or6_first_row_offset = 2871 (u32)((rmd->first_block % rmd->stripesize) % 2872 rmd->blocks_per_row); 2873 2874 rmd->r5or6_last_row_offset = 2875 (u32)((rmd->last_block % rmd->stripesize) % 2876 rmd->blocks_per_row); 2877 2878 rmd->first_column = 2879 rmd->r5or6_first_row_offset / rmd->strip_size; 2880 rmd->r5or6_first_column = rmd->first_column; 2881 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; 2882 #endif 2883 if (rmd->r5or6_first_column != rmd->r5or6_last_column) 2884 return PQI_RAID_BYPASS_INELIGIBLE; 2885 2886 /* Request is eligible. */ 2887 rmd->map_row = 2888 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % 2889 get_unaligned_le16(&raid_map->row_cnt); 2890 2891 rmd->map_index = (rmd->first_group * 2892 (get_unaligned_le16(&raid_map->row_cnt) * 2893 rmd->total_disks_per_row)) + 2894 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; 2895 2896 if (rmd->is_write) { 2897 u32 index; 2898 2899 /* 2900 * p_parity_it_nexus and q_parity_it_nexus are pointers to the 2901 * parity entries inside the device's raid_map. 2902 * 2903 * A device's RAID map is bounded by: number of RAID disks squared. 2904 * 2905 * The devices RAID map size is checked during device 2906 * initialization. 2907 */ 2908 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); 2909 index *= rmd->total_disks_per_row; 2910 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); 2911 2912 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; 2913 if (rmd->raid_level == SA_RAID_6) { 2914 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; 2915 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; 2916 } 2917 #if BITS_PER_LONG == 32 2918 tmpdiv = rmd->first_block; 2919 do_div(tmpdiv, rmd->blocks_per_row); 2920 rmd->row = tmpdiv; 2921 #else 2922 rmd->row = rmd->first_block / rmd->blocks_per_row; 2923 #endif 2924 } 2925 2926 return 0; 2927 } 2928 2929 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) 2930 { 2931 /* Build the new CDB for the physical disk I/O. */ 2932 if (rmd->disk_block > 0xffffffff) { 2933 rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; 2934 rmd->cdb[1] = 0; 2935 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); 2936 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); 2937 rmd->cdb[14] = 0; 2938 rmd->cdb[15] = 0; 2939 rmd->cdb_length = 16; 2940 } else { 2941 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; 2942 rmd->cdb[1] = 0; 2943 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); 2944 rmd->cdb[6] = 0; 2945 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); 2946 rmd->cdb[9] = 0; 2947 rmd->cdb_length = 10; 2948 } 2949 } 2950 2951 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, 2952 struct pqi_scsi_dev_raid_map_data *rmd) 2953 { 2954 u32 index; 2955 u32 group; 2956 2957 group = rmd->map_index / rmd->data_disks_per_row; 2958 2959 index = rmd->map_index - (group * rmd->data_disks_per_row); 2960 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; 2961 index += rmd->data_disks_per_row; 2962 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; 2963 if (rmd->layout_map_count > 2) { 2964 index += rmd->data_disks_per_row; 2965 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; 2966 } 2967 2968 rmd->num_it_nexus_entries = rmd->layout_map_count; 2969 } 2970 2971 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2972 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2973 struct pqi_queue_group *queue_group) 2974 { 2975 int rc; 2976 struct raid_map *raid_map; 2977 u32 group; 2978 u32 next_bypass_group; 2979 struct pqi_encryption_info *encryption_info_ptr; 2980 struct pqi_encryption_info encryption_info; 2981 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 2982 2983 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 2984 if (rc) 2985 return PQI_RAID_BYPASS_INELIGIBLE; 2986 2987 rmd.raid_level = device->raid_level; 2988 2989 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) 2990 return PQI_RAID_BYPASS_INELIGIBLE; 2991 2992 if (unlikely(rmd.block_cnt == 0)) 2993 return PQI_RAID_BYPASS_INELIGIBLE; 2994 2995 raid_map = device->raid_map; 2996 2997 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); 2998 if (rc) 2999 return PQI_RAID_BYPASS_INELIGIBLE; 3000 3001 if (device->raid_level == SA_RAID_1 || 3002 device->raid_level == SA_RAID_TRIPLE) { 3003 if (rmd.is_write) { 3004 pqi_calc_aio_r1_nexus(raid_map, &rmd); 3005 } else { 3006 group = device->next_bypass_group[rmd.map_index]; 3007 next_bypass_group = group + 1; 3008 if (next_bypass_group >= rmd.layout_map_count) 3009 next_bypass_group = 0; 3010 device->next_bypass_group[rmd.map_index] = next_bypass_group; 3011 rmd.map_index += group * rmd.data_disks_per_row; 3012 } 3013 } else if ((device->raid_level == SA_RAID_5 || 3014 device->raid_level == SA_RAID_6) && 3015 (rmd.layout_map_count > 1 || rmd.is_write)) { 3016 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); 3017 if (rc) 3018 return PQI_RAID_BYPASS_INELIGIBLE; 3019 } 3020 3021 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) 3022 return PQI_RAID_BYPASS_INELIGIBLE; 3023 3024 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; 3025 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 3026 rmd.first_row * rmd.strip_size + 3027 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); 3028 rmd.disk_block_cnt = rmd.block_cnt; 3029 3030 /* Handle differing logical/physical block sizes. 
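 * For example, a volume exposing 4K logical blocks backed by 512-byte
 * physical blocks would have phys_blk_shift = 3, scaling both the
 * starting block and the block count by 8.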
*/ 3031 if (raid_map->phys_blk_shift) { 3032 rmd.disk_block <<= raid_map->phys_blk_shift; 3033 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; 3034 } 3035 3036 if (unlikely(rmd.disk_block_cnt > 0xffff)) 3037 return PQI_RAID_BYPASS_INELIGIBLE; 3038 3039 pqi_set_aio_cdb(&rmd); 3040 3041 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { 3042 if (rmd.data_length > device->max_transfer_encrypted) 3043 return PQI_RAID_BYPASS_INELIGIBLE; 3044 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); 3045 encryption_info_ptr = &encryption_info; 3046 } else { 3047 encryption_info_ptr = NULL; 3048 } 3049 3050 if (rmd.is_write) { 3051 switch (device->raid_level) { 3052 case SA_RAID_1: 3053 case SA_RAID_TRIPLE: 3054 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, 3055 encryption_info_ptr, device, &rmd); 3056 case SA_RAID_5: 3057 case SA_RAID_6: 3058 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, 3059 encryption_info_ptr, device, &rmd); 3060 } 3061 } 3062 3063 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, 3064 rmd.cdb, rmd.cdb_length, queue_group, 3065 encryption_info_ptr, true, false); 3066 } 3067 3068 #define PQI_STATUS_IDLE 0x0 3069 3070 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 3071 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 3072 3073 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 3074 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 3075 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 3076 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 3077 #define PQI_DEVICE_STATE_ERROR 0x4 3078 3079 #define PQI_MODE_READY_TIMEOUT_SECS 30 3080 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 3081 3082 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 3083 { 3084 struct pqi_device_registers __iomem *pqi_registers; 3085 unsigned long timeout; 3086 u64 signature; 3087 u8 status; 3088 3089 pqi_registers = ctrl_info->pqi_registers; 3090 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 3091 3092 while (1) { 3093 signature = readq(&pqi_registers->signature); 3094 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 3095 sizeof(signature)) == 0) 3096 break; 3097 if (time_after(jiffies, timeout)) { 3098 dev_err(&ctrl_info->pci_dev->dev, 3099 "timed out waiting for PQI signature\n"); 3100 return -ETIMEDOUT; 3101 } 3102 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3103 } 3104 3105 while (1) { 3106 status = readb(&pqi_registers->function_and_status_code); 3107 if (status == PQI_STATUS_IDLE) 3108 break; 3109 if (time_after(jiffies, timeout)) { 3110 dev_err(&ctrl_info->pci_dev->dev, 3111 "timed out waiting for PQI IDLE\n"); 3112 return -ETIMEDOUT; 3113 } 3114 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3115 } 3116 3117 while (1) { 3118 if (readl(&pqi_registers->device_status) == 3119 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 3120 break; 3121 if (time_after(jiffies, timeout)) { 3122 dev_err(&ctrl_info->pci_dev->dev, 3123 "timed out waiting for PQI all registers ready\n"); 3124 return -ETIMEDOUT; 3125 } 3126 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3127 } 3128 3129 return 0; 3130 } 3131 3132 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 3133 { 3134 struct pqi_scsi_dev *device; 3135 3136 device = io_request->scmd->device->hostdata; 3137 device->raid_bypass_enabled = false; 3138 device->aio_enabled = false; 3139 } 3140 3141 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 3142 { 3143 struct pqi_ctrl_info *ctrl_info; 3144 struct pqi_scsi_dev *device; 3145 3146 device = 
sdev->hostdata; 3147 if (device->device_offline) 3148 return; 3149 3150 device->device_offline = true; 3151 ctrl_info = shost_to_hba(sdev->host); 3152 pqi_schedule_rescan_worker(ctrl_info); 3153 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 3154 path, ctrl_info->scsi_host->host_no, device->bus, 3155 device->target, device->lun); 3156 } 3157 3158 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 3159 { 3160 u8 scsi_status; 3161 u8 host_byte; 3162 struct scsi_cmnd *scmd; 3163 struct pqi_raid_error_info *error_info; 3164 size_t sense_data_length; 3165 int residual_count; 3166 int xfer_count; 3167 struct scsi_sense_hdr sshdr; 3168 3169 scmd = io_request->scmd; 3170 if (!scmd) 3171 return; 3172 3173 error_info = io_request->error_info; 3174 scsi_status = error_info->status; 3175 host_byte = DID_OK; 3176 3177 switch (error_info->data_out_result) { 3178 case PQI_DATA_IN_OUT_GOOD: 3179 break; 3180 case PQI_DATA_IN_OUT_UNDERFLOW: 3181 xfer_count = 3182 get_unaligned_le32(&error_info->data_out_transferred); 3183 residual_count = scsi_bufflen(scmd) - xfer_count; 3184 scsi_set_resid(scmd, residual_count); 3185 if (xfer_count < scmd->underflow) 3186 host_byte = DID_SOFT_ERROR; 3187 break; 3188 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 3189 case PQI_DATA_IN_OUT_ABORTED: 3190 host_byte = DID_ABORT; 3191 break; 3192 case PQI_DATA_IN_OUT_TIMEOUT: 3193 host_byte = DID_TIME_OUT; 3194 break; 3195 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 3196 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 3197 case PQI_DATA_IN_OUT_BUFFER_ERROR: 3198 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 3199 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 3200 case PQI_DATA_IN_OUT_ERROR: 3201 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 3202 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 3203 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 3204 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 3205 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 3206 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 3207 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 3208 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 3209 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 3210 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 3211 default: 3212 host_byte = DID_ERROR; 3213 break; 3214 } 3215 3216 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 3217 if (sense_data_length == 0) 3218 sense_data_length = 3219 get_unaligned_le16(&error_info->response_data_length); 3220 if (sense_data_length) { 3221 if (sense_data_length > sizeof(error_info->data)) 3222 sense_data_length = sizeof(error_info->data); 3223 3224 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3225 scsi_normalize_sense(error_info->data, 3226 sense_data_length, &sshdr) && 3227 sshdr.sense_key == HARDWARE_ERROR && 3228 sshdr.asc == 0x3e) { 3229 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 3230 struct pqi_scsi_dev *device = scmd->device->hostdata; 3231 3232 switch (sshdr.ascq) { 3233 case 0x1: /* LOGICAL UNIT FAILURE */ 3234 if (printk_ratelimit()) 3235 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 3236 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3237 pqi_take_device_offline(scmd->device, "RAID"); 3238 host_byte = DID_NO_CONNECT; 3239 break; 3240 3241 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 3242 if (printk_ratelimit()) 3243 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 3244 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3245 break; 3246 } 3247 } 3248 3249 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3250 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3251 memcpy(scmd->sense_buffer, error_info->data, 3252 sense_data_length); 3253 } 3254 3255 if (pqi_cmd_priv(scmd)->this_residual && 3256 !pqi_is_logical_device(scmd->device->hostdata) && 3257 scsi_status == SAM_STAT_CHECK_CONDITION && 3258 host_byte == DID_OK && 3259 sense_data_length && 3260 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && 3261 sshdr.sense_key == ILLEGAL_REQUEST && 3262 sshdr.asc == 0x26 && 3263 sshdr.ascq == 0x0) { 3264 host_byte = DID_NO_CONNECT; 3265 pqi_take_device_offline(scmd->device, "AIO"); 3266 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); 3267 } 3268 3269 scmd->result = scsi_status; 3270 set_host_byte(scmd, host_byte); 3271 } 3272 3273 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 3274 { 3275 u8 scsi_status; 3276 u8 host_byte; 3277 struct scsi_cmnd *scmd; 3278 struct pqi_aio_error_info *error_info; 3279 size_t sense_data_length; 3280 int residual_count; 3281 int xfer_count; 3282 bool device_offline; 3283 3284 scmd = io_request->scmd; 3285 error_info = io_request->error_info; 3286 host_byte = DID_OK; 3287 sense_data_length = 0; 3288 device_offline = false; 3289 3290 switch (error_info->service_response) { 3291 case PQI_AIO_SERV_RESPONSE_COMPLETE: 3292 scsi_status = error_info->status; 3293 break; 3294 case PQI_AIO_SERV_RESPONSE_FAILURE: 3295 switch (error_info->status) { 3296 case PQI_AIO_STATUS_IO_ABORTED: 3297 scsi_status = SAM_STAT_TASK_ABORTED; 3298 break; 3299 case PQI_AIO_STATUS_UNDERRUN: 3300 scsi_status = SAM_STAT_GOOD; 3301 residual_count = get_unaligned_le32( 3302 &error_info->residual_count); 3303 scsi_set_resid(scmd, residual_count); 3304 xfer_count = scsi_bufflen(scmd) - residual_count; 3305 if (xfer_count < scmd->underflow) 3306 host_byte = DID_SOFT_ERROR; 3307 break; 3308 case PQI_AIO_STATUS_OVERRUN: 3309 scsi_status = SAM_STAT_GOOD; 3310 break; 3311 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 3312 pqi_aio_path_disabled(io_request); 3313 scsi_status = SAM_STAT_GOOD; 3314 io_request->status = -EAGAIN; 3315 break; 3316 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 3317 case PQI_AIO_STATUS_INVALID_DEVICE: 3318 if (!io_request->raid_bypass) { 3319 device_offline = true; 3320 pqi_take_device_offline(scmd->device, "AIO"); 3321 host_byte = DID_NO_CONNECT; 3322 } 3323 scsi_status = SAM_STAT_CHECK_CONDITION; 3324 break; 3325 case PQI_AIO_STATUS_IO_ERROR: 3326 default: 3327 scsi_status = SAM_STAT_CHECK_CONDITION; 3328 break; 3329 } 3330 break; 3331 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 3332 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 3333 scsi_status = SAM_STAT_GOOD; 3334 break; 3335 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 3336 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 3337 default: 3338 scsi_status = SAM_STAT_CHECK_CONDITION; 3339 break; 3340 } 3341 3342 if (error_info->data_present) { 3343 sense_data_length = 3344 get_unaligned_le16(&error_info->data_length); 3345 if (sense_data_length) { 3346 if (sense_data_length > sizeof(error_info->data)) 3347 sense_data_length = sizeof(error_info->data); 3348 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3349 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3350 memcpy(scmd->sense_buffer, error_info->data, 3351 sense_data_length); 3352 } 3353 } 3354 3355 if (device_offline && sense_data_length == 0) 3356 scsi_build_sense(scmd, 0, 
HARDWARE_ERROR, 0x3e, 0x1); 3357 3358 scmd->result = scsi_status; 3359 set_host_byte(scmd, host_byte); 3360 } 3361 3362 static void pqi_process_io_error(unsigned int iu_type, 3363 struct pqi_io_request *io_request) 3364 { 3365 switch (iu_type) { 3366 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3367 pqi_process_raid_io_error(io_request); 3368 break; 3369 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3370 pqi_process_aio_io_error(io_request); 3371 break; 3372 } 3373 } 3374 3375 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, 3376 struct pqi_task_management_response *response) 3377 { 3378 int rc; 3379 3380 switch (response->response_code) { 3381 case SOP_TMF_COMPLETE: 3382 case SOP_TMF_FUNCTION_SUCCEEDED: 3383 rc = 0; 3384 break; 3385 case SOP_TMF_REJECTED: 3386 rc = -EAGAIN; 3387 break; 3388 case SOP_TMF_INCORRECT_LOGICAL_UNIT: 3389 rc = -ENODEV; 3390 break; 3391 default: 3392 rc = -EIO; 3393 break; 3394 } 3395 3396 if (rc) 3397 dev_err(&ctrl_info->pci_dev->dev, 3398 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); 3399 3400 return rc; 3401 } 3402 3403 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3404 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3405 { 3406 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3407 } 3408 3409 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 3410 { 3411 int num_responses; 3412 pqi_index_t oq_pi; 3413 pqi_index_t oq_ci; 3414 struct pqi_io_request *io_request; 3415 struct pqi_io_response *response; 3416 u16 request_id; 3417 3418 num_responses = 0; 3419 oq_ci = queue_group->oq_ci_copy; 3420 3421 while (1) { 3422 oq_pi = readl(queue_group->oq_pi); 3423 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3424 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3425 dev_err(&ctrl_info->pci_dev->dev, 3426 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3427 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 3428 return -1; 3429 } 3430 if (oq_pi == oq_ci) 3431 break; 3432 3433 num_responses++; 3434 response = queue_group->oq_element_array + 3435 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3436 3437 request_id = get_unaligned_le16(&response->request_id); 3438 if (request_id >= ctrl_info->max_io_slots) { 3439 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3440 dev_err(&ctrl_info->pci_dev->dev, 3441 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3442 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 3443 return -1; 3444 } 3445 3446 io_request = &ctrl_info->io_request_pool[request_id]; 3447 if (atomic_read(&io_request->refcount) == 0) { 3448 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3449 dev_err(&ctrl_info->pci_dev->dev, 3450 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3451 request_id, oq_pi, oq_ci); 3452 return -1; 3453 } 3454 3455 switch (response->header.iu_type) { 3456 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 3457 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 3458 if (io_request->scmd) 3459 io_request->scmd->result = 0; 3460 fallthrough; 3461 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 3462 break; 3463 case PQI_RESPONSE_IU_VENDOR_GENERAL: 3464 io_request->status = 3465 get_unaligned_le16( 3466 &((struct pqi_vendor_general_response *)response)->status); 3467 break; 3468 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 3469 io_request->status = 
pqi_interpret_task_management_response(ctrl_info, 3470 (void *)response); 3471 break; 3472 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 3473 pqi_aio_path_disabled(io_request); 3474 io_request->status = -EAGAIN; 3475 break; 3476 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3477 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3478 io_request->error_info = ctrl_info->error_buffer + 3479 (get_unaligned_le16(&response->error_index) * 3480 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 3481 pqi_process_io_error(response->header.iu_type, io_request); 3482 break; 3483 default: 3484 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3485 dev_err(&ctrl_info->pci_dev->dev, 3486 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3487 response->header.iu_type, oq_pi, oq_ci); 3488 return -1; 3489 } 3490 3491 io_request->io_complete_callback(io_request, io_request->context); 3492 3493 /* 3494 * Note that the I/O request structure CANNOT BE TOUCHED after 3495 * returning from the I/O completion callback! 3496 */ 3497 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 3498 } 3499 3500 if (num_responses) { 3501 queue_group->oq_ci_copy = oq_ci; 3502 writel(oq_ci, queue_group->oq_ci); 3503 } 3504 3505 return num_responses; 3506 } 3507 3508 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3509 unsigned int ci, unsigned int elements_in_queue) 3510 { 3511 unsigned int num_elements_used; 3512 3513 if (pi >= ci) 3514 num_elements_used = pi - ci; 3515 else 3516 num_elements_used = elements_in_queue - ci + pi; 3517 3518 return elements_in_queue - num_elements_used - 1; 3519 } 3520 3521 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3522 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3523 { 3524 pqi_index_t iq_pi; 3525 pqi_index_t iq_ci; 3526 unsigned long flags; 3527 void *next_element; 3528 struct pqi_queue_group *queue_group; 3529 3530 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3531 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3532 3533 while (1) { 3534 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3535 3536 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3537 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3538 3539 if (pqi_num_elements_free(iq_pi, iq_ci, 3540 ctrl_info->num_elements_per_iq)) 3541 break; 3542 3543 spin_unlock_irqrestore( 3544 &queue_group->submit_lock[RAID_PATH], flags); 3545 3546 if (pqi_ctrl_offline(ctrl_info)) 3547 return; 3548 } 3549 3550 next_element = queue_group->iq_element_array[RAID_PATH] + 3551 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3552 3553 memcpy(next_element, iu, iu_length); 3554 3555 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3556 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3557 3558 /* 3559 * This write notifies the controller that an IU is available to be 3560 * processed. 
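 * The host owns the producer index (PI); the controller advances the
 * consumer index (CI) as it drains the queue, which is what
 * pqi_num_elements_free() compares against above before an element is
 * queued.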
3561 */ 3562 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3563 3564 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3565 } 3566 3567 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3568 struct pqi_event *event) 3569 { 3570 struct pqi_event_acknowledge_request request; 3571 3572 memset(&request, 0, sizeof(request)); 3573 3574 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3575 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3576 &request.header.iu_length); 3577 request.event_type = event->event_type; 3578 put_unaligned_le16(event->event_id, &request.event_id); 3579 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); 3580 3581 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3582 } 3583 3584 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3585 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3586 3587 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3588 struct pqi_ctrl_info *ctrl_info) 3589 { 3590 u8 status; 3591 unsigned long timeout; 3592 3593 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; 3594 3595 while (1) { 3596 status = pqi_read_soft_reset_status(ctrl_info); 3597 if (status & PQI_SOFT_RESET_INITIATE) 3598 return RESET_INITIATE_DRIVER; 3599 3600 if (status & PQI_SOFT_RESET_ABORT) 3601 return RESET_ABORT; 3602 3603 if (!sis_is_firmware_running(ctrl_info)) 3604 return RESET_NORESPONSE; 3605 3606 if (time_after(jiffies, timeout)) { 3607 dev_warn(&ctrl_info->pci_dev->dev, 3608 "timed out waiting for soft reset status\n"); 3609 return RESET_TIMEDOUT; 3610 } 3611 3612 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3613 } 3614 } 3615 3616 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) 3617 { 3618 int rc; 3619 unsigned int delay_secs; 3620 enum pqi_soft_reset_status reset_status; 3621 3622 if (ctrl_info->soft_reset_handshake_supported) 3623 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); 3624 else 3625 reset_status = RESET_INITIATE_FIRMWARE; 3626 3627 delay_secs = PQI_POST_RESET_DELAY_SECS; 3628 3629 switch (reset_status) { 3630 case RESET_TIMEDOUT: 3631 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; 3632 fallthrough; 3633 case RESET_INITIATE_DRIVER: 3634 dev_info(&ctrl_info->pci_dev->dev, 3635 "Online Firmware Activation: resetting controller\n"); 3636 sis_soft_reset(ctrl_info); 3637 fallthrough; 3638 case RESET_INITIATE_FIRMWARE: 3639 ctrl_info->pqi_mode_enabled = false; 3640 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 3641 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); 3642 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3643 pqi_ctrl_ofa_done(ctrl_info); 3644 dev_info(&ctrl_info->pci_dev->dev, 3645 "Online Firmware Activation: %s\n", 3646 rc == 0 ? 
"SUCCESS" : "FAILED"); 3647 break; 3648 case RESET_ABORT: 3649 dev_info(&ctrl_info->pci_dev->dev, 3650 "Online Firmware Activation ABORTED\n"); 3651 if (ctrl_info->soft_reset_handshake_supported) 3652 pqi_clear_soft_reset_status(ctrl_info); 3653 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3654 pqi_ctrl_ofa_done(ctrl_info); 3655 pqi_ofa_ctrl_unquiesce(ctrl_info); 3656 break; 3657 case RESET_NORESPONSE: 3658 fallthrough; 3659 default: 3660 dev_err(&ctrl_info->pci_dev->dev, 3661 "unexpected Online Firmware Activation reset status: 0x%x\n", 3662 reset_status); 3663 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3664 pqi_ctrl_ofa_done(ctrl_info); 3665 pqi_ofa_ctrl_unquiesce(ctrl_info); 3666 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3667 break; 3668 } 3669 } 3670 3671 static void pqi_ofa_memory_alloc_worker(struct work_struct *work) 3672 { 3673 struct pqi_ctrl_info *ctrl_info; 3674 3675 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); 3676 3677 pqi_ctrl_ofa_start(ctrl_info); 3678 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); 3679 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); 3680 } 3681 3682 static void pqi_ofa_quiesce_worker(struct work_struct *work) 3683 { 3684 struct pqi_ctrl_info *ctrl_info; 3685 struct pqi_event *event; 3686 3687 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); 3688 3689 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; 3690 3691 pqi_ofa_ctrl_quiesce(ctrl_info); 3692 pqi_acknowledge_event(ctrl_info, event); 3693 pqi_process_soft_reset(ctrl_info); 3694 } 3695 3696 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3697 struct pqi_event *event) 3698 { 3699 bool ack_event; 3700 3701 ack_event = true; 3702 3703 switch (event->event_id) { 3704 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3705 dev_info(&ctrl_info->pci_dev->dev, 3706 "received Online Firmware Activation memory allocation request\n"); 3707 schedule_work(&ctrl_info->ofa_memory_alloc_work); 3708 break; 3709 case PQI_EVENT_OFA_QUIESCE: 3710 dev_info(&ctrl_info->pci_dev->dev, 3711 "received Online Firmware Activation quiesce request\n"); 3712 schedule_work(&ctrl_info->ofa_quiesce_work); 3713 ack_event = false; 3714 break; 3715 case PQI_EVENT_OFA_CANCELED: 3716 dev_info(&ctrl_info->pci_dev->dev, 3717 "received Online Firmware Activation cancel request: reason: %u\n", 3718 ctrl_info->ofa_cancel_reason); 3719 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3720 pqi_ctrl_ofa_done(ctrl_info); 3721 break; 3722 default: 3723 dev_err(&ctrl_info->pci_dev->dev, 3724 "received unknown Online Firmware Activation request: event ID: %u\n", 3725 event->event_id); 3726 break; 3727 } 3728 3729 return ack_event; 3730 } 3731 3732 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) 3733 { 3734 unsigned long flags; 3735 struct pqi_scsi_dev *device; 3736 3737 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3738 3739 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 3740 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 3741 device->rescan = true; 3742 } 3743 3744 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3745 } 3746 3747 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) 3748 { 3749 unsigned long flags; 3750 struct pqi_scsi_dev *device; 3751 3752 
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3753 3754 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 3755 if (device->raid_bypass_enabled) 3756 device->raid_bypass_enabled = false; 3757 3758 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3759 } 3760 3761 static void pqi_event_worker(struct work_struct *work) 3762 { 3763 unsigned int i; 3764 bool rescan_needed; 3765 struct pqi_ctrl_info *ctrl_info; 3766 struct pqi_event *event; 3767 bool ack_event; 3768 3769 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3770 3771 pqi_ctrl_busy(ctrl_info); 3772 pqi_wait_if_ctrl_blocked(ctrl_info); 3773 if (pqi_ctrl_offline(ctrl_info)) 3774 goto out; 3775 3776 rescan_needed = false; 3777 event = ctrl_info->events; 3778 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3779 if (event->pending) { 3780 event->pending = false; 3781 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3782 ack_event = pqi_ofa_process_event(ctrl_info, event); 3783 } else { 3784 ack_event = true; 3785 rescan_needed = true; 3786 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) 3787 pqi_mark_volumes_for_rescan(ctrl_info); 3788 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) 3789 pqi_disable_raid_bypass(ctrl_info); 3790 } 3791 if (ack_event) 3792 pqi_acknowledge_event(ctrl_info, event); 3793 } 3794 event++; 3795 } 3796 3797 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) 3798 3799 if (rescan_needed) 3800 pqi_schedule_rescan_worker_with_delay(ctrl_info, 3801 PQI_RESCAN_WORK_FOR_EVENT_DELAY); 3802 3803 out: 3804 pqi_ctrl_unbusy(ctrl_info); 3805 } 3806 3807 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3808 3809 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3810 { 3811 int num_interrupts; 3812 u32 heartbeat_count; 3813 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); 3814 3815 pqi_check_ctrl_health(ctrl_info); 3816 if (pqi_ctrl_offline(ctrl_info)) 3817 return; 3818 3819 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3820 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3821 3822 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3823 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3824 dev_err(&ctrl_info->pci_dev->dev, 3825 "no heartbeat detected - last heartbeat count: %u\n", 3826 heartbeat_count); 3827 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3828 return; 3829 } 3830 } else { 3831 ctrl_info->previous_num_interrupts = num_interrupts; 3832 } 3833 3834 ctrl_info->previous_heartbeat_count = heartbeat_count; 3835 mod_timer(&ctrl_info->heartbeat_timer, 3836 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3837 } 3838 3839 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3840 { 3841 if (!ctrl_info->heartbeat_counter) 3842 return; 3843 3844 ctrl_info->previous_num_interrupts = 3845 atomic_read(&ctrl_info->num_interrupts); 3846 ctrl_info->previous_heartbeat_count = 3847 pqi_read_heartbeat_counter(ctrl_info); 3848 3849 ctrl_info->heartbeat_timer.expires = 3850 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3851 add_timer(&ctrl_info->heartbeat_timer); 3852 } 3853 3854 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3855 { 3856 del_timer_sync(&ctrl_info->heartbeat_timer); 3857 } 3858 3859 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, 3860 struct pqi_event *event, struct pqi_event_response *response) 3861 { 3862 switch (event->event_id) { 3863 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 
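/*
 * Record how many bytes of host memory the firmware is asking for;
 * pqi_ofa_memory_alloc_worker() later hands ctrl_info->ofa_bytes_requested
 * to pqi_host_setup_buffer() when it builds the OFA buffer.
 */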
3864 ctrl_info->ofa_bytes_requested = 3865 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); 3866 break; 3867 case PQI_EVENT_OFA_CANCELED: 3868 ctrl_info->ofa_cancel_reason = 3869 get_unaligned_le16(&response->data.ofa_cancelled.reason); 3870 break; 3871 } 3872 } 3873 3874 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3875 { 3876 int num_events; 3877 pqi_index_t oq_pi; 3878 pqi_index_t oq_ci; 3879 struct pqi_event_queue *event_queue; 3880 struct pqi_event_response *response; 3881 struct pqi_event *event; 3882 int event_index; 3883 3884 event_queue = &ctrl_info->event_queue; 3885 num_events = 0; 3886 oq_ci = event_queue->oq_ci_copy; 3887 3888 while (1) { 3889 oq_pi = readl(event_queue->oq_pi); 3890 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3891 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3892 dev_err(&ctrl_info->pci_dev->dev, 3893 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3894 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3895 return -1; 3896 } 3897 3898 if (oq_pi == oq_ci) 3899 break; 3900 3901 num_events++; 3902 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3903 3904 event_index = pqi_event_type_to_event_index(response->event_type); 3905 3906 if (event_index >= 0 && response->request_acknowledge) { 3907 event = &ctrl_info->events[event_index]; 3908 event->pending = true; 3909 event->event_type = response->event_type; 3910 event->event_id = get_unaligned_le16(&response->event_id); 3911 event->additional_event_id = 3912 get_unaligned_le32(&response->additional_event_id); 3913 if (event->event_type == PQI_EVENT_TYPE_OFA) 3914 pqi_ofa_capture_event_payload(ctrl_info, event, response); 3915 } 3916 3917 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3918 } 3919 3920 if (num_events) { 3921 event_queue->oq_ci_copy = oq_ci; 3922 writel(oq_ci, event_queue->oq_ci); 3923 schedule_work(&ctrl_info->event_work); 3924 } 3925 3926 return num_events; 3927 } 3928 3929 #define PQI_LEGACY_INTX_MASK 0x1 3930 3931 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) 3932 { 3933 u32 intx_mask; 3934 struct pqi_device_registers __iomem *pqi_registers; 3935 volatile void __iomem *register_addr; 3936 3937 pqi_registers = ctrl_info->pqi_registers; 3938 3939 if (enable_intx) 3940 register_addr = &pqi_registers->legacy_intx_mask_clear; 3941 else 3942 register_addr = &pqi_registers->legacy_intx_mask_set; 3943 3944 intx_mask = readl(register_addr); 3945 intx_mask |= PQI_LEGACY_INTX_MASK; 3946 writel(intx_mask, register_addr); 3947 } 3948 3949 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3950 enum pqi_irq_mode new_mode) 3951 { 3952 switch (ctrl_info->irq_mode) { 3953 case IRQ_MODE_MSIX: 3954 switch (new_mode) { 3955 case IRQ_MODE_MSIX: 3956 break; 3957 case IRQ_MODE_INTX: 3958 pqi_configure_legacy_intx(ctrl_info, true); 3959 sis_enable_intx(ctrl_info); 3960 break; 3961 case IRQ_MODE_NONE: 3962 break; 3963 } 3964 break; 3965 case IRQ_MODE_INTX: 3966 switch (new_mode) { 3967 case IRQ_MODE_MSIX: 3968 pqi_configure_legacy_intx(ctrl_info, false); 3969 sis_enable_msix(ctrl_info); 3970 break; 3971 case IRQ_MODE_INTX: 3972 break; 3973 case IRQ_MODE_NONE: 3974 pqi_configure_legacy_intx(ctrl_info, false); 3975 break; 3976 } 3977 break; 3978 case IRQ_MODE_NONE: 3979 switch (new_mode) { 3980 case IRQ_MODE_MSIX: 3981 sis_enable_msix(ctrl_info); 3982 break; 3983 case IRQ_MODE_INTX: 3984 pqi_configure_legacy_intx(ctrl_info, 
true); 3985 sis_enable_intx(ctrl_info); 3986 break; 3987 case IRQ_MODE_NONE: 3988 break; 3989 } 3990 break; 3991 } 3992 3993 ctrl_info->irq_mode = new_mode; 3994 } 3995 3996 #define PQI_LEGACY_INTX_PENDING 0x1 3997 3998 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3999 { 4000 bool valid_irq; 4001 u32 intx_status; 4002 4003 switch (ctrl_info->irq_mode) { 4004 case IRQ_MODE_MSIX: 4005 valid_irq = true; 4006 break; 4007 case IRQ_MODE_INTX: 4008 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); 4009 if (intx_status & PQI_LEGACY_INTX_PENDING) 4010 valid_irq = true; 4011 else 4012 valid_irq = false; 4013 break; 4014 case IRQ_MODE_NONE: 4015 default: 4016 valid_irq = false; 4017 break; 4018 } 4019 4020 return valid_irq; 4021 } 4022 4023 static irqreturn_t pqi_irq_handler(int irq, void *data) 4024 { 4025 struct pqi_ctrl_info *ctrl_info; 4026 struct pqi_queue_group *queue_group; 4027 int num_io_responses_handled; 4028 int num_events_handled; 4029 4030 queue_group = data; 4031 ctrl_info = queue_group->ctrl_info; 4032 4033 if (!pqi_is_valid_irq(ctrl_info)) 4034 return IRQ_NONE; 4035 4036 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 4037 if (num_io_responses_handled < 0) 4038 goto out; 4039 4040 if (irq == ctrl_info->event_irq) { 4041 num_events_handled = pqi_process_event_intr(ctrl_info); 4042 if (num_events_handled < 0) 4043 goto out; 4044 } else { 4045 num_events_handled = 0; 4046 } 4047 4048 if (num_io_responses_handled + num_events_handled > 0) 4049 atomic_inc(&ctrl_info->num_interrupts); 4050 4051 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 4052 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 4053 4054 out: 4055 return IRQ_HANDLED; 4056 } 4057 4058 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 4059 { 4060 struct pci_dev *pci_dev = ctrl_info->pci_dev; 4061 int i; 4062 int rc; 4063 4064 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 4065 4066 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 4067 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 4068 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 4069 if (rc) { 4070 dev_err(&pci_dev->dev, 4071 "irq %u init failed with error %d\n", 4072 pci_irq_vector(pci_dev, i), rc); 4073 return rc; 4074 } 4075 ctrl_info->num_msix_vectors_initialized++; 4076 } 4077 4078 return 0; 4079 } 4080 4081 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 4082 { 4083 int i; 4084 4085 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 4086 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 4087 &ctrl_info->queue_groups[i]); 4088 4089 ctrl_info->num_msix_vectors_initialized = 0; 4090 } 4091 4092 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4093 { 4094 int num_vectors_enabled; 4095 unsigned int flags = PCI_IRQ_MSIX; 4096 4097 if (!pqi_disable_managed_interrupts) 4098 flags |= PCI_IRQ_AFFINITY; 4099 4100 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 4101 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 4102 flags); 4103 if (num_vectors_enabled < 0) { 4104 dev_err(&ctrl_info->pci_dev->dev, 4105 "MSI-X init failed with error %d\n", 4106 num_vectors_enabled); 4107 return num_vectors_enabled; 4108 } 4109 4110 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 4111 ctrl_info->irq_mode = IRQ_MODE_MSIX; 4112 return 0; 4113 } 4114 4115 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4116 { 4117 if (ctrl_info->num_msix_vectors_enabled) { 4118 
pci_free_irq_vectors(ctrl_info->pci_dev); 4119 ctrl_info->num_msix_vectors_enabled = 0; 4120 } 4121 } 4122 4123 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 4124 { 4125 unsigned int i; 4126 size_t alloc_length; 4127 size_t element_array_length_per_iq; 4128 size_t element_array_length_per_oq; 4129 void *element_array; 4130 void __iomem *next_queue_index; 4131 void *aligned_pointer; 4132 unsigned int num_inbound_queues; 4133 unsigned int num_outbound_queues; 4134 unsigned int num_queue_indexes; 4135 struct pqi_queue_group *queue_group; 4136 4137 element_array_length_per_iq = 4138 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 4139 ctrl_info->num_elements_per_iq; 4140 element_array_length_per_oq = 4141 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 4142 ctrl_info->num_elements_per_oq; 4143 num_inbound_queues = ctrl_info->num_queue_groups * 2; 4144 num_outbound_queues = ctrl_info->num_queue_groups; 4145 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 4146 4147 aligned_pointer = NULL; 4148 4149 for (i = 0; i < num_inbound_queues; i++) { 4150 aligned_pointer = PTR_ALIGN(aligned_pointer, 4151 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4152 aligned_pointer += element_array_length_per_iq; 4153 } 4154 4155 for (i = 0; i < num_outbound_queues; i++) { 4156 aligned_pointer = PTR_ALIGN(aligned_pointer, 4157 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4158 aligned_pointer += element_array_length_per_oq; 4159 } 4160 4161 aligned_pointer = PTR_ALIGN(aligned_pointer, 4162 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4163 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4164 PQI_EVENT_OQ_ELEMENT_LENGTH; 4165 4166 for (i = 0; i < num_queue_indexes; i++) { 4167 aligned_pointer = PTR_ALIGN(aligned_pointer, 4168 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4169 aligned_pointer += sizeof(pqi_index_t); 4170 } 4171 4172 alloc_length = (size_t)aligned_pointer + 4173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4174 4175 alloc_length += PQI_EXTRA_SGL_MEMORY; 4176 4177 ctrl_info->queue_memory_base = 4178 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4179 &ctrl_info->queue_memory_base_dma_handle, 4180 GFP_KERNEL); 4181 4182 if (!ctrl_info->queue_memory_base) 4183 return -ENOMEM; 4184 4185 ctrl_info->queue_memory_length = alloc_length; 4186 4187 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 4188 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4189 4190 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4191 queue_group = &ctrl_info->queue_groups[i]; 4192 queue_group->iq_element_array[RAID_PATH] = element_array; 4193 queue_group->iq_element_array_bus_addr[RAID_PATH] = 4194 ctrl_info->queue_memory_base_dma_handle + 4195 (element_array - ctrl_info->queue_memory_base); 4196 element_array += element_array_length_per_iq; 4197 element_array = PTR_ALIGN(element_array, 4198 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4199 queue_group->iq_element_array[AIO_PATH] = element_array; 4200 queue_group->iq_element_array_bus_addr[AIO_PATH] = 4201 ctrl_info->queue_memory_base_dma_handle + 4202 (element_array - ctrl_info->queue_memory_base); 4203 element_array += element_array_length_per_iq; 4204 element_array = PTR_ALIGN(element_array, 4205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4206 } 4207 4208 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4209 queue_group = &ctrl_info->queue_groups[i]; 4210 queue_group->oq_element_array = element_array; 4211 queue_group->oq_element_array_bus_addr = 4212 ctrl_info->queue_memory_base_dma_handle + 4213 (element_array - ctrl_info->queue_memory_base); 4214 element_array += element_array_length_per_oq; 4215 element_array = 
PTR_ALIGN(element_array, 4216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4217 } 4218 4219 ctrl_info->event_queue.oq_element_array = element_array; 4220 ctrl_info->event_queue.oq_element_array_bus_addr = 4221 ctrl_info->queue_memory_base_dma_handle + 4222 (element_array - ctrl_info->queue_memory_base); 4223 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4224 PQI_EVENT_OQ_ELEMENT_LENGTH; 4225 4226 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 4227 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4228 4229 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4230 queue_group = &ctrl_info->queue_groups[i]; 4231 queue_group->iq_ci[RAID_PATH] = next_queue_index; 4232 queue_group->iq_ci_bus_addr[RAID_PATH] = 4233 ctrl_info->queue_memory_base_dma_handle + 4234 (next_queue_index - 4235 (void __iomem *)ctrl_info->queue_memory_base); 4236 next_queue_index += sizeof(pqi_index_t); 4237 next_queue_index = PTR_ALIGN(next_queue_index, 4238 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4239 queue_group->iq_ci[AIO_PATH] = next_queue_index; 4240 queue_group->iq_ci_bus_addr[AIO_PATH] = 4241 ctrl_info->queue_memory_base_dma_handle + 4242 (next_queue_index - 4243 (void __iomem *)ctrl_info->queue_memory_base); 4244 next_queue_index += sizeof(pqi_index_t); 4245 next_queue_index = PTR_ALIGN(next_queue_index, 4246 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4247 queue_group->oq_pi = next_queue_index; 4248 queue_group->oq_pi_bus_addr = 4249 ctrl_info->queue_memory_base_dma_handle + 4250 (next_queue_index - 4251 (void __iomem *)ctrl_info->queue_memory_base); 4252 next_queue_index += sizeof(pqi_index_t); 4253 next_queue_index = PTR_ALIGN(next_queue_index, 4254 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4255 } 4256 4257 ctrl_info->event_queue.oq_pi = next_queue_index; 4258 ctrl_info->event_queue.oq_pi_bus_addr = 4259 ctrl_info->queue_memory_base_dma_handle + 4260 (next_queue_index - 4261 (void __iomem *)ctrl_info->queue_memory_base); 4262 4263 return 0; 4264 } 4265 4266 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 4267 { 4268 unsigned int i; 4269 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4270 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4271 4272 /* 4273 * Initialize the backpointers to the controller structure in 4274 * each operational queue group structure. 4275 */ 4276 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4277 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 4278 4279 /* 4280 * Assign IDs to all operational queues. Note that the IDs 4281 * assigned to operational IQs are independent of the IDs 4282 * assigned to operational OQs. 4283 */ 4284 ctrl_info->event_queue.oq_id = next_oq_id++; 4285 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4286 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 4287 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 4288 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 4289 } 4290 4291 /* 4292 * Assign MSI-X table entry indexes to all queues. Note that the 4293 * interrupt for the event queue is shared with the first queue group. 
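 * With this scheme, queue group i is signalled on MSI-X vector i, which is
 * the vector pqi_request_irqs() registers with &ctrl_info->queue_groups[i];
 * vector 0 doubles as ctrl_info->event_irq, so pqi_irq_handler() also drains
 * the event queue when it runs on that vector (e.g. with 4 queue groups,
 * vectors 0-3 are used and vector 0 additionally carries events).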
4294 */ 4295 ctrl_info->event_queue.int_msg_num = 0; 4296 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4297 ctrl_info->queue_groups[i].int_msg_num = i; 4298 4299 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4300 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 4301 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 4302 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 4303 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 4304 } 4305 } 4306 4307 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 4308 { 4309 size_t alloc_length; 4310 struct pqi_admin_queues_aligned *admin_queues_aligned; 4311 struct pqi_admin_queues *admin_queues; 4312 4313 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 4314 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4315 4316 ctrl_info->admin_queue_memory_base = 4317 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4318 &ctrl_info->admin_queue_memory_base_dma_handle, 4319 GFP_KERNEL); 4320 4321 if (!ctrl_info->admin_queue_memory_base) 4322 return -ENOMEM; 4323 4324 ctrl_info->admin_queue_memory_length = alloc_length; 4325 4326 admin_queues = &ctrl_info->admin_queues; 4327 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 4328 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4329 admin_queues->iq_element_array = 4330 &admin_queues_aligned->iq_element_array; 4331 admin_queues->oq_element_array = 4332 &admin_queues_aligned->oq_element_array; 4333 admin_queues->iq_ci = 4334 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; 4335 admin_queues->oq_pi = 4336 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 4337 4338 admin_queues->iq_element_array_bus_addr = 4339 ctrl_info->admin_queue_memory_base_dma_handle + 4340 (admin_queues->iq_element_array - 4341 ctrl_info->admin_queue_memory_base); 4342 admin_queues->oq_element_array_bus_addr = 4343 ctrl_info->admin_queue_memory_base_dma_handle + 4344 (admin_queues->oq_element_array - 4345 ctrl_info->admin_queue_memory_base); 4346 admin_queues->iq_ci_bus_addr = 4347 ctrl_info->admin_queue_memory_base_dma_handle + 4348 ((void __iomem *)admin_queues->iq_ci - 4349 (void __iomem *)ctrl_info->admin_queue_memory_base); 4350 admin_queues->oq_pi_bus_addr = 4351 ctrl_info->admin_queue_memory_base_dma_handle + 4352 ((void __iomem *)admin_queues->oq_pi - 4353 (void __iomem *)ctrl_info->admin_queue_memory_base); 4354 4355 return 0; 4356 } 4357 4358 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 4359 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 4360 4361 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 4362 { 4363 struct pqi_device_registers __iomem *pqi_registers; 4364 struct pqi_admin_queues *admin_queues; 4365 unsigned long timeout; 4366 u8 status; 4367 u32 reg; 4368 4369 pqi_registers = ctrl_info->pqi_registers; 4370 admin_queues = &ctrl_info->admin_queues; 4371 4372 writeq((u64)admin_queues->iq_element_array_bus_addr, 4373 &pqi_registers->admin_iq_element_array_addr); 4374 writeq((u64)admin_queues->oq_element_array_bus_addr, 4375 &pqi_registers->admin_oq_element_array_addr); 4376 writeq((u64)admin_queues->iq_ci_bus_addr, 4377 &pqi_registers->admin_iq_ci_addr); 4378 writeq((u64)admin_queues->oq_pi_bus_addr, 4379 &pqi_registers->admin_oq_pi_addr); 4380 4381 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 4382 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 4383 (admin_queues->int_msg_num << 16); 4384 writel(reg, &pqi_registers->admin_iq_num_elements); 4385 4386 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 4387 &pqi_registers->function_and_status_code); 4388 4389 
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4390 while (1) { 4391 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4392 status = readb(&pqi_registers->function_and_status_code); 4393 if (status == PQI_STATUS_IDLE) 4394 break; 4395 if (time_after(jiffies, timeout)) 4396 return -ETIMEDOUT; 4397 } 4398 4399 /* 4400 * The offset registers are not initialized to the correct 4401 * offsets until *after* the create admin queue pair command 4402 * completes successfully. 4403 */ 4404 admin_queues->iq_pi = ctrl_info->iomem_base + 4405 PQI_DEVICE_REGISTERS_OFFSET + 4406 readq(&pqi_registers->admin_iq_pi_offset); 4407 admin_queues->oq_ci = ctrl_info->iomem_base + 4408 PQI_DEVICE_REGISTERS_OFFSET + 4409 readq(&pqi_registers->admin_oq_ci_offset); 4410 4411 return 0; 4412 } 4413 4414 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 4415 struct pqi_general_admin_request *request) 4416 { 4417 struct pqi_admin_queues *admin_queues; 4418 void *next_element; 4419 pqi_index_t iq_pi; 4420 4421 admin_queues = &ctrl_info->admin_queues; 4422 iq_pi = admin_queues->iq_pi_copy; 4423 4424 next_element = admin_queues->iq_element_array + 4425 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 4426 4427 memcpy(next_element, request, sizeof(*request)); 4428 4429 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 4430 admin_queues->iq_pi_copy = iq_pi; 4431 4432 /* 4433 * This write notifies the controller that an IU is available to be 4434 * processed. 4435 */ 4436 writel(iq_pi, admin_queues->iq_pi); 4437 } 4438 4439 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 4440 4441 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 4442 struct pqi_general_admin_response *response) 4443 { 4444 struct pqi_admin_queues *admin_queues; 4445 pqi_index_t oq_pi; 4446 pqi_index_t oq_ci; 4447 unsigned long timeout; 4448 4449 admin_queues = &ctrl_info->admin_queues; 4450 oq_ci = admin_queues->oq_ci_copy; 4451 4452 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 4453 4454 while (1) { 4455 oq_pi = readl(admin_queues->oq_pi); 4456 if (oq_pi != oq_ci) 4457 break; 4458 if (time_after(jiffies, timeout)) { 4459 dev_err(&ctrl_info->pci_dev->dev, 4460 "timed out waiting for admin response\n"); 4461 return -ETIMEDOUT; 4462 } 4463 if (!sis_is_firmware_running(ctrl_info)) 4464 return -ENXIO; 4465 usleep_range(1000, 2000); 4466 } 4467 4468 memcpy(response, admin_queues->oq_element_array + 4469 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 4470 4471 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 4472 admin_queues->oq_ci_copy = oq_ci; 4473 writel(oq_ci, admin_queues->oq_ci); 4474 4475 return 0; 4476 } 4477 4478 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 4479 struct pqi_queue_group *queue_group, enum pqi_io_path path, 4480 struct pqi_io_request *io_request) 4481 { 4482 struct pqi_io_request *next; 4483 void *next_element; 4484 pqi_index_t iq_pi; 4485 pqi_index_t iq_ci; 4486 size_t iu_length; 4487 unsigned long flags; 4488 unsigned int num_elements_needed; 4489 unsigned int num_elements_to_end_of_queue; 4490 size_t copy_count; 4491 struct pqi_iu_header *request; 4492 4493 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4494 4495 if (io_request) { 4496 io_request->queue_group = queue_group; 4497 list_add_tail(&io_request->request_list_entry, 4498 &queue_group->request_list[path]); 4499 } 4500 4501 iq_pi = queue_group->iq_pi_copy[path]; 4502 4503 list_for_each_entry_safe(io_request, next, 4504 &queue_group->request_list[path], request_list_entry) { 4505 4506 request = 
io_request->iu; 4507 4508 iu_length = get_unaligned_le16(&request->iu_length) + 4509 PQI_REQUEST_HEADER_LENGTH; 4510 num_elements_needed = 4511 DIV_ROUND_UP(iu_length, 4512 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4513 4514 iq_ci = readl(queue_group->iq_ci[path]); 4515 4516 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 4517 ctrl_info->num_elements_per_iq)) 4518 break; 4519 4520 put_unaligned_le16(queue_group->oq_id, 4521 &request->response_queue_id); 4522 4523 next_element = queue_group->iq_element_array[path] + 4524 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4525 4526 num_elements_to_end_of_queue = 4527 ctrl_info->num_elements_per_iq - iq_pi; 4528 4529 if (num_elements_needed <= num_elements_to_end_of_queue) { 4530 memcpy(next_element, request, iu_length); 4531 } else { 4532 copy_count = num_elements_to_end_of_queue * 4533 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4534 memcpy(next_element, request, copy_count); 4535 memcpy(queue_group->iq_element_array[path], 4536 (u8 *)request + copy_count, 4537 iu_length - copy_count); 4538 } 4539 4540 iq_pi = (iq_pi + num_elements_needed) % 4541 ctrl_info->num_elements_per_iq; 4542 4543 list_del(&io_request->request_list_entry); 4544 } 4545 4546 if (iq_pi != queue_group->iq_pi_copy[path]) { 4547 queue_group->iq_pi_copy[path] = iq_pi; 4548 /* 4549 * This write notifies the controller that one or more IUs are 4550 * available to be processed. 4551 */ 4552 writel(iq_pi, queue_group->iq_pi[path]); 4553 } 4554 4555 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4556 } 4557 4558 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 4559 4560 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 4561 struct completion *wait) 4562 { 4563 int rc; 4564 4565 while (1) { 4566 if (wait_for_completion_io_timeout(wait, 4567 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 4568 rc = 0; 4569 break; 4570 } 4571 4572 pqi_check_ctrl_health(ctrl_info); 4573 if (pqi_ctrl_offline(ctrl_info)) { 4574 rc = -ENXIO; 4575 break; 4576 } 4577 } 4578 4579 return rc; 4580 } 4581 4582 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 4583 void *context) 4584 { 4585 struct completion *waiting = context; 4586 4587 complete(waiting); 4588 } 4589 4590 static int pqi_process_raid_io_error_synchronous( 4591 struct pqi_raid_error_info *error_info) 4592 { 4593 int rc = -EIO; 4594 4595 switch (error_info->data_out_result) { 4596 case PQI_DATA_IN_OUT_GOOD: 4597 if (error_info->status == SAM_STAT_GOOD) 4598 rc = 0; 4599 break; 4600 case PQI_DATA_IN_OUT_UNDERFLOW: 4601 if (error_info->status == SAM_STAT_GOOD || 4602 error_info->status == SAM_STAT_CHECK_CONDITION) 4603 rc = 0; 4604 break; 4605 case PQI_DATA_IN_OUT_ABORTED: 4606 rc = PQI_CMD_STATUS_ABORTED; 4607 break; 4608 } 4609 4610 return rc; 4611 } 4612 4613 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) 4614 { 4615 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; 4616 } 4617 4618 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4619 struct pqi_iu_header *request, unsigned int flags, 4620 struct pqi_raid_error_info *error_info) 4621 { 4622 int rc = 0; 4623 struct pqi_io_request *io_request; 4624 size_t iu_length; 4625 DECLARE_COMPLETION_ONSTACK(wait); 4626 4627 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4628 if (down_interruptible(&ctrl_info->sync_request_sem)) 4629 return -ERESTARTSYS; 4630 } else { 4631 down(&ctrl_info->sync_request_sem); 4632 } 4633 4634 pqi_ctrl_busy(ctrl_info); 4635 /* 4636 * 
Wait for other admin queue updates such as; 4637 * config table changes, OFA memory updates, ... 4638 */ 4639 if (pqi_is_blockable_request(request)) 4640 pqi_wait_if_ctrl_blocked(ctrl_info); 4641 4642 if (pqi_ctrl_offline(ctrl_info)) { 4643 rc = -ENXIO; 4644 goto out; 4645 } 4646 4647 io_request = pqi_alloc_io_request(ctrl_info, NULL); 4648 4649 put_unaligned_le16(io_request->index, 4650 &(((struct pqi_raid_path_request *)request)->request_id)); 4651 4652 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4653 ((struct pqi_raid_path_request *)request)->error_index = 4654 ((struct pqi_raid_path_request *)request)->request_id; 4655 4656 iu_length = get_unaligned_le16(&request->iu_length) + 4657 PQI_REQUEST_HEADER_LENGTH; 4658 memcpy(io_request->iu, request, iu_length); 4659 4660 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4661 io_request->context = &wait; 4662 4663 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4664 io_request); 4665 4666 pqi_wait_for_completion_io(ctrl_info, &wait); 4667 4668 if (error_info) { 4669 if (io_request->error_info) 4670 memcpy(error_info, io_request->error_info, sizeof(*error_info)); 4671 else 4672 memset(error_info, 0, sizeof(*error_info)); 4673 } else if (rc == 0 && io_request->error_info) { 4674 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); 4675 } 4676 4677 pqi_free_io_request(io_request); 4678 4679 out: 4680 pqi_ctrl_unbusy(ctrl_info); 4681 up(&ctrl_info->sync_request_sem); 4682 4683 return rc; 4684 } 4685 4686 static int pqi_validate_admin_response( 4687 struct pqi_general_admin_response *response, u8 expected_function_code) 4688 { 4689 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4690 return -EINVAL; 4691 4692 if (get_unaligned_le16(&response->header.iu_length) != 4693 PQI_GENERAL_ADMIN_IU_LENGTH) 4694 return -EINVAL; 4695 4696 if (response->function_code != expected_function_code) 4697 return -EINVAL; 4698 4699 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4700 return -EINVAL; 4701 4702 return 0; 4703 } 4704 4705 static int pqi_submit_admin_request_synchronous( 4706 struct pqi_ctrl_info *ctrl_info, 4707 struct pqi_general_admin_request *request, 4708 struct pqi_general_admin_response *response) 4709 { 4710 int rc; 4711 4712 pqi_submit_admin_request(ctrl_info, request); 4713 4714 rc = pqi_poll_for_admin_response(ctrl_info, response); 4715 4716 if (rc == 0) 4717 rc = pqi_validate_admin_response(response, request->function_code); 4718 4719 return rc; 4720 } 4721 4722 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4723 { 4724 int rc; 4725 struct pqi_general_admin_request request; 4726 struct pqi_general_admin_response response; 4727 struct pqi_device_capability *capability; 4728 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4729 4730 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4731 if (!capability) 4732 return -ENOMEM; 4733 4734 memset(&request, 0, sizeof(request)); 4735 4736 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4737 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4738 &request.header.iu_length); 4739 request.function_code = 4740 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4741 put_unaligned_le32(sizeof(*capability), 4742 &request.data.report_device_capability.buffer_length); 4743 4744 rc = pqi_map_single(ctrl_info->pci_dev, 4745 &request.data.report_device_capability.sg_descriptor, 4746 capability, sizeof(*capability), 4747 DMA_FROM_DEVICE); 4748 if (rc) 4749 goto out; 
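/*
 * The capability buffer is now DMA-mapped through the request's single
 * sg_descriptor; submit the REPORT DEVICE CAPABILITY admin request
 * synchronously and unmap the descriptor immediately afterwards, whether or
 * not the request succeeded, so the mapping never outlives this function.
 */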
4750 4751 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); 4752 4753 pqi_pci_unmap(ctrl_info->pci_dev, 4754 &request.data.report_device_capability.sg_descriptor, 1, 4755 DMA_FROM_DEVICE); 4756 4757 if (rc) 4758 goto out; 4759 4760 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4761 rc = -EIO; 4762 goto out; 4763 } 4764 4765 ctrl_info->max_inbound_queues = 4766 get_unaligned_le16(&capability->max_inbound_queues); 4767 ctrl_info->max_elements_per_iq = 4768 get_unaligned_le16(&capability->max_elements_per_iq); 4769 ctrl_info->max_iq_element_length = 4770 get_unaligned_le16(&capability->max_iq_element_length) 4771 * 16; 4772 ctrl_info->max_outbound_queues = 4773 get_unaligned_le16(&capability->max_outbound_queues); 4774 ctrl_info->max_elements_per_oq = 4775 get_unaligned_le16(&capability->max_elements_per_oq); 4776 ctrl_info->max_oq_element_length = 4777 get_unaligned_le16(&capability->max_oq_element_length) 4778 * 16; 4779 4780 sop_iu_layer_descriptor = 4781 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4782 4783 ctrl_info->max_inbound_iu_length_per_firmware = 4784 get_unaligned_le16( 4785 &sop_iu_layer_descriptor->max_inbound_iu_length); 4786 ctrl_info->inbound_spanning_supported = 4787 sop_iu_layer_descriptor->inbound_spanning_supported; 4788 ctrl_info->outbound_spanning_supported = 4789 sop_iu_layer_descriptor->outbound_spanning_supported; 4790 4791 out: 4792 kfree(capability); 4793 4794 return rc; 4795 } 4796 4797 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4798 { 4799 if (ctrl_info->max_iq_element_length < 4800 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4801 dev_err(&ctrl_info->pci_dev->dev, 4802 "max. inbound queue element length of %d is less than the required length of %d\n", 4803 ctrl_info->max_iq_element_length, 4804 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4805 return -EINVAL; 4806 } 4807 4808 if (ctrl_info->max_oq_element_length < 4809 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4810 dev_err(&ctrl_info->pci_dev->dev, 4811 "max. outbound queue element length of %d is less than the required length of %d\n", 4812 ctrl_info->max_oq_element_length, 4813 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4814 return -EINVAL; 4815 } 4816 4817 if (ctrl_info->max_inbound_iu_length_per_firmware < 4818 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4819 dev_err(&ctrl_info->pci_dev->dev, 4820 "max. inbound IU length of %u is less than the min. required length of %d\n", 4821 ctrl_info->max_inbound_iu_length_per_firmware, 4822 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4823 return -EINVAL; 4824 } 4825 4826 if (!ctrl_info->inbound_spanning_supported) { 4827 dev_err(&ctrl_info->pci_dev->dev, 4828 "the controller does not support inbound spanning\n"); 4829 return -EINVAL; 4830 } 4831 4832 if (ctrl_info->outbound_spanning_supported) { 4833 dev_err(&ctrl_info->pci_dev->dev, 4834 "the controller supports outbound spanning but this driver does not\n"); 4835 return -EINVAL; 4836 } 4837 4838 return 0; 4839 } 4840 4841 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4842 { 4843 int rc; 4844 struct pqi_event_queue *event_queue; 4845 struct pqi_general_admin_request request; 4846 struct pqi_general_admin_response response; 4847 4848 event_queue = &ctrl_info->event_queue; 4849 4850 /* 4851 * Create OQ (Outbound Queue - device to host queue) to dedicate 4852 * to events. 
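 * This reuses the same CREATE_OQ admin request as the per-group queues; the
 * main differences are the fixed depth (PQI_NUM_EVENT_QUEUE_ELEMENTS), the
 * smaller PQI_EVENT_OQ_ELEMENT_LENGTH elements, and the use of
 * event_queue->int_msg_num (MSI-X message 0) as the interrupt target.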
4853 */ 4854 memset(&request, 0, sizeof(request)); 4855 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4856 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4857 &request.header.iu_length); 4858 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4859 put_unaligned_le16(event_queue->oq_id, 4860 &request.data.create_operational_oq.queue_id); 4861 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4862 &request.data.create_operational_oq.element_array_addr); 4863 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4864 &request.data.create_operational_oq.pi_addr); 4865 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4866 &request.data.create_operational_oq.num_elements); 4867 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4868 &request.data.create_operational_oq.element_length); 4869 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4870 put_unaligned_le16(event_queue->int_msg_num, 4871 &request.data.create_operational_oq.int_msg_num); 4872 4873 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4874 &response); 4875 if (rc) 4876 return rc; 4877 4878 event_queue->oq_ci = ctrl_info->iomem_base + 4879 PQI_DEVICE_REGISTERS_OFFSET + 4880 get_unaligned_le64( 4881 &response.data.create_operational_oq.oq_ci_offset); 4882 4883 return 0; 4884 } 4885 4886 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4887 unsigned int group_number) 4888 { 4889 int rc; 4890 struct pqi_queue_group *queue_group; 4891 struct pqi_general_admin_request request; 4892 struct pqi_general_admin_response response; 4893 4894 queue_group = &ctrl_info->queue_groups[group_number]; 4895 4896 /* 4897 * Create IQ (Inbound Queue - host to device queue) for 4898 * RAID path. 4899 */ 4900 memset(&request, 0, sizeof(request)); 4901 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4902 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4903 &request.header.iu_length); 4904 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4905 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4906 &request.data.create_operational_iq.queue_id); 4907 put_unaligned_le64( 4908 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4909 &request.data.create_operational_iq.element_array_addr); 4910 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4911 &request.data.create_operational_iq.ci_addr); 4912 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4913 &request.data.create_operational_iq.num_elements); 4914 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4915 &request.data.create_operational_iq.element_length); 4916 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4917 4918 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4919 &response); 4920 if (rc) { 4921 dev_err(&ctrl_info->pci_dev->dev, 4922 "error creating inbound RAID queue\n"); 4923 return rc; 4924 } 4925 4926 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4927 PQI_DEVICE_REGISTERS_OFFSET + 4928 get_unaligned_le64( 4929 &response.data.create_operational_iq.iq_pi_offset); 4930 4931 /* 4932 * Create IQ (Inbound Queue - host to device queue) for 4933 * Advanced I/O (AIO) path. 
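 * The request mirrors the RAID-path IQ created above; only the queue ID and
 * the AIO element-array/CI addresses differ. The queue is then re-labelled
 * as an AIO queue by the CHANGE_IQ_PROPERTY request that follows.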
4934 */ 4935 memset(&request, 0, sizeof(request)); 4936 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4937 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4938 &request.header.iu_length); 4939 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4940 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4941 &request.data.create_operational_iq.queue_id); 4942 put_unaligned_le64((u64)queue_group-> 4943 iq_element_array_bus_addr[AIO_PATH], 4944 &request.data.create_operational_iq.element_array_addr); 4945 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4946 &request.data.create_operational_iq.ci_addr); 4947 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4948 &request.data.create_operational_iq.num_elements); 4949 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4950 &request.data.create_operational_iq.element_length); 4951 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4952 4953 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4954 &response); 4955 if (rc) { 4956 dev_err(&ctrl_info->pci_dev->dev, 4957 "error creating inbound AIO queue\n"); 4958 return rc; 4959 } 4960 4961 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4962 PQI_DEVICE_REGISTERS_OFFSET + 4963 get_unaligned_le64( 4964 &response.data.create_operational_iq.iq_pi_offset); 4965 4966 /* 4967 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4968 * assumed to be for RAID path I/O unless we change the queue's 4969 * property. 4970 */ 4971 memset(&request, 0, sizeof(request)); 4972 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4973 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4974 &request.header.iu_length); 4975 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4976 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4977 &request.data.change_operational_iq_properties.queue_id); 4978 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4979 &request.data.change_operational_iq_properties.vendor_specific); 4980 4981 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4982 &response); 4983 if (rc) { 4984 dev_err(&ctrl_info->pci_dev->dev, 4985 "error changing queue property\n"); 4986 return rc; 4987 } 4988 4989 /* 4990 * Create OQ (Outbound Queue - device to host queue). 
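 * A single OQ serves both the RAID and AIO IQs of this queue group:
 * pqi_start_io() stamps queue_group->oq_id into each IU's response_queue_id
 * field, so all responses for the group land here and are signalled on
 * queue_group->int_msg_num.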
4991 */ 4992 memset(&request, 0, sizeof(request)); 4993 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4994 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4995 &request.header.iu_length); 4996 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4997 put_unaligned_le16(queue_group->oq_id, 4998 &request.data.create_operational_oq.queue_id); 4999 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 5000 &request.data.create_operational_oq.element_array_addr); 5001 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 5002 &request.data.create_operational_oq.pi_addr); 5003 put_unaligned_le16(ctrl_info->num_elements_per_oq, 5004 &request.data.create_operational_oq.num_elements); 5005 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 5006 &request.data.create_operational_oq.element_length); 5007 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 5008 put_unaligned_le16(queue_group->int_msg_num, 5009 &request.data.create_operational_oq.int_msg_num); 5010 5011 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 5012 &response); 5013 if (rc) { 5014 dev_err(&ctrl_info->pci_dev->dev, 5015 "error creating outbound queue\n"); 5016 return rc; 5017 } 5018 5019 queue_group->oq_ci = ctrl_info->iomem_base + 5020 PQI_DEVICE_REGISTERS_OFFSET + 5021 get_unaligned_le64( 5022 &response.data.create_operational_oq.oq_ci_offset); 5023 5024 return 0; 5025 } 5026 5027 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 5028 { 5029 int rc; 5030 unsigned int i; 5031 5032 rc = pqi_create_event_queue(ctrl_info); 5033 if (rc) { 5034 dev_err(&ctrl_info->pci_dev->dev, 5035 "error creating event queue\n"); 5036 return rc; 5037 } 5038 5039 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5040 rc = pqi_create_queue_group(ctrl_info, i); 5041 if (rc) { 5042 dev_err(&ctrl_info->pci_dev->dev, 5043 "error creating queue group number %u/%u\n", 5044 i, ctrl_info->num_queue_groups); 5045 return rc; 5046 } 5047 } 5048 5049 return 0; 5050 } 5051 5052 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 5053 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) 5054 5055 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 5056 bool enable_events) 5057 { 5058 int rc; 5059 unsigned int i; 5060 struct pqi_event_config *event_config; 5061 struct pqi_event_descriptor *event_descriptor; 5062 struct pqi_general_management_request request; 5063 5064 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5065 GFP_KERNEL); 5066 if (!event_config) 5067 return -ENOMEM; 5068 5069 memset(&request, 0, sizeof(request)); 5070 5071 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 5072 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5073 data.report_event_configuration.sg_descriptors[1]) - 5074 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5075 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5076 &request.data.report_event_configuration.buffer_length); 5077 5078 rc = pqi_map_single(ctrl_info->pci_dev, 5079 request.data.report_event_configuration.sg_descriptors, 5080 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5081 DMA_FROM_DEVICE); 5082 if (rc) 5083 goto out; 5084 5085 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5086 5087 pqi_pci_unmap(ctrl_info->pci_dev, 5088 request.data.report_event_configuration.sg_descriptors, 1, 5089 DMA_FROM_DEVICE); 5090 5091 if (rc) 5092 goto out; 5093 5094 for (i = 0; i < event_config->num_event_descriptors; 
i++) { 5095 event_descriptor = &event_config->descriptors[i]; 5096 if (enable_events && 5097 pqi_is_supported_event(event_descriptor->event_type)) 5098 put_unaligned_le16(ctrl_info->event_queue.oq_id, 5099 &event_descriptor->oq_id); 5100 else 5101 put_unaligned_le16(0, &event_descriptor->oq_id); 5102 } 5103 5104 memset(&request, 0, sizeof(request)); 5105 5106 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 5107 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5108 data.report_event_configuration.sg_descriptors[1]) - 5109 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5110 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5111 &request.data.report_event_configuration.buffer_length); 5112 5113 rc = pqi_map_single(ctrl_info->pci_dev, 5114 request.data.report_event_configuration.sg_descriptors, 5115 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5116 DMA_TO_DEVICE); 5117 if (rc) 5118 goto out; 5119 5120 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5121 5122 pqi_pci_unmap(ctrl_info->pci_dev, 5123 request.data.report_event_configuration.sg_descriptors, 1, 5124 DMA_TO_DEVICE); 5125 5126 out: 5127 kfree(event_config); 5128 5129 return rc; 5130 } 5131 5132 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 5133 { 5134 return pqi_configure_events(ctrl_info, true); 5135 } 5136 5137 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 5138 { 5139 unsigned int i; 5140 struct device *dev; 5141 size_t sg_chain_buffer_length; 5142 struct pqi_io_request *io_request; 5143 5144 if (!ctrl_info->io_request_pool) 5145 return; 5146 5147 dev = &ctrl_info->pci_dev->dev; 5148 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5149 io_request = ctrl_info->io_request_pool; 5150 5151 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5152 kfree(io_request->iu); 5153 if (!io_request->sg_chain_buffer) 5154 break; 5155 dma_free_coherent(dev, sg_chain_buffer_length, 5156 io_request->sg_chain_buffer, 5157 io_request->sg_chain_buffer_dma_handle); 5158 io_request++; 5159 } 5160 5161 kfree(ctrl_info->io_request_pool); 5162 ctrl_info->io_request_pool = NULL; 5163 } 5164 5165 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 5166 { 5167 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 5168 ctrl_info->error_buffer_length, 5169 &ctrl_info->error_buffer_dma_handle, 5170 GFP_KERNEL); 5171 if (!ctrl_info->error_buffer) 5172 return -ENOMEM; 5173 5174 return 0; 5175 } 5176 5177 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 5178 { 5179 unsigned int i; 5180 void *sg_chain_buffer; 5181 size_t sg_chain_buffer_length; 5182 dma_addr_t sg_chain_buffer_dma_handle; 5183 struct device *dev; 5184 struct pqi_io_request *io_request; 5185 5186 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, 5187 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 5188 5189 if (!ctrl_info->io_request_pool) { 5190 dev_err(&ctrl_info->pci_dev->dev, 5191 "failed to allocate I/O request pool\n"); 5192 goto error; 5193 } 5194 5195 dev = &ctrl_info->pci_dev->dev; 5196 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5197 io_request = ctrl_info->io_request_pool; 5198 5199 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5200 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 5201 5202 if (!io_request->iu) { 5203 dev_err(&ctrl_info->pci_dev->dev, 5204 "failed to allocate IU buffers\n"); 5205 goto error; 5206 } 5207 5208 
sg_chain_buffer = dma_alloc_coherent(dev, 5209 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 5210 GFP_KERNEL); 5211 5212 if (!sg_chain_buffer) { 5213 dev_err(&ctrl_info->pci_dev->dev, 5214 "failed to allocate PQI scatter-gather chain buffers\n"); 5215 goto error; 5216 } 5217 5218 io_request->index = i; 5219 io_request->sg_chain_buffer = sg_chain_buffer; 5220 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; 5221 io_request++; 5222 } 5223 5224 return 0; 5225 5226 error: 5227 pqi_free_all_io_requests(ctrl_info); 5228 5229 return -ENOMEM; 5230 } 5231 5232 /* 5233 * Calculate required resources that are sized based on max. outstanding 5234 * requests and max. transfer size. 5235 */ 5236 5237 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 5238 { 5239 u32 max_transfer_size; 5240 u32 max_sg_entries; 5241 5242 ctrl_info->scsi_ml_can_queue = 5243 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 5244 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 5245 5246 ctrl_info->error_buffer_length = 5247 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 5248 5249 if (reset_devices) 5250 max_transfer_size = min(ctrl_info->max_transfer_size, 5251 PQI_MAX_TRANSFER_SIZE_KDUMP); 5252 else 5253 max_transfer_size = min(ctrl_info->max_transfer_size, 5254 PQI_MAX_TRANSFER_SIZE); 5255 5256 max_sg_entries = max_transfer_size / PAGE_SIZE; 5257 5258 /* +1 to cover when the buffer is not page-aligned. */ 5259 max_sg_entries++; 5260 5261 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 5262 5263 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 5264 5265 ctrl_info->sg_chain_buffer_length = 5266 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 5267 PQI_EXTRA_SGL_MEMORY; 5268 ctrl_info->sg_tablesize = max_sg_entries; 5269 ctrl_info->max_sectors = max_transfer_size / 512; 5270 } 5271 5272 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 5273 { 5274 int num_queue_groups; 5275 u16 num_elements_per_iq; 5276 u16 num_elements_per_oq; 5277 5278 if (reset_devices) { 5279 num_queue_groups = 1; 5280 } else { 5281 int num_cpus; 5282 int max_queue_groups; 5283 5284 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 5285 ctrl_info->max_outbound_queues - 1); 5286 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 5287 5288 num_cpus = num_online_cpus(); 5289 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 5290 num_queue_groups = min(num_queue_groups, max_queue_groups); 5291 } 5292 5293 ctrl_info->num_queue_groups = num_queue_groups; 5294 5295 /* 5296 * Make sure that the max. inbound IU length is an even multiple 5297 * of our inbound element length. 5298 */ 5299 ctrl_info->max_inbound_iu_length = 5300 (ctrl_info->max_inbound_iu_length_per_firmware / 5301 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 5302 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 5303 5304 num_elements_per_iq = 5305 (ctrl_info->max_inbound_iu_length / 5306 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 5307 5308 /* Add one because one element in each queue is unusable. 
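 * (Presumably because a circular queue with PI == CI reads as empty, one
 * slot must always stay vacant to distinguish a full queue from an empty
 * one; e.g. a 4-element queue can hold at most 3 outstanding elements.
 * Adding one keeps the usable depth equal to the value just computed,
 * before the min() clamp below.)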
*/ 5309 num_elements_per_iq++; 5310 5311 num_elements_per_iq = min(num_elements_per_iq, 5312 ctrl_info->max_elements_per_iq); 5313 5314 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 5315 num_elements_per_oq = min(num_elements_per_oq, 5316 ctrl_info->max_elements_per_oq); 5317 5318 ctrl_info->num_elements_per_iq = num_elements_per_iq; 5319 ctrl_info->num_elements_per_oq = num_elements_per_oq; 5320 5321 ctrl_info->max_sg_per_iu = 5322 ((ctrl_info->max_inbound_iu_length - 5323 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5324 sizeof(struct pqi_sg_descriptor)) + 5325 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 5326 5327 ctrl_info->max_sg_per_r56_iu = 5328 ((ctrl_info->max_inbound_iu_length - 5329 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5330 sizeof(struct pqi_sg_descriptor)) + 5331 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; 5332 } 5333 5334 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, 5335 struct scatterlist *sg) 5336 { 5337 u64 address = (u64)sg_dma_address(sg); 5338 unsigned int length = sg_dma_len(sg); 5339 5340 put_unaligned_le64(address, &sg_descriptor->address); 5341 put_unaligned_le32(length, &sg_descriptor->length); 5342 put_unaligned_le32(0, &sg_descriptor->flags); 5343 } 5344 5345 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, 5346 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, 5347 int max_sg_per_iu, bool *chained) 5348 { 5349 int i; 5350 unsigned int num_sg_in_iu; 5351 5352 *chained = false; 5353 i = 0; 5354 num_sg_in_iu = 0; 5355 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ 5356 5357 while (1) { 5358 pqi_set_sg_descriptor(sg_descriptor, sg); 5359 if (!*chained) 5360 num_sg_in_iu++; 5361 i++; 5362 if (i == sg_count) 5363 break; 5364 sg_descriptor++; 5365 if (i == max_sg_per_iu) { 5366 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, 5367 &sg_descriptor->address); 5368 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), 5369 &sg_descriptor->length); 5370 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); 5371 *chained = true; 5372 num_sg_in_iu++; 5373 sg_descriptor = io_request->sg_chain_buffer; 5374 } 5375 sg = sg_next(sg); 5376 } 5377 5378 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 5379 5380 return num_sg_in_iu; 5381 } 5382 5383 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 5384 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 5385 struct pqi_io_request *io_request) 5386 { 5387 u16 iu_length; 5388 int sg_count; 5389 bool chained; 5390 unsigned int num_sg_in_iu; 5391 struct scatterlist *sg; 5392 struct pqi_sg_descriptor *sg_descriptor; 5393 5394 sg_count = scsi_dma_map(scmd); 5395 if (sg_count < 0) 5396 return sg_count; 5397 5398 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5399 PQI_REQUEST_HEADER_LENGTH; 5400 5401 if (sg_count == 0) 5402 goto out; 5403 5404 sg = scsi_sglist(scmd); 5405 sg_descriptor = request->sg_descriptors; 5406 5407 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5408 ctrl_info->max_sg_per_iu, &chained); 5409 5410 request->partial = chained; 5411 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5412 5413 out: 5414 put_unaligned_le16(iu_length, &request->header.iu_length); 5415 5416 return 0; 5417 } 5418 5419 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, 5420 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, 5421 struct pqi_io_request *io_request) 5422 { 5423 u16 iu_length; 5424 int 
sg_count; 5425 bool chained; 5426 unsigned int num_sg_in_iu; 5427 struct scatterlist *sg; 5428 struct pqi_sg_descriptor *sg_descriptor; 5429 5430 sg_count = scsi_dma_map(scmd); 5431 if (sg_count < 0) 5432 return sg_count; 5433 5434 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - 5435 PQI_REQUEST_HEADER_LENGTH; 5436 num_sg_in_iu = 0; 5437 5438 if (sg_count == 0) 5439 goto out; 5440 5441 sg = scsi_sglist(scmd); 5442 sg_descriptor = request->sg_descriptors; 5443 5444 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5445 ctrl_info->max_sg_per_iu, &chained); 5446 5447 request->partial = chained; 5448 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5449 5450 out: 5451 put_unaligned_le16(iu_length, &request->header.iu_length); 5452 request->num_sg_descriptors = num_sg_in_iu; 5453 5454 return 0; 5455 } 5456 5457 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, 5458 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, 5459 struct pqi_io_request *io_request) 5460 { 5461 u16 iu_length; 5462 int sg_count; 5463 bool chained; 5464 unsigned int num_sg_in_iu; 5465 struct scatterlist *sg; 5466 struct pqi_sg_descriptor *sg_descriptor; 5467 5468 sg_count = scsi_dma_map(scmd); 5469 if (sg_count < 0) 5470 return sg_count; 5471 5472 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - 5473 PQI_REQUEST_HEADER_LENGTH; 5474 num_sg_in_iu = 0; 5475 5476 if (sg_count != 0) { 5477 sg = scsi_sglist(scmd); 5478 sg_descriptor = request->sg_descriptors; 5479 5480 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5481 ctrl_info->max_sg_per_r56_iu, &chained); 5482 5483 request->partial = chained; 5484 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5485 } 5486 5487 put_unaligned_le16(iu_length, &request->header.iu_length); 5488 request->num_sg_descriptors = num_sg_in_iu; 5489 5490 return 0; 5491 } 5492 5493 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 5494 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 5495 struct pqi_io_request *io_request) 5496 { 5497 u16 iu_length; 5498 int sg_count; 5499 bool chained; 5500 unsigned int num_sg_in_iu; 5501 struct scatterlist *sg; 5502 struct pqi_sg_descriptor *sg_descriptor; 5503 5504 sg_count = scsi_dma_map(scmd); 5505 if (sg_count < 0) 5506 return sg_count; 5507 5508 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 5509 PQI_REQUEST_HEADER_LENGTH; 5510 num_sg_in_iu = 0; 5511 5512 if (sg_count == 0) 5513 goto out; 5514 5515 sg = scsi_sglist(scmd); 5516 sg_descriptor = request->sg_descriptors; 5517 5518 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5519 ctrl_info->max_sg_per_iu, &chained); 5520 5521 request->partial = chained; 5522 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5523 5524 out: 5525 put_unaligned_le16(iu_length, &request->header.iu_length); 5526 request->num_sg_descriptors = num_sg_in_iu; 5527 5528 return 0; 5529 } 5530 5531 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 5532 void *context) 5533 { 5534 struct scsi_cmnd *scmd; 5535 5536 scmd = io_request->scmd; 5537 pqi_free_io_request(io_request); 5538 scsi_dma_unmap(scmd); 5539 pqi_scsi_done(scmd); 5540 } 5541 5542 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, 5543 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5544 struct pqi_queue_group *queue_group, bool io_high_prio) 5545 { 5546 int rc; 5547 size_t cdb_length; 5548 struct pqi_io_request *io_request; 5549 
struct pqi_raid_path_request *request; 5550 5551 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5552 if (!io_request) 5553 return SCSI_MLQUEUE_HOST_BUSY; 5554 5555 io_request->io_complete_callback = pqi_raid_io_complete; 5556 io_request->scmd = scmd; 5557 5558 request = io_request->iu; 5559 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); 5560 5561 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5562 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5563 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5564 request->command_priority = io_high_prio; 5565 put_unaligned_le16(io_request->index, &request->request_id); 5566 request->error_index = request->request_id; 5567 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); 5568 request->ml_device_lun_number = (u8)scmd->device->lun; 5569 5570 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 5571 memcpy(request->cdb, scmd->cmnd, cdb_length); 5572 5573 switch (cdb_length) { 5574 case 6: 5575 case 10: 5576 case 12: 5577 case 16: 5578 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5579 break; 5580 case 20: 5581 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; 5582 break; 5583 case 24: 5584 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; 5585 break; 5586 case 28: 5587 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; 5588 break; 5589 case 32: 5590 default: 5591 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; 5592 break; 5593 } 5594 5595 switch (scmd->sc_data_direction) { 5596 case DMA_FROM_DEVICE: 5597 request->data_direction = SOP_READ_FLAG; 5598 break; 5599 case DMA_TO_DEVICE: 5600 request->data_direction = SOP_WRITE_FLAG; 5601 break; 5602 case DMA_NONE: 5603 request->data_direction = SOP_NO_DIRECTION_FLAG; 5604 break; 5605 case DMA_BIDIRECTIONAL: 5606 request->data_direction = SOP_BIDIRECTIONAL; 5607 break; 5608 default: 5609 dev_err(&ctrl_info->pci_dev->dev, 5610 "unknown data direction: %d\n", 5611 scmd->sc_data_direction); 5612 break; 5613 } 5614 5615 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5616 if (rc) { 5617 pqi_free_io_request(io_request); 5618 return SCSI_MLQUEUE_HOST_BUSY; 5619 } 5620 5621 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5622 5623 return 0; 5624 } 5625 5626 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5627 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5628 struct pqi_queue_group *queue_group) 5629 { 5630 bool io_high_prio; 5631 5632 io_high_prio = pqi_is_io_high_priority(device, scmd); 5633 5634 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); 5635 } 5636 5637 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5638 { 5639 struct scsi_cmnd *scmd; 5640 struct pqi_scsi_dev *device; 5641 struct pqi_ctrl_info *ctrl_info; 5642 5643 if (!io_request->raid_bypass) 5644 return false; 5645 5646 scmd = io_request->scmd; 5647 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5648 return false; 5649 if (host_byte(scmd->result) == DID_NO_CONNECT) 5650 return false; 5651 5652 device = scmd->device->hostdata; 5653 if (pqi_device_offline(device) || pqi_device_in_remove(device)) 5654 return false; 5655 5656 ctrl_info = shost_to_hba(scmd->device->host); 5657 if (pqi_ctrl_offline(ctrl_info)) 5658 return false; 5659 5660 return true; 5661 } 5662 5663 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5664 void *context) 
5665 { 5666 struct scsi_cmnd *scmd; 5667 5668 scmd = io_request->scmd; 5669 scsi_dma_unmap(scmd); 5670 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { 5671 set_host_byte(scmd, DID_IMM_RETRY); 5672 pqi_cmd_priv(scmd)->this_residual++; 5673 } 5674 5675 pqi_free_io_request(io_request); 5676 pqi_scsi_done(scmd); 5677 } 5678 5679 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5680 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5681 struct pqi_queue_group *queue_group) 5682 { 5683 bool io_high_prio; 5684 5685 io_high_prio = pqi_is_io_high_priority(device, scmd); 5686 5687 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5688 scmd->cmnd, scmd->cmd_len, queue_group, NULL, 5689 false, io_high_prio); 5690 } 5691 5692 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5693 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5694 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5695 struct pqi_encryption_info *encryption_info, bool raid_bypass, 5696 bool io_high_prio) 5697 { 5698 int rc; 5699 struct pqi_io_request *io_request; 5700 struct pqi_aio_path_request *request; 5701 5702 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5703 if (!io_request) 5704 return SCSI_MLQUEUE_HOST_BUSY; 5705 5706 io_request->io_complete_callback = pqi_aio_io_complete; 5707 io_request->scmd = scmd; 5708 io_request->raid_bypass = raid_bypass; 5709 5710 request = io_request->iu; 5711 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); 5712 5713 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5714 put_unaligned_le32(aio_handle, &request->nexus_id); 5715 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5716 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5717 request->command_priority = io_high_prio; 5718 put_unaligned_le16(io_request->index, &request->request_id); 5719 request->error_index = request->request_id; 5720 if (!raid_bypass && ctrl_info->multi_lun_device_supported) 5721 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); 5722 if (cdb_length > sizeof(request->cdb)) 5723 cdb_length = sizeof(request->cdb); 5724 request->cdb_length = cdb_length; 5725 memcpy(request->cdb, cdb, cdb_length); 5726 5727 switch (scmd->sc_data_direction) { 5728 case DMA_TO_DEVICE: 5729 request->data_direction = SOP_READ_FLAG; 5730 break; 5731 case DMA_FROM_DEVICE: 5732 request->data_direction = SOP_WRITE_FLAG; 5733 break; 5734 case DMA_NONE: 5735 request->data_direction = SOP_NO_DIRECTION_FLAG; 5736 break; 5737 case DMA_BIDIRECTIONAL: 5738 request->data_direction = SOP_BIDIRECTIONAL; 5739 break; 5740 default: 5741 dev_err(&ctrl_info->pci_dev->dev, 5742 "unknown data direction: %d\n", 5743 scmd->sc_data_direction); 5744 break; 5745 } 5746 5747 if (encryption_info) { 5748 request->encryption_enable = true; 5749 put_unaligned_le16(encryption_info->data_encryption_key_index, 5750 &request->data_encryption_key_index); 5751 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5752 &request->encrypt_tweak_lower); 5753 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5754 &request->encrypt_tweak_upper); 5755 } 5756 5757 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5758 if (rc) { 5759 pqi_free_io_request(io_request); 5760 return SCSI_MLQUEUE_HOST_BUSY; 5761 } 5762 5763 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5764 5765 return 0; 5766 } 5767 5768 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 5769 struct 
scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5770 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5771 struct pqi_scsi_dev_raid_map_data *rmd) 5772 { 5773 int rc; 5774 struct pqi_io_request *io_request; 5775 struct pqi_aio_r1_path_request *r1_request; 5776 5777 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5778 if (!io_request) 5779 return SCSI_MLQUEUE_HOST_BUSY; 5780 5781 io_request->io_complete_callback = pqi_aio_io_complete; 5782 io_request->scmd = scmd; 5783 io_request->raid_bypass = true; 5784 5785 r1_request = io_request->iu; 5786 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); 5787 5788 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; 5789 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); 5790 r1_request->num_drives = rmd->num_it_nexus_entries; 5791 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); 5792 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); 5793 if (rmd->num_it_nexus_entries == 3) 5794 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); 5795 5796 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); 5797 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5798 put_unaligned_le16(io_request->index, &r1_request->request_id); 5799 r1_request->error_index = r1_request->request_id; 5800 if (rmd->cdb_length > sizeof(r1_request->cdb)) 5801 rmd->cdb_length = sizeof(r1_request->cdb); 5802 r1_request->cdb_length = rmd->cdb_length; 5803 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); 5804 5805 /* The direction is always write. */ 5806 r1_request->data_direction = SOP_READ_FLAG; 5807 5808 if (encryption_info) { 5809 r1_request->encryption_enable = true; 5810 put_unaligned_le16(encryption_info->data_encryption_key_index, 5811 &r1_request->data_encryption_key_index); 5812 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5813 &r1_request->encrypt_tweak_lower); 5814 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5815 &r1_request->encrypt_tweak_upper); 5816 } 5817 5818 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); 5819 if (rc) { 5820 pqi_free_io_request(io_request); 5821 return SCSI_MLQUEUE_HOST_BUSY; 5822 } 5823 5824 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5825 5826 return 0; 5827 } 5828 5829 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 5830 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5831 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5832 struct pqi_scsi_dev_raid_map_data *rmd) 5833 { 5834 int rc; 5835 struct pqi_io_request *io_request; 5836 struct pqi_aio_r56_path_request *r56_request; 5837 5838 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5839 if (!io_request) 5840 return SCSI_MLQUEUE_HOST_BUSY; 5841 io_request->io_complete_callback = pqi_aio_io_complete; 5842 io_request->scmd = scmd; 5843 io_request->raid_bypass = true; 5844 5845 r56_request = io_request->iu; 5846 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); 5847 5848 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) 5849 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; 5850 else 5851 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; 5852 5853 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); 5854 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); 5855 
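/* The P (and, for RAID 6, Q) parity drive nexuses below come from the RAID map data (rmd) prepared by the bypass setup path. */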
put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); 5856 if (rmd->raid_level == SA_RAID_6) { 5857 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); 5858 r56_request->xor_multiplier = rmd->xor_mult; 5859 } 5860 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); 5861 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5862 put_unaligned_le64(rmd->row, &r56_request->row); 5863 5864 put_unaligned_le16(io_request->index, &r56_request->request_id); 5865 r56_request->error_index = r56_request->request_id; 5866 5867 if (rmd->cdb_length > sizeof(r56_request->cdb)) 5868 rmd->cdb_length = sizeof(r56_request->cdb); 5869 r56_request->cdb_length = rmd->cdb_length; 5870 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); 5871 5872 /* The direction is always write. */ 5873 r56_request->data_direction = SOP_READ_FLAG; 5874 5875 if (encryption_info) { 5876 r56_request->encryption_enable = true; 5877 put_unaligned_le16(encryption_info->data_encryption_key_index, 5878 &r56_request->data_encryption_key_index); 5879 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5880 &r56_request->encrypt_tweak_lower); 5881 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5882 &r56_request->encrypt_tweak_upper); 5883 } 5884 5885 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); 5886 if (rc) { 5887 pqi_free_io_request(io_request); 5888 return SCSI_MLQUEUE_HOST_BUSY; 5889 } 5890 5891 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5892 5893 return 0; 5894 } 5895 5896 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5897 struct scsi_cmnd *scmd) 5898 { 5899 /* 5900 * We are setting host_tagset = 1 during init. 5901 */ 5902 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); 5903 } 5904 5905 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) 5906 { 5907 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) 5908 return false; 5909 5910 return pqi_cmd_priv(scmd)->this_residual == 0; 5911 } 5912 5913 /* 5914 * This function gets called just before we hand the completed SCSI request 5915 * back to the SML. 5916 */ 5917 5918 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5919 { 5920 struct pqi_scsi_dev *device; 5921 struct completion *wait; 5922 5923 if (!scmd->device) { 5924 set_host_byte(scmd, DID_NO_CONNECT); 5925 return; 5926 } 5927 5928 device = scmd->device->hostdata; 5929 if (!device) { 5930 set_host_byte(scmd, DID_NO_CONNECT); 5931 return; 5932 } 5933 5934 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); 5935 5936 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); 5937 if (wait != PQI_NO_COMPLETION) 5938 complete(wait); 5939 } 5940 5941 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, 5942 struct scsi_cmnd *scmd) 5943 { 5944 u32 oldest_jiffies; 5945 u8 lru_index; 5946 int i; 5947 int rc; 5948 struct pqi_scsi_dev *device; 5949 struct pqi_stream_data *pqi_stream_data; 5950 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 5951 5952 if (!ctrl_info->enable_stream_detection) 5953 return false; 5954 5955 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 5956 if (rc) 5957 return false; 5958 5959 /* Check writes only. */ 5960 if (!rmd.is_write) 5961 return false; 5962 5963 device = scmd->device->hostdata; 5964 5965 /* Check for RAID 5/6 streams. 
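* Returning true here makes the caller skip the AIO bypass and submit the request down the normal RAID path instead.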
*/ 5966 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) 5967 return false; 5968 5969 /* 5970 * If controller does not support AIO RAID{5,6} writes, need to send 5971 * requests down non-AIO path. 5972 */ 5973 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || 5974 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) 5975 return true; 5976 5977 lru_index = 0; 5978 oldest_jiffies = INT_MAX; 5979 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { 5980 pqi_stream_data = &device->stream_data[i]; 5981 /* 5982 * Check for adjacent request or request is within 5983 * the previous request. 5984 */ 5985 if ((pqi_stream_data->next_lba && 5986 rmd.first_block >= pqi_stream_data->next_lba) && 5987 rmd.first_block <= pqi_stream_data->next_lba + 5988 rmd.block_cnt) { 5989 pqi_stream_data->next_lba = rmd.first_block + 5990 rmd.block_cnt; 5991 pqi_stream_data->last_accessed = jiffies; 5992 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; 5993 return true; 5994 } 5995 5996 /* unused entry */ 5997 if (pqi_stream_data->last_accessed == 0) { 5998 lru_index = i; 5999 break; 6000 } 6001 6002 /* Find entry with oldest last accessed time. */ 6003 if (pqi_stream_data->last_accessed <= oldest_jiffies) { 6004 oldest_jiffies = pqi_stream_data->last_accessed; 6005 lru_index = i; 6006 } 6007 } 6008 6009 /* Set LRU entry. */ 6010 pqi_stream_data = &device->stream_data[lru_index]; 6011 pqi_stream_data->last_accessed = jiffies; 6012 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; 6013 6014 return false; 6015 } 6016 6017 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6018 { 6019 int rc; 6020 struct pqi_ctrl_info *ctrl_info; 6021 struct pqi_scsi_dev *device; 6022 u16 hw_queue; 6023 struct pqi_queue_group *queue_group; 6024 bool raid_bypassed; 6025 u8 lun; 6026 6027 scmd->host_scribble = PQI_NO_COMPLETION; 6028 6029 device = scmd->device->hostdata; 6030 6031 if (!device) { 6032 set_host_byte(scmd, DID_NO_CONNECT); 6033 pqi_scsi_done(scmd); 6034 return 0; 6035 } 6036 6037 lun = (u8)scmd->device->lun; 6038 6039 atomic_inc(&device->scsi_cmds_outstanding[lun]); 6040 6041 ctrl_info = shost_to_hba(shost); 6042 6043 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { 6044 set_host_byte(scmd, DID_NO_CONNECT); 6045 pqi_scsi_done(scmd); 6046 return 0; 6047 } 6048 6049 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { 6050 rc = SCSI_MLQUEUE_HOST_BUSY; 6051 goto out; 6052 } 6053 6054 /* 6055 * This is necessary because the SML doesn't zero out this field during 6056 * error recovery. 
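* Without clearing it, a stale result from a command being retried by the error handler would carry over into this submission.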
6057 */ 6058 scmd->result = 0; 6059 6060 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 6061 queue_group = &ctrl_info->queue_groups[hw_queue]; 6062 6063 if (pqi_is_logical_device(device)) { 6064 raid_bypassed = false; 6065 if (device->raid_bypass_enabled && 6066 pqi_is_bypass_eligible_request(scmd) && 6067 !pqi_is_parity_write_stream(ctrl_info, scmd)) { 6068 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6069 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 6070 raid_bypassed = true; 6071 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; 6072 } 6073 } 6074 if (!raid_bypassed) 6075 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6076 } else { 6077 if (device->aio_enabled) 6078 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6079 else 6080 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6081 } 6082 6083 out: 6084 if (rc) { 6085 scmd->host_scribble = NULL; 6086 atomic_dec(&device->scsi_cmds_outstanding[lun]); 6087 } 6088 6089 return rc; 6090 } 6091 6092 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 6093 { 6094 unsigned int i; 6095 unsigned int path; 6096 unsigned long flags; 6097 unsigned int queued_io_count; 6098 struct pqi_queue_group *queue_group; 6099 struct pqi_io_request *io_request; 6100 6101 queued_io_count = 0; 6102 6103 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6104 queue_group = &ctrl_info->queue_groups[i]; 6105 for (path = 0; path < 2; path++) { 6106 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 6107 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 6108 queued_io_count++; 6109 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 6110 } 6111 } 6112 6113 return queued_io_count; 6114 } 6115 6116 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 6117 { 6118 unsigned int i; 6119 unsigned int path; 6120 unsigned int nonempty_inbound_queue_count; 6121 struct pqi_queue_group *queue_group; 6122 pqi_index_t iq_pi; 6123 pqi_index_t iq_ci; 6124 6125 nonempty_inbound_queue_count = 0; 6126 6127 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6128 queue_group = &ctrl_info->queue_groups[i]; 6129 for (path = 0; path < 2; path++) { 6130 iq_pi = queue_group->iq_pi_copy[path]; 6131 iq_ci = readl(queue_group->iq_ci[path]); 6132 if (iq_ci != iq_pi) 6133 nonempty_inbound_queue_count++; 6134 } 6135 } 6136 6137 return nonempty_inbound_queue_count; 6138 } 6139 6140 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 6141 6142 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 6143 { 6144 unsigned long start_jiffies; 6145 unsigned long warning_timeout; 6146 unsigned int queued_io_count; 6147 unsigned int nonempty_inbound_queue_count; 6148 bool displayed_warning; 6149 6150 displayed_warning = false; 6151 start_jiffies = jiffies; 6152 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6153 6154 while (1) { 6155 queued_io_count = pqi_queued_io_count(ctrl_info); 6156 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 6157 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 6158 break; 6159 pqi_check_ctrl_health(ctrl_info); 6160 if (pqi_ctrl_offline(ctrl_info)) 6161 return -ENXIO; 6162 if (time_after(jiffies, warning_timeout)) { 6163 dev_warn(&ctrl_info->pci_dev->dev, 6164 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty 
inbound queue count: %u)\n", 6165 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 6166 displayed_warning = true; 6167 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6168 } 6169 usleep_range(1000, 2000); 6170 } 6171 6172 if (displayed_warning) 6173 dev_warn(&ctrl_info->pci_dev->dev, 6174 "queued I/O drained after waiting for %u seconds\n", 6175 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6176 6177 return 0; 6178 } 6179 6180 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 6181 struct pqi_scsi_dev *device, u8 lun) 6182 { 6183 unsigned int i; 6184 unsigned int path; 6185 struct pqi_queue_group *queue_group; 6186 unsigned long flags; 6187 struct pqi_io_request *io_request; 6188 struct pqi_io_request *next; 6189 struct scsi_cmnd *scmd; 6190 struct pqi_scsi_dev *scsi_device; 6191 6192 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6193 queue_group = &ctrl_info->queue_groups[i]; 6194 6195 for (path = 0; path < 2; path++) { 6196 spin_lock_irqsave( 6197 &queue_group->submit_lock[path], flags); 6198 6199 list_for_each_entry_safe(io_request, next, 6200 &queue_group->request_list[path], 6201 request_list_entry) { 6202 6203 scmd = io_request->scmd; 6204 if (!scmd) 6205 continue; 6206 6207 scsi_device = scmd->device->hostdata; 6208 6209 list_del(&io_request->request_list_entry); 6210 if (scsi_device == device && (u8)scmd->device->lun == lun) 6211 set_host_byte(scmd, DID_RESET); 6212 else 6213 set_host_byte(scmd, DID_REQUEUE); 6214 pqi_free_io_request(io_request); 6215 scsi_dma_unmap(scmd); 6216 pqi_scsi_done(scmd); 6217 } 6218 6219 spin_unlock_irqrestore( 6220 &queue_group->submit_lock[path], flags); 6221 } 6222 } 6223 } 6224 6225 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 6226 6227 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 6228 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) 6229 { 6230 int cmds_outstanding; 6231 unsigned long start_jiffies; 6232 unsigned long warning_timeout; 6233 unsigned long msecs_waiting; 6234 6235 start_jiffies = jiffies; 6236 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6237 6238 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { 6239 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { 6240 pqi_check_ctrl_health(ctrl_info); 6241 if (pqi_ctrl_offline(ctrl_info)) 6242 return -ENXIO; 6243 } 6244 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6245 if (msecs_waiting >= timeout_msecs) { 6246 dev_err(&ctrl_info->pci_dev->dev, 6247 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6248 ctrl_info->scsi_host->host_no, device->bus, device->target, 6249 lun, msecs_waiting / 1000, cmds_outstanding); 6250 return -ETIMEDOUT; 6251 } 6252 if (time_after(jiffies, warning_timeout)) { 6253 dev_warn(&ctrl_info->pci_dev->dev, 6254 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", 6255 ctrl_info->scsi_host->host_no, device->bus, device->target, 6256 lun, msecs_waiting / 1000, cmds_outstanding); 6257 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6258 } 6259 usleep_range(1000, 2000); 6260 } 6261 6262 return 0; 6263 } 6264 6265 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 6266 void *context) 6267 { 6268 struct completion *waiting = context; 6269 6270 complete(waiting); 6271 } 6272 6273 #define 
PQI_LUN_RESET_POLL_COMPLETION_SECS 10 6274 6275 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 6276 struct pqi_scsi_dev *device, u8 lun, struct completion *wait) 6277 { 6278 int rc; 6279 unsigned int wait_secs; 6280 int cmds_outstanding; 6281 6282 wait_secs = 0; 6283 6284 while (1) { 6285 if (wait_for_completion_io_timeout(wait, 6286 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { 6287 rc = 0; 6288 break; 6289 } 6290 6291 pqi_check_ctrl_health(ctrl_info); 6292 if (pqi_ctrl_offline(ctrl_info)) { 6293 rc = -ENXIO; 6294 break; 6295 } 6296 6297 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6298 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); 6299 dev_warn(&ctrl_info->pci_dev->dev, 6300 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 6301 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); 6302 } 6303 6304 return rc; 6305 } 6306 6307 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 6308 6309 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6310 { 6311 int rc; 6312 struct pqi_io_request *io_request; 6313 DECLARE_COMPLETION_ONSTACK(wait); 6314 struct pqi_task_management_request *request; 6315 6316 io_request = pqi_alloc_io_request(ctrl_info, NULL); 6317 io_request->io_complete_callback = pqi_lun_reset_complete; 6318 io_request->context = &wait; 6319 6320 request = io_request->iu; 6321 memset(request, 0, sizeof(*request)); 6322 6323 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 6324 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 6325 &request->header.iu_length); 6326 put_unaligned_le16(io_request->index, &request->request_id); 6327 memcpy(request->lun_number, device->scsi3addr, 6328 sizeof(request->lun_number)); 6329 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) 6330 request->ml_device_lun_number = lun; 6331 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 6332 if (ctrl_info->tmf_iu_timeout_supported) 6333 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); 6334 6335 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 6336 io_request); 6337 6338 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); 6339 if (rc == 0) 6340 rc = io_request->status; 6341 6342 pqi_free_io_request(io_request); 6343 6344 return rc; 6345 } 6346 6347 #define PQI_LUN_RESET_RETRIES 3 6348 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) 6349 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) 6350 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) 6351 6352 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6353 { 6354 int reset_rc; 6355 int wait_rc; 6356 unsigned int retries; 6357 unsigned long timeout_msecs; 6358 6359 for (retries = 0;;) { 6360 reset_rc = pqi_lun_reset(ctrl_info, device, lun); 6361 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) 6362 break; 6363 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 6364 } 6365 6366 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : 6367 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; 6368 6369 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); 6370 if (wait_rc && reset_rc == 0) 6371 reset_rc = wait_rc; 6372 6373 return reset_rc == 0 ? 
SUCCESS : FAILED; 6374 } 6375 6376 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6377 { 6378 int rc; 6379 6380 pqi_ctrl_block_requests(ctrl_info); 6381 pqi_ctrl_wait_until_quiesced(ctrl_info); 6382 pqi_fail_io_queued_for_device(ctrl_info, device, lun); 6383 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 6384 pqi_device_reset_start(device, lun); 6385 pqi_ctrl_unblock_requests(ctrl_info); 6386 if (rc) 6387 rc = FAILED; 6388 else 6389 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); 6390 pqi_device_reset_done(device, lun); 6391 6392 return rc; 6393 } 6394 6395 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) 6396 { 6397 int rc; 6398 6399 mutex_lock(&ctrl_info->lun_reset_mutex); 6400 6401 dev_err(&ctrl_info->pci_dev->dev, 6402 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", 6403 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); 6404 6405 pqi_check_ctrl_health(ctrl_info); 6406 if (pqi_ctrl_offline(ctrl_info)) 6407 rc = FAILED; 6408 else 6409 rc = pqi_device_reset(ctrl_info, device, lun); 6410 6411 dev_err(&ctrl_info->pci_dev->dev, 6412 "reset of scsi %d:%d:%d:%u: %s\n", 6413 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, 6414 rc == SUCCESS ? "SUCCESS" : "FAILED"); 6415 6416 mutex_unlock(&ctrl_info->lun_reset_mutex); 6417 6418 return rc; 6419 } 6420 6421 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 6422 { 6423 struct Scsi_Host *shost; 6424 struct pqi_ctrl_info *ctrl_info; 6425 struct pqi_scsi_dev *device; 6426 u8 scsi_opcode; 6427 6428 shost = scmd->device->host; 6429 ctrl_info = shost_to_hba(shost); 6430 device = scmd->device->hostdata; 6431 scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; 6432 6433 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); 6434 } 6435 6436 static void pqi_tmf_worker(struct work_struct *work) 6437 { 6438 struct pqi_tmf_work *tmf_work; 6439 struct scsi_cmnd *scmd; 6440 6441 tmf_work = container_of(work, struct pqi_tmf_work, work_struct); 6442 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); 6443 6444 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); 6445 } 6446 6447 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) 6448 { 6449 struct Scsi_Host *shost; 6450 struct pqi_ctrl_info *ctrl_info; 6451 struct pqi_scsi_dev *device; 6452 struct pqi_tmf_work *tmf_work; 6453 DECLARE_COMPLETION_ONSTACK(wait); 6454 6455 shost = scmd->device->host; 6456 ctrl_info = shost_to_hba(shost); 6457 device = scmd->device->hostdata; 6458 6459 dev_err(&ctrl_info->pci_dev->dev, 6460 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", 6461 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6462 6463 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { 6464 dev_err(&ctrl_info->pci_dev->dev, 6465 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", 6466 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6467 scmd->result = DID_RESET << 16; 6468 goto out; 6469 } 6470 6471 tmf_work = &device->tmf_work[scmd->device->lun]; 6472 6473 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { 6474 tmf_work->ctrl_info = ctrl_info; 6475 tmf_work->device = device; 6476 tmf_work->lun = (u8)scmd->device->lun; 6477 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; 6478 schedule_work(&tmf_work->work_struct); 6479 } 6480 6481 wait_for_completion(&wait); 6482 6483 dev_err(&ctrl_info->pci_dev->dev, 6484 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", 6485 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6486 6487 out: 6488 6489 return SUCCESS; 6490 } 6491 6492 static int pqi_slave_alloc(struct scsi_device *sdev) 6493 { 6494 struct pqi_scsi_dev *device; 6495 unsigned long flags; 6496 struct pqi_ctrl_info *ctrl_info; 6497 struct scsi_target *starget; 6498 struct sas_rphy *rphy; 6499 6500 ctrl_info = shost_to_hba(sdev->host); 6501 6502 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6503 6504 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 6505 starget = scsi_target(sdev); 6506 rphy = target_to_rphy(starget); 6507 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6508 if (device) { 6509 if (device->target_lun_valid) { 6510 device->ignore_device = true; 6511 } else { 6512 device->target = sdev_id(sdev); 6513 device->lun = sdev->lun; 6514 device->target_lun_valid = true; 6515 } 6516 } 6517 } else { 6518 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 6519 sdev_id(sdev), sdev->lun); 6520 } 6521 6522 if (device) { 6523 sdev->hostdata = device; 6524 device->sdev = sdev; 6525 if (device->queue_depth) { 6526 device->advertised_queue_depth = device->queue_depth; 6527 scsi_change_queue_depth(sdev, 6528 device->advertised_queue_depth); 6529 } 6530 if (pqi_is_logical_device(device)) { 6531 pqi_disable_write_same(sdev); 6532 } else { 6533 sdev->allow_restart = 1; 6534 if (device->device_type == SA_DEVICE_TYPE_NVME) 6535 pqi_disable_write_same(sdev); 6536 } 6537 } 6538 6539 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6540 6541 return 0; 6542 } 6543 6544 static void 
pqi_map_queues(struct Scsi_Host *shost) 6545 { 6546 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6547 6548 if (!ctrl_info->disable_managed_interrupts) 6549 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 6550 &ctrl_info->pci_dev->dev, 0); 6551 else 6552 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); 6553 } 6554 6555 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6556 { 6557 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6558 } 6559 6560 static int pqi_slave_configure(struct scsi_device *sdev) 6561 { 6562 int rc = 0; 6563 struct pqi_scsi_dev *device; 6564 6565 device = sdev->hostdata; 6566 device->devtype = sdev->type; 6567 6568 if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6569 rc = -ENXIO; 6570 device->ignore_device = false; 6571 } 6572 6573 return rc; 6574 } 6575 6576 static void pqi_slave_destroy(struct scsi_device *sdev) 6577 { 6578 struct pqi_ctrl_info *ctrl_info; 6579 struct pqi_scsi_dev *device; 6580 int mutex_acquired; 6581 unsigned long flags; 6582 6583 ctrl_info = shost_to_hba(sdev->host); 6584 6585 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 6586 if (!mutex_acquired) 6587 return; 6588 6589 device = sdev->hostdata; 6590 if (!device) { 6591 mutex_unlock(&ctrl_info->scan_mutex); 6592 return; 6593 } 6594 6595 device->lun_count--; 6596 if (device->lun_count > 0) { 6597 mutex_unlock(&ctrl_info->scan_mutex); 6598 return; 6599 } 6600 6601 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6602 list_del(&device->scsi_device_list_entry); 6603 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6604 6605 mutex_unlock(&ctrl_info->scan_mutex); 6606 6607 pqi_dev_info(ctrl_info, "removed", device); 6608 pqi_free_device(device); 6609 } 6610 6611 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6612 { 6613 struct pci_dev *pci_dev; 6614 u32 subsystem_vendor; 6615 u32 subsystem_device; 6616 cciss_pci_info_struct pci_info; 6617 6618 if (!arg) 6619 return -EINVAL; 6620 6621 pci_dev = ctrl_info->pci_dev; 6622 6623 pci_info.domain = pci_domain_nr(pci_dev->bus); 6624 pci_info.bus = pci_dev->bus->number; 6625 pci_info.dev_fn = pci_dev->devfn; 6626 subsystem_vendor = pci_dev->subsystem_vendor; 6627 subsystem_device = pci_dev->subsystem_device; 6628 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 6629 6630 if (copy_to_user(arg, &pci_info, sizeof(pci_info))) 6631 return -EFAULT; 6632 6633 return 0; 6634 } 6635 6636 static int pqi_getdrivver_ioctl(void __user *arg) 6637 { 6638 u32 version; 6639 6640 if (!arg) 6641 return -EINVAL; 6642 6643 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 6644 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 6645 6646 if (copy_to_user(arg, &version, sizeof(version))) 6647 return -EFAULT; 6648 6649 return 0; 6650 } 6651 6652 struct ciss_error_info { 6653 u8 scsi_status; 6654 int command_status; 6655 size_t sense_data_length; 6656 }; 6657 6658 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 6659 struct ciss_error_info *ciss_error_info) 6660 { 6661 int ciss_cmd_status; 6662 size_t sense_data_length; 6663 6664 switch (pqi_error_info->data_out_result) { 6665 case PQI_DATA_IN_OUT_GOOD: 6666 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 6667 break; 6668 case PQI_DATA_IN_OUT_UNDERFLOW: 6669 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 6670 break; 6671 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 6672 ciss_cmd_status = 
CISS_CMD_STATUS_DATA_OVERRUN; 6673 break; 6674 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 6675 case PQI_DATA_IN_OUT_BUFFER_ERROR: 6676 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 6677 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 6678 case PQI_DATA_IN_OUT_ERROR: 6679 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 6680 break; 6681 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 6682 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 6683 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 6684 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 6685 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 6686 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 6687 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 6688 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 6689 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 6690 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 6691 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 6692 break; 6693 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 6694 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 6695 break; 6696 case PQI_DATA_IN_OUT_ABORTED: 6697 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 6698 break; 6699 case PQI_DATA_IN_OUT_TIMEOUT: 6700 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 6701 break; 6702 default: 6703 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 6704 break; 6705 } 6706 6707 sense_data_length = 6708 get_unaligned_le16(&pqi_error_info->sense_data_length); 6709 if (sense_data_length == 0) 6710 sense_data_length = 6711 get_unaligned_le16(&pqi_error_info->response_data_length); 6712 if (sense_data_length) 6713 if (sense_data_length > sizeof(pqi_error_info->data)) 6714 sense_data_length = sizeof(pqi_error_info->data); 6715 6716 ciss_error_info->scsi_status = pqi_error_info->status; 6717 ciss_error_info->command_status = ciss_cmd_status; 6718 ciss_error_info->sense_data_length = sense_data_length; 6719 } 6720 6721 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6722 { 6723 int rc; 6724 char *kernel_buffer = NULL; 6725 u16 iu_length; 6726 size_t sense_data_length; 6727 IOCTL_Command_struct iocommand; 6728 struct pqi_raid_path_request request; 6729 struct pqi_raid_error_info pqi_error_info; 6730 struct ciss_error_info ciss_error_info; 6731 6732 if (pqi_ctrl_offline(ctrl_info)) 6733 return -ENXIO; 6734 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) 6735 return -EBUSY; 6736 if (!arg) 6737 return -EINVAL; 6738 if (!capable(CAP_SYS_RAWIO)) 6739 return -EPERM; 6740 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 6741 return -EFAULT; 6742 if (iocommand.buf_size < 1 && 6743 iocommand.Request.Type.Direction != XFER_NONE) 6744 return -EINVAL; 6745 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 6746 return -EINVAL; 6747 if (iocommand.Request.Type.Type != TYPE_CMD) 6748 return -EINVAL; 6749 6750 switch (iocommand.Request.Type.Direction) { 6751 case XFER_NONE: 6752 case XFER_WRITE: 6753 case XFER_READ: 6754 case XFER_READ | XFER_WRITE: 6755 break; 6756 default: 6757 return -EINVAL; 6758 } 6759 6760 if (iocommand.buf_size > 0) { 6761 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 6762 if (!kernel_buffer) 6763 return -ENOMEM; 6764 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6765 if (copy_from_user(kernel_buffer, iocommand.buf, 6766 iocommand.buf_size)) { 6767 rc = -EFAULT; 6768 goto out; 6769 } 6770 } else { 6771 memset(kernel_buffer, 0, iocommand.buf_size); 6772 } 6773 } 6774 6775 memset(&request, 0, sizeof(request)); 6776 6777 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6778 iu_length = offsetof(struct 
pqi_raid_path_request, sg_descriptors) - 6779 PQI_REQUEST_HEADER_LENGTH; 6780 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6781 sizeof(request.lun_number)); 6782 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6783 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6784 6785 switch (iocommand.Request.Type.Direction) { 6786 case XFER_NONE: 6787 request.data_direction = SOP_NO_DIRECTION_FLAG; 6788 break; 6789 case XFER_WRITE: 6790 request.data_direction = SOP_WRITE_FLAG; 6791 break; 6792 case XFER_READ: 6793 request.data_direction = SOP_READ_FLAG; 6794 break; 6795 case XFER_READ | XFER_WRITE: 6796 request.data_direction = SOP_BIDIRECTIONAL; 6797 break; 6798 } 6799 6800 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6801 6802 if (iocommand.buf_size > 0) { 6803 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6804 6805 rc = pqi_map_single(ctrl_info->pci_dev, 6806 &request.sg_descriptors[0], kernel_buffer, 6807 iocommand.buf_size, DMA_BIDIRECTIONAL); 6808 if (rc) 6809 goto out; 6810 6811 iu_length += sizeof(request.sg_descriptors[0]); 6812 } 6813 6814 put_unaligned_le16(iu_length, &request.header.iu_length); 6815 6816 if (ctrl_info->raid_iu_timeout_supported) 6817 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6818 6819 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6820 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); 6821 6822 if (iocommand.buf_size > 0) 6823 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6824 DMA_BIDIRECTIONAL); 6825 6826 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6827 6828 if (rc == 0) { 6829 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6830 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6831 iocommand.error_info.CommandStatus = 6832 ciss_error_info.command_status; 6833 sense_data_length = ciss_error_info.sense_data_length; 6834 if (sense_data_length) { 6835 if (sense_data_length > 6836 sizeof(iocommand.error_info.SenseInfo)) 6837 sense_data_length = 6838 sizeof(iocommand.error_info.SenseInfo); 6839 memcpy(iocommand.error_info.SenseInfo, 6840 pqi_error_info.data, sense_data_length); 6841 iocommand.error_info.SenseLen = sense_data_length; 6842 } 6843 } 6844 6845 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6846 rc = -EFAULT; 6847 goto out; 6848 } 6849 6850 if (rc == 0 && iocommand.buf_size > 0 && 6851 (iocommand.Request.Type.Direction & XFER_READ)) { 6852 if (copy_to_user(iocommand.buf, kernel_buffer, 6853 iocommand.buf_size)) { 6854 rc = -EFAULT; 6855 } 6856 } 6857 6858 out: 6859 kfree(kernel_buffer); 6860 6861 return rc; 6862 } 6863 6864 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6865 void __user *arg) 6866 { 6867 int rc; 6868 struct pqi_ctrl_info *ctrl_info; 6869 6870 ctrl_info = shost_to_hba(sdev->host); 6871 6872 switch (cmd) { 6873 case CCISS_DEREGDISK: 6874 case CCISS_REGNEWDISK: 6875 case CCISS_REGNEWD: 6876 rc = pqi_scan_scsi_devices(ctrl_info); 6877 break; 6878 case CCISS_GETPCIINFO: 6879 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6880 break; 6881 case CCISS_GETDRIVVER: 6882 rc = pqi_getdrivver_ioctl(arg); 6883 break; 6884 case CCISS_PASSTHRU: 6885 rc = pqi_passthru_ioctl(ctrl_info, arg); 6886 break; 6887 default: 6888 rc = -EINVAL; 6889 break; 6890 } 6891 6892 return rc; 6893 } 6894 6895 static ssize_t pqi_firmware_version_show(struct device *dev, 6896 struct device_attribute *attr, char *buffer) 6897 { 6898 struct Scsi_Host *shost; 6899 struct pqi_ctrl_info 
*ctrl_info; 6900 6901 shost = class_to_shost(dev); 6902 ctrl_info = shost_to_hba(shost); 6903 6904 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6905 } 6906 6907 static ssize_t pqi_serial_number_show(struct device *dev, 6908 struct device_attribute *attr, char *buffer) 6909 { 6910 struct Scsi_Host *shost; 6911 struct pqi_ctrl_info *ctrl_info; 6912 6913 shost = class_to_shost(dev); 6914 ctrl_info = shost_to_hba(shost); 6915 6916 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6917 } 6918 6919 static ssize_t pqi_model_show(struct device *dev, 6920 struct device_attribute *attr, char *buffer) 6921 { 6922 struct Scsi_Host *shost; 6923 struct pqi_ctrl_info *ctrl_info; 6924 6925 shost = class_to_shost(dev); 6926 ctrl_info = shost_to_hba(shost); 6927 6928 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6929 } 6930 6931 static ssize_t pqi_vendor_show(struct device *dev, 6932 struct device_attribute *attr, char *buffer) 6933 { 6934 struct Scsi_Host *shost; 6935 struct pqi_ctrl_info *ctrl_info; 6936 6937 shost = class_to_shost(dev); 6938 ctrl_info = shost_to_hba(shost); 6939 6940 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6941 } 6942 6943 static ssize_t pqi_host_rescan_store(struct device *dev, 6944 struct device_attribute *attr, const char *buffer, size_t count) 6945 { 6946 struct Scsi_Host *shost = class_to_shost(dev); 6947 6948 pqi_scan_start(shost); 6949 6950 return count; 6951 } 6952 6953 static ssize_t pqi_lockup_action_show(struct device *dev, 6954 struct device_attribute *attr, char *buffer) 6955 { 6956 int count = 0; 6957 unsigned int i; 6958 6959 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6960 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6961 count += scnprintf(buffer + count, PAGE_SIZE - count, 6962 "[%s] ", pqi_lockup_actions[i].name); 6963 else 6964 count += scnprintf(buffer + count, PAGE_SIZE - count, 6965 "%s ", pqi_lockup_actions[i].name); 6966 } 6967 6968 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6969 6970 return count; 6971 } 6972 6973 static ssize_t pqi_lockup_action_store(struct device *dev, 6974 struct device_attribute *attr, const char *buffer, size_t count) 6975 { 6976 unsigned int i; 6977 char *action_name; 6978 char action_name_buffer[32]; 6979 6980 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6981 action_name = strstrip(action_name_buffer); 6982 6983 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6984 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6985 pqi_lockup_action = pqi_lockup_actions[i].action; 6986 return count; 6987 } 6988 } 6989 6990 return -EINVAL; 6991 } 6992 6993 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, 6994 struct device_attribute *attr, char *buffer) 6995 { 6996 struct Scsi_Host *shost = class_to_shost(dev); 6997 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6998 6999 return scnprintf(buffer, 10, "%x\n", 7000 ctrl_info->enable_stream_detection); 7001 } 7002 7003 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, 7004 struct device_attribute *attr, const char *buffer, size_t count) 7005 { 7006 struct Scsi_Host *shost = class_to_shost(dev); 7007 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7008 u8 set_stream_detection = 0; 7009 7010 if (kstrtou8(buffer, 0, &set_stream_detection)) 7011 return -EINVAL; 7012 7013 if (set_stream_detection > 0) 7014 set_stream_detection = 1; 7015 7016 ctrl_info->enable_stream_detection = 
set_stream_detection; 7017 7018 return count; 7019 } 7020 7021 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, 7022 struct device_attribute *attr, char *buffer) 7023 { 7024 struct Scsi_Host *shost = class_to_shost(dev); 7025 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7026 7027 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); 7028 } 7029 7030 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, 7031 struct device_attribute *attr, const char *buffer, size_t count) 7032 { 7033 struct Scsi_Host *shost = class_to_shost(dev); 7034 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7035 u8 set_r5_writes = 0; 7036 7037 if (kstrtou8(buffer, 0, &set_r5_writes)) 7038 return -EINVAL; 7039 7040 if (set_r5_writes > 0) 7041 set_r5_writes = 1; 7042 7043 ctrl_info->enable_r5_writes = set_r5_writes; 7044 7045 return count; 7046 } 7047 7048 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, 7049 struct device_attribute *attr, char *buffer) 7050 { 7051 struct Scsi_Host *shost = class_to_shost(dev); 7052 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7053 7054 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); 7055 } 7056 7057 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, 7058 struct device_attribute *attr, const char *buffer, size_t count) 7059 { 7060 struct Scsi_Host *shost = class_to_shost(dev); 7061 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7062 u8 set_r6_writes = 0; 7063 7064 if (kstrtou8(buffer, 0, &set_r6_writes)) 7065 return -EINVAL; 7066 7067 if (set_r6_writes > 0) 7068 set_r6_writes = 1; 7069 7070 ctrl_info->enable_r6_writes = set_r6_writes; 7071 7072 return count; 7073 } 7074 7075 static DEVICE_STRING_ATTR_RO(driver_version, 0444, 7076 DRIVER_VERSION BUILD_TIMESTAMP); 7077 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 7078 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 7079 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 7080 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 7081 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 7082 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, 7083 pqi_lockup_action_store); 7084 static DEVICE_ATTR(enable_stream_detection, 0644, 7085 pqi_host_enable_stream_detection_show, 7086 pqi_host_enable_stream_detection_store); 7087 static DEVICE_ATTR(enable_r5_writes, 0644, 7088 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); 7089 static DEVICE_ATTR(enable_r6_writes, 0644, 7090 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 7091 7092 static struct attribute *pqi_shost_attrs[] = { 7093 &dev_attr_driver_version.attr.attr, 7094 &dev_attr_firmware_version.attr, 7095 &dev_attr_model.attr, 7096 &dev_attr_serial_number.attr, 7097 &dev_attr_vendor.attr, 7098 &dev_attr_rescan.attr, 7099 &dev_attr_lockup_action.attr, 7100 &dev_attr_enable_stream_detection.attr, 7101 &dev_attr_enable_r5_writes.attr, 7102 &dev_attr_enable_r6_writes.attr, 7103 NULL 7104 }; 7105 7106 ATTRIBUTE_GROUPS(pqi_shost); 7107 7108 static ssize_t pqi_unique_id_show(struct device *dev, 7109 struct device_attribute *attr, char *buffer) 7110 { 7111 struct pqi_ctrl_info *ctrl_info; 7112 struct scsi_device *sdev; 7113 struct pqi_scsi_dev *device; 7114 unsigned long flags; 7115 u8 unique_id[16]; 7116 7117 sdev = to_scsi_device(dev); 7118 ctrl_info = shost_to_hba(sdev->host); 7119 7120 if (pqi_ctrl_offline(ctrl_info)) 7121 return -ENODEV; 7122 7123 
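/* Hold the device list lock while dereferencing sdev->hostdata so the device is not torn down while the ID bytes are copied (same locking pattern as the other sysfs show handlers). */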
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7124 7125 device = sdev->hostdata; 7126 if (!device) { 7127 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7128 return -ENODEV; 7129 } 7130 7131 if (device->is_physical_device) 7132 memcpy(unique_id, device->wwid, sizeof(device->wwid)); 7133 else 7134 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 7135 7136 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7137 7138 return scnprintf(buffer, PAGE_SIZE, 7139 "%02X%02X%02X%02X%02X%02X%02X%02X" 7140 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 7141 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 7142 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 7143 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 7144 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 7145 } 7146 7147 static ssize_t pqi_lunid_show(struct device *dev, 7148 struct device_attribute *attr, char *buffer) 7149 { 7150 struct pqi_ctrl_info *ctrl_info; 7151 struct scsi_device *sdev; 7152 struct pqi_scsi_dev *device; 7153 unsigned long flags; 7154 u8 lunid[8]; 7155 7156 sdev = to_scsi_device(dev); 7157 ctrl_info = shost_to_hba(sdev->host); 7158 7159 if (pqi_ctrl_offline(ctrl_info)) 7160 return -ENODEV; 7161 7162 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7163 7164 device = sdev->hostdata; 7165 if (!device) { 7166 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7167 return -ENODEV; 7168 } 7169 7170 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 7171 7172 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7173 7174 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 7175 } 7176 7177 #define MAX_PATHS 8 7178 7179 static ssize_t pqi_path_info_show(struct device *dev, 7180 struct device_attribute *attr, char *buf) 7181 { 7182 struct pqi_ctrl_info *ctrl_info; 7183 struct scsi_device *sdev; 7184 struct pqi_scsi_dev *device; 7185 unsigned long flags; 7186 int i; 7187 int output_len = 0; 7188 u8 box; 7189 u8 bay; 7190 u8 path_map_index; 7191 char *active; 7192 u8 phys_connector[2]; 7193 7194 sdev = to_scsi_device(dev); 7195 ctrl_info = shost_to_hba(sdev->host); 7196 7197 if (pqi_ctrl_offline(ctrl_info)) 7198 return -ENODEV; 7199 7200 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7201 7202 device = sdev->hostdata; 7203 if (!device) { 7204 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7205 return -ENODEV; 7206 } 7207 7208 bay = device->bay; 7209 for (i = 0; i < MAX_PATHS; i++) { 7210 path_map_index = 1 << i; 7211 if (i == device->active_path_index) 7212 active = "Active"; 7213 else if (device->path_map & path_map_index) 7214 active = "Inactive"; 7215 else 7216 continue; 7217 7218 output_len += scnprintf(buf + output_len, 7219 PAGE_SIZE - output_len, 7220 "[%d:%d:%d:%d] %20.20s ", 7221 ctrl_info->scsi_host->host_no, 7222 device->bus, device->target, 7223 device->lun, 7224 scsi_device_type(device->devtype)); 7225 7226 if (device->devtype == TYPE_RAID || 7227 pqi_is_logical_device(device)) 7228 goto end_buffer; 7229 7230 memcpy(&phys_connector, &device->phys_connector[i], 7231 sizeof(phys_connector)); 7232 if (phys_connector[0] < '0') 7233 phys_connector[0] = '0'; 7234 if (phys_connector[1] < '0') 7235 phys_connector[1] = '0'; 7236 7237 output_len += scnprintf(buf + output_len, 7238 PAGE_SIZE - output_len, 7239 "PORT: %.2s ", phys_connector); 7240 7241 box = device->box[i]; 7242 if (box != 0 && box != 0xFF) 7243 output_len += scnprintf(buf + output_len, 7244 
PAGE_SIZE - output_len, 7245 "BOX: %hhu ", box); 7246 7247 if ((device->devtype == TYPE_DISK || 7248 device->devtype == TYPE_ZBC) && 7249 pqi_expose_device(device)) 7250 output_len += scnprintf(buf + output_len, 7251 PAGE_SIZE - output_len, 7252 "BAY: %hhu ", bay); 7253 7254 end_buffer: 7255 output_len += scnprintf(buf + output_len, 7256 PAGE_SIZE - output_len, 7257 "%s\n", active); 7258 } 7259 7260 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7261 7262 return output_len; 7263 } 7264 7265 static ssize_t pqi_sas_address_show(struct device *dev, 7266 struct device_attribute *attr, char *buffer) 7267 { 7268 struct pqi_ctrl_info *ctrl_info; 7269 struct scsi_device *sdev; 7270 struct pqi_scsi_dev *device; 7271 unsigned long flags; 7272 u64 sas_address; 7273 7274 sdev = to_scsi_device(dev); 7275 ctrl_info = shost_to_hba(sdev->host); 7276 7277 if (pqi_ctrl_offline(ctrl_info)) 7278 return -ENODEV; 7279 7280 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7281 7282 device = sdev->hostdata; 7283 if (!device) { 7284 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7285 return -ENODEV; 7286 } 7287 7288 sas_address = device->sas_address; 7289 7290 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7291 7292 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 7293 } 7294 7295 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 7296 struct device_attribute *attr, char *buffer) 7297 { 7298 struct pqi_ctrl_info *ctrl_info; 7299 struct scsi_device *sdev; 7300 struct pqi_scsi_dev *device; 7301 unsigned long flags; 7302 7303 sdev = to_scsi_device(dev); 7304 ctrl_info = shost_to_hba(sdev->host); 7305 7306 if (pqi_ctrl_offline(ctrl_info)) 7307 return -ENODEV; 7308 7309 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7310 7311 device = sdev->hostdata; 7312 if (!device) { 7313 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7314 return -ENODEV; 7315 } 7316 7317 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 7318 buffer[1] = '\n'; 7319 buffer[2] = '\0'; 7320 7321 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7322 7323 return 2; 7324 } 7325 7326 static ssize_t pqi_raid_level_show(struct device *dev, 7327 struct device_attribute *attr, char *buffer) 7328 { 7329 struct pqi_ctrl_info *ctrl_info; 7330 struct scsi_device *sdev; 7331 struct pqi_scsi_dev *device; 7332 unsigned long flags; 7333 char *raid_level; 7334 7335 sdev = to_scsi_device(dev); 7336 ctrl_info = shost_to_hba(sdev->host); 7337 7338 if (pqi_ctrl_offline(ctrl_info)) 7339 return -ENODEV; 7340 7341 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7342 7343 device = sdev->hostdata; 7344 if (!device) { 7345 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7346 return -ENODEV; 7347 } 7348 7349 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 7350 raid_level = pqi_raid_level_to_string(device->raid_level); 7351 else 7352 raid_level = "N/A"; 7353 7354 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7355 7356 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 7357 } 7358 7359 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 7360 struct device_attribute *attr, char *buffer) 7361 { 7362 struct pqi_ctrl_info *ctrl_info; 7363 struct scsi_device *sdev; 7364 struct pqi_scsi_dev *device; 7365 unsigned long flags; 7366 u64 raid_bypass_cnt; 7367 int cpu; 7368 7369 sdev = to_scsi_device(dev); 7370 ctrl_info = shost_to_hba(sdev->host); 7371 7372 if (pqi_ctrl_offline(ctrl_info)) 7373 return -ENODEV; 7374 7375 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7376 7377 device = sdev->hostdata; 7378 if (!device) { 7379 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7380 return -ENODEV; 7381 } 7382 7383 raid_bypass_cnt = 0; 7384 7385 if (device->raid_io_stats) { 7386 for_each_online_cpu(cpu) { 7387 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; 7388 } 7389 } 7390 7391 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7392 7393 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); 7394 } 7395 7396 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, 7397 struct device_attribute *attr, char *buf) 7398 { 7399 struct pqi_ctrl_info *ctrl_info; 7400 struct scsi_device *sdev; 7401 struct pqi_scsi_dev *device; 7402 unsigned long flags; 7403 int output_len = 0; 7404 7405 sdev = to_scsi_device(dev); 7406 ctrl_info = shost_to_hba(sdev->host); 7407 7408 if (pqi_ctrl_offline(ctrl_info)) 7409 return -ENODEV; 7410 7411 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7412 7413 device = sdev->hostdata; 7414 if (!device) { 7415 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7416 return -ENODEV; 7417 } 7418 7419 output_len = snprintf(buf, PAGE_SIZE, "%d\n", 7420 device->ncq_prio_enable); 7421 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7422 7423 return output_len; 7424 } 7425 7426 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, 7427 struct device_attribute *attr, 7428 const char *buf, size_t count) 7429 { 7430 struct pqi_ctrl_info *ctrl_info; 7431 struct scsi_device *sdev; 7432 struct pqi_scsi_dev *device; 7433 unsigned long flags; 7434 u8 ncq_prio_enable = 0; 7435 7436 if (kstrtou8(buf, 0, &ncq_prio_enable)) 7437 return -EINVAL; 7438 7439 sdev = to_scsi_device(dev); 7440 ctrl_info = shost_to_hba(sdev->host); 7441 7442 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7443 7444 device = 
sdev->hostdata; 7445 7446 if (!device) { 7447 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7448 return -ENODEV; 7449 } 7450 7451 if (!device->ncq_prio_support) { 7452 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7453 return -EINVAL; 7454 } 7455 7456 device->ncq_prio_enable = ncq_prio_enable; 7457 7458 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7459 7460 return strlen(buf); 7461 } 7462 7463 static ssize_t pqi_numa_node_show(struct device *dev, 7464 struct device_attribute *attr, char *buffer) 7465 { 7466 struct scsi_device *sdev; 7467 struct pqi_ctrl_info *ctrl_info; 7468 7469 sdev = to_scsi_device(dev); 7470 ctrl_info = shost_to_hba(sdev->host); 7471 7472 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); 7473 } 7474 7475 static ssize_t pqi_write_stream_cnt_show(struct device *dev, 7476 struct device_attribute *attr, char *buffer) 7477 { 7478 struct pqi_ctrl_info *ctrl_info; 7479 struct scsi_device *sdev; 7480 struct pqi_scsi_dev *device; 7481 unsigned long flags; 7482 u64 write_stream_cnt; 7483 int cpu; 7484 7485 sdev = to_scsi_device(dev); 7486 ctrl_info = shost_to_hba(sdev->host); 7487 7488 if (pqi_ctrl_offline(ctrl_info)) 7489 return -ENODEV; 7490 7491 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7492 7493 device = sdev->hostdata; 7494 if (!device) { 7495 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7496 return -ENODEV; 7497 } 7498 7499 write_stream_cnt = 0; 7500 7501 if (device->raid_io_stats) { 7502 for_each_online_cpu(cpu) { 7503 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; 7504 } 7505 } 7506 7507 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7508 7509 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); 7510 } 7511 7512 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 7513 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 7514 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 7515 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 7516 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 7517 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7518 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7519 static DEVICE_ATTR(sas_ncq_prio_enable, 0644, 7520 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); 7521 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); 7522 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); 7523 7524 static struct attribute *pqi_sdev_attrs[] = { 7525 &dev_attr_lunid.attr, 7526 &dev_attr_unique_id.attr, 7527 &dev_attr_path_info.attr, 7528 &dev_attr_sas_address.attr, 7529 &dev_attr_ssd_smart_path_enabled.attr, 7530 &dev_attr_raid_level.attr, 7531 &dev_attr_raid_bypass_cnt.attr, 7532 &dev_attr_sas_ncq_prio_enable.attr, 7533 &dev_attr_numa_node.attr, 7534 &dev_attr_write_stream_cnt.attr, 7535 NULL 7536 }; 7537 7538 ATTRIBUTE_GROUPS(pqi_sdev); 7539 7540 static const struct scsi_host_template pqi_driver_template = { 7541 .module = THIS_MODULE, 7542 .name = DRIVER_NAME_SHORT, 7543 .proc_name = DRIVER_NAME_SHORT, 7544 .queuecommand = pqi_scsi_queue_command, 7545 .scan_start = pqi_scan_start, 7546 .scan_finished = pqi_scan_finished, 7547 .this_id = -1, 7548 .eh_device_reset_handler = pqi_eh_device_reset_handler, 7549 .eh_abort_handler = pqi_eh_abort_handler, 7550 .ioctl = pqi_ioctl, 7551 .slave_alloc = pqi_slave_alloc, 
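	/*
	 * The sysfs show/store handlers above all rely on sdev->hostdata
	 * pointing at the driver's pqi_scsi_dev (they return -ENODEV when it
	 * is absent); that link is established when the slave_* callbacks
	 * wired in here run (pqi_slave_alloc() in this driver).
	 */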
7552 .slave_configure = pqi_slave_configure, 7553 .slave_destroy = pqi_slave_destroy, 7554 .map_queues = pqi_map_queues, 7555 .sdev_groups = pqi_sdev_groups, 7556 .shost_groups = pqi_shost_groups, 7557 .cmd_size = sizeof(struct pqi_cmd_priv), 7558 }; 7559 7560 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 7561 { 7562 int rc; 7563 struct Scsi_Host *shost; 7564 7565 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 7566 if (!shost) { 7567 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); 7568 return -ENOMEM; 7569 } 7570 7571 shost->io_port = 0; 7572 shost->n_io_port = 0; 7573 shost->this_id = -1; 7574 shost->max_channel = PQI_MAX_BUS; 7575 shost->max_cmd_len = MAX_COMMAND_SIZE; 7576 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; 7577 shost->max_id = ~0; 7578 shost->max_sectors = ctrl_info->max_sectors; 7579 shost->can_queue = ctrl_info->scsi_ml_can_queue; 7580 shost->cmd_per_lun = shost->can_queue; 7581 shost->sg_tablesize = ctrl_info->sg_tablesize; 7582 shost->transportt = pqi_sas_transport_template; 7583 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 7584 shost->unique_id = shost->irq; 7585 shost->nr_hw_queues = ctrl_info->num_queue_groups; 7586 shost->host_tagset = 1; 7587 shost->hostdata[0] = (unsigned long)ctrl_info; 7588 7589 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 7590 if (rc) { 7591 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); 7592 goto free_host; 7593 } 7594 7595 rc = pqi_add_sas_host(shost, ctrl_info); 7596 if (rc) { 7597 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); 7598 goto remove_host; 7599 } 7600 7601 ctrl_info->scsi_host = shost; 7602 7603 return 0; 7604 7605 remove_host: 7606 scsi_remove_host(shost); 7607 free_host: 7608 scsi_host_put(shost); 7609 7610 return rc; 7611 } 7612 7613 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 7614 { 7615 struct Scsi_Host *shost; 7616 7617 pqi_delete_sas_host(ctrl_info); 7618 7619 shost = ctrl_info->scsi_host; 7620 if (!shost) 7621 return; 7622 7623 scsi_remove_host(shost); 7624 scsi_host_put(shost); 7625 } 7626 7627 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 7628 { 7629 int rc = 0; 7630 struct pqi_device_registers __iomem *pqi_registers; 7631 unsigned long timeout; 7632 unsigned int timeout_msecs; 7633 union pqi_reset_register reset_reg; 7634 7635 pqi_registers = ctrl_info->pqi_registers; 7636 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 7637 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 7638 7639 while (1) { 7640 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 7641 reset_reg.all_bits = readl(&pqi_registers->device_reset); 7642 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 7643 break; 7644 if (!sis_is_firmware_running(ctrl_info)) { 7645 rc = -ENXIO; 7646 break; 7647 } 7648 if (time_after(jiffies, timeout)) { 7649 rc = -ETIMEDOUT; 7650 break; 7651 } 7652 } 7653 7654 return rc; 7655 } 7656 7657 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 7658 { 7659 int rc; 7660 union pqi_reset_register reset_reg; 7661 7662 if (ctrl_info->pqi_reset_quiesce_supported) { 7663 rc = sis_pqi_reset_quiesce(ctrl_info); 7664 if (rc) { 7665 dev_err(&ctrl_info->pci_dev->dev, 7666 "PQI reset failed during quiesce with error %d\n", rc); 7667 return rc; 7668 } 7669 } 7670 7671 reset_reg.all_bits = 0; 7672 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 7673 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 7674 7675 writel(reset_reg.all_bits, 
&ctrl_info->pqi_registers->device_reset); 7676 7677 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 7678 if (rc) 7679 dev_err(&ctrl_info->pci_dev->dev, 7680 "PQI reset failed with error %d\n", rc); 7681 7682 return rc; 7683 } 7684 7685 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 7686 { 7687 int rc; 7688 struct bmic_sense_subsystem_info *sense_info; 7689 7690 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 7691 if (!sense_info) 7692 return -ENOMEM; 7693 7694 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 7695 if (rc) 7696 goto out; 7697 7698 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 7699 sizeof(sense_info->ctrl_serial_number)); 7700 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 7701 7702 out: 7703 kfree(sense_info); 7704 7705 return rc; 7706 } 7707 7708 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 7709 { 7710 int rc; 7711 struct bmic_identify_controller *identify; 7712 7713 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 7714 if (!identify) 7715 return -ENOMEM; 7716 7717 rc = pqi_identify_controller(ctrl_info, identify); 7718 if (rc) 7719 goto out; 7720 7721 if (get_unaligned_le32(&identify->extra_controller_flags) & 7722 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { 7723 memcpy(ctrl_info->firmware_version, 7724 identify->firmware_version_long, 7725 sizeof(identify->firmware_version_long)); 7726 } else { 7727 memcpy(ctrl_info->firmware_version, 7728 identify->firmware_version_short, 7729 sizeof(identify->firmware_version_short)); 7730 ctrl_info->firmware_version 7731 [sizeof(identify->firmware_version_short)] = '\0'; 7732 snprintf(ctrl_info->firmware_version + 7733 strlen(ctrl_info->firmware_version), 7734 sizeof(ctrl_info->firmware_version) - 7735 sizeof(identify->firmware_version_short), 7736 "-%u", 7737 get_unaligned_le16(&identify->firmware_build_number)); 7738 } 7739 7740 memcpy(ctrl_info->model, identify->product_id, 7741 sizeof(identify->product_id)); 7742 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 7743 7744 memcpy(ctrl_info->vendor, identify->vendor_id, 7745 sizeof(identify->vendor_id)); 7746 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 7747 7748 dev_info(&ctrl_info->pci_dev->dev, 7749 "Firmware version: %s\n", ctrl_info->firmware_version); 7750 7751 out: 7752 kfree(identify); 7753 7754 return rc; 7755 } 7756 7757 struct pqi_config_table_section_info { 7758 struct pqi_ctrl_info *ctrl_info; 7759 void *section; 7760 u32 section_offset; 7761 void __iomem *section_iomem_addr; 7762 }; 7763 7764 static inline bool pqi_is_firmware_feature_supported( 7765 struct pqi_config_table_firmware_features *firmware_features, 7766 unsigned int bit_position) 7767 { 7768 unsigned int byte_index; 7769 7770 byte_index = bit_position / BITS_PER_BYTE; 7771 7772 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 7773 return false; 7774 7775 return firmware_features->features_supported[byte_index] & 7776 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 7777 } 7778 7779 static inline bool pqi_is_firmware_feature_enabled( 7780 struct pqi_config_table_firmware_features *firmware_features, 7781 void __iomem *firmware_features_iomem_addr, 7782 unsigned int bit_position) 7783 { 7784 unsigned int byte_index; 7785 u8 __iomem *features_enabled_iomem_addr; 7786 7787 byte_index = (bit_position / BITS_PER_BYTE) + 7788 (le16_to_cpu(firmware_features->num_elements) * 2); 7789 7790 features_enabled_iomem_addr = firmware_features_iomem_addr + 7791 offsetof(struct pqi_config_table_firmware_features, 7792 features_supported) + byte_index; 7793 7794 return *((__force u8 *)features_enabled_iomem_addr) & 7795 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 7796 } 7797 7798 static inline void pqi_request_firmware_feature( 7799 struct pqi_config_table_firmware_features *firmware_features, 7800 unsigned int bit_position) 7801 { 7802 unsigned int byte_index; 7803 7804 byte_index = (bit_position / BITS_PER_BYTE) + 7805 le16_to_cpu(firmware_features->num_elements); 7806 7807 firmware_features->features_supported[byte_index] |= 7808 (1 << (bit_position % BITS_PER_BYTE)); 7809 } 7810 7811 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 7812 u16 first_section, u16 last_section) 7813 { 7814 struct pqi_vendor_general_request request; 7815 7816 memset(&request, 0, sizeof(request)); 7817 7818 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7819 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7820 &request.header.iu_length); 7821 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 7822 &request.function_code); 7823 put_unaligned_le16(first_section, 7824 &request.data.config_table_update.first_section); 7825 put_unaligned_le16(last_section, 7826 &request.data.config_table_update.last_section); 7827 7828 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 7829 } 7830 7831 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 7832 struct pqi_config_table_firmware_features *firmware_features, 7833 void __iomem *firmware_features_iomem_addr) 7834 { 7835 void *features_requested; 7836 void __iomem *features_requested_iomem_addr; 7837 void __iomem *host_max_known_feature_iomem_addr; 7838 7839 features_requested = firmware_features->features_supported + 7840 le16_to_cpu(firmware_features->num_elements); 7841 7842 features_requested_iomem_addr = firmware_features_iomem_addr + 7843 (features_requested - (void *)firmware_features); 7844 7845 memcpy_toio(features_requested_iomem_addr, features_requested, 7846 le16_to_cpu(firmware_features->num_elements)); 7847 7848 if (pqi_is_firmware_feature_supported(firmware_features, 7849 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { 7850 host_max_known_feature_iomem_addr = 7851 features_requested_iomem_addr + 7852 (le16_to_cpu(firmware_features->num_elements) * 2) + 7853 sizeof(__le16); 7854 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); 7855 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); 7856 } 7857 7858 return pqi_config_table_update(ctrl_info, 7859 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 7860 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 7861 } 7862 7863 struct pqi_firmware_feature { 7864 char *feature_name; 7865 unsigned int feature_bit; 7866 bool supported; 7867 bool enabled; 7868 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 7869 struct pqi_firmware_feature *firmware_feature); 7870 }; 7871 7872 static void 
pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 7873 struct pqi_firmware_feature *firmware_feature) 7874 { 7875 if (!firmware_feature->supported) { 7876 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 7877 firmware_feature->feature_name); 7878 return; 7879 } 7880 7881 if (firmware_feature->enabled) { 7882 dev_info(&ctrl_info->pci_dev->dev, 7883 "%s enabled\n", firmware_feature->feature_name); 7884 return; 7885 } 7886 7887 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 7888 firmware_feature->feature_name); 7889 } 7890 7891 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 7892 struct pqi_firmware_feature *firmware_feature) 7893 { 7894 switch (firmware_feature->feature_bit) { 7895 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: 7896 ctrl_info->enable_r1_writes = firmware_feature->enabled; 7897 break; 7898 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: 7899 ctrl_info->enable_r5_writes = firmware_feature->enabled; 7900 break; 7901 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: 7902 ctrl_info->enable_r6_writes = firmware_feature->enabled; 7903 break; 7904 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 7905 ctrl_info->soft_reset_handshake_supported = 7906 firmware_feature->enabled && 7907 pqi_read_soft_reset_status(ctrl_info); 7908 break; 7909 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 7910 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; 7911 break; 7912 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 7913 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; 7914 break; 7915 case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7916 ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7917 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7918 break; 7919 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7920 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7921 break; 7922 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: 7923 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; 7924 break; 7925 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: 7926 ctrl_info->ctrl_logging_supported = firmware_feature->enabled; 7927 break; 7928 } 7929 7930 pqi_firmware_feature_status(ctrl_info, firmware_feature); 7931 } 7932 7933 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 7934 struct pqi_firmware_feature *firmware_feature) 7935 { 7936 if (firmware_feature->feature_status) 7937 firmware_feature->feature_status(ctrl_info, firmware_feature); 7938 } 7939 7940 static DEFINE_MUTEX(pqi_firmware_features_mutex); 7941 7942 static struct pqi_firmware_feature pqi_firmware_features[] = { 7943 { 7944 .feature_name = "Online Firmware Activation", 7945 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 7946 .feature_status = pqi_firmware_feature_status, 7947 }, 7948 { 7949 .feature_name = "Serial Management Protocol", 7950 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 7951 .feature_status = pqi_firmware_feature_status, 7952 }, 7953 { 7954 .feature_name = "Maximum Known Feature", 7955 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, 7956 .feature_status = pqi_firmware_feature_status, 7957 }, 7958 { 7959 .feature_name = "RAID 0 Read Bypass", 7960 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, 7961 .feature_status = pqi_firmware_feature_status, 7962 }, 7963 { 7964 .feature_name = "RAID 1 Read Bypass", 7965 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, 7966 .feature_status = pqi_firmware_feature_status, 7967 }, 7968 { 7969 .feature_name = "RAID 
5 Read Bypass", 7970 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, 7971 .feature_status = pqi_firmware_feature_status, 7972 }, 7973 { 7974 .feature_name = "RAID 6 Read Bypass", 7975 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, 7976 .feature_status = pqi_firmware_feature_status, 7977 }, 7978 { 7979 .feature_name = "RAID 0 Write Bypass", 7980 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, 7981 .feature_status = pqi_firmware_feature_status, 7982 }, 7983 { 7984 .feature_name = "RAID 1 Write Bypass", 7985 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, 7986 .feature_status = pqi_ctrl_update_feature_flags, 7987 }, 7988 { 7989 .feature_name = "RAID 5 Write Bypass", 7990 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, 7991 .feature_status = pqi_ctrl_update_feature_flags, 7992 }, 7993 { 7994 .feature_name = "RAID 6 Write Bypass", 7995 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, 7996 .feature_status = pqi_ctrl_update_feature_flags, 7997 }, 7998 { 7999 .feature_name = "New Soft Reset Handshake", 8000 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 8001 .feature_status = pqi_ctrl_update_feature_flags, 8002 }, 8003 { 8004 .feature_name = "RAID IU Timeout", 8005 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 8006 .feature_status = pqi_ctrl_update_feature_flags, 8007 }, 8008 { 8009 .feature_name = "TMF IU Timeout", 8010 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 8011 .feature_status = pqi_ctrl_update_feature_flags, 8012 }, 8013 { 8014 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", 8015 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, 8016 .feature_status = pqi_firmware_feature_status, 8017 }, 8018 { 8019 .feature_name = "Firmware Triage", 8020 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, 8021 .feature_status = pqi_ctrl_update_feature_flags, 8022 }, 8023 { 8024 .feature_name = "RPL Extended Formats 4 and 5", 8025 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 8026 .feature_status = pqi_ctrl_update_feature_flags, 8027 }, 8028 { 8029 .feature_name = "Multi-LUN Target", 8030 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, 8031 .feature_status = pqi_ctrl_update_feature_flags, 8032 }, 8033 { 8034 .feature_name = "Controller Data Logging", 8035 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, 8036 .feature_status = pqi_ctrl_update_feature_flags, 8037 }, 8038 }; 8039 8040 static void pqi_process_firmware_features( 8041 struct pqi_config_table_section_info *section_info) 8042 { 8043 int rc; 8044 struct pqi_ctrl_info *ctrl_info; 8045 struct pqi_config_table_firmware_features *firmware_features; 8046 void __iomem *firmware_features_iomem_addr; 8047 unsigned int i; 8048 unsigned int num_features_supported; 8049 8050 ctrl_info = section_info->ctrl_info; 8051 firmware_features = section_info->section; 8052 firmware_features_iomem_addr = section_info->section_iomem_addr; 8053 8054 for (i = 0, num_features_supported = 0; 8055 i < ARRAY_SIZE(pqi_firmware_features); i++) { 8056 if (pqi_is_firmware_feature_supported(firmware_features, 8057 pqi_firmware_features[i].feature_bit)) { 8058 pqi_firmware_features[i].supported = true; 8059 num_features_supported++; 8060 } else { 8061 pqi_firmware_feature_update(ctrl_info, 8062 &pqi_firmware_features[i]); 8063 } 8064 } 8065 8066 if (num_features_supported == 0) 8067 return; 8068 8069 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8070 if (!pqi_firmware_features[i].supported) 8071 continue; 8072 
pqi_request_firmware_feature(firmware_features, 8073 pqi_firmware_features[i].feature_bit); 8074 } 8075 8076 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 8077 firmware_features_iomem_addr); 8078 if (rc) { 8079 dev_err(&ctrl_info->pci_dev->dev, 8080 "failed to enable firmware features in PQI configuration table\n"); 8081 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8082 if (!pqi_firmware_features[i].supported) 8083 continue; 8084 pqi_firmware_feature_update(ctrl_info, 8085 &pqi_firmware_features[i]); 8086 } 8087 return; 8088 } 8089 8090 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8091 if (!pqi_firmware_features[i].supported) 8092 continue; 8093 if (pqi_is_firmware_feature_enabled(firmware_features, 8094 firmware_features_iomem_addr, 8095 pqi_firmware_features[i].feature_bit)) { 8096 pqi_firmware_features[i].enabled = true; 8097 } 8098 pqi_firmware_feature_update(ctrl_info, 8099 &pqi_firmware_features[i]); 8100 } 8101 } 8102 8103 static void pqi_init_firmware_features(void) 8104 { 8105 unsigned int i; 8106 8107 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8108 pqi_firmware_features[i].supported = false; 8109 pqi_firmware_features[i].enabled = false; 8110 } 8111 } 8112 8113 static void pqi_process_firmware_features_section( 8114 struct pqi_config_table_section_info *section_info) 8115 { 8116 mutex_lock(&pqi_firmware_features_mutex); 8117 pqi_init_firmware_features(); 8118 pqi_process_firmware_features(section_info); 8119 mutex_unlock(&pqi_firmware_features_mutex); 8120 } 8121 8122 /* 8123 * Reset all controller settings that can be initialized during the processing 8124 * of the PQI Configuration Table. 8125 */ 8126 8127 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) 8128 { 8129 ctrl_info->heartbeat_counter = NULL; 8130 ctrl_info->soft_reset_status = NULL; 8131 ctrl_info->soft_reset_handshake_supported = false; 8132 ctrl_info->enable_r1_writes = false; 8133 ctrl_info->enable_r5_writes = false; 8134 ctrl_info->enable_r6_writes = false; 8135 ctrl_info->raid_iu_timeout_supported = false; 8136 ctrl_info->tmf_iu_timeout_supported = false; 8137 ctrl_info->firmware_triage_supported = false; 8138 ctrl_info->rpl_extended_format_4_5_supported = false; 8139 ctrl_info->multi_lun_device_supported = false; 8140 ctrl_info->ctrl_logging_supported = false; 8141 } 8142 8143 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 8144 { 8145 u32 table_length; 8146 u32 section_offset; 8147 bool firmware_feature_section_present; 8148 void __iomem *table_iomem_addr; 8149 struct pqi_config_table *config_table; 8150 struct pqi_config_table_section_header *section; 8151 struct pqi_config_table_section_info section_info; 8152 struct pqi_config_table_section_info feature_section_info = {0}; 8153 8154 table_length = ctrl_info->config_table_length; 8155 if (table_length == 0) 8156 return 0; 8157 8158 config_table = kmalloc(table_length, GFP_KERNEL); 8159 if (!config_table) { 8160 dev_err(&ctrl_info->pci_dev->dev, 8161 "failed to allocate memory for PQI configuration table\n"); 8162 return -ENOMEM; 8163 } 8164 8165 /* 8166 * Copy the config table contents from I/O memory space into the 8167 * temporary buffer. 
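 * Working from this RAM copy lets the section walk below use ordinary
 * pointer arithmetic; only the heartbeat counter and the soft reset
 * status remain __iomem pointers into the live table.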
8168 */ 8169 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; 8170 memcpy_fromio(config_table, table_iomem_addr, table_length); 8171 8172 firmware_feature_section_present = false; 8173 section_info.ctrl_info = ctrl_info; 8174 section_offset = get_unaligned_le32(&config_table->first_section_offset); 8175 8176 while (section_offset) { 8177 section = (void *)config_table + section_offset; 8178 8179 section_info.section = section; 8180 section_info.section_offset = section_offset; 8181 section_info.section_iomem_addr = table_iomem_addr + section_offset; 8182 8183 switch (get_unaligned_le16(&section->section_id)) { 8184 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 8185 firmware_feature_section_present = true; 8186 feature_section_info = section_info; 8187 break; 8188 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 8189 if (pqi_disable_heartbeat) 8190 dev_warn(&ctrl_info->pci_dev->dev, 8191 "heartbeat disabled by module parameter\n"); 8192 else 8193 ctrl_info->heartbeat_counter = 8194 table_iomem_addr + 8195 section_offset + 8196 offsetof(struct pqi_config_table_heartbeat, 8197 heartbeat_counter); 8198 break; 8199 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 8200 ctrl_info->soft_reset_status = 8201 table_iomem_addr + 8202 section_offset + 8203 offsetof(struct pqi_config_table_soft_reset, 8204 soft_reset_status); 8205 break; 8206 } 8207 8208 section_offset = get_unaligned_le16(&section->next_section_offset); 8209 } 8210 8211 /* 8212 * We process the firmware feature section after all other sections 8213 * have been processed so that the feature bit callbacks can take 8214 * into account the settings configured by other sections. 8215 */ 8216 if (firmware_feature_section_present) 8217 pqi_process_firmware_features_section(&feature_section_info); 8218 8219 kfree(config_table); 8220 8221 return 0; 8222 } 8223 8224 /* Switches the controller from PQI mode back into SIS mode. */ 8225 8226 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 8227 { 8228 int rc; 8229 8230 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 8231 rc = pqi_reset(ctrl_info); 8232 if (rc) 8233 return rc; 8234 rc = sis_reenable_sis_mode(ctrl_info); 8235 if (rc) { 8236 dev_err(&ctrl_info->pci_dev->dev, 8237 "re-enabling SIS mode failed with error %d\n", rc); 8238 return rc; 8239 } 8240 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8241 8242 return 0; 8243 } 8244 8245 /* 8246 * If the controller isn't already in SIS mode, this function forces it into 8247 * SIS mode.
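 *
 * Rough decision tree (mirroring the checks in the function body):
 *   firmware not running      -> fail with -ENXIO
 *   already in SIS mode       -> nothing to do
 *   SIS "kernel up" reported  -> just record SIS mode
 *   otherwise                 -> PQI reset, then re-enable SIS mode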
8248 */ 8249 8250 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 8251 { 8252 if (!sis_is_firmware_running(ctrl_info)) 8253 return -ENXIO; 8254 8255 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 8256 return 0; 8257 8258 if (sis_is_kernel_up(ctrl_info)) { 8259 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8260 return 0; 8261 } 8262 8263 return pqi_revert_to_sis_mode(ctrl_info); 8264 } 8265 8266 static void pqi_perform_lockup_action(void) 8267 { 8268 switch (pqi_lockup_action) { 8269 case PANIC: 8270 panic("FATAL: Smart Family Controller lockup detected"); 8271 break; 8272 case REBOOT: 8273 emergency_restart(); 8274 break; 8275 case NONE: 8276 default: 8277 break; 8278 } 8279 } 8280 8281 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) 8282 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) 8283 8284 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 8285 { 8286 int rc; 8287 u32 product_id; 8288 8289 if (reset_devices) { 8290 if (pqi_is_fw_triage_supported(ctrl_info)) { 8291 rc = sis_wait_for_fw_triage_completion(ctrl_info); 8292 if (rc) 8293 return rc; 8294 } 8295 if (sis_is_ctrl_logging_supported(ctrl_info)) { 8296 sis_notify_kdump(ctrl_info); 8297 rc = sis_wait_for_ctrl_logging_completion(ctrl_info); 8298 if (rc) 8299 return rc; 8300 } 8301 sis_soft_reset(ctrl_info); 8302 ssleep(PQI_POST_RESET_DELAY_SECS); 8303 } else { 8304 rc = pqi_force_sis_mode(ctrl_info); 8305 if (rc) 8306 return rc; 8307 } 8308 8309 /* 8310 * Wait until the controller is ready to start accepting SIS 8311 * commands. 8312 */ 8313 rc = sis_wait_for_ctrl_ready(ctrl_info); 8314 if (rc) { 8315 if (reset_devices) { 8316 dev_err(&ctrl_info->pci_dev->dev, 8317 "kdump init failed with error %d\n", rc); 8318 pqi_lockup_action = REBOOT; 8319 pqi_perform_lockup_action(); 8320 } 8321 return rc; 8322 } 8323 8324 /* 8325 * Get the controller properties. This allows us to determine 8326 * whether or not it supports PQI mode. 8327 */ 8328 rc = sis_get_ctrl_properties(ctrl_info); 8329 if (rc) { 8330 dev_err(&ctrl_info->pci_dev->dev, 8331 "error obtaining controller properties\n"); 8332 return rc; 8333 } 8334 8335 rc = sis_get_pqi_capabilities(ctrl_info); 8336 if (rc) { 8337 dev_err(&ctrl_info->pci_dev->dev, 8338 "error obtaining controller capabilities\n"); 8339 return rc; 8340 } 8341 8342 product_id = sis_get_product_id(ctrl_info); 8343 ctrl_info->product_id = (u8)product_id; 8344 ctrl_info->product_revision = (u8)(product_id >> 8); 8345 8346 if (reset_devices) { 8347 if (ctrl_info->max_outstanding_requests > 8348 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 8349 ctrl_info->max_outstanding_requests = 8350 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 8351 } else { 8352 if (ctrl_info->max_outstanding_requests > 8353 PQI_MAX_OUTSTANDING_REQUESTS) 8354 ctrl_info->max_outstanding_requests = 8355 PQI_MAX_OUTSTANDING_REQUESTS; 8356 } 8357 8358 pqi_calculate_io_resources(ctrl_info); 8359 8360 rc = pqi_alloc_error_buffer(ctrl_info); 8361 if (rc) { 8362 dev_err(&ctrl_info->pci_dev->dev, 8363 "failed to allocate PQI error buffer\n"); 8364 return rc; 8365 } 8366 8367 /* 8368 * If the function we are about to call succeeds, the 8369 * controller will transition from legacy SIS mode 8370 * into PQI mode. 8371 */ 8372 rc = sis_init_base_struct_addr(ctrl_info); 8373 if (rc) { 8374 dev_err(&ctrl_info->pci_dev->dev, 8375 "error initializing PQI mode\n"); 8376 return rc; 8377 } 8378 8379 /* Wait for the controller to complete the SIS -> PQI transition. 
*/ 8380 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8381 if (rc) { 8382 dev_err(&ctrl_info->pci_dev->dev, 8383 "transition to PQI mode failed\n"); 8384 return rc; 8385 } 8386 8387 /* From here on, we are running in PQI mode. */ 8388 ctrl_info->pqi_mode_enabled = true; 8389 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8390 8391 rc = pqi_alloc_admin_queues(ctrl_info); 8392 if (rc) { 8393 dev_err(&ctrl_info->pci_dev->dev, 8394 "failed to allocate admin queues\n"); 8395 return rc; 8396 } 8397 8398 rc = pqi_create_admin_queues(ctrl_info); 8399 if (rc) { 8400 dev_err(&ctrl_info->pci_dev->dev, 8401 "error creating admin queues\n"); 8402 return rc; 8403 } 8404 8405 rc = pqi_report_device_capability(ctrl_info); 8406 if (rc) { 8407 dev_err(&ctrl_info->pci_dev->dev, 8408 "obtaining device capability failed\n"); 8409 return rc; 8410 } 8411 8412 rc = pqi_validate_device_capability(ctrl_info); 8413 if (rc) 8414 return rc; 8415 8416 pqi_calculate_queue_resources(ctrl_info); 8417 8418 rc = pqi_enable_msix_interrupts(ctrl_info); 8419 if (rc) 8420 return rc; 8421 8422 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 8423 ctrl_info->max_msix_vectors = 8424 ctrl_info->num_msix_vectors_enabled; 8425 pqi_calculate_queue_resources(ctrl_info); 8426 } 8427 8428 rc = pqi_alloc_io_resources(ctrl_info); 8429 if (rc) 8430 return rc; 8431 8432 rc = pqi_alloc_operational_queues(ctrl_info); 8433 if (rc) { 8434 dev_err(&ctrl_info->pci_dev->dev, 8435 "failed to allocate operational queues\n"); 8436 return rc; 8437 } 8438 8439 pqi_init_operational_queues(ctrl_info); 8440 8441 rc = pqi_create_queues(ctrl_info); 8442 if (rc) 8443 return rc; 8444 8445 rc = pqi_request_irqs(ctrl_info); 8446 if (rc) 8447 return rc; 8448 8449 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8450 8451 ctrl_info->controller_online = true; 8452 8453 rc = pqi_process_config_table(ctrl_info); 8454 if (rc) 8455 return rc; 8456 8457 pqi_start_heartbeat_timer(ctrl_info); 8458 8459 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8460 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8461 if (rc) { /* Supported features not returned correctly. */ 8462 dev_err(&ctrl_info->pci_dev->dev, 8463 "error obtaining advanced RAID bypass configuration\n"); 8464 return rc; 8465 } 8466 ctrl_info->ciss_report_log_flags |= 8467 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8468 } 8469 8470 rc = pqi_enable_events(ctrl_info); 8471 if (rc) { 8472 dev_err(&ctrl_info->pci_dev->dev, 8473 "error enabling events\n"); 8474 return rc; 8475 } 8476 8477 /* Register with the SCSI subsystem. 
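 * Once scsi_add_host() succeeds and devices are scanned, the per-device
 * attributes defined earlier (lunid, sas_address, raid_bypass_cnt, ...)
 * become readable from user space, for example (the H:C:T:L values here
 * are illustrative only):
 *
 *   cat /sys/class/scsi_device/0:0:0:0/device/raid_bypass_cnt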
*/ 8478 rc = pqi_register_scsi(ctrl_info); 8479 if (rc) 8480 return rc; 8481 8482 if (ctrl_info->ctrl_logging_supported && !reset_devices) { 8483 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); 8484 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8485 } 8486 8487 rc = pqi_get_ctrl_product_details(ctrl_info); 8488 if (rc) { 8489 dev_err(&ctrl_info->pci_dev->dev, 8490 "error obtaining product details\n"); 8491 return rc; 8492 } 8493 8494 rc = pqi_get_ctrl_serial_number(ctrl_info); 8495 if (rc) { 8496 dev_err(&ctrl_info->pci_dev->dev, 8497 "error obtaining ctrl serial number\n"); 8498 return rc; 8499 } 8500 8501 rc = pqi_set_diag_rescan(ctrl_info); 8502 if (rc) { 8503 dev_err(&ctrl_info->pci_dev->dev, 8504 "error enabling multi-lun rescan\n"); 8505 return rc; 8506 } 8507 8508 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8509 if (rc) { 8510 dev_err(&ctrl_info->pci_dev->dev, 8511 "error updating host wellness\n"); 8512 return rc; 8513 } 8514 8515 pqi_schedule_update_time_worker(ctrl_info); 8516 8517 pqi_scan_scsi_devices(ctrl_info); 8518 8519 return 0; 8520 } 8521 8522 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 8523 { 8524 unsigned int i; 8525 struct pqi_admin_queues *admin_queues; 8526 struct pqi_event_queue *event_queue; 8527 8528 admin_queues = &ctrl_info->admin_queues; 8529 admin_queues->iq_pi_copy = 0; 8530 admin_queues->oq_ci_copy = 0; 8531 writel(0, admin_queues->oq_pi); 8532 8533 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 8534 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 8535 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 8536 ctrl_info->queue_groups[i].oq_ci_copy = 0; 8537 8538 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 8539 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 8540 writel(0, ctrl_info->queue_groups[i].oq_pi); 8541 } 8542 8543 event_queue = &ctrl_info->event_queue; 8544 writel(0, event_queue->oq_pi); 8545 event_queue->oq_ci_copy = 0; 8546 } 8547 8548 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 8549 { 8550 int rc; 8551 8552 rc = pqi_force_sis_mode(ctrl_info); 8553 if (rc) 8554 return rc; 8555 8556 /* 8557 * Wait until the controller is ready to start accepting SIS 8558 * commands. 8559 */ 8560 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 8561 if (rc) 8562 return rc; 8563 8564 /* 8565 * Get the controller properties. This allows us to determine 8566 * whether or not it supports PQI mode. 8567 */ 8568 rc = sis_get_ctrl_properties(ctrl_info); 8569 if (rc) { 8570 dev_err(&ctrl_info->pci_dev->dev, 8571 "error obtaining controller properties\n"); 8572 return rc; 8573 } 8574 8575 rc = sis_get_pqi_capabilities(ctrl_info); 8576 if (rc) { 8577 dev_err(&ctrl_info->pci_dev->dev, 8578 "error obtaining controller capabilities\n"); 8579 return rc; 8580 } 8581 8582 /* 8583 * If the function we are about to call succeeds, the 8584 * controller will transition from legacy SIS mode 8585 * into PQI mode. 8586 */ 8587 rc = sis_init_base_struct_addr(ctrl_info); 8588 if (rc) { 8589 dev_err(&ctrl_info->pci_dev->dev, 8590 "error initializing PQI mode\n"); 8591 return rc; 8592 } 8593 8594 /* Wait for the controller to complete the SIS -> PQI transition. */ 8595 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8596 if (rc) { 8597 dev_err(&ctrl_info->pci_dev->dev, 8598 "transition to PQI mode failed\n"); 8599 return rc; 8600 } 8601 8602 /* From here on, we are running in PQI mode. 
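 * Unlike pqi_ctrl_init(), this resume path reuses the admin, operational
 * and event queue memory allocated at first initialization and only
 * resets the producer/consumer indices via pqi_reinit_queues().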
*/ 8603 ctrl_info->pqi_mode_enabled = true; 8604 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8605 8606 pqi_reinit_queues(ctrl_info); 8607 8608 rc = pqi_create_admin_queues(ctrl_info); 8609 if (rc) { 8610 dev_err(&ctrl_info->pci_dev->dev, 8611 "error creating admin queues\n"); 8612 return rc; 8613 } 8614 8615 rc = pqi_create_queues(ctrl_info); 8616 if (rc) 8617 return rc; 8618 8619 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8620 8621 ctrl_info->controller_online = true; 8622 pqi_ctrl_unblock_requests(ctrl_info); 8623 8624 pqi_ctrl_reset_config(ctrl_info); 8625 8626 rc = pqi_process_config_table(ctrl_info); 8627 if (rc) 8628 return rc; 8629 8630 pqi_start_heartbeat_timer(ctrl_info); 8631 8632 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8633 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8634 if (rc) { 8635 dev_err(&ctrl_info->pci_dev->dev, 8636 "error obtaining advanced RAID bypass configuration\n"); 8637 return rc; 8638 } 8639 ctrl_info->ciss_report_log_flags |= 8640 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8641 } 8642 8643 rc = pqi_enable_events(ctrl_info); 8644 if (rc) { 8645 dev_err(&ctrl_info->pci_dev->dev, 8646 "error enabling events\n"); 8647 return rc; 8648 } 8649 8650 rc = pqi_get_ctrl_product_details(ctrl_info); 8651 if (rc) { 8652 dev_err(&ctrl_info->pci_dev->dev, 8653 "error obtaining product details\n"); 8654 return rc; 8655 } 8656 8657 rc = pqi_set_diag_rescan(ctrl_info); 8658 if (rc) { 8659 dev_err(&ctrl_info->pci_dev->dev, 8660 "error enabling multi-lun rescan\n"); 8661 return rc; 8662 } 8663 8664 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8665 if (rc) { 8666 dev_err(&ctrl_info->pci_dev->dev, 8667 "error updating host wellness\n"); 8668 return rc; 8669 } 8670 8671 if (pqi_ofa_in_progress(ctrl_info)) { 8672 pqi_ctrl_unblock_scan(ctrl_info); 8673 if (ctrl_info->ctrl_logging_supported) { 8674 if (!ctrl_info->ctrl_log_memory.host_memory) 8675 pqi_host_setup_buffer(ctrl_info, 8676 &ctrl_info->ctrl_log_memory, 8677 PQI_CTRL_LOG_TOTAL_SIZE, 8678 PQI_CTRL_LOG_MIN_SIZE); 8679 pqi_host_memory_update(ctrl_info, 8680 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8681 } else { 8682 if (ctrl_info->ctrl_log_memory.host_memory) 8683 pqi_host_free_buffer(ctrl_info, 8684 &ctrl_info->ctrl_log_memory); 8685 } 8686 } 8687 8688 pqi_scan_scsi_devices(ctrl_info); 8689 8690 return 0; 8691 } 8692 8693 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) 8694 { 8695 int rc; 8696 8697 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 8698 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 8699 8700 return pcibios_err_to_errno(rc); 8701 } 8702 8703 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 8704 { 8705 int rc; 8706 u64 mask; 8707 8708 rc = pci_enable_device(ctrl_info->pci_dev); 8709 if (rc) { 8710 dev_err(&ctrl_info->pci_dev->dev, 8711 "failed to enable PCI device\n"); 8712 return rc; 8713 } 8714 8715 if (sizeof(dma_addr_t) > 4) 8716 mask = DMA_BIT_MASK(64); 8717 else 8718 mask = DMA_BIT_MASK(32); 8719 8720 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 8721 if (rc) { 8722 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 8723 goto disable_device; 8724 } 8725 8726 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 8727 if (rc) { 8728 dev_err(&ctrl_info->pci_dev->dev, 8729 "failed to obtain PCI resources\n"); 8730 goto disable_device; 8731 } 8732 8733 ctrl_info->iomem_base = ioremap(pci_resource_start( 8734 ctrl_info->pci_dev, 0), 8735 
pci_resource_len(ctrl_info->pci_dev, 0)); 8736 if (!ctrl_info->iomem_base) { 8737 dev_err(&ctrl_info->pci_dev->dev, 8738 "failed to map memory for controller registers\n"); 8739 rc = -ENOMEM; 8740 goto release_regions; 8741 } 8742 8743 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 8744 8745 /* Increase the PCIe completion timeout. */ 8746 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 8747 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 8748 if (rc) { 8749 dev_err(&ctrl_info->pci_dev->dev, 8750 "failed to set PCIe completion timeout\n"); 8751 goto release_regions; 8752 } 8753 8754 /* Enable bus mastering. */ 8755 pci_set_master(ctrl_info->pci_dev); 8756 8757 ctrl_info->registers = ctrl_info->iomem_base; 8758 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 8759 8760 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 8761 8762 return 0; 8763 8764 release_regions: 8765 pci_release_regions(ctrl_info->pci_dev); 8766 disable_device: 8767 pci_disable_device(ctrl_info->pci_dev); 8768 8769 return rc; 8770 } 8771 8772 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 8773 { 8774 iounmap(ctrl_info->iomem_base); 8775 pci_release_regions(ctrl_info->pci_dev); 8776 if (pci_is_enabled(ctrl_info->pci_dev)) 8777 pci_disable_device(ctrl_info->pci_dev); 8778 pci_set_drvdata(ctrl_info->pci_dev, NULL); 8779 } 8780 8781 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 8782 { 8783 struct pqi_ctrl_info *ctrl_info; 8784 8785 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 8786 GFP_KERNEL, numa_node); 8787 if (!ctrl_info) 8788 return NULL; 8789 8790 mutex_init(&ctrl_info->scan_mutex); 8791 mutex_init(&ctrl_info->lun_reset_mutex); 8792 mutex_init(&ctrl_info->ofa_mutex); 8793 8794 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 8795 spin_lock_init(&ctrl_info->scsi_device_list_lock); 8796 8797 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 8798 atomic_set(&ctrl_info->num_interrupts, 0); 8799 8800 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 8801 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 8802 8803 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 8804 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 8805 8806 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); 8807 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); 8808 8809 sema_init(&ctrl_info->sync_request_sem, 8810 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 8811 init_waitqueue_head(&ctrl_info->block_requests_wait); 8812 8813 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 8814 ctrl_info->irq_mode = IRQ_MODE_NONE; 8815 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 8816 8817 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; 8818 ctrl_info->max_transfer_encrypted_sas_sata = 8819 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; 8820 ctrl_info->max_transfer_encrypted_nvme = 8821 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; 8822 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; 8823 ctrl_info->max_write_raid_1_10_2drive = ~0; 8824 ctrl_info->max_write_raid_1_10_3drive = ~0; 8825 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; 8826 8827 return ctrl_info; 8828 } 8829 8830 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 8831 { 8832 kfree(ctrl_info); 8833 } 8834 8835 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 8836 { 8837 pqi_free_irqs(ctrl_info); 8838 
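	/*
	 * Tear down the registered IRQ handlers first; the MSI-X vectors they
	 * were attached to are released by the call below.
	 */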
pqi_disable_msix_interrupts(ctrl_info); 8839 } 8840 8841 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 8842 { 8843 pqi_free_interrupts(ctrl_info); 8844 if (ctrl_info->queue_memory_base) 8845 dma_free_coherent(&ctrl_info->pci_dev->dev, 8846 ctrl_info->queue_memory_length, 8847 ctrl_info->queue_memory_base, 8848 ctrl_info->queue_memory_base_dma_handle); 8849 if (ctrl_info->admin_queue_memory_base) 8850 dma_free_coherent(&ctrl_info->pci_dev->dev, 8851 ctrl_info->admin_queue_memory_length, 8852 ctrl_info->admin_queue_memory_base, 8853 ctrl_info->admin_queue_memory_base_dma_handle); 8854 pqi_free_all_io_requests(ctrl_info); 8855 if (ctrl_info->error_buffer) 8856 dma_free_coherent(&ctrl_info->pci_dev->dev, 8857 ctrl_info->error_buffer_length, 8858 ctrl_info->error_buffer, 8859 ctrl_info->error_buffer_dma_handle); 8860 if (ctrl_info->iomem_base) 8861 pqi_cleanup_pci_init(ctrl_info); 8862 pqi_free_ctrl_info(ctrl_info); 8863 } 8864 8865 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 8866 { 8867 ctrl_info->controller_online = false; 8868 pqi_stop_heartbeat_timer(ctrl_info); 8869 pqi_ctrl_block_requests(ctrl_info); 8870 pqi_cancel_rescan_worker(ctrl_info); 8871 pqi_cancel_update_time_worker(ctrl_info); 8872 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { 8873 pqi_fail_all_outstanding_requests(ctrl_info); 8874 ctrl_info->pqi_mode_enabled = false; 8875 } 8876 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); 8877 pqi_unregister_scsi(ctrl_info); 8878 if (ctrl_info->pqi_mode_enabled) 8879 pqi_revert_to_sis_mode(ctrl_info); 8880 pqi_free_ctrl_resources(ctrl_info); 8881 } 8882 8883 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 8884 { 8885 pqi_ctrl_block_scan(ctrl_info); 8886 pqi_scsi_block_requests(ctrl_info); 8887 pqi_ctrl_block_device_reset(ctrl_info); 8888 pqi_ctrl_block_requests(ctrl_info); 8889 pqi_ctrl_wait_until_quiesced(ctrl_info); 8890 pqi_stop_heartbeat_timer(ctrl_info); 8891 } 8892 8893 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 8894 { 8895 pqi_start_heartbeat_timer(ctrl_info); 8896 pqi_ctrl_unblock_requests(ctrl_info); 8897 pqi_ctrl_unblock_device_reset(ctrl_info); 8898 pqi_scsi_unblock_requests(ctrl_info); 8899 pqi_ctrl_unblock_scan(ctrl_info); 8900 } 8901 8902 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) 8903 { 8904 ssleep(delay_secs); 8905 8906 return pqi_ctrl_init_resume(ctrl_info); 8907 } 8908 8909 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, 8910 struct pqi_host_memory_descriptor *host_memory_descriptor, 8911 u32 total_size, u32 chunk_size) 8912 { 8913 int i; 8914 u32 sg_count; 8915 struct device *dev; 8916 struct pqi_host_memory *host_memory; 8917 struct pqi_sg_descriptor *mem_descriptor; 8918 dma_addr_t dma_handle; 8919 8920 sg_count = DIV_ROUND_UP(total_size, chunk_size); 8921 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) 8922 goto out; 8923 8924 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); 8925 if (!host_memory_descriptor->host_chunk_virt_address) 8926 goto out; 8927 8928 dev = &ctrl_info->pci_dev->dev; 8929 host_memory = host_memory_descriptor->host_memory; 8930 8931 for (i = 0; i < sg_count; i++) { 8932 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); 8933 if (!host_memory_descriptor->host_chunk_virt_address[i]) 8934 goto out_free_chunks; 8935 mem_descriptor = 
&host_memory->sg_descriptor[i]; 8936 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); 8937 put_unaligned_le32(chunk_size, &mem_descriptor->length); 8938 } 8939 8940 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 8941 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); 8942 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); 8943 8944 return 0; 8945 8946 out_free_chunks: 8947 while (--i >= 0) { 8948 mem_descriptor = &host_memory->sg_descriptor[i]; 8949 dma_free_coherent(dev, chunk_size, 8950 host_memory_descriptor->host_chunk_virt_address[i], 8951 get_unaligned_le64(&mem_descriptor->address)); 8952 } 8953 kfree(host_memory_descriptor->host_chunk_virt_address); 8954 out: 8955 return -ENOMEM; 8956 } 8957 8958 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, 8959 struct pqi_host_memory_descriptor *host_memory_descriptor, 8960 u32 total_required_size, u32 min_required_size) 8961 { 8962 u32 chunk_size; 8963 u32 min_chunk_size; 8964 8965 if (total_required_size == 0 || min_required_size == 0) 8966 return 0; 8967 8968 total_required_size = PAGE_ALIGN(total_required_size); 8969 min_required_size = PAGE_ALIGN(min_required_size); 8970 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); 8971 min_chunk_size = PAGE_ALIGN(min_chunk_size); 8972 8973 while (total_required_size >= min_required_size) { 8974 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { 8975 if (pqi_host_alloc_mem(ctrl_info, 8976 host_memory_descriptor, total_required_size, 8977 chunk_size) == 0) 8978 return 0; 8979 chunk_size /= 2; 8980 chunk_size = PAGE_ALIGN(chunk_size); 8981 } 8982 total_required_size /= 2; 8983 total_required_size = PAGE_ALIGN(total_required_size); 8984 } 8985 8986 return -ENOMEM; 8987 } 8988 8989 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, 8990 struct pqi_host_memory_descriptor *host_memory_descriptor, 8991 u32 total_size, u32 min_size) 8992 { 8993 struct device *dev; 8994 struct pqi_host_memory *host_memory; 8995 8996 dev = &ctrl_info->pci_dev->dev; 8997 8998 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), 8999 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); 9000 if (!host_memory) 9001 return; 9002 9003 host_memory_descriptor->host_memory = host_memory; 9004 9005 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, 9006 total_size, min_size) < 0) { 9007 dev_err(dev, "failed to allocate firmware usable host buffer\n"); 9008 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9009 host_memory_descriptor->host_memory_dma_handle); 9010 host_memory_descriptor->host_memory = NULL; 9011 return; 9012 } 9013 } 9014 9015 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, 9016 struct pqi_host_memory_descriptor *host_memory_descriptor) 9017 { 9018 unsigned int i; 9019 struct device *dev; 9020 struct pqi_host_memory *host_memory; 9021 struct pqi_sg_descriptor *mem_descriptor; 9022 unsigned int num_memory_descriptors; 9023 9024 host_memory = host_memory_descriptor->host_memory; 9025 if (!host_memory) 9026 return; 9027 9028 dev = &ctrl_info->pci_dev->dev; 9029 9030 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) 9031 goto out; 9032 9033 mem_descriptor = host_memory->sg_descriptor; 9034 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); 9035 9036 for (i = 0; i < num_memory_descriptors; i++) { 9037 dma_free_coherent(dev, 9038 get_unaligned_le32(&mem_descriptor[i].length), 9039 
host_memory_descriptor->host_chunk_virt_address[i], 9040 get_unaligned_le64(&mem_descriptor[i].address)); 9041 } 9042 kfree(host_memory_descriptor->host_chunk_virt_address); 9043 9044 out: 9045 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9046 host_memory_descriptor->host_memory_dma_handle); 9047 host_memory_descriptor->host_memory = NULL; 9048 } 9049 9050 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, 9051 struct pqi_host_memory_descriptor *host_memory_descriptor, 9052 u16 function_code) 9053 { 9054 u32 buffer_length; 9055 struct pqi_vendor_general_request request; 9056 struct pqi_host_memory *host_memory; 9057 9058 memset(&request, 0, sizeof(request)); 9059 9060 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 9061 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 9062 put_unaligned_le16(function_code, &request.function_code); 9063 9064 host_memory = host_memory_descriptor->host_memory; 9065 9066 if (host_memory) { 9067 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); 9068 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); 9069 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); 9070 9071 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { 9072 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); 9073 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); 9074 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { 9075 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); 9076 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); 9077 } 9078 } 9079 9080 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 9081 } 9082 9083 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 9084 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 9085 .status = SAM_STAT_CHECK_CONDITION, 9086 }; 9087 9088 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 9089 { 9090 unsigned int i; 9091 struct pqi_io_request *io_request; 9092 struct scsi_cmnd *scmd; 9093 struct scsi_device *sdev; 9094 9095 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9096 io_request = &ctrl_info->io_request_pool[i]; 9097 if (atomic_read(&io_request->refcount) == 0) 9098 continue; 9099 9100 scmd = io_request->scmd; 9101 if (scmd) { 9102 sdev = scmd->device; 9103 if (!sdev || !scsi_device_online(sdev)) { 9104 pqi_free_io_request(io_request); 9105 continue; 9106 } else { 9107 set_host_byte(scmd, DID_NO_CONNECT); 9108 } 9109 } else { 9110 io_request->status = -ENXIO; 9111 io_request->error_info = 9112 &pqi_ctrl_offline_raid_error_info; 9113 } 9114 9115 io_request->io_complete_callback(io_request, 9116 io_request->context); 9117 } 9118 } 9119 9120 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 9121 { 9122 pqi_perform_lockup_action(); 9123 pqi_stop_heartbeat_timer(ctrl_info); 9124 pqi_free_interrupts(ctrl_info); 9125 pqi_cancel_rescan_worker(ctrl_info); 9126 pqi_cancel_update_time_worker(ctrl_info); 9127 pqi_ctrl_wait_until_quiesced(ctrl_info); 9128 pqi_fail_all_outstanding_requests(ctrl_info); 9129 pqi_ctrl_unblock_requests(ctrl_info); 9130 } 9131 9132 static void pqi_ctrl_offline_worker(struct work_struct *work) 9133 { 9134 struct 
pqi_ctrl_info *ctrl_info; 9135 9136 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 9137 pqi_take_ctrl_offline_deferred(ctrl_info); 9138 } 9139 9140 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9141 { 9142 char *string; 9143 9144 switch (ctrl_shutdown_reason) { 9145 case PQI_IQ_NOT_DRAINED_TIMEOUT: 9146 string = "inbound queue not drained timeout"; 9147 break; 9148 case PQI_LUN_RESET_TIMEOUT: 9149 string = "LUN reset timeout"; 9150 break; 9151 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: 9152 string = "I/O pending timeout after LUN reset"; 9153 break; 9154 case PQI_NO_HEARTBEAT: 9155 string = "no controller heartbeat detected"; 9156 break; 9157 case PQI_FIRMWARE_KERNEL_NOT_UP: 9158 string = "firmware kernel not ready"; 9159 break; 9160 case PQI_OFA_RESPONSE_TIMEOUT: 9161 string = "OFA response timeout"; 9162 break; 9163 case PQI_INVALID_REQ_ID: 9164 string = "invalid request ID"; 9165 break; 9166 case PQI_UNMATCHED_REQ_ID: 9167 string = "unmatched request ID"; 9168 break; 9169 case PQI_IO_PI_OUT_OF_RANGE: 9170 string = "I/O queue producer index out of range"; 9171 break; 9172 case PQI_EVENT_PI_OUT_OF_RANGE: 9173 string = "event queue producer index out of range"; 9174 break; 9175 case PQI_UNEXPECTED_IU_TYPE: 9176 string = "unexpected IU type"; 9177 break; 9178 default: 9179 string = "unknown reason"; 9180 break; 9181 } 9182 9183 return string; 9184 } 9185 9186 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 9187 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9188 { 9189 if (!ctrl_info->controller_online) 9190 return; 9191 9192 ctrl_info->controller_online = false; 9193 ctrl_info->pqi_mode_enabled = false; 9194 pqi_ctrl_block_requests(ctrl_info); 9195 if (!pqi_disable_ctrl_shutdown) 9196 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 9197 pci_disable_device(ctrl_info->pci_dev); 9198 dev_err(&ctrl_info->pci_dev->dev, 9199 "controller offline: reason code 0x%x (%s)\n", 9200 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); 9201 schedule_work(&ctrl_info->ctrl_offline_work); 9202 } 9203 9204 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 9205 const struct pci_device_id *id) 9206 { 9207 char *ctrl_description; 9208 9209 if (id->driver_data) 9210 ctrl_description = (char *)id->driver_data; 9211 else 9212 ctrl_description = "Microchip Smart Family Controller"; 9213 9214 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 9215 } 9216 9217 static int pqi_pci_probe(struct pci_dev *pci_dev, 9218 const struct pci_device_id *id) 9219 { 9220 int rc; 9221 int node; 9222 struct pqi_ctrl_info *ctrl_info; 9223 9224 pqi_print_ctrl_info(pci_dev, id); 9225 9226 if (pqi_disable_device_id_wildcards && 9227 id->subvendor == PCI_ANY_ID && 9228 id->subdevice == PCI_ANY_ID) { 9229 dev_warn(&pci_dev->dev, 9230 "controller not probed because device ID wildcards are disabled\n"); 9231 return -ENODEV; 9232 } 9233 9234 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 9235 dev_warn(&pci_dev->dev, 9236 "controller device ID matched using wildcards\n"); 9237 9238 node = dev_to_node(&pci_dev->dev); 9239 if (node == NUMA_NO_NODE) { 9240 node = cpu_to_node(0); 9241 if (node == NUMA_NO_NODE) 9242 node = 0; 9243 set_dev_node(&pci_dev->dev, node); 9244 } 9245 9246 ctrl_info = pqi_alloc_ctrl_info(node); 9247 if (!ctrl_info) { 9248 dev_err(&pci_dev->dev, 9249 "failed to allocate controller info block\n"); 9250 return -ENOMEM; 9251 } 9252 ctrl_info->numa_node = node; 
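	/*
	 * Cache the (possibly fixed-up) NUMA node; among other things, the
	 * numa_node sysfs attribute defined earlier reports this value.
	 */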

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}

static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;
	u16 vendor_id;
	int rc;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
	if (vendor_id == 0xffff)
		ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL;
	else
		ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL;

	if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) {
		rc = pqi_flush_cache(ctrl_info, RESTART);
		if (rc)
			dev_err(&pci_dev->dev,
				"unable to flush controller cache during remove\n");
	}

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		/*
		 * Exactly one of these warnings fires for each slot that is
		 * still busy, identifying whether the leaked request came
		 * from the SCSI midlayer or was driver-initiated.
		 */
		WARN_ON(scmd != NULL);	/* IO command from SML */
		WARN_ON(scmd == NULL);	/* Non-IO cmd or driver initiated */
	}
}

static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (system_state == SYSTEM_RESTART)
		shutdown_event = RESTART;
	else
		shutdown_event = SHUTDOWN;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
9342 */ 9343 rc = pqi_flush_cache(ctrl_info, shutdown_event); 9344 if (rc) 9345 dev_err(&pci_dev->dev, 9346 "unable to flush controller cache during shutdown\n"); 9347 9348 pqi_crash_if_pending_command(ctrl_info); 9349 pqi_reset(ctrl_info); 9350 } 9351 9352 static void pqi_process_lockup_action_param(void) 9353 { 9354 unsigned int i; 9355 9356 if (!pqi_lockup_action_param) 9357 return; 9358 9359 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 9360 if (strcmp(pqi_lockup_action_param, 9361 pqi_lockup_actions[i].name) == 0) { 9362 pqi_lockup_action = pqi_lockup_actions[i].action; 9363 return; 9364 } 9365 } 9366 9367 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 9368 DRIVER_NAME_SHORT, pqi_lockup_action_param); 9369 } 9370 9371 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 9372 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) 9373 9374 static void pqi_process_ctrl_ready_timeout_param(void) 9375 { 9376 if (pqi_ctrl_ready_timeout_secs == 0) 9377 return; 9378 9379 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { 9380 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", 9381 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); 9382 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; 9383 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { 9384 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", 9385 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); 9386 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; 9387 } 9388 9389 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; 9390 } 9391 9392 static void pqi_process_module_params(void) 9393 { 9394 pqi_process_lockup_action_param(); 9395 pqi_process_ctrl_ready_timeout_param(); 9396 } 9397 9398 #if defined(CONFIG_PM) 9399 9400 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) 9401 { 9402 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) 9403 return RESTART; 9404 9405 return SUSPEND; 9406 } 9407 9408 static int pqi_suspend_or_freeze(struct device *dev, bool suspend) 9409 { 9410 struct pci_dev *pci_dev; 9411 struct pqi_ctrl_info *ctrl_info; 9412 9413 pci_dev = to_pci_dev(dev); 9414 ctrl_info = pci_get_drvdata(pci_dev); 9415 9416 pqi_wait_until_ofa_finished(ctrl_info); 9417 9418 pqi_ctrl_block_scan(ctrl_info); 9419 pqi_scsi_block_requests(ctrl_info); 9420 pqi_ctrl_block_device_reset(ctrl_info); 9421 pqi_ctrl_block_requests(ctrl_info); 9422 pqi_ctrl_wait_until_quiesced(ctrl_info); 9423 9424 if (suspend) { 9425 enum bmic_flush_cache_shutdown_event shutdown_event; 9426 9427 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9428 pqi_flush_cache(ctrl_info, shutdown_event); 9429 } 9430 9431 pqi_stop_heartbeat_timer(ctrl_info); 9432 pqi_crash_if_pending_command(ctrl_info); 9433 pqi_free_irqs(ctrl_info); 9434 9435 ctrl_info->controller_online = false; 9436 ctrl_info->pqi_mode_enabled = false; 9437 9438 return 0; 9439 } 9440 9441 static __maybe_unused int pqi_suspend(struct device *dev) 9442 { 9443 return pqi_suspend_or_freeze(dev, true); 9444 } 9445 9446 static int 
static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}

static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}

static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}

static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}

static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

#endif /* CONFIG_PM */

/* Define the PCI IDs for the controllers that we support.
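 * Every entry matches the base ID PCI_VENDOR_ID_ADAPTEC2/0x028f plus a
 * specific subsystem vendor/device pair; the final PCI_ANY_ID/PCI_ANY_ID
 * entry is a catch-all wildcard (see the pqi_disable_device_id_wildcards
 * check in pqi_pci_probe()).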
*/ 9525 static const struct pci_device_id pqi_pci_id_table[] = { 9526 { 9527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9528 0x105b, 0x1211) 9529 }, 9530 { 9531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9532 0x105b, 0x1321) 9533 }, 9534 { 9535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9536 0x152d, 0x8a22) 9537 }, 9538 { 9539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9540 0x152d, 0x8a23) 9541 }, 9542 { 9543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9544 0x152d, 0x8a24) 9545 }, 9546 { 9547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9548 0x152d, 0x8a36) 9549 }, 9550 { 9551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9552 0x152d, 0x8a37) 9553 }, 9554 { 9555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9556 0x193d, 0x0462) 9557 }, 9558 { 9559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9560 0x193d, 0x1104) 9561 }, 9562 { 9563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9564 0x193d, 0x1105) 9565 }, 9566 { 9567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9568 0x193d, 0x1106) 9569 }, 9570 { 9571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9572 0x193d, 0x1107) 9573 }, 9574 { 9575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9576 0x193d, 0x1108) 9577 }, 9578 { 9579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9580 0x193d, 0x1109) 9581 }, 9582 { 9583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9584 0x193d, 0x110b) 9585 }, 9586 { 9587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9588 0x193d, 0x1110) 9589 }, 9590 { 9591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9592 0x193d, 0x8460) 9593 }, 9594 { 9595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9596 0x193d, 0x8461) 9597 }, 9598 { 9599 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9600 0x193d, 0x8462) 9601 }, 9602 { 9603 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9604 0x193d, 0xc460) 9605 }, 9606 { 9607 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9608 0x193d, 0xc461) 9609 }, 9610 { 9611 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9612 0x193d, 0xf460) 9613 }, 9614 { 9615 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9616 0x193d, 0xf461) 9617 }, 9618 { 9619 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9620 0x1bd4, 0x0045) 9621 }, 9622 { 9623 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9624 0x1bd4, 0x0046) 9625 }, 9626 { 9627 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9628 0x1bd4, 0x0047) 9629 }, 9630 { 9631 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9632 0x1bd4, 0x0048) 9633 }, 9634 { 9635 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9636 0x1bd4, 0x004a) 9637 }, 9638 { 9639 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9640 0x1bd4, 0x004b) 9641 }, 9642 { 9643 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9644 0x1bd4, 0x004c) 9645 }, 9646 { 9647 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9648 0x1bd4, 0x004f) 9649 }, 9650 { 9651 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9652 0x1bd4, 0x0051) 9653 }, 9654 { 9655 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9656 0x1bd4, 0x0052) 9657 }, 9658 { 9659 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9660 0x1bd4, 0x0053) 9661 }, 9662 { 9663 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9664 0x1bd4, 0x0054) 9665 }, 9666 { 9667 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9668 0x1bd4, 0x006b) 9669 }, 9670 { 9671 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9672 0x1bd4, 0x006c) 9673 }, 9674 { 9675 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9676 0x1bd4, 0x006d) 9677 }, 9678 { 9679 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9680 0x1bd4, 0x006f) 9681 }, 9682 { 9683 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 9684 0x1bd4, 0x0070) 9685 }, 9686 { 9687 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9688 0x1bd4, 0x0071) 9689 }, 9690 { 9691 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9692 0x1bd4, 0x0072) 9693 }, 9694 { 9695 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9696 0x1bd4, 0x0086) 9697 }, 9698 { 9699 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9700 0x1bd4, 0x0087) 9701 }, 9702 { 9703 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9704 0x1bd4, 0x0088) 9705 }, 9706 { 9707 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9708 0x1bd4, 0x0089) 9709 }, 9710 { 9711 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9712 0x1ff9, 0x00a1) 9713 }, 9714 { 9715 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9716 0x1f3a, 0x0104) 9717 }, 9718 { 9719 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9720 0x19e5, 0xd227) 9721 }, 9722 { 9723 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9724 0x19e5, 0xd228) 9725 }, 9726 { 9727 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9728 0x19e5, 0xd229) 9729 }, 9730 { 9731 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9732 0x19e5, 0xd22a) 9733 }, 9734 { 9735 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9736 0x19e5, 0xd22b) 9737 }, 9738 { 9739 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9740 0x19e5, 0xd22c) 9741 }, 9742 { 9743 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9744 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 9745 }, 9746 { 9747 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9748 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 9749 }, 9750 { 9751 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9752 PCI_VENDOR_ID_ADAPTEC2, 0x0659) 9753 }, 9754 { 9755 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9756 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 9757 }, 9758 { 9759 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9760 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 9761 }, 9762 { 9763 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9764 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 9765 }, 9766 { 9767 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9768 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 9769 }, 9770 { 9771 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9772 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 9773 }, 9774 { 9775 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9776 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 9777 }, 9778 { 9779 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9780 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 9781 }, 9782 { 9783 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9784 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 9785 }, 9786 { 9787 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9788 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 9789 }, 9790 { 9791 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9792 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 9793 }, 9794 { 9795 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9796 PCI_VENDOR_ID_ADAPTEC2, 0x080a) 9797 }, 9798 { 9799 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9800 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 9801 }, 9802 { 9803 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9804 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 9805 }, 9806 { 9807 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9808 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 9809 }, 9810 { 9811 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9812 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 9813 }, 9814 { 9815 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9816 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 9817 }, 9818 { 9819 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9820 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 9821 }, 9822 { 9823 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9824 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 9825 }, 9826 { 9827 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9828 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 9829 
}, 9830 { 9831 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9832 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 9833 }, 9834 { 9835 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9836 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 9837 }, 9838 { 9839 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9840 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 9841 }, 9842 { 9843 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9844 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 9845 }, 9846 { 9847 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9848 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 9849 }, 9850 { 9851 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9852 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 9853 }, 9854 { 9855 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9856 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 9857 }, 9858 { 9859 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9860 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 9861 }, 9862 { 9863 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9864 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 9865 }, 9866 { 9867 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9868 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 9869 }, 9870 { 9871 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9872 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 9873 }, 9874 { 9875 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9876 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 9877 }, 9878 { 9879 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9880 PCI_VENDOR_ID_ADAPTEC2, 0x1304) 9881 }, 9882 { 9883 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9884 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 9885 }, 9886 { 9887 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9888 PCI_VENDOR_ID_ADAPTEC2, 0x1400) 9889 }, 9890 { 9891 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9892 PCI_VENDOR_ID_ADAPTEC2, 0x1402) 9893 }, 9894 { 9895 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9896 PCI_VENDOR_ID_ADAPTEC2, 0x1410) 9897 }, 9898 { 9899 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9900 PCI_VENDOR_ID_ADAPTEC2, 0x1411) 9901 }, 9902 { 9903 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9904 PCI_VENDOR_ID_ADAPTEC2, 0x1412) 9905 }, 9906 { 9907 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9908 PCI_VENDOR_ID_ADAPTEC2, 0x1420) 9909 }, 9910 { 9911 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9912 PCI_VENDOR_ID_ADAPTEC2, 0x1430) 9913 }, 9914 { 9915 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9916 PCI_VENDOR_ID_ADAPTEC2, 0x1440) 9917 }, 9918 { 9919 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9920 PCI_VENDOR_ID_ADAPTEC2, 0x1441) 9921 }, 9922 { 9923 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9924 PCI_VENDOR_ID_ADAPTEC2, 0x1450) 9925 }, 9926 { 9927 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9928 PCI_VENDOR_ID_ADAPTEC2, 0x1452) 9929 }, 9930 { 9931 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9932 PCI_VENDOR_ID_ADAPTEC2, 0x1460) 9933 }, 9934 { 9935 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9936 PCI_VENDOR_ID_ADAPTEC2, 0x1461) 9937 }, 9938 { 9939 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9940 PCI_VENDOR_ID_ADAPTEC2, 0x1462) 9941 }, 9942 { 9943 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9944 PCI_VENDOR_ID_ADAPTEC2, 0x1463) 9945 }, 9946 { 9947 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9948 PCI_VENDOR_ID_ADAPTEC2, 0x1470) 9949 }, 9950 { 9951 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9952 PCI_VENDOR_ID_ADAPTEC2, 0x1471) 9953 }, 9954 { 9955 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9956 PCI_VENDOR_ID_ADAPTEC2, 0x1472) 9957 }, 9958 { 9959 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9960 PCI_VENDOR_ID_ADAPTEC2, 0x1473) 9961 }, 9962 { 9963 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9964 PCI_VENDOR_ID_ADAPTEC2, 0x1474) 9965 }, 9966 { 9967 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9968 PCI_VENDOR_ID_ADAPTEC2, 0x1475) 9969 }, 9970 { 9971 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9972 PCI_VENDOR_ID_ADAPTEC2, 0x1480) 9973 }, 9974 { 9975 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9976 PCI_VENDOR_ID_ADAPTEC2, 0x1490) 9977 }, 9978 { 9979 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9980 PCI_VENDOR_ID_ADAPTEC2, 0x1491) 9981 }, 9982 { 9983 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9984 PCI_VENDOR_ID_ADAPTEC2, 0x14a0) 9985 }, 9986 { 9987 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9988 PCI_VENDOR_ID_ADAPTEC2, 0x14a1) 9989 }, 9990 { 9991 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9992 PCI_VENDOR_ID_ADAPTEC2, 0x14a2) 9993 }, 9994 { 9995 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9996 PCI_VENDOR_ID_ADAPTEC2, 0x14a4) 9997 }, 9998 { 9999 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10000 PCI_VENDOR_ID_ADAPTEC2, 0x14a5) 10001 }, 10002 { 10003 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10004 PCI_VENDOR_ID_ADAPTEC2, 0x14a6) 10005 }, 10006 { 10007 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10008 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) 10009 }, 10010 { 10011 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10012 PCI_VENDOR_ID_ADAPTEC2, 0x14b1) 10013 }, 10014 { 10015 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10016 PCI_VENDOR_ID_ADAPTEC2, 0x14c0) 10017 }, 10018 { 10019 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10020 PCI_VENDOR_ID_ADAPTEC2, 0x14c1) 10021 }, 10022 { 10023 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10024 PCI_VENDOR_ID_ADAPTEC2, 0x14c2) 10025 }, 10026 { 10027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10028 PCI_VENDOR_ID_ADAPTEC2, 0x14c3) 10029 }, 10030 { 10031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10032 PCI_VENDOR_ID_ADAPTEC2, 0x14c4) 10033 }, 10034 { 10035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10036 PCI_VENDOR_ID_ADAPTEC2, 0x14d0) 10037 }, 10038 { 10039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10040 PCI_VENDOR_ID_ADAPTEC2, 0x14e0) 10041 }, 10042 { 10043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10044 PCI_VENDOR_ID_ADAPTEC2, 0x14f0) 10045 }, 10046 { 10047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10048 PCI_VENDOR_ID_ADVANTECH, 0x8312) 10049 }, 10050 { 10051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10052 PCI_VENDOR_ID_DELL, 0x1fe0) 10053 }, 10054 { 10055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10056 PCI_VENDOR_ID_HP, 0x0600) 10057 }, 10058 { 10059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10060 PCI_VENDOR_ID_HP, 0x0601) 10061 }, 10062 { 10063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10064 PCI_VENDOR_ID_HP, 0x0602) 10065 }, 10066 { 10067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10068 PCI_VENDOR_ID_HP, 0x0603) 10069 }, 10070 { 10071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10072 PCI_VENDOR_ID_HP, 0x0609) 10073 }, 10074 { 10075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10076 PCI_VENDOR_ID_HP, 0x0650) 10077 }, 10078 { 10079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10080 PCI_VENDOR_ID_HP, 0x0651) 10081 }, 10082 { 10083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10084 PCI_VENDOR_ID_HP, 0x0652) 10085 }, 10086 { 10087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10088 PCI_VENDOR_ID_HP, 0x0653) 10089 }, 10090 { 10091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10092 PCI_VENDOR_ID_HP, 0x0654) 10093 }, 10094 { 10095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10096 PCI_VENDOR_ID_HP, 0x0655) 10097 }, 10098 { 10099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10100 PCI_VENDOR_ID_HP, 0x0700) 10101 }, 
10102 { 10103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10104 PCI_VENDOR_ID_HP, 0x0701) 10105 }, 10106 { 10107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10108 PCI_VENDOR_ID_HP, 0x1001) 10109 }, 10110 { 10111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10112 PCI_VENDOR_ID_HP, 0x1002) 10113 }, 10114 { 10115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10116 PCI_VENDOR_ID_HP, 0x1100) 10117 }, 10118 { 10119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10120 PCI_VENDOR_ID_HP, 0x1101) 10121 }, 10122 { 10123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10124 0x1590, 0x0294) 10125 }, 10126 { 10127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10128 0x1590, 0x02db) 10129 }, 10130 { 10131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10132 0x1590, 0x02dc) 10133 }, 10134 { 10135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10136 0x1590, 0x032e) 10137 }, 10138 { 10139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10140 0x1590, 0x036f) 10141 }, 10142 { 10143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10144 0x1590, 0x0381) 10145 }, 10146 { 10147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10148 0x1590, 0x0382) 10149 }, 10150 { 10151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10152 0x1590, 0x0383) 10153 }, 10154 { 10155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10156 0x1d8d, 0x0800) 10157 }, 10158 { 10159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10160 0x1d8d, 0x0908) 10161 }, 10162 { 10163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10164 0x1d8d, 0x0806) 10165 }, 10166 { 10167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10168 0x1d8d, 0x0916) 10169 }, 10170 { 10171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10172 PCI_VENDOR_ID_GIGABYTE, 0x1000) 10173 }, 10174 { 10175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10176 0x1dfc, 0x3161) 10177 }, 10178 { 10179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10180 0x1f0c, 0x3161) 10181 }, 10182 { 10183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10184 0x1cf2, 0x0804) 10185 }, 10186 { 10187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10188 0x1cf2, 0x0805) 10189 }, 10190 { 10191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10192 0x1cf2, 0x0806) 10193 }, 10194 { 10195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10196 0x1cf2, 0x5445) 10197 }, 10198 { 10199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10200 0x1cf2, 0x5446) 10201 }, 10202 { 10203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10204 0x1cf2, 0x5447) 10205 }, 10206 { 10207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10208 0x1cf2, 0x5449) 10209 }, 10210 { 10211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10212 0x1cf2, 0x544a) 10213 }, 10214 { 10215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10216 0x1cf2, 0x544b) 10217 }, 10218 { 10219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10220 0x1cf2, 0x544d) 10221 }, 10222 { 10223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10224 0x1cf2, 0x544e) 10225 }, 10226 { 10227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10228 0x1cf2, 0x544f) 10229 }, 10230 { 10231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10232 0x1cf2, 0x54da) 10233 }, 10234 { 10235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10236 0x1cf2, 0x54db) 10237 }, 10238 { 10239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10240 0x1cf2, 0x54dc) 10241 }, 10242 { 10243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10244 0x1cf2, 0x0b27) 10245 }, 10246 { 10247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10248 0x1cf2, 0x0b29) 10249 }, 10250 { 10251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10252 0x1cf2, 0x0b45) 
10253 }, 10254 { 10255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10256 0x1cc4, 0x0101) 10257 }, 10258 { 10259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10260 0x1cc4, 0x0201) 10261 }, 10262 { 10263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10264 PCI_VENDOR_ID_LENOVO, 0x0220) 10265 }, 10266 { 10267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10268 PCI_VENDOR_ID_LENOVO, 0x0221) 10269 }, 10270 { 10271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10272 PCI_VENDOR_ID_LENOVO, 0x0520) 10273 }, 10274 { 10275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10276 PCI_VENDOR_ID_LENOVO, 0x0522) 10277 }, 10278 { 10279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10280 PCI_VENDOR_ID_LENOVO, 0x0620) 10281 }, 10282 { 10283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10284 PCI_VENDOR_ID_LENOVO, 0x0621) 10285 }, 10286 { 10287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10288 PCI_VENDOR_ID_LENOVO, 0x0622) 10289 }, 10290 { 10291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10292 PCI_VENDOR_ID_LENOVO, 0x0623) 10293 }, 10294 { 10295 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10296 0x1014, 0x0718) 10297 }, 10298 { 10299 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10300 0x1137, 0x02f8) 10301 }, 10302 { 10303 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10304 0x1137, 0x02f9) 10305 }, 10306 { 10307 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10308 0x1137, 0x02fa) 10309 }, 10310 { 10311 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10312 0x1137, 0x02fe) 10313 }, 10314 { 10315 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10316 0x1137, 0x02ff) 10317 }, 10318 { 10319 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10320 0x1137, 0x0300) 10321 }, 10322 { 10323 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10324 0x1ff9, 0x0045) 10325 }, 10326 { 10327 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10328 0x1ff9, 0x0046) 10329 }, 10330 { 10331 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10332 0x1ff9, 0x0047) 10333 }, 10334 { 10335 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10336 0x1ff9, 0x0048) 10337 }, 10338 { 10339 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10340 0x1ff9, 0x004a) 10341 }, 10342 { 10343 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10344 0x1ff9, 0x004b) 10345 }, 10346 { 10347 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10348 0x1ff9, 0x004c) 10349 }, 10350 { 10351 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10352 0x1ff9, 0x004f) 10353 }, 10354 { 10355 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10356 0x1ff9, 0x0051) 10357 }, 10358 { 10359 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10360 0x1ff9, 0x0052) 10361 }, 10362 { 10363 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10364 0x1ff9, 0x0053) 10365 }, 10366 { 10367 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10368 0x1ff9, 0x0054) 10369 }, 10370 { 10371 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10372 0x1ff9, 0x006b) 10373 }, 10374 { 10375 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10376 0x1ff9, 0x006c) 10377 }, 10378 { 10379 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10380 0x1ff9, 0x006d) 10381 }, 10382 { 10383 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10384 0x1ff9, 0x006f) 10385 }, 10386 { 10387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10388 0x1ff9, 0x0070) 10389 }, 10390 { 10391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10392 0x1ff9, 0x0071) 10393 }, 10394 { 10395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10396 0x1ff9, 0x0072) 10397 }, 10398 { 10399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10400 0x1ff9, 0x0086) 10401 }, 10402 { 10403 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10404 0x1ff9, 0x0087) 10405 }, 10406 { 10407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10408 0x1ff9, 0x0088) 10409 }, 10410 { 10411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10412 0x1ff9, 0x0089) 10413 }, 10414 { 10415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10416 0x1e93, 0x1000) 10417 }, 10418 { 10419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10420 0x1e93, 0x1001) 10421 }, 10422 { 10423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10424 0x1e93, 0x1002) 10425 }, 10426 { 10427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10428 0x1e93, 0x1005) 10429 }, 10430 { 10431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10432 0x1f51, 0x1001) 10433 }, 10434 { 10435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10436 0x1f51, 0x1002) 10437 }, 10438 { 10439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10440 0x1f51, 0x1003) 10441 }, 10442 { 10443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10444 0x1f51, 0x1004) 10445 }, 10446 { 10447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10448 0x1f51, 0x1005) 10449 }, 10450 { 10451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10452 0x1f51, 0x1006) 10453 }, 10454 { 10455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10456 0x1f51, 0x1007) 10457 }, 10458 { 10459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10460 0x1f51, 0x1008) 10461 }, 10462 { 10463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10464 0x1f51, 0x1009) 10465 }, 10466 { 10467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10468 0x1f51, 0x100a) 10469 }, 10470 { 10471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10472 0x1f51, 0x100e) 10473 }, 10474 { 10475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10476 0x1f51, 0x100f) 10477 }, 10478 { 10479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10480 0x1f51, 0x1010) 10481 }, 10482 { 10483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10484 0x1f51, 0x1011) 10485 }, 10486 { 10487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10488 0x1f51, 0x1043) 10489 }, 10490 { 10491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10492 0x1f51, 0x1044) 10493 }, 10494 { 10495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10496 0x1f51, 0x1045) 10497 }, 10498 { 10499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10500 0x1ff9, 0x00a3) 10501 }, 10502 { 10503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10504 PCI_ANY_ID, PCI_ANY_ID) 10505 }, 10506 { 0 } 10507 }; 10508 10509 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 10510 10511 static struct pci_driver pqi_pci_driver = { 10512 .name = DRIVER_NAME_SHORT, 10513 .id_table = pqi_pci_id_table, 10514 .probe = pqi_pci_probe, 10515 .remove = pqi_pci_remove, 10516 .shutdown = pqi_shutdown, 10517 #if defined(CONFIG_PM) 10518 .driver = { 10519 .pm = &pqi_pm_ops 10520 }, 10521 #endif 10522 }; 10523 10524 static int __init pqi_init(void) 10525 { 10526 int rc; 10527 10528 pr_info(DRIVER_NAME "\n"); 10529 pqi_verify_structures(); 10530 sis_verify_structures(); 10531 10532 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); 10533 if (!pqi_sas_transport_template) 10534 return -ENODEV; 10535 10536 pqi_process_module_params(); 10537 10538 rc = pci_register_driver(&pqi_pci_driver); 10539 if (rc) 10540 sas_release_transport(pqi_sas_transport_template); 10541 10542 return rc; 10543 } 10544 10545 static void __exit pqi_cleanup(void) 10546 { 10547 pci_unregister_driver(&pqi_pci_driver); 10548 sas_release_transport(pqi_sas_transport_template); 10549 } 10550 10551 module_init(pqi_init); 10552 module_exit(pqi_cleanup); 10553 10554 
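/*
 * pqi_verify_structures() below is a set of compile-time assertions:
 * BUILD_BUG_ON() fails the build if any of the driver's on-the-wire
 * structures (SIS/PQI register layouts, IU headers, admin and I/O
 * requests/responses, BMIC identify and sense-feature buffers) drifts
 * from the byte offsets and sizes defined by the PQI/BMIC interface.
 */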
static void pqi_verify_structures(void) 10555 { 10556 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10557 sis_host_to_ctrl_doorbell) != 0x20); 10558 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10559 sis_interrupt_mask) != 0x34); 10560 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10561 sis_ctrl_to_host_doorbell) != 0x9c); 10562 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10563 sis_ctrl_to_host_doorbell_clear) != 0xa0); 10564 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10565 sis_driver_scratch) != 0xb0); 10566 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10567 sis_product_identifier) != 0xb4); 10568 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10569 sis_firmware_status) != 0xbc); 10570 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10571 sis_ctrl_shutdown_reason_code) != 0xcc); 10572 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10573 sis_mailbox) != 0x1000); 10574 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10575 pqi_registers) != 0x4000); 10576 10577 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10578 iu_type) != 0x0); 10579 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10580 iu_length) != 0x2); 10581 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10582 response_queue_id) != 0x4); 10583 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10584 driver_flags) != 0x6); 10585 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 10586 10587 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10588 status) != 0x0); 10589 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10590 service_response) != 0x1); 10591 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10592 data_present) != 0x2); 10593 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10594 reserved) != 0x3); 10595 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10596 residual_count) != 0x4); 10597 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10598 data_length) != 0x8); 10599 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10600 reserved1) != 0xa); 10601 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10602 data) != 0xc); 10603 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 10604 10605 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10606 data_in_result) != 0x0); 10607 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10608 data_out_result) != 0x1); 10609 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10610 reserved) != 0x2); 10611 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10612 status) != 0x5); 10613 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10614 status_qualifier) != 0x6); 10615 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10616 sense_data_length) != 0x8); 10617 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10618 response_data_length) != 0xa); 10619 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10620 data_in_transferred) != 0xc); 10621 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10622 data_out_transferred) != 0x10); 10623 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10624 data) != 0x14); 10625 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 10626 10627 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10628 signature) != 0x0); 10629 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10630 function_and_status_code) != 0x8); 10631 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10632 max_admin_iq_elements) != 0x10); 10633 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10634 max_admin_oq_elements) != 0x11); 10635 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10636 admin_iq_element_length) != 0x12); 10637 BUILD_BUG_ON(offsetof(struct 
pqi_device_registers, 10638 admin_oq_element_length) != 0x13); 10639 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10640 max_reset_timeout) != 0x14); 10641 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10642 legacy_intx_status) != 0x18); 10643 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10644 legacy_intx_mask_set) != 0x1c); 10645 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10646 legacy_intx_mask_clear) != 0x20); 10647 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10648 device_status) != 0x40); 10649 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10650 admin_iq_pi_offset) != 0x48); 10651 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10652 admin_oq_ci_offset) != 0x50); 10653 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10654 admin_iq_element_array_addr) != 0x58); 10655 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10656 admin_oq_element_array_addr) != 0x60); 10657 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10658 admin_iq_ci_addr) != 0x68); 10659 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10660 admin_oq_pi_addr) != 0x70); 10661 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10662 admin_iq_num_elements) != 0x78); 10663 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10664 admin_oq_num_elements) != 0x79); 10665 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10666 admin_queue_int_msg_num) != 0x7a); 10667 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10668 device_error) != 0x80); 10669 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10670 error_details) != 0x88); 10671 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10672 device_reset) != 0x90); 10673 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10674 power_action) != 0x94); 10675 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 10676 10677 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10678 header.iu_type) != 0); 10679 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10680 header.iu_length) != 2); 10681 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10682 header.driver_flags) != 6); 10683 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10684 request_id) != 8); 10685 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10686 function_code) != 10); 10687 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10688 data.report_device_capability.buffer_length) != 44); 10689 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10690 data.report_device_capability.sg_descriptor) != 48); 10691 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10692 data.create_operational_iq.queue_id) != 12); 10693 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10694 data.create_operational_iq.element_array_addr) != 16); 10695 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10696 data.create_operational_iq.ci_addr) != 24); 10697 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10698 data.create_operational_iq.num_elements) != 32); 10699 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10700 data.create_operational_iq.element_length) != 34); 10701 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10702 data.create_operational_iq.queue_protocol) != 36); 10703 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10704 data.create_operational_oq.queue_id) != 12); 10705 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10706 data.create_operational_oq.element_array_addr) != 16); 10707 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10708 data.create_operational_oq.pi_addr) 
!= 24); 10709 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10710 data.create_operational_oq.num_elements) != 32); 10711 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10712 data.create_operational_oq.element_length) != 34); 10713 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10714 data.create_operational_oq.queue_protocol) != 36); 10715 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10716 data.create_operational_oq.int_msg_num) != 40); 10717 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10718 data.create_operational_oq.coalescing_count) != 42); 10719 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10720 data.create_operational_oq.min_coalescing_time) != 44); 10721 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10722 data.create_operational_oq.max_coalescing_time) != 48); 10723 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10724 data.delete_operational_queue.queue_id) != 12); 10725 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 10726 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10727 data.create_operational_iq) != 64 - 11); 10728 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10729 data.create_operational_oq) != 64 - 11); 10730 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10731 data.delete_operational_queue) != 64 - 11); 10732 10733 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10734 header.iu_type) != 0); 10735 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10736 header.iu_length) != 2); 10737 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10738 header.driver_flags) != 6); 10739 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10740 request_id) != 8); 10741 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10742 function_code) != 10); 10743 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10744 status) != 11); 10745 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10746 data.create_operational_iq.status_descriptor) != 12); 10747 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10748 data.create_operational_iq.iq_pi_offset) != 16); 10749 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10750 data.create_operational_oq.status_descriptor) != 12); 10751 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10752 data.create_operational_oq.oq_ci_offset) != 16); 10753 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 10754 10755 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10756 header.iu_type) != 0); 10757 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10758 header.iu_length) != 2); 10759 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10760 header.response_queue_id) != 4); 10761 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10762 header.driver_flags) != 6); 10763 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10764 request_id) != 8); 10765 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10766 nexus_id) != 10); 10767 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10768 buffer_length) != 12); 10769 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10770 lun_number) != 16); 10771 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10772 protocol_specific) != 24); 10773 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10774 error_index) != 27); 10775 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10776 cdb) != 32); 10777 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10778 timeout) != 60); 10779 BUILD_BUG_ON(offsetof(struct 
pqi_raid_path_request, 10780 sg_descriptors) != 64); 10781 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 10782 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10783 10784 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10785 header.iu_type) != 0); 10786 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10787 header.iu_length) != 2); 10788 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10789 header.response_queue_id) != 4); 10790 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10791 header.driver_flags) != 6); 10792 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10793 request_id) != 8); 10794 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10795 nexus_id) != 12); 10796 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10797 buffer_length) != 16); 10798 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10799 data_encryption_key_index) != 22); 10800 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10801 encrypt_tweak_lower) != 24); 10802 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10803 encrypt_tweak_upper) != 28); 10804 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10805 cdb) != 32); 10806 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10807 error_index) != 48); 10808 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10809 num_sg_descriptors) != 50); 10810 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10811 cdb_length) != 51); 10812 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10813 lun_number) != 52); 10814 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10815 sg_descriptors) != 64); 10816 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 10817 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10818 10819 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10820 header.iu_type) != 0); 10821 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10822 header.iu_length) != 2); 10823 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10824 request_id) != 8); 10825 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10826 error_index) != 10); 10827 10828 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10829 header.iu_type) != 0); 10830 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10831 header.iu_length) != 2); 10832 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10833 header.response_queue_id) != 4); 10834 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10835 request_id) != 8); 10836 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10837 data.report_event_configuration.buffer_length) != 12); 10838 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10839 data.report_event_configuration.sg_descriptors) != 16); 10840 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10841 data.set_event_configuration.global_event_oq_id) != 10); 10842 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10843 data.set_event_configuration.buffer_length) != 12); 10844 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10845 data.set_event_configuration.sg_descriptors) != 16); 10846 10847 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10848 max_inbound_iu_length) != 6); 10849 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10850 max_outbound_iu_length) != 14); 10851 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 10852 10853 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10854 data_length) != 0); 10855 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10856 iq_arbitration_priority_support_bitmask) != 8); 10857 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10858 
maximum_aw_a) != 9); 10859 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10860 maximum_aw_b) != 10); 10861 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10862 maximum_aw_c) != 11); 10863 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10864 max_inbound_queues) != 16); 10865 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10866 max_elements_per_iq) != 18); 10867 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10868 max_iq_element_length) != 24); 10869 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10870 min_iq_element_length) != 26); 10871 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10872 max_outbound_queues) != 30); 10873 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10874 max_elements_per_oq) != 32); 10875 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10876 intr_coalescing_time_granularity) != 34); 10877 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10878 max_oq_element_length) != 36); 10879 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10880 min_oq_element_length) != 38); 10881 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10882 iu_layer_descriptors) != 64); 10883 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 10884 10885 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 10886 event_type) != 0); 10887 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 10888 oq_id) != 2); 10889 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 10890 10891 BUILD_BUG_ON(offsetof(struct pqi_event_config, 10892 num_event_descriptors) != 2); 10893 BUILD_BUG_ON(offsetof(struct pqi_event_config, 10894 descriptors) != 4); 10895 10896 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 10897 ARRAY_SIZE(pqi_supported_event_types)); 10898 10899 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10900 header.iu_type) != 0); 10901 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10902 header.iu_length) != 2); 10903 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10904 event_type) != 8); 10905 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10906 event_id) != 10); 10907 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10908 additional_event_id) != 12); 10909 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10910 data) != 16); 10911 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 10912 10913 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10914 header.iu_type) != 0); 10915 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10916 header.iu_length) != 2); 10917 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10918 event_type) != 8); 10919 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10920 event_id) != 10); 10921 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10922 additional_event_id) != 12); 10923 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 10924 10925 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10926 header.iu_type) != 0); 10927 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10928 header.iu_length) != 2); 10929 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10930 request_id) != 8); 10931 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10932 nexus_id) != 10); 10933 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10934 timeout) != 14); 10935 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10936 lun_number) != 16); 10937 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10938 protocol_specific) != 24); 10939 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10940 
outbound_queue_id_to_manage) != 26); 10941 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10942 request_id_to_manage) != 28); 10943 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10944 task_management_function) != 30); 10945 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 10946 10947 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10948 header.iu_type) != 0); 10949 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10950 header.iu_length) != 2); 10951 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10952 request_id) != 8); 10953 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10954 nexus_id) != 10); 10955 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10956 additional_response_info) != 12); 10957 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10958 response_code) != 15); 10959 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 10960 10961 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10962 configured_logical_drive_count) != 0); 10963 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10964 configuration_signature) != 1); 10965 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10966 firmware_version_short) != 5); 10967 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10968 extended_logical_unit_count) != 154); 10969 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10970 firmware_build_number) != 190); 10971 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10972 vendor_id) != 200); 10973 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10974 product_id) != 208); 10975 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10976 extra_controller_flags) != 286); 10977 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10978 controller_mode) != 292); 10979 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10980 spare_part_number) != 293); 10981 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10982 firmware_version_long) != 325); 10983 10984 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10985 phys_bay_in_box) != 115); 10986 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10987 device_type) != 120); 10988 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10989 redundant_path_present_map) != 1736); 10990 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10991 active_path_number) != 1738); 10992 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10993 alternate_paths_phys_connector) != 1739); 10994 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10995 alternate_paths_phys_box_on_port) != 1755); 10996 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10997 current_queue_depth_limit) != 1796); 10998 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 10999 11000 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); 11001 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11002 page_code) != 0); 11003 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11004 subpage_code) != 1); 11005 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11006 buffer_length) != 2); 11007 11008 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); 11009 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11010 page_code) != 0); 11011 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11012 subpage_code) != 1); 11013 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 
11014 page_length) != 2); 11015 11016 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) 11017 != 18); 11018 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11019 header) != 0); 11020 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11021 firmware_read_support) != 4); 11022 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11023 driver_read_support) != 5); 11024 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11025 firmware_write_support) != 6); 11026 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11027 driver_write_support) != 7); 11028 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11029 max_transfer_encrypted_sas_sata) != 8); 11030 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11031 max_transfer_encrypted_nvme) != 10); 11032 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11033 max_write_raid_5_6) != 12); 11034 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11035 max_write_raid_1_10_2drive) != 14); 11036 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11037 max_write_raid_1_10_3drive) != 16); 11038 11039 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 11040 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 11041 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 11042 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11043 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 11044 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11045 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 11046 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 11047 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11048 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 11049 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 11050 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11051 11052 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 11053 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 11054 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 11055 } 11056