// SPDX-License-Identifier: GPL-2.0
/*
 *	driver for Microchip PQI-based storage controllers
 *	Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *	Copyright (c) 2016-2018 Microsemi Corporation
 *	Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *	Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.30-031"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		30
#define DRIVER_REVISION		31

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action action;
	char *name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");
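
/*
 * Illustrative example only (values chosen arbitrarily): the module
 * parameters above can be supplied at load time, e.g.
 *
 *	modprobe smartpqi lockup_action=reboot ctrl_ready_timeout=180
 *
 * Since they are registered with 0644 permissions, they also appear under
 * /sys/module/smartpqi/parameters/ once the module is loaded. lockup_action
 * accepts "none", "reboot", or "panic", matching pqi_lockup_actions[] above.
 */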
191 192 static char *raid_levels[] = { 193 "RAID-0", 194 "RAID-4", 195 "RAID-1(1+0)", 196 "RAID-5", 197 "RAID-5+1", 198 "RAID-6", 199 "RAID-1(Triple)", 200 }; 201 202 static char *pqi_raid_level_to_string(u8 raid_level) 203 { 204 if (raid_level < ARRAY_SIZE(raid_levels)) 205 return raid_levels[raid_level]; 206 207 return "RAID UNKNOWN"; 208 } 209 210 #define SA_RAID_0 0 211 #define SA_RAID_4 1 212 #define SA_RAID_1 2 /* also used for RAID 10 */ 213 #define SA_RAID_5 3 /* also used for RAID 50 */ 214 #define SA_RAID_51 4 215 #define SA_RAID_6 5 /* also used for RAID 60 */ 216 #define SA_RAID_TRIPLE 6 /* also used for RAID 1+0 Triple */ 217 #define SA_RAID_MAX SA_RAID_TRIPLE 218 #define SA_RAID_UNKNOWN 0xff 219 220 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 221 { 222 pqi_prep_for_scsi_done(scmd); 223 scsi_done(scmd); 224 } 225 226 static inline void pqi_disable_write_same(struct scsi_device *sdev) 227 { 228 sdev->no_write_same = 1; 229 } 230 231 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 232 { 233 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 234 } 235 236 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 237 { 238 return !device->is_physical_device; 239 } 240 241 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 242 { 243 return scsi3addr[2] != 0; 244 } 245 246 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 247 { 248 return !ctrl_info->controller_online; 249 } 250 251 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 252 { 253 if (ctrl_info->controller_online) 254 if (!sis_is_firmware_running(ctrl_info)) 255 pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP); 256 } 257 258 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 259 { 260 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 261 } 262 263 #define PQI_DRIVER_SCRATCH_PQI_MODE 0x1 264 #define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED 0x2 265 266 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info) 267 { 268 return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ? 
PQI_MODE : SIS_MODE; 269 } 270 271 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 272 enum pqi_ctrl_mode mode) 273 { 274 u32 driver_scratch; 275 276 driver_scratch = sis_read_driver_scratch(ctrl_info); 277 278 if (mode == PQI_MODE) 279 driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE; 280 else 281 driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE; 282 283 sis_write_driver_scratch(ctrl_info, driver_scratch); 284 } 285 286 static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info) 287 { 288 return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0; 289 } 290 291 static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported) 292 { 293 u32 driver_scratch; 294 295 driver_scratch = sis_read_driver_scratch(ctrl_info); 296 297 if (is_supported) 298 driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 299 else 300 driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED; 301 302 sis_write_driver_scratch(ctrl_info, driver_scratch); 303 } 304 305 static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info) 306 { 307 ctrl_info->scan_blocked = true; 308 mutex_lock(&ctrl_info->scan_mutex); 309 } 310 311 static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info) 312 { 313 ctrl_info->scan_blocked = false; 314 mutex_unlock(&ctrl_info->scan_mutex); 315 } 316 317 static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info) 318 { 319 return ctrl_info->scan_blocked; 320 } 321 322 static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) 323 { 324 mutex_lock(&ctrl_info->lun_reset_mutex); 325 } 326 327 static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info) 328 { 329 mutex_unlock(&ctrl_info->lun_reset_mutex); 330 } 331 332 static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info) 333 { 334 struct Scsi_Host *shost; 335 unsigned int num_loops; 336 int msecs_sleep; 337 338 shost = ctrl_info->scsi_host; 339 340 scsi_block_requests(shost); 341 342 num_loops = 0; 343 msecs_sleep = 20; 344 while (scsi_host_busy(shost)) { 345 num_loops++; 346 if (num_loops == 10) 347 msecs_sleep = 500; 348 msleep(msecs_sleep); 349 } 350 } 351 352 static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info) 353 { 354 scsi_unblock_requests(ctrl_info->scsi_host); 355 } 356 357 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) 358 { 359 atomic_inc(&ctrl_info->num_busy_threads); 360 } 361 362 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) 363 { 364 atomic_dec(&ctrl_info->num_busy_threads); 365 } 366 367 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 368 { 369 return ctrl_info->block_requests; 370 } 371 372 static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) 373 { 374 ctrl_info->block_requests = true; 375 } 376 377 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 378 { 379 ctrl_info->block_requests = false; 380 wake_up_all(&ctrl_info->block_requests_wait); 381 } 382 383 static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 384 { 385 if (!pqi_ctrl_blocked(ctrl_info)) 386 return; 387 388 atomic_inc(&ctrl_info->num_blocked_threads); 389 wait_event(ctrl_info->block_requests_wait, 390 !pqi_ctrl_blocked(ctrl_info)); 391 atomic_dec(&ctrl_info->num_blocked_threads); 392 } 393 394 #define PQI_QUIESCE_WARNING_TIMEOUT_SECS 10 395 396 static inline void pqi_ctrl_wait_until_quiesced(struct 
pqi_ctrl_info *ctrl_info) 397 { 398 unsigned long start_jiffies; 399 unsigned long warning_timeout; 400 bool displayed_warning; 401 402 displayed_warning = false; 403 start_jiffies = jiffies; 404 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 405 406 while (atomic_read(&ctrl_info->num_busy_threads) > 407 atomic_read(&ctrl_info->num_blocked_threads)) { 408 if (time_after(jiffies, warning_timeout)) { 409 dev_warn(&ctrl_info->pci_dev->dev, 410 "waiting %u seconds for driver activity to quiesce\n", 411 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 412 displayed_warning = true; 413 warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies; 414 } 415 usleep_range(1000, 2000); 416 } 417 418 if (displayed_warning) 419 dev_warn(&ctrl_info->pci_dev->dev, 420 "driver activity quiesced after waiting for %u seconds\n", 421 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 422 } 423 424 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 425 { 426 return device->device_offline; 427 } 428 429 static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info) 430 { 431 mutex_lock(&ctrl_info->ofa_mutex); 432 } 433 434 static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info) 435 { 436 mutex_unlock(&ctrl_info->ofa_mutex); 437 } 438 439 static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info) 440 { 441 mutex_lock(&ctrl_info->ofa_mutex); 442 mutex_unlock(&ctrl_info->ofa_mutex); 443 } 444 445 static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info) 446 { 447 return mutex_is_locked(&ctrl_info->ofa_mutex); 448 } 449 450 static inline void pqi_device_remove_start(struct pqi_scsi_dev *device) 451 { 452 device->in_remove = true; 453 } 454 455 static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device) 456 { 457 return device->in_remove; 458 } 459 460 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun) 461 { 462 device->in_reset[lun] = true; 463 } 464 465 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun) 466 { 467 device->in_reset[lun] = false; 468 } 469 470 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun) 471 { 472 return device->in_reset[lun]; 473 } 474 475 static inline int pqi_event_type_to_event_index(unsigned int event_type) 476 { 477 int index; 478 479 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 480 if (event_type == pqi_supported_event_types[index]) 481 return index; 482 483 return -1; 484 } 485 486 static inline bool pqi_is_supported_event(unsigned int event_type) 487 { 488 return pqi_event_type_to_event_index(event_type) != -1; 489 } 490 491 static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info, 492 unsigned long delay) 493 { 494 if (pqi_ctrl_offline(ctrl_info)) 495 return; 496 497 schedule_delayed_work(&ctrl_info->rescan_work, delay); 498 } 499 500 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 501 { 502 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 503 } 504 505 #define PQI_RESCAN_WORK_DELAY (10 * HZ) 506 507 static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info) 508 { 509 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 510 } 511 512 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 513 { 514 cancel_delayed_work_sync(&ctrl_info->rescan_work); 515 } 516 517 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info 
*ctrl_info) 518 { 519 if (!ctrl_info->heartbeat_counter) 520 return 0; 521 522 return readl(ctrl_info->heartbeat_counter); 523 } 524 525 static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 526 { 527 return readb(ctrl_info->soft_reset_status); 528 } 529 530 static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) 531 { 532 u8 status; 533 534 status = pqi_read_soft_reset_status(ctrl_info); 535 status &= ~PQI_SOFT_RESET_ABORT; 536 writeb(status, ctrl_info->soft_reset_status); 537 } 538 539 static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) 540 { 541 bool io_high_prio; 542 int priority_class; 543 544 io_high_prio = false; 545 546 if (device->ncq_prio_enable) { 547 priority_class = 548 IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); 549 if (priority_class == IOPRIO_CLASS_RT) { 550 /* Set NCQ priority for read/write commands. */ 551 switch (scmd->cmnd[0]) { 552 case WRITE_16: 553 case READ_16: 554 case WRITE_12: 555 case READ_12: 556 case WRITE_10: 557 case READ_10: 558 case WRITE_6: 559 case READ_6: 560 io_high_prio = true; 561 break; 562 } 563 } 564 } 565 566 return io_high_prio; 567 } 568 569 static int pqi_map_single(struct pci_dev *pci_dev, 570 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 571 size_t buffer_length, enum dma_data_direction data_direction) 572 { 573 dma_addr_t bus_address; 574 575 if (!buffer || buffer_length == 0 || data_direction == DMA_NONE) 576 return 0; 577 578 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, 579 data_direction); 580 if (dma_mapping_error(&pci_dev->dev, bus_address)) 581 return -ENOMEM; 582 583 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 584 put_unaligned_le32(buffer_length, &sg_descriptor->length); 585 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 586 587 return 0; 588 } 589 590 static void pqi_pci_unmap(struct pci_dev *pci_dev, 591 struct pqi_sg_descriptor *descriptors, int num_descriptors, 592 enum dma_data_direction data_direction) 593 { 594 int i; 595 596 if (data_direction == DMA_NONE) 597 return; 598 599 for (i = 0; i < num_descriptors; i++) 600 dma_unmap_single(&pci_dev->dev, 601 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 602 get_unaligned_le32(&descriptors[i].length), 603 data_direction); 604 } 605 606 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 607 struct pqi_raid_path_request *request, u8 cmd, 608 u8 *scsi3addr, void *buffer, size_t buffer_length, 609 u16 vpd_page, enum dma_data_direction *dir) 610 { 611 u8 *cdb; 612 size_t cdb_length = buffer_length; 613 614 memset(request, 0, sizeof(*request)); 615 616 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 617 put_unaligned_le16(offsetof(struct pqi_raid_path_request, 618 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, 619 &request->header.iu_length); 620 put_unaligned_le32(buffer_length, &request->buffer_length); 621 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); 622 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 623 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 624 625 cdb = request->cdb; 626 627 switch (cmd) { 628 case INQUIRY: 629 request->data_direction = SOP_READ_FLAG; 630 cdb[0] = INQUIRY; 631 if (vpd_page & VPD_PAGE) { 632 cdb[1] = 0x1; 633 cdb[2] = (u8)vpd_page; 634 } 635 cdb[4] = (u8)cdb_length; 636 break; 637 case CISS_REPORT_LOG: 638 case CISS_REPORT_PHYS: 639 request->data_direction = SOP_READ_FLAG; 640 cdb[0] = cmd; 641 if (cmd == 
CISS_REPORT_PHYS) { 642 if (ctrl_info->rpl_extended_format_4_5_supported) 643 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; 644 else 645 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; 646 } else { 647 cdb[1] = ctrl_info->ciss_report_log_flags; 648 } 649 put_unaligned_be32(cdb_length, &cdb[6]); 650 break; 651 case CISS_GET_RAID_MAP: 652 request->data_direction = SOP_READ_FLAG; 653 cdb[0] = CISS_READ; 654 cdb[1] = CISS_GET_RAID_MAP; 655 put_unaligned_be32(cdb_length, &cdb[6]); 656 break; 657 case SA_FLUSH_CACHE: 658 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; 659 request->data_direction = SOP_WRITE_FLAG; 660 cdb[0] = BMIC_WRITE; 661 cdb[6] = BMIC_FLUSH_CACHE; 662 put_unaligned_be16(cdb_length, &cdb[7]); 663 break; 664 case BMIC_SENSE_DIAG_OPTIONS: 665 cdb_length = 0; 666 fallthrough; 667 case BMIC_IDENTIFY_CONTROLLER: 668 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 669 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 670 case BMIC_SENSE_FEATURE: 671 request->data_direction = SOP_READ_FLAG; 672 cdb[0] = BMIC_READ; 673 cdb[6] = cmd; 674 put_unaligned_be16(cdb_length, &cdb[7]); 675 break; 676 case BMIC_SET_DIAG_OPTIONS: 677 cdb_length = 0; 678 fallthrough; 679 case BMIC_WRITE_HOST_WELLNESS: 680 request->data_direction = SOP_WRITE_FLAG; 681 cdb[0] = BMIC_WRITE; 682 cdb[6] = cmd; 683 put_unaligned_be16(cdb_length, &cdb[7]); 684 break; 685 case BMIC_CSMI_PASSTHRU: 686 request->data_direction = SOP_BIDIRECTIONAL; 687 cdb[0] = BMIC_WRITE; 688 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; 689 cdb[6] = cmd; 690 put_unaligned_be16(cdb_length, &cdb[7]); 691 break; 692 default: 693 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); 694 break; 695 } 696 697 switch (request->data_direction) { 698 case SOP_READ_FLAG: 699 *dir = DMA_FROM_DEVICE; 700 break; 701 case SOP_WRITE_FLAG: 702 *dir = DMA_TO_DEVICE; 703 break; 704 case SOP_NO_DIRECTION_FLAG: 705 *dir = DMA_NONE; 706 break; 707 default: 708 *dir = DMA_BIDIRECTIONAL; 709 break; 710 } 711 712 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 713 buffer, buffer_length, *dir); 714 } 715 716 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 717 { 718 io_request->scmd = NULL; 719 io_request->status = 0; 720 io_request->error_info = NULL; 721 io_request->raid_bypass = false; 722 } 723 724 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) 725 { 726 struct pqi_io_request *io_request; 727 u16 i; 728 729 if (scmd) { /* SML I/O request */ 730 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 731 732 i = blk_mq_unique_tag_to_tag(blk_tag); 733 io_request = &ctrl_info->io_request_pool[i]; 734 if (atomic_inc_return(&io_request->refcount) > 1) { 735 atomic_dec(&io_request->refcount); 736 return NULL; 737 } 738 } else { /* IOCTL or driver internal request */ 739 /* 740 * benignly racy - may have to wait for an open slot. 
741 * command slot range is scsi_ml_can_queue - 742 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] 743 */ 744 i = 0; 745 while (1) { 746 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; 747 if (atomic_inc_return(&io_request->refcount) == 1) 748 break; 749 atomic_dec(&io_request->refcount); 750 i = (i + 1) % PQI_RESERVED_IO_SLOTS; 751 } 752 } 753 754 if (io_request) 755 pqi_reinit_io_request(io_request); 756 757 return io_request; 758 } 759 760 static void pqi_free_io_request(struct pqi_io_request *io_request) 761 { 762 atomic_dec(&io_request->refcount); 763 } 764 765 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, 766 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, 767 struct pqi_raid_error_info *error_info) 768 { 769 int rc; 770 struct pqi_raid_path_request request; 771 enum dma_data_direction dir; 772 773 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, 774 buffer, buffer_length, vpd_page, &dir); 775 if (rc) 776 return rc; 777 778 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); 779 780 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 781 782 return rc; 783 } 784 785 /* helper functions for pqi_send_scsi_raid_request */ 786 787 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, 788 u8 cmd, void *buffer, size_t buffer_length) 789 { 790 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 791 buffer, buffer_length, 0, NULL); 792 } 793 794 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, 795 u8 cmd, void *buffer, size_t buffer_length, 796 struct pqi_raid_error_info *error_info) 797 { 798 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 799 buffer, buffer_length, 0, error_info); 800 } 801 802 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 803 struct bmic_identify_controller *buffer) 804 { 805 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, 806 buffer, sizeof(*buffer)); 807 } 808 809 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, 810 struct bmic_sense_subsystem_info *sense_info) 811 { 812 return pqi_send_ctrl_raid_request(ctrl_info, 813 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, 814 sizeof(*sense_info)); 815 } 816 817 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 818 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 819 { 820 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, 821 buffer, buffer_length, vpd_page, NULL); 822 } 823 824 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 825 struct pqi_scsi_dev *device, 826 struct bmic_identify_physical_device *buffer, size_t buffer_length) 827 { 828 int rc; 829 enum dma_data_direction dir; 830 u16 bmic_device_index; 831 struct pqi_raid_path_request request; 832 833 rc = pqi_build_raid_path_request(ctrl_info, &request, 834 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 835 buffer_length, 0, &dir); 836 if (rc) 837 return rc; 838 839 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 840 request.cdb[2] = (u8)bmic_device_index; 841 request.cdb[9] = (u8)(bmic_device_index >> 8); 842 843 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 844 845 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 846 847 return rc; 848 } 849 850 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) 851 { 852 
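	/*
	 * The limit fields read here are 16-bit values expressed in units of
	 * 1024 bytes; a value of 0 is taken to mean "no limit" and is
	 * converted to ~0 (the maximum u32).
	 */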
u32 bytes; 853 854 bytes = get_unaligned_le16(limit); 855 if (bytes == 0) 856 bytes = ~0; 857 else 858 bytes *= 1024; 859 860 return bytes; 861 } 862 863 #pragma pack(1) 864 865 struct bmic_sense_feature_buffer { 866 struct bmic_sense_feature_buffer_header header; 867 struct bmic_sense_feature_io_page_aio_subpage aio_subpage; 868 }; 869 870 #pragma pack() 871 872 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ 873 offsetofend(struct bmic_sense_feature_buffer, \ 874 aio_subpage.max_write_raid_1_10_3drive) 875 876 #define MINIMUM_AIO_SUBPAGE_LENGTH \ 877 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ 878 max_write_raid_1_10_3drive) - \ 879 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) 880 881 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) 882 { 883 int rc; 884 enum dma_data_direction dir; 885 struct pqi_raid_path_request request; 886 struct bmic_sense_feature_buffer *buffer; 887 888 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); 889 if (!buffer) 890 return -ENOMEM; 891 892 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, 893 buffer, sizeof(*buffer), 0, &dir); 894 if (rc) 895 goto error; 896 897 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; 898 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; 899 900 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 901 902 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 903 904 if (rc) 905 goto error; 906 907 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || 908 buffer->header.subpage_code != 909 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 910 get_unaligned_le16(&buffer->header.buffer_length) < 911 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || 912 buffer->aio_subpage.header.page_code != 913 BMIC_SENSE_FEATURE_IO_PAGE || 914 buffer->aio_subpage.header.subpage_code != 915 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 916 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < 917 MINIMUM_AIO_SUBPAGE_LENGTH) { 918 goto error; 919 } 920 921 ctrl_info->max_transfer_encrypted_sas_sata = 922 pqi_aio_limit_to_bytes( 923 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); 924 925 ctrl_info->max_transfer_encrypted_nvme = 926 pqi_aio_limit_to_bytes( 927 &buffer->aio_subpage.max_transfer_encrypted_nvme); 928 929 ctrl_info->max_write_raid_5_6 = 930 pqi_aio_limit_to_bytes( 931 &buffer->aio_subpage.max_write_raid_5_6); 932 933 ctrl_info->max_write_raid_1_10_2drive = 934 pqi_aio_limit_to_bytes( 935 &buffer->aio_subpage.max_write_raid_1_10_2drive); 936 937 ctrl_info->max_write_raid_1_10_3drive = 938 pqi_aio_limit_to_bytes( 939 &buffer->aio_subpage.max_write_raid_1_10_3drive); 940 941 error: 942 kfree(buffer); 943 944 return rc; 945 } 946 947 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 948 enum bmic_flush_cache_shutdown_event shutdown_event) 949 { 950 int rc; 951 struct bmic_flush_cache *flush_cache; 952 953 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 954 if (!flush_cache) 955 return -ENOMEM; 956 957 flush_cache->shutdown_event = shutdown_event; 958 959 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, 960 sizeof(*flush_cache)); 961 962 kfree(flush_cache); 963 964 return rc; 965 } 966 967 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, 968 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, 969 struct pqi_raid_error_info *error_info) 970 { 971 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, 972 buffer, 
buffer_length, error_info); 973 } 974 975 #define PQI_FETCH_PTRAID_DATA (1 << 31) 976 977 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) 978 { 979 int rc; 980 struct bmic_diag_options *diag; 981 982 diag = kzalloc(sizeof(*diag), GFP_KERNEL); 983 if (!diag) 984 return -ENOMEM; 985 986 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, 987 diag, sizeof(*diag)); 988 if (rc) 989 goto out; 990 991 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); 992 993 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, 994 sizeof(*diag)); 995 996 out: 997 kfree(diag); 998 999 return rc; 1000 } 1001 1002 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 1003 void *buffer, size_t buffer_length) 1004 { 1005 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, 1006 buffer, buffer_length); 1007 } 1008 1009 #pragma pack(1) 1010 1011 struct bmic_host_wellness_driver_version { 1012 u8 start_tag[4]; 1013 u8 driver_version_tag[2]; 1014 __le16 driver_version_length; 1015 char driver_version[32]; 1016 u8 dont_write_tag[2]; 1017 u8 end_tag[2]; 1018 }; 1019 1020 #pragma pack() 1021 1022 static int pqi_write_driver_version_to_host_wellness( 1023 struct pqi_ctrl_info *ctrl_info) 1024 { 1025 int rc; 1026 struct bmic_host_wellness_driver_version *buffer; 1027 size_t buffer_length; 1028 1029 buffer_length = sizeof(*buffer); 1030 1031 buffer = kmalloc(buffer_length, GFP_KERNEL); 1032 if (!buffer) 1033 return -ENOMEM; 1034 1035 buffer->start_tag[0] = '<'; 1036 buffer->start_tag[1] = 'H'; 1037 buffer->start_tag[2] = 'W'; 1038 buffer->start_tag[3] = '>'; 1039 buffer->driver_version_tag[0] = 'D'; 1040 buffer->driver_version_tag[1] = 'V'; 1041 put_unaligned_le16(sizeof(buffer->driver_version), 1042 &buffer->driver_version_length); 1043 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, 1044 sizeof(buffer->driver_version)); 1045 buffer->dont_write_tag[0] = 'D'; 1046 buffer->dont_write_tag[1] = 'W'; 1047 buffer->end_tag[0] = 'Z'; 1048 buffer->end_tag[1] = 'Z'; 1049 1050 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1051 1052 kfree(buffer); 1053 1054 return rc; 1055 } 1056 1057 #pragma pack(1) 1058 1059 struct bmic_host_wellness_time { 1060 u8 start_tag[4]; 1061 u8 time_tag[2]; 1062 __le16 time_length; 1063 u8 time[8]; 1064 u8 dont_write_tag[2]; 1065 u8 end_tag[2]; 1066 }; 1067 1068 #pragma pack() 1069 1070 static int pqi_write_current_time_to_host_wellness( 1071 struct pqi_ctrl_info *ctrl_info) 1072 { 1073 int rc; 1074 struct bmic_host_wellness_time *buffer; 1075 size_t buffer_length; 1076 time64_t local_time; 1077 unsigned int year; 1078 struct tm tm; 1079 1080 buffer_length = sizeof(*buffer); 1081 1082 buffer = kmalloc(buffer_length, GFP_KERNEL); 1083 if (!buffer) 1084 return -ENOMEM; 1085 1086 buffer->start_tag[0] = '<'; 1087 buffer->start_tag[1] = 'H'; 1088 buffer->start_tag[2] = 'W'; 1089 buffer->start_tag[3] = '>'; 1090 buffer->time_tag[0] = 'T'; 1091 buffer->time_tag[1] = 'D'; 1092 put_unaligned_le16(sizeof(buffer->time), 1093 &buffer->time_length); 1094 1095 local_time = ktime_get_real_seconds(); 1096 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 1097 year = tm.tm_year + 1900; 1098 1099 buffer->time[0] = bin2bcd(tm.tm_hour); 1100 buffer->time[1] = bin2bcd(tm.tm_min); 1101 buffer->time[2] = bin2bcd(tm.tm_sec); 1102 buffer->time[3] = 0; 1103 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 1104 buffer->time[5] = bin2bcd(tm.tm_mday); 1105 buffer->time[6] = bin2bcd(year / 100); 1106 buffer->time[7] = 
bin2bcd(year % 100); 1107 1108 buffer->dont_write_tag[0] = 'D'; 1109 buffer->dont_write_tag[1] = 'W'; 1110 buffer->end_tag[0] = 'Z'; 1111 buffer->end_tag[1] = 'Z'; 1112 1113 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1114 1115 kfree(buffer); 1116 1117 return rc; 1118 } 1119 1120 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 1121 1122 static void pqi_update_time_worker(struct work_struct *work) 1123 { 1124 int rc; 1125 struct pqi_ctrl_info *ctrl_info; 1126 1127 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1128 update_time_work); 1129 1130 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 1131 if (rc) 1132 dev_warn(&ctrl_info->pci_dev->dev, 1133 "error updating time on controller\n"); 1134 1135 schedule_delayed_work(&ctrl_info->update_time_work, 1136 PQI_UPDATE_TIME_WORK_INTERVAL); 1137 } 1138 1139 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1140 { 1141 schedule_delayed_work(&ctrl_info->update_time_work, 0); 1142 } 1143 1144 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1145 { 1146 cancel_delayed_work_sync(&ctrl_info->update_time_work); 1147 } 1148 1149 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, 1150 size_t buffer_length) 1151 { 1152 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); 1153 } 1154 1155 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) 1156 { 1157 int rc; 1158 size_t lun_list_length; 1159 size_t lun_data_length; 1160 size_t new_lun_list_length; 1161 void *lun_data = NULL; 1162 struct report_lun_header *report_lun_header; 1163 1164 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 1165 if (!report_lun_header) { 1166 rc = -ENOMEM; 1167 goto out; 1168 } 1169 1170 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); 1171 if (rc) 1172 goto out; 1173 1174 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 1175 1176 again: 1177 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 1178 1179 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 1180 if (!lun_data) { 1181 rc = -ENOMEM; 1182 goto out; 1183 } 1184 1185 if (lun_list_length == 0) { 1186 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 1187 goto out; 1188 } 1189 1190 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 1191 if (rc) 1192 goto out; 1193 1194 new_lun_list_length = 1195 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); 1196 1197 if (new_lun_list_length > lun_list_length) { 1198 lun_list_length = new_lun_list_length; 1199 kfree(lun_data); 1200 goto again; 1201 } 1202 1203 out: 1204 kfree(report_lun_header); 1205 1206 if (rc) { 1207 kfree(lun_data); 1208 lun_data = NULL; 1209 } 1210 1211 *buffer = lun_data; 1212 1213 return rc; 1214 } 1215 1216 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1217 { 1218 int rc; 1219 unsigned int i; 1220 u8 rpl_response_format; 1221 u32 num_physicals; 1222 void *rpl_list; 1223 struct report_lun_header *rpl_header; 1224 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; 1225 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; 1226 1227 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); 1228 if (rc) 1229 return rc; 1230 1231 if (ctrl_info->rpl_extended_format_4_5_supported) { 1232 rpl_header = rpl_list; 1233 
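		/*
		 * The CISS_REPORT_PHYS request is built with
		 * CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4 when the controller
		 * reports support for it (see pqi_build_raid_path_request()),
		 * but the reply may still arrive in extended format 2, so
		 * check the flags byte of the returned header before deciding
		 * whether the 8-byte WWID entries must be converted to the
		 * 16-byte layout below.
		 */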
rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; 1234 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { 1235 *buffer = rpl_list; 1236 return 0; 1237 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { 1238 dev_err(&ctrl_info->pci_dev->dev, 1239 "RPL returned unsupported data format %u\n", 1240 rpl_response_format); 1241 return -EINVAL; 1242 } else { 1243 dev_warn(&ctrl_info->pci_dev->dev, 1244 "RPL returned extended format 2 instead of 4\n"); 1245 } 1246 } 1247 1248 rpl_8byte_wwid_list = rpl_list; 1249 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1250 1251 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, 1252 num_physicals), GFP_KERNEL); 1253 if (!rpl_16byte_wwid_list) 1254 return -ENOMEM; 1255 1256 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), 1257 &rpl_16byte_wwid_list->header.list_length); 1258 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; 1259 1260 for (i = 0; i < num_physicals; i++) { 1261 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); 1262 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); 1263 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); 1264 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; 1265 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; 1266 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; 1267 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; 1268 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; 1269 } 1270 1271 kfree(rpl_8byte_wwid_list); 1272 *buffer = rpl_16byte_wwid_list; 1273 1274 return 0; 1275 } 1276 1277 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1278 { 1279 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 1280 } 1281 1282 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 1283 struct report_phys_lun_16byte_wwid_list **physdev_list, 1284 struct report_log_lun_list **logdev_list) 1285 { 1286 int rc; 1287 size_t logdev_list_length; 1288 size_t logdev_data_length; 1289 struct report_log_lun_list *internal_logdev_list; 1290 struct report_log_lun_list *logdev_data; 1291 struct report_lun_header report_lun_header; 1292 1293 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 1294 if (rc) 1295 dev_err(&ctrl_info->pci_dev->dev, 1296 "report physical LUNs failed\n"); 1297 1298 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 1299 if (rc) 1300 dev_err(&ctrl_info->pci_dev->dev, 1301 "report logical LUNs failed\n"); 1302 1303 /* 1304 * Tack the controller itself onto the end of the logical device list 1305 * by adding a list entry that is all zeros. 
1306 */ 1307 1308 logdev_data = *logdev_list; 1309 1310 if (logdev_data) { 1311 logdev_list_length = 1312 get_unaligned_be32(&logdev_data->header.list_length); 1313 } else { 1314 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1315 logdev_data = 1316 (struct report_log_lun_list *)&report_lun_header; 1317 logdev_list_length = 0; 1318 } 1319 1320 logdev_data_length = sizeof(struct report_lun_header) + 1321 logdev_list_length; 1322 1323 internal_logdev_list = kmalloc(logdev_data_length + 1324 sizeof(struct report_log_lun), GFP_KERNEL); 1325 if (!internal_logdev_list) { 1326 kfree(*logdev_list); 1327 *logdev_list = NULL; 1328 return -ENOMEM; 1329 } 1330 1331 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1332 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1333 sizeof(struct report_log_lun)); 1334 put_unaligned_be32(logdev_list_length + 1335 sizeof(struct report_log_lun), 1336 &internal_logdev_list->header.list_length); 1337 1338 kfree(*logdev_list); 1339 *logdev_list = internal_logdev_list; 1340 1341 return 0; 1342 } 1343 1344 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 1345 int bus, int target, int lun) 1346 { 1347 device->bus = bus; 1348 device->target = target; 1349 device->lun = lun; 1350 } 1351 1352 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 1353 { 1354 u8 *scsi3addr; 1355 u32 lunid; 1356 int bus; 1357 int target; 1358 int lun; 1359 1360 scsi3addr = device->scsi3addr; 1361 lunid = get_unaligned_le32(scsi3addr); 1362 1363 if (pqi_is_hba_lunid(scsi3addr)) { 1364 /* The specified device is the controller. */ 1365 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 1366 device->target_lun_valid = true; 1367 return; 1368 } 1369 1370 if (pqi_is_logical_device(device)) { 1371 if (device->is_external_raid_device) { 1372 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 1373 target = (lunid >> 16) & 0x3fff; 1374 lun = lunid & 0xff; 1375 } else { 1376 bus = PQI_RAID_VOLUME_BUS; 1377 target = 0; 1378 lun = lunid & 0x3fff; 1379 } 1380 pqi_set_bus_target_lun(device, bus, target, lun); 1381 device->target_lun_valid = true; 1382 return; 1383 } 1384 1385 /* 1386 * Defer target and LUN assignment for non-controller physical devices 1387 * because the SAS transport layer will make these assignments later. 
1388 */ 1389 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 1390 } 1391 1392 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 1393 struct pqi_scsi_dev *device) 1394 { 1395 int rc; 1396 u8 raid_level; 1397 u8 *buffer; 1398 1399 raid_level = SA_RAID_UNKNOWN; 1400 1401 buffer = kmalloc(64, GFP_KERNEL); 1402 if (buffer) { 1403 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1404 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1405 if (rc == 0) { 1406 raid_level = buffer[8]; 1407 if (raid_level > SA_RAID_MAX) 1408 raid_level = SA_RAID_UNKNOWN; 1409 } 1410 kfree(buffer); 1411 } 1412 1413 device->raid_level = raid_level; 1414 } 1415 1416 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1417 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1418 { 1419 char *err_msg; 1420 u32 raid_map_size; 1421 u32 r5or6_blocks_per_row; 1422 1423 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1424 1425 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1426 err_msg = "RAID map too small"; 1427 goto bad_raid_map; 1428 } 1429 1430 if (device->raid_level == SA_RAID_1) { 1431 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1432 err_msg = "invalid RAID-1 map"; 1433 goto bad_raid_map; 1434 } 1435 } else if (device->raid_level == SA_RAID_TRIPLE) { 1436 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1437 err_msg = "invalid RAID-1(Triple) map"; 1438 goto bad_raid_map; 1439 } 1440 } else if ((device->raid_level == SA_RAID_5 || 1441 device->raid_level == SA_RAID_6) && 1442 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1443 /* RAID 50/60 */ 1444 r5or6_blocks_per_row = 1445 get_unaligned_le16(&raid_map->strip_size) * 1446 get_unaligned_le16(&raid_map->data_disks_per_row); 1447 if (r5or6_blocks_per_row == 0) { 1448 err_msg = "invalid RAID-5 or RAID-6 map"; 1449 goto bad_raid_map; 1450 } 1451 } 1452 1453 return 0; 1454 1455 bad_raid_map: 1456 dev_warn(&ctrl_info->pci_dev->dev, 1457 "logical device %08x%08x %s\n", 1458 *((u32 *)&device->scsi3addr), 1459 *((u32 *)&device->scsi3addr[4]), err_msg); 1460 1461 return -EINVAL; 1462 } 1463 1464 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1465 struct pqi_scsi_dev *device) 1466 { 1467 int rc; 1468 u32 raid_map_size; 1469 struct raid_map *raid_map; 1470 1471 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1472 if (!raid_map) 1473 return -ENOMEM; 1474 1475 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1476 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); 1477 if (rc) 1478 goto error; 1479 1480 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1481 1482 if (raid_map_size > sizeof(*raid_map)) { 1483 1484 kfree(raid_map); 1485 1486 raid_map = kmalloc(raid_map_size, GFP_KERNEL); 1487 if (!raid_map) 1488 return -ENOMEM; 1489 1490 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1491 device->scsi3addr, raid_map, raid_map_size, 0, NULL); 1492 if (rc) 1493 goto error; 1494 1495 if (get_unaligned_le32(&raid_map->structure_size) 1496 != raid_map_size) { 1497 dev_warn(&ctrl_info->pci_dev->dev, 1498 "requested %u bytes, received %u bytes\n", 1499 raid_map_size, 1500 get_unaligned_le32(&raid_map->structure_size)); 1501 rc = -EINVAL; 1502 goto error; 1503 } 1504 } 1505 1506 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1507 if (rc) 1508 goto error; 1509 1510 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); 1511 if (!device->raid_io_stats) { 1512 rc = -ENOMEM; 1513 goto error; 1514 } 1515 
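	/*
	 * Publish the RAID map in the device structure only after it has been
	 * fetched in full, validated, and the per-CPU RAID I/O statistics
	 * have been allocated; any failure above frees the map at the error
	 * label instead.
	 */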
1516 device->raid_map = raid_map; 1517 1518 return 0; 1519 1520 error: 1521 kfree(raid_map); 1522 1523 return rc; 1524 } 1525 1526 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, 1527 struct pqi_scsi_dev *device) 1528 { 1529 if (!ctrl_info->lv_drive_type_mix_valid) { 1530 device->max_transfer_encrypted = ~0; 1531 return; 1532 } 1533 1534 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { 1535 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: 1536 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: 1537 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: 1538 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: 1539 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: 1540 case LV_DRIVE_TYPE_MIX_SAS_ONLY: 1541 case LV_DRIVE_TYPE_MIX_SATA_ONLY: 1542 device->max_transfer_encrypted = 1543 ctrl_info->max_transfer_encrypted_sas_sata; 1544 break; 1545 case LV_DRIVE_TYPE_MIX_NVME_ONLY: 1546 device->max_transfer_encrypted = 1547 ctrl_info->max_transfer_encrypted_nvme; 1548 break; 1549 case LV_DRIVE_TYPE_MIX_UNKNOWN: 1550 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: 1551 default: 1552 device->max_transfer_encrypted = 1553 min(ctrl_info->max_transfer_encrypted_sas_sata, 1554 ctrl_info->max_transfer_encrypted_nvme); 1555 break; 1556 } 1557 } 1558 1559 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1560 struct pqi_scsi_dev *device) 1561 { 1562 int rc; 1563 u8 *buffer; 1564 u8 bypass_status; 1565 1566 buffer = kmalloc(64, GFP_KERNEL); 1567 if (!buffer) 1568 return; 1569 1570 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1571 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1572 if (rc) 1573 goto out; 1574 1575 #define RAID_BYPASS_STATUS 4 1576 #define RAID_BYPASS_CONFIGURED 0x1 1577 #define RAID_BYPASS_ENABLED 0x2 1578 1579 bypass_status = buffer[RAID_BYPASS_STATUS]; 1580 device->raid_bypass_configured = 1581 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1582 if (device->raid_bypass_configured && 1583 (bypass_status & RAID_BYPASS_ENABLED) && 1584 pqi_get_raid_map(ctrl_info, device) == 0) { 1585 device->raid_bypass_enabled = true; 1586 if (get_unaligned_le16(&device->raid_map->flags) & 1587 RAID_MAP_ENCRYPTION_ENABLED) 1588 pqi_set_max_transfer_encrypted(ctrl_info, device); 1589 } 1590 1591 out: 1592 kfree(buffer); 1593 } 1594 1595 /* 1596 * Use vendor-specific VPD to determine online/offline status of a volume. 
1597 */ 1598 1599 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1600 struct pqi_scsi_dev *device) 1601 { 1602 int rc; 1603 size_t page_length; 1604 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1605 bool volume_offline = true; 1606 u32 volume_flags; 1607 struct ciss_vpd_logical_volume_status *vpd; 1608 1609 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1610 if (!vpd) 1611 goto no_buffer; 1612 1613 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1614 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1615 if (rc) 1616 goto out; 1617 1618 if (vpd->page_code != CISS_VPD_LV_STATUS) 1619 goto out; 1620 1621 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1622 volume_status) + vpd->page_length; 1623 if (page_length < sizeof(*vpd)) 1624 goto out; 1625 1626 volume_status = vpd->volume_status; 1627 volume_flags = get_unaligned_be32(&vpd->flags); 1628 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1629 1630 out: 1631 kfree(vpd); 1632 no_buffer: 1633 device->volume_status = volume_status; 1634 device->volume_offline = volume_offline; 1635 } 1636 1637 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 1638 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 1639 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 1640 1641 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, 1642 struct pqi_scsi_dev *device, 1643 struct bmic_identify_physical_device *id_phys) 1644 { 1645 int rc; 1646 1647 memset(id_phys, 0, sizeof(*id_phys)); 1648 1649 rc = pqi_identify_physical_device(ctrl_info, device, 1650 id_phys, sizeof(*id_phys)); 1651 if (rc) { 1652 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1653 return rc; 1654 } 1655 1656 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); 1657 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); 1658 1659 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); 1660 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); 1661 1662 device->box_index = id_phys->box_index; 1663 device->phys_box_on_bus = id_phys->phys_box_on_bus; 1664 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; 1665 device->queue_depth = 1666 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1667 device->active_path_index = id_phys->active_path_number; 1668 device->path_map = id_phys->redundant_path_present_map; 1669 memcpy(&device->box, 1670 &id_phys->alternate_paths_phys_box_on_port, 1671 sizeof(device->box)); 1672 memcpy(&device->phys_connector, 1673 &id_phys->alternate_paths_phys_connector, 1674 sizeof(device->phys_connector)); 1675 device->bay = id_phys->phys_bay_in_box; 1676 device->lun_count = id_phys->multi_lun_device_lun_count; 1677 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && 1678 id_phys->phy_count) 1679 device->phy_id = 1680 id_phys->phy_to_phy_map[device->active_path_index]; 1681 else 1682 device->phy_id = 0xFF; 1683 1684 device->ncq_prio_support = 1685 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & 1686 PQI_DEVICE_NCQ_PRIO_SUPPORTED); 1687 1688 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); 1689 1690 return 0; 1691 } 1692 1693 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, 1694 struct pqi_scsi_dev *device) 1695 { 1696 int rc; 1697 u8 *buffer; 1698 1699 buffer = kmalloc(64, GFP_KERNEL); 1700 if (!buffer) 1701 return -ENOMEM; 1702 1703 /* Send an inquiry to the device to see what it is. 
*/ 1704 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1705 if (rc) 1706 goto out; 1707 1708 scsi_sanitize_inquiry_string(&buffer[8], 8); 1709 scsi_sanitize_inquiry_string(&buffer[16], 16); 1710 1711 device->devtype = buffer[0] & 0x1f; 1712 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1713 memcpy(device->model, &buffer[16], sizeof(device->model)); 1714 1715 if (device->devtype == TYPE_DISK) { 1716 if (device->is_external_raid_device) { 1717 device->raid_level = SA_RAID_UNKNOWN; 1718 device->volume_status = CISS_LV_OK; 1719 device->volume_offline = false; 1720 } else { 1721 pqi_get_raid_level(ctrl_info, device); 1722 pqi_get_raid_bypass_status(ctrl_info, device); 1723 pqi_get_volume_status(ctrl_info, device); 1724 } 1725 } 1726 1727 out: 1728 kfree(buffer); 1729 1730 return rc; 1731 } 1732 1733 /* 1734 * Prevent adding drive to OS for some corner cases such as a drive 1735 * undergoing a sanitize (erase) operation. Some OSes will continue to poll 1736 * the drive until the sanitize completes, which can take hours, 1737 * resulting in long bootup delays. Commands such as TUR, READ_CAP 1738 * are allowed, but READ/WRITE cause check condition. So the OS 1739 * cannot check/read the partition table. 1740 * Note: devices that have completed sanitize must be re-enabled 1741 * using the management utility. 1742 */ 1743 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) 1744 { 1745 return device->erase_in_progress; 1746 } 1747 1748 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, 1749 struct pqi_scsi_dev *device, 1750 struct bmic_identify_physical_device *id_phys) 1751 { 1752 int rc; 1753 1754 if (device->is_expander_smp_device) 1755 return 0; 1756 1757 if (pqi_is_logical_device(device)) 1758 rc = pqi_get_logical_device_info(ctrl_info, device); 1759 else 1760 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); 1761 1762 return rc; 1763 } 1764 1765 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1766 struct pqi_scsi_dev *device, 1767 struct bmic_identify_physical_device *id_phys) 1768 { 1769 int rc; 1770 1771 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); 1772 1773 if (rc == 0 && device->lun_count == 0) 1774 device->lun_count = 1; 1775 1776 return rc; 1777 } 1778 1779 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1780 struct pqi_scsi_dev *device) 1781 { 1782 char *status; 1783 static const char unknown_state_str[] = 1784 "Volume is in an unknown state (%u)"; 1785 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1786 1787 switch (device->volume_status) { 1788 case CISS_LV_OK: 1789 status = "Volume online"; 1790 break; 1791 case CISS_LV_FAILED: 1792 status = "Volume failed"; 1793 break; 1794 case CISS_LV_NOT_CONFIGURED: 1795 status = "Volume not configured"; 1796 break; 1797 case CISS_LV_DEGRADED: 1798 status = "Volume degraded"; 1799 break; 1800 case CISS_LV_READY_FOR_RECOVERY: 1801 status = "Volume ready for recovery operation"; 1802 break; 1803 case CISS_LV_UNDERGOING_RECOVERY: 1804 status = "Volume undergoing recovery"; 1805 break; 1806 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1807 status = "Wrong physical drive was replaced"; 1808 break; 1809 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1810 status = "A physical drive not properly connected"; 1811 break; 1812 case CISS_LV_HARDWARE_OVERHEATING: 1813 status = "Hardware is overheating"; 1814 break; 1815 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1816 status = "Hardware has overheated"; 
1817 break; 1818 case CISS_LV_UNDERGOING_EXPANSION: 1819 status = "Volume undergoing expansion"; 1820 break; 1821 case CISS_LV_NOT_AVAILABLE: 1822 status = "Volume waiting for transforming volume"; 1823 break; 1824 case CISS_LV_QUEUED_FOR_EXPANSION: 1825 status = "Volume queued for expansion"; 1826 break; 1827 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1828 status = "Volume disabled due to SCSI ID conflict"; 1829 break; 1830 case CISS_LV_EJECTED: 1831 status = "Volume has been ejected"; 1832 break; 1833 case CISS_LV_UNDERGOING_ERASE: 1834 status = "Volume undergoing background erase"; 1835 break; 1836 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1837 status = "Volume ready for predictive spare rebuild"; 1838 break; 1839 case CISS_LV_UNDERGOING_RPI: 1840 status = "Volume undergoing rapid parity initialization"; 1841 break; 1842 case CISS_LV_PENDING_RPI: 1843 status = "Volume queued for rapid parity initialization"; 1844 break; 1845 case CISS_LV_ENCRYPTED_NO_KEY: 1846 status = "Encrypted volume inaccessible - key not present"; 1847 break; 1848 case CISS_LV_UNDERGOING_ENCRYPTION: 1849 status = "Volume undergoing encryption process"; 1850 break; 1851 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1852 status = "Volume undergoing encryption re-keying process"; 1853 break; 1854 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1855 status = "Volume encrypted but encryption is disabled"; 1856 break; 1857 case CISS_LV_PENDING_ENCRYPTION: 1858 status = "Volume pending migration to encrypted state"; 1859 break; 1860 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1861 status = "Volume pending encryption rekeying"; 1862 break; 1863 case CISS_LV_NOT_SUPPORTED: 1864 status = "Volume not supported on this controller"; 1865 break; 1866 case CISS_LV_STATUS_UNAVAILABLE: 1867 status = "Volume status not available"; 1868 break; 1869 default: 1870 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1871 unknown_state_str, device->volume_status); 1872 status = unknown_state_buffer; 1873 break; 1874 } 1875 1876 dev_info(&ctrl_info->pci_dev->dev, 1877 "scsi %d:%d:%d:%d %s\n", 1878 ctrl_info->scsi_host->host_no, 1879 device->bus, device->target, device->lun, status); 1880 } 1881 1882 static void pqi_rescan_worker(struct work_struct *work) 1883 { 1884 struct pqi_ctrl_info *ctrl_info; 1885 1886 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1887 rescan_work); 1888 1889 pqi_scan_scsi_devices(ctrl_info); 1890 } 1891 1892 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1893 struct pqi_scsi_dev *device) 1894 { 1895 int rc; 1896 1897 if (pqi_is_logical_device(device)) 1898 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1899 device->target, device->lun); 1900 else 1901 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1902 1903 return rc; 1904 } 1905 1906 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) 1907 1908 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) 1909 { 1910 int rc; 1911 int lun; 1912 1913 for (lun = 0; lun < device->lun_count; lun++) { 1914 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, 1915 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); 1916 if (rc) 1917 dev_err(&ctrl_info->pci_dev->dev, 1918 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", 1919 ctrl_info->scsi_host->host_no, device->bus, 1920 device->target, lun, 1921 atomic_read(&device->scsi_cmds_outstanding[lun])); 1922 } 1923 1924 if (pqi_is_logical_device(device)) 1925 scsi_remove_device(device->sdev); 
1926 else 1927 pqi_remove_sas_device(device); 1928 1929 pqi_device_remove_start(device); 1930 } 1931 1932 /* Assumes the SCSI device list lock is held. */ 1933 1934 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1935 int bus, int target, int lun) 1936 { 1937 struct pqi_scsi_dev *device; 1938 1939 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1940 if (device->bus == bus && device->target == target && device->lun == lun) 1941 return device; 1942 1943 return NULL; 1944 } 1945 1946 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) 1947 { 1948 if (dev1->is_physical_device != dev2->is_physical_device) 1949 return false; 1950 1951 if (dev1->is_physical_device) 1952 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1953 1954 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1955 } 1956 1957 enum pqi_find_result { 1958 DEVICE_NOT_FOUND, 1959 DEVICE_CHANGED, 1960 DEVICE_SAME, 1961 }; 1962 1963 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1964 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1965 { 1966 struct pqi_scsi_dev *device; 1967 1968 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1969 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1970 *matching_device = device; 1971 if (pqi_device_equal(device_to_find, device)) { 1972 if (device_to_find->volume_offline) 1973 return DEVICE_CHANGED; 1974 return DEVICE_SAME; 1975 } 1976 return DEVICE_CHANGED; 1977 } 1978 } 1979 1980 return DEVICE_NOT_FOUND; 1981 } 1982 1983 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1984 { 1985 if (device->is_expander_smp_device) 1986 return "Enclosure SMP "; 1987 1988 return scsi_device_type(device->devtype); 1989 } 1990 1991 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1992 1993 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1994 char *action, struct pqi_scsi_dev *device) 1995 { 1996 ssize_t count; 1997 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1998 1999 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 2000 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 2001 2002 if (device->target_lun_valid) 2003 count += scnprintf(buffer + count, 2004 PQI_DEV_INFO_BUFFER_LENGTH - count, 2005 "%d:%d", 2006 device->target, 2007 device->lun); 2008 else 2009 count += scnprintf(buffer + count, 2010 PQI_DEV_INFO_BUFFER_LENGTH - count, 2011 "-:-"); 2012 2013 if (pqi_is_logical_device(device)) 2014 count += scnprintf(buffer + count, 2015 PQI_DEV_INFO_BUFFER_LENGTH - count, 2016 " %08x%08x", 2017 *((u32 *)&device->scsi3addr), 2018 *((u32 *)&device->scsi3addr[4])); 2019 else 2020 count += scnprintf(buffer + count, 2021 PQI_DEV_INFO_BUFFER_LENGTH - count, 2022 " %016llx%016llx", 2023 get_unaligned_be64(&device->wwid[0]), 2024 get_unaligned_be64(&device->wwid[8])); 2025 2026 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 2027 " %s %.8s %.16s ", 2028 pqi_device_type(device), 2029 device->vendor, 2030 device->model); 2031 2032 if (pqi_is_logical_device(device)) { 2033 if (device->devtype == TYPE_DISK) 2034 count += scnprintf(buffer + count, 2035 PQI_DEV_INFO_BUFFER_LENGTH - count, 2036 "SSDSmartPathCap%c En%c %-12s", 2037 device->raid_bypass_configured ? '+' : '-', 2038 device->raid_bypass_enabled ? 
'+' : '-', 2039 pqi_raid_level_to_string(device->raid_level)); 2040 } else { 2041 count += scnprintf(buffer + count, 2042 PQI_DEV_INFO_BUFFER_LENGTH - count, 2043 "AIO%c", device->aio_enabled ? '+' : '-'); 2044 if (device->devtype == TYPE_DISK || 2045 device->devtype == TYPE_ZBC) 2046 count += scnprintf(buffer + count, 2047 PQI_DEV_INFO_BUFFER_LENGTH - count, 2048 " qd=%-6d", device->queue_depth); 2049 } 2050 2051 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 2052 } 2053 2054 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) 2055 { 2056 u32 raid_map1_size; 2057 u32 raid_map2_size; 2058 2059 if (raid_map1 == NULL || raid_map2 == NULL) 2060 return raid_map1 == raid_map2; 2061 2062 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); 2063 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); 2064 2065 if (raid_map1_size != raid_map2_size) 2066 return false; 2067 2068 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; 2069 } 2070 2071 /* Assumes the SCSI device list lock is held. */ 2072 2073 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, 2074 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) 2075 { 2076 existing_device->device_type = new_device->device_type; 2077 existing_device->bus = new_device->bus; 2078 if (new_device->target_lun_valid) { 2079 existing_device->target = new_device->target; 2080 existing_device->lun = new_device->lun; 2081 existing_device->target_lun_valid = true; 2082 } 2083 2084 /* By definition, the scsi3addr and wwid fields are already the same. */ 2085 2086 existing_device->is_physical_device = new_device->is_physical_device; 2087 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); 2088 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); 2089 existing_device->sas_address = new_device->sas_address; 2090 existing_device->queue_depth = new_device->queue_depth; 2091 existing_device->device_offline = false; 2092 existing_device->lun_count = new_device->lun_count; 2093 2094 if (pqi_is_logical_device(existing_device)) { 2095 existing_device->is_external_raid_device = new_device->is_external_raid_device; 2096 2097 if (existing_device->devtype == TYPE_DISK) { 2098 existing_device->raid_level = new_device->raid_level; 2099 existing_device->volume_status = new_device->volume_status; 2100 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); 2101 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { 2102 kfree(existing_device->raid_map); 2103 existing_device->raid_map = new_device->raid_map; 2104 /* To prevent this from being freed later. 
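 * Ownership of the raid_map buffer has just passed to existing_device;
 * clearing new_device's pointer keeps pqi_free_device() from freeing it
 * again when the temporary discovery list is torn down.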
*/ 2105 new_device->raid_map = NULL; 2106 } 2107 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { 2108 existing_device->raid_io_stats = new_device->raid_io_stats; 2109 new_device->raid_io_stats = NULL; 2110 } 2111 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; 2112 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; 2113 } 2114 } else { 2115 existing_device->aio_enabled = new_device->aio_enabled; 2116 existing_device->aio_handle = new_device->aio_handle; 2117 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; 2118 existing_device->active_path_index = new_device->active_path_index; 2119 existing_device->phy_id = new_device->phy_id; 2120 existing_device->path_map = new_device->path_map; 2121 existing_device->bay = new_device->bay; 2122 existing_device->box_index = new_device->box_index; 2123 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 2124 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; 2125 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); 2126 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); 2127 } 2128 } 2129 2130 static inline void pqi_free_device(struct pqi_scsi_dev *device) 2131 { 2132 if (device) { 2133 free_percpu(device->raid_io_stats); 2134 kfree(device->raid_map); 2135 kfree(device); 2136 } 2137 } 2138 2139 /* 2140 * Called when exposing a new device to the OS fails in order to re-adjust 2141 * our internal SCSI device list to match the SCSI ML's view. 2142 */ 2143 2144 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 2145 struct pqi_scsi_dev *device) 2146 { 2147 unsigned long flags; 2148 2149 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2150 list_del(&device->scsi_device_list_entry); 2151 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2152 2153 /* Allow the device structure to be freed later. */ 2154 device->keep_device = false; 2155 } 2156 2157 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 2158 { 2159 if (device->is_expander_smp_device) 2160 return device->sas_port != NULL; 2161 2162 return device->sdev != NULL; 2163 } 2164 2165 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) 2166 { 2167 unsigned int lun; 2168 struct pqi_tmf_work *tmf_work; 2169 2170 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) 2171 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); 2172 } 2173 2174 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) 2175 { 2176 if (pqi_device_in_remove(device)) 2177 return false; 2178 2179 if (device->sdev == NULL) 2180 return false; 2181 2182 if (!scsi_device_online(device->sdev)) 2183 return false; 2184 2185 return device->rescan; 2186 } 2187 2188 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 2189 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 2190 { 2191 int rc; 2192 unsigned int i; 2193 unsigned long flags; 2194 enum pqi_find_result find_result; 2195 struct pqi_scsi_dev *device; 2196 struct pqi_scsi_dev *next; 2197 struct pqi_scsi_dev *matching_device; 2198 LIST_HEAD(add_list); 2199 LIST_HEAD(delete_list); 2200 2201 /* 2202 * The idea here is to do as little work as possible while holding the 2203 * spinlock. 
That's why we go to great pains to defer anything other 2204 * than updating the internal device list until after we release the 2205 * spinlock. 2206 */ 2207 2208 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2209 2210 /* Assume that all devices in the existing list have gone away. */ 2211 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 2212 device->device_gone = true; 2213 2214 for (i = 0; i < num_new_devices; i++) { 2215 device = new_device_list[i]; 2216 2217 find_result = pqi_scsi_find_entry(ctrl_info, device, 2218 &matching_device); 2219 2220 switch (find_result) { 2221 case DEVICE_SAME: 2222 /* 2223 * The newly found device is already in the existing 2224 * device list. 2225 */ 2226 device->new_device = false; 2227 matching_device->device_gone = false; 2228 pqi_scsi_update_device(ctrl_info, matching_device, device); 2229 break; 2230 case DEVICE_NOT_FOUND: 2231 /* 2232 * The newly found device is NOT in the existing device 2233 * list. 2234 */ 2235 device->new_device = true; 2236 break; 2237 case DEVICE_CHANGED: 2238 /* 2239 * The original device has gone away and we need to add 2240 * the new device. 2241 */ 2242 device->new_device = true; 2243 break; 2244 } 2245 } 2246 2247 /* Process all devices that have gone away. */ 2248 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2249 scsi_device_list_entry) { 2250 if (device->device_gone) { 2251 list_del(&device->scsi_device_list_entry); 2252 list_add_tail(&device->delete_list_entry, &delete_list); 2253 } 2254 } 2255 2256 /* Process all new devices. */ 2257 for (i = 0; i < num_new_devices; i++) { 2258 device = new_device_list[i]; 2259 if (!device->new_device) 2260 continue; 2261 if (device->volume_offline) 2262 continue; 2263 list_add_tail(&device->scsi_device_list_entry, 2264 &ctrl_info->scsi_device_list); 2265 list_add_tail(&device->add_list_entry, &add_list); 2266 /* To prevent this device structure from being freed later. */ 2267 device->keep_device = true; 2268 pqi_init_device_tmf_work(device); 2269 } 2270 2271 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2272 2273 /* 2274 * If OFA is in progress and there are devices that need to be deleted, 2275 * allow any pending reset operations to continue and unblock any SCSI 2276 * requests before removal. 2277 */ 2278 if (pqi_ofa_in_progress(ctrl_info)) { 2279 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) 2280 if (pqi_is_device_added(device)) 2281 pqi_device_remove_start(device); 2282 pqi_ctrl_unblock_device_reset(ctrl_info); 2283 pqi_scsi_unblock_requests(ctrl_info); 2284 } 2285 2286 /* Remove all devices that have gone away. */ 2287 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 2288 if (device->volume_offline) { 2289 pqi_dev_info(ctrl_info, "offline", device); 2290 pqi_show_volume_status(ctrl_info, device); 2291 } else { 2292 pqi_dev_info(ctrl_info, "removed", device); 2293 } 2294 if (pqi_is_device_added(device)) 2295 pqi_remove_device(ctrl_info, device); 2296 list_del(&device->delete_list_entry); 2297 pqi_free_device(device); 2298 } 2299 2300 /* 2301 * Notify the SML of any existing device changes such as; 2302 * queue depth, device size. 2303 */ 2304 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 2305 /* 2306 * Check for queue depth change. 
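 * The depth reported by the latest rescan is pushed to the SCSI midlayer
 * via scsi_change_queue_depth() whenever it differs from the value
 * previously advertised for the device.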
2307 */ 2308 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { 2309 device->advertised_queue_depth = device->queue_depth; 2310 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); 2311 } 2312 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2313 /* 2314 * Check for changes in the device, such as size. 2315 */ 2316 if (pqi_volume_rescan_needed(device)) { 2317 device->rescan = false; 2318 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2319 scsi_rescan_device(device->sdev); 2320 } else { 2321 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2322 } 2323 } 2324 2325 /* Expose any new devices. */ 2326 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 2327 if (!pqi_is_device_added(device)) { 2328 rc = pqi_add_device(ctrl_info, device); 2329 if (rc == 0) { 2330 pqi_dev_info(ctrl_info, "added", device); 2331 } else { 2332 dev_warn(&ctrl_info->pci_dev->dev, 2333 "scsi %d:%d:%d:%d addition failed, device not added\n", 2334 ctrl_info->scsi_host->host_no, 2335 device->bus, device->target, 2336 device->lun); 2337 pqi_fixup_botched_add(ctrl_info, device); 2338 } 2339 } 2340 } 2341 2342 } 2343 2344 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 2345 { 2346 /* 2347 * Only support the HBA controller itself as a RAID 2348 * controller. If it's a RAID controller other than 2349 * the HBA itself (an external RAID controller, for 2350 * example), we don't support it. 2351 */ 2352 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 2353 !pqi_is_hba_lunid(device->scsi3addr)) 2354 return false; 2355 2356 return true; 2357 } 2358 2359 static inline bool pqi_skip_device(u8 *scsi3addr) 2360 { 2361 /* Ignore all masked devices. */ 2362 if (MASKED_DEVICE(scsi3addr)) 2363 return true; 2364 2365 return false; 2366 } 2367 2368 static inline void pqi_mask_device(u8 *scsi3addr) 2369 { 2370 scsi3addr[3] |= 0xc0; 2371 } 2372 2373 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 2374 { 2375 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); 2376 } 2377 2378 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2379 { 2380 int i; 2381 int rc; 2382 LIST_HEAD(new_device_list_head); 2383 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2384 struct report_log_lun_list *logdev_list = NULL; 2385 struct report_phys_lun_16byte_wwid *phys_lun; 2386 struct report_log_lun *log_lun; 2387 struct bmic_identify_physical_device *id_phys = NULL; 2388 u32 num_physicals; 2389 u32 num_logicals; 2390 struct pqi_scsi_dev **new_device_list = NULL; 2391 struct pqi_scsi_dev *device; 2392 struct pqi_scsi_dev *next; 2393 unsigned int num_new_devices; 2394 unsigned int num_valid_devices; 2395 bool is_physical_device; 2396 u8 *scsi3addr; 2397 unsigned int physical_index; 2398 unsigned int logical_index; 2399 static char *out_of_memory_msg = 2400 "failed to allocate memory, device discovery stopped"; 2401 2402 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 2403 if (rc) 2404 goto out; 2405 2406 if (physdev_list) 2407 num_physicals = 2408 get_unaligned_be32(&physdev_list->header.list_length) 2409 / sizeof(physdev_list->lun_entries[0]); 2410 else 2411 num_physicals = 0; 2412 2413 if (logdev_list) 2414 num_logicals = 2415 get_unaligned_be32(&logdev_list->header.list_length) 2416 / sizeof(logdev_list->lun_entries[0]); 2417 else 2418 num_logicals = 0; 2419 2420 if (num_physicals) { 2421 /* 2422 * We need this buffer for calls to 
pqi_get_physical_disk_info() 2423 * below. We allocate it here instead of inside 2424 * pqi_get_physical_disk_info() because it's a fairly large 2425 * buffer. 2426 */ 2427 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2428 if (!id_phys) { 2429 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2430 out_of_memory_msg); 2431 rc = -ENOMEM; 2432 goto out; 2433 } 2434 2435 if (pqi_hide_vsep) { 2436 for (i = num_physicals - 1; i >= 0; i--) { 2437 phys_lun = &physdev_list->lun_entries[i]; 2438 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2439 pqi_mask_device(phys_lun->lunid); 2440 break; 2441 } 2442 } 2443 } 2444 } 2445 2446 if (num_logicals && 2447 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) 2448 ctrl_info->lv_drive_type_mix_valid = true; 2449 2450 num_new_devices = num_physicals + num_logicals; 2451 2452 new_device_list = kmalloc_array(num_new_devices, 2453 sizeof(*new_device_list), 2454 GFP_KERNEL); 2455 if (!new_device_list) { 2456 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2457 rc = -ENOMEM; 2458 goto out; 2459 } 2460 2461 for (i = 0; i < num_new_devices; i++) { 2462 device = kzalloc(sizeof(*device), GFP_KERNEL); 2463 if (!device) { 2464 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2465 out_of_memory_msg); 2466 rc = -ENOMEM; 2467 goto out; 2468 } 2469 list_add_tail(&device->new_device_list_entry, 2470 &new_device_list_head); 2471 } 2472 2473 device = NULL; 2474 num_valid_devices = 0; 2475 physical_index = 0; 2476 logical_index = 0; 2477 2478 for (i = 0; i < num_new_devices; i++) { 2479 2480 if ((!pqi_expose_ld_first && i < num_physicals) || 2481 (pqi_expose_ld_first && i >= num_logicals)) { 2482 is_physical_device = true; 2483 phys_lun = &physdev_list->lun_entries[physical_index++]; 2484 log_lun = NULL; 2485 scsi3addr = phys_lun->lunid; 2486 } else { 2487 is_physical_device = false; 2488 phys_lun = NULL; 2489 log_lun = &logdev_list->lun_entries[logical_index++]; 2490 scsi3addr = log_lun->lunid; 2491 } 2492 2493 if (is_physical_device && pqi_skip_device(scsi3addr)) 2494 continue; 2495 2496 if (device) 2497 device = list_next_entry(device, new_device_list_entry); 2498 else 2499 device = list_first_entry(&new_device_list_head, 2500 struct pqi_scsi_dev, new_device_list_entry); 2501 2502 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2503 device->is_physical_device = is_physical_device; 2504 if (is_physical_device) { 2505 device->device_type = phys_lun->device_type; 2506 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2507 device->is_expander_smp_device = true; 2508 } else { 2509 device->is_external_raid_device = 2510 pqi_is_external_raid_addr(scsi3addr); 2511 } 2512 2513 if (!pqi_is_supported_device(device)) 2514 continue; 2515 2516 /* Gather information about the device. */ 2517 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2518 if (rc == -ENOMEM) { 2519 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2520 out_of_memory_msg); 2521 goto out; 2522 } 2523 if (rc) { 2524 if (device->is_physical_device) 2525 dev_warn(&ctrl_info->pci_dev->dev, 2526 "obtaining device info failed, skipping physical device %016llx%016llx\n", 2527 get_unaligned_be64(&phys_lun->wwid[0]), 2528 get_unaligned_be64(&phys_lun->wwid[8])); 2529 else 2530 dev_warn(&ctrl_info->pci_dev->dev, 2531 "obtaining device info failed, skipping logical device %08x%08x\n", 2532 *((u32 *)&device->scsi3addr), 2533 *((u32 *)&device->scsi3addr[4])); 2534 rc = 0; 2535 continue; 2536 } 2537 2538 /* Do not present disks that the OS cannot fully probe. 
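 * (pqi_keep_device_offline() is true while a sanitize erase is still in
 * progress; such drives are skipped here instead of being exposed to the
 * SML.)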
*/ 2539 if (pqi_keep_device_offline(device)) 2540 continue; 2541 2542 pqi_assign_bus_target_lun(device); 2543 2544 if (device->is_physical_device) { 2545 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2546 if ((phys_lun->device_flags & 2547 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2548 phys_lun->aio_handle) { 2549 device->aio_enabled = true; 2550 device->aio_handle = 2551 phys_lun->aio_handle; 2552 } 2553 } else { 2554 memcpy(device->volume_id, log_lun->volume_id, 2555 sizeof(device->volume_id)); 2556 } 2557 2558 device->sas_address = get_unaligned_be64(&device->wwid[0]); 2559 2560 new_device_list[num_valid_devices++] = device; 2561 } 2562 2563 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2564 2565 out: 2566 list_for_each_entry_safe(device, next, &new_device_list_head, 2567 new_device_list_entry) { 2568 if (device->keep_device) 2569 continue; 2570 list_del(&device->new_device_list_entry); 2571 pqi_free_device(device); 2572 } 2573 2574 kfree(new_device_list); 2575 kfree(physdev_list); 2576 kfree(logdev_list); 2577 kfree(id_phys); 2578 2579 return rc; 2580 } 2581 2582 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2583 { 2584 int rc; 2585 int mutex_acquired; 2586 2587 if (pqi_ctrl_offline(ctrl_info)) 2588 return -ENXIO; 2589 2590 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 2591 2592 if (!mutex_acquired) { 2593 if (pqi_ctrl_scan_blocked(ctrl_info)) 2594 return -EBUSY; 2595 pqi_schedule_rescan_worker_delayed(ctrl_info); 2596 return -EINPROGRESS; 2597 } 2598 2599 rc = pqi_update_scsi_devices(ctrl_info); 2600 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) 2601 pqi_schedule_rescan_worker_delayed(ctrl_info); 2602 2603 mutex_unlock(&ctrl_info->scan_mutex); 2604 2605 return rc; 2606 } 2607 2608 static void pqi_scan_start(struct Scsi_Host *shost) 2609 { 2610 struct pqi_ctrl_info *ctrl_info; 2611 2612 ctrl_info = shost_to_hba(shost); 2613 2614 pqi_scan_scsi_devices(ctrl_info); 2615 } 2616 2617 /* Returns TRUE if scan is finished. */ 2618 2619 static int pqi_scan_finished(struct Scsi_Host *shost, 2620 unsigned long elapsed_time) 2621 { 2622 struct pqi_ctrl_info *ctrl_info; 2623 2624 ctrl_info = shost_priv(shost); 2625 2626 return !mutex_is_locked(&ctrl_info->scan_mutex); 2627 } 2628 2629 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, 2630 struct raid_map *raid_map, u64 first_block) 2631 { 2632 u32 volume_blk_size; 2633 2634 /* 2635 * Set the encryption tweak values based on logical block address. 2636 * If the block size is 512, the tweak value is equal to the LBA. 2637 * For other block sizes, tweak value is (LBA * block size) / 512. 2638 */ 2639 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2640 if (volume_blk_size != 512) 2641 first_block = (first_block * volume_blk_size) / 512; 2642 2643 encryption_info->data_encryption_key_index = 2644 get_unaligned_le16(&raid_map->data_encryption_key_index); 2645 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2646 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2647 } 2648 2649 /* 2650 * Attempt to perform RAID bypass mapping for a logical volume I/O. 
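 * A successful mapping issues the request directly to a member drive via
 * the AIO path; any PQI_RAID_BYPASS_INELIGIBLE return below lets the
 * caller fall back to the normal RAID path.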
2651 */ 2652 2653 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, 2654 struct pqi_scsi_dev_raid_map_data *rmd) 2655 { 2656 bool is_supported = true; 2657 2658 switch (rmd->raid_level) { 2659 case SA_RAID_0: 2660 break; 2661 case SA_RAID_1: 2662 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2663 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) 2664 is_supported = false; 2665 break; 2666 case SA_RAID_TRIPLE: 2667 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2668 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) 2669 is_supported = false; 2670 break; 2671 case SA_RAID_5: 2672 if (rmd->is_write && (!ctrl_info->enable_r5_writes || 2673 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2674 is_supported = false; 2675 break; 2676 case SA_RAID_6: 2677 if (rmd->is_write && (!ctrl_info->enable_r6_writes || 2678 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2679 is_supported = false; 2680 break; 2681 default: 2682 is_supported = false; 2683 break; 2684 } 2685 2686 return is_supported; 2687 } 2688 2689 #define PQI_RAID_BYPASS_INELIGIBLE 1 2690 2691 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, 2692 struct pqi_scsi_dev_raid_map_data *rmd) 2693 { 2694 /* Check for valid opcode, get LBA and block count. */ 2695 switch (scmd->cmnd[0]) { 2696 case WRITE_6: 2697 rmd->is_write = true; 2698 fallthrough; 2699 case READ_6: 2700 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2701 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2702 rmd->block_cnt = (u32)scmd->cmnd[4]; 2703 if (rmd->block_cnt == 0) 2704 rmd->block_cnt = 256; 2705 break; 2706 case WRITE_10: 2707 rmd->is_write = true; 2708 fallthrough; 2709 case READ_10: 2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2711 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2712 break; 2713 case WRITE_12: 2714 rmd->is_write = true; 2715 fallthrough; 2716 case READ_12: 2717 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2719 break; 2720 case WRITE_16: 2721 rmd->is_write = true; 2722 fallthrough; 2723 case READ_16: 2724 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); 2725 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2726 break; 2727 default: 2728 /* Process via normal I/O path. */ 2729 return PQI_RAID_BYPASS_INELIGIBLE; 2730 } 2731 2732 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); 2733 2734 return 0; 2735 } 2736 2737 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, 2738 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) 2739 { 2740 #if BITS_PER_LONG == 32 2741 u64 tmpdiv; 2742 #endif 2743 2744 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; 2745 2746 /* Check for invalid block or wraparound. */ 2747 if (rmd->last_block >= 2748 get_unaligned_le64(&raid_map->volume_blk_cnt) || 2749 rmd->last_block < rmd->first_block) 2750 return PQI_RAID_BYPASS_INELIGIBLE; 2751 2752 rmd->data_disks_per_row = 2753 get_unaligned_le16(&raid_map->data_disks_per_row); 2754 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); 2755 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2756 2757 /* Calculate stripe information for the request. 
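 * Illustrative numbers only: with strip_size = 128 and
 * data_disks_per_row = 4, blocks_per_row = 512, so a request starting at
 * block 1000 falls in row 1 (1000 / 512), at row offset 488, in
 * column 3 (488 / 128).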
*/ 2758 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; 2759 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2760 return PQI_RAID_BYPASS_INELIGIBLE; 2761 #if BITS_PER_LONG == 32 2762 tmpdiv = rmd->first_block; 2763 do_div(tmpdiv, rmd->blocks_per_row); 2764 rmd->first_row = tmpdiv; 2765 tmpdiv = rmd->last_block; 2766 do_div(tmpdiv, rmd->blocks_per_row); 2767 rmd->last_row = tmpdiv; 2768 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); 2769 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); 2770 tmpdiv = rmd->first_row_offset; 2771 do_div(tmpdiv, rmd->strip_size); 2772 rmd->first_column = tmpdiv; 2773 tmpdiv = rmd->last_row_offset; 2774 do_div(tmpdiv, rmd->strip_size); 2775 rmd->last_column = tmpdiv; 2776 #else 2777 rmd->first_row = rmd->first_block / rmd->blocks_per_row; 2778 rmd->last_row = rmd->last_block / rmd->blocks_per_row; 2779 rmd->first_row_offset = (u32)(rmd->first_block - 2780 (rmd->first_row * rmd->blocks_per_row)); 2781 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * 2782 rmd->blocks_per_row)); 2783 rmd->first_column = rmd->first_row_offset / rmd->strip_size; 2784 rmd->last_column = rmd->last_row_offset / rmd->strip_size; 2785 #endif 2786 2787 /* If this isn't a single row/column then give to the controller. */ 2788 if (rmd->first_row != rmd->last_row || 2789 rmd->first_column != rmd->last_column) 2790 return PQI_RAID_BYPASS_INELIGIBLE; 2791 2792 /* Proceeding with driver mapping. */ 2793 rmd->total_disks_per_row = rmd->data_disks_per_row + 2794 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2795 rmd->map_row = ((u32)(rmd->first_row >> 2796 raid_map->parity_rotation_shift)) % 2797 get_unaligned_le16(&raid_map->row_cnt); 2798 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + 2799 rmd->first_column; 2800 2801 return 0; 2802 } 2803 2804 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, 2805 struct raid_map *raid_map) 2806 { 2807 #if BITS_PER_LONG == 32 2808 u64 tmpdiv; 2809 #endif 2810 2811 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2812 return PQI_RAID_BYPASS_INELIGIBLE; 2813 2814 /* RAID 50/60 */ 2815 /* Verify first and last block are in same RAID group. */ 2816 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; 2817 #if BITS_PER_LONG == 32 2818 tmpdiv = rmd->first_block; 2819 rmd->first_group = do_div(tmpdiv, rmd->stripesize); 2820 tmpdiv = rmd->first_group; 2821 do_div(tmpdiv, rmd->blocks_per_row); 2822 rmd->first_group = tmpdiv; 2823 tmpdiv = rmd->last_block; 2824 rmd->last_group = do_div(tmpdiv, rmd->stripesize); 2825 tmpdiv = rmd->last_group; 2826 do_div(tmpdiv, rmd->blocks_per_row); 2827 rmd->last_group = tmpdiv; 2828 #else 2829 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; 2830 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; 2831 #endif 2832 if (rmd->first_group != rmd->last_group) 2833 return PQI_RAID_BYPASS_INELIGIBLE; 2834 2835 /* Verify request is in a single row of RAID 5/6. 
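 * (Here a "row" spans the full stripesize computed above, i.e.
 * blocks_per_row * layout_map_count blocks.)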
*/ 2836 #if BITS_PER_LONG == 32 2837 tmpdiv = rmd->first_block; 2838 do_div(tmpdiv, rmd->stripesize); 2839 rmd->first_row = tmpdiv; 2840 rmd->r5or6_first_row = tmpdiv; 2841 tmpdiv = rmd->last_block; 2842 do_div(tmpdiv, rmd->stripesize); 2843 rmd->r5or6_last_row = tmpdiv; 2844 #else 2845 rmd->first_row = rmd->r5or6_first_row = 2846 rmd->first_block / rmd->stripesize; 2847 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; 2848 #endif 2849 if (rmd->r5or6_first_row != rmd->r5or6_last_row) 2850 return PQI_RAID_BYPASS_INELIGIBLE; 2851 2852 /* Verify request is in a single column. */ 2853 #if BITS_PER_LONG == 32 2854 tmpdiv = rmd->first_block; 2855 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); 2856 tmpdiv = rmd->first_row_offset; 2857 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); 2858 rmd->r5or6_first_row_offset = rmd->first_row_offset; 2859 tmpdiv = rmd->last_block; 2860 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); 2861 tmpdiv = rmd->r5or6_last_row_offset; 2862 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); 2863 tmpdiv = rmd->r5or6_first_row_offset; 2864 do_div(tmpdiv, rmd->strip_size); 2865 rmd->first_column = rmd->r5or6_first_column = tmpdiv; 2866 tmpdiv = rmd->r5or6_last_row_offset; 2867 do_div(tmpdiv, rmd->strip_size); 2868 rmd->r5or6_last_column = tmpdiv; 2869 #else 2870 rmd->first_row_offset = rmd->r5or6_first_row_offset = 2871 (u32)((rmd->first_block % rmd->stripesize) % 2872 rmd->blocks_per_row); 2873 2874 rmd->r5or6_last_row_offset = 2875 (u32)((rmd->last_block % rmd->stripesize) % 2876 rmd->blocks_per_row); 2877 2878 rmd->first_column = 2879 rmd->r5or6_first_row_offset / rmd->strip_size; 2880 rmd->r5or6_first_column = rmd->first_column; 2881 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; 2882 #endif 2883 if (rmd->r5or6_first_column != rmd->r5or6_last_column) 2884 return PQI_RAID_BYPASS_INELIGIBLE; 2885 2886 /* Request is eligible. */ 2887 rmd->map_row = 2888 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % 2889 get_unaligned_le16(&raid_map->row_cnt); 2890 2891 rmd->map_index = (rmd->first_group * 2892 (get_unaligned_le16(&raid_map->row_cnt) * 2893 rmd->total_disks_per_row)) + 2894 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; 2895 2896 if (rmd->is_write) { 2897 u32 index; 2898 2899 /* 2900 * p_parity_it_nexus and q_parity_it_nexus are pointers to the 2901 * parity entries inside the device's raid_map. 2902 * 2903 * A device's RAID map is bounded by: number of RAID disks squared. 2904 * 2905 * The devices RAID map size is checked during device 2906 * initialization. 2907 */ 2908 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); 2909 index *= rmd->total_disks_per_row; 2910 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); 2911 2912 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; 2913 if (rmd->raid_level == SA_RAID_6) { 2914 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; 2915 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; 2916 } 2917 #if BITS_PER_LONG == 32 2918 tmpdiv = rmd->first_block; 2919 do_div(tmpdiv, rmd->blocks_per_row); 2920 rmd->row = tmpdiv; 2921 #else 2922 rmd->row = rmd->first_block / rmd->blocks_per_row; 2923 #endif 2924 } 2925 2926 return 0; 2927 } 2928 2929 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) 2930 { 2931 /* Build the new CDB for the physical disk I/O. */ 2932 if (rmd->disk_block > 0xffffffff) { 2933 rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; 2934 rmd->cdb[1] = 0; 2935 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); 2936 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); 2937 rmd->cdb[14] = 0; 2938 rmd->cdb[15] = 0; 2939 rmd->cdb_length = 16; 2940 } else { 2941 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; 2942 rmd->cdb[1] = 0; 2943 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); 2944 rmd->cdb[6] = 0; 2945 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); 2946 rmd->cdb[9] = 0; 2947 rmd->cdb_length = 10; 2948 } 2949 } 2950 2951 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, 2952 struct pqi_scsi_dev_raid_map_data *rmd) 2953 { 2954 u32 index; 2955 u32 group; 2956 2957 group = rmd->map_index / rmd->data_disks_per_row; 2958 2959 index = rmd->map_index - (group * rmd->data_disks_per_row); 2960 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; 2961 index += rmd->data_disks_per_row; 2962 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; 2963 if (rmd->layout_map_count > 2) { 2964 index += rmd->data_disks_per_row; 2965 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; 2966 } 2967 2968 rmd->num_it_nexus_entries = rmd->layout_map_count; 2969 } 2970 2971 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2972 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2973 struct pqi_queue_group *queue_group) 2974 { 2975 int rc; 2976 struct raid_map *raid_map; 2977 u32 group; 2978 u32 next_bypass_group; 2979 struct pqi_encryption_info *encryption_info_ptr; 2980 struct pqi_encryption_info encryption_info; 2981 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 2982 2983 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 2984 if (rc) 2985 return PQI_RAID_BYPASS_INELIGIBLE; 2986 2987 rmd.raid_level = device->raid_level; 2988 2989 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) 2990 return PQI_RAID_BYPASS_INELIGIBLE; 2991 2992 if (unlikely(rmd.block_cnt == 0)) 2993 return PQI_RAID_BYPASS_INELIGIBLE; 2994 2995 raid_map = device->raid_map; 2996 2997 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); 2998 if (rc) 2999 return PQI_RAID_BYPASS_INELIGIBLE; 3000 3001 if (device->raid_level == SA_RAID_1 || 3002 device->raid_level == SA_RAID_TRIPLE) { 3003 if (rmd.is_write) { 3004 pqi_calc_aio_r1_nexus(raid_map, &rmd); 3005 } else { 3006 group = device->next_bypass_group[rmd.map_index]; 3007 next_bypass_group = group + 1; 3008 if (next_bypass_group >= rmd.layout_map_count) 3009 next_bypass_group = 0; 3010 device->next_bypass_group[rmd.map_index] = next_bypass_group; 3011 rmd.map_index += group * rmd.data_disks_per_row; 3012 } 3013 } else if ((device->raid_level == SA_RAID_5 || 3014 device->raid_level == SA_RAID_6) && 3015 (rmd.layout_map_count > 1 || rmd.is_write)) { 3016 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); 3017 if (rc) 3018 return PQI_RAID_BYPASS_INELIGIBLE; 3019 } 3020 3021 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) 3022 return PQI_RAID_BYPASS_INELIGIBLE; 3023 3024 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; 3025 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 3026 rmd.first_row * rmd.strip_size + 3027 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); 3028 rmd.disk_block_cnt = rmd.block_cnt; 3029 3030 /* Handle differing logical/physical block sizes. 
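 * For example, a volume exposing 4 KiB logical blocks on 512-byte
 * physical drives would have phys_blk_shift = 3, scaling the starting
 * block and block count by 8.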
*/ 3031 if (raid_map->phys_blk_shift) { 3032 rmd.disk_block <<= raid_map->phys_blk_shift; 3033 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; 3034 } 3035 3036 if (unlikely(rmd.disk_block_cnt > 0xffff)) 3037 return PQI_RAID_BYPASS_INELIGIBLE; 3038 3039 pqi_set_aio_cdb(&rmd); 3040 3041 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { 3042 if (rmd.data_length > device->max_transfer_encrypted) 3043 return PQI_RAID_BYPASS_INELIGIBLE; 3044 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); 3045 encryption_info_ptr = &encryption_info; 3046 } else { 3047 encryption_info_ptr = NULL; 3048 } 3049 3050 if (rmd.is_write) { 3051 switch (device->raid_level) { 3052 case SA_RAID_1: 3053 case SA_RAID_TRIPLE: 3054 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, 3055 encryption_info_ptr, device, &rmd); 3056 case SA_RAID_5: 3057 case SA_RAID_6: 3058 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, 3059 encryption_info_ptr, device, &rmd); 3060 } 3061 } 3062 3063 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, 3064 rmd.cdb, rmd.cdb_length, queue_group, 3065 encryption_info_ptr, true, false); 3066 } 3067 3068 #define PQI_STATUS_IDLE 0x0 3069 3070 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 3071 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 3072 3073 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 3074 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 3075 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 3076 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 3077 #define PQI_DEVICE_STATE_ERROR 0x4 3078 3079 #define PQI_MODE_READY_TIMEOUT_SECS 30 3080 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 3081 3082 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 3083 { 3084 struct pqi_device_registers __iomem *pqi_registers; 3085 unsigned long timeout; 3086 u64 signature; 3087 u8 status; 3088 3089 pqi_registers = ctrl_info->pqi_registers; 3090 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 3091 3092 while (1) { 3093 signature = readq(&pqi_registers->signature); 3094 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 3095 sizeof(signature)) == 0) 3096 break; 3097 if (time_after(jiffies, timeout)) { 3098 dev_err(&ctrl_info->pci_dev->dev, 3099 "timed out waiting for PQI signature\n"); 3100 return -ETIMEDOUT; 3101 } 3102 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3103 } 3104 3105 while (1) { 3106 status = readb(&pqi_registers->function_and_status_code); 3107 if (status == PQI_STATUS_IDLE) 3108 break; 3109 if (time_after(jiffies, timeout)) { 3110 dev_err(&ctrl_info->pci_dev->dev, 3111 "timed out waiting for PQI IDLE\n"); 3112 return -ETIMEDOUT; 3113 } 3114 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3115 } 3116 3117 while (1) { 3118 if (readl(&pqi_registers->device_status) == 3119 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 3120 break; 3121 if (time_after(jiffies, timeout)) { 3122 dev_err(&ctrl_info->pci_dev->dev, 3123 "timed out waiting for PQI all registers ready\n"); 3124 return -ETIMEDOUT; 3125 } 3126 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3127 } 3128 3129 return 0; 3130 } 3131 3132 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 3133 { 3134 struct pqi_scsi_dev *device; 3135 3136 device = io_request->scmd->device->hostdata; 3137 device->raid_bypass_enabled = false; 3138 device->aio_enabled = false; 3139 } 3140 3141 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 3142 { 3143 struct pqi_ctrl_info *ctrl_info; 3144 struct pqi_scsi_dev *device; 3145 3146 device = 
sdev->hostdata; 3147 if (device->device_offline) 3148 return; 3149 3150 device->device_offline = true; 3151 ctrl_info = shost_to_hba(sdev->host); 3152 pqi_schedule_rescan_worker(ctrl_info); 3153 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 3154 path, ctrl_info->scsi_host->host_no, device->bus, 3155 device->target, device->lun); 3156 } 3157 3158 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 3159 { 3160 u8 scsi_status; 3161 u8 host_byte; 3162 struct scsi_cmnd *scmd; 3163 struct pqi_raid_error_info *error_info; 3164 size_t sense_data_length; 3165 int residual_count; 3166 int xfer_count; 3167 struct scsi_sense_hdr sshdr; 3168 3169 scmd = io_request->scmd; 3170 if (!scmd) 3171 return; 3172 3173 error_info = io_request->error_info; 3174 scsi_status = error_info->status; 3175 host_byte = DID_OK; 3176 3177 switch (error_info->data_out_result) { 3178 case PQI_DATA_IN_OUT_GOOD: 3179 break; 3180 case PQI_DATA_IN_OUT_UNDERFLOW: 3181 xfer_count = 3182 get_unaligned_le32(&error_info->data_out_transferred); 3183 residual_count = scsi_bufflen(scmd) - xfer_count; 3184 scsi_set_resid(scmd, residual_count); 3185 if (xfer_count < scmd->underflow) 3186 host_byte = DID_SOFT_ERROR; 3187 break; 3188 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 3189 case PQI_DATA_IN_OUT_ABORTED: 3190 host_byte = DID_ABORT; 3191 break; 3192 case PQI_DATA_IN_OUT_TIMEOUT: 3193 host_byte = DID_TIME_OUT; 3194 break; 3195 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 3196 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 3197 case PQI_DATA_IN_OUT_BUFFER_ERROR: 3198 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 3199 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 3200 case PQI_DATA_IN_OUT_ERROR: 3201 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 3202 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 3203 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 3204 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 3205 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 3206 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 3207 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 3208 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 3209 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 3210 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 3211 default: 3212 host_byte = DID_ERROR; 3213 break; 3214 } 3215 3216 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 3217 if (sense_data_length == 0) 3218 sense_data_length = 3219 get_unaligned_le16(&error_info->response_data_length); 3220 if (sense_data_length) { 3221 if (sense_data_length > sizeof(error_info->data)) 3222 sense_data_length = sizeof(error_info->data); 3223 3224 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3225 scsi_normalize_sense(error_info->data, 3226 sense_data_length, &sshdr) && 3227 sshdr.sense_key == HARDWARE_ERROR && 3228 sshdr.asc == 0x3e) { 3229 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 3230 struct pqi_scsi_dev *device = scmd->device->hostdata; 3231 3232 switch (sshdr.ascq) { 3233 case 0x1: /* LOGICAL UNIT FAILURE */ 3234 if (printk_ratelimit()) 3235 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 3236 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3237 pqi_take_device_offline(scmd->device, "RAID"); 3238 host_byte = DID_NO_CONNECT; 3239 break; 3240 3241 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 3242 if (printk_ratelimit()) 3243 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 3244 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3245 break; 3246 } 3247 } 3248 3249 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3250 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3251 memcpy(scmd->sense_buffer, error_info->data, 3252 sense_data_length); 3253 } 3254 3255 if (pqi_cmd_priv(scmd)->this_residual && 3256 !pqi_is_logical_device(scmd->device->hostdata) && 3257 scsi_status == SAM_STAT_CHECK_CONDITION && 3258 host_byte == DID_OK && 3259 sense_data_length && 3260 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && 3261 sshdr.sense_key == ILLEGAL_REQUEST && 3262 sshdr.asc == 0x26 && 3263 sshdr.ascq == 0x0) { 3264 host_byte = DID_NO_CONNECT; 3265 pqi_take_device_offline(scmd->device, "AIO"); 3266 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); 3267 } 3268 3269 scmd->result = scsi_status; 3270 set_host_byte(scmd, host_byte); 3271 } 3272 3273 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 3274 { 3275 u8 scsi_status; 3276 u8 host_byte; 3277 struct scsi_cmnd *scmd; 3278 struct pqi_aio_error_info *error_info; 3279 size_t sense_data_length; 3280 int residual_count; 3281 int xfer_count; 3282 bool device_offline; 3283 3284 scmd = io_request->scmd; 3285 error_info = io_request->error_info; 3286 host_byte = DID_OK; 3287 sense_data_length = 0; 3288 device_offline = false; 3289 3290 switch (error_info->service_response) { 3291 case PQI_AIO_SERV_RESPONSE_COMPLETE: 3292 scsi_status = error_info->status; 3293 break; 3294 case PQI_AIO_SERV_RESPONSE_FAILURE: 3295 switch (error_info->status) { 3296 case PQI_AIO_STATUS_IO_ABORTED: 3297 scsi_status = SAM_STAT_TASK_ABORTED; 3298 break; 3299 case PQI_AIO_STATUS_UNDERRUN: 3300 scsi_status = SAM_STAT_GOOD; 3301 residual_count = get_unaligned_le32( 3302 &error_info->residual_count); 3303 scsi_set_resid(scmd, residual_count); 3304 xfer_count = scsi_bufflen(scmd) - residual_count; 3305 if (xfer_count < scmd->underflow) 3306 host_byte = DID_SOFT_ERROR; 3307 break; 3308 case PQI_AIO_STATUS_OVERRUN: 3309 scsi_status = SAM_STAT_GOOD; 3310 break; 3311 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 3312 pqi_aio_path_disabled(io_request); 3313 scsi_status = SAM_STAT_GOOD; 3314 io_request->status = -EAGAIN; 3315 break; 3316 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 3317 case PQI_AIO_STATUS_INVALID_DEVICE: 3318 if (!io_request->raid_bypass) { 3319 device_offline = true; 3320 pqi_take_device_offline(scmd->device, "AIO"); 3321 host_byte = DID_NO_CONNECT; 3322 } 3323 scsi_status = SAM_STAT_CHECK_CONDITION; 3324 break; 3325 case PQI_AIO_STATUS_IO_ERROR: 3326 default: 3327 scsi_status = SAM_STAT_CHECK_CONDITION; 3328 break; 3329 } 3330 break; 3331 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 3332 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 3333 scsi_status = SAM_STAT_GOOD; 3334 break; 3335 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 3336 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 3337 default: 3338 scsi_status = SAM_STAT_CHECK_CONDITION; 3339 break; 3340 } 3341 3342 if (error_info->data_present) { 3343 sense_data_length = 3344 get_unaligned_le16(&error_info->data_length); 3345 if (sense_data_length) { 3346 if (sense_data_length > sizeof(error_info->data)) 3347 sense_data_length = sizeof(error_info->data); 3348 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3349 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3350 memcpy(scmd->sense_buffer, error_info->data, 3351 sense_data_length); 3352 } 3353 } 3354 3355 if (device_offline && sense_data_length == 0) 3356 scsi_build_sense(scmd, 0, 
HARDWARE_ERROR, 0x3e, 0x1); 3357 3358 scmd->result = scsi_status; 3359 set_host_byte(scmd, host_byte); 3360 } 3361 3362 static void pqi_process_io_error(unsigned int iu_type, 3363 struct pqi_io_request *io_request) 3364 { 3365 switch (iu_type) { 3366 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3367 pqi_process_raid_io_error(io_request); 3368 break; 3369 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3370 pqi_process_aio_io_error(io_request); 3371 break; 3372 } 3373 } 3374 3375 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, 3376 struct pqi_task_management_response *response) 3377 { 3378 int rc; 3379 3380 switch (response->response_code) { 3381 case SOP_TMF_COMPLETE: 3382 case SOP_TMF_FUNCTION_SUCCEEDED: 3383 rc = 0; 3384 break; 3385 case SOP_TMF_REJECTED: 3386 rc = -EAGAIN; 3387 break; 3388 case SOP_TMF_INCORRECT_LOGICAL_UNIT: 3389 rc = -ENODEV; 3390 break; 3391 default: 3392 rc = -EIO; 3393 break; 3394 } 3395 3396 if (rc) 3397 dev_err(&ctrl_info->pci_dev->dev, 3398 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); 3399 3400 return rc; 3401 } 3402 3403 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3404 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3405 { 3406 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3407 } 3408 3409 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 3410 { 3411 int num_responses; 3412 pqi_index_t oq_pi; 3413 pqi_index_t oq_ci; 3414 struct pqi_io_request *io_request; 3415 struct pqi_io_response *response; 3416 u16 request_id; 3417 3418 num_responses = 0; 3419 oq_ci = queue_group->oq_ci_copy; 3420 3421 while (1) { 3422 oq_pi = readl(queue_group->oq_pi); 3423 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3424 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3425 dev_err(&ctrl_info->pci_dev->dev, 3426 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3427 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 3428 return -1; 3429 } 3430 if (oq_pi == oq_ci) 3431 break; 3432 3433 num_responses++; 3434 response = queue_group->oq_element_array + 3435 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3436 3437 request_id = get_unaligned_le16(&response->request_id); 3438 if (request_id >= ctrl_info->max_io_slots) { 3439 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3440 dev_err(&ctrl_info->pci_dev->dev, 3441 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3442 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 3443 return -1; 3444 } 3445 3446 io_request = &ctrl_info->io_request_pool[request_id]; 3447 if (atomic_read(&io_request->refcount) == 0) { 3448 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3449 dev_err(&ctrl_info->pci_dev->dev, 3450 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3451 request_id, oq_pi, oq_ci); 3452 return -1; 3453 } 3454 3455 switch (response->header.iu_type) { 3456 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 3457 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 3458 if (io_request->scmd) 3459 io_request->scmd->result = 0; 3460 fallthrough; 3461 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 3462 break; 3463 case PQI_RESPONSE_IU_VENDOR_GENERAL: 3464 io_request->status = 3465 get_unaligned_le16( 3466 &((struct pqi_vendor_general_response *)response)->status); 3467 break; 3468 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 3469 io_request->status = 
pqi_interpret_task_management_response(ctrl_info, 3470 (void *)response); 3471 break; 3472 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 3473 pqi_aio_path_disabled(io_request); 3474 io_request->status = -EAGAIN; 3475 break; 3476 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3477 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3478 io_request->error_info = ctrl_info->error_buffer + 3479 (get_unaligned_le16(&response->error_index) * 3480 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 3481 pqi_process_io_error(response->header.iu_type, io_request); 3482 break; 3483 default: 3484 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3485 dev_err(&ctrl_info->pci_dev->dev, 3486 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3487 response->header.iu_type, oq_pi, oq_ci); 3488 return -1; 3489 } 3490 3491 io_request->io_complete_callback(io_request, io_request->context); 3492 3493 /* 3494 * Note that the I/O request structure CANNOT BE TOUCHED after 3495 * returning from the I/O completion callback! 3496 */ 3497 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 3498 } 3499 3500 if (num_responses) { 3501 queue_group->oq_ci_copy = oq_ci; 3502 writel(oq_ci, queue_group->oq_ci); 3503 } 3504 3505 return num_responses; 3506 } 3507 3508 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3509 unsigned int ci, unsigned int elements_in_queue) 3510 { 3511 unsigned int num_elements_used; 3512 3513 if (pi >= ci) 3514 num_elements_used = pi - ci; 3515 else 3516 num_elements_used = elements_in_queue - ci + pi; 3517 3518 return elements_in_queue - num_elements_used - 1; 3519 } 3520 3521 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3522 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3523 { 3524 pqi_index_t iq_pi; 3525 pqi_index_t iq_ci; 3526 unsigned long flags; 3527 void *next_element; 3528 struct pqi_queue_group *queue_group; 3529 3530 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3531 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3532 3533 while (1) { 3534 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3535 3536 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3537 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3538 3539 if (pqi_num_elements_free(iq_pi, iq_ci, 3540 ctrl_info->num_elements_per_iq)) 3541 break; 3542 3543 spin_unlock_irqrestore( 3544 &queue_group->submit_lock[RAID_PATH], flags); 3545 3546 if (pqi_ctrl_offline(ctrl_info)) 3547 return; 3548 } 3549 3550 next_element = queue_group->iq_element_array[RAID_PATH] + 3551 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3552 3553 memcpy(next_element, iu, iu_length); 3554 3555 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3556 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3557 3558 /* 3559 * This write notifies the controller that an IU is available to be 3560 * processed. 
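 * The driver owns the producer index; the controller advances the
 * consumer index that pqi_num_elements_free() compares against. As an
 * illustration, with 16 queue elements, iq_pi = 3 and iq_ci = 7, 12
 * elements are in use and 3 are reported free.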
3561 */ 3562 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3563 3564 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3565 } 3566 3567 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3568 struct pqi_event *event) 3569 { 3570 struct pqi_event_acknowledge_request request; 3571 3572 memset(&request, 0, sizeof(request)); 3573 3574 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3575 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3576 &request.header.iu_length); 3577 request.event_type = event->event_type; 3578 put_unaligned_le16(event->event_id, &request.event_id); 3579 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); 3580 3581 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3582 } 3583 3584 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3585 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3586 3587 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3588 struct pqi_ctrl_info *ctrl_info) 3589 { 3590 u8 status; 3591 unsigned long timeout; 3592 3593 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; 3594 3595 while (1) { 3596 status = pqi_read_soft_reset_status(ctrl_info); 3597 if (status & PQI_SOFT_RESET_INITIATE) 3598 return RESET_INITIATE_DRIVER; 3599 3600 if (status & PQI_SOFT_RESET_ABORT) 3601 return RESET_ABORT; 3602 3603 if (!sis_is_firmware_running(ctrl_info)) 3604 return RESET_NORESPONSE; 3605 3606 if (time_after(jiffies, timeout)) { 3607 dev_warn(&ctrl_info->pci_dev->dev, 3608 "timed out waiting for soft reset status\n"); 3609 return RESET_TIMEDOUT; 3610 } 3611 3612 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3613 } 3614 } 3615 3616 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) 3617 { 3618 int rc; 3619 unsigned int delay_secs; 3620 enum pqi_soft_reset_status reset_status; 3621 3622 if (ctrl_info->soft_reset_handshake_supported) 3623 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); 3624 else 3625 reset_status = RESET_INITIATE_FIRMWARE; 3626 3627 delay_secs = PQI_POST_RESET_DELAY_SECS; 3628 3629 switch (reset_status) { 3630 case RESET_TIMEDOUT: 3631 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; 3632 fallthrough; 3633 case RESET_INITIATE_DRIVER: 3634 dev_info(&ctrl_info->pci_dev->dev, 3635 "Online Firmware Activation: resetting controller\n"); 3636 sis_soft_reset(ctrl_info); 3637 fallthrough; 3638 case RESET_INITIATE_FIRMWARE: 3639 ctrl_info->pqi_mode_enabled = false; 3640 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 3641 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); 3642 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3643 pqi_ctrl_ofa_done(ctrl_info); 3644 dev_info(&ctrl_info->pci_dev->dev, 3645 "Online Firmware Activation: %s\n", 3646 rc == 0 ? 
"SUCCESS" : "FAILED"); 3647 break; 3648 case RESET_ABORT: 3649 dev_info(&ctrl_info->pci_dev->dev, 3650 "Online Firmware Activation ABORTED\n"); 3651 if (ctrl_info->soft_reset_handshake_supported) 3652 pqi_clear_soft_reset_status(ctrl_info); 3653 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3654 pqi_ctrl_ofa_done(ctrl_info); 3655 pqi_ofa_ctrl_unquiesce(ctrl_info); 3656 break; 3657 case RESET_NORESPONSE: 3658 fallthrough; 3659 default: 3660 dev_err(&ctrl_info->pci_dev->dev, 3661 "unexpected Online Firmware Activation reset status: 0x%x\n", 3662 reset_status); 3663 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3664 pqi_ctrl_ofa_done(ctrl_info); 3665 pqi_ofa_ctrl_unquiesce(ctrl_info); 3666 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3667 break; 3668 } 3669 } 3670 3671 static void pqi_ofa_memory_alloc_worker(struct work_struct *work) 3672 { 3673 struct pqi_ctrl_info *ctrl_info; 3674 3675 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); 3676 3677 pqi_ctrl_ofa_start(ctrl_info); 3678 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); 3679 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); 3680 } 3681 3682 static void pqi_ofa_quiesce_worker(struct work_struct *work) 3683 { 3684 struct pqi_ctrl_info *ctrl_info; 3685 struct pqi_event *event; 3686 3687 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); 3688 3689 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; 3690 3691 pqi_ofa_ctrl_quiesce(ctrl_info); 3692 pqi_acknowledge_event(ctrl_info, event); 3693 pqi_process_soft_reset(ctrl_info); 3694 } 3695 3696 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3697 struct pqi_event *event) 3698 { 3699 bool ack_event; 3700 3701 ack_event = true; 3702 3703 switch (event->event_id) { 3704 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3705 dev_info(&ctrl_info->pci_dev->dev, 3706 "received Online Firmware Activation memory allocation request\n"); 3707 schedule_work(&ctrl_info->ofa_memory_alloc_work); 3708 break; 3709 case PQI_EVENT_OFA_QUIESCE: 3710 dev_info(&ctrl_info->pci_dev->dev, 3711 "received Online Firmware Activation quiesce request\n"); 3712 schedule_work(&ctrl_info->ofa_quiesce_work); 3713 ack_event = false; 3714 break; 3715 case PQI_EVENT_OFA_CANCELED: 3716 dev_info(&ctrl_info->pci_dev->dev, 3717 "received Online Firmware Activation cancel request: reason: %u\n", 3718 ctrl_info->ofa_cancel_reason); 3719 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3720 pqi_ctrl_ofa_done(ctrl_info); 3721 break; 3722 default: 3723 dev_err(&ctrl_info->pci_dev->dev, 3724 "received unknown Online Firmware Activation request: event ID: %u\n", 3725 event->event_id); 3726 break; 3727 } 3728 3729 return ack_event; 3730 } 3731 3732 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) 3733 { 3734 unsigned long flags; 3735 struct pqi_scsi_dev *device; 3736 3737 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3738 3739 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 3740 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 3741 device->rescan = true; 3742 } 3743 3744 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3745 } 3746 3747 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) 3748 { 3749 unsigned long flags; 3750 struct pqi_scsi_dev *device; 3751 3752 
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3753 3754 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 3755 if (device->raid_bypass_enabled) 3756 device->raid_bypass_enabled = false; 3757 3758 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3759 } 3760 3761 static void pqi_event_worker(struct work_struct *work) 3762 { 3763 unsigned int i; 3764 bool rescan_needed; 3765 struct pqi_ctrl_info *ctrl_info; 3766 struct pqi_event *event; 3767 bool ack_event; 3768 3769 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3770 3771 pqi_ctrl_busy(ctrl_info); 3772 pqi_wait_if_ctrl_blocked(ctrl_info); 3773 if (pqi_ctrl_offline(ctrl_info)) 3774 goto out; 3775 3776 rescan_needed = false; 3777 event = ctrl_info->events; 3778 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3779 if (event->pending) { 3780 event->pending = false; 3781 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3782 ack_event = pqi_ofa_process_event(ctrl_info, event); 3783 } else { 3784 ack_event = true; 3785 rescan_needed = true; 3786 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) 3787 pqi_mark_volumes_for_rescan(ctrl_info); 3788 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) 3789 pqi_disable_raid_bypass(ctrl_info); 3790 } 3791 if (ack_event) 3792 pqi_acknowledge_event(ctrl_info, event); 3793 } 3794 event++; 3795 } 3796 3797 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) 3798 3799 if (rescan_needed) 3800 pqi_schedule_rescan_worker_with_delay(ctrl_info, 3801 PQI_RESCAN_WORK_FOR_EVENT_DELAY); 3802 3803 out: 3804 pqi_ctrl_unbusy(ctrl_info); 3805 } 3806 3807 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3808 3809 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3810 { 3811 int num_interrupts; 3812 u32 heartbeat_count; 3813 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); 3814 3815 pqi_check_ctrl_health(ctrl_info); 3816 if (pqi_ctrl_offline(ctrl_info)) 3817 return; 3818 3819 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3820 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3821 3822 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3823 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3824 dev_err(&ctrl_info->pci_dev->dev, 3825 "no heartbeat detected - last heartbeat count: %u\n", 3826 heartbeat_count); 3827 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3828 return; 3829 } 3830 } else { 3831 ctrl_info->previous_num_interrupts = num_interrupts; 3832 } 3833 3834 ctrl_info->previous_heartbeat_count = heartbeat_count; 3835 mod_timer(&ctrl_info->heartbeat_timer, 3836 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3837 } 3838 3839 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3840 { 3841 if (!ctrl_info->heartbeat_counter) 3842 return; 3843 3844 ctrl_info->previous_num_interrupts = 3845 atomic_read(&ctrl_info->num_interrupts); 3846 ctrl_info->previous_heartbeat_count = 3847 pqi_read_heartbeat_counter(ctrl_info); 3848 3849 ctrl_info->heartbeat_timer.expires = 3850 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3851 add_timer(&ctrl_info->heartbeat_timer); 3852 } 3853 3854 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3855 { 3856 del_timer_sync(&ctrl_info->heartbeat_timer); 3857 } 3858 3859 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, 3860 struct pqi_event *event, struct pqi_event_response *response) 3861 { 3862 switch (event->event_id) { 3863 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 
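		/*
		 * The firmware reports how much host memory it wants for Online
		 * Firmware Activation; pqi_ofa_memory_alloc_worker() later sizes
		 * the OFA buffer from this value.
		 */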
3864 ctrl_info->ofa_bytes_requested = 3865 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); 3866 break; 3867 case PQI_EVENT_OFA_CANCELED: 3868 ctrl_info->ofa_cancel_reason = 3869 get_unaligned_le16(&response->data.ofa_cancelled.reason); 3870 break; 3871 } 3872 } 3873 3874 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3875 { 3876 int num_events; 3877 pqi_index_t oq_pi; 3878 pqi_index_t oq_ci; 3879 struct pqi_event_queue *event_queue; 3880 struct pqi_event_response *response; 3881 struct pqi_event *event; 3882 int event_index; 3883 3884 event_queue = &ctrl_info->event_queue; 3885 num_events = 0; 3886 oq_ci = event_queue->oq_ci_copy; 3887 3888 while (1) { 3889 oq_pi = readl(event_queue->oq_pi); 3890 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3891 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3892 dev_err(&ctrl_info->pci_dev->dev, 3893 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3894 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3895 return -1; 3896 } 3897 3898 if (oq_pi == oq_ci) 3899 break; 3900 3901 num_events++; 3902 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3903 3904 event_index = pqi_event_type_to_event_index(response->event_type); 3905 3906 if (event_index >= 0 && response->request_acknowledge) { 3907 event = &ctrl_info->events[event_index]; 3908 event->pending = true; 3909 event->event_type = response->event_type; 3910 event->event_id = get_unaligned_le16(&response->event_id); 3911 event->additional_event_id = 3912 get_unaligned_le32(&response->additional_event_id); 3913 if (event->event_type == PQI_EVENT_TYPE_OFA) 3914 pqi_ofa_capture_event_payload(ctrl_info, event, response); 3915 } 3916 3917 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3918 } 3919 3920 if (num_events) { 3921 event_queue->oq_ci_copy = oq_ci; 3922 writel(oq_ci, event_queue->oq_ci); 3923 schedule_work(&ctrl_info->event_work); 3924 } 3925 3926 return num_events; 3927 } 3928 3929 #define PQI_LEGACY_INTX_MASK 0x1 3930 3931 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) 3932 { 3933 u32 intx_mask; 3934 struct pqi_device_registers __iomem *pqi_registers; 3935 volatile void __iomem *register_addr; 3936 3937 pqi_registers = ctrl_info->pqi_registers; 3938 3939 if (enable_intx) 3940 register_addr = &pqi_registers->legacy_intx_mask_clear; 3941 else 3942 register_addr = &pqi_registers->legacy_intx_mask_set; 3943 3944 intx_mask = readl(register_addr); 3945 intx_mask |= PQI_LEGACY_INTX_MASK; 3946 writel(intx_mask, register_addr); 3947 } 3948 3949 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3950 enum pqi_irq_mode new_mode) 3951 { 3952 switch (ctrl_info->irq_mode) { 3953 case IRQ_MODE_MSIX: 3954 switch (new_mode) { 3955 case IRQ_MODE_MSIX: 3956 break; 3957 case IRQ_MODE_INTX: 3958 pqi_configure_legacy_intx(ctrl_info, true); 3959 sis_enable_intx(ctrl_info); 3960 break; 3961 case IRQ_MODE_NONE: 3962 break; 3963 } 3964 break; 3965 case IRQ_MODE_INTX: 3966 switch (new_mode) { 3967 case IRQ_MODE_MSIX: 3968 pqi_configure_legacy_intx(ctrl_info, false); 3969 sis_enable_msix(ctrl_info); 3970 break; 3971 case IRQ_MODE_INTX: 3972 break; 3973 case IRQ_MODE_NONE: 3974 pqi_configure_legacy_intx(ctrl_info, false); 3975 break; 3976 } 3977 break; 3978 case IRQ_MODE_NONE: 3979 switch (new_mode) { 3980 case IRQ_MODE_MSIX: 3981 sis_enable_msix(ctrl_info); 3982 break; 3983 case IRQ_MODE_INTX: 3984 pqi_configure_legacy_intx(ctrl_info, 
true); 3985 sis_enable_intx(ctrl_info); 3986 break; 3987 case IRQ_MODE_NONE: 3988 break; 3989 } 3990 break; 3991 } 3992 3993 ctrl_info->irq_mode = new_mode; 3994 } 3995 3996 #define PQI_LEGACY_INTX_PENDING 0x1 3997 3998 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3999 { 4000 bool valid_irq; 4001 u32 intx_status; 4002 4003 switch (ctrl_info->irq_mode) { 4004 case IRQ_MODE_MSIX: 4005 valid_irq = true; 4006 break; 4007 case IRQ_MODE_INTX: 4008 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); 4009 if (intx_status & PQI_LEGACY_INTX_PENDING) 4010 valid_irq = true; 4011 else 4012 valid_irq = false; 4013 break; 4014 case IRQ_MODE_NONE: 4015 default: 4016 valid_irq = false; 4017 break; 4018 } 4019 4020 return valid_irq; 4021 } 4022 4023 static irqreturn_t pqi_irq_handler(int irq, void *data) 4024 { 4025 struct pqi_ctrl_info *ctrl_info; 4026 struct pqi_queue_group *queue_group; 4027 int num_io_responses_handled; 4028 int num_events_handled; 4029 4030 queue_group = data; 4031 ctrl_info = queue_group->ctrl_info; 4032 4033 if (!pqi_is_valid_irq(ctrl_info)) 4034 return IRQ_NONE; 4035 4036 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 4037 if (num_io_responses_handled < 0) 4038 goto out; 4039 4040 if (irq == ctrl_info->event_irq) { 4041 num_events_handled = pqi_process_event_intr(ctrl_info); 4042 if (num_events_handled < 0) 4043 goto out; 4044 } else { 4045 num_events_handled = 0; 4046 } 4047 4048 if (num_io_responses_handled + num_events_handled > 0) 4049 atomic_inc(&ctrl_info->num_interrupts); 4050 4051 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 4052 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 4053 4054 out: 4055 return IRQ_HANDLED; 4056 } 4057 4058 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 4059 { 4060 struct pci_dev *pci_dev = ctrl_info->pci_dev; 4061 int i; 4062 int rc; 4063 4064 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 4065 4066 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 4067 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 4068 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 4069 if (rc) { 4070 dev_err(&pci_dev->dev, 4071 "irq %u init failed with error %d\n", 4072 pci_irq_vector(pci_dev, i), rc); 4073 return rc; 4074 } 4075 ctrl_info->num_msix_vectors_initialized++; 4076 } 4077 4078 return 0; 4079 } 4080 4081 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 4082 { 4083 int i; 4084 4085 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 4086 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 4087 &ctrl_info->queue_groups[i]); 4088 4089 ctrl_info->num_msix_vectors_initialized = 0; 4090 } 4091 4092 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4093 { 4094 int num_vectors_enabled; 4095 unsigned int flags = PCI_IRQ_MSIX; 4096 4097 if (!pqi_disable_managed_interrupts) 4098 flags |= PCI_IRQ_AFFINITY; 4099 4100 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 4101 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 4102 flags); 4103 if (num_vectors_enabled < 0) { 4104 dev_err(&ctrl_info->pci_dev->dev, 4105 "MSI-X init failed with error %d\n", 4106 num_vectors_enabled); 4107 return num_vectors_enabled; 4108 } 4109 4110 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 4111 ctrl_info->irq_mode = IRQ_MODE_MSIX; 4112 return 0; 4113 } 4114 4115 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4116 { 4117 if (ctrl_info->num_msix_vectors_enabled) { 4118 
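		/* Release the MSI-X vectors allocated by pqi_enable_msix_interrupts(). */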
pci_free_irq_vectors(ctrl_info->pci_dev); 4119 ctrl_info->num_msix_vectors_enabled = 0; 4120 } 4121 } 4122 4123 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 4124 { 4125 unsigned int i; 4126 size_t alloc_length; 4127 size_t element_array_length_per_iq; 4128 size_t element_array_length_per_oq; 4129 void *element_array; 4130 void __iomem *next_queue_index; 4131 void *aligned_pointer; 4132 unsigned int num_inbound_queues; 4133 unsigned int num_outbound_queues; 4134 unsigned int num_queue_indexes; 4135 struct pqi_queue_group *queue_group; 4136 4137 element_array_length_per_iq = 4138 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 4139 ctrl_info->num_elements_per_iq; 4140 element_array_length_per_oq = 4141 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 4142 ctrl_info->num_elements_per_oq; 4143 num_inbound_queues = ctrl_info->num_queue_groups * 2; 4144 num_outbound_queues = ctrl_info->num_queue_groups; 4145 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 4146 4147 aligned_pointer = NULL; 4148 4149 for (i = 0; i < num_inbound_queues; i++) { 4150 aligned_pointer = PTR_ALIGN(aligned_pointer, 4151 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4152 aligned_pointer += element_array_length_per_iq; 4153 } 4154 4155 for (i = 0; i < num_outbound_queues; i++) { 4156 aligned_pointer = PTR_ALIGN(aligned_pointer, 4157 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4158 aligned_pointer += element_array_length_per_oq; 4159 } 4160 4161 aligned_pointer = PTR_ALIGN(aligned_pointer, 4162 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4163 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4164 PQI_EVENT_OQ_ELEMENT_LENGTH; 4165 4166 for (i = 0; i < num_queue_indexes; i++) { 4167 aligned_pointer = PTR_ALIGN(aligned_pointer, 4168 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4169 aligned_pointer += sizeof(pqi_index_t); 4170 } 4171 4172 alloc_length = (size_t)aligned_pointer + 4173 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4174 4175 alloc_length += PQI_EXTRA_SGL_MEMORY; 4176 4177 ctrl_info->queue_memory_base = 4178 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4179 &ctrl_info->queue_memory_base_dma_handle, 4180 GFP_KERNEL); 4181 4182 if (!ctrl_info->queue_memory_base) 4183 return -ENOMEM; 4184 4185 ctrl_info->queue_memory_length = alloc_length; 4186 4187 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 4188 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4189 4190 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4191 queue_group = &ctrl_info->queue_groups[i]; 4192 queue_group->iq_element_array[RAID_PATH] = element_array; 4193 queue_group->iq_element_array_bus_addr[RAID_PATH] = 4194 ctrl_info->queue_memory_base_dma_handle + 4195 (element_array - ctrl_info->queue_memory_base); 4196 element_array += element_array_length_per_iq; 4197 element_array = PTR_ALIGN(element_array, 4198 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4199 queue_group->iq_element_array[AIO_PATH] = element_array; 4200 queue_group->iq_element_array_bus_addr[AIO_PATH] = 4201 ctrl_info->queue_memory_base_dma_handle + 4202 (element_array - ctrl_info->queue_memory_base); 4203 element_array += element_array_length_per_iq; 4204 element_array = PTR_ALIGN(element_array, 4205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4206 } 4207 4208 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4209 queue_group = &ctrl_info->queue_groups[i]; 4210 queue_group->oq_element_array = element_array; 4211 queue_group->oq_element_array_bus_addr = 4212 ctrl_info->queue_memory_base_dma_handle + 4213 (element_array - ctrl_info->queue_memory_base); 4214 element_array += element_array_length_per_oq; 4215 element_array = 
PTR_ALIGN(element_array, 4216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4217 } 4218 4219 ctrl_info->event_queue.oq_element_array = element_array; 4220 ctrl_info->event_queue.oq_element_array_bus_addr = 4221 ctrl_info->queue_memory_base_dma_handle + 4222 (element_array - ctrl_info->queue_memory_base); 4223 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4224 PQI_EVENT_OQ_ELEMENT_LENGTH; 4225 4226 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 4227 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4228 4229 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4230 queue_group = &ctrl_info->queue_groups[i]; 4231 queue_group->iq_ci[RAID_PATH] = next_queue_index; 4232 queue_group->iq_ci_bus_addr[RAID_PATH] = 4233 ctrl_info->queue_memory_base_dma_handle + 4234 (next_queue_index - 4235 (void __iomem *)ctrl_info->queue_memory_base); 4236 next_queue_index += sizeof(pqi_index_t); 4237 next_queue_index = PTR_ALIGN(next_queue_index, 4238 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4239 queue_group->iq_ci[AIO_PATH] = next_queue_index; 4240 queue_group->iq_ci_bus_addr[AIO_PATH] = 4241 ctrl_info->queue_memory_base_dma_handle + 4242 (next_queue_index - 4243 (void __iomem *)ctrl_info->queue_memory_base); 4244 next_queue_index += sizeof(pqi_index_t); 4245 next_queue_index = PTR_ALIGN(next_queue_index, 4246 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4247 queue_group->oq_pi = next_queue_index; 4248 queue_group->oq_pi_bus_addr = 4249 ctrl_info->queue_memory_base_dma_handle + 4250 (next_queue_index - 4251 (void __iomem *)ctrl_info->queue_memory_base); 4252 next_queue_index += sizeof(pqi_index_t); 4253 next_queue_index = PTR_ALIGN(next_queue_index, 4254 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4255 } 4256 4257 ctrl_info->event_queue.oq_pi = next_queue_index; 4258 ctrl_info->event_queue.oq_pi_bus_addr = 4259 ctrl_info->queue_memory_base_dma_handle + 4260 (next_queue_index - 4261 (void __iomem *)ctrl_info->queue_memory_base); 4262 4263 return 0; 4264 } 4265 4266 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 4267 { 4268 unsigned int i; 4269 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4270 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4271 4272 /* 4273 * Initialize the backpointers to the controller structure in 4274 * each operational queue group structure. 4275 */ 4276 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4277 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 4278 4279 /* 4280 * Assign IDs to all operational queues. Note that the IDs 4281 * assigned to operational IQs are independent of the IDs 4282 * assigned to operational OQs. 4283 */ 4284 ctrl_info->event_queue.oq_id = next_oq_id++; 4285 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4286 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 4287 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 4288 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 4289 } 4290 4291 /* 4292 * Assign MSI-X table entry indexes to all queues. Note that the 4293 * interrupt for the event queue is shared with the first queue group. 
4294 */ 4295 ctrl_info->event_queue.int_msg_num = 0; 4296 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4297 ctrl_info->queue_groups[i].int_msg_num = i; 4298 4299 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4300 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 4301 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 4302 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 4303 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 4304 } 4305 } 4306 4307 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 4308 { 4309 size_t alloc_length; 4310 struct pqi_admin_queues_aligned *admin_queues_aligned; 4311 struct pqi_admin_queues *admin_queues; 4312 4313 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 4314 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4315 4316 ctrl_info->admin_queue_memory_base = 4317 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4318 &ctrl_info->admin_queue_memory_base_dma_handle, 4319 GFP_KERNEL); 4320 4321 if (!ctrl_info->admin_queue_memory_base) 4322 return -ENOMEM; 4323 4324 ctrl_info->admin_queue_memory_length = alloc_length; 4325 4326 admin_queues = &ctrl_info->admin_queues; 4327 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 4328 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4329 admin_queues->iq_element_array = 4330 &admin_queues_aligned->iq_element_array; 4331 admin_queues->oq_element_array = 4332 &admin_queues_aligned->oq_element_array; 4333 admin_queues->iq_ci = 4334 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; 4335 admin_queues->oq_pi = 4336 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 4337 4338 admin_queues->iq_element_array_bus_addr = 4339 ctrl_info->admin_queue_memory_base_dma_handle + 4340 (admin_queues->iq_element_array - 4341 ctrl_info->admin_queue_memory_base); 4342 admin_queues->oq_element_array_bus_addr = 4343 ctrl_info->admin_queue_memory_base_dma_handle + 4344 (admin_queues->oq_element_array - 4345 ctrl_info->admin_queue_memory_base); 4346 admin_queues->iq_ci_bus_addr = 4347 ctrl_info->admin_queue_memory_base_dma_handle + 4348 ((void __iomem *)admin_queues->iq_ci - 4349 (void __iomem *)ctrl_info->admin_queue_memory_base); 4350 admin_queues->oq_pi_bus_addr = 4351 ctrl_info->admin_queue_memory_base_dma_handle + 4352 ((void __iomem *)admin_queues->oq_pi - 4353 (void __iomem *)ctrl_info->admin_queue_memory_base); 4354 4355 return 0; 4356 } 4357 4358 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 4359 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 4360 4361 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 4362 { 4363 struct pqi_device_registers __iomem *pqi_registers; 4364 struct pqi_admin_queues *admin_queues; 4365 unsigned long timeout; 4366 u8 status; 4367 u32 reg; 4368 4369 pqi_registers = ctrl_info->pqi_registers; 4370 admin_queues = &ctrl_info->admin_queues; 4371 4372 writeq((u64)admin_queues->iq_element_array_bus_addr, 4373 &pqi_registers->admin_iq_element_array_addr); 4374 writeq((u64)admin_queues->oq_element_array_bus_addr, 4375 &pqi_registers->admin_oq_element_array_addr); 4376 writeq((u64)admin_queues->iq_ci_bus_addr, 4377 &pqi_registers->admin_iq_ci_addr); 4378 writeq((u64)admin_queues->oq_pi_bus_addr, 4379 &pqi_registers->admin_oq_pi_addr); 4380 4381 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 4382 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 4383 (admin_queues->int_msg_num << 16); 4384 writel(reg, &pqi_registers->admin_iq_num_elements); 4385 4386 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 4387 &pqi_registers->function_and_status_code); 4388 4389 
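	/*
	 * Poll the function and status code register until the controller
	 * reports PQI_STATUS_IDLE, giving up after
	 * PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES.
	 */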
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4390 while (1) { 4391 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4392 status = readb(&pqi_registers->function_and_status_code); 4393 if (status == PQI_STATUS_IDLE) 4394 break; 4395 if (time_after(jiffies, timeout)) 4396 return -ETIMEDOUT; 4397 } 4398 4399 /* 4400 * The offset registers are not initialized to the correct 4401 * offsets until *after* the create admin queue pair command 4402 * completes successfully. 4403 */ 4404 admin_queues->iq_pi = ctrl_info->iomem_base + 4405 PQI_DEVICE_REGISTERS_OFFSET + 4406 readq(&pqi_registers->admin_iq_pi_offset); 4407 admin_queues->oq_ci = ctrl_info->iomem_base + 4408 PQI_DEVICE_REGISTERS_OFFSET + 4409 readq(&pqi_registers->admin_oq_ci_offset); 4410 4411 return 0; 4412 } 4413 4414 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 4415 struct pqi_general_admin_request *request) 4416 { 4417 struct pqi_admin_queues *admin_queues; 4418 void *next_element; 4419 pqi_index_t iq_pi; 4420 4421 admin_queues = &ctrl_info->admin_queues; 4422 iq_pi = admin_queues->iq_pi_copy; 4423 4424 next_element = admin_queues->iq_element_array + 4425 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 4426 4427 memcpy(next_element, request, sizeof(*request)); 4428 4429 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 4430 admin_queues->iq_pi_copy = iq_pi; 4431 4432 /* 4433 * This write notifies the controller that an IU is available to be 4434 * processed. 4435 */ 4436 writel(iq_pi, admin_queues->iq_pi); 4437 } 4438 4439 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 4440 4441 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 4442 struct pqi_general_admin_response *response) 4443 { 4444 struct pqi_admin_queues *admin_queues; 4445 pqi_index_t oq_pi; 4446 pqi_index_t oq_ci; 4447 unsigned long timeout; 4448 4449 admin_queues = &ctrl_info->admin_queues; 4450 oq_ci = admin_queues->oq_ci_copy; 4451 4452 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 4453 4454 while (1) { 4455 oq_pi = readl(admin_queues->oq_pi); 4456 if (oq_pi != oq_ci) 4457 break; 4458 if (time_after(jiffies, timeout)) { 4459 dev_err(&ctrl_info->pci_dev->dev, 4460 "timed out waiting for admin response\n"); 4461 return -ETIMEDOUT; 4462 } 4463 if (!sis_is_firmware_running(ctrl_info)) 4464 return -ENXIO; 4465 usleep_range(1000, 2000); 4466 } 4467 4468 memcpy(response, admin_queues->oq_element_array + 4469 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 4470 4471 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 4472 admin_queues->oq_ci_copy = oq_ci; 4473 writel(oq_ci, admin_queues->oq_ci); 4474 4475 return 0; 4476 } 4477 4478 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 4479 struct pqi_queue_group *queue_group, enum pqi_io_path path, 4480 struct pqi_io_request *io_request) 4481 { 4482 struct pqi_io_request *next; 4483 void *next_element; 4484 pqi_index_t iq_pi; 4485 pqi_index_t iq_ci; 4486 size_t iu_length; 4487 unsigned long flags; 4488 unsigned int num_elements_needed; 4489 unsigned int num_elements_to_end_of_queue; 4490 size_t copy_count; 4491 struct pqi_iu_header *request; 4492 4493 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4494 4495 if (io_request) { 4496 io_request->queue_group = queue_group; 4497 list_add_tail(&io_request->request_list_entry, 4498 &queue_group->request_list[path]); 4499 } 4500 4501 iq_pi = queue_group->iq_pi_copy[path]; 4502 4503 list_for_each_entry_safe(io_request, next, 4504 &queue_group->request_list[path], request_list_entry) { 4505 4506 request = 
io_request->iu; 4507 4508 iu_length = get_unaligned_le16(&request->iu_length) + 4509 PQI_REQUEST_HEADER_LENGTH; 4510 num_elements_needed = 4511 DIV_ROUND_UP(iu_length, 4512 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4513 4514 iq_ci = readl(queue_group->iq_ci[path]); 4515 4516 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 4517 ctrl_info->num_elements_per_iq)) 4518 break; 4519 4520 put_unaligned_le16(queue_group->oq_id, 4521 &request->response_queue_id); 4522 4523 next_element = queue_group->iq_element_array[path] + 4524 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4525 4526 num_elements_to_end_of_queue = 4527 ctrl_info->num_elements_per_iq - iq_pi; 4528 4529 if (num_elements_needed <= num_elements_to_end_of_queue) { 4530 memcpy(next_element, request, iu_length); 4531 } else { 4532 copy_count = num_elements_to_end_of_queue * 4533 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4534 memcpy(next_element, request, copy_count); 4535 memcpy(queue_group->iq_element_array[path], 4536 (u8 *)request + copy_count, 4537 iu_length - copy_count); 4538 } 4539 4540 iq_pi = (iq_pi + num_elements_needed) % 4541 ctrl_info->num_elements_per_iq; 4542 4543 list_del(&io_request->request_list_entry); 4544 } 4545 4546 if (iq_pi != queue_group->iq_pi_copy[path]) { 4547 queue_group->iq_pi_copy[path] = iq_pi; 4548 /* 4549 * This write notifies the controller that one or more IUs are 4550 * available to be processed. 4551 */ 4552 writel(iq_pi, queue_group->iq_pi[path]); 4553 } 4554 4555 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4556 } 4557 4558 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 4559 4560 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 4561 struct completion *wait) 4562 { 4563 int rc; 4564 4565 while (1) { 4566 if (wait_for_completion_io_timeout(wait, 4567 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 4568 rc = 0; 4569 break; 4570 } 4571 4572 pqi_check_ctrl_health(ctrl_info); 4573 if (pqi_ctrl_offline(ctrl_info)) { 4574 rc = -ENXIO; 4575 break; 4576 } 4577 } 4578 4579 return rc; 4580 } 4581 4582 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 4583 void *context) 4584 { 4585 struct completion *waiting = context; 4586 4587 complete(waiting); 4588 } 4589 4590 static int pqi_process_raid_io_error_synchronous( 4591 struct pqi_raid_error_info *error_info) 4592 { 4593 int rc = -EIO; 4594 4595 switch (error_info->data_out_result) { 4596 case PQI_DATA_IN_OUT_GOOD: 4597 if (error_info->status == SAM_STAT_GOOD) 4598 rc = 0; 4599 break; 4600 case PQI_DATA_IN_OUT_UNDERFLOW: 4601 if (error_info->status == SAM_STAT_GOOD || 4602 error_info->status == SAM_STAT_CHECK_CONDITION) 4603 rc = 0; 4604 break; 4605 case PQI_DATA_IN_OUT_ABORTED: 4606 rc = PQI_CMD_STATUS_ABORTED; 4607 break; 4608 } 4609 4610 return rc; 4611 } 4612 4613 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) 4614 { 4615 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; 4616 } 4617 4618 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4619 struct pqi_iu_header *request, unsigned int flags, 4620 struct pqi_raid_error_info *error_info) 4621 { 4622 int rc = 0; 4623 struct pqi_io_request *io_request; 4624 size_t iu_length; 4625 DECLARE_COMPLETION_ONSTACK(wait); 4626 4627 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4628 if (down_interruptible(&ctrl_info->sync_request_sem)) 4629 return -ERESTARTSYS; 4630 } else { 4631 down(&ctrl_info->sync_request_sem); 4632 } 4633 4634 pqi_ctrl_busy(ctrl_info); 4635 /* 4636 * 
Wait for other admin queue updates such as; 4637 * config table changes, OFA memory updates, ... 4638 */ 4639 if (pqi_is_blockable_request(request)) 4640 pqi_wait_if_ctrl_blocked(ctrl_info); 4641 4642 if (pqi_ctrl_offline(ctrl_info)) { 4643 rc = -ENXIO; 4644 goto out; 4645 } 4646 4647 io_request = pqi_alloc_io_request(ctrl_info, NULL); 4648 4649 put_unaligned_le16(io_request->index, 4650 &(((struct pqi_raid_path_request *)request)->request_id)); 4651 4652 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4653 ((struct pqi_raid_path_request *)request)->error_index = 4654 ((struct pqi_raid_path_request *)request)->request_id; 4655 4656 iu_length = get_unaligned_le16(&request->iu_length) + 4657 PQI_REQUEST_HEADER_LENGTH; 4658 memcpy(io_request->iu, request, iu_length); 4659 4660 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4661 io_request->context = &wait; 4662 4663 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4664 io_request); 4665 4666 pqi_wait_for_completion_io(ctrl_info, &wait); 4667 4668 if (error_info) { 4669 if (io_request->error_info) 4670 memcpy(error_info, io_request->error_info, sizeof(*error_info)); 4671 else 4672 memset(error_info, 0, sizeof(*error_info)); 4673 } else if (rc == 0 && io_request->error_info) { 4674 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); 4675 } 4676 4677 pqi_free_io_request(io_request); 4678 4679 out: 4680 pqi_ctrl_unbusy(ctrl_info); 4681 up(&ctrl_info->sync_request_sem); 4682 4683 return rc; 4684 } 4685 4686 static int pqi_validate_admin_response( 4687 struct pqi_general_admin_response *response, u8 expected_function_code) 4688 { 4689 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4690 return -EINVAL; 4691 4692 if (get_unaligned_le16(&response->header.iu_length) != 4693 PQI_GENERAL_ADMIN_IU_LENGTH) 4694 return -EINVAL; 4695 4696 if (response->function_code != expected_function_code) 4697 return -EINVAL; 4698 4699 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4700 return -EINVAL; 4701 4702 return 0; 4703 } 4704 4705 static int pqi_submit_admin_request_synchronous( 4706 struct pqi_ctrl_info *ctrl_info, 4707 struct pqi_general_admin_request *request, 4708 struct pqi_general_admin_response *response) 4709 { 4710 int rc; 4711 4712 pqi_submit_admin_request(ctrl_info, request); 4713 4714 rc = pqi_poll_for_admin_response(ctrl_info, response); 4715 4716 if (rc == 0) 4717 rc = pqi_validate_admin_response(response, request->function_code); 4718 4719 return rc; 4720 } 4721 4722 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4723 { 4724 int rc; 4725 struct pqi_general_admin_request request; 4726 struct pqi_general_admin_response response; 4727 struct pqi_device_capability *capability; 4728 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4729 4730 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4731 if (!capability) 4732 return -ENOMEM; 4733 4734 memset(&request, 0, sizeof(request)); 4735 4736 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4737 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4738 &request.header.iu_length); 4739 request.function_code = 4740 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4741 put_unaligned_le32(sizeof(*capability), 4742 &request.data.report_device_capability.buffer_length); 4743 4744 rc = pqi_map_single(ctrl_info->pci_dev, 4745 &request.data.report_device_capability.sg_descriptor, 4746 capability, sizeof(*capability), 4747 DMA_FROM_DEVICE); 4748 if (rc) 4749 goto out; 
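	/*
	 * Submit the REPORT DEVICE CAPABILITY request synchronously; the
	 * capability buffer is unmapped below whether or not the request
	 * succeeded.
	 */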
4750 4751 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); 4752 4753 pqi_pci_unmap(ctrl_info->pci_dev, 4754 &request.data.report_device_capability.sg_descriptor, 1, 4755 DMA_FROM_DEVICE); 4756 4757 if (rc) 4758 goto out; 4759 4760 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4761 rc = -EIO; 4762 goto out; 4763 } 4764 4765 ctrl_info->max_inbound_queues = 4766 get_unaligned_le16(&capability->max_inbound_queues); 4767 ctrl_info->max_elements_per_iq = 4768 get_unaligned_le16(&capability->max_elements_per_iq); 4769 ctrl_info->max_iq_element_length = 4770 get_unaligned_le16(&capability->max_iq_element_length) 4771 * 16; 4772 ctrl_info->max_outbound_queues = 4773 get_unaligned_le16(&capability->max_outbound_queues); 4774 ctrl_info->max_elements_per_oq = 4775 get_unaligned_le16(&capability->max_elements_per_oq); 4776 ctrl_info->max_oq_element_length = 4777 get_unaligned_le16(&capability->max_oq_element_length) 4778 * 16; 4779 4780 sop_iu_layer_descriptor = 4781 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4782 4783 ctrl_info->max_inbound_iu_length_per_firmware = 4784 get_unaligned_le16( 4785 &sop_iu_layer_descriptor->max_inbound_iu_length); 4786 ctrl_info->inbound_spanning_supported = 4787 sop_iu_layer_descriptor->inbound_spanning_supported; 4788 ctrl_info->outbound_spanning_supported = 4789 sop_iu_layer_descriptor->outbound_spanning_supported; 4790 4791 out: 4792 kfree(capability); 4793 4794 return rc; 4795 } 4796 4797 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4798 { 4799 if (ctrl_info->max_iq_element_length < 4800 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4801 dev_err(&ctrl_info->pci_dev->dev, 4802 "max. inbound queue element length of %d is less than the required length of %d\n", 4803 ctrl_info->max_iq_element_length, 4804 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4805 return -EINVAL; 4806 } 4807 4808 if (ctrl_info->max_oq_element_length < 4809 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4810 dev_err(&ctrl_info->pci_dev->dev, 4811 "max. outbound queue element length of %d is less than the required length of %d\n", 4812 ctrl_info->max_oq_element_length, 4813 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4814 return -EINVAL; 4815 } 4816 4817 if (ctrl_info->max_inbound_iu_length_per_firmware < 4818 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4819 dev_err(&ctrl_info->pci_dev->dev, 4820 "max. inbound IU length of %u is less than the min. required length of %d\n", 4821 ctrl_info->max_inbound_iu_length_per_firmware, 4822 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4823 return -EINVAL; 4824 } 4825 4826 if (!ctrl_info->inbound_spanning_supported) { 4827 dev_err(&ctrl_info->pci_dev->dev, 4828 "the controller does not support inbound spanning\n"); 4829 return -EINVAL; 4830 } 4831 4832 if (ctrl_info->outbound_spanning_supported) { 4833 dev_err(&ctrl_info->pci_dev->dev, 4834 "the controller supports outbound spanning but this driver does not\n"); 4835 return -EINVAL; 4836 } 4837 4838 return 0; 4839 } 4840 4841 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4842 { 4843 int rc; 4844 struct pqi_event_queue *event_queue; 4845 struct pqi_general_admin_request request; 4846 struct pqi_general_admin_response response; 4847 4848 event_queue = &ctrl_info->event_queue; 4849 4850 /* 4851 * Create OQ (Outbound Queue - device to host queue) to dedicate 4852 * to events. 
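 * The event OQ is wired to MSI-X entry 0, which it shares with the
 * first queue group (see pqi_init_operational_queues()).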
4853 */ 4854 memset(&request, 0, sizeof(request)); 4855 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4856 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4857 &request.header.iu_length); 4858 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4859 put_unaligned_le16(event_queue->oq_id, 4860 &request.data.create_operational_oq.queue_id); 4861 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4862 &request.data.create_operational_oq.element_array_addr); 4863 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4864 &request.data.create_operational_oq.pi_addr); 4865 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4866 &request.data.create_operational_oq.num_elements); 4867 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4868 &request.data.create_operational_oq.element_length); 4869 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4870 put_unaligned_le16(event_queue->int_msg_num, 4871 &request.data.create_operational_oq.int_msg_num); 4872 4873 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4874 &response); 4875 if (rc) 4876 return rc; 4877 4878 event_queue->oq_ci = ctrl_info->iomem_base + 4879 PQI_DEVICE_REGISTERS_OFFSET + 4880 get_unaligned_le64( 4881 &response.data.create_operational_oq.oq_ci_offset); 4882 4883 return 0; 4884 } 4885 4886 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4887 unsigned int group_number) 4888 { 4889 int rc; 4890 struct pqi_queue_group *queue_group; 4891 struct pqi_general_admin_request request; 4892 struct pqi_general_admin_response response; 4893 4894 queue_group = &ctrl_info->queue_groups[group_number]; 4895 4896 /* 4897 * Create IQ (Inbound Queue - host to device queue) for 4898 * RAID path. 4899 */ 4900 memset(&request, 0, sizeof(request)); 4901 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4902 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4903 &request.header.iu_length); 4904 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4905 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4906 &request.data.create_operational_iq.queue_id); 4907 put_unaligned_le64( 4908 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4909 &request.data.create_operational_iq.element_array_addr); 4910 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4911 &request.data.create_operational_iq.ci_addr); 4912 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4913 &request.data.create_operational_iq.num_elements); 4914 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4915 &request.data.create_operational_iq.element_length); 4916 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4917 4918 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4919 &response); 4920 if (rc) { 4921 dev_err(&ctrl_info->pci_dev->dev, 4922 "error creating inbound RAID queue\n"); 4923 return rc; 4924 } 4925 4926 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4927 PQI_DEVICE_REGISTERS_OFFSET + 4928 get_unaligned_le64( 4929 &response.data.create_operational_iq.iq_pi_offset); 4930 4931 /* 4932 * Create IQ (Inbound Queue - host to device queue) for 4933 * Advanced I/O (AIO) path. 
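 * It is created just like the RAID IQ; the change-IQ-property request
 * that follows flags it as the AIO queue.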
4934 */ 4935 memset(&request, 0, sizeof(request)); 4936 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4937 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4938 &request.header.iu_length); 4939 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4940 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4941 &request.data.create_operational_iq.queue_id); 4942 put_unaligned_le64((u64)queue_group-> 4943 iq_element_array_bus_addr[AIO_PATH], 4944 &request.data.create_operational_iq.element_array_addr); 4945 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4946 &request.data.create_operational_iq.ci_addr); 4947 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4948 &request.data.create_operational_iq.num_elements); 4949 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4950 &request.data.create_operational_iq.element_length); 4951 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4952 4953 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4954 &response); 4955 if (rc) { 4956 dev_err(&ctrl_info->pci_dev->dev, 4957 "error creating inbound AIO queue\n"); 4958 return rc; 4959 } 4960 4961 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4962 PQI_DEVICE_REGISTERS_OFFSET + 4963 get_unaligned_le64( 4964 &response.data.create_operational_iq.iq_pi_offset); 4965 4966 /* 4967 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4968 * assumed to be for RAID path I/O unless we change the queue's 4969 * property. 4970 */ 4971 memset(&request, 0, sizeof(request)); 4972 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4973 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4974 &request.header.iu_length); 4975 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4976 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4977 &request.data.change_operational_iq_properties.queue_id); 4978 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4979 &request.data.change_operational_iq_properties.vendor_specific); 4980 4981 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4982 &response); 4983 if (rc) { 4984 dev_err(&ctrl_info->pci_dev->dev, 4985 "error changing queue property\n"); 4986 return rc; 4987 } 4988 4989 /* 4990 * Create OQ (Outbound Queue - device to host queue). 
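 * Responses for both of this group's inbound queues (RAID and AIO) are
 * posted here; pqi_start_io() stamps this OQ's ID into each request's
 * response_queue_id.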
4991 */ 4992 memset(&request, 0, sizeof(request)); 4993 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4994 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4995 &request.header.iu_length); 4996 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4997 put_unaligned_le16(queue_group->oq_id, 4998 &request.data.create_operational_oq.queue_id); 4999 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 5000 &request.data.create_operational_oq.element_array_addr); 5001 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 5002 &request.data.create_operational_oq.pi_addr); 5003 put_unaligned_le16(ctrl_info->num_elements_per_oq, 5004 &request.data.create_operational_oq.num_elements); 5005 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 5006 &request.data.create_operational_oq.element_length); 5007 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 5008 put_unaligned_le16(queue_group->int_msg_num, 5009 &request.data.create_operational_oq.int_msg_num); 5010 5011 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 5012 &response); 5013 if (rc) { 5014 dev_err(&ctrl_info->pci_dev->dev, 5015 "error creating outbound queue\n"); 5016 return rc; 5017 } 5018 5019 queue_group->oq_ci = ctrl_info->iomem_base + 5020 PQI_DEVICE_REGISTERS_OFFSET + 5021 get_unaligned_le64( 5022 &response.data.create_operational_oq.oq_ci_offset); 5023 5024 return 0; 5025 } 5026 5027 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 5028 { 5029 int rc; 5030 unsigned int i; 5031 5032 rc = pqi_create_event_queue(ctrl_info); 5033 if (rc) { 5034 dev_err(&ctrl_info->pci_dev->dev, 5035 "error creating event queue\n"); 5036 return rc; 5037 } 5038 5039 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5040 rc = pqi_create_queue_group(ctrl_info, i); 5041 if (rc) { 5042 dev_err(&ctrl_info->pci_dev->dev, 5043 "error creating queue group number %u/%u\n", 5044 i, ctrl_info->num_queue_groups); 5045 return rc; 5046 } 5047 } 5048 5049 return 0; 5050 } 5051 5052 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 5053 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) 5054 5055 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 5056 bool enable_events) 5057 { 5058 int rc; 5059 unsigned int i; 5060 struct pqi_event_config *event_config; 5061 struct pqi_event_descriptor *event_descriptor; 5062 struct pqi_general_management_request request; 5063 5064 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5065 GFP_KERNEL); 5066 if (!event_config) 5067 return -ENOMEM; 5068 5069 memset(&request, 0, sizeof(request)); 5070 5071 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 5072 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5073 data.report_event_configuration.sg_descriptors[1]) - 5074 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5075 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5076 &request.data.report_event_configuration.buffer_length); 5077 5078 rc = pqi_map_single(ctrl_info->pci_dev, 5079 request.data.report_event_configuration.sg_descriptors, 5080 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5081 DMA_FROM_DEVICE); 5082 if (rc) 5083 goto out; 5084 5085 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5086 5087 pqi_pci_unmap(ctrl_info->pci_dev, 5088 request.data.report_event_configuration.sg_descriptors, 1, 5089 DMA_FROM_DEVICE); 5090 5091 if (rc) 5092 goto out; 5093 5094 for (i = 0; i < event_config->num_event_descriptors; 
i++) { 5095 event_descriptor = &event_config->descriptors[i]; 5096 if (enable_events && 5097 pqi_is_supported_event(event_descriptor->event_type)) 5098 put_unaligned_le16(ctrl_info->event_queue.oq_id, 5099 &event_descriptor->oq_id); 5100 else 5101 put_unaligned_le16(0, &event_descriptor->oq_id); 5102 } 5103 5104 memset(&request, 0, sizeof(request)); 5105 5106 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 5107 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5108 data.report_event_configuration.sg_descriptors[1]) - 5109 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5110 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5111 &request.data.report_event_configuration.buffer_length); 5112 5113 rc = pqi_map_single(ctrl_info->pci_dev, 5114 request.data.report_event_configuration.sg_descriptors, 5115 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5116 DMA_TO_DEVICE); 5117 if (rc) 5118 goto out; 5119 5120 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5121 5122 pqi_pci_unmap(ctrl_info->pci_dev, 5123 request.data.report_event_configuration.sg_descriptors, 1, 5124 DMA_TO_DEVICE); 5125 5126 out: 5127 kfree(event_config); 5128 5129 return rc; 5130 } 5131 5132 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 5133 { 5134 return pqi_configure_events(ctrl_info, true); 5135 } 5136 5137 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 5138 { 5139 unsigned int i; 5140 struct device *dev; 5141 size_t sg_chain_buffer_length; 5142 struct pqi_io_request *io_request; 5143 5144 if (!ctrl_info->io_request_pool) 5145 return; 5146 5147 dev = &ctrl_info->pci_dev->dev; 5148 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5149 io_request = ctrl_info->io_request_pool; 5150 5151 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5152 kfree(io_request->iu); 5153 if (!io_request->sg_chain_buffer) 5154 break; 5155 dma_free_coherent(dev, sg_chain_buffer_length, 5156 io_request->sg_chain_buffer, 5157 io_request->sg_chain_buffer_dma_handle); 5158 io_request++; 5159 } 5160 5161 kfree(ctrl_info->io_request_pool); 5162 ctrl_info->io_request_pool = NULL; 5163 } 5164 5165 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 5166 { 5167 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 5168 ctrl_info->error_buffer_length, 5169 &ctrl_info->error_buffer_dma_handle, 5170 GFP_KERNEL); 5171 if (!ctrl_info->error_buffer) 5172 return -ENOMEM; 5173 5174 return 0; 5175 } 5176 5177 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 5178 { 5179 unsigned int i; 5180 void *sg_chain_buffer; 5181 size_t sg_chain_buffer_length; 5182 dma_addr_t sg_chain_buffer_dma_handle; 5183 struct device *dev; 5184 struct pqi_io_request *io_request; 5185 5186 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, 5187 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 5188 5189 if (!ctrl_info->io_request_pool) { 5190 dev_err(&ctrl_info->pci_dev->dev, 5191 "failed to allocate I/O request pool\n"); 5192 goto error; 5193 } 5194 5195 dev = &ctrl_info->pci_dev->dev; 5196 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5197 io_request = ctrl_info->io_request_pool; 5198 5199 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5200 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 5201 5202 if (!io_request->iu) { 5203 dev_err(&ctrl_info->pci_dev->dev, 5204 "failed to allocate IU buffers\n"); 5205 goto error; 5206 } 5207 5208 
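		/*
		 * Each I/O slot gets its own coherent SG chain buffer.
		 * sg_chain_buffer_length comes from pqi_calculate_io_resources();
		 * as a hypothetical example, a 1 MiB max transfer with 4 KiB pages
		 * works out to 256 + 1 = 257 SG entries before clamping, i.e.
		 * 257 * sizeof(struct pqi_sg_descriptor) + PQI_EXTRA_SGL_MEMORY bytes.
		 */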
sg_chain_buffer = dma_alloc_coherent(dev, 5209 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 5210 GFP_KERNEL); 5211 5212 if (!sg_chain_buffer) { 5213 dev_err(&ctrl_info->pci_dev->dev, 5214 "failed to allocate PQI scatter-gather chain buffers\n"); 5215 goto error; 5216 } 5217 5218 io_request->index = i; 5219 io_request->sg_chain_buffer = sg_chain_buffer; 5220 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; 5221 io_request++; 5222 } 5223 5224 return 0; 5225 5226 error: 5227 pqi_free_all_io_requests(ctrl_info); 5228 5229 return -ENOMEM; 5230 } 5231 5232 /* 5233 * Calculate required resources that are sized based on max. outstanding 5234 * requests and max. transfer size. 5235 */ 5236 5237 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 5238 { 5239 u32 max_transfer_size; 5240 u32 max_sg_entries; 5241 5242 ctrl_info->scsi_ml_can_queue = 5243 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 5244 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 5245 5246 ctrl_info->error_buffer_length = 5247 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 5248 5249 if (reset_devices) 5250 max_transfer_size = min(ctrl_info->max_transfer_size, 5251 PQI_MAX_TRANSFER_SIZE_KDUMP); 5252 else 5253 max_transfer_size = min(ctrl_info->max_transfer_size, 5254 PQI_MAX_TRANSFER_SIZE); 5255 5256 max_sg_entries = max_transfer_size / PAGE_SIZE; 5257 5258 /* +1 to cover when the buffer is not page-aligned. */ 5259 max_sg_entries++; 5260 5261 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 5262 5263 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 5264 5265 ctrl_info->sg_chain_buffer_length = 5266 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 5267 PQI_EXTRA_SGL_MEMORY; 5268 ctrl_info->sg_tablesize = max_sg_entries; 5269 ctrl_info->max_sectors = max_transfer_size / 512; 5270 } 5271 5272 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 5273 { 5274 int num_queue_groups; 5275 u16 num_elements_per_iq; 5276 u16 num_elements_per_oq; 5277 5278 if (reset_devices) { 5279 num_queue_groups = 1; 5280 } else { 5281 int num_cpus; 5282 int max_queue_groups; 5283 5284 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 5285 ctrl_info->max_outbound_queues - 1); 5286 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 5287 5288 num_cpus = num_online_cpus(); 5289 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 5290 num_queue_groups = min(num_queue_groups, max_queue_groups); 5291 } 5292 5293 ctrl_info->num_queue_groups = num_queue_groups; 5294 5295 /* 5296 * Make sure that the max. inbound IU length is an even multiple 5297 * of our inbound element length. 5298 */ 5299 ctrl_info->max_inbound_iu_length = 5300 (ctrl_info->max_inbound_iu_length_per_firmware / 5301 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 5302 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 5303 5304 num_elements_per_iq = 5305 (ctrl_info->max_inbound_iu_length / 5306 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 5307 5308 /* Add one because one element in each queue is unusable. 
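	 * (With wrap-around producer and consumer indices, a completely full
	 * queue would look identical to an empty one, so one slot always
	 * stays free.)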
*/ 5309 num_elements_per_iq++; 5310 5311 num_elements_per_iq = min(num_elements_per_iq, 5312 ctrl_info->max_elements_per_iq); 5313 5314 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 5315 num_elements_per_oq = min(num_elements_per_oq, 5316 ctrl_info->max_elements_per_oq); 5317 5318 ctrl_info->num_elements_per_iq = num_elements_per_iq; 5319 ctrl_info->num_elements_per_oq = num_elements_per_oq; 5320 5321 ctrl_info->max_sg_per_iu = 5322 ((ctrl_info->max_inbound_iu_length - 5323 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5324 sizeof(struct pqi_sg_descriptor)) + 5325 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 5326 5327 ctrl_info->max_sg_per_r56_iu = 5328 ((ctrl_info->max_inbound_iu_length - 5329 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5330 sizeof(struct pqi_sg_descriptor)) + 5331 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; 5332 } 5333 5334 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, 5335 struct scatterlist *sg) 5336 { 5337 u64 address = (u64)sg_dma_address(sg); 5338 unsigned int length = sg_dma_len(sg); 5339 5340 put_unaligned_le64(address, &sg_descriptor->address); 5341 put_unaligned_le32(length, &sg_descriptor->length); 5342 put_unaligned_le32(0, &sg_descriptor->flags); 5343 } 5344 5345 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, 5346 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, 5347 int max_sg_per_iu, bool *chained) 5348 { 5349 int i; 5350 unsigned int num_sg_in_iu; 5351 5352 *chained = false; 5353 i = 0; 5354 num_sg_in_iu = 0; 5355 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ 5356 5357 while (1) { 5358 pqi_set_sg_descriptor(sg_descriptor, sg); 5359 if (!*chained) 5360 num_sg_in_iu++; 5361 i++; 5362 if (i == sg_count) 5363 break; 5364 sg_descriptor++; 5365 if (i == max_sg_per_iu) { 5366 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, 5367 &sg_descriptor->address); 5368 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), 5369 &sg_descriptor->length); 5370 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); 5371 *chained = true; 5372 num_sg_in_iu++; 5373 sg_descriptor = io_request->sg_chain_buffer; 5374 } 5375 sg = sg_next(sg); 5376 } 5377 5378 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 5379 5380 return num_sg_in_iu; 5381 } 5382 5383 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 5384 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 5385 struct pqi_io_request *io_request) 5386 { 5387 u16 iu_length; 5388 int sg_count; 5389 bool chained; 5390 unsigned int num_sg_in_iu; 5391 struct scatterlist *sg; 5392 struct pqi_sg_descriptor *sg_descriptor; 5393 5394 sg_count = scsi_dma_map(scmd); 5395 if (sg_count < 0) 5396 return sg_count; 5397 5398 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5399 PQI_REQUEST_HEADER_LENGTH; 5400 5401 if (sg_count == 0) 5402 goto out; 5403 5404 sg = scsi_sglist(scmd); 5405 sg_descriptor = request->sg_descriptors; 5406 5407 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5408 ctrl_info->max_sg_per_iu, &chained); 5409 5410 request->partial = chained; 5411 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5412 5413 out: 5414 put_unaligned_le16(iu_length, &request->header.iu_length); 5415 5416 return 0; 5417 } 5418 5419 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, 5420 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, 5421 struct pqi_io_request *io_request) 5422 { 5423 u16 iu_length; 5424 int 
sg_count; 5425 bool chained; 5426 unsigned int num_sg_in_iu; 5427 struct scatterlist *sg; 5428 struct pqi_sg_descriptor *sg_descriptor; 5429 5430 sg_count = scsi_dma_map(scmd); 5431 if (sg_count < 0) 5432 return sg_count; 5433 5434 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - 5435 PQI_REQUEST_HEADER_LENGTH; 5436 num_sg_in_iu = 0; 5437 5438 if (sg_count == 0) 5439 goto out; 5440 5441 sg = scsi_sglist(scmd); 5442 sg_descriptor = request->sg_descriptors; 5443 5444 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5445 ctrl_info->max_sg_per_iu, &chained); 5446 5447 request->partial = chained; 5448 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5449 5450 out: 5451 put_unaligned_le16(iu_length, &request->header.iu_length); 5452 request->num_sg_descriptors = num_sg_in_iu; 5453 5454 return 0; 5455 } 5456 5457 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, 5458 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, 5459 struct pqi_io_request *io_request) 5460 { 5461 u16 iu_length; 5462 int sg_count; 5463 bool chained; 5464 unsigned int num_sg_in_iu; 5465 struct scatterlist *sg; 5466 struct pqi_sg_descriptor *sg_descriptor; 5467 5468 sg_count = scsi_dma_map(scmd); 5469 if (sg_count < 0) 5470 return sg_count; 5471 5472 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - 5473 PQI_REQUEST_HEADER_LENGTH; 5474 num_sg_in_iu = 0; 5475 5476 if (sg_count != 0) { 5477 sg = scsi_sglist(scmd); 5478 sg_descriptor = request->sg_descriptors; 5479 5480 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5481 ctrl_info->max_sg_per_r56_iu, &chained); 5482 5483 request->partial = chained; 5484 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5485 } 5486 5487 put_unaligned_le16(iu_length, &request->header.iu_length); 5488 request->num_sg_descriptors = num_sg_in_iu; 5489 5490 return 0; 5491 } 5492 5493 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 5494 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 5495 struct pqi_io_request *io_request) 5496 { 5497 u16 iu_length; 5498 int sg_count; 5499 bool chained; 5500 unsigned int num_sg_in_iu; 5501 struct scatterlist *sg; 5502 struct pqi_sg_descriptor *sg_descriptor; 5503 5504 sg_count = scsi_dma_map(scmd); 5505 if (sg_count < 0) 5506 return sg_count; 5507 5508 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 5509 PQI_REQUEST_HEADER_LENGTH; 5510 num_sg_in_iu = 0; 5511 5512 if (sg_count == 0) 5513 goto out; 5514 5515 sg = scsi_sglist(scmd); 5516 sg_descriptor = request->sg_descriptors; 5517 5518 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5519 ctrl_info->max_sg_per_iu, &chained); 5520 5521 request->partial = chained; 5522 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5523 5524 out: 5525 put_unaligned_le16(iu_length, &request->header.iu_length); 5526 request->num_sg_descriptors = num_sg_in_iu; 5527 5528 return 0; 5529 } 5530 5531 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 5532 void *context) 5533 { 5534 struct scsi_cmnd *scmd; 5535 5536 scmd = io_request->scmd; 5537 pqi_free_io_request(io_request); 5538 scsi_dma_unmap(scmd); 5539 pqi_scsi_done(scmd); 5540 } 5541 5542 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, 5543 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5544 struct pqi_queue_group *queue_group, bool io_high_prio) 5545 { 5546 int rc; 5547 size_t cdb_length; 5548 struct pqi_io_request *io_request; 5549 
struct pqi_raid_path_request *request; 5550 5551 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5552 if (!io_request) 5553 return SCSI_MLQUEUE_HOST_BUSY; 5554 5555 io_request->io_complete_callback = pqi_raid_io_complete; 5556 io_request->scmd = scmd; 5557 5558 request = io_request->iu; 5559 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); 5560 5561 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5562 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5563 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5564 request->command_priority = io_high_prio; 5565 put_unaligned_le16(io_request->index, &request->request_id); 5566 request->error_index = request->request_id; 5567 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); 5568 request->ml_device_lun_number = (u8)scmd->device->lun; 5569 5570 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 5571 memcpy(request->cdb, scmd->cmnd, cdb_length); 5572 5573 switch (cdb_length) { 5574 case 6: 5575 case 10: 5576 case 12: 5577 case 16: 5578 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5579 break; 5580 case 20: 5581 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; 5582 break; 5583 case 24: 5584 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; 5585 break; 5586 case 28: 5587 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; 5588 break; 5589 case 32: 5590 default: 5591 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; 5592 break; 5593 } 5594 5595 switch (scmd->sc_data_direction) { 5596 case DMA_FROM_DEVICE: 5597 request->data_direction = SOP_READ_FLAG; 5598 break; 5599 case DMA_TO_DEVICE: 5600 request->data_direction = SOP_WRITE_FLAG; 5601 break; 5602 case DMA_NONE: 5603 request->data_direction = SOP_NO_DIRECTION_FLAG; 5604 break; 5605 case DMA_BIDIRECTIONAL: 5606 request->data_direction = SOP_BIDIRECTIONAL; 5607 break; 5608 default: 5609 dev_err(&ctrl_info->pci_dev->dev, 5610 "unknown data direction: %d\n", 5611 scmd->sc_data_direction); 5612 break; 5613 } 5614 5615 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5616 if (rc) { 5617 pqi_free_io_request(io_request); 5618 return SCSI_MLQUEUE_HOST_BUSY; 5619 } 5620 5621 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5622 5623 return 0; 5624 } 5625 5626 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5627 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5628 struct pqi_queue_group *queue_group) 5629 { 5630 bool io_high_prio; 5631 5632 io_high_prio = pqi_is_io_high_priority(device, scmd); 5633 5634 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); 5635 } 5636 5637 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5638 { 5639 struct scsi_cmnd *scmd; 5640 struct pqi_scsi_dev *device; 5641 struct pqi_ctrl_info *ctrl_info; 5642 5643 if (!io_request->raid_bypass) 5644 return false; 5645 5646 scmd = io_request->scmd; 5647 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5648 return false; 5649 if (host_byte(scmd->result) == DID_NO_CONNECT) 5650 return false; 5651 5652 device = scmd->device->hostdata; 5653 if (pqi_device_offline(device) || pqi_device_in_remove(device)) 5654 return false; 5655 5656 ctrl_info = shost_to_hba(scmd->device->host); 5657 if (pqi_ctrl_offline(ctrl_info)) 5658 return false; 5659 5660 return true; 5661 } 5662 5663 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5664 void *context) 
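/* Completion handler for AIO-path requests. If the firmware returned -EAGAIN, or this was a RAID-bypass attempt that failed but is still retryable, the command is marked for immediate retry; bumping this_residual makes the retried command ineligible for bypass, so it is resubmitted down the RAID path instead. */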
5665 { 5666 struct scsi_cmnd *scmd; 5667 5668 scmd = io_request->scmd; 5669 scsi_dma_unmap(scmd); 5670 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { 5671 set_host_byte(scmd, DID_IMM_RETRY); 5672 pqi_cmd_priv(scmd)->this_residual++; 5673 } 5674 5675 pqi_free_io_request(io_request); 5676 pqi_scsi_done(scmd); 5677 } 5678 5679 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5680 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5681 struct pqi_queue_group *queue_group) 5682 { 5683 bool io_high_prio; 5684 5685 io_high_prio = pqi_is_io_high_priority(device, scmd); 5686 5687 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5688 scmd->cmnd, scmd->cmd_len, queue_group, NULL, 5689 false, io_high_prio); 5690 } 5691 5692 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5693 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5694 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5695 struct pqi_encryption_info *encryption_info, bool raid_bypass, 5696 bool io_high_prio) 5697 { 5698 int rc; 5699 struct pqi_io_request *io_request; 5700 struct pqi_aio_path_request *request; 5701 5702 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5703 if (!io_request) 5704 return SCSI_MLQUEUE_HOST_BUSY; 5705 5706 io_request->io_complete_callback = pqi_aio_io_complete; 5707 io_request->scmd = scmd; 5708 io_request->raid_bypass = raid_bypass; 5709 5710 request = io_request->iu; 5711 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); 5712 5713 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5714 put_unaligned_le32(aio_handle, &request->nexus_id); 5715 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5716 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5717 request->command_priority = io_high_prio; 5718 put_unaligned_le16(io_request->index, &request->request_id); 5719 request->error_index = request->request_id; 5720 if (!raid_bypass && ctrl_info->multi_lun_device_supported) 5721 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); 5722 if (cdb_length > sizeof(request->cdb)) 5723 cdb_length = sizeof(request->cdb); 5724 request->cdb_length = cdb_length; 5725 memcpy(request->cdb, cdb, cdb_length); 5726 5727 switch (scmd->sc_data_direction) { 5728 case DMA_TO_DEVICE: 5729 request->data_direction = SOP_READ_FLAG; 5730 break; 5731 case DMA_FROM_DEVICE: 5732 request->data_direction = SOP_WRITE_FLAG; 5733 break; 5734 case DMA_NONE: 5735 request->data_direction = SOP_NO_DIRECTION_FLAG; 5736 break; 5737 case DMA_BIDIRECTIONAL: 5738 request->data_direction = SOP_BIDIRECTIONAL; 5739 break; 5740 default: 5741 dev_err(&ctrl_info->pci_dev->dev, 5742 "unknown data direction: %d\n", 5743 scmd->sc_data_direction); 5744 break; 5745 } 5746 5747 if (encryption_info) { 5748 request->encryption_enable = true; 5749 put_unaligned_le16(encryption_info->data_encryption_key_index, 5750 &request->data_encryption_key_index); 5751 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5752 &request->encrypt_tweak_lower); 5753 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5754 &request->encrypt_tweak_upper); 5755 } 5756 5757 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5758 if (rc) { 5759 pqi_free_io_request(io_request); 5760 return SCSI_MLQUEUE_HOST_BUSY; 5761 } 5762 5763 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5764 5765 return 0; 5766 } 5767 5768 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 5769 struct 
scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5770 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5771 struct pqi_scsi_dev_raid_map_data *rmd) 5772 { 5773 int rc; 5774 struct pqi_io_request *io_request; 5775 struct pqi_aio_r1_path_request *r1_request; 5776 5777 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5778 if (!io_request) 5779 return SCSI_MLQUEUE_HOST_BUSY; 5780 5781 io_request->io_complete_callback = pqi_aio_io_complete; 5782 io_request->scmd = scmd; 5783 io_request->raid_bypass = true; 5784 5785 r1_request = io_request->iu; 5786 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); 5787 5788 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; 5789 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); 5790 r1_request->num_drives = rmd->num_it_nexus_entries; 5791 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); 5792 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); 5793 if (rmd->num_it_nexus_entries == 3) 5794 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); 5795 5796 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); 5797 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5798 put_unaligned_le16(io_request->index, &r1_request->request_id); 5799 r1_request->error_index = r1_request->request_id; 5800 if (rmd->cdb_length > sizeof(r1_request->cdb)) 5801 rmd->cdb_length = sizeof(r1_request->cdb); 5802 r1_request->cdb_length = rmd->cdb_length; 5803 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); 5804 5805 /* The direction is always write. */ 5806 r1_request->data_direction = SOP_READ_FLAG; 5807 5808 if (encryption_info) { 5809 r1_request->encryption_enable = true; 5810 put_unaligned_le16(encryption_info->data_encryption_key_index, 5811 &r1_request->data_encryption_key_index); 5812 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5813 &r1_request->encrypt_tweak_lower); 5814 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5815 &r1_request->encrypt_tweak_upper); 5816 } 5817 5818 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); 5819 if (rc) { 5820 pqi_free_io_request(io_request); 5821 return SCSI_MLQUEUE_HOST_BUSY; 5822 } 5823 5824 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5825 5826 return 0; 5827 } 5828 5829 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 5830 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5831 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5832 struct pqi_scsi_dev_raid_map_data *rmd) 5833 { 5834 int rc; 5835 struct pqi_io_request *io_request; 5836 struct pqi_aio_r56_path_request *r56_request; 5837 5838 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5839 if (!io_request) 5840 return SCSI_MLQUEUE_HOST_BUSY; 5841 io_request->io_complete_callback = pqi_aio_io_complete; 5842 io_request->scmd = scmd; 5843 io_request->raid_bypass = true; 5844 5845 r56_request = io_request->iu; 5846 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); 5847 5848 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) 5849 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; 5850 else 5851 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; 5852 5853 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); 5854 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); 5855 
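/* RAID 5 uses only the P parity IT nexus set below; for RAID 6 the Q parity nexus and XOR multiplier are filled in as well. */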
put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); 5856 if (rmd->raid_level == SA_RAID_6) { 5857 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); 5858 r56_request->xor_multiplier = rmd->xor_mult; 5859 } 5860 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); 5861 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5862 put_unaligned_le64(rmd->row, &r56_request->row); 5863 5864 put_unaligned_le16(io_request->index, &r56_request->request_id); 5865 r56_request->error_index = r56_request->request_id; 5866 5867 if (rmd->cdb_length > sizeof(r56_request->cdb)) 5868 rmd->cdb_length = sizeof(r56_request->cdb); 5869 r56_request->cdb_length = rmd->cdb_length; 5870 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); 5871 5872 /* The direction is always write. */ 5873 r56_request->data_direction = SOP_READ_FLAG; 5874 5875 if (encryption_info) { 5876 r56_request->encryption_enable = true; 5877 put_unaligned_le16(encryption_info->data_encryption_key_index, 5878 &r56_request->data_encryption_key_index); 5879 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5880 &r56_request->encrypt_tweak_lower); 5881 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5882 &r56_request->encrypt_tweak_upper); 5883 } 5884 5885 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); 5886 if (rc) { 5887 pqi_free_io_request(io_request); 5888 return SCSI_MLQUEUE_HOST_BUSY; 5889 } 5890 5891 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5892 5893 return 0; 5894 } 5895 5896 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5897 struct scsi_cmnd *scmd) 5898 { 5899 /* 5900 * We are setting host_tagset = 1 during init. 5901 */ 5902 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); 5903 } 5904 5905 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) 5906 { 5907 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) 5908 return false; 5909 5910 return pqi_cmd_priv(scmd)->this_residual == 0; 5911 } 5912 5913 /* 5914 * This function gets called just before we hand the completed SCSI request 5915 * back to the SML. 5916 */ 5917 5918 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5919 { 5920 struct pqi_scsi_dev *device; 5921 struct completion *wait; 5922 5923 if (!scmd->device) { 5924 set_host_byte(scmd, DID_NO_CONNECT); 5925 return; 5926 } 5927 5928 device = scmd->device->hostdata; 5929 if (!device) { 5930 set_host_byte(scmd, DID_NO_CONNECT); 5931 return; 5932 } 5933 5934 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); 5935 5936 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); 5937 if (wait != PQI_NO_COMPLETION) 5938 complete(wait); 5939 } 5940 5941 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, 5942 struct scsi_cmnd *scmd) 5943 { 5944 u32 oldest_jiffies; 5945 u8 lru_index; 5946 int i; 5947 int rc; 5948 struct pqi_scsi_dev *device; 5949 struct pqi_stream_data *pqi_stream_data; 5950 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 5951 5952 if (!ctrl_info->enable_stream_detection) 5953 return false; 5954 5955 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 5956 if (rc) 5957 return false; 5958 5959 /* Check writes only. */ 5960 if (!rmd.is_write) 5961 return false; 5962 5963 device = scmd->device->hostdata; 5964 5965 /* Check for RAID 5/6 streams. 
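A detected stream causes the caller to skip the AIO bypass for this command, keeping sequential parity-RAID writes on the RAID path (likely so the firmware can coalesce them into full-stripe writes).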
*/ 5966 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) 5967 return false; 5968 5969 /* 5970 * If controller does not support AIO RAID{5,6} writes, need to send 5971 * requests down non-AIO path. 5972 */ 5973 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || 5974 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) 5975 return true; 5976 5977 lru_index = 0; 5978 oldest_jiffies = INT_MAX; 5979 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { 5980 pqi_stream_data = &device->stream_data[i]; 5981 /* 5982 * Check for adjacent request or request is within 5983 * the previous request. 5984 */ 5985 if ((pqi_stream_data->next_lba && 5986 rmd.first_block >= pqi_stream_data->next_lba) && 5987 rmd.first_block <= pqi_stream_data->next_lba + 5988 rmd.block_cnt) { 5989 pqi_stream_data->next_lba = rmd.first_block + 5990 rmd.block_cnt; 5991 pqi_stream_data->last_accessed = jiffies; 5992 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->write_stream_cnt++; 5993 return true; 5994 } 5995 5996 /* unused entry */ 5997 if (pqi_stream_data->last_accessed == 0) { 5998 lru_index = i; 5999 break; 6000 } 6001 6002 /* Find entry with oldest last accessed time. */ 6003 if (pqi_stream_data->last_accessed <= oldest_jiffies) { 6004 oldest_jiffies = pqi_stream_data->last_accessed; 6005 lru_index = i; 6006 } 6007 } 6008 6009 /* Set LRU entry. */ 6010 pqi_stream_data = &device->stream_data[lru_index]; 6011 pqi_stream_data->last_accessed = jiffies; 6012 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; 6013 6014 return false; 6015 } 6016 6017 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6018 { 6019 int rc; 6020 struct pqi_ctrl_info *ctrl_info; 6021 struct pqi_scsi_dev *device; 6022 u16 hw_queue; 6023 struct pqi_queue_group *queue_group; 6024 bool raid_bypassed; 6025 u8 lun; 6026 6027 scmd->host_scribble = PQI_NO_COMPLETION; 6028 6029 device = scmd->device->hostdata; 6030 6031 if (!device) { 6032 set_host_byte(scmd, DID_NO_CONNECT); 6033 pqi_scsi_done(scmd); 6034 return 0; 6035 } 6036 6037 lun = (u8)scmd->device->lun; 6038 6039 atomic_inc(&device->scsi_cmds_outstanding[lun]); 6040 6041 ctrl_info = shost_to_hba(shost); 6042 6043 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { 6044 set_host_byte(scmd, DID_NO_CONNECT); 6045 pqi_scsi_done(scmd); 6046 return 0; 6047 } 6048 6049 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { 6050 rc = SCSI_MLQUEUE_HOST_BUSY; 6051 goto out; 6052 } 6053 6054 /* 6055 * This is necessary because the SML doesn't zero out this field during 6056 * error recovery. 
6057 */ 6058 scmd->result = 0; 6059 6060 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 6061 queue_group = &ctrl_info->queue_groups[hw_queue]; 6062 6063 if (pqi_is_logical_device(device)) { 6064 raid_bypassed = false; 6065 if (device->raid_bypass_enabled && 6066 pqi_is_bypass_eligible_request(scmd) && 6067 !pqi_is_parity_write_stream(ctrl_info, scmd)) { 6068 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6069 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 6070 raid_bypassed = true; 6071 per_cpu_ptr(device->raid_io_stats, smp_processor_id())->raid_bypass_cnt++; 6072 } 6073 } 6074 if (!raid_bypassed) 6075 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6076 } else { 6077 if (device->aio_enabled) 6078 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6079 else 6080 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6081 } 6082 6083 out: 6084 if (rc) { 6085 scmd->host_scribble = NULL; 6086 atomic_dec(&device->scsi_cmds_outstanding[lun]); 6087 } 6088 6089 return rc; 6090 } 6091 6092 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 6093 { 6094 unsigned int i; 6095 unsigned int path; 6096 unsigned long flags; 6097 unsigned int queued_io_count; 6098 struct pqi_queue_group *queue_group; 6099 struct pqi_io_request *io_request; 6100 6101 queued_io_count = 0; 6102 6103 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6104 queue_group = &ctrl_info->queue_groups[i]; 6105 for (path = 0; path < 2; path++) { 6106 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 6107 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 6108 queued_io_count++; 6109 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 6110 } 6111 } 6112 6113 return queued_io_count; 6114 } 6115 6116 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 6117 { 6118 unsigned int i; 6119 unsigned int path; 6120 unsigned int nonempty_inbound_queue_count; 6121 struct pqi_queue_group *queue_group; 6122 pqi_index_t iq_pi; 6123 pqi_index_t iq_ci; 6124 6125 nonempty_inbound_queue_count = 0; 6126 6127 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6128 queue_group = &ctrl_info->queue_groups[i]; 6129 for (path = 0; path < 2; path++) { 6130 iq_pi = queue_group->iq_pi_copy[path]; 6131 iq_ci = readl(queue_group->iq_ci[path]); 6132 if (iq_ci != iq_pi) 6133 nonempty_inbound_queue_count++; 6134 } 6135 } 6136 6137 return nonempty_inbound_queue_count; 6138 } 6139 6140 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 6141 6142 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 6143 { 6144 unsigned long start_jiffies; 6145 unsigned long warning_timeout; 6146 unsigned int queued_io_count; 6147 unsigned int nonempty_inbound_queue_count; 6148 bool displayed_warning; 6149 6150 displayed_warning = false; 6151 start_jiffies = jiffies; 6152 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6153 6154 while (1) { 6155 queued_io_count = pqi_queued_io_count(ctrl_info); 6156 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 6157 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 6158 break; 6159 pqi_check_ctrl_health(ctrl_info); 6160 if (pqi_ctrl_offline(ctrl_info)) 6161 return -ENXIO; 6162 if (time_after(jiffies, warning_timeout)) { 6163 dev_warn(&ctrl_info->pci_dev->dev, 6164 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty 
inbound queue count: %u)\n", 6165 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 6166 displayed_warning = true; 6167 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6168 } 6169 usleep_range(1000, 2000); 6170 } 6171 6172 if (displayed_warning) 6173 dev_warn(&ctrl_info->pci_dev->dev, 6174 "queued I/O drained after waiting for %u seconds\n", 6175 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6176 6177 return 0; 6178 } 6179 6180 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 6181 struct pqi_scsi_dev *device, u8 lun) 6182 { 6183 unsigned int i; 6184 unsigned int path; 6185 struct pqi_queue_group *queue_group; 6186 unsigned long flags; 6187 struct pqi_io_request *io_request; 6188 struct pqi_io_request *next; 6189 struct scsi_cmnd *scmd; 6190 struct pqi_scsi_dev *scsi_device; 6191 6192 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6193 queue_group = &ctrl_info->queue_groups[i]; 6194 6195 for (path = 0; path < 2; path++) { 6196 spin_lock_irqsave( 6197 &queue_group->submit_lock[path], flags); 6198 6199 list_for_each_entry_safe(io_request, next, 6200 &queue_group->request_list[path], 6201 request_list_entry) { 6202 6203 scmd = io_request->scmd; 6204 if (!scmd) 6205 continue; 6206 6207 scsi_device = scmd->device->hostdata; 6208 6209 list_del(&io_request->request_list_entry); 6210 if (scsi_device == device && (u8)scmd->device->lun == lun) 6211 set_host_byte(scmd, DID_RESET); 6212 else 6213 set_host_byte(scmd, DID_REQUEUE); 6214 pqi_free_io_request(io_request); 6215 scsi_dma_unmap(scmd); 6216 pqi_scsi_done(scmd); 6217 } 6218 6219 spin_unlock_irqrestore( 6220 &queue_group->submit_lock[path], flags); 6221 } 6222 } 6223 } 6224 6225 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 6226 6227 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 6228 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) 6229 { 6230 int cmds_outstanding; 6231 unsigned long start_jiffies; 6232 unsigned long warning_timeout; 6233 unsigned long msecs_waiting; 6234 6235 start_jiffies = jiffies; 6236 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6237 6238 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { 6239 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { 6240 pqi_check_ctrl_health(ctrl_info); 6241 if (pqi_ctrl_offline(ctrl_info)) 6242 return -ENXIO; 6243 } 6244 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6245 if (msecs_waiting >= timeout_msecs) { 6246 dev_err(&ctrl_info->pci_dev->dev, 6247 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6248 ctrl_info->scsi_host->host_no, device->bus, device->target, 6249 lun, msecs_waiting / 1000, cmds_outstanding); 6250 return -ETIMEDOUT; 6251 } 6252 if (time_after(jiffies, warning_timeout)) { 6253 dev_warn(&ctrl_info->pci_dev->dev, 6254 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", 6255 ctrl_info->scsi_host->host_no, device->bus, device->target, 6256 lun, msecs_waiting / 1000, cmds_outstanding); 6257 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6258 } 6259 usleep_range(1000, 2000); 6260 } 6261 6262 return 0; 6263 } 6264 6265 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 6266 void *context) 6267 { 6268 struct completion *waiting = context; 6269 6270 complete(waiting); 6271 } 6272 6273 #define 
PQI_LUN_RESET_POLL_COMPLETION_SECS 10 6274 6275 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 6276 struct pqi_scsi_dev *device, u8 lun, struct completion *wait) 6277 { 6278 int rc; 6279 unsigned int wait_secs; 6280 int cmds_outstanding; 6281 6282 wait_secs = 0; 6283 6284 while (1) { 6285 if (wait_for_completion_io_timeout(wait, 6286 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { 6287 rc = 0; 6288 break; 6289 } 6290 6291 pqi_check_ctrl_health(ctrl_info); 6292 if (pqi_ctrl_offline(ctrl_info)) { 6293 rc = -ENXIO; 6294 break; 6295 } 6296 6297 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6298 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); 6299 dev_warn(&ctrl_info->pci_dev->dev, 6300 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 6301 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); 6302 } 6303 6304 return rc; 6305 } 6306 6307 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 6308 6309 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6310 { 6311 int rc; 6312 struct pqi_io_request *io_request; 6313 DECLARE_COMPLETION_ONSTACK(wait); 6314 struct pqi_task_management_request *request; 6315 6316 io_request = pqi_alloc_io_request(ctrl_info, NULL); 6317 io_request->io_complete_callback = pqi_lun_reset_complete; 6318 io_request->context = &wait; 6319 6320 request = io_request->iu; 6321 memset(request, 0, sizeof(*request)); 6322 6323 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 6324 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 6325 &request->header.iu_length); 6326 put_unaligned_le16(io_request->index, &request->request_id); 6327 memcpy(request->lun_number, device->scsi3addr, 6328 sizeof(request->lun_number)); 6329 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) 6330 request->ml_device_lun_number = lun; 6331 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 6332 if (ctrl_info->tmf_iu_timeout_supported) 6333 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); 6334 6335 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 6336 io_request); 6337 6338 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); 6339 if (rc == 0) 6340 rc = io_request->status; 6341 6342 pqi_free_io_request(io_request); 6343 6344 return rc; 6345 } 6346 6347 #define PQI_LUN_RESET_RETRIES 3 6348 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) 6349 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) 6350 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) 6351 6352 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6353 { 6354 int reset_rc; 6355 int wait_rc; 6356 unsigned int retries; 6357 unsigned long timeout_msecs; 6358 6359 for (retries = 0;;) { 6360 reset_rc = pqi_lun_reset(ctrl_info, device, lun); 6361 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) 6362 break; 6363 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 6364 } 6365 6366 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : 6367 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; 6368 6369 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); 6370 if (wait_rc && reset_rc == 0) 6371 reset_rc = wait_rc; 6372 6373 return reset_rc == 0 ? 
SUCCESS : FAILED; 6374 } 6375 6376 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6377 { 6378 int rc; 6379 6380 pqi_ctrl_block_requests(ctrl_info); 6381 pqi_ctrl_wait_until_quiesced(ctrl_info); 6382 pqi_fail_io_queued_for_device(ctrl_info, device, lun); 6383 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 6384 pqi_device_reset_start(device, lun); 6385 pqi_ctrl_unblock_requests(ctrl_info); 6386 if (rc) 6387 rc = FAILED; 6388 else 6389 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); 6390 pqi_device_reset_done(device, lun); 6391 6392 return rc; 6393 } 6394 6395 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) 6396 { 6397 int rc; 6398 6399 mutex_lock(&ctrl_info->lun_reset_mutex); 6400 6401 dev_err(&ctrl_info->pci_dev->dev, 6402 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", 6403 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); 6404 6405 pqi_check_ctrl_health(ctrl_info); 6406 if (pqi_ctrl_offline(ctrl_info)) 6407 rc = FAILED; 6408 else 6409 rc = pqi_device_reset(ctrl_info, device, lun); 6410 6411 dev_err(&ctrl_info->pci_dev->dev, 6412 "reset of scsi %d:%d:%d:%u: %s\n", 6413 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, 6414 rc == SUCCESS ? "SUCCESS" : "FAILED"); 6415 6416 mutex_unlock(&ctrl_info->lun_reset_mutex); 6417 6418 return rc; 6419 } 6420 6421 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 6422 { 6423 struct Scsi_Host *shost; 6424 struct pqi_ctrl_info *ctrl_info; 6425 struct pqi_scsi_dev *device; 6426 u8 scsi_opcode; 6427 6428 shost = scmd->device->host; 6429 ctrl_info = shost_to_hba(shost); 6430 device = scmd->device->hostdata; 6431 scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; 6432 6433 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); 6434 } 6435 6436 static void pqi_tmf_worker(struct work_struct *work) 6437 { 6438 struct pqi_tmf_work *tmf_work; 6439 struct scsi_cmnd *scmd; 6440 6441 tmf_work = container_of(work, struct pqi_tmf_work, work_struct); 6442 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); 6443 6444 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); 6445 } 6446 6447 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) 6448 { 6449 struct Scsi_Host *shost; 6450 struct pqi_ctrl_info *ctrl_info; 6451 struct pqi_scsi_dev *device; 6452 struct pqi_tmf_work *tmf_work; 6453 DECLARE_COMPLETION_ONSTACK(wait); 6454 6455 shost = scmd->device->host; 6456 ctrl_info = shost_to_hba(shost); 6457 device = scmd->device->hostdata; 6458 6459 dev_err(&ctrl_info->pci_dev->dev, 6460 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", 6461 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6462 6463 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { 6464 dev_err(&ctrl_info->pci_dev->dev, 6465 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", 6466 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6467 scmd->result = DID_RESET << 16; 6468 goto out; 6469 } 6470 6471 tmf_work = &device->tmf_work[scmd->device->lun]; 6472 6473 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { 6474 tmf_work->ctrl_info = ctrl_info; 6475 tmf_work->device = device; 6476 tmf_work->lun = (u8)scmd->device->lun; 6477 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; 6478 schedule_work(&tmf_work->work_struct); 6479 } 6480 6481 wait_for_completion(&wait); 6482 6483 dev_err(&ctrl_info->pci_dev->dev, 6484 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", 6485 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6486 6487 out: 6488 6489 return SUCCESS; 6490 } 6491 6492 static int pqi_sdev_init(struct scsi_device *sdev) 6493 { 6494 struct pqi_scsi_dev *device; 6495 unsigned long flags; 6496 struct pqi_ctrl_info *ctrl_info; 6497 struct scsi_target *starget; 6498 struct sas_rphy *rphy; 6499 6500 ctrl_info = shost_to_hba(sdev->host); 6501 6502 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6503 6504 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 6505 starget = scsi_target(sdev); 6506 rphy = target_to_rphy(starget); 6507 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6508 if (device) { 6509 if (device->target_lun_valid) { 6510 device->ignore_device = true; 6511 } else { 6512 device->target = sdev_id(sdev); 6513 device->lun = sdev->lun; 6514 device->target_lun_valid = true; 6515 } 6516 } 6517 } else { 6518 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 6519 sdev_id(sdev), sdev->lun); 6520 } 6521 6522 if (device) { 6523 sdev->hostdata = device; 6524 device->sdev = sdev; 6525 if (device->queue_depth) { 6526 device->advertised_queue_depth = device->queue_depth; 6527 scsi_change_queue_depth(sdev, 6528 device->advertised_queue_depth); 6529 } 6530 if (pqi_is_logical_device(device)) { 6531 pqi_disable_write_same(sdev); 6532 } else { 6533 sdev->allow_restart = 1; 6534 if (device->device_type == SA_DEVICE_TYPE_NVME) 6535 pqi_disable_write_same(sdev); 6536 } 6537 } 6538 6539 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6540 6541 return 0; 6542 } 6543 6544 static void 
pqi_map_queues(struct Scsi_Host *shost) 6545 { 6546 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6547 6548 if (!ctrl_info->disable_managed_interrupts) 6549 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 6550 &ctrl_info->pci_dev->dev, 0); 6551 else 6552 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); 6553 } 6554 6555 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6556 { 6557 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6558 } 6559 6560 static int pqi_sdev_configure(struct scsi_device *sdev, 6561 struct queue_limits *lim) 6562 { 6563 int rc = 0; 6564 struct pqi_scsi_dev *device; 6565 6566 device = sdev->hostdata; 6567 device->devtype = sdev->type; 6568 6569 if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6570 rc = -ENXIO; 6571 device->ignore_device = false; 6572 } 6573 6574 return rc; 6575 } 6576 6577 static void pqi_sdev_destroy(struct scsi_device *sdev) 6578 { 6579 struct pqi_ctrl_info *ctrl_info; 6580 struct pqi_scsi_dev *device; 6581 int mutex_acquired; 6582 unsigned long flags; 6583 6584 ctrl_info = shost_to_hba(sdev->host); 6585 6586 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 6587 if (!mutex_acquired) 6588 return; 6589 6590 device = sdev->hostdata; 6591 if (!device) { 6592 mutex_unlock(&ctrl_info->scan_mutex); 6593 return; 6594 } 6595 6596 device->lun_count--; 6597 if (device->lun_count > 0) { 6598 mutex_unlock(&ctrl_info->scan_mutex); 6599 return; 6600 } 6601 6602 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6603 list_del(&device->scsi_device_list_entry); 6604 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6605 6606 mutex_unlock(&ctrl_info->scan_mutex); 6607 6608 pqi_dev_info(ctrl_info, "removed", device); 6609 pqi_free_device(device); 6610 } 6611 6612 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6613 { 6614 struct pci_dev *pci_dev; 6615 u32 subsystem_vendor; 6616 u32 subsystem_device; 6617 cciss_pci_info_struct pci_info; 6618 6619 if (!arg) 6620 return -EINVAL; 6621 6622 pci_dev = ctrl_info->pci_dev; 6623 6624 pci_info.domain = pci_domain_nr(pci_dev->bus); 6625 pci_info.bus = pci_dev->bus->number; 6626 pci_info.dev_fn = pci_dev->devfn; 6627 subsystem_vendor = pci_dev->subsystem_vendor; 6628 subsystem_device = pci_dev->subsystem_device; 6629 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 6630 6631 if (copy_to_user(arg, &pci_info, sizeof(pci_info))) 6632 return -EFAULT; 6633 6634 return 0; 6635 } 6636 6637 static int pqi_getdrivver_ioctl(void __user *arg) 6638 { 6639 u32 version; 6640 6641 if (!arg) 6642 return -EINVAL; 6643 6644 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 6645 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 6646 6647 if (copy_to_user(arg, &version, sizeof(version))) 6648 return -EFAULT; 6649 6650 return 0; 6651 } 6652 6653 struct ciss_error_info { 6654 u8 scsi_status; 6655 int command_status; 6656 size_t sense_data_length; 6657 }; 6658 6659 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 6660 struct ciss_error_info *ciss_error_info) 6661 { 6662 int ciss_cmd_status; 6663 size_t sense_data_length; 6664 6665 switch (pqi_error_info->data_out_result) { 6666 case PQI_DATA_IN_OUT_GOOD: 6667 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 6668 break; 6669 case PQI_DATA_IN_OUT_UNDERFLOW: 6670 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 6671 break; 6672 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 6673 ciss_cmd_status 
= CISS_CMD_STATUS_DATA_OVERRUN; 6674 break; 6675 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 6676 case PQI_DATA_IN_OUT_BUFFER_ERROR: 6677 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 6678 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 6679 case PQI_DATA_IN_OUT_ERROR: 6680 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 6681 break; 6682 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 6683 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 6684 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 6685 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 6686 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 6687 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 6688 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 6689 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 6690 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 6691 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 6692 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 6693 break; 6694 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 6695 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 6696 break; 6697 case PQI_DATA_IN_OUT_ABORTED: 6698 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 6699 break; 6700 case PQI_DATA_IN_OUT_TIMEOUT: 6701 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 6702 break; 6703 default: 6704 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 6705 break; 6706 } 6707 6708 sense_data_length = 6709 get_unaligned_le16(&pqi_error_info->sense_data_length); 6710 if (sense_data_length == 0) 6711 sense_data_length = 6712 get_unaligned_le16(&pqi_error_info->response_data_length); 6713 if (sense_data_length) 6714 if (sense_data_length > sizeof(pqi_error_info->data)) 6715 sense_data_length = sizeof(pqi_error_info->data); 6716 6717 ciss_error_info->scsi_status = pqi_error_info->status; 6718 ciss_error_info->command_status = ciss_cmd_status; 6719 ciss_error_info->sense_data_length = sense_data_length; 6720 } 6721 6722 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6723 { 6724 int rc; 6725 char *kernel_buffer = NULL; 6726 u16 iu_length; 6727 size_t sense_data_length; 6728 IOCTL_Command_struct iocommand; 6729 struct pqi_raid_path_request request; 6730 struct pqi_raid_error_info pqi_error_info; 6731 struct ciss_error_info ciss_error_info; 6732 6733 if (pqi_ctrl_offline(ctrl_info)) 6734 return -ENXIO; 6735 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) 6736 return -EBUSY; 6737 if (!arg) 6738 return -EINVAL; 6739 if (!capable(CAP_SYS_RAWIO)) 6740 return -EPERM; 6741 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 6742 return -EFAULT; 6743 if (iocommand.buf_size < 1 && 6744 iocommand.Request.Type.Direction != XFER_NONE) 6745 return -EINVAL; 6746 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 6747 return -EINVAL; 6748 if (iocommand.Request.Type.Type != TYPE_CMD) 6749 return -EINVAL; 6750 6751 switch (iocommand.Request.Type.Direction) { 6752 case XFER_NONE: 6753 case XFER_WRITE: 6754 case XFER_READ: 6755 case XFER_READ | XFER_WRITE: 6756 break; 6757 default: 6758 return -EINVAL; 6759 } 6760 6761 if (iocommand.buf_size > 0) { 6762 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 6763 if (!kernel_buffer) 6764 return -ENOMEM; 6765 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6766 if (copy_from_user(kernel_buffer, iocommand.buf, 6767 iocommand.buf_size)) { 6768 rc = -EFAULT; 6769 goto out; 6770 } 6771 } else { 6772 memset(kernel_buffer, 0, iocommand.buf_size); 6773 } 6774 } 6775 6776 memset(&request, 0, sizeof(request)); 6777 6778 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6779 iu_length = 
offsetof(struct pqi_raid_path_request, sg_descriptors) - 6780 PQI_REQUEST_HEADER_LENGTH; 6781 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6782 sizeof(request.lun_number)); 6783 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6784 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6785 6786 switch (iocommand.Request.Type.Direction) { 6787 case XFER_NONE: 6788 request.data_direction = SOP_NO_DIRECTION_FLAG; 6789 break; 6790 case XFER_WRITE: 6791 request.data_direction = SOP_WRITE_FLAG; 6792 break; 6793 case XFER_READ: 6794 request.data_direction = SOP_READ_FLAG; 6795 break; 6796 case XFER_READ | XFER_WRITE: 6797 request.data_direction = SOP_BIDIRECTIONAL; 6798 break; 6799 } 6800 6801 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6802 6803 if (iocommand.buf_size > 0) { 6804 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6805 6806 rc = pqi_map_single(ctrl_info->pci_dev, 6807 &request.sg_descriptors[0], kernel_buffer, 6808 iocommand.buf_size, DMA_BIDIRECTIONAL); 6809 if (rc) 6810 goto out; 6811 6812 iu_length += sizeof(request.sg_descriptors[0]); 6813 } 6814 6815 put_unaligned_le16(iu_length, &request.header.iu_length); 6816 6817 if (ctrl_info->raid_iu_timeout_supported) 6818 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6819 6820 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6821 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); 6822 6823 if (iocommand.buf_size > 0) 6824 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6825 DMA_BIDIRECTIONAL); 6826 6827 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6828 6829 if (rc == 0) { 6830 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6831 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6832 iocommand.error_info.CommandStatus = 6833 ciss_error_info.command_status; 6834 sense_data_length = ciss_error_info.sense_data_length; 6835 if (sense_data_length) { 6836 if (sense_data_length > 6837 sizeof(iocommand.error_info.SenseInfo)) 6838 sense_data_length = 6839 sizeof(iocommand.error_info.SenseInfo); 6840 memcpy(iocommand.error_info.SenseInfo, 6841 pqi_error_info.data, sense_data_length); 6842 iocommand.error_info.SenseLen = sense_data_length; 6843 } 6844 } 6845 6846 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6847 rc = -EFAULT; 6848 goto out; 6849 } 6850 6851 if (rc == 0 && iocommand.buf_size > 0 && 6852 (iocommand.Request.Type.Direction & XFER_READ)) { 6853 if (copy_to_user(iocommand.buf, kernel_buffer, 6854 iocommand.buf_size)) { 6855 rc = -EFAULT; 6856 } 6857 } 6858 6859 out: 6860 kfree(kernel_buffer); 6861 6862 return rc; 6863 } 6864 6865 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6866 void __user *arg) 6867 { 6868 int rc; 6869 struct pqi_ctrl_info *ctrl_info; 6870 6871 ctrl_info = shost_to_hba(sdev->host); 6872 6873 switch (cmd) { 6874 case CCISS_DEREGDISK: 6875 case CCISS_REGNEWDISK: 6876 case CCISS_REGNEWD: 6877 rc = pqi_scan_scsi_devices(ctrl_info); 6878 break; 6879 case CCISS_GETPCIINFO: 6880 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6881 break; 6882 case CCISS_GETDRIVVER: 6883 rc = pqi_getdrivver_ioctl(arg); 6884 break; 6885 case CCISS_PASSTHRU: 6886 rc = pqi_passthru_ioctl(ctrl_info, arg); 6887 break; 6888 default: 6889 rc = -EINVAL; 6890 break; 6891 } 6892 6893 return rc; 6894 } 6895 6896 static ssize_t pqi_firmware_version_show(struct device *dev, 6897 struct device_attribute *attr, char *buffer) 6898 { 6899 struct Scsi_Host *shost; 6900 struct 
pqi_ctrl_info *ctrl_info; 6901 6902 shost = class_to_shost(dev); 6903 ctrl_info = shost_to_hba(shost); 6904 6905 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6906 } 6907 6908 static ssize_t pqi_serial_number_show(struct device *dev, 6909 struct device_attribute *attr, char *buffer) 6910 { 6911 struct Scsi_Host *shost; 6912 struct pqi_ctrl_info *ctrl_info; 6913 6914 shost = class_to_shost(dev); 6915 ctrl_info = shost_to_hba(shost); 6916 6917 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6918 } 6919 6920 static ssize_t pqi_model_show(struct device *dev, 6921 struct device_attribute *attr, char *buffer) 6922 { 6923 struct Scsi_Host *shost; 6924 struct pqi_ctrl_info *ctrl_info; 6925 6926 shost = class_to_shost(dev); 6927 ctrl_info = shost_to_hba(shost); 6928 6929 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6930 } 6931 6932 static ssize_t pqi_vendor_show(struct device *dev, 6933 struct device_attribute *attr, char *buffer) 6934 { 6935 struct Scsi_Host *shost; 6936 struct pqi_ctrl_info *ctrl_info; 6937 6938 shost = class_to_shost(dev); 6939 ctrl_info = shost_to_hba(shost); 6940 6941 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6942 } 6943 6944 static ssize_t pqi_host_rescan_store(struct device *dev, 6945 struct device_attribute *attr, const char *buffer, size_t count) 6946 { 6947 struct Scsi_Host *shost = class_to_shost(dev); 6948 6949 pqi_scan_start(shost); 6950 6951 return count; 6952 } 6953 6954 static ssize_t pqi_lockup_action_show(struct device *dev, 6955 struct device_attribute *attr, char *buffer) 6956 { 6957 int count = 0; 6958 unsigned int i; 6959 6960 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6961 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6962 count += scnprintf(buffer + count, PAGE_SIZE - count, 6963 "[%s] ", pqi_lockup_actions[i].name); 6964 else 6965 count += scnprintf(buffer + count, PAGE_SIZE - count, 6966 "%s ", pqi_lockup_actions[i].name); 6967 } 6968 6969 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6970 6971 return count; 6972 } 6973 6974 static ssize_t pqi_lockup_action_store(struct device *dev, 6975 struct device_attribute *attr, const char *buffer, size_t count) 6976 { 6977 unsigned int i; 6978 char *action_name; 6979 char action_name_buffer[32]; 6980 6981 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6982 action_name = strstrip(action_name_buffer); 6983 6984 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6985 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6986 pqi_lockup_action = pqi_lockup_actions[i].action; 6987 return count; 6988 } 6989 } 6990 6991 return -EINVAL; 6992 } 6993 6994 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, 6995 struct device_attribute *attr, char *buffer) 6996 { 6997 struct Scsi_Host *shost = class_to_shost(dev); 6998 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6999 7000 return scnprintf(buffer, 10, "%x\n", 7001 ctrl_info->enable_stream_detection); 7002 } 7003 7004 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, 7005 struct device_attribute *attr, const char *buffer, size_t count) 7006 { 7007 struct Scsi_Host *shost = class_to_shost(dev); 7008 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7009 u8 set_stream_detection = 0; 7010 7011 if (kstrtou8(buffer, 0, &set_stream_detection)) 7012 return -EINVAL; 7013 7014 if (set_stream_detection > 0) 7015 set_stream_detection = 1; 7016 7017 
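/* Any nonzero input is clamped to 1. With the usual sysfs layout this is toggled via e.g. 'echo 1 > /sys/class/scsi_host/hostN/enable_stream_detection'. */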
ctrl_info->enable_stream_detection = set_stream_detection; 7018 7019 return count; 7020 } 7021 7022 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, 7023 struct device_attribute *attr, char *buffer) 7024 { 7025 struct Scsi_Host *shost = class_to_shost(dev); 7026 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7027 7028 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); 7029 } 7030 7031 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, 7032 struct device_attribute *attr, const char *buffer, size_t count) 7033 { 7034 struct Scsi_Host *shost = class_to_shost(dev); 7035 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7036 u8 set_r5_writes = 0; 7037 7038 if (kstrtou8(buffer, 0, &set_r5_writes)) 7039 return -EINVAL; 7040 7041 if (set_r5_writes > 0) 7042 set_r5_writes = 1; 7043 7044 ctrl_info->enable_r5_writes = set_r5_writes; 7045 7046 return count; 7047 } 7048 7049 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, 7050 struct device_attribute *attr, char *buffer) 7051 { 7052 struct Scsi_Host *shost = class_to_shost(dev); 7053 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7054 7055 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); 7056 } 7057 7058 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, 7059 struct device_attribute *attr, const char *buffer, size_t count) 7060 { 7061 struct Scsi_Host *shost = class_to_shost(dev); 7062 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7063 u8 set_r6_writes = 0; 7064 7065 if (kstrtou8(buffer, 0, &set_r6_writes)) 7066 return -EINVAL; 7067 7068 if (set_r6_writes > 0) 7069 set_r6_writes = 1; 7070 7071 ctrl_info->enable_r6_writes = set_r6_writes; 7072 7073 return count; 7074 } 7075 7076 static DEVICE_STRING_ATTR_RO(driver_version, 0444, 7077 DRIVER_VERSION BUILD_TIMESTAMP); 7078 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 7079 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 7080 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 7081 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 7082 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 7083 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, 7084 pqi_lockup_action_store); 7085 static DEVICE_ATTR(enable_stream_detection, 0644, 7086 pqi_host_enable_stream_detection_show, 7087 pqi_host_enable_stream_detection_store); 7088 static DEVICE_ATTR(enable_r5_writes, 0644, 7089 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); 7090 static DEVICE_ATTR(enable_r6_writes, 0644, 7091 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 7092 7093 static struct attribute *pqi_shost_attrs[] = { 7094 &dev_attr_driver_version.attr.attr, 7095 &dev_attr_firmware_version.attr, 7096 &dev_attr_model.attr, 7097 &dev_attr_serial_number.attr, 7098 &dev_attr_vendor.attr, 7099 &dev_attr_rescan.attr, 7100 &dev_attr_lockup_action.attr, 7101 &dev_attr_enable_stream_detection.attr, 7102 &dev_attr_enable_r5_writes.attr, 7103 &dev_attr_enable_r6_writes.attr, 7104 NULL 7105 }; 7106 7107 ATTRIBUTE_GROUPS(pqi_shost); 7108 7109 static ssize_t pqi_unique_id_show(struct device *dev, 7110 struct device_attribute *attr, char *buffer) 7111 { 7112 struct pqi_ctrl_info *ctrl_info; 7113 struct scsi_device *sdev; 7114 struct pqi_scsi_dev *device; 7115 unsigned long flags; 7116 u8 unique_id[16]; 7117 7118 sdev = to_scsi_device(dev); 7119 ctrl_info = shost_to_hba(sdev->host); 7120 7121 if 
(pqi_ctrl_offline(ctrl_info)) 7122 return -ENODEV; 7123 7124 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7125 7126 device = sdev->hostdata; 7127 if (!device) { 7128 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7129 return -ENODEV; 7130 } 7131 7132 if (device->is_physical_device) 7133 memcpy(unique_id, device->wwid, sizeof(device->wwid)); 7134 else 7135 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 7136 7137 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7138 7139 return scnprintf(buffer, PAGE_SIZE, 7140 "%02X%02X%02X%02X%02X%02X%02X%02X" 7141 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 7142 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 7143 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 7144 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 7145 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 7146 } 7147 7148 static ssize_t pqi_lunid_show(struct device *dev, 7149 struct device_attribute *attr, char *buffer) 7150 { 7151 struct pqi_ctrl_info *ctrl_info; 7152 struct scsi_device *sdev; 7153 struct pqi_scsi_dev *device; 7154 unsigned long flags; 7155 u8 lunid[8]; 7156 7157 sdev = to_scsi_device(dev); 7158 ctrl_info = shost_to_hba(sdev->host); 7159 7160 if (pqi_ctrl_offline(ctrl_info)) 7161 return -ENODEV; 7162 7163 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7164 7165 device = sdev->hostdata; 7166 if (!device) { 7167 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7168 return -ENODEV; 7169 } 7170 7171 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 7172 7173 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7174 7175 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 7176 } 7177 7178 #define MAX_PATHS 8 7179 7180 static ssize_t pqi_path_info_show(struct device *dev, 7181 struct device_attribute *attr, char *buf) 7182 { 7183 struct pqi_ctrl_info *ctrl_info; 7184 struct scsi_device *sdev; 7185 struct pqi_scsi_dev *device; 7186 unsigned long flags; 7187 int i; 7188 int output_len = 0; 7189 u8 box; 7190 u8 bay; 7191 u8 path_map_index; 7192 char *active; 7193 u8 phys_connector[2]; 7194 7195 sdev = to_scsi_device(dev); 7196 ctrl_info = shost_to_hba(sdev->host); 7197 7198 if (pqi_ctrl_offline(ctrl_info)) 7199 return -ENODEV; 7200 7201 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7202 7203 device = sdev->hostdata; 7204 if (!device) { 7205 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7206 return -ENODEV; 7207 } 7208 7209 bay = device->bay; 7210 for (i = 0; i < MAX_PATHS; i++) { 7211 path_map_index = 1 << i; 7212 if (i == device->active_path_index) 7213 active = "Active"; 7214 else if (device->path_map & path_map_index) 7215 active = "Inactive"; 7216 else 7217 continue; 7218 7219 output_len += scnprintf(buf + output_len, 7220 PAGE_SIZE - output_len, 7221 "[%d:%d:%d:%d] %20.20s ", 7222 ctrl_info->scsi_host->host_no, 7223 device->bus, device->target, 7224 device->lun, 7225 scsi_device_type(device->devtype)); 7226 7227 if (device->devtype == TYPE_RAID || 7228 pqi_is_logical_device(device)) 7229 goto end_buffer; 7230 7231 memcpy(&phys_connector, &device->phys_connector[i], 7232 sizeof(phys_connector)); 7233 if (phys_connector[0] < '0') 7234 phys_connector[0] = '0'; 7235 if (phys_connector[1] < '0') 7236 phys_connector[1] = '0'; 7237 7238 output_len += scnprintf(buf + output_len, 7239 PAGE_SIZE - output_len, 7240 "PORT: %.2s ", phys_connector); 7241 7242 box = device->box[i]; 7243 if (box != 0 && box != 
0xFF) 7244 output_len += scnprintf(buf + output_len, 7245 PAGE_SIZE - output_len, 7246 "BOX: %hhu ", box); 7247 7248 if ((device->devtype == TYPE_DISK || 7249 device->devtype == TYPE_ZBC) && 7250 pqi_expose_device(device)) 7251 output_len += scnprintf(buf + output_len, 7252 PAGE_SIZE - output_len, 7253 "BAY: %hhu ", bay); 7254 7255 end_buffer: 7256 output_len += scnprintf(buf + output_len, 7257 PAGE_SIZE - output_len, 7258 "%s\n", active); 7259 } 7260 7261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7262 7263 return output_len; 7264 } 7265 7266 static ssize_t pqi_sas_address_show(struct device *dev, 7267 struct device_attribute *attr, char *buffer) 7268 { 7269 struct pqi_ctrl_info *ctrl_info; 7270 struct scsi_device *sdev; 7271 struct pqi_scsi_dev *device; 7272 unsigned long flags; 7273 u64 sas_address; 7274 7275 sdev = to_scsi_device(dev); 7276 ctrl_info = shost_to_hba(sdev->host); 7277 7278 if (pqi_ctrl_offline(ctrl_info)) 7279 return -ENODEV; 7280 7281 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7282 7283 device = sdev->hostdata; 7284 if (!device) { 7285 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7286 return -ENODEV; 7287 } 7288 7289 sas_address = device->sas_address; 7290 7291 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7292 7293 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 7294 } 7295 7296 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 7297 struct device_attribute *attr, char *buffer) 7298 { 7299 struct pqi_ctrl_info *ctrl_info; 7300 struct scsi_device *sdev; 7301 struct pqi_scsi_dev *device; 7302 unsigned long flags; 7303 7304 sdev = to_scsi_device(dev); 7305 ctrl_info = shost_to_hba(sdev->host); 7306 7307 if (pqi_ctrl_offline(ctrl_info)) 7308 return -ENODEV; 7309 7310 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7311 7312 device = sdev->hostdata; 7313 if (!device) { 7314 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7315 return -ENODEV; 7316 } 7317 7318 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 7319 buffer[1] = '\n'; 7320 buffer[2] = '\0'; 7321 7322 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7323 7324 return 2; 7325 } 7326 7327 static ssize_t pqi_raid_level_show(struct device *dev, 7328 struct device_attribute *attr, char *buffer) 7329 { 7330 struct pqi_ctrl_info *ctrl_info; 7331 struct scsi_device *sdev; 7332 struct pqi_scsi_dev *device; 7333 unsigned long flags; 7334 char *raid_level; 7335 7336 sdev = to_scsi_device(dev); 7337 ctrl_info = shost_to_hba(sdev->host); 7338 7339 if (pqi_ctrl_offline(ctrl_info)) 7340 return -ENODEV; 7341 7342 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7343 7344 device = sdev->hostdata; 7345 if (!device) { 7346 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7347 return -ENODEV; 7348 } 7349 7350 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 7351 raid_level = pqi_raid_level_to_string(device->raid_level); 7352 else 7353 raid_level = "N/A"; 7354 7355 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7356 7357 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 7358 } 7359 7360 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 7361 struct device_attribute *attr, char *buffer) 7362 { 7363 struct pqi_ctrl_info *ctrl_info; 7364 struct scsi_device *sdev; 7365 struct pqi_scsi_dev *device; 7366 unsigned long flags; 7367 u64 raid_bypass_cnt; 7368 int cpu; 7369 7370 sdev = to_scsi_device(dev); 7371 ctrl_info = shost_to_hba(sdev->host); 7372 7373 if (pqi_ctrl_offline(ctrl_info)) 7374 return -ENODEV; 7375 7376 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7377 7378 device = sdev->hostdata; 7379 if (!device) { 7380 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7381 return -ENODEV; 7382 } 7383 7384 raid_bypass_cnt = 0; 7385 7386 if (device->raid_io_stats) { 7387 for_each_online_cpu(cpu) { 7388 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; 7389 } 7390 } 7391 7392 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7393 7394 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); 7395 } 7396 7397 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, 7398 struct device_attribute *attr, char *buf) 7399 { 7400 struct pqi_ctrl_info *ctrl_info; 7401 struct scsi_device *sdev; 7402 struct pqi_scsi_dev *device; 7403 unsigned long flags; 7404 int output_len = 0; 7405 7406 sdev = to_scsi_device(dev); 7407 ctrl_info = shost_to_hba(sdev->host); 7408 7409 if (pqi_ctrl_offline(ctrl_info)) 7410 return -ENODEV; 7411 7412 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7413 7414 device = sdev->hostdata; 7415 if (!device) { 7416 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7417 return -ENODEV; 7418 } 7419 7420 output_len = snprintf(buf, PAGE_SIZE, "%d\n", 7421 device->ncq_prio_enable); 7422 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7423 7424 return output_len; 7425 } 7426 7427 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, 7428 struct device_attribute *attr, 7429 const char *buf, size_t count) 7430 { 7431 struct pqi_ctrl_info *ctrl_info; 7432 struct scsi_device *sdev; 7433 struct pqi_scsi_dev *device; 7434 unsigned long flags; 7435 u8 ncq_prio_enable = 0; 7436 7437 if (kstrtou8(buf, 0, &ncq_prio_enable)) 7438 return -EINVAL; 7439 7440 sdev = to_scsi_device(dev); 7441 ctrl_info = shost_to_hba(sdev->host); 7442 7443 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7444 7445 device = 
sdev->hostdata; 7446 7447 if (!device) { 7448 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7449 return -ENODEV; 7450 } 7451 7452 if (!device->ncq_prio_support) { 7453 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7454 return -EINVAL; 7455 } 7456 7457 device->ncq_prio_enable = ncq_prio_enable; 7458 7459 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7460 7461 return strlen(buf); 7462 } 7463 7464 static ssize_t pqi_numa_node_show(struct device *dev, 7465 struct device_attribute *attr, char *buffer) 7466 { 7467 struct scsi_device *sdev; 7468 struct pqi_ctrl_info *ctrl_info; 7469 7470 sdev = to_scsi_device(dev); 7471 ctrl_info = shost_to_hba(sdev->host); 7472 7473 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); 7474 } 7475 7476 static ssize_t pqi_write_stream_cnt_show(struct device *dev, 7477 struct device_attribute *attr, char *buffer) 7478 { 7479 struct pqi_ctrl_info *ctrl_info; 7480 struct scsi_device *sdev; 7481 struct pqi_scsi_dev *device; 7482 unsigned long flags; 7483 u64 write_stream_cnt; 7484 int cpu; 7485 7486 sdev = to_scsi_device(dev); 7487 ctrl_info = shost_to_hba(sdev->host); 7488 7489 if (pqi_ctrl_offline(ctrl_info)) 7490 return -ENODEV; 7491 7492 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7493 7494 device = sdev->hostdata; 7495 if (!device) { 7496 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7497 return -ENODEV; 7498 } 7499 7500 write_stream_cnt = 0; 7501 7502 if (device->raid_io_stats) { 7503 for_each_online_cpu(cpu) { 7504 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; 7505 } 7506 } 7507 7508 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7509 7510 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); 7511 } 7512 7513 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 7514 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 7515 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 7516 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 7517 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 7518 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7519 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7520 static DEVICE_ATTR(sas_ncq_prio_enable, 0644, 7521 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); 7522 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); 7523 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); 7524 7525 static struct attribute *pqi_sdev_attrs[] = { 7526 &dev_attr_lunid.attr, 7527 &dev_attr_unique_id.attr, 7528 &dev_attr_path_info.attr, 7529 &dev_attr_sas_address.attr, 7530 &dev_attr_ssd_smart_path_enabled.attr, 7531 &dev_attr_raid_level.attr, 7532 &dev_attr_raid_bypass_cnt.attr, 7533 &dev_attr_sas_ncq_prio_enable.attr, 7534 &dev_attr_numa_node.attr, 7535 &dev_attr_write_stream_cnt.attr, 7536 NULL 7537 }; 7538 7539 ATTRIBUTE_GROUPS(pqi_sdev); 7540 7541 static const struct scsi_host_template pqi_driver_template = { 7542 .module = THIS_MODULE, 7543 .name = DRIVER_NAME_SHORT, 7544 .proc_name = DRIVER_NAME_SHORT, 7545 .queuecommand = pqi_scsi_queue_command, 7546 .scan_start = pqi_scan_start, 7547 .scan_finished = pqi_scan_finished, 7548 .this_id = -1, 7549 .eh_device_reset_handler = pqi_eh_device_reset_handler, 7550 .eh_abort_handler = pqi_eh_abort_handler, 7551 .ioctl = pqi_ioctl, 7552 .sdev_init = pqi_sdev_init, 
7553 .sdev_configure = pqi_sdev_configure, 7554 .sdev_destroy = pqi_sdev_destroy, 7555 .map_queues = pqi_map_queues, 7556 .sdev_groups = pqi_sdev_groups, 7557 .shost_groups = pqi_shost_groups, 7558 .cmd_size = sizeof(struct pqi_cmd_priv), 7559 }; 7560 7561 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 7562 { 7563 int rc; 7564 struct Scsi_Host *shost; 7565 7566 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 7567 if (!shost) { 7568 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); 7569 return -ENOMEM; 7570 } 7571 7572 shost->io_port = 0; 7573 shost->n_io_port = 0; 7574 shost->this_id = -1; 7575 shost->max_channel = PQI_MAX_BUS; 7576 shost->max_cmd_len = MAX_COMMAND_SIZE; 7577 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; 7578 shost->max_id = ~0; 7579 shost->max_sectors = ctrl_info->max_sectors; 7580 shost->can_queue = ctrl_info->scsi_ml_can_queue; 7581 shost->cmd_per_lun = shost->can_queue; 7582 shost->sg_tablesize = ctrl_info->sg_tablesize; 7583 shost->transportt = pqi_sas_transport_template; 7584 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 7585 shost->unique_id = shost->irq; 7586 shost->nr_hw_queues = ctrl_info->num_queue_groups; 7587 shost->host_tagset = 1; 7588 shost->hostdata[0] = (unsigned long)ctrl_info; 7589 7590 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 7591 if (rc) { 7592 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); 7593 goto free_host; 7594 } 7595 7596 rc = pqi_add_sas_host(shost, ctrl_info); 7597 if (rc) { 7598 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); 7599 goto remove_host; 7600 } 7601 7602 ctrl_info->scsi_host = shost; 7603 7604 return 0; 7605 7606 remove_host: 7607 scsi_remove_host(shost); 7608 free_host: 7609 scsi_host_put(shost); 7610 7611 return rc; 7612 } 7613 7614 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 7615 { 7616 struct Scsi_Host *shost; 7617 7618 pqi_delete_sas_host(ctrl_info); 7619 7620 shost = ctrl_info->scsi_host; 7621 if (!shost) 7622 return; 7623 7624 scsi_remove_host(shost); 7625 scsi_host_put(shost); 7626 } 7627 7628 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 7629 { 7630 int rc = 0; 7631 struct pqi_device_registers __iomem *pqi_registers; 7632 unsigned long timeout; 7633 unsigned int timeout_msecs; 7634 union pqi_reset_register reset_reg; 7635 7636 pqi_registers = ctrl_info->pqi_registers; 7637 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 7638 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 7639 7640 while (1) { 7641 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 7642 reset_reg.all_bits = readl(&pqi_registers->device_reset); 7643 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 7644 break; 7645 if (!sis_is_firmware_running(ctrl_info)) { 7646 rc = -ENXIO; 7647 break; 7648 } 7649 if (time_after(jiffies, timeout)) { 7650 rc = -ETIMEDOUT; 7651 break; 7652 } 7653 } 7654 7655 return rc; 7656 } 7657 7658 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 7659 { 7660 int rc; 7661 union pqi_reset_register reset_reg; 7662 7663 if (ctrl_info->pqi_reset_quiesce_supported) { 7664 rc = sis_pqi_reset_quiesce(ctrl_info); 7665 if (rc) { 7666 dev_err(&ctrl_info->pci_dev->dev, 7667 "PQI reset failed during quiesce with error %d\n", rc); 7668 return rc; 7669 } 7670 } 7671 7672 reset_reg.all_bits = 0; 7673 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 7674 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 7675 7676 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 
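	/*
	 * The write above starts a hard PQI reset; the controller reports
	 * PQI_RESET_ACTION_COMPLETED in the device_reset register when it is
	 * done, which pqi_wait_for_pqi_reset_completion() polls for, bounded
	 * by the max_reset_timeout value read from the PQI registers.
	 */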
7677 7678 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 7679 if (rc) 7680 dev_err(&ctrl_info->pci_dev->dev, 7681 "PQI reset failed with error %d\n", rc); 7682 7683 return rc; 7684 } 7685 7686 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 7687 { 7688 int rc; 7689 struct bmic_sense_subsystem_info *sense_info; 7690 7691 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 7692 if (!sense_info) 7693 return -ENOMEM; 7694 7695 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 7696 if (rc) 7697 goto out; 7698 7699 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 7700 sizeof(sense_info->ctrl_serial_number)); 7701 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 7702 7703 out: 7704 kfree(sense_info); 7705 7706 return rc; 7707 } 7708 7709 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 7710 { 7711 int rc; 7712 struct bmic_identify_controller *identify; 7713 7714 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 7715 if (!identify) 7716 return -ENOMEM; 7717 7718 rc = pqi_identify_controller(ctrl_info, identify); 7719 if (rc) 7720 goto out; 7721 7722 if (get_unaligned_le32(&identify->extra_controller_flags) & 7723 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { 7724 memcpy(ctrl_info->firmware_version, 7725 identify->firmware_version_long, 7726 sizeof(identify->firmware_version_long)); 7727 } else { 7728 memcpy(ctrl_info->firmware_version, 7729 identify->firmware_version_short, 7730 sizeof(identify->firmware_version_short)); 7731 ctrl_info->firmware_version 7732 [sizeof(identify->firmware_version_short)] = '\0'; 7733 snprintf(ctrl_info->firmware_version + 7734 strlen(ctrl_info->firmware_version), 7735 sizeof(ctrl_info->firmware_version) - 7736 sizeof(identify->firmware_version_short), 7737 "-%u", 7738 get_unaligned_le16(&identify->firmware_build_number)); 7739 } 7740 7741 memcpy(ctrl_info->model, identify->product_id, 7742 sizeof(identify->product_id)); 7743 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 7744 7745 memcpy(ctrl_info->vendor, identify->vendor_id, 7746 sizeof(identify->vendor_id)); 7747 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 7748 7749 dev_info(&ctrl_info->pci_dev->dev, 7750 "Firmware version: %s\n", ctrl_info->firmware_version); 7751 7752 out: 7753 kfree(identify); 7754 7755 return rc; 7756 } 7757 7758 struct pqi_config_table_section_info { 7759 struct pqi_ctrl_info *ctrl_info; 7760 void *section; 7761 u32 section_offset; 7762 void __iomem *section_iomem_addr; 7763 }; 7764 7765 static inline bool pqi_is_firmware_feature_supported( 7766 struct pqi_config_table_firmware_features *firmware_features, 7767 unsigned int bit_position) 7768 { 7769 unsigned int byte_index; 7770 7771 byte_index = bit_position / BITS_PER_BYTE; 7772 7773 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 7774 return false; 7775 7776 return firmware_features->features_supported[byte_index] & 7777 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 7778 } 7779 7780 static inline bool pqi_is_firmware_feature_enabled( 7781 struct pqi_config_table_firmware_features *firmware_features, 7782 void __iomem *firmware_features_iomem_addr, 7783 unsigned int bit_position) 7784 { 7785 unsigned int byte_index; 7786 u8 __iomem *features_enabled_iomem_addr; 7787 7788 byte_index = (bit_position / BITS_PER_BYTE) + 7789 (le16_to_cpu(firmware_features->num_elements) * 2); 7790 7791 features_enabled_iomem_addr = firmware_features_iomem_addr + 7792 offsetof(struct pqi_config_table_firmware_features, 7793 features_supported) + byte_index; 7794 7795 return *((__force u8 *)features_enabled_iomem_addr) & 7796 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 7797 } 7798 7799 static inline void pqi_request_firmware_feature( 7800 struct pqi_config_table_firmware_features *firmware_features, 7801 unsigned int bit_position) 7802 { 7803 unsigned int byte_index; 7804 7805 byte_index = (bit_position / BITS_PER_BYTE) + 7806 le16_to_cpu(firmware_features->num_elements); 7807 7808 firmware_features->features_supported[byte_index] |= 7809 (1 << (bit_position % BITS_PER_BYTE)); 7810 } 7811 7812 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 7813 u16 first_section, u16 last_section) 7814 { 7815 struct pqi_vendor_general_request request; 7816 7817 memset(&request, 0, sizeof(request)); 7818 7819 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7820 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7821 &request.header.iu_length); 7822 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 7823 &request.function_code); 7824 put_unaligned_le16(first_section, 7825 &request.data.config_table_update.first_section); 7826 put_unaligned_le16(last_section, 7827 &request.data.config_table_update.last_section); 7828 7829 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 7830 } 7831 7832 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 7833 struct pqi_config_table_firmware_features *firmware_features, 7834 void __iomem *firmware_features_iomem_addr) 7835 { 7836 void *features_requested; 7837 void __iomem *features_requested_iomem_addr; 7838 void __iomem *host_max_known_feature_iomem_addr; 7839 7840 features_requested = firmware_features->features_supported + 7841 le16_to_cpu(firmware_features->num_elements); 7842 7843 features_requested_iomem_addr = firmware_features_iomem_addr + 7844 (features_requested - (void *)firmware_features); 7845 7846 memcpy_toio(features_requested_iomem_addr, features_requested, 7847 le16_to_cpu(firmware_features->num_elements)); 7848 7849 if (pqi_is_firmware_feature_supported(firmware_features, 7850 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { 7851 host_max_known_feature_iomem_addr = 7852 features_requested_iomem_addr + 7853 (le16_to_cpu(firmware_features->num_elements) * 2) + 7854 sizeof(__le16); 7855 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); 7856 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); 7857 } 7858 7859 return pqi_config_table_update(ctrl_info, 7860 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 7861 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 7862 } 7863 7864 struct pqi_firmware_feature { 7865 char *feature_name; 7866 unsigned int feature_bit; 7867 bool supported; 7868 bool enabled; 7869 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 7870 struct pqi_firmware_feature *firmware_feature); 7871 }; 7872 7873 static void 
pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 7874 struct pqi_firmware_feature *firmware_feature) 7875 { 7876 if (!firmware_feature->supported) { 7877 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 7878 firmware_feature->feature_name); 7879 return; 7880 } 7881 7882 if (firmware_feature->enabled) { 7883 dev_info(&ctrl_info->pci_dev->dev, 7884 "%s enabled\n", firmware_feature->feature_name); 7885 return; 7886 } 7887 7888 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 7889 firmware_feature->feature_name); 7890 } 7891 7892 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 7893 struct pqi_firmware_feature *firmware_feature) 7894 { 7895 switch (firmware_feature->feature_bit) { 7896 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: 7897 ctrl_info->enable_r1_writes = firmware_feature->enabled; 7898 break; 7899 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: 7900 ctrl_info->enable_r5_writes = firmware_feature->enabled; 7901 break; 7902 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: 7903 ctrl_info->enable_r6_writes = firmware_feature->enabled; 7904 break; 7905 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 7906 ctrl_info->soft_reset_handshake_supported = 7907 firmware_feature->enabled && 7908 pqi_read_soft_reset_status(ctrl_info); 7909 break; 7910 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 7911 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; 7912 break; 7913 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 7914 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; 7915 break; 7916 case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7917 ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7918 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7919 break; 7920 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7921 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7922 break; 7923 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: 7924 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; 7925 break; 7926 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: 7927 ctrl_info->ctrl_logging_supported = firmware_feature->enabled; 7928 break; 7929 } 7930 7931 pqi_firmware_feature_status(ctrl_info, firmware_feature); 7932 } 7933 7934 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 7935 struct pqi_firmware_feature *firmware_feature) 7936 { 7937 if (firmware_feature->feature_status) 7938 firmware_feature->feature_status(ctrl_info, firmware_feature); 7939 } 7940 7941 static DEFINE_MUTEX(pqi_firmware_features_mutex); 7942 7943 static struct pqi_firmware_feature pqi_firmware_features[] = { 7944 { 7945 .feature_name = "Online Firmware Activation", 7946 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 7947 .feature_status = pqi_firmware_feature_status, 7948 }, 7949 { 7950 .feature_name = "Serial Management Protocol", 7951 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 7952 .feature_status = pqi_firmware_feature_status, 7953 }, 7954 { 7955 .feature_name = "Maximum Known Feature", 7956 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, 7957 .feature_status = pqi_firmware_feature_status, 7958 }, 7959 { 7960 .feature_name = "RAID 0 Read Bypass", 7961 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, 7962 .feature_status = pqi_firmware_feature_status, 7963 }, 7964 { 7965 .feature_name = "RAID 1 Read Bypass", 7966 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, 7967 .feature_status = pqi_firmware_feature_status, 7968 }, 7969 { 7970 .feature_name = "RAID 
5 Read Bypass", 7971 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, 7972 .feature_status = pqi_firmware_feature_status, 7973 }, 7974 { 7975 .feature_name = "RAID 6 Read Bypass", 7976 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, 7977 .feature_status = pqi_firmware_feature_status, 7978 }, 7979 { 7980 .feature_name = "RAID 0 Write Bypass", 7981 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, 7982 .feature_status = pqi_firmware_feature_status, 7983 }, 7984 { 7985 .feature_name = "RAID 1 Write Bypass", 7986 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, 7987 .feature_status = pqi_ctrl_update_feature_flags, 7988 }, 7989 { 7990 .feature_name = "RAID 5 Write Bypass", 7991 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, 7992 .feature_status = pqi_ctrl_update_feature_flags, 7993 }, 7994 { 7995 .feature_name = "RAID 6 Write Bypass", 7996 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, 7997 .feature_status = pqi_ctrl_update_feature_flags, 7998 }, 7999 { 8000 .feature_name = "New Soft Reset Handshake", 8001 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 8002 .feature_status = pqi_ctrl_update_feature_flags, 8003 }, 8004 { 8005 .feature_name = "RAID IU Timeout", 8006 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 8007 .feature_status = pqi_ctrl_update_feature_flags, 8008 }, 8009 { 8010 .feature_name = "TMF IU Timeout", 8011 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 8012 .feature_status = pqi_ctrl_update_feature_flags, 8013 }, 8014 { 8015 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", 8016 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, 8017 .feature_status = pqi_firmware_feature_status, 8018 }, 8019 { 8020 .feature_name = "Firmware Triage", 8021 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, 8022 .feature_status = pqi_ctrl_update_feature_flags, 8023 }, 8024 { 8025 .feature_name = "RPL Extended Formats 4 and 5", 8026 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 8027 .feature_status = pqi_ctrl_update_feature_flags, 8028 }, 8029 { 8030 .feature_name = "Multi-LUN Target", 8031 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, 8032 .feature_status = pqi_ctrl_update_feature_flags, 8033 }, 8034 { 8035 .feature_name = "Controller Data Logging", 8036 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, 8037 .feature_status = pqi_ctrl_update_feature_flags, 8038 }, 8039 }; 8040 8041 static void pqi_process_firmware_features( 8042 struct pqi_config_table_section_info *section_info) 8043 { 8044 int rc; 8045 struct pqi_ctrl_info *ctrl_info; 8046 struct pqi_config_table_firmware_features *firmware_features; 8047 void __iomem *firmware_features_iomem_addr; 8048 unsigned int i; 8049 unsigned int num_features_supported; 8050 8051 ctrl_info = section_info->ctrl_info; 8052 firmware_features = section_info->section; 8053 firmware_features_iomem_addr = section_info->section_iomem_addr; 8054 8055 for (i = 0, num_features_supported = 0; 8056 i < ARRAY_SIZE(pqi_firmware_features); i++) { 8057 if (pqi_is_firmware_feature_supported(firmware_features, 8058 pqi_firmware_features[i].feature_bit)) { 8059 pqi_firmware_features[i].supported = true; 8060 num_features_supported++; 8061 } else { 8062 pqi_firmware_feature_update(ctrl_info, 8063 &pqi_firmware_features[i]); 8064 } 8065 } 8066 8067 if (num_features_supported == 0) 8068 return; 8069 8070 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8071 if (!pqi_firmware_features[i].supported) 8072 continue; 8073 
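	/*
	 * Set the "requested" bit for each feature that both this driver and
	 * the firmware support; the assembled request bitmap is then written
	 * to the controller by pqi_enable_firmware_features() below.
	 */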
pqi_request_firmware_feature(firmware_features, 8074 pqi_firmware_features[i].feature_bit); 8075 } 8076 8077 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 8078 firmware_features_iomem_addr); 8079 if (rc) { 8080 dev_err(&ctrl_info->pci_dev->dev, 8081 "failed to enable firmware features in PQI configuration table\n"); 8082 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8083 if (!pqi_firmware_features[i].supported) 8084 continue; 8085 pqi_firmware_feature_update(ctrl_info, 8086 &pqi_firmware_features[i]); 8087 } 8088 return; 8089 } 8090 8091 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8092 if (!pqi_firmware_features[i].supported) 8093 continue; 8094 if (pqi_is_firmware_feature_enabled(firmware_features, 8095 firmware_features_iomem_addr, 8096 pqi_firmware_features[i].feature_bit)) { 8097 pqi_firmware_features[i].enabled = true; 8098 } 8099 pqi_firmware_feature_update(ctrl_info, 8100 &pqi_firmware_features[i]); 8101 } 8102 } 8103 8104 static void pqi_init_firmware_features(void) 8105 { 8106 unsigned int i; 8107 8108 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8109 pqi_firmware_features[i].supported = false; 8110 pqi_firmware_features[i].enabled = false; 8111 } 8112 } 8113 8114 static void pqi_process_firmware_features_section( 8115 struct pqi_config_table_section_info *section_info) 8116 { 8117 mutex_lock(&pqi_firmware_features_mutex); 8118 pqi_init_firmware_features(); 8119 pqi_process_firmware_features(section_info); 8120 mutex_unlock(&pqi_firmware_features_mutex); 8121 } 8122 8123 /* 8124 * Reset all controller settings that can be initialized during the processing 8125 * of the PQI Configuration Table. 8126 */ 8127 8128 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) 8129 { 8130 ctrl_info->heartbeat_counter = NULL; 8131 ctrl_info->soft_reset_status = NULL; 8132 ctrl_info->soft_reset_handshake_supported = false; 8133 ctrl_info->enable_r1_writes = false; 8134 ctrl_info->enable_r5_writes = false; 8135 ctrl_info->enable_r6_writes = false; 8136 ctrl_info->raid_iu_timeout_supported = false; 8137 ctrl_info->tmf_iu_timeout_supported = false; 8138 ctrl_info->firmware_triage_supported = false; 8139 ctrl_info->rpl_extended_format_4_5_supported = false; 8140 ctrl_info->multi_lun_device_supported = false; 8141 ctrl_info->ctrl_logging_supported = false; 8142 } 8143 8144 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 8145 { 8146 u32 table_length; 8147 u32 section_offset; 8148 bool firmware_feature_section_present; 8149 void __iomem *table_iomem_addr; 8150 struct pqi_config_table *config_table; 8151 struct pqi_config_table_section_header *section; 8152 struct pqi_config_table_section_info section_info; 8153 struct pqi_config_table_section_info feature_section_info = {0}; 8154 8155 table_length = ctrl_info->config_table_length; 8156 if (table_length == 0) 8157 return 0; 8158 8159 config_table = kmalloc(table_length, GFP_KERNEL); 8160 if (!config_table) { 8161 dev_err(&ctrl_info->pci_dev->dev, 8162 "failed to allocate memory for PQI configuration table\n"); 8163 return -ENOMEM; 8164 } 8165 8166 /* 8167 * Copy the config table contents from I/O memory space into the 8168 * temporary buffer. 
8169 */ 8170 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; 8171 memcpy_fromio(config_table, table_iomem_addr, table_length); 8172 8173 firmware_feature_section_present = false; 8174 section_info.ctrl_info = ctrl_info; 8175 section_offset = get_unaligned_le32(&config_table->first_section_offset); 8176 8177 while (section_offset) { 8178 section = (void *)config_table + section_offset; 8179 8180 section_info.section = section; 8181 section_info.section_offset = section_offset; 8182 section_info.section_iomem_addr = table_iomem_addr + section_offset; 8183 8184 switch (get_unaligned_le16(&section->section_id)) { 8185 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 8186 firmware_feature_section_present = true; 8187 feature_section_info = section_info; 8188 break; 8189 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 8190 if (pqi_disable_heartbeat) 8191 dev_warn(&ctrl_info->pci_dev->dev, 8192 "heartbeat disabled by module parameter\n"); 8193 else 8194 ctrl_info->heartbeat_counter = 8195 table_iomem_addr + 8196 section_offset + 8197 offsetof(struct pqi_config_table_heartbeat, 8198 heartbeat_counter); 8199 break; 8200 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 8201 ctrl_info->soft_reset_status = 8202 table_iomem_addr + 8203 section_offset + 8204 offsetof(struct pqi_config_table_soft_reset, 8205 soft_reset_status); 8206 break; 8207 } 8208 8209 section_offset = get_unaligned_le16(&section->next_section_offset); 8210 } 8211 8212 /* 8213 * We process the firmware feature section after all other sections 8214 * have been processed so that the feature bit callbacks can take 8215 * into account the settings configured by other sections. 8216 */ 8217 if (firmware_feature_section_present) 8218 pqi_process_firmware_features_section(&feature_section_info); 8219 8220 kfree(config_table); 8221 8222 return 0; 8223 } 8224 8225 /* Switches the controller from PQI mode back into SIS mode. */ 8226 8227 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 8228 { 8229 int rc; 8230 8231 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 8232 rc = pqi_reset(ctrl_info); 8233 if (rc) 8234 return rc; 8235 rc = sis_reenable_sis_mode(ctrl_info); 8236 if (rc) { 8237 dev_err(&ctrl_info->pci_dev->dev, 8238 "re-enabling SIS mode failed with error %d\n", rc); 8239 return rc; 8240 } 8241 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8242 8243 return 0; 8244 } 8245 8246 /* 8247 * If the controller isn't already in SIS mode, this function forces it into 8248 * SIS mode.
8249 */ 8250 8251 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 8252 { 8253 if (!sis_is_firmware_running(ctrl_info)) 8254 return -ENXIO; 8255 8256 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 8257 return 0; 8258 8259 if (sis_is_kernel_up(ctrl_info)) { 8260 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8261 return 0; 8262 } 8263 8264 return pqi_revert_to_sis_mode(ctrl_info); 8265 } 8266 8267 static void pqi_perform_lockup_action(void) 8268 { 8269 switch (pqi_lockup_action) { 8270 case PANIC: 8271 panic("FATAL: Smart Family Controller lockup detected"); 8272 break; 8273 case REBOOT: 8274 emergency_restart(); 8275 break; 8276 case NONE: 8277 default: 8278 break; 8279 } 8280 } 8281 8282 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) 8283 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) 8284 8285 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 8286 { 8287 int rc; 8288 u32 product_id; 8289 8290 if (reset_devices) { 8291 if (pqi_is_fw_triage_supported(ctrl_info)) { 8292 rc = sis_wait_for_fw_triage_completion(ctrl_info); 8293 if (rc) 8294 return rc; 8295 } 8296 if (sis_is_ctrl_logging_supported(ctrl_info)) { 8297 sis_notify_kdump(ctrl_info); 8298 rc = sis_wait_for_ctrl_logging_completion(ctrl_info); 8299 if (rc) 8300 return rc; 8301 } 8302 sis_soft_reset(ctrl_info); 8303 ssleep(PQI_POST_RESET_DELAY_SECS); 8304 } else { 8305 rc = pqi_force_sis_mode(ctrl_info); 8306 if (rc) 8307 return rc; 8308 } 8309 8310 /* 8311 * Wait until the controller is ready to start accepting SIS 8312 * commands. 8313 */ 8314 rc = sis_wait_for_ctrl_ready(ctrl_info); 8315 if (rc) { 8316 if (reset_devices) { 8317 dev_err(&ctrl_info->pci_dev->dev, 8318 "kdump init failed with error %d\n", rc); 8319 pqi_lockup_action = REBOOT; 8320 pqi_perform_lockup_action(); 8321 } 8322 return rc; 8323 } 8324 8325 /* 8326 * Get the controller properties. This allows us to determine 8327 * whether or not it supports PQI mode. 8328 */ 8329 rc = sis_get_ctrl_properties(ctrl_info); 8330 if (rc) { 8331 dev_err(&ctrl_info->pci_dev->dev, 8332 "error obtaining controller properties\n"); 8333 return rc; 8334 } 8335 8336 rc = sis_get_pqi_capabilities(ctrl_info); 8337 if (rc) { 8338 dev_err(&ctrl_info->pci_dev->dev, 8339 "error obtaining controller capabilities\n"); 8340 return rc; 8341 } 8342 8343 product_id = sis_get_product_id(ctrl_info); 8344 ctrl_info->product_id = (u8)product_id; 8345 ctrl_info->product_revision = (u8)(product_id >> 8); 8346 8347 if (reset_devices) { 8348 if (ctrl_info->max_outstanding_requests > 8349 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 8350 ctrl_info->max_outstanding_requests = 8351 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 8352 } else { 8353 if (ctrl_info->max_outstanding_requests > 8354 PQI_MAX_OUTSTANDING_REQUESTS) 8355 ctrl_info->max_outstanding_requests = 8356 PQI_MAX_OUTSTANDING_REQUESTS; 8357 } 8358 8359 pqi_calculate_io_resources(ctrl_info); 8360 8361 rc = pqi_alloc_error_buffer(ctrl_info); 8362 if (rc) { 8363 dev_err(&ctrl_info->pci_dev->dev, 8364 "failed to allocate PQI error buffer\n"); 8365 return rc; 8366 } 8367 8368 /* 8369 * If the function we are about to call succeeds, the 8370 * controller will transition from legacy SIS mode 8371 * into PQI mode. 8372 */ 8373 rc = sis_init_base_struct_addr(ctrl_info); 8374 if (rc) { 8375 dev_err(&ctrl_info->pci_dev->dev, 8376 "error initializing PQI mode\n"); 8377 return rc; 8378 } 8379 8380 /* Wait for the controller to complete the SIS -> PQI transition. 
*/ 8381 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8382 if (rc) { 8383 dev_err(&ctrl_info->pci_dev->dev, 8384 "transition to PQI mode failed\n"); 8385 return rc; 8386 } 8387 8388 /* From here on, we are running in PQI mode. */ 8389 ctrl_info->pqi_mode_enabled = true; 8390 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8391 8392 rc = pqi_alloc_admin_queues(ctrl_info); 8393 if (rc) { 8394 dev_err(&ctrl_info->pci_dev->dev, 8395 "failed to allocate admin queues\n"); 8396 return rc; 8397 } 8398 8399 rc = pqi_create_admin_queues(ctrl_info); 8400 if (rc) { 8401 dev_err(&ctrl_info->pci_dev->dev, 8402 "error creating admin queues\n"); 8403 return rc; 8404 } 8405 8406 rc = pqi_report_device_capability(ctrl_info); 8407 if (rc) { 8408 dev_err(&ctrl_info->pci_dev->dev, 8409 "obtaining device capability failed\n"); 8410 return rc; 8411 } 8412 8413 rc = pqi_validate_device_capability(ctrl_info); 8414 if (rc) 8415 return rc; 8416 8417 pqi_calculate_queue_resources(ctrl_info); 8418 8419 rc = pqi_enable_msix_interrupts(ctrl_info); 8420 if (rc) 8421 return rc; 8422 8423 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 8424 ctrl_info->max_msix_vectors = 8425 ctrl_info->num_msix_vectors_enabled; 8426 pqi_calculate_queue_resources(ctrl_info); 8427 } 8428 8429 rc = pqi_alloc_io_resources(ctrl_info); 8430 if (rc) 8431 return rc; 8432 8433 rc = pqi_alloc_operational_queues(ctrl_info); 8434 if (rc) { 8435 dev_err(&ctrl_info->pci_dev->dev, 8436 "failed to allocate operational queues\n"); 8437 return rc; 8438 } 8439 8440 pqi_init_operational_queues(ctrl_info); 8441 8442 rc = pqi_create_queues(ctrl_info); 8443 if (rc) 8444 return rc; 8445 8446 rc = pqi_request_irqs(ctrl_info); 8447 if (rc) 8448 return rc; 8449 8450 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8451 8452 ctrl_info->controller_online = true; 8453 8454 rc = pqi_process_config_table(ctrl_info); 8455 if (rc) 8456 return rc; 8457 8458 pqi_start_heartbeat_timer(ctrl_info); 8459 8460 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8461 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8462 if (rc) { /* Supported features not returned correctly. */ 8463 dev_err(&ctrl_info->pci_dev->dev, 8464 "error obtaining advanced RAID bypass configuration\n"); 8465 return rc; 8466 } 8467 ctrl_info->ciss_report_log_flags |= 8468 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8469 } 8470 8471 rc = pqi_enable_events(ctrl_info); 8472 if (rc) { 8473 dev_err(&ctrl_info->pci_dev->dev, 8474 "error enabling events\n"); 8475 return rc; 8476 } 8477 8478 /* Register with the SCSI subsystem. 
*/ 8479 rc = pqi_register_scsi(ctrl_info); 8480 if (rc) 8481 return rc; 8482 8483 if (ctrl_info->ctrl_logging_supported && !reset_devices) { 8484 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); 8485 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8486 } 8487 8488 rc = pqi_get_ctrl_product_details(ctrl_info); 8489 if (rc) { 8490 dev_err(&ctrl_info->pci_dev->dev, 8491 "error obtaining product details\n"); 8492 return rc; 8493 } 8494 8495 rc = pqi_get_ctrl_serial_number(ctrl_info); 8496 if (rc) { 8497 dev_err(&ctrl_info->pci_dev->dev, 8498 "error obtaining ctrl serial number\n"); 8499 return rc; 8500 } 8501 8502 rc = pqi_set_diag_rescan(ctrl_info); 8503 if (rc) { 8504 dev_err(&ctrl_info->pci_dev->dev, 8505 "error enabling multi-lun rescan\n"); 8506 return rc; 8507 } 8508 8509 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8510 if (rc) { 8511 dev_err(&ctrl_info->pci_dev->dev, 8512 "error updating host wellness\n"); 8513 return rc; 8514 } 8515 8516 pqi_schedule_update_time_worker(ctrl_info); 8517 8518 pqi_scan_scsi_devices(ctrl_info); 8519 8520 return 0; 8521 } 8522 8523 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 8524 { 8525 unsigned int i; 8526 struct pqi_admin_queues *admin_queues; 8527 struct pqi_event_queue *event_queue; 8528 8529 admin_queues = &ctrl_info->admin_queues; 8530 admin_queues->iq_pi_copy = 0; 8531 admin_queues->oq_ci_copy = 0; 8532 writel(0, admin_queues->oq_pi); 8533 8534 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 8535 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 8536 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 8537 ctrl_info->queue_groups[i].oq_ci_copy = 0; 8538 8539 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 8540 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 8541 writel(0, ctrl_info->queue_groups[i].oq_pi); 8542 } 8543 8544 event_queue = &ctrl_info->event_queue; 8545 writel(0, event_queue->oq_pi); 8546 event_queue->oq_ci_copy = 0; 8547 } 8548 8549 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 8550 { 8551 int rc; 8552 8553 rc = pqi_force_sis_mode(ctrl_info); 8554 if (rc) 8555 return rc; 8556 8557 /* 8558 * Wait until the controller is ready to start accepting SIS 8559 * commands. 8560 */ 8561 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 8562 if (rc) 8563 return rc; 8564 8565 /* 8566 * Get the controller properties. This allows us to determine 8567 * whether or not it supports PQI mode. 8568 */ 8569 rc = sis_get_ctrl_properties(ctrl_info); 8570 if (rc) { 8571 dev_err(&ctrl_info->pci_dev->dev, 8572 "error obtaining controller properties\n"); 8573 return rc; 8574 } 8575 8576 rc = sis_get_pqi_capabilities(ctrl_info); 8577 if (rc) { 8578 dev_err(&ctrl_info->pci_dev->dev, 8579 "error obtaining controller capabilities\n"); 8580 return rc; 8581 } 8582 8583 /* 8584 * If the function we are about to call succeeds, the 8585 * controller will transition from legacy SIS mode 8586 * into PQI mode. 8587 */ 8588 rc = sis_init_base_struct_addr(ctrl_info); 8589 if (rc) { 8590 dev_err(&ctrl_info->pci_dev->dev, 8591 "error initializing PQI mode\n"); 8592 return rc; 8593 } 8594 8595 /* Wait for the controller to complete the SIS -> PQI transition. */ 8596 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8597 if (rc) { 8598 dev_err(&ctrl_info->pci_dev->dev, 8599 "transition to PQI mode failed\n"); 8600 return rc; 8601 } 8602 8603 /* From here on, we are running in PQI mode. 
*/ 8604 ctrl_info->pqi_mode_enabled = true; 8605 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8606 8607 pqi_reinit_queues(ctrl_info); 8608 8609 rc = pqi_create_admin_queues(ctrl_info); 8610 if (rc) { 8611 dev_err(&ctrl_info->pci_dev->dev, 8612 "error creating admin queues\n"); 8613 return rc; 8614 } 8615 8616 rc = pqi_create_queues(ctrl_info); 8617 if (rc) 8618 return rc; 8619 8620 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8621 8622 ctrl_info->controller_online = true; 8623 pqi_ctrl_unblock_requests(ctrl_info); 8624 8625 pqi_ctrl_reset_config(ctrl_info); 8626 8627 rc = pqi_process_config_table(ctrl_info); 8628 if (rc) 8629 return rc; 8630 8631 pqi_start_heartbeat_timer(ctrl_info); 8632 8633 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8634 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8635 if (rc) { 8636 dev_err(&ctrl_info->pci_dev->dev, 8637 "error obtaining advanced RAID bypass configuration\n"); 8638 return rc; 8639 } 8640 ctrl_info->ciss_report_log_flags |= 8641 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8642 } 8643 8644 rc = pqi_enable_events(ctrl_info); 8645 if (rc) { 8646 dev_err(&ctrl_info->pci_dev->dev, 8647 "error enabling events\n"); 8648 return rc; 8649 } 8650 8651 rc = pqi_get_ctrl_product_details(ctrl_info); 8652 if (rc) { 8653 dev_err(&ctrl_info->pci_dev->dev, 8654 "error obtaining product details\n"); 8655 return rc; 8656 } 8657 8658 rc = pqi_set_diag_rescan(ctrl_info); 8659 if (rc) { 8660 dev_err(&ctrl_info->pci_dev->dev, 8661 "error enabling multi-lun rescan\n"); 8662 return rc; 8663 } 8664 8665 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8666 if (rc) { 8667 dev_err(&ctrl_info->pci_dev->dev, 8668 "error updating host wellness\n"); 8669 return rc; 8670 } 8671 8672 if (pqi_ofa_in_progress(ctrl_info)) { 8673 pqi_ctrl_unblock_scan(ctrl_info); 8674 if (ctrl_info->ctrl_logging_supported) { 8675 if (!ctrl_info->ctrl_log_memory.host_memory) 8676 pqi_host_setup_buffer(ctrl_info, 8677 &ctrl_info->ctrl_log_memory, 8678 PQI_CTRL_LOG_TOTAL_SIZE, 8679 PQI_CTRL_LOG_MIN_SIZE); 8680 pqi_host_memory_update(ctrl_info, 8681 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8682 } else { 8683 if (ctrl_info->ctrl_log_memory.host_memory) 8684 pqi_host_free_buffer(ctrl_info, 8685 &ctrl_info->ctrl_log_memory); 8686 } 8687 } 8688 8689 pqi_scan_scsi_devices(ctrl_info); 8690 8691 return 0; 8692 } 8693 8694 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) 8695 { 8696 int rc; 8697 8698 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 8699 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 8700 8701 return pcibios_err_to_errno(rc); 8702 } 8703 8704 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 8705 { 8706 int rc; 8707 u64 mask; 8708 8709 rc = pci_enable_device(ctrl_info->pci_dev); 8710 if (rc) { 8711 dev_err(&ctrl_info->pci_dev->dev, 8712 "failed to enable PCI device\n"); 8713 return rc; 8714 } 8715 8716 if (sizeof(dma_addr_t) > 4) 8717 mask = DMA_BIT_MASK(64); 8718 else 8719 mask = DMA_BIT_MASK(32); 8720 8721 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 8722 if (rc) { 8723 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 8724 goto disable_device; 8725 } 8726 8727 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 8728 if (rc) { 8729 dev_err(&ctrl_info->pci_dev->dev, 8730 "failed to obtain PCI resources\n"); 8731 goto disable_device; 8732 } 8733 8734 ctrl_info->iomem_base = ioremap(pci_resource_start( 8735 ctrl_info->pci_dev, 0), 8736 
pci_resource_len(ctrl_info->pci_dev, 0)); 8737 if (!ctrl_info->iomem_base) { 8738 dev_err(&ctrl_info->pci_dev->dev, 8739 "failed to map memory for controller registers\n"); 8740 rc = -ENOMEM; 8741 goto release_regions; 8742 } 8743 8744 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 8745 8746 /* Increase the PCIe completion timeout. */ 8747 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 8748 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 8749 if (rc) { 8750 dev_err(&ctrl_info->pci_dev->dev, 8751 "failed to set PCIe completion timeout\n"); 8752 goto release_regions; 8753 } 8754 8755 /* Enable bus mastering. */ 8756 pci_set_master(ctrl_info->pci_dev); 8757 8758 ctrl_info->registers = ctrl_info->iomem_base; 8759 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 8760 8761 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 8762 8763 return 0; 8764 8765 release_regions: 8766 pci_release_regions(ctrl_info->pci_dev); 8767 disable_device: 8768 pci_disable_device(ctrl_info->pci_dev); 8769 8770 return rc; 8771 } 8772 8773 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 8774 { 8775 iounmap(ctrl_info->iomem_base); 8776 pci_release_regions(ctrl_info->pci_dev); 8777 if (pci_is_enabled(ctrl_info->pci_dev)) 8778 pci_disable_device(ctrl_info->pci_dev); 8779 pci_set_drvdata(ctrl_info->pci_dev, NULL); 8780 } 8781 8782 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 8783 { 8784 struct pqi_ctrl_info *ctrl_info; 8785 8786 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 8787 GFP_KERNEL, numa_node); 8788 if (!ctrl_info) 8789 return NULL; 8790 8791 mutex_init(&ctrl_info->scan_mutex); 8792 mutex_init(&ctrl_info->lun_reset_mutex); 8793 mutex_init(&ctrl_info->ofa_mutex); 8794 8795 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 8796 spin_lock_init(&ctrl_info->scsi_device_list_lock); 8797 8798 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 8799 atomic_set(&ctrl_info->num_interrupts, 0); 8800 8801 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 8802 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 8803 8804 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 8805 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 8806 8807 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); 8808 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); 8809 8810 sema_init(&ctrl_info->sync_request_sem, 8811 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 8812 init_waitqueue_head(&ctrl_info->block_requests_wait); 8813 8814 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 8815 ctrl_info->irq_mode = IRQ_MODE_NONE; 8816 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 8817 8818 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; 8819 ctrl_info->max_transfer_encrypted_sas_sata = 8820 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; 8821 ctrl_info->max_transfer_encrypted_nvme = 8822 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; 8823 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; 8824 ctrl_info->max_write_raid_1_10_2drive = ~0; 8825 ctrl_info->max_write_raid_1_10_3drive = ~0; 8826 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; 8827 8828 return ctrl_info; 8829 } 8830 8831 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 8832 { 8833 kfree(ctrl_info); 8834 } 8835 8836 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 8837 { 8838 pqi_free_irqs(ctrl_info); 8839 
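	/* Release the registered IRQ handlers before freeing the MSI-X vectors. */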
pqi_disable_msix_interrupts(ctrl_info); 8840 } 8841 8842 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 8843 { 8844 pqi_free_interrupts(ctrl_info); 8845 if (ctrl_info->queue_memory_base) 8846 dma_free_coherent(&ctrl_info->pci_dev->dev, 8847 ctrl_info->queue_memory_length, 8848 ctrl_info->queue_memory_base, 8849 ctrl_info->queue_memory_base_dma_handle); 8850 if (ctrl_info->admin_queue_memory_base) 8851 dma_free_coherent(&ctrl_info->pci_dev->dev, 8852 ctrl_info->admin_queue_memory_length, 8853 ctrl_info->admin_queue_memory_base, 8854 ctrl_info->admin_queue_memory_base_dma_handle); 8855 pqi_free_all_io_requests(ctrl_info); 8856 if (ctrl_info->error_buffer) 8857 dma_free_coherent(&ctrl_info->pci_dev->dev, 8858 ctrl_info->error_buffer_length, 8859 ctrl_info->error_buffer, 8860 ctrl_info->error_buffer_dma_handle); 8861 if (ctrl_info->iomem_base) 8862 pqi_cleanup_pci_init(ctrl_info); 8863 pqi_free_ctrl_info(ctrl_info); 8864 } 8865 8866 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 8867 { 8868 ctrl_info->controller_online = false; 8869 pqi_stop_heartbeat_timer(ctrl_info); 8870 pqi_ctrl_block_requests(ctrl_info); 8871 pqi_cancel_rescan_worker(ctrl_info); 8872 pqi_cancel_update_time_worker(ctrl_info); 8873 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { 8874 pqi_fail_all_outstanding_requests(ctrl_info); 8875 ctrl_info->pqi_mode_enabled = false; 8876 } 8877 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); 8878 pqi_unregister_scsi(ctrl_info); 8879 if (ctrl_info->pqi_mode_enabled) 8880 pqi_revert_to_sis_mode(ctrl_info); 8881 pqi_free_ctrl_resources(ctrl_info); 8882 } 8883 8884 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 8885 { 8886 pqi_ctrl_block_scan(ctrl_info); 8887 pqi_scsi_block_requests(ctrl_info); 8888 pqi_ctrl_block_device_reset(ctrl_info); 8889 pqi_ctrl_block_requests(ctrl_info); 8890 pqi_ctrl_wait_until_quiesced(ctrl_info); 8891 pqi_stop_heartbeat_timer(ctrl_info); 8892 } 8893 8894 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 8895 { 8896 pqi_start_heartbeat_timer(ctrl_info); 8897 pqi_ctrl_unblock_requests(ctrl_info); 8898 pqi_ctrl_unblock_device_reset(ctrl_info); 8899 pqi_scsi_unblock_requests(ctrl_info); 8900 pqi_ctrl_unblock_scan(ctrl_info); 8901 } 8902 8903 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) 8904 { 8905 ssleep(delay_secs); 8906 8907 return pqi_ctrl_init_resume(ctrl_info); 8908 } 8909 8910 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, 8911 struct pqi_host_memory_descriptor *host_memory_descriptor, 8912 u32 total_size, u32 chunk_size) 8913 { 8914 int i; 8915 u32 sg_count; 8916 struct device *dev; 8917 struct pqi_host_memory *host_memory; 8918 struct pqi_sg_descriptor *mem_descriptor; 8919 dma_addr_t dma_handle; 8920 8921 sg_count = DIV_ROUND_UP(total_size, chunk_size); 8922 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) 8923 goto out; 8924 8925 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); 8926 if (!host_memory_descriptor->host_chunk_virt_address) 8927 goto out; 8928 8929 dev = &ctrl_info->pci_dev->dev; 8930 host_memory = host_memory_descriptor->host_memory; 8931 8932 for (i = 0; i < sg_count; i++) { 8933 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); 8934 if (!host_memory_descriptor->host_chunk_virt_address[i]) 8935 goto out_free_chunks; 8936 mem_descriptor = 
&host_memory->sg_descriptor[i]; 8937 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); 8938 put_unaligned_le32(chunk_size, &mem_descriptor->length); 8939 } 8940 8941 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 8942 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); 8943 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); 8944 8945 return 0; 8946 8947 out_free_chunks: 8948 while (--i >= 0) { 8949 mem_descriptor = &host_memory->sg_descriptor[i]; 8950 dma_free_coherent(dev, chunk_size, 8951 host_memory_descriptor->host_chunk_virt_address[i], 8952 get_unaligned_le64(&mem_descriptor->address)); 8953 } 8954 kfree(host_memory_descriptor->host_chunk_virt_address); 8955 out: 8956 return -ENOMEM; 8957 } 8958 8959 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, 8960 struct pqi_host_memory_descriptor *host_memory_descriptor, 8961 u32 total_required_size, u32 min_required_size) 8962 { 8963 u32 chunk_size; 8964 u32 min_chunk_size; 8965 8966 if (total_required_size == 0 || min_required_size == 0) 8967 return 0; 8968 8969 total_required_size = PAGE_ALIGN(total_required_size); 8970 min_required_size = PAGE_ALIGN(min_required_size); 8971 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); 8972 min_chunk_size = PAGE_ALIGN(min_chunk_size); 8973 8974 while (total_required_size >= min_required_size) { 8975 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { 8976 if (pqi_host_alloc_mem(ctrl_info, 8977 host_memory_descriptor, total_required_size, 8978 chunk_size) == 0) 8979 return 0; 8980 chunk_size /= 2; 8981 chunk_size = PAGE_ALIGN(chunk_size); 8982 } 8983 total_required_size /= 2; 8984 total_required_size = PAGE_ALIGN(total_required_size); 8985 } 8986 8987 return -ENOMEM; 8988 } 8989 8990 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, 8991 struct pqi_host_memory_descriptor *host_memory_descriptor, 8992 u32 total_size, u32 min_size) 8993 { 8994 struct device *dev; 8995 struct pqi_host_memory *host_memory; 8996 8997 dev = &ctrl_info->pci_dev->dev; 8998 8999 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), 9000 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); 9001 if (!host_memory) 9002 return; 9003 9004 host_memory_descriptor->host_memory = host_memory; 9005 9006 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, 9007 total_size, min_size) < 0) { 9008 dev_err(dev, "failed to allocate firmware usable host buffer\n"); 9009 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9010 host_memory_descriptor->host_memory_dma_handle); 9011 host_memory_descriptor->host_memory = NULL; 9012 return; 9013 } 9014 } 9015 9016 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, 9017 struct pqi_host_memory_descriptor *host_memory_descriptor) 9018 { 9019 unsigned int i; 9020 struct device *dev; 9021 struct pqi_host_memory *host_memory; 9022 struct pqi_sg_descriptor *mem_descriptor; 9023 unsigned int num_memory_descriptors; 9024 9025 host_memory = host_memory_descriptor->host_memory; 9026 if (!host_memory) 9027 return; 9028 9029 dev = &ctrl_info->pci_dev->dev; 9030 9031 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) 9032 goto out; 9033 9034 mem_descriptor = host_memory->sg_descriptor; 9035 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); 9036 9037 for (i = 0; i < num_memory_descriptors; i++) { 9038 dma_free_coherent(dev, 9039 get_unaligned_le32(&mem_descriptor[i].length), 9040 
host_memory_descriptor->host_chunk_virt_address[i], 9041 get_unaligned_le64(&mem_descriptor[i].address)); 9042 } 9043 kfree(host_memory_descriptor->host_chunk_virt_address); 9044 9045 out: 9046 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9047 host_memory_descriptor->host_memory_dma_handle); 9048 host_memory_descriptor->host_memory = NULL; 9049 } 9050 9051 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, 9052 struct pqi_host_memory_descriptor *host_memory_descriptor, 9053 u16 function_code) 9054 { 9055 u32 buffer_length; 9056 struct pqi_vendor_general_request request; 9057 struct pqi_host_memory *host_memory; 9058 9059 memset(&request, 0, sizeof(request)); 9060 9061 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 9062 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 9063 put_unaligned_le16(function_code, &request.function_code); 9064 9065 host_memory = host_memory_descriptor->host_memory; 9066 9067 if (host_memory) { 9068 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); 9069 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); 9070 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); 9071 9072 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { 9073 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); 9074 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); 9075 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { 9076 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); 9077 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); 9078 } 9079 } 9080 9081 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 9082 } 9083 9084 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 9085 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 9086 .status = SAM_STAT_CHECK_CONDITION, 9087 }; 9088 9089 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 9090 { 9091 unsigned int i; 9092 struct pqi_io_request *io_request; 9093 struct scsi_cmnd *scmd; 9094 struct scsi_device *sdev; 9095 9096 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9097 io_request = &ctrl_info->io_request_pool[i]; 9098 if (atomic_read(&io_request->refcount) == 0) 9099 continue; 9100 9101 scmd = io_request->scmd; 9102 if (scmd) { 9103 sdev = scmd->device; 9104 if (!sdev || !scsi_device_online(sdev)) { 9105 pqi_free_io_request(io_request); 9106 continue; 9107 } else { 9108 set_host_byte(scmd, DID_NO_CONNECT); 9109 } 9110 } else { 9111 io_request->status = -ENXIO; 9112 io_request->error_info = 9113 &pqi_ctrl_offline_raid_error_info; 9114 } 9115 9116 io_request->io_complete_callback(io_request, 9117 io_request->context); 9118 } 9119 } 9120 9121 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 9122 { 9123 pqi_perform_lockup_action(); 9124 pqi_stop_heartbeat_timer(ctrl_info); 9125 pqi_free_interrupts(ctrl_info); 9126 pqi_cancel_rescan_worker(ctrl_info); 9127 pqi_cancel_update_time_worker(ctrl_info); 9128 pqi_ctrl_wait_until_quiesced(ctrl_info); 9129 pqi_fail_all_outstanding_requests(ctrl_info); 9130 pqi_ctrl_unblock_requests(ctrl_info); 9131 } 9132 9133 static void pqi_ctrl_offline_worker(struct work_struct *work) 9134 { 9135 struct 
pqi_ctrl_info *ctrl_info; 9136 9137 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 9138 pqi_take_ctrl_offline_deferred(ctrl_info); 9139 } 9140 9141 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9142 { 9143 char *string; 9144 9145 switch (ctrl_shutdown_reason) { 9146 case PQI_IQ_NOT_DRAINED_TIMEOUT: 9147 string = "inbound queue not drained timeout"; 9148 break; 9149 case PQI_LUN_RESET_TIMEOUT: 9150 string = "LUN reset timeout"; 9151 break; 9152 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: 9153 string = "I/O pending timeout after LUN reset"; 9154 break; 9155 case PQI_NO_HEARTBEAT: 9156 string = "no controller heartbeat detected"; 9157 break; 9158 case PQI_FIRMWARE_KERNEL_NOT_UP: 9159 string = "firmware kernel not ready"; 9160 break; 9161 case PQI_OFA_RESPONSE_TIMEOUT: 9162 string = "OFA response timeout"; 9163 break; 9164 case PQI_INVALID_REQ_ID: 9165 string = "invalid request ID"; 9166 break; 9167 case PQI_UNMATCHED_REQ_ID: 9168 string = "unmatched request ID"; 9169 break; 9170 case PQI_IO_PI_OUT_OF_RANGE: 9171 string = "I/O queue producer index out of range"; 9172 break; 9173 case PQI_EVENT_PI_OUT_OF_RANGE: 9174 string = "event queue producer index out of range"; 9175 break; 9176 case PQI_UNEXPECTED_IU_TYPE: 9177 string = "unexpected IU type"; 9178 break; 9179 default: 9180 string = "unknown reason"; 9181 break; 9182 } 9183 9184 return string; 9185 } 9186 9187 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 9188 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9189 { 9190 if (!ctrl_info->controller_online) 9191 return; 9192 9193 ctrl_info->controller_online = false; 9194 ctrl_info->pqi_mode_enabled = false; 9195 pqi_ctrl_block_requests(ctrl_info); 9196 if (!pqi_disable_ctrl_shutdown) 9197 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 9198 pci_disable_device(ctrl_info->pci_dev); 9199 dev_err(&ctrl_info->pci_dev->dev, 9200 "controller offline: reason code 0x%x (%s)\n", 9201 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); 9202 schedule_work(&ctrl_info->ctrl_offline_work); 9203 } 9204 9205 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 9206 const struct pci_device_id *id) 9207 { 9208 char *ctrl_description; 9209 9210 if (id->driver_data) 9211 ctrl_description = (char *)id->driver_data; 9212 else 9213 ctrl_description = "Microchip Smart Family Controller"; 9214 9215 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 9216 } 9217 9218 static int pqi_pci_probe(struct pci_dev *pci_dev, 9219 const struct pci_device_id *id) 9220 { 9221 int rc; 9222 int node; 9223 struct pqi_ctrl_info *ctrl_info; 9224 9225 pqi_print_ctrl_info(pci_dev, id); 9226 9227 if (pqi_disable_device_id_wildcards && 9228 id->subvendor == PCI_ANY_ID && 9229 id->subdevice == PCI_ANY_ID) { 9230 dev_warn(&pci_dev->dev, 9231 "controller not probed because device ID wildcards are disabled\n"); 9232 return -ENODEV; 9233 } 9234 9235 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 9236 dev_warn(&pci_dev->dev, 9237 "controller device ID matched using wildcards\n"); 9238 9239 node = dev_to_node(&pci_dev->dev); 9240 if (node == NUMA_NO_NODE) { 9241 node = cpu_to_node(0); 9242 if (node == NUMA_NO_NODE) 9243 node = 0; 9244 set_dev_node(&pci_dev->dev, node); 9245 } 9246 9247 ctrl_info = pqi_alloc_ctrl_info(node); 9248 if (!ctrl_info) { 9249 dev_err(&pci_dev->dev, 9250 "failed to allocate controller info block\n"); 9251 return -ENOMEM; 9252 } 9253 ctrl_info->numa_node = node; 
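	/*
	 * The controller info block was allocated on the NUMA node closest to
	 * the PCI device (with a fallback to CPU 0's node above); record the
	 * node so it can be reported via the numa_node sysfs attribute.
	 */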
9254 9255 ctrl_info->pci_dev = pci_dev; 9256 9257 rc = pqi_pci_init(ctrl_info); 9258 if (rc) 9259 goto error; 9260 9261 rc = pqi_ctrl_init(ctrl_info); 9262 if (rc) 9263 goto error; 9264 9265 return 0; 9266 9267 error: 9268 pqi_remove_ctrl(ctrl_info); 9269 9270 return rc; 9271 } 9272 9273 static void pqi_pci_remove(struct pci_dev *pci_dev) 9274 { 9275 struct pqi_ctrl_info *ctrl_info; 9276 u16 vendor_id; 9277 int rc; 9278 9279 ctrl_info = pci_get_drvdata(pci_dev); 9280 if (!ctrl_info) 9281 return; 9282 9283 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); 9284 if (vendor_id == 0xffff) 9285 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; 9286 else 9287 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; 9288 9289 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { 9290 rc = pqi_flush_cache(ctrl_info, RESTART); 9291 if (rc) 9292 dev_err(&pci_dev->dev, 9293 "unable to flush controller cache during remove\n"); 9294 } 9295 9296 pqi_remove_ctrl(ctrl_info); 9297 } 9298 9299 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) 9300 { 9301 unsigned int i; 9302 struct pqi_io_request *io_request; 9303 struct scsi_cmnd *scmd; 9304 9305 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9306 io_request = &ctrl_info->io_request_pool[i]; 9307 if (atomic_read(&io_request->refcount) == 0) 9308 continue; 9309 scmd = io_request->scmd; 9310 WARN_ON(scmd != NULL); /* IO command from SML */ 9311 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ 9312 } 9313 } 9314 9315 static void pqi_shutdown(struct pci_dev *pci_dev) 9316 { 9317 int rc; 9318 struct pqi_ctrl_info *ctrl_info; 9319 enum bmic_flush_cache_shutdown_event shutdown_event; 9320 9321 ctrl_info = pci_get_drvdata(pci_dev); 9322 if (!ctrl_info) { 9323 dev_err(&pci_dev->dev, 9324 "cache could not be flushed\n"); 9325 return; 9326 } 9327 9328 pqi_wait_until_ofa_finished(ctrl_info); 9329 9330 pqi_scsi_block_requests(ctrl_info); 9331 pqi_ctrl_block_device_reset(ctrl_info); 9332 pqi_ctrl_block_requests(ctrl_info); 9333 pqi_ctrl_wait_until_quiesced(ctrl_info); 9334 9335 if (system_state == SYSTEM_RESTART) 9336 shutdown_event = RESTART; 9337 else 9338 shutdown_event = SHUTDOWN; 9339 9340 /* 9341 * Write all data in the controller's battery-backed cache to 9342 * storage. 
9343 */ 9344 rc = pqi_flush_cache(ctrl_info, shutdown_event); 9345 if (rc) 9346 dev_err(&pci_dev->dev, 9347 "unable to flush controller cache during shutdown\n"); 9348 9349 pqi_crash_if_pending_command(ctrl_info); 9350 pqi_reset(ctrl_info); 9351 } 9352 9353 static void pqi_process_lockup_action_param(void) 9354 { 9355 unsigned int i; 9356 9357 if (!pqi_lockup_action_param) 9358 return; 9359 9360 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 9361 if (strcmp(pqi_lockup_action_param, 9362 pqi_lockup_actions[i].name) == 0) { 9363 pqi_lockup_action = pqi_lockup_actions[i].action; 9364 return; 9365 } 9366 } 9367 9368 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 9369 DRIVER_NAME_SHORT, pqi_lockup_action_param); 9370 } 9371 9372 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 9373 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) 9374 9375 static void pqi_process_ctrl_ready_timeout_param(void) 9376 { 9377 if (pqi_ctrl_ready_timeout_secs == 0) 9378 return; 9379 9380 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { 9381 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", 9382 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); 9383 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; 9384 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { 9385 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", 9386 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); 9387 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; 9388 } 9389 9390 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; 9391 } 9392 9393 static void pqi_process_module_params(void) 9394 { 9395 pqi_process_lockup_action_param(); 9396 pqi_process_ctrl_ready_timeout_param(); 9397 } 9398 9399 #if defined(CONFIG_PM) 9400 9401 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) 9402 { 9403 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) 9404 return RESTART; 9405 9406 return SUSPEND; 9407 } 9408 9409 static int pqi_suspend_or_freeze(struct device *dev, bool suspend) 9410 { 9411 struct pci_dev *pci_dev; 9412 struct pqi_ctrl_info *ctrl_info; 9413 9414 pci_dev = to_pci_dev(dev); 9415 ctrl_info = pci_get_drvdata(pci_dev); 9416 9417 pqi_wait_until_ofa_finished(ctrl_info); 9418 9419 pqi_ctrl_block_scan(ctrl_info); 9420 pqi_scsi_block_requests(ctrl_info); 9421 pqi_ctrl_block_device_reset(ctrl_info); 9422 pqi_ctrl_block_requests(ctrl_info); 9423 pqi_ctrl_wait_until_quiesced(ctrl_info); 9424 9425 if (suspend) { 9426 enum bmic_flush_cache_shutdown_event shutdown_event; 9427 9428 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9429 pqi_flush_cache(ctrl_info, shutdown_event); 9430 } 9431 9432 pqi_stop_heartbeat_timer(ctrl_info); 9433 pqi_crash_if_pending_command(ctrl_info); 9434 pqi_free_irqs(ctrl_info); 9435 9436 ctrl_info->controller_online = false; 9437 ctrl_info->pqi_mode_enabled = false; 9438 9439 return 0; 9440 } 9441 9442 static __maybe_unused int pqi_suspend(struct device *dev) 9443 { 9444 return pqi_suspend_or_freeze(dev, true); 9445 } 9446 9447 static int 
pqi_resume_or_restore(struct device *dev) 9448 { 9449 int rc; 9450 struct pci_dev *pci_dev; 9451 struct pqi_ctrl_info *ctrl_info; 9452 9453 pci_dev = to_pci_dev(dev); 9454 ctrl_info = pci_get_drvdata(pci_dev); 9455 9456 rc = pqi_request_irqs(ctrl_info); 9457 if (rc) 9458 return rc; 9459 9460 pqi_ctrl_unblock_device_reset(ctrl_info); 9461 pqi_ctrl_unblock_requests(ctrl_info); 9462 pqi_scsi_unblock_requests(ctrl_info); 9463 pqi_ctrl_unblock_scan(ctrl_info); 9464 9465 ssleep(PQI_POST_RESET_DELAY_SECS); 9466 9467 return pqi_ctrl_init_resume(ctrl_info); 9468 } 9469 9470 static int pqi_freeze(struct device *dev) 9471 { 9472 return pqi_suspend_or_freeze(dev, false); 9473 } 9474 9475 static int pqi_thaw(struct device *dev) 9476 { 9477 int rc; 9478 struct pci_dev *pci_dev; 9479 struct pqi_ctrl_info *ctrl_info; 9480 9481 pci_dev = to_pci_dev(dev); 9482 ctrl_info = pci_get_drvdata(pci_dev); 9483 9484 rc = pqi_request_irqs(ctrl_info); 9485 if (rc) 9486 return rc; 9487 9488 ctrl_info->controller_online = true; 9489 ctrl_info->pqi_mode_enabled = true; 9490 9491 pqi_ctrl_unblock_device_reset(ctrl_info); 9492 pqi_ctrl_unblock_requests(ctrl_info); 9493 pqi_scsi_unblock_requests(ctrl_info); 9494 pqi_ctrl_unblock_scan(ctrl_info); 9495 9496 return 0; 9497 } 9498 9499 static int pqi_poweroff(struct device *dev) 9500 { 9501 struct pci_dev *pci_dev; 9502 struct pqi_ctrl_info *ctrl_info; 9503 enum bmic_flush_cache_shutdown_event shutdown_event; 9504 9505 pci_dev = to_pci_dev(dev); 9506 ctrl_info = pci_get_drvdata(pci_dev); 9507 9508 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9509 pqi_flush_cache(ctrl_info, shutdown_event); 9510 9511 return 0; 9512 } 9513 9514 static const struct dev_pm_ops pqi_pm_ops = { 9515 .suspend = pqi_suspend, 9516 .resume = pqi_resume_or_restore, 9517 .freeze = pqi_freeze, 9518 .thaw = pqi_thaw, 9519 .poweroff = pqi_poweroff, 9520 .restore = pqi_resume_or_restore, 9521 }; 9522 9523 #endif /* CONFIG_PM */ 9524 9525 /* Define the PCI IDs for the controllers that we support. 
*/ 9526 static const struct pci_device_id pqi_pci_id_table[] = { 9527 { 9528 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9529 0x105b, 0x1211) 9530 }, 9531 { 9532 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9533 0x105b, 0x1321) 9534 }, 9535 { 9536 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9537 0x152d, 0x8a22) 9538 }, 9539 { 9540 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9541 0x152d, 0x8a23) 9542 }, 9543 { 9544 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9545 0x152d, 0x8a24) 9546 }, 9547 { 9548 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9549 0x152d, 0x8a36) 9550 }, 9551 { 9552 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9553 0x152d, 0x8a37) 9554 }, 9555 { 9556 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9557 0x193d, 0x0462) 9558 }, 9559 { 9560 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9561 0x193d, 0x1104) 9562 }, 9563 { 9564 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9565 0x193d, 0x1105) 9566 }, 9567 { 9568 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9569 0x193d, 0x1106) 9570 }, 9571 { 9572 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9573 0x193d, 0x1107) 9574 }, 9575 { 9576 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9577 0x193d, 0x1108) 9578 }, 9579 { 9580 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9581 0x193d, 0x1109) 9582 }, 9583 { 9584 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9585 0x193d, 0x110b) 9586 }, 9587 { 9588 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9589 0x193d, 0x1110) 9590 }, 9591 { 9592 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9593 0x193d, 0x8460) 9594 }, 9595 { 9596 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9597 0x193d, 0x8461) 9598 }, 9599 { 9600 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9601 0x193d, 0x8462) 9602 }, 9603 { 9604 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9605 0x193d, 0xc460) 9606 }, 9607 { 9608 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9609 0x193d, 0xc461) 9610 }, 9611 { 9612 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9613 0x193d, 0xf460) 9614 }, 9615 { 9616 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9617 0x193d, 0xf461) 9618 }, 9619 { 9620 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9621 0x1bd4, 0x0045) 9622 }, 9623 { 9624 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9625 0x1bd4, 0x0046) 9626 }, 9627 { 9628 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9629 0x1bd4, 0x0047) 9630 }, 9631 { 9632 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9633 0x1bd4, 0x0048) 9634 }, 9635 { 9636 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9637 0x1bd4, 0x004a) 9638 }, 9639 { 9640 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9641 0x1bd4, 0x004b) 9642 }, 9643 { 9644 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9645 0x1bd4, 0x004c) 9646 }, 9647 { 9648 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9649 0x1bd4, 0x004f) 9650 }, 9651 { 9652 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9653 0x1bd4, 0x0051) 9654 }, 9655 { 9656 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9657 0x1bd4, 0x0052) 9658 }, 9659 { 9660 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9661 0x1bd4, 0x0053) 9662 }, 9663 { 9664 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9665 0x1bd4, 0x0054) 9666 }, 9667 { 9668 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9669 0x1bd4, 0x006b) 9670 }, 9671 { 9672 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9673 0x1bd4, 0x006c) 9674 }, 9675 { 9676 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9677 0x1bd4, 0x006d) 9678 }, 9679 { 9680 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9681 0x1bd4, 0x006f) 9682 }, 9683 { 9684 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 9685 0x1bd4, 0x0070) 9686 }, 9687 { 9688 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9689 0x1bd4, 0x0071) 9690 }, 9691 { 9692 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9693 0x1bd4, 0x0072) 9694 }, 9695 { 9696 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9697 0x1bd4, 0x0086) 9698 }, 9699 { 9700 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9701 0x1bd4, 0x0087) 9702 }, 9703 { 9704 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9705 0x1bd4, 0x0088) 9706 }, 9707 { 9708 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9709 0x1bd4, 0x0089) 9710 }, 9711 { 9712 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9713 0x1ff9, 0x00a1) 9714 }, 9715 { 9716 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9717 0x1f3a, 0x0104) 9718 }, 9719 { 9720 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9721 0x19e5, 0xd227) 9722 }, 9723 { 9724 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9725 0x19e5, 0xd228) 9726 }, 9727 { 9728 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9729 0x19e5, 0xd229) 9730 }, 9731 { 9732 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9733 0x19e5, 0xd22a) 9734 }, 9735 { 9736 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9737 0x19e5, 0xd22b) 9738 }, 9739 { 9740 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9741 0x19e5, 0xd22c) 9742 }, 9743 { 9744 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9745 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 9746 }, 9747 { 9748 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9749 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 9750 }, 9751 { 9752 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9753 PCI_VENDOR_ID_ADAPTEC2, 0x0659) 9754 }, 9755 { 9756 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9757 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 9758 }, 9759 { 9760 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9761 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 9762 }, 9763 { 9764 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9765 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 9766 }, 9767 { 9768 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9769 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 9770 }, 9771 { 9772 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9773 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 9774 }, 9775 { 9776 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9777 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 9778 }, 9779 { 9780 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9781 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 9782 }, 9783 { 9784 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9785 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 9786 }, 9787 { 9788 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9789 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 9790 }, 9791 { 9792 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9793 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 9794 }, 9795 { 9796 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9797 PCI_VENDOR_ID_ADAPTEC2, 0x080a) 9798 }, 9799 { 9800 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9801 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 9802 }, 9803 { 9804 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9805 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 9806 }, 9807 { 9808 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9809 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 9810 }, 9811 { 9812 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9813 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 9814 }, 9815 { 9816 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9817 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 9818 }, 9819 { 9820 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9821 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 9822 }, 9823 { 9824 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9825 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 9826 }, 9827 { 9828 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9829 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 9830 
}, 9831 { 9832 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9833 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 9834 }, 9835 { 9836 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9837 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 9838 }, 9839 { 9840 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9841 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 9842 }, 9843 { 9844 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9845 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 9846 }, 9847 { 9848 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9849 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 9850 }, 9851 { 9852 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9853 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 9854 }, 9855 { 9856 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9857 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 9858 }, 9859 { 9860 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9861 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 9862 }, 9863 { 9864 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9865 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 9866 }, 9867 { 9868 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9869 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 9870 }, 9871 { 9872 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9873 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 9874 }, 9875 { 9876 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9877 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 9878 }, 9879 { 9880 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9881 PCI_VENDOR_ID_ADAPTEC2, 0x1304) 9882 }, 9883 { 9884 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9885 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 9886 }, 9887 { 9888 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9889 PCI_VENDOR_ID_ADAPTEC2, 0x1400) 9890 }, 9891 { 9892 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9893 PCI_VENDOR_ID_ADAPTEC2, 0x1402) 9894 }, 9895 { 9896 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9897 PCI_VENDOR_ID_ADAPTEC2, 0x1410) 9898 }, 9899 { 9900 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9901 PCI_VENDOR_ID_ADAPTEC2, 0x1411) 9902 }, 9903 { 9904 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9905 PCI_VENDOR_ID_ADAPTEC2, 0x1412) 9906 }, 9907 { 9908 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9909 PCI_VENDOR_ID_ADAPTEC2, 0x1420) 9910 }, 9911 { 9912 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9913 PCI_VENDOR_ID_ADAPTEC2, 0x1430) 9914 }, 9915 { 9916 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9917 PCI_VENDOR_ID_ADAPTEC2, 0x1440) 9918 }, 9919 { 9920 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9921 PCI_VENDOR_ID_ADAPTEC2, 0x1441) 9922 }, 9923 { 9924 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9925 PCI_VENDOR_ID_ADAPTEC2, 0x1450) 9926 }, 9927 { 9928 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9929 PCI_VENDOR_ID_ADAPTEC2, 0x1452) 9930 }, 9931 { 9932 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9933 PCI_VENDOR_ID_ADAPTEC2, 0x1460) 9934 }, 9935 { 9936 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9937 PCI_VENDOR_ID_ADAPTEC2, 0x1461) 9938 }, 9939 { 9940 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9941 PCI_VENDOR_ID_ADAPTEC2, 0x1462) 9942 }, 9943 { 9944 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9945 PCI_VENDOR_ID_ADAPTEC2, 0x1463) 9946 }, 9947 { 9948 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9949 PCI_VENDOR_ID_ADAPTEC2, 0x1470) 9950 }, 9951 { 9952 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9953 PCI_VENDOR_ID_ADAPTEC2, 0x1471) 9954 }, 9955 { 9956 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9957 PCI_VENDOR_ID_ADAPTEC2, 0x1472) 9958 }, 9959 { 9960 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9961 PCI_VENDOR_ID_ADAPTEC2, 0x1473) 9962 }, 9963 { 9964 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9965 PCI_VENDOR_ID_ADAPTEC2, 0x1474) 9966 }, 9967 { 9968 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9969 PCI_VENDOR_ID_ADAPTEC2, 0x1475) 9970 }, 9971 { 9972 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9973 PCI_VENDOR_ID_ADAPTEC2, 0x1480) 9974 }, 9975 { 9976 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9977 PCI_VENDOR_ID_ADAPTEC2, 0x1490) 9978 }, 9979 { 9980 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9981 PCI_VENDOR_ID_ADAPTEC2, 0x1491) 9982 }, 9983 { 9984 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9985 PCI_VENDOR_ID_ADAPTEC2, 0x14a0) 9986 }, 9987 { 9988 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9989 PCI_VENDOR_ID_ADAPTEC2, 0x14a1) 9990 }, 9991 { 9992 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9993 PCI_VENDOR_ID_ADAPTEC2, 0x14a2) 9994 }, 9995 { 9996 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9997 PCI_VENDOR_ID_ADAPTEC2, 0x14a4) 9998 }, 9999 { 10000 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10001 PCI_VENDOR_ID_ADAPTEC2, 0x14a5) 10002 }, 10003 { 10004 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10005 PCI_VENDOR_ID_ADAPTEC2, 0x14a6) 10006 }, 10007 { 10008 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10009 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) 10010 }, 10011 { 10012 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10013 PCI_VENDOR_ID_ADAPTEC2, 0x14b1) 10014 }, 10015 { 10016 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10017 PCI_VENDOR_ID_ADAPTEC2, 0x14c0) 10018 }, 10019 { 10020 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10021 PCI_VENDOR_ID_ADAPTEC2, 0x14c1) 10022 }, 10023 { 10024 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10025 PCI_VENDOR_ID_ADAPTEC2, 0x14c2) 10026 }, 10027 { 10028 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10029 PCI_VENDOR_ID_ADAPTEC2, 0x14c3) 10030 }, 10031 { 10032 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10033 PCI_VENDOR_ID_ADAPTEC2, 0x14c4) 10034 }, 10035 { 10036 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10037 PCI_VENDOR_ID_ADAPTEC2, 0x14d0) 10038 }, 10039 { 10040 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10041 PCI_VENDOR_ID_ADAPTEC2, 0x14e0) 10042 }, 10043 { 10044 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10045 PCI_VENDOR_ID_ADAPTEC2, 0x14f0) 10046 }, 10047 { 10048 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10049 PCI_VENDOR_ID_ADVANTECH, 0x8312) 10050 }, 10051 { 10052 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10053 PCI_VENDOR_ID_DELL, 0x1fe0) 10054 }, 10055 { 10056 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10057 PCI_VENDOR_ID_HP, 0x0600) 10058 }, 10059 { 10060 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10061 PCI_VENDOR_ID_HP, 0x0601) 10062 }, 10063 { 10064 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10065 PCI_VENDOR_ID_HP, 0x0602) 10066 }, 10067 { 10068 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10069 PCI_VENDOR_ID_HP, 0x0603) 10070 }, 10071 { 10072 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10073 PCI_VENDOR_ID_HP, 0x0609) 10074 }, 10075 { 10076 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10077 PCI_VENDOR_ID_HP, 0x0650) 10078 }, 10079 { 10080 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10081 PCI_VENDOR_ID_HP, 0x0651) 10082 }, 10083 { 10084 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10085 PCI_VENDOR_ID_HP, 0x0652) 10086 }, 10087 { 10088 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10089 PCI_VENDOR_ID_HP, 0x0653) 10090 }, 10091 { 10092 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10093 PCI_VENDOR_ID_HP, 0x0654) 10094 }, 10095 { 10096 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10097 PCI_VENDOR_ID_HP, 0x0655) 10098 }, 10099 { 10100 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10101 PCI_VENDOR_ID_HP, 0x0700) 10102 }, 
10103 { 10104 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10105 PCI_VENDOR_ID_HP, 0x0701) 10106 }, 10107 { 10108 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10109 PCI_VENDOR_ID_HP, 0x1001) 10110 }, 10111 { 10112 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10113 PCI_VENDOR_ID_HP, 0x1002) 10114 }, 10115 { 10116 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10117 PCI_VENDOR_ID_HP, 0x1100) 10118 }, 10119 { 10120 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10121 PCI_VENDOR_ID_HP, 0x1101) 10122 }, 10123 { 10124 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10125 0x1590, 0x0294) 10126 }, 10127 { 10128 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10129 0x1590, 0x02db) 10130 }, 10131 { 10132 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10133 0x1590, 0x02dc) 10134 }, 10135 { 10136 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10137 0x1590, 0x032e) 10138 }, 10139 { 10140 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10141 0x1590, 0x036f) 10142 }, 10143 { 10144 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10145 0x1590, 0x0381) 10146 }, 10147 { 10148 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10149 0x1590, 0x0382) 10150 }, 10151 { 10152 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10153 0x1590, 0x0383) 10154 }, 10155 { 10156 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10157 0x1d8d, 0x0800) 10158 }, 10159 { 10160 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10161 0x1d8d, 0x0908) 10162 }, 10163 { 10164 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10165 0x1d8d, 0x0806) 10166 }, 10167 { 10168 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10169 0x1d8d, 0x0916) 10170 }, 10171 { 10172 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10173 PCI_VENDOR_ID_GIGABYTE, 0x1000) 10174 }, 10175 { 10176 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10177 0x1dfc, 0x3161) 10178 }, 10179 { 10180 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10181 0x1f0c, 0x3161) 10182 }, 10183 { 10184 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10185 0x1cf2, 0x0804) 10186 }, 10187 { 10188 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10189 0x1cf2, 0x0805) 10190 }, 10191 { 10192 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10193 0x1cf2, 0x0806) 10194 }, 10195 { 10196 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10197 0x1cf2, 0x5445) 10198 }, 10199 { 10200 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10201 0x1cf2, 0x5446) 10202 }, 10203 { 10204 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10205 0x1cf2, 0x5447) 10206 }, 10207 { 10208 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10209 0x1cf2, 0x5449) 10210 }, 10211 { 10212 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10213 0x1cf2, 0x544a) 10214 }, 10215 { 10216 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10217 0x1cf2, 0x544b) 10218 }, 10219 { 10220 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10221 0x1cf2, 0x544d) 10222 }, 10223 { 10224 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10225 0x1cf2, 0x544e) 10226 }, 10227 { 10228 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10229 0x1cf2, 0x544f) 10230 }, 10231 { 10232 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10233 0x1cf2, 0x54da) 10234 }, 10235 { 10236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10237 0x1cf2, 0x54db) 10238 }, 10239 { 10240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10241 0x1cf2, 0x54dc) 10242 }, 10243 { 10244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10245 0x1cf2, 0x0b27) 10246 }, 10247 { 10248 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10249 0x1cf2, 0x0b29) 10250 }, 10251 { 10252 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10253 0x1cf2, 0x0b45) 
10254 }, 10255 { 10256 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10257 0x1cc4, 0x0101) 10258 }, 10259 { 10260 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10261 0x1cc4, 0x0201) 10262 }, 10263 { 10264 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10265 PCI_VENDOR_ID_LENOVO, 0x0220) 10266 }, 10267 { 10268 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10269 PCI_VENDOR_ID_LENOVO, 0x0221) 10270 }, 10271 { 10272 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10273 PCI_VENDOR_ID_LENOVO, 0x0520) 10274 }, 10275 { 10276 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10277 PCI_VENDOR_ID_LENOVO, 0x0522) 10278 }, 10279 { 10280 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10281 PCI_VENDOR_ID_LENOVO, 0x0620) 10282 }, 10283 { 10284 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10285 PCI_VENDOR_ID_LENOVO, 0x0621) 10286 }, 10287 { 10288 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10289 PCI_VENDOR_ID_LENOVO, 0x0622) 10290 }, 10291 { 10292 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10293 PCI_VENDOR_ID_LENOVO, 0x0623) 10294 }, 10295 { 10296 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10297 0x1014, 0x0718) 10298 }, 10299 { 10300 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10301 0x1137, 0x02f8) 10302 }, 10303 { 10304 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10305 0x1137, 0x02f9) 10306 }, 10307 { 10308 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10309 0x1137, 0x02fa) 10310 }, 10311 { 10312 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10313 0x1137, 0x02fe) 10314 }, 10315 { 10316 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10317 0x1137, 0x02ff) 10318 }, 10319 { 10320 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10321 0x1137, 0x0300) 10322 }, 10323 { 10324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10325 0x1ff9, 0x0045) 10326 }, 10327 { 10328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10329 0x1ff9, 0x0046) 10330 }, 10331 { 10332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10333 0x1ff9, 0x0047) 10334 }, 10335 { 10336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10337 0x1ff9, 0x0048) 10338 }, 10339 { 10340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10341 0x1ff9, 0x004a) 10342 }, 10343 { 10344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10345 0x1ff9, 0x004b) 10346 }, 10347 { 10348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10349 0x1ff9, 0x004c) 10350 }, 10351 { 10352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10353 0x1ff9, 0x004f) 10354 }, 10355 { 10356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10357 0x1ff9, 0x0051) 10358 }, 10359 { 10360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10361 0x1ff9, 0x0052) 10362 }, 10363 { 10364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10365 0x1ff9, 0x0053) 10366 }, 10367 { 10368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10369 0x1ff9, 0x0054) 10370 }, 10371 { 10372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10373 0x1ff9, 0x006b) 10374 }, 10375 { 10376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10377 0x1ff9, 0x006c) 10378 }, 10379 { 10380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10381 0x1ff9, 0x006d) 10382 }, 10383 { 10384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10385 0x1ff9, 0x006f) 10386 }, 10387 { 10388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10389 0x1ff9, 0x0070) 10390 }, 10391 { 10392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10393 0x1ff9, 0x0071) 10394 }, 10395 { 10396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10397 0x1ff9, 0x0072) 10398 }, 10399 { 10400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10401 0x1ff9, 0x0086) 10402 }, 10403 { 10404 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10405 0x1ff9, 0x0087) 10406 }, 10407 { 10408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10409 0x1ff9, 0x0088) 10410 }, 10411 { 10412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10413 0x1ff9, 0x0089) 10414 }, 10415 { 10416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10417 0x1e93, 0x1000) 10418 }, 10419 { 10420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10421 0x1e93, 0x1001) 10422 }, 10423 { 10424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10425 0x1e93, 0x1002) 10426 }, 10427 { 10428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10429 0x1e93, 0x1005) 10430 }, 10431 { 10432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10433 0x1f51, 0x1001) 10434 }, 10435 { 10436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10437 0x1f51, 0x1002) 10438 }, 10439 { 10440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10441 0x1f51, 0x1003) 10442 }, 10443 { 10444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10445 0x1f51, 0x1004) 10446 }, 10447 { 10448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10449 0x1f51, 0x1005) 10450 }, 10451 { 10452 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10453 0x1f51, 0x1006) 10454 }, 10455 { 10456 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10457 0x1f51, 0x1007) 10458 }, 10459 { 10460 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10461 0x1f51, 0x1008) 10462 }, 10463 { 10464 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10465 0x1f51, 0x1009) 10466 }, 10467 { 10468 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10469 0x1f51, 0x100a) 10470 }, 10471 { 10472 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10473 0x1f51, 0x100e) 10474 }, 10475 { 10476 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10477 0x1f51, 0x100f) 10478 }, 10479 { 10480 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10481 0x1f51, 0x1010) 10482 }, 10483 { 10484 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10485 0x1f51, 0x1011) 10486 }, 10487 { 10488 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10489 0x1f51, 0x1043) 10490 }, 10491 { 10492 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10493 0x1f51, 0x1044) 10494 }, 10495 { 10496 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10497 0x1f51, 0x1045) 10498 }, 10499 { 10500 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10501 0x1ff9, 0x00a3) 10502 }, 10503 { 10504 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10505 PCI_ANY_ID, PCI_ANY_ID) 10506 }, 10507 { 0 } 10508 }; 10509 10510 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 10511 10512 static struct pci_driver pqi_pci_driver = { 10513 .name = DRIVER_NAME_SHORT, 10514 .id_table = pqi_pci_id_table, 10515 .probe = pqi_pci_probe, 10516 .remove = pqi_pci_remove, 10517 .shutdown = pqi_shutdown, 10518 #if defined(CONFIG_PM) 10519 .driver = { 10520 .pm = &pqi_pm_ops 10521 }, 10522 #endif 10523 }; 10524 10525 static int __init pqi_init(void) 10526 { 10527 int rc; 10528 10529 pr_info(DRIVER_NAME "\n"); 10530 pqi_verify_structures(); 10531 sis_verify_structures(); 10532 10533 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); 10534 if (!pqi_sas_transport_template) 10535 return -ENODEV; 10536 10537 pqi_process_module_params(); 10538 10539 rc = pci_register_driver(&pqi_pci_driver); 10540 if (rc) 10541 sas_release_transport(pqi_sas_transport_template); 10542 10543 return rc; 10544 } 10545 10546 static void __exit pqi_cleanup(void) 10547 { 10548 pci_unregister_driver(&pqi_pci_driver); 10549 sas_release_transport(pqi_sas_transport_template); 10550 } 10551 10552 module_init(pqi_init); 10553 module_exit(pqi_cleanup); 10554 10555 
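/*
 * Compile-time layout checks: each offsetof()/sizeof() assertion below
 * pins a driver structure (SIS/PQI register maps, IU headers, admin and
 * operational queue elements, BMIC buffers) to the byte offsets defined
 * by the controller interface, so a layout regression fails the build
 * via BUILD_BUG_ON() instead of corrupting requests at run time.
 */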
static void pqi_verify_structures(void) 10556 { 10557 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10558 sis_host_to_ctrl_doorbell) != 0x20); 10559 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10560 sis_interrupt_mask) != 0x34); 10561 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10562 sis_ctrl_to_host_doorbell) != 0x9c); 10563 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10564 sis_ctrl_to_host_doorbell_clear) != 0xa0); 10565 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10566 sis_driver_scratch) != 0xb0); 10567 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10568 sis_product_identifier) != 0xb4); 10569 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10570 sis_firmware_status) != 0xbc); 10571 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10572 sis_ctrl_shutdown_reason_code) != 0xcc); 10573 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10574 sis_mailbox) != 0x1000); 10575 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10576 pqi_registers) != 0x4000); 10577 10578 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10579 iu_type) != 0x0); 10580 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10581 iu_length) != 0x2); 10582 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10583 response_queue_id) != 0x4); 10584 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10585 driver_flags) != 0x6); 10586 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 10587 10588 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10589 status) != 0x0); 10590 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10591 service_response) != 0x1); 10592 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10593 data_present) != 0x2); 10594 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10595 reserved) != 0x3); 10596 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10597 residual_count) != 0x4); 10598 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10599 data_length) != 0x8); 10600 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10601 reserved1) != 0xa); 10602 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10603 data) != 0xc); 10604 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 10605 10606 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10607 data_in_result) != 0x0); 10608 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10609 data_out_result) != 0x1); 10610 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10611 reserved) != 0x2); 10612 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10613 status) != 0x5); 10614 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10615 status_qualifier) != 0x6); 10616 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10617 sense_data_length) != 0x8); 10618 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10619 response_data_length) != 0xa); 10620 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10621 data_in_transferred) != 0xc); 10622 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10623 data_out_transferred) != 0x10); 10624 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10625 data) != 0x14); 10626 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 10627 10628 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10629 signature) != 0x0); 10630 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10631 function_and_status_code) != 0x8); 10632 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10633 max_admin_iq_elements) != 0x10); 10634 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10635 max_admin_oq_elements) != 0x11); 10636 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10637 admin_iq_element_length) != 0x12); 10638 BUILD_BUG_ON(offsetof(struct 
pqi_device_registers, 10639 admin_oq_element_length) != 0x13); 10640 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10641 max_reset_timeout) != 0x14); 10642 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10643 legacy_intx_status) != 0x18); 10644 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10645 legacy_intx_mask_set) != 0x1c); 10646 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10647 legacy_intx_mask_clear) != 0x20); 10648 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10649 device_status) != 0x40); 10650 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10651 admin_iq_pi_offset) != 0x48); 10652 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10653 admin_oq_ci_offset) != 0x50); 10654 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10655 admin_iq_element_array_addr) != 0x58); 10656 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10657 admin_oq_element_array_addr) != 0x60); 10658 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10659 admin_iq_ci_addr) != 0x68); 10660 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10661 admin_oq_pi_addr) != 0x70); 10662 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10663 admin_iq_num_elements) != 0x78); 10664 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10665 admin_oq_num_elements) != 0x79); 10666 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10667 admin_queue_int_msg_num) != 0x7a); 10668 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10669 device_error) != 0x80); 10670 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10671 error_details) != 0x88); 10672 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10673 device_reset) != 0x90); 10674 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10675 power_action) != 0x94); 10676 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 10677 10678 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10679 header.iu_type) != 0); 10680 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10681 header.iu_length) != 2); 10682 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10683 header.driver_flags) != 6); 10684 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10685 request_id) != 8); 10686 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10687 function_code) != 10); 10688 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10689 data.report_device_capability.buffer_length) != 44); 10690 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10691 data.report_device_capability.sg_descriptor) != 48); 10692 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10693 data.create_operational_iq.queue_id) != 12); 10694 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10695 data.create_operational_iq.element_array_addr) != 16); 10696 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10697 data.create_operational_iq.ci_addr) != 24); 10698 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10699 data.create_operational_iq.num_elements) != 32); 10700 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10701 data.create_operational_iq.element_length) != 34); 10702 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10703 data.create_operational_iq.queue_protocol) != 36); 10704 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10705 data.create_operational_oq.queue_id) != 12); 10706 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10707 data.create_operational_oq.element_array_addr) != 16); 10708 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10709 data.create_operational_oq.pi_addr) 
!= 24); 10710 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10711 data.create_operational_oq.num_elements) != 32); 10712 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10713 data.create_operational_oq.element_length) != 34); 10714 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10715 data.create_operational_oq.queue_protocol) != 36); 10716 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10717 data.create_operational_oq.int_msg_num) != 40); 10718 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10719 data.create_operational_oq.coalescing_count) != 42); 10720 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10721 data.create_operational_oq.min_coalescing_time) != 44); 10722 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10723 data.create_operational_oq.max_coalescing_time) != 48); 10724 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10725 data.delete_operational_queue.queue_id) != 12); 10726 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 10727 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10728 data.create_operational_iq) != 64 - 11); 10729 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10730 data.create_operational_oq) != 64 - 11); 10731 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10732 data.delete_operational_queue) != 64 - 11); 10733 10734 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10735 header.iu_type) != 0); 10736 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10737 header.iu_length) != 2); 10738 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10739 header.driver_flags) != 6); 10740 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10741 request_id) != 8); 10742 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10743 function_code) != 10); 10744 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10745 status) != 11); 10746 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10747 data.create_operational_iq.status_descriptor) != 12); 10748 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10749 data.create_operational_iq.iq_pi_offset) != 16); 10750 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10751 data.create_operational_oq.status_descriptor) != 12); 10752 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10753 data.create_operational_oq.oq_ci_offset) != 16); 10754 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 10755 10756 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10757 header.iu_type) != 0); 10758 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10759 header.iu_length) != 2); 10760 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10761 header.response_queue_id) != 4); 10762 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10763 header.driver_flags) != 6); 10764 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10765 request_id) != 8); 10766 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10767 nexus_id) != 10); 10768 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10769 buffer_length) != 12); 10770 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10771 lun_number) != 16); 10772 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10773 protocol_specific) != 24); 10774 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10775 error_index) != 27); 10776 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10777 cdb) != 32); 10778 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10779 timeout) != 60); 10780 BUILD_BUG_ON(offsetof(struct 
pqi_raid_path_request, 10781 sg_descriptors) != 64); 10782 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 10783 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10784 10785 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10786 header.iu_type) != 0); 10787 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10788 header.iu_length) != 2); 10789 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10790 header.response_queue_id) != 4); 10791 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10792 header.driver_flags) != 6); 10793 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10794 request_id) != 8); 10795 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10796 nexus_id) != 12); 10797 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10798 buffer_length) != 16); 10799 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10800 data_encryption_key_index) != 22); 10801 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10802 encrypt_tweak_lower) != 24); 10803 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10804 encrypt_tweak_upper) != 28); 10805 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10806 cdb) != 32); 10807 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10808 error_index) != 48); 10809 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10810 num_sg_descriptors) != 50); 10811 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10812 cdb_length) != 51); 10813 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10814 lun_number) != 52); 10815 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10816 sg_descriptors) != 64); 10817 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 10818 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10819 10820 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10821 header.iu_type) != 0); 10822 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10823 header.iu_length) != 2); 10824 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10825 request_id) != 8); 10826 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10827 error_index) != 10); 10828 10829 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10830 header.iu_type) != 0); 10831 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10832 header.iu_length) != 2); 10833 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10834 header.response_queue_id) != 4); 10835 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10836 request_id) != 8); 10837 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10838 data.report_event_configuration.buffer_length) != 12); 10839 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10840 data.report_event_configuration.sg_descriptors) != 16); 10841 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10842 data.set_event_configuration.global_event_oq_id) != 10); 10843 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10844 data.set_event_configuration.buffer_length) != 12); 10845 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10846 data.set_event_configuration.sg_descriptors) != 16); 10847 10848 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10849 max_inbound_iu_length) != 6); 10850 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10851 max_outbound_iu_length) != 14); 10852 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 10853 10854 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10855 data_length) != 0); 10856 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10857 iq_arbitration_priority_support_bitmask) != 8); 10858 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10859 
maximum_aw_a) != 9); 10860 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10861 maximum_aw_b) != 10); 10862 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10863 maximum_aw_c) != 11); 10864 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10865 max_inbound_queues) != 16); 10866 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10867 max_elements_per_iq) != 18); 10868 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10869 max_iq_element_length) != 24); 10870 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10871 min_iq_element_length) != 26); 10872 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10873 max_outbound_queues) != 30); 10874 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10875 max_elements_per_oq) != 32); 10876 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10877 intr_coalescing_time_granularity) != 34); 10878 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10879 max_oq_element_length) != 36); 10880 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10881 min_oq_element_length) != 38); 10882 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10883 iu_layer_descriptors) != 64); 10884 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 10885 10886 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 10887 event_type) != 0); 10888 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 10889 oq_id) != 2); 10890 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 10891 10892 BUILD_BUG_ON(offsetof(struct pqi_event_config, 10893 num_event_descriptors) != 2); 10894 BUILD_BUG_ON(offsetof(struct pqi_event_config, 10895 descriptors) != 4); 10896 10897 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 10898 ARRAY_SIZE(pqi_supported_event_types)); 10899 10900 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10901 header.iu_type) != 0); 10902 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10903 header.iu_length) != 2); 10904 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10905 event_type) != 8); 10906 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10907 event_id) != 10); 10908 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10909 additional_event_id) != 12); 10910 BUILD_BUG_ON(offsetof(struct pqi_event_response, 10911 data) != 16); 10912 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 10913 10914 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10915 header.iu_type) != 0); 10916 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10917 header.iu_length) != 2); 10918 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10919 event_type) != 8); 10920 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10921 event_id) != 10); 10922 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 10923 additional_event_id) != 12); 10924 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 10925 10926 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10927 header.iu_type) != 0); 10928 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10929 header.iu_length) != 2); 10930 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10931 request_id) != 8); 10932 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10933 nexus_id) != 10); 10934 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10935 timeout) != 14); 10936 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10937 lun_number) != 16); 10938 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10939 protocol_specific) != 24); 10940 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10941 
outbound_queue_id_to_manage) != 26); 10942 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10943 request_id_to_manage) != 28); 10944 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 10945 task_management_function) != 30); 10946 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 10947 10948 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10949 header.iu_type) != 0); 10950 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10951 header.iu_length) != 2); 10952 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10953 request_id) != 8); 10954 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10955 nexus_id) != 10); 10956 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10957 additional_response_info) != 12); 10958 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 10959 response_code) != 15); 10960 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 10961 10962 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10963 configured_logical_drive_count) != 0); 10964 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10965 configuration_signature) != 1); 10966 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10967 firmware_version_short) != 5); 10968 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10969 extended_logical_unit_count) != 154); 10970 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10971 firmware_build_number) != 190); 10972 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10973 vendor_id) != 200); 10974 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10975 product_id) != 208); 10976 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10977 extra_controller_flags) != 286); 10978 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10979 controller_mode) != 292); 10980 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10981 spare_part_number) != 293); 10982 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 10983 firmware_version_long) != 325); 10984 10985 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10986 phys_bay_in_box) != 115); 10987 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10988 device_type) != 120); 10989 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10990 redundant_path_present_map) != 1736); 10991 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10992 active_path_number) != 1738); 10993 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10994 alternate_paths_phys_connector) != 1739); 10995 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10996 alternate_paths_phys_box_on_port) != 1755); 10997 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 10998 current_queue_depth_limit) != 1796); 10999 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 11000 11001 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); 11002 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11003 page_code) != 0); 11004 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11005 subpage_code) != 1); 11006 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11007 buffer_length) != 2); 11008 11009 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); 11010 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11011 page_code) != 0); 11012 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11013 subpage_code) != 1); 11014 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 
11015 page_length) != 2); 11016 11017 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) 11018 != 18); 11019 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11020 header) != 0); 11021 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11022 firmware_read_support) != 4); 11023 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11024 driver_read_support) != 5); 11025 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11026 firmware_write_support) != 6); 11027 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11028 driver_write_support) != 7); 11029 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11030 max_transfer_encrypted_sas_sata) != 8); 11031 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11032 max_transfer_encrypted_nvme) != 10); 11033 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11034 max_write_raid_5_6) != 12); 11035 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11036 max_write_raid_1_10_2drive) != 14); 11037 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11038 max_write_raid_1_10_3drive) != 16); 11039 11040 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 11041 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 11042 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 11043 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11044 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 11045 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11046 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 11047 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 11048 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11049 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 11050 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 11051 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11052 11053 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 11054 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 11055 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 11056 } 11057