// SPDX-License-Identifier: GPL-2.0
/*
 *	driver for Microchip PQI-based storage controllers
 *	Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *	Copyright (c) 2016-2018 Microsemi Corporation
 *	Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *	Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <linux/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.34-035"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		34
#define DRIVER_REVISION		35

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor,
	u32 total_size, u32 min_size);
static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor);
static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_host_memory_descriptor *host_memory_descriptor, u16 function_code);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");
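/*
 * Note on the module parameters above: each is registered with 0644
 * permissions, so it is also visible (and writable) under
 * /sys/module/smartpqi/parameters/ once the driver is loaded.
 * Illustrative load-time example only (the values shown are arbitrary,
 * not recommendations):
 *
 *	modprobe smartpqi lockup_action=reboot disable_heartbeat=1
 */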
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ?
		PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

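/*
 * pqi_ctrl_busy()/pqi_ctrl_unbusy() bracket driver activity, while
 * pqi_wait_if_ctrl_blocked() counts threads parked on block_requests_wait.
 * The quiesce helper below therefore treats the controller as quiesced once
 * num_busy_threads is no longer greater than num_blocked_threads.
 */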
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
	return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd ==
CISS_REPORT_PHYS) { 645 if (ctrl_info->rpl_extended_format_4_5_supported) 646 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4; 647 else 648 cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2; 649 } else { 650 cdb[1] = ctrl_info->ciss_report_log_flags; 651 } 652 put_unaligned_be32(cdb_length, &cdb[6]); 653 break; 654 case CISS_GET_RAID_MAP: 655 request->data_direction = SOP_READ_FLAG; 656 cdb[0] = CISS_READ; 657 cdb[1] = CISS_GET_RAID_MAP; 658 put_unaligned_be32(cdb_length, &cdb[6]); 659 break; 660 case SA_FLUSH_CACHE: 661 request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST; 662 request->data_direction = SOP_WRITE_FLAG; 663 cdb[0] = BMIC_WRITE; 664 cdb[6] = BMIC_FLUSH_CACHE; 665 put_unaligned_be16(cdb_length, &cdb[7]); 666 break; 667 case BMIC_SENSE_DIAG_OPTIONS: 668 cdb_length = 0; 669 fallthrough; 670 case BMIC_IDENTIFY_CONTROLLER: 671 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 672 case BMIC_SENSE_SUBSYSTEM_INFORMATION: 673 case BMIC_SENSE_FEATURE: 674 request->data_direction = SOP_READ_FLAG; 675 cdb[0] = BMIC_READ; 676 cdb[6] = cmd; 677 put_unaligned_be16(cdb_length, &cdb[7]); 678 break; 679 case BMIC_SET_DIAG_OPTIONS: 680 cdb_length = 0; 681 fallthrough; 682 case BMIC_WRITE_HOST_WELLNESS: 683 request->data_direction = SOP_WRITE_FLAG; 684 cdb[0] = BMIC_WRITE; 685 cdb[6] = cmd; 686 put_unaligned_be16(cdb_length, &cdb[7]); 687 break; 688 case BMIC_CSMI_PASSTHRU: 689 request->data_direction = SOP_BIDIRECTIONAL; 690 cdb[0] = BMIC_WRITE; 691 cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU; 692 cdb[6] = cmd; 693 put_unaligned_be16(cdb_length, &cdb[7]); 694 break; 695 default: 696 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd); 697 break; 698 } 699 700 switch (request->data_direction) { 701 case SOP_READ_FLAG: 702 *dir = DMA_FROM_DEVICE; 703 break; 704 case SOP_WRITE_FLAG: 705 *dir = DMA_TO_DEVICE; 706 break; 707 case SOP_NO_DIRECTION_FLAG: 708 *dir = DMA_NONE; 709 break; 710 default: 711 *dir = DMA_BIDIRECTIONAL; 712 break; 713 } 714 715 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 716 buffer, buffer_length, *dir); 717 } 718 719 static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 720 { 721 io_request->scmd = NULL; 722 io_request->status = 0; 723 io_request->error_info = NULL; 724 io_request->raid_bypass = false; 725 } 726 727 static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) 728 { 729 struct pqi_io_request *io_request; 730 u16 i; 731 732 if (scmd) { /* SML I/O request */ 733 u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 734 735 i = blk_mq_unique_tag_to_tag(blk_tag); 736 io_request = &ctrl_info->io_request_pool[i]; 737 if (atomic_inc_return(&io_request->refcount) > 1) { 738 atomic_dec(&io_request->refcount); 739 return NULL; 740 } 741 } else { /* IOCTL or driver internal request */ 742 /* 743 * benignly racy - may have to wait for an open slot. 
744 * command slot range is scsi_ml_can_queue - 745 * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] 746 */ 747 i = 0; 748 while (1) { 749 io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; 750 if (atomic_inc_return(&io_request->refcount) == 1) 751 break; 752 atomic_dec(&io_request->refcount); 753 i = (i + 1) % PQI_RESERVED_IO_SLOTS; 754 } 755 } 756 757 if (io_request) 758 pqi_reinit_io_request(io_request); 759 760 return io_request; 761 } 762 763 static void pqi_free_io_request(struct pqi_io_request *io_request) 764 { 765 atomic_dec(&io_request->refcount); 766 } 767 768 static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, 769 u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, 770 struct pqi_raid_error_info *error_info) 771 { 772 int rc; 773 struct pqi_raid_path_request request; 774 enum dma_data_direction dir; 775 776 rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, 777 buffer, buffer_length, vpd_page, &dir); 778 if (rc) 779 return rc; 780 781 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info); 782 783 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 784 785 return rc; 786 } 787 788 /* helper functions for pqi_send_scsi_raid_request */ 789 790 static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, 791 u8 cmd, void *buffer, size_t buffer_length) 792 { 793 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 794 buffer, buffer_length, 0, NULL); 795 } 796 797 static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, 798 u8 cmd, void *buffer, size_t buffer_length, 799 struct pqi_raid_error_info *error_info) 800 { 801 return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, 802 buffer, buffer_length, 0, error_info); 803 } 804 805 static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 806 struct bmic_identify_controller *buffer) 807 { 808 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, 809 buffer, sizeof(*buffer)); 810 } 811 812 static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, 813 struct bmic_sense_subsystem_info *sense_info) 814 { 815 return pqi_send_ctrl_raid_request(ctrl_info, 816 BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, 817 sizeof(*sense_info)); 818 } 819 820 static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 821 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 822 { 823 return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr, 824 buffer, buffer_length, vpd_page, NULL); 825 } 826 827 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 828 struct pqi_scsi_dev *device, 829 struct bmic_identify_physical_device *buffer, size_t buffer_length) 830 { 831 int rc; 832 enum dma_data_direction dir; 833 u16 bmic_device_index; 834 struct pqi_raid_path_request request; 835 836 rc = pqi_build_raid_path_request(ctrl_info, &request, 837 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 838 buffer_length, 0, &dir); 839 if (rc) 840 return rc; 841 842 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 843 request.cdb[2] = (u8)bmic_device_index; 844 request.cdb[9] = (u8)(bmic_device_index >> 8); 845 846 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 847 848 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 849 850 return rc; 851 } 852 853 static inline u32 pqi_aio_limit_to_bytes(__le16 *limit) 854 { 855 
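/*
 * BMIC sense-feature AIO limits are reported in 1 KiB units; a reported
 * value of 0 means "no limit" and is translated to ~0 (unlimited) here.
 */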
u32 bytes; 856 857 bytes = get_unaligned_le16(limit); 858 if (bytes == 0) 859 bytes = ~0; 860 else 861 bytes *= 1024; 862 863 return bytes; 864 } 865 866 #pragma pack(1) 867 868 struct bmic_sense_feature_buffer { 869 struct bmic_sense_feature_buffer_header header; 870 struct bmic_sense_feature_io_page_aio_subpage aio_subpage; 871 }; 872 873 #pragma pack() 874 875 #define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH \ 876 offsetofend(struct bmic_sense_feature_buffer, \ 877 aio_subpage.max_write_raid_1_10_3drive) 878 879 #define MINIMUM_AIO_SUBPAGE_LENGTH \ 880 (offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \ 881 max_write_raid_1_10_3drive) - \ 882 sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header)) 883 884 static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info) 885 { 886 int rc; 887 enum dma_data_direction dir; 888 struct pqi_raid_path_request request; 889 struct bmic_sense_feature_buffer *buffer; 890 891 buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); 892 if (!buffer) 893 return -ENOMEM; 894 895 rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID, 896 buffer, sizeof(*buffer), 0, &dir); 897 if (rc) 898 goto error; 899 900 request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE; 901 request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE; 902 903 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 904 905 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 906 907 if (rc) 908 goto error; 909 910 if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE || 911 buffer->header.subpage_code != 912 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 913 get_unaligned_le16(&buffer->header.buffer_length) < 914 MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH || 915 buffer->aio_subpage.header.page_code != 916 BMIC_SENSE_FEATURE_IO_PAGE || 917 buffer->aio_subpage.header.subpage_code != 918 BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE || 919 get_unaligned_le16(&buffer->aio_subpage.header.page_length) < 920 MINIMUM_AIO_SUBPAGE_LENGTH) { 921 goto error; 922 } 923 924 ctrl_info->max_transfer_encrypted_sas_sata = 925 pqi_aio_limit_to_bytes( 926 &buffer->aio_subpage.max_transfer_encrypted_sas_sata); 927 928 ctrl_info->max_transfer_encrypted_nvme = 929 pqi_aio_limit_to_bytes( 930 &buffer->aio_subpage.max_transfer_encrypted_nvme); 931 932 ctrl_info->max_write_raid_5_6 = 933 pqi_aio_limit_to_bytes( 934 &buffer->aio_subpage.max_write_raid_5_6); 935 936 ctrl_info->max_write_raid_1_10_2drive = 937 pqi_aio_limit_to_bytes( 938 &buffer->aio_subpage.max_write_raid_1_10_2drive); 939 940 ctrl_info->max_write_raid_1_10_3drive = 941 pqi_aio_limit_to_bytes( 942 &buffer->aio_subpage.max_write_raid_1_10_3drive); 943 944 error: 945 kfree(buffer); 946 947 return rc; 948 } 949 950 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 951 enum bmic_flush_cache_shutdown_event shutdown_event) 952 { 953 int rc; 954 struct bmic_flush_cache *flush_cache; 955 956 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 957 if (!flush_cache) 958 return -ENOMEM; 959 960 flush_cache->shutdown_event = shutdown_event; 961 962 rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache, 963 sizeof(*flush_cache)); 964 965 kfree(flush_cache); 966 967 return rc; 968 } 969 970 int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, 971 struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length, 972 struct pqi_raid_error_info *error_info) 973 { 974 return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU, 975 buffer, 
buffer_length, error_info); 976 } 977 978 #define PQI_FETCH_PTRAID_DATA (1 << 31) 979 980 static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) 981 { 982 int rc; 983 struct bmic_diag_options *diag; 984 985 diag = kzalloc(sizeof(*diag), GFP_KERNEL); 986 if (!diag) 987 return -ENOMEM; 988 989 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, 990 diag, sizeof(*diag)); 991 if (rc) 992 goto out; 993 994 diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); 995 996 rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, 997 sizeof(*diag)); 998 999 out: 1000 kfree(diag); 1001 1002 return rc; 1003 } 1004 1005 static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 1006 void *buffer, size_t buffer_length) 1007 { 1008 return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, 1009 buffer, buffer_length); 1010 } 1011 1012 #pragma pack(1) 1013 1014 struct bmic_host_wellness_driver_version { 1015 u8 start_tag[4]; 1016 u8 driver_version_tag[2]; 1017 __le16 driver_version_length; 1018 char driver_version[32]; 1019 u8 dont_write_tag[2]; 1020 u8 end_tag[2]; 1021 }; 1022 1023 #pragma pack() 1024 1025 static int pqi_write_driver_version_to_host_wellness( 1026 struct pqi_ctrl_info *ctrl_info) 1027 { 1028 int rc; 1029 struct bmic_host_wellness_driver_version *buffer; 1030 size_t buffer_length; 1031 1032 buffer_length = sizeof(*buffer); 1033 1034 buffer = kmalloc(buffer_length, GFP_KERNEL); 1035 if (!buffer) 1036 return -ENOMEM; 1037 1038 buffer->start_tag[0] = '<'; 1039 buffer->start_tag[1] = 'H'; 1040 buffer->start_tag[2] = 'W'; 1041 buffer->start_tag[3] = '>'; 1042 buffer->driver_version_tag[0] = 'D'; 1043 buffer->driver_version_tag[1] = 'V'; 1044 put_unaligned_le16(sizeof(buffer->driver_version), 1045 &buffer->driver_version_length); 1046 strscpy(buffer->driver_version, "Linux " DRIVER_VERSION, 1047 sizeof(buffer->driver_version)); 1048 buffer->dont_write_tag[0] = 'D'; 1049 buffer->dont_write_tag[1] = 'W'; 1050 buffer->end_tag[0] = 'Z'; 1051 buffer->end_tag[1] = 'Z'; 1052 1053 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1054 1055 kfree(buffer); 1056 1057 return rc; 1058 } 1059 1060 #pragma pack(1) 1061 1062 struct bmic_host_wellness_time { 1063 u8 start_tag[4]; 1064 u8 time_tag[2]; 1065 __le16 time_length; 1066 u8 time[8]; 1067 u8 dont_write_tag[2]; 1068 u8 end_tag[2]; 1069 }; 1070 1071 #pragma pack() 1072 1073 static int pqi_write_current_time_to_host_wellness( 1074 struct pqi_ctrl_info *ctrl_info) 1075 { 1076 int rc; 1077 struct bmic_host_wellness_time *buffer; 1078 size_t buffer_length; 1079 time64_t local_time; 1080 unsigned int year; 1081 struct tm tm; 1082 1083 buffer_length = sizeof(*buffer); 1084 1085 buffer = kmalloc(buffer_length, GFP_KERNEL); 1086 if (!buffer) 1087 return -ENOMEM; 1088 1089 buffer->start_tag[0] = '<'; 1090 buffer->start_tag[1] = 'H'; 1091 buffer->start_tag[2] = 'W'; 1092 buffer->start_tag[3] = '>'; 1093 buffer->time_tag[0] = 'T'; 1094 buffer->time_tag[1] = 'D'; 1095 put_unaligned_le16(sizeof(buffer->time), 1096 &buffer->time_length); 1097 1098 local_time = ktime_get_real_seconds(); 1099 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 1100 year = tm.tm_year + 1900; 1101 1102 buffer->time[0] = bin2bcd(tm.tm_hour); 1103 buffer->time[1] = bin2bcd(tm.tm_min); 1104 buffer->time[2] = bin2bcd(tm.tm_sec); 1105 buffer->time[3] = 0; 1106 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 1107 buffer->time[5] = bin2bcd(tm.tm_mday); 1108 buffer->time[6] = bin2bcd(year / 100); 1109 buffer->time[7] 
= bin2bcd(year % 100); 1110 1111 buffer->dont_write_tag[0] = 'D'; 1112 buffer->dont_write_tag[1] = 'W'; 1113 buffer->end_tag[0] = 'Z'; 1114 buffer->end_tag[1] = 'Z'; 1115 1116 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 1117 1118 kfree(buffer); 1119 1120 return rc; 1121 } 1122 1123 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 1124 1125 static void pqi_update_time_worker(struct work_struct *work) 1126 { 1127 int rc; 1128 struct pqi_ctrl_info *ctrl_info; 1129 1130 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1131 update_time_work); 1132 1133 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 1134 if (rc) 1135 dev_warn(&ctrl_info->pci_dev->dev, 1136 "error updating time on controller\n"); 1137 1138 schedule_delayed_work(&ctrl_info->update_time_work, 1139 PQI_UPDATE_TIME_WORK_INTERVAL); 1140 } 1141 1142 static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1143 { 1144 schedule_delayed_work(&ctrl_info->update_time_work, 0); 1145 } 1146 1147 static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info) 1148 { 1149 cancel_delayed_work_sync(&ctrl_info->update_time_work); 1150 } 1151 1152 static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, 1153 size_t buffer_length) 1154 { 1155 return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length); 1156 } 1157 1158 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer) 1159 { 1160 int rc; 1161 size_t lun_list_length; 1162 size_t lun_data_length; 1163 size_t new_lun_list_length; 1164 void *lun_data = NULL; 1165 struct report_lun_header *report_lun_header; 1166 1167 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 1168 if (!report_lun_header) { 1169 rc = -ENOMEM; 1170 goto out; 1171 } 1172 1173 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header)); 1174 if (rc) 1175 goto out; 1176 1177 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 1178 1179 again: 1180 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 1181 1182 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 1183 if (!lun_data) { 1184 rc = -ENOMEM; 1185 goto out; 1186 } 1187 1188 if (lun_list_length == 0) { 1189 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 1190 goto out; 1191 } 1192 1193 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 1194 if (rc) 1195 goto out; 1196 1197 new_lun_list_length = 1198 get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length); 1199 1200 if (new_lun_list_length > lun_list_length) { 1201 lun_list_length = new_lun_list_length; 1202 kfree(lun_data); 1203 goto again; 1204 } 1205 1206 out: 1207 kfree(report_lun_header); 1208 1209 if (rc) { 1210 kfree(lun_data); 1211 lun_data = NULL; 1212 } 1213 1214 *buffer = lun_data; 1215 1216 return rc; 1217 } 1218 1219 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1220 { 1221 int rc; 1222 unsigned int i; 1223 u8 rpl_response_format; 1224 u32 num_physicals; 1225 void *rpl_list; 1226 struct report_lun_header *rpl_header; 1227 struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list; 1228 struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list; 1229 1230 rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list); 1231 if (rc) 1232 return rc; 1233 1234 if (ctrl_info->rpl_extended_format_4_5_supported) { 1235 rpl_header = rpl_list; 1236 
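/*
 * A format 4 response already carries 16-byte WWIDs and is returned as-is;
 * a format 2 response is converted below into the 16-byte WWID layout with
 * the upper 8 WWID bytes zeroed.
 */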
rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK; 1237 if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) { 1238 *buffer = rpl_list; 1239 return 0; 1240 } else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) { 1241 dev_err(&ctrl_info->pci_dev->dev, 1242 "RPL returned unsupported data format %u\n", 1243 rpl_response_format); 1244 return -EINVAL; 1245 } else { 1246 dev_warn(&ctrl_info->pci_dev->dev, 1247 "RPL returned extended format 2 instead of 4\n"); 1248 } 1249 } 1250 1251 rpl_8byte_wwid_list = rpl_list; 1252 num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]); 1253 1254 rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries, 1255 num_physicals), GFP_KERNEL); 1256 if (!rpl_16byte_wwid_list) 1257 return -ENOMEM; 1258 1259 put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid), 1260 &rpl_16byte_wwid_list->header.list_length); 1261 rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags; 1262 1263 for (i = 0; i < num_physicals; i++) { 1264 memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid)); 1265 memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid)); 1266 memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8); 1267 rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type; 1268 rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags; 1269 rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count; 1270 rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths; 1271 rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle; 1272 } 1273 1274 kfree(rpl_8byte_wwid_list); 1275 *buffer = rpl_16byte_wwid_list; 1276 1277 return 0; 1278 } 1279 1280 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer) 1281 { 1282 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 1283 } 1284 1285 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 1286 struct report_phys_lun_16byte_wwid_list **physdev_list, 1287 struct report_log_lun_list **logdev_list) 1288 { 1289 int rc; 1290 size_t logdev_list_length; 1291 size_t logdev_data_length; 1292 struct report_log_lun_list *internal_logdev_list; 1293 struct report_log_lun_list *logdev_data; 1294 struct report_lun_header report_lun_header; 1295 1296 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 1297 if (rc) 1298 dev_err(&ctrl_info->pci_dev->dev, 1299 "report physical LUNs failed\n"); 1300 1301 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 1302 if (rc) 1303 dev_err(&ctrl_info->pci_dev->dev, 1304 "report logical LUNs failed\n"); 1305 1306 /* 1307 * Tack the controller itself onto the end of the logical device list 1308 * by adding a list entry that is all zeros. 
1309 */ 1310 1311 logdev_data = *logdev_list; 1312 1313 if (logdev_data) { 1314 logdev_list_length = 1315 get_unaligned_be32(&logdev_data->header.list_length); 1316 } else { 1317 memset(&report_lun_header, 0, sizeof(report_lun_header)); 1318 logdev_data = 1319 (struct report_log_lun_list *)&report_lun_header; 1320 logdev_list_length = 0; 1321 } 1322 1323 logdev_data_length = sizeof(struct report_lun_header) + 1324 logdev_list_length; 1325 1326 internal_logdev_list = kmalloc(logdev_data_length + 1327 sizeof(struct report_log_lun), GFP_KERNEL); 1328 if (!internal_logdev_list) { 1329 kfree(*logdev_list); 1330 *logdev_list = NULL; 1331 return -ENOMEM; 1332 } 1333 1334 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 1335 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 1336 sizeof(struct report_log_lun)); 1337 put_unaligned_be32(logdev_list_length + 1338 sizeof(struct report_log_lun), 1339 &internal_logdev_list->header.list_length); 1340 1341 kfree(*logdev_list); 1342 *logdev_list = internal_logdev_list; 1343 1344 return 0; 1345 } 1346 1347 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 1348 int bus, int target, int lun) 1349 { 1350 device->bus = bus; 1351 device->target = target; 1352 device->lun = lun; 1353 } 1354 1355 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 1356 { 1357 u8 *scsi3addr; 1358 u32 lunid; 1359 int bus; 1360 int target; 1361 int lun; 1362 1363 scsi3addr = device->scsi3addr; 1364 lunid = get_unaligned_le32(scsi3addr); 1365 1366 if (pqi_is_hba_lunid(scsi3addr)) { 1367 /* The specified device is the controller. */ 1368 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 1369 device->target_lun_valid = true; 1370 return; 1371 } 1372 1373 if (pqi_is_logical_device(device)) { 1374 if (device->is_external_raid_device) { 1375 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 1376 target = (lunid >> 16) & 0x3fff; 1377 lun = lunid & 0xff; 1378 } else { 1379 bus = PQI_RAID_VOLUME_BUS; 1380 target = 0; 1381 lun = lunid & 0x3fff; 1382 } 1383 pqi_set_bus_target_lun(device, bus, target, lun); 1384 device->target_lun_valid = true; 1385 return; 1386 } 1387 1388 /* 1389 * Defer target and LUN assignment for non-controller physical devices 1390 * because the SAS transport layer will make these assignments later. 
1391 */ 1392 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 1393 } 1394 1395 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 1396 struct pqi_scsi_dev *device) 1397 { 1398 int rc; 1399 u8 raid_level; 1400 u8 *buffer; 1401 1402 raid_level = SA_RAID_UNKNOWN; 1403 1404 buffer = kmalloc(64, GFP_KERNEL); 1405 if (buffer) { 1406 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1407 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 1408 if (rc == 0) { 1409 raid_level = buffer[8]; 1410 if (raid_level > SA_RAID_MAX) 1411 raid_level = SA_RAID_UNKNOWN; 1412 } 1413 kfree(buffer); 1414 } 1415 1416 device->raid_level = raid_level; 1417 } 1418 1419 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1420 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1421 { 1422 char *err_msg; 1423 u32 raid_map_size; 1424 u32 r5or6_blocks_per_row; 1425 1426 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1427 1428 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1429 err_msg = "RAID map too small"; 1430 goto bad_raid_map; 1431 } 1432 1433 if (device->raid_level == SA_RAID_1) { 1434 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1435 err_msg = "invalid RAID-1 map"; 1436 goto bad_raid_map; 1437 } 1438 } else if (device->raid_level == SA_RAID_TRIPLE) { 1439 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1440 err_msg = "invalid RAID-1(Triple) map"; 1441 goto bad_raid_map; 1442 } 1443 } else if ((device->raid_level == SA_RAID_5 || 1444 device->raid_level == SA_RAID_6) && 1445 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1446 /* RAID 50/60 */ 1447 r5or6_blocks_per_row = 1448 get_unaligned_le16(&raid_map->strip_size) * 1449 get_unaligned_le16(&raid_map->data_disks_per_row); 1450 if (r5or6_blocks_per_row == 0) { 1451 err_msg = "invalid RAID-5 or RAID-6 map"; 1452 goto bad_raid_map; 1453 } 1454 } 1455 1456 return 0; 1457 1458 bad_raid_map: 1459 dev_warn(&ctrl_info->pci_dev->dev, 1460 "logical device %08x%08x %s\n", 1461 *((u32 *)&device->scsi3addr), 1462 *((u32 *)&device->scsi3addr[4]), err_msg); 1463 1464 return -EINVAL; 1465 } 1466 1467 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1468 struct pqi_scsi_dev *device) 1469 { 1470 int rc; 1471 u32 raid_map_size; 1472 struct raid_map *raid_map; 1473 1474 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1475 if (!raid_map) 1476 return -ENOMEM; 1477 1478 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1479 device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL); 1480 if (rc) 1481 goto error; 1482 1483 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1484 1485 if (raid_map_size > sizeof(*raid_map)) { 1486 1487 kfree(raid_map); 1488 1489 raid_map = kmalloc(raid_map_size, GFP_KERNEL); 1490 if (!raid_map) 1491 return -ENOMEM; 1492 1493 rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP, 1494 device->scsi3addr, raid_map, raid_map_size, 0, NULL); 1495 if (rc) 1496 goto error; 1497 1498 if (get_unaligned_le32(&raid_map->structure_size) 1499 != raid_map_size) { 1500 dev_warn(&ctrl_info->pci_dev->dev, 1501 "requested %u bytes, received %u bytes\n", 1502 raid_map_size, 1503 get_unaligned_le32(&raid_map->structure_size)); 1504 rc = -EINVAL; 1505 goto error; 1506 } 1507 } 1508 1509 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 1510 if (rc) 1511 goto error; 1512 1513 device->raid_io_stats = alloc_percpu(struct pqi_raid_io_stats); 1514 if (!device->raid_io_stats) { 1515 rc = -ENOMEM; 1516 goto error; 1517 } 1518 
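/* The device takes ownership of raid_map here; it is freed only on the error path below. */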
1519 device->raid_map = raid_map; 1520 1521 return 0; 1522 1523 error: 1524 kfree(raid_map); 1525 1526 return rc; 1527 } 1528 1529 static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info, 1530 struct pqi_scsi_dev *device) 1531 { 1532 if (!ctrl_info->lv_drive_type_mix_valid) { 1533 device->max_transfer_encrypted = ~0; 1534 return; 1535 } 1536 1537 switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) { 1538 case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY: 1539 case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY: 1540 case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY: 1541 case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY: 1542 case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY: 1543 case LV_DRIVE_TYPE_MIX_SAS_ONLY: 1544 case LV_DRIVE_TYPE_MIX_SATA_ONLY: 1545 device->max_transfer_encrypted = 1546 ctrl_info->max_transfer_encrypted_sas_sata; 1547 break; 1548 case LV_DRIVE_TYPE_MIX_NVME_ONLY: 1549 device->max_transfer_encrypted = 1550 ctrl_info->max_transfer_encrypted_nvme; 1551 break; 1552 case LV_DRIVE_TYPE_MIX_UNKNOWN: 1553 case LV_DRIVE_TYPE_MIX_NO_RESTRICTION: 1554 default: 1555 device->max_transfer_encrypted = 1556 min(ctrl_info->max_transfer_encrypted_sas_sata, 1557 ctrl_info->max_transfer_encrypted_nvme); 1558 break; 1559 } 1560 } 1561 1562 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1563 struct pqi_scsi_dev *device) 1564 { 1565 int rc; 1566 u8 *buffer; 1567 u8 bypass_status; 1568 1569 buffer = kmalloc(64, GFP_KERNEL); 1570 if (!buffer) 1571 return; 1572 1573 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1574 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1575 if (rc) 1576 goto out; 1577 1578 #define RAID_BYPASS_STATUS 4 1579 #define RAID_BYPASS_CONFIGURED 0x1 1580 #define RAID_BYPASS_ENABLED 0x2 1581 1582 bypass_status = buffer[RAID_BYPASS_STATUS]; 1583 device->raid_bypass_configured = 1584 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1585 if (device->raid_bypass_configured && 1586 (bypass_status & RAID_BYPASS_ENABLED) && 1587 pqi_get_raid_map(ctrl_info, device) == 0) { 1588 device->raid_bypass_enabled = true; 1589 if (get_unaligned_le16(&device->raid_map->flags) & 1590 RAID_MAP_ENCRYPTION_ENABLED) 1591 pqi_set_max_transfer_encrypted(ctrl_info, device); 1592 } 1593 1594 out: 1595 kfree(buffer); 1596 } 1597 1598 /* 1599 * Use vendor-specific VPD to determine online/offline status of a volume. 
1600 */ 1601 1602 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1603 struct pqi_scsi_dev *device) 1604 { 1605 int rc; 1606 size_t page_length; 1607 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1608 bool volume_offline = true; 1609 u32 volume_flags; 1610 struct ciss_vpd_logical_volume_status *vpd; 1611 1612 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1613 if (!vpd) 1614 goto no_buffer; 1615 1616 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1617 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1618 if (rc) 1619 goto out; 1620 1621 if (vpd->page_code != CISS_VPD_LV_STATUS) 1622 goto out; 1623 1624 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1625 volume_status) + vpd->page_length; 1626 if (page_length < sizeof(*vpd)) 1627 goto out; 1628 1629 volume_status = vpd->volume_status; 1630 volume_flags = get_unaligned_be32(&vpd->flags); 1631 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1632 1633 out: 1634 kfree(vpd); 1635 no_buffer: 1636 device->volume_status = volume_status; 1637 device->volume_offline = volume_offline; 1638 } 1639 1640 #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 1641 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 1642 #define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 1643 1644 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, 1645 struct pqi_scsi_dev *device, 1646 struct bmic_identify_physical_device *id_phys) 1647 { 1648 int rc; 1649 1650 memset(id_phys, 0, sizeof(*id_phys)); 1651 1652 rc = pqi_identify_physical_device(ctrl_info, device, 1653 id_phys, sizeof(*id_phys)); 1654 if (rc) { 1655 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1656 return rc; 1657 } 1658 1659 scsi_sanitize_inquiry_string(&id_phys->model[0], 8); 1660 scsi_sanitize_inquiry_string(&id_phys->model[8], 16); 1661 1662 memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor)); 1663 memcpy(device->model, &id_phys->model[8], sizeof(device->model)); 1664 1665 device->box_index = id_phys->box_index; 1666 device->phys_box_on_bus = id_phys->phys_box_on_bus; 1667 device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; 1668 device->queue_depth = 1669 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1670 device->active_path_index = id_phys->active_path_number; 1671 device->path_map = id_phys->redundant_path_present_map; 1672 memcpy(&device->box, 1673 &id_phys->alternate_paths_phys_box_on_port, 1674 sizeof(device->box)); 1675 memcpy(&device->phys_connector, 1676 &id_phys->alternate_paths_phys_connector, 1677 sizeof(device->phys_connector)); 1678 device->bay = id_phys->phys_bay_in_box; 1679 device->lun_count = id_phys->multi_lun_device_lun_count; 1680 if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && 1681 id_phys->phy_count) 1682 device->phy_id = 1683 id_phys->phy_to_phy_map[device->active_path_index]; 1684 else 1685 device->phy_id = 0xFF; 1686 1687 device->ncq_prio_support = 1688 ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & 1689 PQI_DEVICE_NCQ_PRIO_SUPPORTED); 1690 1691 device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); 1692 1693 return 0; 1694 } 1695 1696 static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info, 1697 struct pqi_scsi_dev *device) 1698 { 1699 int rc; 1700 u8 *buffer; 1701 1702 buffer = kmalloc(64, GFP_KERNEL); 1703 if (!buffer) 1704 return -ENOMEM; 1705 1706 /* Send an inquiry to the device to see what it is. 
*/ 1707 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); 1708 if (rc) 1709 goto out; 1710 1711 scsi_sanitize_inquiry_string(&buffer[8], 8); 1712 scsi_sanitize_inquiry_string(&buffer[16], 16); 1713 1714 device->devtype = buffer[0] & 0x1f; 1715 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1716 memcpy(device->model, &buffer[16], sizeof(device->model)); 1717 1718 if (device->devtype == TYPE_DISK) { 1719 if (device->is_external_raid_device) { 1720 device->raid_level = SA_RAID_UNKNOWN; 1721 device->volume_status = CISS_LV_OK; 1722 device->volume_offline = false; 1723 } else { 1724 pqi_get_raid_level(ctrl_info, device); 1725 pqi_get_raid_bypass_status(ctrl_info, device); 1726 pqi_get_volume_status(ctrl_info, device); 1727 } 1728 } 1729 1730 out: 1731 kfree(buffer); 1732 1733 return rc; 1734 } 1735 1736 /* 1737 * Prevent adding drive to OS for some corner cases such as a drive 1738 * undergoing a sanitize (erase) operation. Some OSes will continue to poll 1739 * the drive until the sanitize completes, which can take hours, 1740 * resulting in long bootup delays. Commands such as TUR, READ_CAP 1741 * are allowed, but READ/WRITE cause check condition. So the OS 1742 * cannot check/read the partition table. 1743 * Note: devices that have completed sanitize must be re-enabled 1744 * using the management utility. 1745 */ 1746 static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) 1747 { 1748 return device->erase_in_progress; 1749 } 1750 1751 static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, 1752 struct pqi_scsi_dev *device, 1753 struct bmic_identify_physical_device *id_phys) 1754 { 1755 int rc; 1756 1757 if (device->is_expander_smp_device) 1758 return 0; 1759 1760 if (pqi_is_logical_device(device)) 1761 rc = pqi_get_logical_device_info(ctrl_info, device); 1762 else 1763 rc = pqi_get_physical_device_info(ctrl_info, device, id_phys); 1764 1765 return rc; 1766 } 1767 1768 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1769 struct pqi_scsi_dev *device, 1770 struct bmic_identify_physical_device *id_phys) 1771 { 1772 int rc; 1773 1774 rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); 1775 1776 if (rc == 0 && device->lun_count == 0) 1777 device->lun_count = 1; 1778 1779 return rc; 1780 } 1781 1782 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1783 struct pqi_scsi_dev *device) 1784 { 1785 char *status; 1786 static const char unknown_state_str[] = 1787 "Volume is in an unknown state (%u)"; 1788 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1789 1790 switch (device->volume_status) { 1791 case CISS_LV_OK: 1792 status = "Volume online"; 1793 break; 1794 case CISS_LV_FAILED: 1795 status = "Volume failed"; 1796 break; 1797 case CISS_LV_NOT_CONFIGURED: 1798 status = "Volume not configured"; 1799 break; 1800 case CISS_LV_DEGRADED: 1801 status = "Volume degraded"; 1802 break; 1803 case CISS_LV_READY_FOR_RECOVERY: 1804 status = "Volume ready for recovery operation"; 1805 break; 1806 case CISS_LV_UNDERGOING_RECOVERY: 1807 status = "Volume undergoing recovery"; 1808 break; 1809 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1810 status = "Wrong physical drive was replaced"; 1811 break; 1812 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1813 status = "A physical drive not properly connected"; 1814 break; 1815 case CISS_LV_HARDWARE_OVERHEATING: 1816 status = "Hardware is overheating"; 1817 break; 1818 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1819 status = "Hardware has overheated"; 
1820 break; 1821 case CISS_LV_UNDERGOING_EXPANSION: 1822 status = "Volume undergoing expansion"; 1823 break; 1824 case CISS_LV_NOT_AVAILABLE: 1825 status = "Volume waiting for transforming volume"; 1826 break; 1827 case CISS_LV_QUEUED_FOR_EXPANSION: 1828 status = "Volume queued for expansion"; 1829 break; 1830 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1831 status = "Volume disabled due to SCSI ID conflict"; 1832 break; 1833 case CISS_LV_EJECTED: 1834 status = "Volume has been ejected"; 1835 break; 1836 case CISS_LV_UNDERGOING_ERASE: 1837 status = "Volume undergoing background erase"; 1838 break; 1839 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1840 status = "Volume ready for predictive spare rebuild"; 1841 break; 1842 case CISS_LV_UNDERGOING_RPI: 1843 status = "Volume undergoing rapid parity initialization"; 1844 break; 1845 case CISS_LV_PENDING_RPI: 1846 status = "Volume queued for rapid parity initialization"; 1847 break; 1848 case CISS_LV_ENCRYPTED_NO_KEY: 1849 status = "Encrypted volume inaccessible - key not present"; 1850 break; 1851 case CISS_LV_UNDERGOING_ENCRYPTION: 1852 status = "Volume undergoing encryption process"; 1853 break; 1854 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1855 status = "Volume undergoing encryption re-keying process"; 1856 break; 1857 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1858 status = "Volume encrypted but encryption is disabled"; 1859 break; 1860 case CISS_LV_PENDING_ENCRYPTION: 1861 status = "Volume pending migration to encrypted state"; 1862 break; 1863 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1864 status = "Volume pending encryption rekeying"; 1865 break; 1866 case CISS_LV_NOT_SUPPORTED: 1867 status = "Volume not supported on this controller"; 1868 break; 1869 case CISS_LV_STATUS_UNAVAILABLE: 1870 status = "Volume status not available"; 1871 break; 1872 default: 1873 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1874 unknown_state_str, device->volume_status); 1875 status = unknown_state_buffer; 1876 break; 1877 } 1878 1879 dev_info(&ctrl_info->pci_dev->dev, 1880 "scsi %d:%d:%d:%d %s\n", 1881 ctrl_info->scsi_host->host_no, 1882 device->bus, device->target, device->lun, status); 1883 } 1884 1885 static void pqi_rescan_worker(struct work_struct *work) 1886 { 1887 struct pqi_ctrl_info *ctrl_info; 1888 1889 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1890 rescan_work); 1891 1892 pqi_scan_scsi_devices(ctrl_info); 1893 } 1894 1895 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1896 struct pqi_scsi_dev *device) 1897 { 1898 int rc; 1899 1900 if (pqi_is_logical_device(device)) 1901 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1902 device->target, device->lun); 1903 else 1904 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1905 1906 return rc; 1907 } 1908 1909 #define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS (20 * 1000) 1910 1911 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) 1912 { 1913 int rc; 1914 int lun; 1915 1916 for (lun = 0; lun < device->lun_count; lun++) { 1917 rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, 1918 PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); 1919 if (rc) 1920 dev_err(&ctrl_info->pci_dev->dev, 1921 "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n", 1922 ctrl_info->scsi_host->host_no, device->bus, 1923 device->target, lun, 1924 atomic_read(&device->scsi_cmds_outstanding[lun])); 1925 } 1926 1927 if (pqi_is_logical_device(device)) 1928 scsi_remove_device(device->sdev); 
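/* Non-logical (physical) devices are detached via the SAS transport layer. */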
1929 else 1930 pqi_remove_sas_device(device); 1931 1932 pqi_device_remove_start(device); 1933 } 1934 1935 /* Assumes the SCSI device list lock is held. */ 1936 1937 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1938 int bus, int target, int lun) 1939 { 1940 struct pqi_scsi_dev *device; 1941 1942 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1943 if (device->bus == bus && device->target == target && device->lun == lun) 1944 return device; 1945 1946 return NULL; 1947 } 1948 1949 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) 1950 { 1951 if (dev1->is_physical_device != dev2->is_physical_device) 1952 return false; 1953 1954 if (dev1->is_physical_device) 1955 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1956 1957 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1958 } 1959 1960 enum pqi_find_result { 1961 DEVICE_NOT_FOUND, 1962 DEVICE_CHANGED, 1963 DEVICE_SAME, 1964 }; 1965 1966 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1967 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1968 { 1969 struct pqi_scsi_dev *device; 1970 1971 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1972 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1973 *matching_device = device; 1974 if (pqi_device_equal(device_to_find, device)) { 1975 if (device_to_find->volume_offline) 1976 return DEVICE_CHANGED; 1977 return DEVICE_SAME; 1978 } 1979 return DEVICE_CHANGED; 1980 } 1981 } 1982 1983 return DEVICE_NOT_FOUND; 1984 } 1985 1986 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1987 { 1988 if (device->is_expander_smp_device) 1989 return "Enclosure SMP "; 1990 1991 return scsi_device_type(device->devtype); 1992 } 1993 1994 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1995 1996 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1997 char *action, struct pqi_scsi_dev *device) 1998 { 1999 ssize_t count; 2000 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 2001 2002 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 2003 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 2004 2005 if (device->target_lun_valid) 2006 count += scnprintf(buffer + count, 2007 PQI_DEV_INFO_BUFFER_LENGTH - count, 2008 "%d:%d", 2009 device->target, 2010 device->lun); 2011 else 2012 count += scnprintf(buffer + count, 2013 PQI_DEV_INFO_BUFFER_LENGTH - count, 2014 "-:-"); 2015 2016 if (pqi_is_logical_device(device)) { 2017 count += scnprintf(buffer + count, 2018 PQI_DEV_INFO_BUFFER_LENGTH - count, 2019 " %08x%08x", 2020 *((u32 *)&device->scsi3addr), 2021 *((u32 *)&device->scsi3addr[4])); 2022 } else if (ctrl_info->rpl_extended_format_4_5_supported) { 2023 if (device->device_type == SA_DEVICE_TYPE_NVME) 2024 count += scnprintf(buffer + count, 2025 PQI_DEV_INFO_BUFFER_LENGTH - count, 2026 " %016llx%016llx", 2027 get_unaligned_be64(&device->wwid[0]), 2028 get_unaligned_be64(&device->wwid[8])); 2029 else 2030 count += scnprintf(buffer + count, 2031 PQI_DEV_INFO_BUFFER_LENGTH - count, 2032 " %016llx", 2033 get_unaligned_be64(&device->wwid[0])); 2034 } else { 2035 count += scnprintf(buffer + count, 2036 PQI_DEV_INFO_BUFFER_LENGTH - count, 2037 " %016llx", 2038 get_unaligned_be64(&device->wwid[0])); 2039 } 2040 2041 2042 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 2043 " %s %.8s %.16s ", 2044 pqi_device_type(device), 2045 
device->vendor, 2046 device->model); 2047 2048 if (pqi_is_logical_device(device)) { 2049 if (device->devtype == TYPE_DISK) 2050 count += scnprintf(buffer + count, 2051 PQI_DEV_INFO_BUFFER_LENGTH - count, 2052 "SSDSmartPathCap%c En%c %-12s", 2053 device->raid_bypass_configured ? '+' : '-', 2054 device->raid_bypass_enabled ? '+' : '-', 2055 pqi_raid_level_to_string(device->raid_level)); 2056 } else { 2057 count += scnprintf(buffer + count, 2058 PQI_DEV_INFO_BUFFER_LENGTH - count, 2059 "AIO%c", device->aio_enabled ? '+' : '-'); 2060 if (device->devtype == TYPE_DISK || 2061 device->devtype == TYPE_ZBC) 2062 count += scnprintf(buffer + count, 2063 PQI_DEV_INFO_BUFFER_LENGTH - count, 2064 " qd=%-6d", device->queue_depth); 2065 } 2066 2067 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 2068 } 2069 2070 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) 2071 { 2072 u32 raid_map1_size; 2073 u32 raid_map2_size; 2074 2075 if (raid_map1 == NULL || raid_map2 == NULL) 2076 return raid_map1 == raid_map2; 2077 2078 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); 2079 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); 2080 2081 if (raid_map1_size != raid_map2_size) 2082 return false; 2083 2084 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; 2085 } 2086 2087 /* Assumes the SCSI device list lock is held. */ 2088 2089 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, 2090 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) 2091 { 2092 existing_device->device_type = new_device->device_type; 2093 existing_device->bus = new_device->bus; 2094 if (new_device->target_lun_valid) { 2095 existing_device->target = new_device->target; 2096 existing_device->lun = new_device->lun; 2097 existing_device->target_lun_valid = true; 2098 } 2099 2100 /* By definition, the scsi3addr and wwid fields are already the same. */ 2101 2102 existing_device->is_physical_device = new_device->is_physical_device; 2103 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); 2104 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); 2105 existing_device->sas_address = new_device->sas_address; 2106 existing_device->queue_depth = new_device->queue_depth; 2107 existing_device->device_offline = false; 2108 existing_device->lun_count = new_device->lun_count; 2109 2110 if (pqi_is_logical_device(existing_device)) { 2111 existing_device->is_external_raid_device = new_device->is_external_raid_device; 2112 2113 if (existing_device->devtype == TYPE_DISK) { 2114 existing_device->raid_level = new_device->raid_level; 2115 existing_device->volume_status = new_device->volume_status; 2116 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); 2117 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { 2118 kfree(existing_device->raid_map); 2119 existing_device->raid_map = new_device->raid_map; 2120 /* To prevent this from being freed later. 
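* Ownership of the raid_map buffer has just moved to existing_device, so the pointer in new_device is cleared below; otherwise pqi_free_device() would free the same map again when the temporary new_device structure is released at the end of the rescan.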
*/ 2121 new_device->raid_map = NULL; 2122 } 2123 if (new_device->raid_bypass_enabled && existing_device->raid_io_stats == NULL) { 2124 existing_device->raid_io_stats = new_device->raid_io_stats; 2125 new_device->raid_io_stats = NULL; 2126 } 2127 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; 2128 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; 2129 } 2130 } else { 2131 existing_device->aio_enabled = new_device->aio_enabled; 2132 existing_device->aio_handle = new_device->aio_handle; 2133 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; 2134 existing_device->active_path_index = new_device->active_path_index; 2135 existing_device->phy_id = new_device->phy_id; 2136 existing_device->path_map = new_device->path_map; 2137 existing_device->bay = new_device->bay; 2138 existing_device->box_index = new_device->box_index; 2139 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 2140 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; 2141 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); 2142 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); 2143 } 2144 } 2145 2146 static inline void pqi_free_device(struct pqi_scsi_dev *device) 2147 { 2148 if (device) { 2149 free_percpu(device->raid_io_stats); 2150 kfree(device->raid_map); 2151 kfree(device); 2152 } 2153 } 2154 2155 /* 2156 * Called when exposing a new device to the OS fails in order to re-adjust 2157 * our internal SCSI device list to match the SCSI ML's view. 2158 */ 2159 2160 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 2161 struct pqi_scsi_dev *device) 2162 { 2163 unsigned long flags; 2164 2165 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2166 list_del(&device->scsi_device_list_entry); 2167 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2168 2169 /* Allow the device structure to be freed later. */ 2170 device->keep_device = false; 2171 } 2172 2173 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 2174 { 2175 if (device->is_expander_smp_device) 2176 return device->sas_port != NULL; 2177 2178 return device->sdev != NULL; 2179 } 2180 2181 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) 2182 { 2183 unsigned int lun; 2184 struct pqi_tmf_work *tmf_work; 2185 2186 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) 2187 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); 2188 } 2189 2190 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) 2191 { 2192 if (pqi_device_in_remove(device)) 2193 return false; 2194 2195 if (device->sdev == NULL) 2196 return false; 2197 2198 if (!scsi_device_online(device->sdev)) 2199 return false; 2200 2201 return device->rescan; 2202 } 2203 2204 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 2205 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 2206 { 2207 int rc; 2208 unsigned int i; 2209 unsigned long flags; 2210 enum pqi_find_result find_result; 2211 struct pqi_scsi_dev *device; 2212 struct pqi_scsi_dev *next; 2213 struct pqi_scsi_dev *matching_device; 2214 LIST_HEAD(add_list); 2215 LIST_HEAD(delete_list); 2216 2217 /* 2218 * The idea here is to do as little work as possible while holding the 2219 * spinlock. 
That's why we go to great pains to defer anything other 2220 * than updating the internal device list until after we release the 2221 * spinlock. 2222 */ 2223 2224 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2225 2226 /* Assume that all devices in the existing list have gone away. */ 2227 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 2228 device->device_gone = true; 2229 2230 for (i = 0; i < num_new_devices; i++) { 2231 device = new_device_list[i]; 2232 2233 find_result = pqi_scsi_find_entry(ctrl_info, device, 2234 &matching_device); 2235 2236 switch (find_result) { 2237 case DEVICE_SAME: 2238 /* 2239 * The newly found device is already in the existing 2240 * device list. 2241 */ 2242 device->new_device = false; 2243 matching_device->device_gone = false; 2244 pqi_scsi_update_device(ctrl_info, matching_device, device); 2245 break; 2246 case DEVICE_NOT_FOUND: 2247 /* 2248 * The newly found device is NOT in the existing device 2249 * list. 2250 */ 2251 device->new_device = true; 2252 break; 2253 case DEVICE_CHANGED: 2254 /* 2255 * The original device has gone away and we need to add 2256 * the new device. 2257 */ 2258 device->new_device = true; 2259 break; 2260 } 2261 } 2262 2263 /* Process all devices that have gone away. */ 2264 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2265 scsi_device_list_entry) { 2266 if (device->device_gone) { 2267 list_del(&device->scsi_device_list_entry); 2268 list_add_tail(&device->delete_list_entry, &delete_list); 2269 } 2270 } 2271 2272 /* Process all new devices. */ 2273 for (i = 0; i < num_new_devices; i++) { 2274 device = new_device_list[i]; 2275 if (!device->new_device) 2276 continue; 2277 if (device->volume_offline) 2278 continue; 2279 list_add_tail(&device->scsi_device_list_entry, 2280 &ctrl_info->scsi_device_list); 2281 list_add_tail(&device->add_list_entry, &add_list); 2282 /* To prevent this device structure from being freed later. */ 2283 device->keep_device = true; 2284 pqi_init_device_tmf_work(device); 2285 } 2286 2287 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2288 2289 /* 2290 * If OFA is in progress and there are devices that need to be deleted, 2291 * allow any pending reset operations to continue and unblock any SCSI 2292 * requests before removal. 2293 */ 2294 if (pqi_ofa_in_progress(ctrl_info)) { 2295 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) 2296 if (pqi_is_device_added(device)) 2297 pqi_device_remove_start(device); 2298 pqi_ctrl_unblock_device_reset(ctrl_info); 2299 pqi_scsi_unblock_requests(ctrl_info); 2300 } 2301 2302 /* Remove all devices that have gone away. */ 2303 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 2304 if (device->volume_offline) { 2305 pqi_dev_info(ctrl_info, "offline", device); 2306 pqi_show_volume_status(ctrl_info, device); 2307 } else { 2308 pqi_dev_info(ctrl_info, "removed", device); 2309 } 2310 if (pqi_is_device_added(device)) 2311 pqi_remove_device(ctrl_info, device); 2312 list_del(&device->delete_list_entry); 2313 pqi_free_device(device); 2314 } 2315 2316 /* 2317 * Notify the SML of any existing device changes such as; 2318 * queue depth, device size. 2319 */ 2320 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 2321 /* 2322 * Check for queue depth change. 
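* If the controller reported a queue depth different from the one last advertised, the new value is handed to the SCSI midlayer via scsi_change_queue_depth() below.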
2323 */ 2324 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { 2325 device->advertised_queue_depth = device->queue_depth; 2326 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); 2327 } 2328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2329 /* 2330 * Check for changes in the device, such as size. 2331 */ 2332 if (pqi_volume_rescan_needed(device)) { 2333 device->rescan = false; 2334 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2335 scsi_rescan_device(device->sdev); 2336 } else { 2337 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2338 } 2339 } 2340 2341 /* Expose any new devices. */ 2342 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 2343 if (!pqi_is_device_added(device)) { 2344 rc = pqi_add_device(ctrl_info, device); 2345 if (rc == 0) { 2346 pqi_dev_info(ctrl_info, "added", device); 2347 } else { 2348 dev_warn(&ctrl_info->pci_dev->dev, 2349 "scsi %d:%d:%d:%d addition failed, device not added\n", 2350 ctrl_info->scsi_host->host_no, 2351 device->bus, device->target, 2352 device->lun); 2353 pqi_fixup_botched_add(ctrl_info, device); 2354 } 2355 } 2356 } 2357 2358 } 2359 2360 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 2361 { 2362 /* 2363 * Only support the HBA controller itself as a RAID 2364 * controller. If it's a RAID controller other than 2365 * the HBA itself (an external RAID controller, for 2366 * example), we don't support it. 2367 */ 2368 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 2369 !pqi_is_hba_lunid(device->scsi3addr)) 2370 return false; 2371 2372 return true; 2373 } 2374 2375 static inline bool pqi_skip_device(u8 *scsi3addr) 2376 { 2377 /* Ignore all masked devices. */ 2378 if (MASKED_DEVICE(scsi3addr)) 2379 return true; 2380 2381 return false; 2382 } 2383 2384 static inline void pqi_mask_device(u8 *scsi3addr) 2385 { 2386 scsi3addr[3] |= 0xc0; 2387 } 2388 2389 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 2390 { 2391 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); 2392 } 2393 2394 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2395 { 2396 int i; 2397 int rc; 2398 LIST_HEAD(new_device_list_head); 2399 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2400 struct report_log_lun_list *logdev_list = NULL; 2401 struct report_phys_lun_16byte_wwid *phys_lun; 2402 struct report_log_lun *log_lun; 2403 struct bmic_identify_physical_device *id_phys = NULL; 2404 u32 num_physicals; 2405 u32 num_logicals; 2406 struct pqi_scsi_dev **new_device_list = NULL; 2407 struct pqi_scsi_dev *device; 2408 struct pqi_scsi_dev *next; 2409 unsigned int num_new_devices; 2410 unsigned int num_valid_devices; 2411 bool is_physical_device; 2412 u8 *scsi3addr; 2413 unsigned int physical_index; 2414 unsigned int logical_index; 2415 static char *out_of_memory_msg = 2416 "failed to allocate memory, device discovery stopped"; 2417 2418 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 2419 if (rc) 2420 goto out; 2421 2422 if (physdev_list) 2423 num_physicals = 2424 get_unaligned_be32(&physdev_list->header.list_length) 2425 / sizeof(physdev_list->lun_entries[0]); 2426 else 2427 num_physicals = 0; 2428 2429 if (logdev_list) 2430 num_logicals = 2431 get_unaligned_be32(&logdev_list->header.list_length) 2432 / sizeof(logdev_list->lun_entries[0]); 2433 else 2434 num_logicals = 0; 2435 2436 if (num_physicals) { 2437 /* 2438 * We need this buffer for calls to 
pqi_get_physical_disk_info() 2439 * below. We allocate it here instead of inside 2440 * pqi_get_physical_disk_info() because it's a fairly large 2441 * buffer. 2442 */ 2443 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2444 if (!id_phys) { 2445 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2446 out_of_memory_msg); 2447 rc = -ENOMEM; 2448 goto out; 2449 } 2450 2451 if (pqi_hide_vsep) { 2452 for (i = num_physicals - 1; i >= 0; i--) { 2453 phys_lun = &physdev_list->lun_entries[i]; 2454 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2455 pqi_mask_device(phys_lun->lunid); 2456 break; 2457 } 2458 } 2459 } 2460 } 2461 2462 if (num_logicals && 2463 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) 2464 ctrl_info->lv_drive_type_mix_valid = true; 2465 2466 num_new_devices = num_physicals + num_logicals; 2467 2468 new_device_list = kmalloc_array(num_new_devices, 2469 sizeof(*new_device_list), 2470 GFP_KERNEL); 2471 if (!new_device_list) { 2472 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2473 rc = -ENOMEM; 2474 goto out; 2475 } 2476 2477 for (i = 0; i < num_new_devices; i++) { 2478 device = kzalloc(sizeof(*device), GFP_KERNEL); 2479 if (!device) { 2480 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2481 out_of_memory_msg); 2482 rc = -ENOMEM; 2483 goto out; 2484 } 2485 list_add_tail(&device->new_device_list_entry, 2486 &new_device_list_head); 2487 } 2488 2489 device = NULL; 2490 num_valid_devices = 0; 2491 physical_index = 0; 2492 logical_index = 0; 2493 2494 for (i = 0; i < num_new_devices; i++) { 2495 2496 if ((!pqi_expose_ld_first && i < num_physicals) || 2497 (pqi_expose_ld_first && i >= num_logicals)) { 2498 is_physical_device = true; 2499 phys_lun = &physdev_list->lun_entries[physical_index++]; 2500 log_lun = NULL; 2501 scsi3addr = phys_lun->lunid; 2502 } else { 2503 is_physical_device = false; 2504 phys_lun = NULL; 2505 log_lun = &logdev_list->lun_entries[logical_index++]; 2506 scsi3addr = log_lun->lunid; 2507 } 2508 2509 if (is_physical_device && pqi_skip_device(scsi3addr)) 2510 continue; 2511 2512 if (device) 2513 device = list_next_entry(device, new_device_list_entry); 2514 else 2515 device = list_first_entry(&new_device_list_head, 2516 struct pqi_scsi_dev, new_device_list_entry); 2517 2518 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2519 device->is_physical_device = is_physical_device; 2520 if (is_physical_device) { 2521 device->device_type = phys_lun->device_type; 2522 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2523 device->is_expander_smp_device = true; 2524 } else { 2525 device->is_external_raid_device = 2526 pqi_is_external_raid_addr(scsi3addr); 2527 } 2528 2529 if (!pqi_is_supported_device(device)) 2530 continue; 2531 2532 /* Gather information about the device. */ 2533 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2534 if (rc == -ENOMEM) { 2535 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2536 out_of_memory_msg); 2537 goto out; 2538 } 2539 if (rc) { 2540 if (device->is_physical_device) 2541 dev_warn(&ctrl_info->pci_dev->dev, 2542 "obtaining device info failed, skipping physical device %016llx%016llx\n", 2543 get_unaligned_be64(&phys_lun->wwid[0]), 2544 get_unaligned_be64(&phys_lun->wwid[8])); 2545 else 2546 dev_warn(&ctrl_info->pci_dev->dev, 2547 "obtaining device info failed, skipping logical device %08x%08x\n", 2548 *((u32 *)&device->scsi3addr), 2549 *((u32 *)&device->scsi3addr[4])); 2550 rc = 0; 2551 continue; 2552 } 2553 2554 /* Do not present disks that the OS cannot fully probe. 
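* At present this covers drives with a sanitize (erase) operation in progress; see pqi_keep_device_offline() and the comment above it.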
*/ 2555 if (pqi_keep_device_offline(device)) 2556 continue; 2557 2558 pqi_assign_bus_target_lun(device); 2559 2560 if (device->is_physical_device) { 2561 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2562 if ((phys_lun->device_flags & 2563 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2564 phys_lun->aio_handle) { 2565 device->aio_enabled = true; 2566 device->aio_handle = 2567 phys_lun->aio_handle; 2568 } 2569 } else { 2570 memcpy(device->volume_id, log_lun->volume_id, 2571 sizeof(device->volume_id)); 2572 } 2573 2574 device->sas_address = get_unaligned_be64(&device->wwid[0]); 2575 2576 new_device_list[num_valid_devices++] = device; 2577 } 2578 2579 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2580 2581 out: 2582 list_for_each_entry_safe(device, next, &new_device_list_head, 2583 new_device_list_entry) { 2584 if (device->keep_device) 2585 continue; 2586 list_del(&device->new_device_list_entry); 2587 pqi_free_device(device); 2588 } 2589 2590 kfree(new_device_list); 2591 kfree(physdev_list); 2592 kfree(logdev_list); 2593 kfree(id_phys); 2594 2595 return rc; 2596 } 2597 2598 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2599 { 2600 int rc; 2601 int mutex_acquired; 2602 2603 if (pqi_ctrl_offline(ctrl_info)) 2604 return -ENXIO; 2605 2606 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 2607 2608 if (!mutex_acquired) { 2609 if (pqi_ctrl_scan_blocked(ctrl_info)) 2610 return -EBUSY; 2611 pqi_schedule_rescan_worker_delayed(ctrl_info); 2612 return -EINPROGRESS; 2613 } 2614 2615 rc = pqi_update_scsi_devices(ctrl_info); 2616 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) 2617 pqi_schedule_rescan_worker_delayed(ctrl_info); 2618 2619 mutex_unlock(&ctrl_info->scan_mutex); 2620 2621 return rc; 2622 } 2623 2624 static void pqi_scan_start(struct Scsi_Host *shost) 2625 { 2626 struct pqi_ctrl_info *ctrl_info; 2627 2628 ctrl_info = shost_to_hba(shost); 2629 2630 pqi_scan_scsi_devices(ctrl_info); 2631 } 2632 2633 /* Returns TRUE if scan is finished. */ 2634 2635 static int pqi_scan_finished(struct Scsi_Host *shost, 2636 unsigned long elapsed_time) 2637 { 2638 struct pqi_ctrl_info *ctrl_info; 2639 2640 ctrl_info = shost_priv(shost); 2641 2642 return !mutex_is_locked(&ctrl_info->scan_mutex); 2643 } 2644 2645 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, 2646 struct raid_map *raid_map, u64 first_block) 2647 { 2648 u32 volume_blk_size; 2649 2650 /* 2651 * Set the encryption tweak values based on logical block address. 2652 * If the block size is 512, the tweak value is equal to the LBA. 2653 * For other block sizes, tweak value is (LBA * block size) / 512. 2654 */ 2655 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2656 if (volume_blk_size != 512) 2657 first_block = (first_block * volume_blk_size) / 512; 2658 2659 encryption_info->data_encryption_key_index = 2660 get_unaligned_le16(&raid_map->data_encryption_key_index); 2661 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2662 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2663 } 2664 2665 /* 2666 * Attempt to perform RAID bypass mapping for a logical volume I/O. 
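* The helpers below decode the CDB, check that the request is eligible for the accelerated (AIO) path for the volume's RAID level, and map the volume LBA range onto the underlying physical drive(s). A request that cannot be mapped this way (for example one spanning more than one row or column of the RAID map) returns PQI_RAID_BYPASS_INELIGIBLE and is sent down the normal RAID path instead.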
2667 */ 2668 2669 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, 2670 struct pqi_scsi_dev_raid_map_data *rmd) 2671 { 2672 bool is_supported = true; 2673 2674 switch (rmd->raid_level) { 2675 case SA_RAID_0: 2676 break; 2677 case SA_RAID_1: 2678 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2679 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) 2680 is_supported = false; 2681 break; 2682 case SA_RAID_TRIPLE: 2683 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2684 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) 2685 is_supported = false; 2686 break; 2687 case SA_RAID_5: 2688 if (rmd->is_write && (!ctrl_info->enable_r5_writes || 2689 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2690 is_supported = false; 2691 break; 2692 case SA_RAID_6: 2693 if (rmd->is_write && (!ctrl_info->enable_r6_writes || 2694 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2695 is_supported = false; 2696 break; 2697 default: 2698 is_supported = false; 2699 break; 2700 } 2701 2702 return is_supported; 2703 } 2704 2705 #define PQI_RAID_BYPASS_INELIGIBLE 1 2706 2707 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, 2708 struct pqi_scsi_dev_raid_map_data *rmd) 2709 { 2710 /* Check for valid opcode, get LBA and block count. */ 2711 switch (scmd->cmnd[0]) { 2712 case WRITE_6: 2713 rmd->is_write = true; 2714 fallthrough; 2715 case READ_6: 2716 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2717 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2718 rmd->block_cnt = (u32)scmd->cmnd[4]; 2719 if (rmd->block_cnt == 0) 2720 rmd->block_cnt = 256; 2721 break; 2722 case WRITE_10: 2723 rmd->is_write = true; 2724 fallthrough; 2725 case READ_10: 2726 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2727 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2728 break; 2729 case WRITE_12: 2730 rmd->is_write = true; 2731 fallthrough; 2732 case READ_12: 2733 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2734 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2735 break; 2736 case WRITE_16: 2737 rmd->is_write = true; 2738 fallthrough; 2739 case READ_16: 2740 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); 2741 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2742 break; 2743 default: 2744 /* Process via normal I/O path. */ 2745 return PQI_RAID_BYPASS_INELIGIBLE; 2746 } 2747 2748 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); 2749 2750 return 0; 2751 } 2752 2753 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, 2754 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) 2755 { 2756 #if BITS_PER_LONG == 32 2757 u64 tmpdiv; 2758 #endif 2759 2760 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; 2761 2762 /* Check for invalid block or wraparound. */ 2763 if (rmd->last_block >= 2764 get_unaligned_le64(&raid_map->volume_blk_cnt) || 2765 rmd->last_block < rmd->first_block) 2766 return PQI_RAID_BYPASS_INELIGIBLE; 2767 2768 rmd->data_disks_per_row = 2769 get_unaligned_le16(&raid_map->data_disks_per_row); 2770 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); 2771 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2772 2773 /* Calculate stripe information for the request. 
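* Worked example with illustrative values: strip_size = 128 and data_disks_per_row = 4 give blocks_per_row = 512, so a request starting at first_block = 1000 falls in first_row = 1 (1000 / 512) with first_row_offset = 488 (1000 - 512) and first_column = 3 (488 / 128).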
*/ 2774 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; 2775 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2776 return PQI_RAID_BYPASS_INELIGIBLE; 2777 #if BITS_PER_LONG == 32 2778 tmpdiv = rmd->first_block; 2779 do_div(tmpdiv, rmd->blocks_per_row); 2780 rmd->first_row = tmpdiv; 2781 tmpdiv = rmd->last_block; 2782 do_div(tmpdiv, rmd->blocks_per_row); 2783 rmd->last_row = tmpdiv; 2784 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); 2785 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); 2786 tmpdiv = rmd->first_row_offset; 2787 do_div(tmpdiv, rmd->strip_size); 2788 rmd->first_column = tmpdiv; 2789 tmpdiv = rmd->last_row_offset; 2790 do_div(tmpdiv, rmd->strip_size); 2791 rmd->last_column = tmpdiv; 2792 #else 2793 rmd->first_row = rmd->first_block / rmd->blocks_per_row; 2794 rmd->last_row = rmd->last_block / rmd->blocks_per_row; 2795 rmd->first_row_offset = (u32)(rmd->first_block - 2796 (rmd->first_row * rmd->blocks_per_row)); 2797 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * 2798 rmd->blocks_per_row)); 2799 rmd->first_column = rmd->first_row_offset / rmd->strip_size; 2800 rmd->last_column = rmd->last_row_offset / rmd->strip_size; 2801 #endif 2802 2803 /* If this isn't a single row/column then give to the controller. */ 2804 if (rmd->first_row != rmd->last_row || 2805 rmd->first_column != rmd->last_column) 2806 return PQI_RAID_BYPASS_INELIGIBLE; 2807 2808 /* Proceeding with driver mapping. */ 2809 rmd->total_disks_per_row = rmd->data_disks_per_row + 2810 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2811 rmd->map_row = ((u32)(rmd->first_row >> 2812 raid_map->parity_rotation_shift)) % 2813 get_unaligned_le16(&raid_map->row_cnt); 2814 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + 2815 rmd->first_column; 2816 2817 return 0; 2818 } 2819 2820 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, 2821 struct raid_map *raid_map) 2822 { 2823 #if BITS_PER_LONG == 32 2824 u64 tmpdiv; 2825 #endif 2826 2827 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2828 return PQI_RAID_BYPASS_INELIGIBLE; 2829 2830 /* RAID 50/60 */ 2831 /* Verify first and last block are in same RAID group. */ 2832 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; 2833 #if BITS_PER_LONG == 32 2834 tmpdiv = rmd->first_block; 2835 rmd->first_group = do_div(tmpdiv, rmd->stripesize); 2836 tmpdiv = rmd->first_group; 2837 do_div(tmpdiv, rmd->blocks_per_row); 2838 rmd->first_group = tmpdiv; 2839 tmpdiv = rmd->last_block; 2840 rmd->last_group = do_div(tmpdiv, rmd->stripesize); 2841 tmpdiv = rmd->last_group; 2842 do_div(tmpdiv, rmd->blocks_per_row); 2843 rmd->last_group = tmpdiv; 2844 #else 2845 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; 2846 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; 2847 #endif 2848 if (rmd->first_group != rmd->last_group) 2849 return PQI_RAID_BYPASS_INELIGIBLE; 2850 2851 /* Verify request is in a single row of RAID 5/6. 
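* For RAID 50/60 the volume is striped across layout_map_count RAID 5/6 groups, so stripesize = blocks_per_row * layout_map_count. The group check above and the row/column checks below reject any request that would span more than one group, row or column.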
*/ 2852 #if BITS_PER_LONG == 32 2853 tmpdiv = rmd->first_block; 2854 do_div(tmpdiv, rmd->stripesize); 2855 rmd->first_row = tmpdiv; 2856 rmd->r5or6_first_row = tmpdiv; 2857 tmpdiv = rmd->last_block; 2858 do_div(tmpdiv, rmd->stripesize); 2859 rmd->r5or6_last_row = tmpdiv; 2860 #else 2861 rmd->first_row = rmd->r5or6_first_row = 2862 rmd->first_block / rmd->stripesize; 2863 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; 2864 #endif 2865 if (rmd->r5or6_first_row != rmd->r5or6_last_row) 2866 return PQI_RAID_BYPASS_INELIGIBLE; 2867 2868 /* Verify request is in a single column. */ 2869 #if BITS_PER_LONG == 32 2870 tmpdiv = rmd->first_block; 2871 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); 2872 tmpdiv = rmd->first_row_offset; 2873 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); 2874 rmd->r5or6_first_row_offset = rmd->first_row_offset; 2875 tmpdiv = rmd->last_block; 2876 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); 2877 tmpdiv = rmd->r5or6_last_row_offset; 2878 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); 2879 tmpdiv = rmd->r5or6_first_row_offset; 2880 do_div(tmpdiv, rmd->strip_size); 2881 rmd->first_column = rmd->r5or6_first_column = tmpdiv; 2882 tmpdiv = rmd->r5or6_last_row_offset; 2883 do_div(tmpdiv, rmd->strip_size); 2884 rmd->r5or6_last_column = tmpdiv; 2885 #else 2886 rmd->first_row_offset = rmd->r5or6_first_row_offset = 2887 (u32)((rmd->first_block % rmd->stripesize) % 2888 rmd->blocks_per_row); 2889 2890 rmd->r5or6_last_row_offset = 2891 (u32)((rmd->last_block % rmd->stripesize) % 2892 rmd->blocks_per_row); 2893 2894 rmd->first_column = 2895 rmd->r5or6_first_row_offset / rmd->strip_size; 2896 rmd->r5or6_first_column = rmd->first_column; 2897 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; 2898 #endif 2899 if (rmd->r5or6_first_column != rmd->r5or6_last_column) 2900 return PQI_RAID_BYPASS_INELIGIBLE; 2901 2902 /* Request is eligible. */ 2903 rmd->map_row = 2904 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % 2905 get_unaligned_le16(&raid_map->row_cnt); 2906 2907 rmd->map_index = (rmd->first_group * 2908 (get_unaligned_le16(&raid_map->row_cnt) * 2909 rmd->total_disks_per_row)) + 2910 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; 2911 2912 if (rmd->is_write) { 2913 u32 index; 2914 2915 /* 2916 * p_parity_it_nexus and q_parity_it_nexus are pointers to the 2917 * parity entries inside the device's raid_map. 2918 * 2919 * A device's RAID map is bounded by: number of RAID disks squared. 2920 * 2921 * The devices RAID map size is checked during device 2922 * initialization. 2923 */ 2924 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); 2925 index *= rmd->total_disks_per_row; 2926 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); 2927 2928 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; 2929 if (rmd->raid_level == SA_RAID_6) { 2930 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; 2931 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; 2932 } 2933 #if BITS_PER_LONG == 32 2934 tmpdiv = rmd->first_block; 2935 do_div(tmpdiv, rmd->blocks_per_row); 2936 rmd->row = tmpdiv; 2937 #else 2938 rmd->row = rmd->first_block / rmd->blocks_per_row; 2939 #endif 2940 } 2941 2942 return 0; 2943 } 2944 2945 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) 2946 { 2947 /* Build the new CDB for the physical disk I/O. */ 2948 if (rmd->disk_block > 0xffffffff) { 2949 rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; 2950 rmd->cdb[1] = 0; 2951 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); 2952 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); 2953 rmd->cdb[14] = 0; 2954 rmd->cdb[15] = 0; 2955 rmd->cdb_length = 16; 2956 } else { 2957 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; 2958 rmd->cdb[1] = 0; 2959 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); 2960 rmd->cdb[6] = 0; 2961 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); 2962 rmd->cdb[9] = 0; 2963 rmd->cdb_length = 10; 2964 } 2965 } 2966 2967 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, 2968 struct pqi_scsi_dev_raid_map_data *rmd) 2969 { 2970 u32 index; 2971 u32 group; 2972 2973 group = rmd->map_index / rmd->data_disks_per_row; 2974 2975 index = rmd->map_index - (group * rmd->data_disks_per_row); 2976 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; 2977 index += rmd->data_disks_per_row; 2978 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; 2979 if (rmd->layout_map_count > 2) { 2980 index += rmd->data_disks_per_row; 2981 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; 2982 } 2983 2984 rmd->num_it_nexus_entries = rmd->layout_map_count; 2985 } 2986 2987 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2988 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2989 struct pqi_queue_group *queue_group) 2990 { 2991 int rc; 2992 struct raid_map *raid_map; 2993 u32 group; 2994 u32 next_bypass_group; 2995 struct pqi_encryption_info *encryption_info_ptr; 2996 struct pqi_encryption_info encryption_info; 2997 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 2998 2999 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 3000 if (rc) 3001 return PQI_RAID_BYPASS_INELIGIBLE; 3002 3003 rmd.raid_level = device->raid_level; 3004 3005 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) 3006 return PQI_RAID_BYPASS_INELIGIBLE; 3007 3008 if (unlikely(rmd.block_cnt == 0)) 3009 return PQI_RAID_BYPASS_INELIGIBLE; 3010 3011 raid_map = device->raid_map; 3012 3013 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); 3014 if (rc) 3015 return PQI_RAID_BYPASS_INELIGIBLE; 3016 3017 if (device->raid_level == SA_RAID_1 || 3018 device->raid_level == SA_RAID_TRIPLE) { 3019 if (rmd.is_write) { 3020 pqi_calc_aio_r1_nexus(raid_map, &rmd); 3021 } else { 3022 group = device->next_bypass_group[rmd.map_index]; 3023 next_bypass_group = group + 1; 3024 if (next_bypass_group >= rmd.layout_map_count) 3025 next_bypass_group = 0; 3026 device->next_bypass_group[rmd.map_index] = next_bypass_group; 3027 rmd.map_index += group * rmd.data_disks_per_row; 3028 } 3029 } else if ((device->raid_level == SA_RAID_5 || 3030 device->raid_level == SA_RAID_6) && 3031 (rmd.layout_map_count > 1 || rmd.is_write)) { 3032 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); 3033 if (rc) 3034 return PQI_RAID_BYPASS_INELIGIBLE; 3035 } 3036 3037 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) 3038 return PQI_RAID_BYPASS_INELIGIBLE; 3039 3040 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; 3041 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 3042 rmd.first_row * rmd.strip_size + 3043 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); 3044 rmd.disk_block_cnt = rmd.block_cnt; 3045 3046 /* Handle differing logical/physical block sizes. 
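* Illustrative example (values assumed, not taken from a real map): a volume exposing 4096-byte logical blocks built on drives with 512-byte physical blocks would have phys_blk_shift = 3, so the disk block number and count are both scaled up by a factor of 8 below.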
*/ 3047 if (raid_map->phys_blk_shift) { 3048 rmd.disk_block <<= raid_map->phys_blk_shift; 3049 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; 3050 } 3051 3052 if (unlikely(rmd.disk_block_cnt > 0xffff)) 3053 return PQI_RAID_BYPASS_INELIGIBLE; 3054 3055 pqi_set_aio_cdb(&rmd); 3056 3057 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { 3058 if (rmd.data_length > device->max_transfer_encrypted) 3059 return PQI_RAID_BYPASS_INELIGIBLE; 3060 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); 3061 encryption_info_ptr = &encryption_info; 3062 } else { 3063 encryption_info_ptr = NULL; 3064 } 3065 3066 if (rmd.is_write) { 3067 switch (device->raid_level) { 3068 case SA_RAID_1: 3069 case SA_RAID_TRIPLE: 3070 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, 3071 encryption_info_ptr, device, &rmd); 3072 case SA_RAID_5: 3073 case SA_RAID_6: 3074 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, 3075 encryption_info_ptr, device, &rmd); 3076 } 3077 } 3078 3079 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, 3080 rmd.cdb, rmd.cdb_length, queue_group, 3081 encryption_info_ptr, true, false); 3082 } 3083 3084 #define PQI_STATUS_IDLE 0x0 3085 3086 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 3087 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 3088 3089 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 3090 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 3091 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 3092 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 3093 #define PQI_DEVICE_STATE_ERROR 0x4 3094 3095 #define PQI_MODE_READY_TIMEOUT_SECS 30 3096 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 3097 3098 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 3099 { 3100 struct pqi_device_registers __iomem *pqi_registers; 3101 unsigned long timeout; 3102 u64 signature; 3103 u8 status; 3104 3105 pqi_registers = ctrl_info->pqi_registers; 3106 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 3107 3108 while (1) { 3109 signature = readq(&pqi_registers->signature); 3110 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 3111 sizeof(signature)) == 0) 3112 break; 3113 if (time_after(jiffies, timeout)) { 3114 dev_err(&ctrl_info->pci_dev->dev, 3115 "timed out waiting for PQI signature\n"); 3116 return -ETIMEDOUT; 3117 } 3118 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3119 } 3120 3121 while (1) { 3122 status = readb(&pqi_registers->function_and_status_code); 3123 if (status == PQI_STATUS_IDLE) 3124 break; 3125 if (time_after(jiffies, timeout)) { 3126 dev_err(&ctrl_info->pci_dev->dev, 3127 "timed out waiting for PQI IDLE\n"); 3128 return -ETIMEDOUT; 3129 } 3130 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3131 } 3132 3133 while (1) { 3134 if (readl(&pqi_registers->device_status) == 3135 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 3136 break; 3137 if (time_after(jiffies, timeout)) { 3138 dev_err(&ctrl_info->pci_dev->dev, 3139 "timed out waiting for PQI all registers ready\n"); 3140 return -ETIMEDOUT; 3141 } 3142 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3143 } 3144 3145 return 0; 3146 } 3147 3148 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 3149 { 3150 struct pqi_scsi_dev *device; 3151 3152 device = io_request->scmd->device->hostdata; 3153 device->raid_bypass_enabled = false; 3154 device->aio_enabled = false; 3155 } 3156 3157 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 3158 { 3159 struct pqi_ctrl_info *ctrl_info; 3160 struct pqi_scsi_dev *device; 3161 3162 device = 
sdev->hostdata; 3163 if (device->device_offline) 3164 return; 3165 3166 device->device_offline = true; 3167 ctrl_info = shost_to_hba(sdev->host); 3168 pqi_schedule_rescan_worker(ctrl_info); 3169 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 3170 path, ctrl_info->scsi_host->host_no, device->bus, 3171 device->target, device->lun); 3172 } 3173 3174 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 3175 { 3176 u8 scsi_status; 3177 u8 host_byte; 3178 struct scsi_cmnd *scmd; 3179 struct pqi_raid_error_info *error_info; 3180 size_t sense_data_length; 3181 int residual_count; 3182 int xfer_count; 3183 struct scsi_sense_hdr sshdr; 3184 3185 scmd = io_request->scmd; 3186 if (!scmd) 3187 return; 3188 3189 error_info = io_request->error_info; 3190 scsi_status = error_info->status; 3191 host_byte = DID_OK; 3192 3193 switch (error_info->data_out_result) { 3194 case PQI_DATA_IN_OUT_GOOD: 3195 break; 3196 case PQI_DATA_IN_OUT_UNDERFLOW: 3197 xfer_count = 3198 get_unaligned_le32(&error_info->data_out_transferred); 3199 residual_count = scsi_bufflen(scmd) - xfer_count; 3200 scsi_set_resid(scmd, residual_count); 3201 if (xfer_count < scmd->underflow) 3202 host_byte = DID_SOFT_ERROR; 3203 break; 3204 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 3205 case PQI_DATA_IN_OUT_ABORTED: 3206 host_byte = DID_ABORT; 3207 break; 3208 case PQI_DATA_IN_OUT_TIMEOUT: 3209 host_byte = DID_TIME_OUT; 3210 break; 3211 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 3212 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 3213 case PQI_DATA_IN_OUT_BUFFER_ERROR: 3214 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 3215 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 3216 case PQI_DATA_IN_OUT_ERROR: 3217 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 3218 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 3219 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 3220 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 3221 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 3222 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 3223 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 3224 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 3225 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 3226 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 3227 default: 3228 host_byte = DID_ERROR; 3229 break; 3230 } 3231 3232 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 3233 if (sense_data_length == 0) 3234 sense_data_length = 3235 get_unaligned_le16(&error_info->response_data_length); 3236 if (sense_data_length) { 3237 if (sense_data_length > sizeof(error_info->data)) 3238 sense_data_length = sizeof(error_info->data); 3239 3240 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3241 scsi_normalize_sense(error_info->data, 3242 sense_data_length, &sshdr) && 3243 sshdr.sense_key == HARDWARE_ERROR && 3244 sshdr.asc == 0x3e) { 3245 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 3246 struct pqi_scsi_dev *device = scmd->device->hostdata; 3247 3248 switch (sshdr.ascq) { 3249 case 0x1: /* LOGICAL UNIT FAILURE */ 3250 if (printk_ratelimit()) 3251 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 3252 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3253 pqi_take_device_offline(scmd->device, "RAID"); 3254 host_byte = DID_NO_CONNECT; 3255 break; 3256 3257 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 3258 if (printk_ratelimit()) 3259 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 3260 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3261 break; 3262 } 3263 } 3264 3265 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3266 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3267 memcpy(scmd->sense_buffer, error_info->data, 3268 sense_data_length); 3269 } 3270 3271 if (pqi_cmd_priv(scmd)->this_residual && 3272 !pqi_is_logical_device(scmd->device->hostdata) && 3273 scsi_status == SAM_STAT_CHECK_CONDITION && 3274 host_byte == DID_OK && 3275 sense_data_length && 3276 scsi_normalize_sense(error_info->data, sense_data_length, &sshdr) && 3277 sshdr.sense_key == ILLEGAL_REQUEST && 3278 sshdr.asc == 0x26 && 3279 sshdr.ascq == 0x0) { 3280 host_byte = DID_NO_CONNECT; 3281 pqi_take_device_offline(scmd->device, "AIO"); 3282 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 0x3e, 0x1); 3283 } 3284 3285 scmd->result = scsi_status; 3286 set_host_byte(scmd, host_byte); 3287 } 3288 3289 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 3290 { 3291 u8 scsi_status; 3292 u8 host_byte; 3293 struct scsi_cmnd *scmd; 3294 struct pqi_aio_error_info *error_info; 3295 size_t sense_data_length; 3296 int residual_count; 3297 int xfer_count; 3298 bool device_offline; 3299 3300 scmd = io_request->scmd; 3301 error_info = io_request->error_info; 3302 host_byte = DID_OK; 3303 sense_data_length = 0; 3304 device_offline = false; 3305 3306 switch (error_info->service_response) { 3307 case PQI_AIO_SERV_RESPONSE_COMPLETE: 3308 scsi_status = error_info->status; 3309 break; 3310 case PQI_AIO_SERV_RESPONSE_FAILURE: 3311 switch (error_info->status) { 3312 case PQI_AIO_STATUS_IO_ABORTED: 3313 scsi_status = SAM_STAT_TASK_ABORTED; 3314 break; 3315 case PQI_AIO_STATUS_UNDERRUN: 3316 scsi_status = SAM_STAT_GOOD; 3317 residual_count = get_unaligned_le32( 3318 &error_info->residual_count); 3319 scsi_set_resid(scmd, residual_count); 3320 xfer_count = scsi_bufflen(scmd) - residual_count; 3321 if (xfer_count < scmd->underflow) 3322 host_byte = DID_SOFT_ERROR; 3323 break; 3324 case PQI_AIO_STATUS_OVERRUN: 3325 scsi_status = SAM_STAT_GOOD; 3326 break; 3327 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 3328 pqi_aio_path_disabled(io_request); 3329 scsi_status = SAM_STAT_GOOD; 3330 io_request->status = -EAGAIN; 3331 break; 3332 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 3333 case PQI_AIO_STATUS_INVALID_DEVICE: 3334 if (!io_request->raid_bypass) { 3335 device_offline = true; 3336 pqi_take_device_offline(scmd->device, "AIO"); 3337 host_byte = DID_NO_CONNECT; 3338 } 3339 scsi_status = SAM_STAT_CHECK_CONDITION; 3340 break; 3341 case PQI_AIO_STATUS_IO_ERROR: 3342 default: 3343 scsi_status = SAM_STAT_CHECK_CONDITION; 3344 break; 3345 } 3346 break; 3347 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 3348 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 3349 scsi_status = SAM_STAT_GOOD; 3350 break; 3351 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 3352 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 3353 default: 3354 scsi_status = SAM_STAT_CHECK_CONDITION; 3355 break; 3356 } 3357 3358 if (error_info->data_present) { 3359 sense_data_length = 3360 get_unaligned_le16(&error_info->data_length); 3361 if (sense_data_length) { 3362 if (sense_data_length > sizeof(error_info->data)) 3363 sense_data_length = sizeof(error_info->data); 3364 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3365 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3366 memcpy(scmd->sense_buffer, error_info->data, 3367 sense_data_length); 3368 } 3369 } 3370 3371 if (device_offline && sense_data_length == 0) 3372 scsi_build_sense(scmd, 0, 
HARDWARE_ERROR, 0x3e, 0x1); 3373 3374 scmd->result = scsi_status; 3375 set_host_byte(scmd, host_byte); 3376 } 3377 3378 static void pqi_process_io_error(unsigned int iu_type, 3379 struct pqi_io_request *io_request) 3380 { 3381 switch (iu_type) { 3382 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3383 pqi_process_raid_io_error(io_request); 3384 break; 3385 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3386 pqi_process_aio_io_error(io_request); 3387 break; 3388 } 3389 } 3390 3391 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, 3392 struct pqi_task_management_response *response) 3393 { 3394 int rc; 3395 3396 switch (response->response_code) { 3397 case SOP_TMF_COMPLETE: 3398 case SOP_TMF_FUNCTION_SUCCEEDED: 3399 rc = 0; 3400 break; 3401 case SOP_TMF_REJECTED: 3402 rc = -EAGAIN; 3403 break; 3404 case SOP_TMF_INCORRECT_LOGICAL_UNIT: 3405 rc = -ENODEV; 3406 break; 3407 default: 3408 rc = -EIO; 3409 break; 3410 } 3411 3412 if (rc) 3413 dev_err(&ctrl_info->pci_dev->dev, 3414 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); 3415 3416 return rc; 3417 } 3418 3419 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3420 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3421 { 3422 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3423 } 3424 3425 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 3426 { 3427 int num_responses; 3428 pqi_index_t oq_pi; 3429 pqi_index_t oq_ci; 3430 struct pqi_io_request *io_request; 3431 struct pqi_io_response *response; 3432 u16 request_id; 3433 3434 num_responses = 0; 3435 oq_ci = queue_group->oq_ci_copy; 3436 3437 while (1) { 3438 oq_pi = readl(queue_group->oq_pi); 3439 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3440 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3441 dev_err(&ctrl_info->pci_dev->dev, 3442 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3443 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 3444 return -1; 3445 } 3446 if (oq_pi == oq_ci) 3447 break; 3448 3449 num_responses++; 3450 response = queue_group->oq_element_array + 3451 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3452 3453 request_id = get_unaligned_le16(&response->request_id); 3454 if (request_id >= ctrl_info->max_io_slots) { 3455 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3456 dev_err(&ctrl_info->pci_dev->dev, 3457 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3458 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 3459 return -1; 3460 } 3461 3462 io_request = &ctrl_info->io_request_pool[request_id]; 3463 if (atomic_read(&io_request->refcount) == 0) { 3464 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3465 dev_err(&ctrl_info->pci_dev->dev, 3466 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3467 request_id, oq_pi, oq_ci); 3468 return -1; 3469 } 3470 3471 switch (response->header.iu_type) { 3472 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 3473 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 3474 if (io_request->scmd) 3475 io_request->scmd->result = 0; 3476 fallthrough; 3477 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 3478 break; 3479 case PQI_RESPONSE_IU_VENDOR_GENERAL: 3480 io_request->status = 3481 get_unaligned_le16( 3482 &((struct pqi_vendor_general_response *)response)->status); 3483 break; 3484 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 3485 io_request->status = 
pqi_interpret_task_management_response(ctrl_info, 3486 (void *)response); 3487 break; 3488 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 3489 pqi_aio_path_disabled(io_request); 3490 io_request->status = -EAGAIN; 3491 break; 3492 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3493 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3494 io_request->error_info = ctrl_info->error_buffer + 3495 (get_unaligned_le16(&response->error_index) * 3496 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 3497 pqi_process_io_error(response->header.iu_type, io_request); 3498 break; 3499 default: 3500 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3501 dev_err(&ctrl_info->pci_dev->dev, 3502 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3503 response->header.iu_type, oq_pi, oq_ci); 3504 return -1; 3505 } 3506 3507 io_request->io_complete_callback(io_request, io_request->context); 3508 3509 /* 3510 * Note that the I/O request structure CANNOT BE TOUCHED after 3511 * returning from the I/O completion callback! 3512 */ 3513 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 3514 } 3515 3516 if (num_responses) { 3517 queue_group->oq_ci_copy = oq_ci; 3518 writel(oq_ci, queue_group->oq_ci); 3519 } 3520 3521 return num_responses; 3522 } 3523 3524 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3525 unsigned int ci, unsigned int elements_in_queue) 3526 { 3527 unsigned int num_elements_used; 3528 3529 if (pi >= ci) 3530 num_elements_used = pi - ci; 3531 else 3532 num_elements_used = elements_in_queue - ci + pi; 3533 3534 return elements_in_queue - num_elements_used - 1; 3535 } 3536 3537 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3538 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3539 { 3540 pqi_index_t iq_pi; 3541 pqi_index_t iq_ci; 3542 unsigned long flags; 3543 void *next_element; 3544 struct pqi_queue_group *queue_group; 3545 3546 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3547 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3548 3549 while (1) { 3550 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3551 3552 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3553 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3554 3555 if (pqi_num_elements_free(iq_pi, iq_ci, 3556 ctrl_info->num_elements_per_iq)) 3557 break; 3558 3559 spin_unlock_irqrestore( 3560 &queue_group->submit_lock[RAID_PATH], flags); 3561 3562 if (pqi_ctrl_offline(ctrl_info)) 3563 return; 3564 } 3565 3566 next_element = queue_group->iq_element_array[RAID_PATH] + 3567 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3568 3569 memcpy(next_element, iu, iu_length); 3570 3571 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3572 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3573 3574 /* 3575 * This write notifies the controller that an IU is available to be 3576 * processed. 
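* The RAID path submit lock is still held here, so the iq_pi_copy update above and the doorbell write below cannot race with another submitter on the same input queue.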
3577 */ 3578 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3579 3580 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3581 } 3582 3583 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3584 struct pqi_event *event) 3585 { 3586 struct pqi_event_acknowledge_request request; 3587 3588 memset(&request, 0, sizeof(request)); 3589 3590 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3591 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3592 &request.header.iu_length); 3593 request.event_type = event->event_type; 3594 put_unaligned_le16(event->event_id, &request.event_id); 3595 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); 3596 3597 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3598 } 3599 3600 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3601 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3602 3603 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3604 struct pqi_ctrl_info *ctrl_info) 3605 { 3606 u8 status; 3607 unsigned long timeout; 3608 3609 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; 3610 3611 while (1) { 3612 status = pqi_read_soft_reset_status(ctrl_info); 3613 if (status & PQI_SOFT_RESET_INITIATE) 3614 return RESET_INITIATE_DRIVER; 3615 3616 if (status & PQI_SOFT_RESET_ABORT) 3617 return RESET_ABORT; 3618 3619 if (!sis_is_firmware_running(ctrl_info)) 3620 return RESET_NORESPONSE; 3621 3622 if (time_after(jiffies, timeout)) { 3623 dev_warn(&ctrl_info->pci_dev->dev, 3624 "timed out waiting for soft reset status\n"); 3625 return RESET_TIMEDOUT; 3626 } 3627 3628 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3629 } 3630 } 3631 3632 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) 3633 { 3634 int rc; 3635 unsigned int delay_secs; 3636 enum pqi_soft_reset_status reset_status; 3637 3638 if (ctrl_info->soft_reset_handshake_supported) 3639 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); 3640 else 3641 reset_status = RESET_INITIATE_FIRMWARE; 3642 3643 delay_secs = PQI_POST_RESET_DELAY_SECS; 3644 3645 switch (reset_status) { 3646 case RESET_TIMEDOUT: 3647 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; 3648 fallthrough; 3649 case RESET_INITIATE_DRIVER: 3650 dev_info(&ctrl_info->pci_dev->dev, 3651 "Online Firmware Activation: resetting controller\n"); 3652 sis_soft_reset(ctrl_info); 3653 fallthrough; 3654 case RESET_INITIATE_FIRMWARE: 3655 ctrl_info->pqi_mode_enabled = false; 3656 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 3657 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); 3658 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3659 pqi_ctrl_ofa_done(ctrl_info); 3660 dev_info(&ctrl_info->pci_dev->dev, 3661 "Online Firmware Activation: %s\n", 3662 rc == 0 ? 
"SUCCESS" : "FAILED"); 3663 break; 3664 case RESET_ABORT: 3665 dev_info(&ctrl_info->pci_dev->dev, 3666 "Online Firmware Activation ABORTED\n"); 3667 if (ctrl_info->soft_reset_handshake_supported) 3668 pqi_clear_soft_reset_status(ctrl_info); 3669 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3670 pqi_ctrl_ofa_done(ctrl_info); 3671 pqi_ofa_ctrl_unquiesce(ctrl_info); 3672 break; 3673 case RESET_NORESPONSE: 3674 fallthrough; 3675 default: 3676 dev_err(&ctrl_info->pci_dev->dev, 3677 "unexpected Online Firmware Activation reset status: 0x%x\n", 3678 reset_status); 3679 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3680 pqi_ctrl_ofa_done(ctrl_info); 3681 pqi_ofa_ctrl_unquiesce(ctrl_info); 3682 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3683 break; 3684 } 3685 } 3686 3687 static void pqi_ofa_memory_alloc_worker(struct work_struct *work) 3688 { 3689 struct pqi_ctrl_info *ctrl_info; 3690 3691 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); 3692 3693 pqi_ctrl_ofa_start(ctrl_info); 3694 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ofa_memory, ctrl_info->ofa_bytes_requested, ctrl_info->ofa_bytes_requested); 3695 pqi_host_memory_update(ctrl_info, &ctrl_info->ofa_memory, PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE); 3696 } 3697 3698 static void pqi_ofa_quiesce_worker(struct work_struct *work) 3699 { 3700 struct pqi_ctrl_info *ctrl_info; 3701 struct pqi_event *event; 3702 3703 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); 3704 3705 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; 3706 3707 pqi_ofa_ctrl_quiesce(ctrl_info); 3708 pqi_acknowledge_event(ctrl_info, event); 3709 pqi_process_soft_reset(ctrl_info); 3710 } 3711 3712 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3713 struct pqi_event *event) 3714 { 3715 bool ack_event; 3716 3717 ack_event = true; 3718 3719 switch (event->event_id) { 3720 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3721 dev_info(&ctrl_info->pci_dev->dev, 3722 "received Online Firmware Activation memory allocation request\n"); 3723 schedule_work(&ctrl_info->ofa_memory_alloc_work); 3724 break; 3725 case PQI_EVENT_OFA_QUIESCE: 3726 dev_info(&ctrl_info->pci_dev->dev, 3727 "received Online Firmware Activation quiesce request\n"); 3728 schedule_work(&ctrl_info->ofa_quiesce_work); 3729 ack_event = false; 3730 break; 3731 case PQI_EVENT_OFA_CANCELED: 3732 dev_info(&ctrl_info->pci_dev->dev, 3733 "received Online Firmware Activation cancel request: reason: %u\n", 3734 ctrl_info->ofa_cancel_reason); 3735 pqi_host_free_buffer(ctrl_info, &ctrl_info->ofa_memory); 3736 pqi_ctrl_ofa_done(ctrl_info); 3737 break; 3738 default: 3739 dev_err(&ctrl_info->pci_dev->dev, 3740 "received unknown Online Firmware Activation request: event ID: %u\n", 3741 event->event_id); 3742 break; 3743 } 3744 3745 return ack_event; 3746 } 3747 3748 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) 3749 { 3750 unsigned long flags; 3751 struct pqi_scsi_dev *device; 3752 3753 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3754 3755 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 3756 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 3757 device->rescan = true; 3758 } 3759 3760 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3761 } 3762 3763 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) 3764 { 3765 unsigned long flags; 3766 struct pqi_scsi_dev *device; 3767 3768 
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3769 3770 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 3771 if (device->raid_bypass_enabled) 3772 device->raid_bypass_enabled = false; 3773 3774 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3775 } 3776 3777 static void pqi_event_worker(struct work_struct *work) 3778 { 3779 unsigned int i; 3780 bool rescan_needed; 3781 struct pqi_ctrl_info *ctrl_info; 3782 struct pqi_event *event; 3783 bool ack_event; 3784 3785 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3786 3787 pqi_ctrl_busy(ctrl_info); 3788 pqi_wait_if_ctrl_blocked(ctrl_info); 3789 if (pqi_ctrl_offline(ctrl_info)) 3790 goto out; 3791 3792 rescan_needed = false; 3793 event = ctrl_info->events; 3794 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3795 if (event->pending) { 3796 event->pending = false; 3797 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3798 ack_event = pqi_ofa_process_event(ctrl_info, event); 3799 } else { 3800 ack_event = true; 3801 rescan_needed = true; 3802 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) 3803 pqi_mark_volumes_for_rescan(ctrl_info); 3804 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) 3805 pqi_disable_raid_bypass(ctrl_info); 3806 } 3807 if (ack_event) 3808 pqi_acknowledge_event(ctrl_info, event); 3809 } 3810 event++; 3811 } 3812 3813 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) 3814 3815 if (rescan_needed) 3816 pqi_schedule_rescan_worker_with_delay(ctrl_info, 3817 PQI_RESCAN_WORK_FOR_EVENT_DELAY); 3818 3819 out: 3820 pqi_ctrl_unbusy(ctrl_info); 3821 } 3822 3823 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3824 3825 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3826 { 3827 int num_interrupts; 3828 u32 heartbeat_count; 3829 struct pqi_ctrl_info *ctrl_info = timer_container_of(ctrl_info, t, 3830 heartbeat_timer); 3831 3832 pqi_check_ctrl_health(ctrl_info); 3833 if (pqi_ctrl_offline(ctrl_info)) 3834 return; 3835 3836 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3837 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3838 3839 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3840 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3841 dev_err(&ctrl_info->pci_dev->dev, 3842 "no heartbeat detected - last heartbeat count: %u\n", 3843 heartbeat_count); 3844 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3845 return; 3846 } 3847 } else { 3848 ctrl_info->previous_num_interrupts = num_interrupts; 3849 } 3850 3851 ctrl_info->previous_heartbeat_count = heartbeat_count; 3852 mod_timer(&ctrl_info->heartbeat_timer, 3853 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3854 } 3855 3856 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3857 { 3858 if (!ctrl_info->heartbeat_counter) 3859 return; 3860 3861 ctrl_info->previous_num_interrupts = 3862 atomic_read(&ctrl_info->num_interrupts); 3863 ctrl_info->previous_heartbeat_count = 3864 pqi_read_heartbeat_counter(ctrl_info); 3865 3866 ctrl_info->heartbeat_timer.expires = 3867 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3868 add_timer(&ctrl_info->heartbeat_timer); 3869 } 3870 3871 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3872 { 3873 timer_delete_sync(&ctrl_info->heartbeat_timer); 3874 } 3875 3876 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, 3877 struct pqi_event *event, struct pqi_event_response *response) 3878 { 3879 switch (event->event_id) { 3880 case 
PQI_EVENT_OFA_MEMORY_ALLOCATION: 3881 ctrl_info->ofa_bytes_requested = 3882 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); 3883 break; 3884 case PQI_EVENT_OFA_CANCELED: 3885 ctrl_info->ofa_cancel_reason = 3886 get_unaligned_le16(&response->data.ofa_cancelled.reason); 3887 break; 3888 } 3889 } 3890 3891 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3892 { 3893 int num_events; 3894 pqi_index_t oq_pi; 3895 pqi_index_t oq_ci; 3896 struct pqi_event_queue *event_queue; 3897 struct pqi_event_response *response; 3898 struct pqi_event *event; 3899 int event_index; 3900 3901 event_queue = &ctrl_info->event_queue; 3902 num_events = 0; 3903 oq_ci = event_queue->oq_ci_copy; 3904 3905 while (1) { 3906 oq_pi = readl(event_queue->oq_pi); 3907 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3908 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3909 dev_err(&ctrl_info->pci_dev->dev, 3910 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3911 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3912 return -1; 3913 } 3914 3915 if (oq_pi == oq_ci) 3916 break; 3917 3918 num_events++; 3919 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3920 3921 event_index = pqi_event_type_to_event_index(response->event_type); 3922 3923 if (event_index >= 0 && response->request_acknowledge) { 3924 event = &ctrl_info->events[event_index]; 3925 event->pending = true; 3926 event->event_type = response->event_type; 3927 event->event_id = get_unaligned_le16(&response->event_id); 3928 event->additional_event_id = 3929 get_unaligned_le32(&response->additional_event_id); 3930 if (event->event_type == PQI_EVENT_TYPE_OFA) 3931 pqi_ofa_capture_event_payload(ctrl_info, event, response); 3932 } 3933 3934 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3935 } 3936 3937 if (num_events) { 3938 event_queue->oq_ci_copy = oq_ci; 3939 writel(oq_ci, event_queue->oq_ci); 3940 schedule_work(&ctrl_info->event_work); 3941 } 3942 3943 return num_events; 3944 } 3945 3946 #define PQI_LEGACY_INTX_MASK 0x1 3947 3948 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) 3949 { 3950 u32 intx_mask; 3951 struct pqi_device_registers __iomem *pqi_registers; 3952 volatile void __iomem *register_addr; 3953 3954 pqi_registers = ctrl_info->pqi_registers; 3955 3956 if (enable_intx) 3957 register_addr = &pqi_registers->legacy_intx_mask_clear; 3958 else 3959 register_addr = &pqi_registers->legacy_intx_mask_set; 3960 3961 intx_mask = readl(register_addr); 3962 intx_mask |= PQI_LEGACY_INTX_MASK; 3963 writel(intx_mask, register_addr); 3964 } 3965 3966 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3967 enum pqi_irq_mode new_mode) 3968 { 3969 switch (ctrl_info->irq_mode) { 3970 case IRQ_MODE_MSIX: 3971 switch (new_mode) { 3972 case IRQ_MODE_MSIX: 3973 break; 3974 case IRQ_MODE_INTX: 3975 pqi_configure_legacy_intx(ctrl_info, true); 3976 sis_enable_intx(ctrl_info); 3977 break; 3978 case IRQ_MODE_NONE: 3979 break; 3980 } 3981 break; 3982 case IRQ_MODE_INTX: 3983 switch (new_mode) { 3984 case IRQ_MODE_MSIX: 3985 pqi_configure_legacy_intx(ctrl_info, false); 3986 sis_enable_msix(ctrl_info); 3987 break; 3988 case IRQ_MODE_INTX: 3989 break; 3990 case IRQ_MODE_NONE: 3991 pqi_configure_legacy_intx(ctrl_info, false); 3992 break; 3993 } 3994 break; 3995 case IRQ_MODE_NONE: 3996 switch (new_mode) { 3997 case IRQ_MODE_MSIX: 3998 sis_enable_msix(ctrl_info); 3999 break; 4000 case IRQ_MODE_INTX: 4001 
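			/*
			 * No interrupt mode is active yet: unmask legacy INTx
			 * in the PQI registers, then enable INTx delivery at
			 * the SIS layer.
			 */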
pqi_configure_legacy_intx(ctrl_info, true); 4002 sis_enable_intx(ctrl_info); 4003 break; 4004 case IRQ_MODE_NONE: 4005 break; 4006 } 4007 break; 4008 } 4009 4010 ctrl_info->irq_mode = new_mode; 4011 } 4012 4013 #define PQI_LEGACY_INTX_PENDING 0x1 4014 4015 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 4016 { 4017 bool valid_irq; 4018 u32 intx_status; 4019 4020 switch (ctrl_info->irq_mode) { 4021 case IRQ_MODE_MSIX: 4022 valid_irq = true; 4023 break; 4024 case IRQ_MODE_INTX: 4025 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); 4026 if (intx_status & PQI_LEGACY_INTX_PENDING) 4027 valid_irq = true; 4028 else 4029 valid_irq = false; 4030 break; 4031 case IRQ_MODE_NONE: 4032 default: 4033 valid_irq = false; 4034 break; 4035 } 4036 4037 return valid_irq; 4038 } 4039 4040 static irqreturn_t pqi_irq_handler(int irq, void *data) 4041 { 4042 struct pqi_ctrl_info *ctrl_info; 4043 struct pqi_queue_group *queue_group; 4044 int num_io_responses_handled; 4045 int num_events_handled; 4046 4047 queue_group = data; 4048 ctrl_info = queue_group->ctrl_info; 4049 4050 if (!pqi_is_valid_irq(ctrl_info)) 4051 return IRQ_NONE; 4052 4053 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 4054 if (num_io_responses_handled < 0) 4055 goto out; 4056 4057 if (irq == ctrl_info->event_irq) { 4058 num_events_handled = pqi_process_event_intr(ctrl_info); 4059 if (num_events_handled < 0) 4060 goto out; 4061 } else { 4062 num_events_handled = 0; 4063 } 4064 4065 if (num_io_responses_handled + num_events_handled > 0) 4066 atomic_inc(&ctrl_info->num_interrupts); 4067 4068 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 4069 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 4070 4071 out: 4072 return IRQ_HANDLED; 4073 } 4074 4075 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 4076 { 4077 struct pci_dev *pci_dev = ctrl_info->pci_dev; 4078 int i; 4079 int rc; 4080 4081 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 4082 4083 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 4084 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 4085 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 4086 if (rc) { 4087 dev_err(&pci_dev->dev, 4088 "irq %u init failed with error %d\n", 4089 pci_irq_vector(pci_dev, i), rc); 4090 return rc; 4091 } 4092 ctrl_info->num_msix_vectors_initialized++; 4093 } 4094 4095 return 0; 4096 } 4097 4098 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 4099 { 4100 int i; 4101 4102 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 4103 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 4104 &ctrl_info->queue_groups[i]); 4105 4106 ctrl_info->num_msix_vectors_initialized = 0; 4107 } 4108 4109 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4110 { 4111 int num_vectors_enabled; 4112 unsigned int flags = PCI_IRQ_MSIX; 4113 4114 if (!pqi_disable_managed_interrupts) 4115 flags |= PCI_IRQ_AFFINITY; 4116 4117 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 4118 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 4119 flags); 4120 if (num_vectors_enabled < 0) { 4121 dev_err(&ctrl_info->pci_dev->dev, 4122 "MSI-X init failed with error %d\n", 4123 num_vectors_enabled); 4124 return num_vectors_enabled; 4125 } 4126 4127 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 4128 ctrl_info->irq_mode = IRQ_MODE_MSIX; 4129 return 0; 4130 } 4131 4132 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4133 { 4134 if 
(ctrl_info->num_msix_vectors_enabled) { 4135 pci_free_irq_vectors(ctrl_info->pci_dev); 4136 ctrl_info->num_msix_vectors_enabled = 0; 4137 } 4138 } 4139 4140 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 4141 { 4142 unsigned int i; 4143 size_t alloc_length; 4144 size_t element_array_length_per_iq; 4145 size_t element_array_length_per_oq; 4146 void *element_array; 4147 void __iomem *next_queue_index; 4148 void *aligned_pointer; 4149 unsigned int num_inbound_queues; 4150 unsigned int num_outbound_queues; 4151 unsigned int num_queue_indexes; 4152 struct pqi_queue_group *queue_group; 4153 4154 element_array_length_per_iq = 4155 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 4156 ctrl_info->num_elements_per_iq; 4157 element_array_length_per_oq = 4158 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 4159 ctrl_info->num_elements_per_oq; 4160 num_inbound_queues = ctrl_info->num_queue_groups * 2; 4161 num_outbound_queues = ctrl_info->num_queue_groups; 4162 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 4163 4164 aligned_pointer = NULL; 4165 4166 for (i = 0; i < num_inbound_queues; i++) { 4167 aligned_pointer = PTR_ALIGN(aligned_pointer, 4168 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4169 aligned_pointer += element_array_length_per_iq; 4170 } 4171 4172 for (i = 0; i < num_outbound_queues; i++) { 4173 aligned_pointer = PTR_ALIGN(aligned_pointer, 4174 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4175 aligned_pointer += element_array_length_per_oq; 4176 } 4177 4178 aligned_pointer = PTR_ALIGN(aligned_pointer, 4179 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4180 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4181 PQI_EVENT_OQ_ELEMENT_LENGTH; 4182 4183 for (i = 0; i < num_queue_indexes; i++) { 4184 aligned_pointer = PTR_ALIGN(aligned_pointer, 4185 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4186 aligned_pointer += sizeof(pqi_index_t); 4187 } 4188 4189 alloc_length = (size_t)aligned_pointer + 4190 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4191 4192 alloc_length += PQI_EXTRA_SGL_MEMORY; 4193 4194 ctrl_info->queue_memory_base = 4195 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4196 &ctrl_info->queue_memory_base_dma_handle, 4197 GFP_KERNEL); 4198 4199 if (!ctrl_info->queue_memory_base) 4200 return -ENOMEM; 4201 4202 ctrl_info->queue_memory_length = alloc_length; 4203 4204 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 4205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4206 4207 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4208 queue_group = &ctrl_info->queue_groups[i]; 4209 queue_group->iq_element_array[RAID_PATH] = element_array; 4210 queue_group->iq_element_array_bus_addr[RAID_PATH] = 4211 ctrl_info->queue_memory_base_dma_handle + 4212 (element_array - ctrl_info->queue_memory_base); 4213 element_array += element_array_length_per_iq; 4214 element_array = PTR_ALIGN(element_array, 4215 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4216 queue_group->iq_element_array[AIO_PATH] = element_array; 4217 queue_group->iq_element_array_bus_addr[AIO_PATH] = 4218 ctrl_info->queue_memory_base_dma_handle + 4219 (element_array - ctrl_info->queue_memory_base); 4220 element_array += element_array_length_per_iq; 4221 element_array = PTR_ALIGN(element_array, 4222 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4223 } 4224 4225 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4226 queue_group = &ctrl_info->queue_groups[i]; 4227 queue_group->oq_element_array = element_array; 4228 queue_group->oq_element_array_bus_addr = 4229 ctrl_info->queue_memory_base_dma_handle + 4230 (element_array - ctrl_info->queue_memory_base); 4231 element_array += 
element_array_length_per_oq; 4232 element_array = PTR_ALIGN(element_array, 4233 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4234 } 4235 4236 ctrl_info->event_queue.oq_element_array = element_array; 4237 ctrl_info->event_queue.oq_element_array_bus_addr = 4238 ctrl_info->queue_memory_base_dma_handle + 4239 (element_array - ctrl_info->queue_memory_base); 4240 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4241 PQI_EVENT_OQ_ELEMENT_LENGTH; 4242 4243 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 4244 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4245 4246 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4247 queue_group = &ctrl_info->queue_groups[i]; 4248 queue_group->iq_ci[RAID_PATH] = next_queue_index; 4249 queue_group->iq_ci_bus_addr[RAID_PATH] = 4250 ctrl_info->queue_memory_base_dma_handle + 4251 (next_queue_index - 4252 (void __iomem *)ctrl_info->queue_memory_base); 4253 next_queue_index += sizeof(pqi_index_t); 4254 next_queue_index = PTR_ALIGN(next_queue_index, 4255 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4256 queue_group->iq_ci[AIO_PATH] = next_queue_index; 4257 queue_group->iq_ci_bus_addr[AIO_PATH] = 4258 ctrl_info->queue_memory_base_dma_handle + 4259 (next_queue_index - 4260 (void __iomem *)ctrl_info->queue_memory_base); 4261 next_queue_index += sizeof(pqi_index_t); 4262 next_queue_index = PTR_ALIGN(next_queue_index, 4263 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4264 queue_group->oq_pi = next_queue_index; 4265 queue_group->oq_pi_bus_addr = 4266 ctrl_info->queue_memory_base_dma_handle + 4267 (next_queue_index - 4268 (void __iomem *)ctrl_info->queue_memory_base); 4269 next_queue_index += sizeof(pqi_index_t); 4270 next_queue_index = PTR_ALIGN(next_queue_index, 4271 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4272 } 4273 4274 ctrl_info->event_queue.oq_pi = next_queue_index; 4275 ctrl_info->event_queue.oq_pi_bus_addr = 4276 ctrl_info->queue_memory_base_dma_handle + 4277 (next_queue_index - 4278 (void __iomem *)ctrl_info->queue_memory_base); 4279 4280 return 0; 4281 } 4282 4283 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 4284 { 4285 unsigned int i; 4286 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4287 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4288 4289 /* 4290 * Initialize the backpointers to the controller structure in 4291 * each operational queue group structure. 4292 */ 4293 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4294 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 4295 4296 /* 4297 * Assign IDs to all operational queues. Note that the IDs 4298 * assigned to operational IQs are independent of the IDs 4299 * assigned to operational OQs. 4300 */ 4301 ctrl_info->event_queue.oq_id = next_oq_id++; 4302 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4303 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 4304 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 4305 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 4306 } 4307 4308 /* 4309 * Assign MSI-X table entry indexes to all queues. Note that the 4310 * interrupt for the event queue is shared with the first queue group. 
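	 * MSI-X vector 0 therefore services both event notifications and
	 * the I/O completions of queue group 0 (see pqi_irq_handler()).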
4311 */ 4312 ctrl_info->event_queue.int_msg_num = 0; 4313 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4314 ctrl_info->queue_groups[i].int_msg_num = i; 4315 4316 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4317 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 4318 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 4319 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 4320 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 4321 } 4322 } 4323 4324 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 4325 { 4326 size_t alloc_length; 4327 struct pqi_admin_queues_aligned *admin_queues_aligned; 4328 struct pqi_admin_queues *admin_queues; 4329 4330 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 4331 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4332 4333 ctrl_info->admin_queue_memory_base = 4334 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4335 &ctrl_info->admin_queue_memory_base_dma_handle, 4336 GFP_KERNEL); 4337 4338 if (!ctrl_info->admin_queue_memory_base) 4339 return -ENOMEM; 4340 4341 ctrl_info->admin_queue_memory_length = alloc_length; 4342 4343 admin_queues = &ctrl_info->admin_queues; 4344 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 4345 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4346 admin_queues->iq_element_array = 4347 &admin_queues_aligned->iq_element_array; 4348 admin_queues->oq_element_array = 4349 &admin_queues_aligned->oq_element_array; 4350 admin_queues->iq_ci = 4351 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; 4352 admin_queues->oq_pi = 4353 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 4354 4355 admin_queues->iq_element_array_bus_addr = 4356 ctrl_info->admin_queue_memory_base_dma_handle + 4357 (admin_queues->iq_element_array - 4358 ctrl_info->admin_queue_memory_base); 4359 admin_queues->oq_element_array_bus_addr = 4360 ctrl_info->admin_queue_memory_base_dma_handle + 4361 (admin_queues->oq_element_array - 4362 ctrl_info->admin_queue_memory_base); 4363 admin_queues->iq_ci_bus_addr = 4364 ctrl_info->admin_queue_memory_base_dma_handle + 4365 ((void __iomem *)admin_queues->iq_ci - 4366 (void __iomem *)ctrl_info->admin_queue_memory_base); 4367 admin_queues->oq_pi_bus_addr = 4368 ctrl_info->admin_queue_memory_base_dma_handle + 4369 ((void __iomem *)admin_queues->oq_pi - 4370 (void __iomem *)ctrl_info->admin_queue_memory_base); 4371 4372 return 0; 4373 } 4374 4375 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 4376 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 4377 4378 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 4379 { 4380 struct pqi_device_registers __iomem *pqi_registers; 4381 struct pqi_admin_queues *admin_queues; 4382 unsigned long timeout; 4383 u8 status; 4384 u32 reg; 4385 4386 pqi_registers = ctrl_info->pqi_registers; 4387 admin_queues = &ctrl_info->admin_queues; 4388 4389 writeq((u64)admin_queues->iq_element_array_bus_addr, 4390 &pqi_registers->admin_iq_element_array_addr); 4391 writeq((u64)admin_queues->oq_element_array_bus_addr, 4392 &pqi_registers->admin_oq_element_array_addr); 4393 writeq((u64)admin_queues->iq_ci_bus_addr, 4394 &pqi_registers->admin_iq_ci_addr); 4395 writeq((u64)admin_queues->oq_pi_bus_addr, 4396 &pqi_registers->admin_oq_pi_addr); 4397 4398 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 4399 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 4400 (admin_queues->int_msg_num << 16); 4401 writel(reg, &pqi_registers->admin_iq_num_elements); 4402 4403 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 4404 &pqi_registers->function_and_status_code); 4405 4406 
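	/*
	 * Poll the function and status code register until the firmware
	 * reports PQI_STATUS_IDLE, which the driver takes as completion of
	 * the create admin queue pair command, giving up after
	 * PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES.
	 */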
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4407 while (1) { 4408 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4409 status = readb(&pqi_registers->function_and_status_code); 4410 if (status == PQI_STATUS_IDLE) 4411 break; 4412 if (time_after(jiffies, timeout)) 4413 return -ETIMEDOUT; 4414 } 4415 4416 /* 4417 * The offset registers are not initialized to the correct 4418 * offsets until *after* the create admin queue pair command 4419 * completes successfully. 4420 */ 4421 admin_queues->iq_pi = ctrl_info->iomem_base + 4422 PQI_DEVICE_REGISTERS_OFFSET + 4423 readq(&pqi_registers->admin_iq_pi_offset); 4424 admin_queues->oq_ci = ctrl_info->iomem_base + 4425 PQI_DEVICE_REGISTERS_OFFSET + 4426 readq(&pqi_registers->admin_oq_ci_offset); 4427 4428 return 0; 4429 } 4430 4431 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 4432 struct pqi_general_admin_request *request) 4433 { 4434 struct pqi_admin_queues *admin_queues; 4435 void *next_element; 4436 pqi_index_t iq_pi; 4437 4438 admin_queues = &ctrl_info->admin_queues; 4439 iq_pi = admin_queues->iq_pi_copy; 4440 4441 next_element = admin_queues->iq_element_array + 4442 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 4443 4444 memcpy(next_element, request, sizeof(*request)); 4445 4446 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 4447 admin_queues->iq_pi_copy = iq_pi; 4448 4449 /* 4450 * This write notifies the controller that an IU is available to be 4451 * processed. 4452 */ 4453 writel(iq_pi, admin_queues->iq_pi); 4454 } 4455 4456 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 4457 4458 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 4459 struct pqi_general_admin_response *response) 4460 { 4461 struct pqi_admin_queues *admin_queues; 4462 pqi_index_t oq_pi; 4463 pqi_index_t oq_ci; 4464 unsigned long timeout; 4465 4466 admin_queues = &ctrl_info->admin_queues; 4467 oq_ci = admin_queues->oq_ci_copy; 4468 4469 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 4470 4471 while (1) { 4472 oq_pi = readl(admin_queues->oq_pi); 4473 if (oq_pi != oq_ci) 4474 break; 4475 if (time_after(jiffies, timeout)) { 4476 dev_err(&ctrl_info->pci_dev->dev, 4477 "timed out waiting for admin response\n"); 4478 return -ETIMEDOUT; 4479 } 4480 if (!sis_is_firmware_running(ctrl_info)) 4481 return -ENXIO; 4482 usleep_range(1000, 2000); 4483 } 4484 4485 memcpy(response, admin_queues->oq_element_array + 4486 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 4487 4488 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 4489 admin_queues->oq_ci_copy = oq_ci; 4490 writel(oq_ci, admin_queues->oq_ci); 4491 4492 return 0; 4493 } 4494 4495 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 4496 struct pqi_queue_group *queue_group, enum pqi_io_path path, 4497 struct pqi_io_request *io_request) 4498 { 4499 struct pqi_io_request *next; 4500 void *next_element; 4501 pqi_index_t iq_pi; 4502 pqi_index_t iq_ci; 4503 size_t iu_length; 4504 unsigned long flags; 4505 unsigned int num_elements_needed; 4506 unsigned int num_elements_to_end_of_queue; 4507 size_t copy_count; 4508 struct pqi_iu_header *request; 4509 4510 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4511 4512 if (io_request) { 4513 io_request->queue_group = queue_group; 4514 list_add_tail(&io_request->request_list_entry, 4515 &queue_group->request_list[path]); 4516 } 4517 4518 iq_pi = queue_group->iq_pi_copy[path]; 4519 4520 list_for_each_entry_safe(io_request, next, 4521 &queue_group->request_list[path], request_list_entry) { 4522 4523 request = 
io_request->iu; 4524 4525 iu_length = get_unaligned_le16(&request->iu_length) + 4526 PQI_REQUEST_HEADER_LENGTH; 4527 num_elements_needed = 4528 DIV_ROUND_UP(iu_length, 4529 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4530 4531 iq_ci = readl(queue_group->iq_ci[path]); 4532 4533 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 4534 ctrl_info->num_elements_per_iq)) 4535 break; 4536 4537 put_unaligned_le16(queue_group->oq_id, 4538 &request->response_queue_id); 4539 4540 next_element = queue_group->iq_element_array[path] + 4541 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4542 4543 num_elements_to_end_of_queue = 4544 ctrl_info->num_elements_per_iq - iq_pi; 4545 4546 if (num_elements_needed <= num_elements_to_end_of_queue) { 4547 memcpy(next_element, request, iu_length); 4548 } else { 4549 copy_count = num_elements_to_end_of_queue * 4550 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4551 memcpy(next_element, request, copy_count); 4552 memcpy(queue_group->iq_element_array[path], 4553 (u8 *)request + copy_count, 4554 iu_length - copy_count); 4555 } 4556 4557 iq_pi = (iq_pi + num_elements_needed) % 4558 ctrl_info->num_elements_per_iq; 4559 4560 list_del(&io_request->request_list_entry); 4561 } 4562 4563 if (iq_pi != queue_group->iq_pi_copy[path]) { 4564 queue_group->iq_pi_copy[path] = iq_pi; 4565 /* 4566 * This write notifies the controller that one or more IUs are 4567 * available to be processed. 4568 */ 4569 writel(iq_pi, queue_group->iq_pi[path]); 4570 } 4571 4572 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4573 } 4574 4575 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 4576 4577 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 4578 struct completion *wait) 4579 { 4580 int rc; 4581 4582 while (1) { 4583 if (wait_for_completion_io_timeout(wait, 4584 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 4585 rc = 0; 4586 break; 4587 } 4588 4589 pqi_check_ctrl_health(ctrl_info); 4590 if (pqi_ctrl_offline(ctrl_info)) { 4591 rc = -ENXIO; 4592 break; 4593 } 4594 } 4595 4596 return rc; 4597 } 4598 4599 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 4600 void *context) 4601 { 4602 struct completion *waiting = context; 4603 4604 complete(waiting); 4605 } 4606 4607 static int pqi_process_raid_io_error_synchronous( 4608 struct pqi_raid_error_info *error_info) 4609 { 4610 int rc = -EIO; 4611 4612 switch (error_info->data_out_result) { 4613 case PQI_DATA_IN_OUT_GOOD: 4614 if (error_info->status == SAM_STAT_GOOD) 4615 rc = 0; 4616 break; 4617 case PQI_DATA_IN_OUT_UNDERFLOW: 4618 if (error_info->status == SAM_STAT_GOOD || 4619 error_info->status == SAM_STAT_CHECK_CONDITION) 4620 rc = 0; 4621 break; 4622 case PQI_DATA_IN_OUT_ABORTED: 4623 rc = PQI_CMD_STATUS_ABORTED; 4624 break; 4625 } 4626 4627 return rc; 4628 } 4629 4630 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) 4631 { 4632 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; 4633 } 4634 4635 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4636 struct pqi_iu_header *request, unsigned int flags, 4637 struct pqi_raid_error_info *error_info) 4638 { 4639 int rc = 0; 4640 struct pqi_io_request *io_request; 4641 size_t iu_length; 4642 DECLARE_COMPLETION_ONSTACK(wait); 4643 4644 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4645 if (down_interruptible(&ctrl_info->sync_request_sem)) 4646 return -ERESTARTSYS; 4647 } else { 4648 down(&ctrl_info->sync_request_sem); 4649 } 4650 4651 pqi_ctrl_busy(ctrl_info); 4652 /* 4653 * 
Wait for other admin queue updates such as; 4654 * config table changes, OFA memory updates, ... 4655 */ 4656 if (pqi_is_blockable_request(request)) 4657 pqi_wait_if_ctrl_blocked(ctrl_info); 4658 4659 if (pqi_ctrl_offline(ctrl_info)) { 4660 rc = -ENXIO; 4661 goto out; 4662 } 4663 4664 io_request = pqi_alloc_io_request(ctrl_info, NULL); 4665 4666 put_unaligned_le16(io_request->index, 4667 &(((struct pqi_raid_path_request *)request)->request_id)); 4668 4669 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4670 ((struct pqi_raid_path_request *)request)->error_index = 4671 ((struct pqi_raid_path_request *)request)->request_id; 4672 4673 iu_length = get_unaligned_le16(&request->iu_length) + 4674 PQI_REQUEST_HEADER_LENGTH; 4675 memcpy(io_request->iu, request, iu_length); 4676 4677 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4678 io_request->context = &wait; 4679 4680 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4681 io_request); 4682 4683 pqi_wait_for_completion_io(ctrl_info, &wait); 4684 4685 if (error_info) { 4686 if (io_request->error_info) 4687 memcpy(error_info, io_request->error_info, sizeof(*error_info)); 4688 else 4689 memset(error_info, 0, sizeof(*error_info)); 4690 } else if (rc == 0 && io_request->error_info) { 4691 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); 4692 } 4693 4694 pqi_free_io_request(io_request); 4695 4696 out: 4697 pqi_ctrl_unbusy(ctrl_info); 4698 up(&ctrl_info->sync_request_sem); 4699 4700 return rc; 4701 } 4702 4703 static int pqi_validate_admin_response( 4704 struct pqi_general_admin_response *response, u8 expected_function_code) 4705 { 4706 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4707 return -EINVAL; 4708 4709 if (get_unaligned_le16(&response->header.iu_length) != 4710 PQI_GENERAL_ADMIN_IU_LENGTH) 4711 return -EINVAL; 4712 4713 if (response->function_code != expected_function_code) 4714 return -EINVAL; 4715 4716 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4717 return -EINVAL; 4718 4719 return 0; 4720 } 4721 4722 static int pqi_submit_admin_request_synchronous( 4723 struct pqi_ctrl_info *ctrl_info, 4724 struct pqi_general_admin_request *request, 4725 struct pqi_general_admin_response *response) 4726 { 4727 int rc; 4728 4729 pqi_submit_admin_request(ctrl_info, request); 4730 4731 rc = pqi_poll_for_admin_response(ctrl_info, response); 4732 4733 if (rc == 0) 4734 rc = pqi_validate_admin_response(response, request->function_code); 4735 4736 return rc; 4737 } 4738 4739 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4740 { 4741 int rc; 4742 struct pqi_general_admin_request request; 4743 struct pqi_general_admin_response response; 4744 struct pqi_device_capability *capability; 4745 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4746 4747 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4748 if (!capability) 4749 return -ENOMEM; 4750 4751 memset(&request, 0, sizeof(request)); 4752 4753 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4754 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4755 &request.header.iu_length); 4756 request.function_code = 4757 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4758 put_unaligned_le32(sizeof(*capability), 4759 &request.data.report_device_capability.buffer_length); 4760 4761 rc = pqi_map_single(ctrl_info->pci_dev, 4762 &request.data.report_device_capability.sg_descriptor, 4763 capability, sizeof(*capability), 4764 DMA_FROM_DEVICE); 4765 if (rc) 4766 goto out; 
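	/*
	 * Issue the REPORT DEVICE CAPABILITY admin request synchronously;
	 * the DMA mapping set up above is released immediately afterwards,
	 * whether or not the request succeeded.
	 */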
4767 4768 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); 4769 4770 pqi_pci_unmap(ctrl_info->pci_dev, 4771 &request.data.report_device_capability.sg_descriptor, 1, 4772 DMA_FROM_DEVICE); 4773 4774 if (rc) 4775 goto out; 4776 4777 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4778 rc = -EIO; 4779 goto out; 4780 } 4781 4782 ctrl_info->max_inbound_queues = 4783 get_unaligned_le16(&capability->max_inbound_queues); 4784 ctrl_info->max_elements_per_iq = 4785 get_unaligned_le16(&capability->max_elements_per_iq); 4786 ctrl_info->max_iq_element_length = 4787 get_unaligned_le16(&capability->max_iq_element_length) 4788 * 16; 4789 ctrl_info->max_outbound_queues = 4790 get_unaligned_le16(&capability->max_outbound_queues); 4791 ctrl_info->max_elements_per_oq = 4792 get_unaligned_le16(&capability->max_elements_per_oq); 4793 ctrl_info->max_oq_element_length = 4794 get_unaligned_le16(&capability->max_oq_element_length) 4795 * 16; 4796 4797 sop_iu_layer_descriptor = 4798 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4799 4800 ctrl_info->max_inbound_iu_length_per_firmware = 4801 get_unaligned_le16( 4802 &sop_iu_layer_descriptor->max_inbound_iu_length); 4803 ctrl_info->inbound_spanning_supported = 4804 sop_iu_layer_descriptor->inbound_spanning_supported; 4805 ctrl_info->outbound_spanning_supported = 4806 sop_iu_layer_descriptor->outbound_spanning_supported; 4807 4808 out: 4809 kfree(capability); 4810 4811 return rc; 4812 } 4813 4814 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4815 { 4816 if (ctrl_info->max_iq_element_length < 4817 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4818 dev_err(&ctrl_info->pci_dev->dev, 4819 "max. inbound queue element length of %d is less than the required length of %d\n", 4820 ctrl_info->max_iq_element_length, 4821 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4822 return -EINVAL; 4823 } 4824 4825 if (ctrl_info->max_oq_element_length < 4826 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4827 dev_err(&ctrl_info->pci_dev->dev, 4828 "max. outbound queue element length of %d is less than the required length of %d\n", 4829 ctrl_info->max_oq_element_length, 4830 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4831 return -EINVAL; 4832 } 4833 4834 if (ctrl_info->max_inbound_iu_length_per_firmware < 4835 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4836 dev_err(&ctrl_info->pci_dev->dev, 4837 "max. inbound IU length of %u is less than the min. required length of %d\n", 4838 ctrl_info->max_inbound_iu_length_per_firmware, 4839 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4840 return -EINVAL; 4841 } 4842 4843 if (!ctrl_info->inbound_spanning_supported) { 4844 dev_err(&ctrl_info->pci_dev->dev, 4845 "the controller does not support inbound spanning\n"); 4846 return -EINVAL; 4847 } 4848 4849 if (ctrl_info->outbound_spanning_supported) { 4850 dev_err(&ctrl_info->pci_dev->dev, 4851 "the controller supports outbound spanning but this driver does not\n"); 4852 return -EINVAL; 4853 } 4854 4855 return 0; 4856 } 4857 4858 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4859 { 4860 int rc; 4861 struct pqi_event_queue *event_queue; 4862 struct pqi_general_admin_request request; 4863 struct pqi_general_admin_response response; 4864 4865 event_queue = &ctrl_info->event_queue; 4866 4867 /* 4868 * Create OQ (Outbound Queue - device to host queue) to dedicate 4869 * to events. 
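	 * All supported event notifications are later routed to this queue
	 * by pqi_configure_events().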
4870 */ 4871 memset(&request, 0, sizeof(request)); 4872 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4873 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4874 &request.header.iu_length); 4875 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4876 put_unaligned_le16(event_queue->oq_id, 4877 &request.data.create_operational_oq.queue_id); 4878 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4879 &request.data.create_operational_oq.element_array_addr); 4880 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4881 &request.data.create_operational_oq.pi_addr); 4882 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4883 &request.data.create_operational_oq.num_elements); 4884 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4885 &request.data.create_operational_oq.element_length); 4886 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4887 put_unaligned_le16(event_queue->int_msg_num, 4888 &request.data.create_operational_oq.int_msg_num); 4889 4890 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4891 &response); 4892 if (rc) 4893 return rc; 4894 4895 event_queue->oq_ci = ctrl_info->iomem_base + 4896 PQI_DEVICE_REGISTERS_OFFSET + 4897 get_unaligned_le64( 4898 &response.data.create_operational_oq.oq_ci_offset); 4899 4900 return 0; 4901 } 4902 4903 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4904 unsigned int group_number) 4905 { 4906 int rc; 4907 struct pqi_queue_group *queue_group; 4908 struct pqi_general_admin_request request; 4909 struct pqi_general_admin_response response; 4910 4911 queue_group = &ctrl_info->queue_groups[group_number]; 4912 4913 /* 4914 * Create IQ (Inbound Queue - host to device queue) for 4915 * RAID path. 4916 */ 4917 memset(&request, 0, sizeof(request)); 4918 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4919 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4920 &request.header.iu_length); 4921 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4922 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4923 &request.data.create_operational_iq.queue_id); 4924 put_unaligned_le64( 4925 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4926 &request.data.create_operational_iq.element_array_addr); 4927 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4928 &request.data.create_operational_iq.ci_addr); 4929 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4930 &request.data.create_operational_iq.num_elements); 4931 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4932 &request.data.create_operational_iq.element_length); 4933 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4934 4935 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4936 &response); 4937 if (rc) { 4938 dev_err(&ctrl_info->pci_dev->dev, 4939 "error creating inbound RAID queue\n"); 4940 return rc; 4941 } 4942 4943 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4944 PQI_DEVICE_REGISTERS_OFFSET + 4945 get_unaligned_le64( 4946 &response.data.create_operational_iq.iq_pi_offset); 4947 4948 /* 4949 * Create IQ (Inbound Queue - host to device queue) for 4950 * Advanced I/O (AIO) path. 
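	 * AIO requests carry a firmware-supplied handle and bypass the
	 * controller's RAID stack, which is what makes accelerated I/O
	 * (RAID bypass) possible.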
4951 */ 4952 memset(&request, 0, sizeof(request)); 4953 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4954 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4955 &request.header.iu_length); 4956 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4957 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4958 &request.data.create_operational_iq.queue_id); 4959 put_unaligned_le64((u64)queue_group-> 4960 iq_element_array_bus_addr[AIO_PATH], 4961 &request.data.create_operational_iq.element_array_addr); 4962 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4963 &request.data.create_operational_iq.ci_addr); 4964 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4965 &request.data.create_operational_iq.num_elements); 4966 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4967 &request.data.create_operational_iq.element_length); 4968 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4969 4970 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4971 &response); 4972 if (rc) { 4973 dev_err(&ctrl_info->pci_dev->dev, 4974 "error creating inbound AIO queue\n"); 4975 return rc; 4976 } 4977 4978 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4979 PQI_DEVICE_REGISTERS_OFFSET + 4980 get_unaligned_le64( 4981 &response.data.create_operational_iq.iq_pi_offset); 4982 4983 /* 4984 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4985 * assumed to be for RAID path I/O unless we change the queue's 4986 * property. 4987 */ 4988 memset(&request, 0, sizeof(request)); 4989 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4990 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4991 &request.header.iu_length); 4992 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4993 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4994 &request.data.change_operational_iq_properties.queue_id); 4995 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4996 &request.data.change_operational_iq_properties.vendor_specific); 4997 4998 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4999 &response); 5000 if (rc) { 5001 dev_err(&ctrl_info->pci_dev->dev, 5002 "error changing queue property\n"); 5003 return rc; 5004 } 5005 5006 /* 5007 * Create OQ (Outbound Queue - device to host queue). 
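	 * Completions for both the RAID and AIO inbound queues of this
	 * queue group are posted to this single outbound queue.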
5008 */ 5009 memset(&request, 0, sizeof(request)); 5010 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 5011 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 5012 &request.header.iu_length); 5013 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 5014 put_unaligned_le16(queue_group->oq_id, 5015 &request.data.create_operational_oq.queue_id); 5016 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 5017 &request.data.create_operational_oq.element_array_addr); 5018 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 5019 &request.data.create_operational_oq.pi_addr); 5020 put_unaligned_le16(ctrl_info->num_elements_per_oq, 5021 &request.data.create_operational_oq.num_elements); 5022 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 5023 &request.data.create_operational_oq.element_length); 5024 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 5025 put_unaligned_le16(queue_group->int_msg_num, 5026 &request.data.create_operational_oq.int_msg_num); 5027 5028 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 5029 &response); 5030 if (rc) { 5031 dev_err(&ctrl_info->pci_dev->dev, 5032 "error creating outbound queue\n"); 5033 return rc; 5034 } 5035 5036 queue_group->oq_ci = ctrl_info->iomem_base + 5037 PQI_DEVICE_REGISTERS_OFFSET + 5038 get_unaligned_le64( 5039 &response.data.create_operational_oq.oq_ci_offset); 5040 5041 return 0; 5042 } 5043 5044 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 5045 { 5046 int rc; 5047 unsigned int i; 5048 5049 rc = pqi_create_event_queue(ctrl_info); 5050 if (rc) { 5051 dev_err(&ctrl_info->pci_dev->dev, 5052 "error creating event queue\n"); 5053 return rc; 5054 } 5055 5056 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5057 rc = pqi_create_queue_group(ctrl_info, i); 5058 if (rc) { 5059 dev_err(&ctrl_info->pci_dev->dev, 5060 "error creating queue group number %u/%u\n", 5061 i, ctrl_info->num_queue_groups); 5062 return rc; 5063 } 5064 } 5065 5066 return 0; 5067 } 5068 5069 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 5070 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) 5071 5072 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 5073 bool enable_events) 5074 { 5075 int rc; 5076 unsigned int i; 5077 struct pqi_event_config *event_config; 5078 struct pqi_event_descriptor *event_descriptor; 5079 struct pqi_general_management_request request; 5080 5081 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5082 GFP_KERNEL); 5083 if (!event_config) 5084 return -ENOMEM; 5085 5086 memset(&request, 0, sizeof(request)); 5087 5088 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 5089 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5090 data.report_event_configuration.sg_descriptors[1]) - 5091 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5092 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5093 &request.data.report_event_configuration.buffer_length); 5094 5095 rc = pqi_map_single(ctrl_info->pci_dev, 5096 request.data.report_event_configuration.sg_descriptors, 5097 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5098 DMA_FROM_DEVICE); 5099 if (rc) 5100 goto out; 5101 5102 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5103 5104 pqi_pci_unmap(ctrl_info->pci_dev, 5105 request.data.report_event_configuration.sg_descriptors, 1, 5106 DMA_FROM_DEVICE); 5107 5108 if (rc) 5109 goto out; 5110 5111 for (i = 0; i < event_config->num_event_descriptors; 
i++) { 5112 event_descriptor = &event_config->descriptors[i]; 5113 if (enable_events && 5114 pqi_is_supported_event(event_descriptor->event_type)) 5115 put_unaligned_le16(ctrl_info->event_queue.oq_id, 5116 &event_descriptor->oq_id); 5117 else 5118 put_unaligned_le16(0, &event_descriptor->oq_id); 5119 } 5120 5121 memset(&request, 0, sizeof(request)); 5122 5123 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 5124 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5125 data.report_event_configuration.sg_descriptors[1]) - 5126 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5127 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5128 &request.data.report_event_configuration.buffer_length); 5129 5130 rc = pqi_map_single(ctrl_info->pci_dev, 5131 request.data.report_event_configuration.sg_descriptors, 5132 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5133 DMA_TO_DEVICE); 5134 if (rc) 5135 goto out; 5136 5137 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5138 5139 pqi_pci_unmap(ctrl_info->pci_dev, 5140 request.data.report_event_configuration.sg_descriptors, 1, 5141 DMA_TO_DEVICE); 5142 5143 out: 5144 kfree(event_config); 5145 5146 return rc; 5147 } 5148 5149 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 5150 { 5151 return pqi_configure_events(ctrl_info, true); 5152 } 5153 5154 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 5155 { 5156 unsigned int i; 5157 struct device *dev; 5158 size_t sg_chain_buffer_length; 5159 struct pqi_io_request *io_request; 5160 5161 if (!ctrl_info->io_request_pool) 5162 return; 5163 5164 dev = &ctrl_info->pci_dev->dev; 5165 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5166 io_request = ctrl_info->io_request_pool; 5167 5168 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5169 kfree(io_request->iu); 5170 if (!io_request->sg_chain_buffer) 5171 break; 5172 dma_free_coherent(dev, sg_chain_buffer_length, 5173 io_request->sg_chain_buffer, 5174 io_request->sg_chain_buffer_dma_handle); 5175 io_request++; 5176 } 5177 5178 kfree(ctrl_info->io_request_pool); 5179 ctrl_info->io_request_pool = NULL; 5180 } 5181 5182 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 5183 { 5184 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 5185 ctrl_info->error_buffer_length, 5186 &ctrl_info->error_buffer_dma_handle, 5187 GFP_KERNEL); 5188 if (!ctrl_info->error_buffer) 5189 return -ENOMEM; 5190 5191 return 0; 5192 } 5193 5194 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 5195 { 5196 unsigned int i; 5197 void *sg_chain_buffer; 5198 size_t sg_chain_buffer_length; 5199 dma_addr_t sg_chain_buffer_dma_handle; 5200 struct device *dev; 5201 struct pqi_io_request *io_request; 5202 5203 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, 5204 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 5205 5206 if (!ctrl_info->io_request_pool) { 5207 dev_err(&ctrl_info->pci_dev->dev, 5208 "failed to allocate I/O request pool\n"); 5209 goto error; 5210 } 5211 5212 dev = &ctrl_info->pci_dev->dev; 5213 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5214 io_request = ctrl_info->io_request_pool; 5215 5216 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5217 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 5218 5219 if (!io_request->iu) { 5220 dev_err(&ctrl_info->pci_dev->dev, 5221 "failed to allocate IU buffers\n"); 5222 goto error; 5223 } 5224 5225 
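		/*
		 * Each I/O slot also gets a coherent DMA buffer that holds
		 * chained SG descriptors whenever a request's scatter list
		 * does not fit in the IU itself (see pqi_build_sg_list()).
		 */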
sg_chain_buffer = dma_alloc_coherent(dev, 5226 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 5227 GFP_KERNEL); 5228 5229 if (!sg_chain_buffer) { 5230 dev_err(&ctrl_info->pci_dev->dev, 5231 "failed to allocate PQI scatter-gather chain buffers\n"); 5232 goto error; 5233 } 5234 5235 io_request->index = i; 5236 io_request->sg_chain_buffer = sg_chain_buffer; 5237 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; 5238 io_request++; 5239 } 5240 5241 return 0; 5242 5243 error: 5244 pqi_free_all_io_requests(ctrl_info); 5245 5246 return -ENOMEM; 5247 } 5248 5249 /* 5250 * Calculate required resources that are sized based on max. outstanding 5251 * requests and max. transfer size. 5252 */ 5253 5254 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 5255 { 5256 u32 max_transfer_size; 5257 u32 max_sg_entries; 5258 5259 ctrl_info->scsi_ml_can_queue = 5260 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 5261 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 5262 5263 ctrl_info->error_buffer_length = 5264 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 5265 5266 if (is_kdump_kernel()) 5267 max_transfer_size = min(ctrl_info->max_transfer_size, 5268 PQI_MAX_TRANSFER_SIZE_KDUMP); 5269 else 5270 max_transfer_size = min(ctrl_info->max_transfer_size, 5271 PQI_MAX_TRANSFER_SIZE); 5272 5273 max_sg_entries = max_transfer_size / PAGE_SIZE; 5274 5275 /* +1 to cover when the buffer is not page-aligned. */ 5276 max_sg_entries++; 5277 5278 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 5279 5280 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 5281 5282 ctrl_info->sg_chain_buffer_length = 5283 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 5284 PQI_EXTRA_SGL_MEMORY; 5285 ctrl_info->sg_tablesize = max_sg_entries; 5286 ctrl_info->max_sectors = max_transfer_size / 512; 5287 } 5288 5289 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 5290 { 5291 int num_queue_groups; 5292 u16 num_elements_per_iq; 5293 u16 num_elements_per_oq; 5294 5295 if (is_kdump_kernel()) { 5296 num_queue_groups = 1; 5297 } else { 5298 int max_queue_groups; 5299 5300 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 5301 ctrl_info->max_outbound_queues - 1); 5302 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 5303 5304 num_queue_groups = 5305 blk_mq_num_online_queues(ctrl_info->max_msix_vectors); 5306 num_queue_groups = min(num_queue_groups, max_queue_groups); 5307 } 5308 5309 ctrl_info->num_queue_groups = num_queue_groups; 5310 5311 /* 5312 * Make sure that the max. inbound IU length is an even multiple 5313 * of our inbound element length. 5314 */ 5315 ctrl_info->max_inbound_iu_length = 5316 (ctrl_info->max_inbound_iu_length_per_firmware / 5317 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 5318 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 5319 5320 num_elements_per_iq = 5321 (ctrl_info->max_inbound_iu_length / 5322 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 5323 5324 /* Add one because one element in each queue is unusable. 
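	 * A ring with identical producer and consumer indexes reads as empty,
	 * so one element can never be filled (see pqi_num_elements_free()).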
*/ 5325 num_elements_per_iq++; 5326 5327 num_elements_per_iq = min(num_elements_per_iq, 5328 ctrl_info->max_elements_per_iq); 5329 5330 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 5331 num_elements_per_oq = min(num_elements_per_oq, 5332 ctrl_info->max_elements_per_oq); 5333 5334 ctrl_info->num_elements_per_iq = num_elements_per_iq; 5335 ctrl_info->num_elements_per_oq = num_elements_per_oq; 5336 5337 ctrl_info->max_sg_per_iu = 5338 ((ctrl_info->max_inbound_iu_length - 5339 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5340 sizeof(struct pqi_sg_descriptor)) + 5341 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 5342 5343 ctrl_info->max_sg_per_r56_iu = 5344 ((ctrl_info->max_inbound_iu_length - 5345 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5346 sizeof(struct pqi_sg_descriptor)) + 5347 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; 5348 } 5349 5350 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, 5351 struct scatterlist *sg) 5352 { 5353 u64 address = (u64)sg_dma_address(sg); 5354 unsigned int length = sg_dma_len(sg); 5355 5356 put_unaligned_le64(address, &sg_descriptor->address); 5357 put_unaligned_le32(length, &sg_descriptor->length); 5358 put_unaligned_le32(0, &sg_descriptor->flags); 5359 } 5360 5361 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, 5362 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, 5363 int max_sg_per_iu, bool *chained) 5364 { 5365 int i; 5366 unsigned int num_sg_in_iu; 5367 5368 *chained = false; 5369 i = 0; 5370 num_sg_in_iu = 0; 5371 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ 5372 5373 while (1) { 5374 pqi_set_sg_descriptor(sg_descriptor, sg); 5375 if (!*chained) 5376 num_sg_in_iu++; 5377 i++; 5378 if (i == sg_count) 5379 break; 5380 sg_descriptor++; 5381 if (i == max_sg_per_iu) { 5382 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, 5383 &sg_descriptor->address); 5384 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), 5385 &sg_descriptor->length); 5386 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); 5387 *chained = true; 5388 num_sg_in_iu++; 5389 sg_descriptor = io_request->sg_chain_buffer; 5390 } 5391 sg = sg_next(sg); 5392 } 5393 5394 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 5395 5396 return num_sg_in_iu; 5397 } 5398 5399 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 5400 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 5401 struct pqi_io_request *io_request) 5402 { 5403 u16 iu_length; 5404 int sg_count; 5405 bool chained; 5406 unsigned int num_sg_in_iu; 5407 struct scatterlist *sg; 5408 struct pqi_sg_descriptor *sg_descriptor; 5409 5410 sg_count = scsi_dma_map(scmd); 5411 if (sg_count < 0) 5412 return sg_count; 5413 5414 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5415 PQI_REQUEST_HEADER_LENGTH; 5416 5417 if (sg_count == 0) 5418 goto out; 5419 5420 sg = scsi_sglist(scmd); 5421 sg_descriptor = request->sg_descriptors; 5422 5423 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5424 ctrl_info->max_sg_per_iu, &chained); 5425 5426 request->partial = chained; 5427 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5428 5429 out: 5430 put_unaligned_le16(iu_length, &request->header.iu_length); 5431 5432 return 0; 5433 } 5434 5435 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, 5436 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, 5437 struct pqi_io_request *io_request) 5438 { 5439 u16 iu_length; 5440 int 
sg_count; 5441 bool chained; 5442 unsigned int num_sg_in_iu; 5443 struct scatterlist *sg; 5444 struct pqi_sg_descriptor *sg_descriptor; 5445 5446 sg_count = scsi_dma_map(scmd); 5447 if (sg_count < 0) 5448 return sg_count; 5449 5450 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - 5451 PQI_REQUEST_HEADER_LENGTH; 5452 num_sg_in_iu = 0; 5453 5454 if (sg_count == 0) 5455 goto out; 5456 5457 sg = scsi_sglist(scmd); 5458 sg_descriptor = request->sg_descriptors; 5459 5460 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5461 ctrl_info->max_sg_per_iu, &chained); 5462 5463 request->partial = chained; 5464 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5465 5466 out: 5467 put_unaligned_le16(iu_length, &request->header.iu_length); 5468 request->num_sg_descriptors = num_sg_in_iu; 5469 5470 return 0; 5471 } 5472 5473 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, 5474 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, 5475 struct pqi_io_request *io_request) 5476 { 5477 u16 iu_length; 5478 int sg_count; 5479 bool chained; 5480 unsigned int num_sg_in_iu; 5481 struct scatterlist *sg; 5482 struct pqi_sg_descriptor *sg_descriptor; 5483 5484 sg_count = scsi_dma_map(scmd); 5485 if (sg_count < 0) 5486 return sg_count; 5487 5488 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - 5489 PQI_REQUEST_HEADER_LENGTH; 5490 num_sg_in_iu = 0; 5491 5492 if (sg_count != 0) { 5493 sg = scsi_sglist(scmd); 5494 sg_descriptor = request->sg_descriptors; 5495 5496 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5497 ctrl_info->max_sg_per_r56_iu, &chained); 5498 5499 request->partial = chained; 5500 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5501 } 5502 5503 put_unaligned_le16(iu_length, &request->header.iu_length); 5504 request->num_sg_descriptors = num_sg_in_iu; 5505 5506 return 0; 5507 } 5508 5509 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 5510 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 5511 struct pqi_io_request *io_request) 5512 { 5513 u16 iu_length; 5514 int sg_count; 5515 bool chained; 5516 unsigned int num_sg_in_iu; 5517 struct scatterlist *sg; 5518 struct pqi_sg_descriptor *sg_descriptor; 5519 5520 sg_count = scsi_dma_map(scmd); 5521 if (sg_count < 0) 5522 return sg_count; 5523 5524 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 5525 PQI_REQUEST_HEADER_LENGTH; 5526 num_sg_in_iu = 0; 5527 5528 if (sg_count == 0) 5529 goto out; 5530 5531 sg = scsi_sglist(scmd); 5532 sg_descriptor = request->sg_descriptors; 5533 5534 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5535 ctrl_info->max_sg_per_iu, &chained); 5536 5537 request->partial = chained; 5538 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5539 5540 out: 5541 put_unaligned_le16(iu_length, &request->header.iu_length); 5542 request->num_sg_descriptors = num_sg_in_iu; 5543 5544 return 0; 5545 } 5546 5547 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 5548 void *context) 5549 { 5550 struct scsi_cmnd *scmd; 5551 5552 scmd = io_request->scmd; 5553 pqi_free_io_request(io_request); 5554 scsi_dma_unmap(scmd); 5555 pqi_scsi_done(scmd); 5556 } 5557 5558 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, 5559 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5560 struct pqi_queue_group *queue_group, bool io_high_prio) 5561 { 5562 int rc; 5563 size_t cdb_length; 5564 struct pqi_io_request *io_request; 5565 
struct pqi_raid_path_request *request; 5566 5567 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5568 if (!io_request) 5569 return SCSI_MLQUEUE_HOST_BUSY; 5570 5571 io_request->io_complete_callback = pqi_raid_io_complete; 5572 io_request->scmd = scmd; 5573 5574 request = io_request->iu; 5575 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); 5576 5577 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5578 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5579 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5580 request->command_priority = io_high_prio; 5581 put_unaligned_le16(io_request->index, &request->request_id); 5582 request->error_index = request->request_id; 5583 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); 5584 request->ml_device_lun_number = (u8)scmd->device->lun; 5585 5586 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 5587 memcpy(request->cdb, scmd->cmnd, cdb_length); 5588 5589 switch (cdb_length) { 5590 case 6: 5591 case 10: 5592 case 12: 5593 case 16: 5594 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5595 break; 5596 case 20: 5597 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; 5598 break; 5599 case 24: 5600 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; 5601 break; 5602 case 28: 5603 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; 5604 break; 5605 case 32: 5606 default: 5607 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; 5608 break; 5609 } 5610 5611 switch (scmd->sc_data_direction) { 5612 case DMA_FROM_DEVICE: 5613 request->data_direction = SOP_READ_FLAG; 5614 break; 5615 case DMA_TO_DEVICE: 5616 request->data_direction = SOP_WRITE_FLAG; 5617 break; 5618 case DMA_NONE: 5619 request->data_direction = SOP_NO_DIRECTION_FLAG; 5620 break; 5621 case DMA_BIDIRECTIONAL: 5622 request->data_direction = SOP_BIDIRECTIONAL; 5623 break; 5624 default: 5625 dev_err(&ctrl_info->pci_dev->dev, 5626 "unknown data direction: %d\n", 5627 scmd->sc_data_direction); 5628 break; 5629 } 5630 5631 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5632 if (rc) { 5633 pqi_free_io_request(io_request); 5634 return SCSI_MLQUEUE_HOST_BUSY; 5635 } 5636 5637 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5638 5639 return 0; 5640 } 5641 5642 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5643 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5644 struct pqi_queue_group *queue_group) 5645 { 5646 bool io_high_prio; 5647 5648 io_high_prio = pqi_is_io_high_priority(device, scmd); 5649 5650 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); 5651 } 5652 5653 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5654 { 5655 struct scsi_cmnd *scmd; 5656 struct pqi_scsi_dev *device; 5657 struct pqi_ctrl_info *ctrl_info; 5658 5659 if (!io_request->raid_bypass) 5660 return false; 5661 5662 scmd = io_request->scmd; 5663 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5664 return false; 5665 if (host_byte(scmd->result) == DID_NO_CONNECT) 5666 return false; 5667 5668 device = scmd->device->hostdata; 5669 if (pqi_device_offline(device) || pqi_device_in_remove(device)) 5670 return false; 5671 5672 ctrl_info = shost_to_hba(scmd->device->host); 5673 if (pqi_ctrl_offline(ctrl_info)) 5674 return false; 5675 5676 return true; 5677 } 5678 5679 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5680 void *context) 
5681 { 5682 struct scsi_cmnd *scmd; 5683 5684 scmd = io_request->scmd; 5685 scsi_dma_unmap(scmd); 5686 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { 5687 set_host_byte(scmd, DID_IMM_RETRY); 5688 pqi_cmd_priv(scmd)->this_residual++; 5689 } 5690 5691 pqi_free_io_request(io_request); 5692 pqi_scsi_done(scmd); 5693 } 5694 5695 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5696 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5697 struct pqi_queue_group *queue_group) 5698 { 5699 bool io_high_prio; 5700 5701 io_high_prio = pqi_is_io_high_priority(device, scmd); 5702 5703 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5704 scmd->cmnd, scmd->cmd_len, queue_group, NULL, 5705 false, io_high_prio); 5706 } 5707 5708 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5709 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5710 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5711 struct pqi_encryption_info *encryption_info, bool raid_bypass, 5712 bool io_high_prio) 5713 { 5714 int rc; 5715 struct pqi_io_request *io_request; 5716 struct pqi_aio_path_request *request; 5717 5718 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5719 if (!io_request) 5720 return SCSI_MLQUEUE_HOST_BUSY; 5721 5722 io_request->io_complete_callback = pqi_aio_io_complete; 5723 io_request->scmd = scmd; 5724 io_request->raid_bypass = raid_bypass; 5725 5726 request = io_request->iu; 5727 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); 5728 5729 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5730 put_unaligned_le32(aio_handle, &request->nexus_id); 5731 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5732 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5733 request->command_priority = io_high_prio; 5734 put_unaligned_le16(io_request->index, &request->request_id); 5735 request->error_index = request->request_id; 5736 if (!raid_bypass && ctrl_info->multi_lun_device_supported) 5737 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); 5738 if (cdb_length > sizeof(request->cdb)) 5739 cdb_length = sizeof(request->cdb); 5740 request->cdb_length = cdb_length; 5741 memcpy(request->cdb, cdb, cdb_length); 5742 5743 switch (scmd->sc_data_direction) { 5744 case DMA_TO_DEVICE: 5745 request->data_direction = SOP_READ_FLAG; 5746 break; 5747 case DMA_FROM_DEVICE: 5748 request->data_direction = SOP_WRITE_FLAG; 5749 break; 5750 case DMA_NONE: 5751 request->data_direction = SOP_NO_DIRECTION_FLAG; 5752 break; 5753 case DMA_BIDIRECTIONAL: 5754 request->data_direction = SOP_BIDIRECTIONAL; 5755 break; 5756 default: 5757 dev_err(&ctrl_info->pci_dev->dev, 5758 "unknown data direction: %d\n", 5759 scmd->sc_data_direction); 5760 break; 5761 } 5762 5763 if (encryption_info) { 5764 request->encryption_enable = true; 5765 put_unaligned_le16(encryption_info->data_encryption_key_index, 5766 &request->data_encryption_key_index); 5767 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5768 &request->encrypt_tweak_lower); 5769 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5770 &request->encrypt_tweak_upper); 5771 } 5772 5773 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5774 if (rc) { 5775 pqi_free_io_request(io_request); 5776 return SCSI_MLQUEUE_HOST_BUSY; 5777 } 5778 5779 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5780 5781 return 0; 5782 } 5783 5784 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 5785 struct 
scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5786 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5787 struct pqi_scsi_dev_raid_map_data *rmd) 5788 { 5789 int rc; 5790 struct pqi_io_request *io_request; 5791 struct pqi_aio_r1_path_request *r1_request; 5792 5793 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5794 if (!io_request) 5795 return SCSI_MLQUEUE_HOST_BUSY; 5796 5797 io_request->io_complete_callback = pqi_aio_io_complete; 5798 io_request->scmd = scmd; 5799 io_request->raid_bypass = true; 5800 5801 r1_request = io_request->iu; 5802 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); 5803 5804 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; 5805 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); 5806 r1_request->num_drives = rmd->num_it_nexus_entries; 5807 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); 5808 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); 5809 if (rmd->num_it_nexus_entries == 3) 5810 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); 5811 5812 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); 5813 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5814 put_unaligned_le16(io_request->index, &r1_request->request_id); 5815 r1_request->error_index = r1_request->request_id; 5816 if (rmd->cdb_length > sizeof(r1_request->cdb)) 5817 rmd->cdb_length = sizeof(r1_request->cdb); 5818 r1_request->cdb_length = rmd->cdb_length; 5819 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); 5820 5821 /* The direction is always write. */ 5822 r1_request->data_direction = SOP_READ_FLAG; 5823 5824 if (encryption_info) { 5825 r1_request->encryption_enable = true; 5826 put_unaligned_le16(encryption_info->data_encryption_key_index, 5827 &r1_request->data_encryption_key_index); 5828 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5829 &r1_request->encrypt_tweak_lower); 5830 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5831 &r1_request->encrypt_tweak_upper); 5832 } 5833 5834 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); 5835 if (rc) { 5836 pqi_free_io_request(io_request); 5837 return SCSI_MLQUEUE_HOST_BUSY; 5838 } 5839 5840 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5841 5842 return 0; 5843 } 5844 5845 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 5846 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5847 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5848 struct pqi_scsi_dev_raid_map_data *rmd) 5849 { 5850 int rc; 5851 struct pqi_io_request *io_request; 5852 struct pqi_aio_r56_path_request *r56_request; 5853 5854 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5855 if (!io_request) 5856 return SCSI_MLQUEUE_HOST_BUSY; 5857 io_request->io_complete_callback = pqi_aio_io_complete; 5858 io_request->scmd = scmd; 5859 io_request->raid_bypass = true; 5860 5861 r56_request = io_request->iu; 5862 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); 5863 5864 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) 5865 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; 5866 else 5867 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; 5868 5869 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); 5870 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); 5871 
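/* The P parity IT nexus is always supplied; the Q parity nexus and the XOR multiplier that follow apply only to RAID 6 volumes. */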
put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); 5872 if (rmd->raid_level == SA_RAID_6) { 5873 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); 5874 r56_request->xor_multiplier = rmd->xor_mult; 5875 } 5876 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); 5877 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5878 put_unaligned_le64(rmd->row, &r56_request->row); 5879 5880 put_unaligned_le16(io_request->index, &r56_request->request_id); 5881 r56_request->error_index = r56_request->request_id; 5882 5883 if (rmd->cdb_length > sizeof(r56_request->cdb)) 5884 rmd->cdb_length = sizeof(r56_request->cdb); 5885 r56_request->cdb_length = rmd->cdb_length; 5886 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); 5887 5888 /* The direction is always write. */ 5889 r56_request->data_direction = SOP_READ_FLAG; 5890 5891 if (encryption_info) { 5892 r56_request->encryption_enable = true; 5893 put_unaligned_le16(encryption_info->data_encryption_key_index, 5894 &r56_request->data_encryption_key_index); 5895 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5896 &r56_request->encrypt_tweak_lower); 5897 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5898 &r56_request->encrypt_tweak_upper); 5899 } 5900 5901 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); 5902 if (rc) { 5903 pqi_free_io_request(io_request); 5904 return SCSI_MLQUEUE_HOST_BUSY; 5905 } 5906 5907 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5908 5909 return 0; 5910 } 5911 5912 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5913 struct scsi_cmnd *scmd) 5914 { 5915 /* 5916 * We are setting host_tagset = 1 during init. 5917 */ 5918 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); 5919 } 5920 5921 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) 5922 { 5923 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) 5924 return false; 5925 5926 return pqi_cmd_priv(scmd)->this_residual == 0; 5927 } 5928 5929 /* 5930 * This function gets called just before we hand the completed SCSI request 5931 * back to the SML. 5932 */ 5933 5934 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5935 { 5936 struct pqi_scsi_dev *device; 5937 struct completion *wait; 5938 5939 if (!scmd->device) { 5940 set_host_byte(scmd, DID_NO_CONNECT); 5941 return; 5942 } 5943 5944 device = scmd->device->hostdata; 5945 if (!device) { 5946 set_host_byte(scmd, DID_NO_CONNECT); 5947 return; 5948 } 5949 5950 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); 5951 5952 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); 5953 if (wait != PQI_NO_COMPLETION) 5954 complete(wait); 5955 } 5956 5957 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, 5958 struct scsi_cmnd *scmd) 5959 { 5960 u32 oldest_jiffies; 5961 u8 lru_index; 5962 int i; 5963 int rc; 5964 struct pqi_scsi_dev *device; 5965 struct pqi_stream_data *pqi_stream_data; 5966 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 5967 5968 if (!ctrl_info->enable_stream_detection) 5969 return false; 5970 5971 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 5972 if (rc) 5973 return false; 5974 5975 /* Check writes only. */ 5976 if (!rmd.is_write) 5977 return false; 5978 5979 device = scmd->device->hostdata; 5980 5981 /* Check for RAID 5/6 streams. 
*/ 5982 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) 5983 return false; 5984 5985 /* 5986 * If controller does not support AIO RAID{5,6} writes, need to send 5987 * requests down non-AIO path. 5988 */ 5989 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || 5990 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) 5991 return true; 5992 5993 lru_index = 0; 5994 oldest_jiffies = INT_MAX; 5995 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { 5996 pqi_stream_data = &device->stream_data[i]; 5997 /* 5998 * Check for adjacent request or request is within 5999 * the previous request. 6000 */ 6001 if ((pqi_stream_data->next_lba && 6002 rmd.first_block >= pqi_stream_data->next_lba) && 6003 rmd.first_block <= pqi_stream_data->next_lba + 6004 rmd.block_cnt) { 6005 pqi_stream_data->next_lba = rmd.first_block + 6006 rmd.block_cnt; 6007 pqi_stream_data->last_accessed = jiffies; 6008 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->write_stream_cnt++; 6009 return true; 6010 } 6011 6012 /* unused entry */ 6013 if (pqi_stream_data->last_accessed == 0) { 6014 lru_index = i; 6015 break; 6016 } 6017 6018 /* Find entry with oldest last accessed time. */ 6019 if (pqi_stream_data->last_accessed <= oldest_jiffies) { 6020 oldest_jiffies = pqi_stream_data->last_accessed; 6021 lru_index = i; 6022 } 6023 } 6024 6025 /* Set LRU entry. */ 6026 pqi_stream_data = &device->stream_data[lru_index]; 6027 pqi_stream_data->last_accessed = jiffies; 6028 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; 6029 6030 return false; 6031 } 6032 6033 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6034 { 6035 int rc; 6036 struct pqi_ctrl_info *ctrl_info; 6037 struct pqi_scsi_dev *device; 6038 u16 hw_queue; 6039 struct pqi_queue_group *queue_group; 6040 bool raid_bypassed; 6041 u8 lun; 6042 6043 scmd->host_scribble = PQI_NO_COMPLETION; 6044 6045 device = scmd->device->hostdata; 6046 6047 if (!device) { 6048 set_host_byte(scmd, DID_NO_CONNECT); 6049 pqi_scsi_done(scmd); 6050 return 0; 6051 } 6052 6053 lun = (u8)scmd->device->lun; 6054 6055 atomic_inc(&device->scsi_cmds_outstanding[lun]); 6056 6057 ctrl_info = shost_to_hba(shost); 6058 6059 if (pqi_ctrl_offline(ctrl_info) || pqi_device_offline(device) || pqi_device_in_remove(device)) { 6060 set_host_byte(scmd, DID_NO_CONNECT); 6061 pqi_scsi_done(scmd); 6062 return 0; 6063 } 6064 6065 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { 6066 rc = SCSI_MLQUEUE_HOST_BUSY; 6067 goto out; 6068 } 6069 6070 /* 6071 * This is necessary because the SML doesn't zero out this field during 6072 * error recovery. 
6073 */ 6074 scmd->result = 0; 6075 6076 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 6077 queue_group = &ctrl_info->queue_groups[hw_queue]; 6078 6079 if (pqi_is_logical_device(device)) { 6080 raid_bypassed = false; 6081 if (device->raid_bypass_enabled && 6082 pqi_is_bypass_eligible_request(scmd) && 6083 !pqi_is_parity_write_stream(ctrl_info, scmd)) { 6084 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6085 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 6086 raid_bypassed = true; 6087 per_cpu_ptr(device->raid_io_stats, raw_smp_processor_id())->raid_bypass_cnt++; 6088 } 6089 } 6090 if (!raid_bypassed) 6091 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6092 } else { 6093 if (device->aio_enabled) 6094 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6095 else 6096 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6097 } 6098 6099 out: 6100 if (rc) { 6101 scmd->host_scribble = NULL; 6102 atomic_dec(&device->scsi_cmds_outstanding[lun]); 6103 } 6104 6105 return rc; 6106 } 6107 6108 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 6109 { 6110 unsigned int i; 6111 unsigned int path; 6112 unsigned long flags; 6113 unsigned int queued_io_count; 6114 struct pqi_queue_group *queue_group; 6115 struct pqi_io_request *io_request; 6116 6117 queued_io_count = 0; 6118 6119 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6120 queue_group = &ctrl_info->queue_groups[i]; 6121 for (path = 0; path < 2; path++) { 6122 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 6123 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 6124 queued_io_count++; 6125 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 6126 } 6127 } 6128 6129 return queued_io_count; 6130 } 6131 6132 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 6133 { 6134 unsigned int i; 6135 unsigned int path; 6136 unsigned int nonempty_inbound_queue_count; 6137 struct pqi_queue_group *queue_group; 6138 pqi_index_t iq_pi; 6139 pqi_index_t iq_ci; 6140 6141 nonempty_inbound_queue_count = 0; 6142 6143 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6144 queue_group = &ctrl_info->queue_groups[i]; 6145 for (path = 0; path < 2; path++) { 6146 iq_pi = queue_group->iq_pi_copy[path]; 6147 iq_ci = readl(queue_group->iq_ci[path]); 6148 if (iq_ci != iq_pi) 6149 nonempty_inbound_queue_count++; 6150 } 6151 } 6152 6153 return nonempty_inbound_queue_count; 6154 } 6155 6156 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 6157 6158 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 6159 { 6160 unsigned long start_jiffies; 6161 unsigned long warning_timeout; 6162 unsigned int queued_io_count; 6163 unsigned int nonempty_inbound_queue_count; 6164 bool displayed_warning; 6165 6166 displayed_warning = false; 6167 start_jiffies = jiffies; 6168 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6169 6170 while (1) { 6171 queued_io_count = pqi_queued_io_count(ctrl_info); 6172 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 6173 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 6174 break; 6175 pqi_check_ctrl_health(ctrl_info); 6176 if (pqi_ctrl_offline(ctrl_info)) 6177 return -ENXIO; 6178 if (time_after(jiffies, warning_timeout)) { 6179 dev_warn(&ctrl_info->pci_dev->dev, 6180 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty 
inbound queue count: %u)\n", 6181 jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 6182 displayed_warning = true; 6183 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6184 } 6185 usleep_range(1000, 2000); 6186 } 6187 6188 if (displayed_warning) 6189 dev_warn(&ctrl_info->pci_dev->dev, 6190 "queued I/O drained after waiting for %u seconds\n", 6191 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6192 6193 return 0; 6194 } 6195 6196 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 6197 struct pqi_scsi_dev *device, u8 lun) 6198 { 6199 unsigned int i; 6200 unsigned int path; 6201 struct pqi_queue_group *queue_group; 6202 unsigned long flags; 6203 struct pqi_io_request *io_request; 6204 struct pqi_io_request *next; 6205 struct scsi_cmnd *scmd; 6206 struct pqi_scsi_dev *scsi_device; 6207 6208 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6209 queue_group = &ctrl_info->queue_groups[i]; 6210 6211 for (path = 0; path < 2; path++) { 6212 spin_lock_irqsave( 6213 &queue_group->submit_lock[path], flags); 6214 6215 list_for_each_entry_safe(io_request, next, 6216 &queue_group->request_list[path], 6217 request_list_entry) { 6218 6219 scmd = io_request->scmd; 6220 if (!scmd) 6221 continue; 6222 6223 scsi_device = scmd->device->hostdata; 6224 6225 list_del(&io_request->request_list_entry); 6226 if (scsi_device == device && (u8)scmd->device->lun == lun) 6227 set_host_byte(scmd, DID_RESET); 6228 else 6229 set_host_byte(scmd, DID_REQUEUE); 6230 pqi_free_io_request(io_request); 6231 scsi_dma_unmap(scmd); 6232 pqi_scsi_done(scmd); 6233 } 6234 6235 spin_unlock_irqrestore( 6236 &queue_group->submit_lock[path], flags); 6237 } 6238 } 6239 } 6240 6241 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 6242 6243 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 6244 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) 6245 { 6246 int cmds_outstanding; 6247 unsigned long start_jiffies; 6248 unsigned long warning_timeout; 6249 unsigned long msecs_waiting; 6250 6251 start_jiffies = jiffies; 6252 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6253 6254 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { 6255 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { 6256 pqi_check_ctrl_health(ctrl_info); 6257 if (pqi_ctrl_offline(ctrl_info)) 6258 return -ENXIO; 6259 } 6260 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6261 if (msecs_waiting >= timeout_msecs) { 6262 dev_err(&ctrl_info->pci_dev->dev, 6263 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6264 ctrl_info->scsi_host->host_no, device->bus, device->target, 6265 lun, msecs_waiting / 1000, cmds_outstanding); 6266 return -ETIMEDOUT; 6267 } 6268 if (time_after(jiffies, warning_timeout)) { 6269 dev_warn(&ctrl_info->pci_dev->dev, 6270 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", 6271 ctrl_info->scsi_host->host_no, device->bus, device->target, 6272 lun, msecs_waiting / 1000, cmds_outstanding); 6273 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6274 } 6275 usleep_range(1000, 2000); 6276 } 6277 6278 return 0; 6279 } 6280 6281 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 6282 void *context) 6283 { 6284 struct completion *waiting = context; 6285 6286 complete(waiting); 6287 } 6288 6289 #define 
PQI_LUN_RESET_POLL_COMPLETION_SECS 10 6290 6291 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 6292 struct pqi_scsi_dev *device, u8 lun, struct completion *wait) 6293 { 6294 int rc; 6295 unsigned int wait_secs; 6296 int cmds_outstanding; 6297 6298 wait_secs = 0; 6299 6300 while (1) { 6301 if (wait_for_completion_io_timeout(wait, 6302 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { 6303 rc = 0; 6304 break; 6305 } 6306 6307 pqi_check_ctrl_health(ctrl_info); 6308 if (pqi_ctrl_offline(ctrl_info)) { 6309 rc = -ENXIO; 6310 break; 6311 } 6312 6313 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6314 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); 6315 dev_warn(&ctrl_info->pci_dev->dev, 6316 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 6317 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); 6318 } 6319 6320 return rc; 6321 } 6322 6323 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 6324 6325 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6326 { 6327 int rc; 6328 struct pqi_io_request *io_request; 6329 DECLARE_COMPLETION_ONSTACK(wait); 6330 struct pqi_task_management_request *request; 6331 6332 io_request = pqi_alloc_io_request(ctrl_info, NULL); 6333 io_request->io_complete_callback = pqi_lun_reset_complete; 6334 io_request->context = &wait; 6335 6336 request = io_request->iu; 6337 memset(request, 0, sizeof(*request)); 6338 6339 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 6340 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 6341 &request->header.iu_length); 6342 put_unaligned_le16(io_request->index, &request->request_id); 6343 memcpy(request->lun_number, device->scsi3addr, 6344 sizeof(request->lun_number)); 6345 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) 6346 request->ml_device_lun_number = lun; 6347 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 6348 if (ctrl_info->tmf_iu_timeout_supported) 6349 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); 6350 6351 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 6352 io_request); 6353 6354 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); 6355 if (rc == 0) 6356 rc = io_request->status; 6357 6358 pqi_free_io_request(io_request); 6359 6360 return rc; 6361 } 6362 6363 #define PQI_LUN_RESET_RETRIES 3 6364 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) 6365 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) 6366 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) 6367 6368 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6369 { 6370 int reset_rc; 6371 int wait_rc; 6372 unsigned int retries; 6373 unsigned long timeout_msecs; 6374 6375 for (retries = 0;;) { 6376 reset_rc = pqi_lun_reset(ctrl_info, device, lun); 6377 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) 6378 break; 6379 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 6380 } 6381 6382 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : 6383 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; 6384 6385 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); 6386 if (wait_rc && reset_rc == 0) 6387 reset_rc = wait_rc; 6388 6389 return reset_rc == 0 ? 
SUCCESS : FAILED; 6390 } 6391 6392 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6393 { 6394 int rc; 6395 6396 pqi_ctrl_block_requests(ctrl_info); 6397 pqi_ctrl_wait_until_quiesced(ctrl_info); 6398 pqi_fail_io_queued_for_device(ctrl_info, device, lun); 6399 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 6400 pqi_device_reset_start(device, lun); 6401 pqi_ctrl_unblock_requests(ctrl_info); 6402 if (rc) 6403 rc = FAILED; 6404 else 6405 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); 6406 pqi_device_reset_done(device, lun); 6407 6408 return rc; 6409 } 6410 6411 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) 6412 { 6413 int rc; 6414 6415 mutex_lock(&ctrl_info->lun_reset_mutex); 6416 6417 dev_err(&ctrl_info->pci_dev->dev, 6418 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", 6419 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); 6420 6421 pqi_check_ctrl_health(ctrl_info); 6422 if (pqi_ctrl_offline(ctrl_info)) 6423 rc = FAILED; 6424 else 6425 rc = pqi_device_reset(ctrl_info, device, lun); 6426 6427 dev_err(&ctrl_info->pci_dev->dev, 6428 "reset of scsi %d:%d:%d:%u: %s\n", 6429 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, 6430 rc == SUCCESS ? "SUCCESS" : "FAILED"); 6431 6432 mutex_unlock(&ctrl_info->lun_reset_mutex); 6433 6434 return rc; 6435 } 6436 6437 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 6438 { 6439 struct Scsi_Host *shost; 6440 struct pqi_ctrl_info *ctrl_info; 6441 struct pqi_scsi_dev *device; 6442 u8 scsi_opcode; 6443 6444 shost = scmd->device->host; 6445 ctrl_info = shost_to_hba(shost); 6446 device = scmd->device->hostdata; 6447 scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; 6448 6449 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); 6450 } 6451 6452 static void pqi_tmf_worker(struct work_struct *work) 6453 { 6454 struct pqi_tmf_work *tmf_work; 6455 struct scsi_cmnd *scmd; 6456 6457 tmf_work = container_of(work, struct pqi_tmf_work, work_struct); 6458 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); 6459 6460 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); 6461 } 6462 6463 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) 6464 { 6465 struct Scsi_Host *shost; 6466 struct pqi_ctrl_info *ctrl_info; 6467 struct pqi_scsi_dev *device; 6468 struct pqi_tmf_work *tmf_work; 6469 DECLARE_COMPLETION_ONSTACK(wait); 6470 6471 shost = scmd->device->host; 6472 ctrl_info = shost_to_hba(shost); 6473 device = scmd->device->hostdata; 6474 6475 dev_err(&ctrl_info->pci_dev->dev, 6476 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", 6477 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6478 6479 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { 6480 dev_err(&ctrl_info->pci_dev->dev, 6481 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", 6482 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6483 scmd->result = DID_RESET << 16; 6484 goto out; 6485 } 6486 6487 tmf_work = &device->tmf_work[scmd->device->lun]; 6488 6489 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { 6490 tmf_work->ctrl_info = ctrl_info; 6491 tmf_work->device = device; 6492 tmf_work->lun = (u8)scmd->device->lun; 6493 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; 6494 schedule_work(&tmf_work->work_struct); 6495 } 6496 6497 wait_for_completion(&wait); 6498 6499 dev_err(&ctrl_info->pci_dev->dev, 6500 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", 6501 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6502 6503 out: 6504 6505 return SUCCESS; 6506 } 6507 6508 static int pqi_sdev_init(struct scsi_device *sdev) 6509 { 6510 struct pqi_scsi_dev *device; 6511 unsigned long flags; 6512 struct pqi_ctrl_info *ctrl_info; 6513 struct scsi_target *starget; 6514 struct sas_rphy *rphy; 6515 6516 ctrl_info = shost_to_hba(sdev->host); 6517 6518 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6519 6520 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 6521 starget = scsi_target(sdev); 6522 rphy = target_to_rphy(starget); 6523 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6524 if (device) { 6525 if (device->target_lun_valid) { 6526 device->ignore_device = true; 6527 } else { 6528 device->target = sdev_id(sdev); 6529 device->lun = sdev->lun; 6530 device->target_lun_valid = true; 6531 } 6532 } 6533 } else { 6534 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 6535 sdev_id(sdev), sdev->lun); 6536 } 6537 6538 if (device) { 6539 sdev->hostdata = device; 6540 device->sdev = sdev; 6541 if (device->queue_depth) { 6542 device->advertised_queue_depth = device->queue_depth; 6543 scsi_change_queue_depth(sdev, 6544 device->advertised_queue_depth); 6545 } 6546 if (pqi_is_logical_device(device)) { 6547 pqi_disable_write_same(sdev); 6548 } else { 6549 sdev->allow_restart = 1; 6550 if (device->device_type == SA_DEVICE_TYPE_NVME) 6551 pqi_disable_write_same(sdev); 6552 } 6553 } 6554 6555 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6556 6557 return 0; 6558 } 6559 6560 static void 
pqi_map_queues(struct Scsi_Host *shost) 6561 { 6562 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6563 6564 if (!ctrl_info->disable_managed_interrupts) 6565 blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 6566 &ctrl_info->pci_dev->dev, 0); 6567 else 6568 blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); 6569 } 6570 6571 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6572 { 6573 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6574 } 6575 6576 static int pqi_sdev_configure(struct scsi_device *sdev, 6577 struct queue_limits *lim) 6578 { 6579 int rc = 0; 6580 struct pqi_scsi_dev *device; 6581 6582 device = sdev->hostdata; 6583 device->devtype = sdev->type; 6584 6585 if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6586 rc = -ENXIO; 6587 device->ignore_device = false; 6588 } 6589 6590 return rc; 6591 } 6592 6593 static void pqi_sdev_destroy(struct scsi_device *sdev) 6594 { 6595 struct pqi_ctrl_info *ctrl_info; 6596 struct pqi_scsi_dev *device; 6597 int mutex_acquired; 6598 unsigned long flags; 6599 6600 ctrl_info = shost_to_hba(sdev->host); 6601 6602 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 6603 if (!mutex_acquired) 6604 return; 6605 6606 device = sdev->hostdata; 6607 if (!device) { 6608 mutex_unlock(&ctrl_info->scan_mutex); 6609 return; 6610 } 6611 6612 device->lun_count--; 6613 if (device->lun_count > 0) { 6614 mutex_unlock(&ctrl_info->scan_mutex); 6615 return; 6616 } 6617 6618 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6619 list_del(&device->scsi_device_list_entry); 6620 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6621 6622 mutex_unlock(&ctrl_info->scan_mutex); 6623 6624 pqi_dev_info(ctrl_info, "removed", device); 6625 pqi_free_device(device); 6626 } 6627 6628 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6629 { 6630 struct pci_dev *pci_dev; 6631 u32 subsystem_vendor; 6632 u32 subsystem_device; 6633 cciss_pci_info_struct pci_info; 6634 6635 if (!arg) 6636 return -EINVAL; 6637 6638 pci_dev = ctrl_info->pci_dev; 6639 6640 pci_info.domain = pci_domain_nr(pci_dev->bus); 6641 pci_info.bus = pci_dev->bus->number; 6642 pci_info.dev_fn = pci_dev->devfn; 6643 subsystem_vendor = pci_dev->subsystem_vendor; 6644 subsystem_device = pci_dev->subsystem_device; 6645 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 6646 6647 if (copy_to_user(arg, &pci_info, sizeof(pci_info))) 6648 return -EFAULT; 6649 6650 return 0; 6651 } 6652 6653 static int pqi_getdrivver_ioctl(void __user *arg) 6654 { 6655 u32 version; 6656 6657 if (!arg) 6658 return -EINVAL; 6659 6660 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 6661 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 6662 6663 if (copy_to_user(arg, &version, sizeof(version))) 6664 return -EFAULT; 6665 6666 return 0; 6667 } 6668 6669 struct ciss_error_info { 6670 u8 scsi_status; 6671 int command_status; 6672 size_t sense_data_length; 6673 }; 6674 6675 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 6676 struct ciss_error_info *ciss_error_info) 6677 { 6678 int ciss_cmd_status; 6679 size_t sense_data_length; 6680 6681 switch (pqi_error_info->data_out_result) { 6682 case PQI_DATA_IN_OUT_GOOD: 6683 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 6684 break; 6685 case PQI_DATA_IN_OUT_UNDERFLOW: 6686 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 6687 break; 6688 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 6689 ciss_cmd_status 
= CISS_CMD_STATUS_DATA_OVERRUN; 6690 break; 6691 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 6692 case PQI_DATA_IN_OUT_BUFFER_ERROR: 6693 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 6694 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 6695 case PQI_DATA_IN_OUT_ERROR: 6696 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 6697 break; 6698 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 6699 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 6700 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 6701 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 6702 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 6703 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 6704 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 6705 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 6706 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 6707 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 6708 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 6709 break; 6710 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 6711 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 6712 break; 6713 case PQI_DATA_IN_OUT_ABORTED: 6714 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 6715 break; 6716 case PQI_DATA_IN_OUT_TIMEOUT: 6717 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 6718 break; 6719 default: 6720 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 6721 break; 6722 } 6723 6724 sense_data_length = 6725 get_unaligned_le16(&pqi_error_info->sense_data_length); 6726 if (sense_data_length == 0) 6727 sense_data_length = 6728 get_unaligned_le16(&pqi_error_info->response_data_length); 6729 if (sense_data_length) 6730 if (sense_data_length > sizeof(pqi_error_info->data)) 6731 sense_data_length = sizeof(pqi_error_info->data); 6732 6733 ciss_error_info->scsi_status = pqi_error_info->status; 6734 ciss_error_info->command_status = ciss_cmd_status; 6735 ciss_error_info->sense_data_length = sense_data_length; 6736 } 6737 6738 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6739 { 6740 int rc; 6741 char *kernel_buffer = NULL; 6742 u16 iu_length; 6743 size_t sense_data_length; 6744 IOCTL_Command_struct iocommand; 6745 struct pqi_raid_path_request request; 6746 struct pqi_raid_error_info pqi_error_info; 6747 struct ciss_error_info ciss_error_info; 6748 6749 if (pqi_ctrl_offline(ctrl_info)) 6750 return -ENXIO; 6751 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) 6752 return -EBUSY; 6753 if (!arg) 6754 return -EINVAL; 6755 if (!capable(CAP_SYS_RAWIO)) 6756 return -EPERM; 6757 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 6758 return -EFAULT; 6759 if (iocommand.buf_size < 1 && 6760 iocommand.Request.Type.Direction != XFER_NONE) 6761 return -EINVAL; 6762 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 6763 return -EINVAL; 6764 if (iocommand.Request.Type.Type != TYPE_CMD) 6765 return -EINVAL; 6766 6767 switch (iocommand.Request.Type.Direction) { 6768 case XFER_NONE: 6769 case XFER_WRITE: 6770 case XFER_READ: 6771 case XFER_READ | XFER_WRITE: 6772 break; 6773 default: 6774 return -EINVAL; 6775 } 6776 6777 if (iocommand.buf_size > 0) { 6778 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6779 kernel_buffer = memdup_user(iocommand.buf, 6780 iocommand.buf_size); 6781 if (IS_ERR(kernel_buffer)) 6782 return PTR_ERR(kernel_buffer); 6783 } else { 6784 kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL); 6785 if (!kernel_buffer) 6786 return -ENOMEM; 6787 } 6788 } 6789 6790 memset(&request, 0, sizeof(request)); 6791 6792 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6793 iu_length = offsetof(struct pqi_raid_path_request, 
sg_descriptors) - 6794 PQI_REQUEST_HEADER_LENGTH; 6795 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6796 sizeof(request.lun_number)); 6797 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6798 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6799 6800 switch (iocommand.Request.Type.Direction) { 6801 case XFER_NONE: 6802 request.data_direction = SOP_NO_DIRECTION_FLAG; 6803 break; 6804 case XFER_WRITE: 6805 request.data_direction = SOP_WRITE_FLAG; 6806 break; 6807 case XFER_READ: 6808 request.data_direction = SOP_READ_FLAG; 6809 break; 6810 case XFER_READ | XFER_WRITE: 6811 request.data_direction = SOP_BIDIRECTIONAL; 6812 break; 6813 } 6814 6815 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6816 6817 if (iocommand.buf_size > 0) { 6818 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6819 6820 rc = pqi_map_single(ctrl_info->pci_dev, 6821 &request.sg_descriptors[0], kernel_buffer, 6822 iocommand.buf_size, DMA_BIDIRECTIONAL); 6823 if (rc) 6824 goto out; 6825 6826 iu_length += sizeof(request.sg_descriptors[0]); 6827 } 6828 6829 put_unaligned_le16(iu_length, &request.header.iu_length); 6830 6831 if (ctrl_info->raid_iu_timeout_supported) 6832 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6833 6834 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6835 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); 6836 6837 if (iocommand.buf_size > 0) 6838 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6839 DMA_BIDIRECTIONAL); 6840 6841 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6842 6843 if (rc == 0) { 6844 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6845 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6846 iocommand.error_info.CommandStatus = 6847 ciss_error_info.command_status; 6848 sense_data_length = ciss_error_info.sense_data_length; 6849 if (sense_data_length) { 6850 if (sense_data_length > 6851 sizeof(iocommand.error_info.SenseInfo)) 6852 sense_data_length = 6853 sizeof(iocommand.error_info.SenseInfo); 6854 memcpy(iocommand.error_info.SenseInfo, 6855 pqi_error_info.data, sense_data_length); 6856 iocommand.error_info.SenseLen = sense_data_length; 6857 } 6858 } 6859 6860 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6861 rc = -EFAULT; 6862 goto out; 6863 } 6864 6865 if (rc == 0 && iocommand.buf_size > 0 && 6866 (iocommand.Request.Type.Direction & XFER_READ)) { 6867 if (copy_to_user(iocommand.buf, kernel_buffer, 6868 iocommand.buf_size)) { 6869 rc = -EFAULT; 6870 } 6871 } 6872 6873 out: 6874 kfree(kernel_buffer); 6875 6876 return rc; 6877 } 6878 6879 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6880 void __user *arg) 6881 { 6882 int rc; 6883 struct pqi_ctrl_info *ctrl_info; 6884 6885 ctrl_info = shost_to_hba(sdev->host); 6886 6887 switch (cmd) { 6888 case CCISS_DEREGDISK: 6889 case CCISS_REGNEWDISK: 6890 case CCISS_REGNEWD: 6891 rc = pqi_scan_scsi_devices(ctrl_info); 6892 break; 6893 case CCISS_GETPCIINFO: 6894 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6895 break; 6896 case CCISS_GETDRIVVER: 6897 rc = pqi_getdrivver_ioctl(arg); 6898 break; 6899 case CCISS_PASSTHRU: 6900 rc = pqi_passthru_ioctl(ctrl_info, arg); 6901 break; 6902 default: 6903 rc = -EINVAL; 6904 break; 6905 } 6906 6907 return rc; 6908 } 6909 6910 static ssize_t pqi_firmware_version_show(struct device *dev, 6911 struct device_attribute *attr, char *buffer) 6912 { 6913 struct Scsi_Host *shost; 6914 struct pqi_ctrl_info *ctrl_info; 6915 6916 
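/* Host sysfs attribute: report the firmware version string saved in ctrl_info. */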
shost = class_to_shost(dev); 6917 ctrl_info = shost_to_hba(shost); 6918 6919 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6920 } 6921 6922 static ssize_t pqi_serial_number_show(struct device *dev, 6923 struct device_attribute *attr, char *buffer) 6924 { 6925 struct Scsi_Host *shost; 6926 struct pqi_ctrl_info *ctrl_info; 6927 6928 shost = class_to_shost(dev); 6929 ctrl_info = shost_to_hba(shost); 6930 6931 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6932 } 6933 6934 static ssize_t pqi_model_show(struct device *dev, 6935 struct device_attribute *attr, char *buffer) 6936 { 6937 struct Scsi_Host *shost; 6938 struct pqi_ctrl_info *ctrl_info; 6939 6940 shost = class_to_shost(dev); 6941 ctrl_info = shost_to_hba(shost); 6942 6943 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6944 } 6945 6946 static ssize_t pqi_vendor_show(struct device *dev, 6947 struct device_attribute *attr, char *buffer) 6948 { 6949 struct Scsi_Host *shost; 6950 struct pqi_ctrl_info *ctrl_info; 6951 6952 shost = class_to_shost(dev); 6953 ctrl_info = shost_to_hba(shost); 6954 6955 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6956 } 6957 6958 static ssize_t pqi_host_rescan_store(struct device *dev, 6959 struct device_attribute *attr, const char *buffer, size_t count) 6960 { 6961 struct Scsi_Host *shost = class_to_shost(dev); 6962 6963 pqi_scan_start(shost); 6964 6965 return count; 6966 } 6967 6968 static ssize_t pqi_lockup_action_show(struct device *dev, 6969 struct device_attribute *attr, char *buffer) 6970 { 6971 int count = 0; 6972 unsigned int i; 6973 6974 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6975 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6976 count += scnprintf(buffer + count, PAGE_SIZE - count, 6977 "[%s] ", pqi_lockup_actions[i].name); 6978 else 6979 count += scnprintf(buffer + count, PAGE_SIZE - count, 6980 "%s ", pqi_lockup_actions[i].name); 6981 } 6982 6983 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6984 6985 return count; 6986 } 6987 6988 static ssize_t pqi_lockup_action_store(struct device *dev, 6989 struct device_attribute *attr, const char *buffer, size_t count) 6990 { 6991 unsigned int i; 6992 char *action_name; 6993 char action_name_buffer[32]; 6994 6995 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6996 action_name = strstrip(action_name_buffer); 6997 6998 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6999 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 7000 pqi_lockup_action = pqi_lockup_actions[i].action; 7001 return count; 7002 } 7003 } 7004 7005 return -EINVAL; 7006 } 7007 7008 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, 7009 struct device_attribute *attr, char *buffer) 7010 { 7011 struct Scsi_Host *shost = class_to_shost(dev); 7012 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7013 7014 return scnprintf(buffer, 10, "%x\n", 7015 ctrl_info->enable_stream_detection); 7016 } 7017 7018 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, 7019 struct device_attribute *attr, const char *buffer, size_t count) 7020 { 7021 struct Scsi_Host *shost = class_to_shost(dev); 7022 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7023 u8 set_stream_detection = 0; 7024 7025 if (kstrtou8(buffer, 0, &set_stream_detection)) 7026 return -EINVAL; 7027 7028 if (set_stream_detection > 0) 7029 set_stream_detection = 1; 7030 7031 ctrl_info->enable_stream_detection = set_stream_detection; 7032 
7033 return count; 7034 } 7035 7036 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, 7037 struct device_attribute *attr, char *buffer) 7038 { 7039 struct Scsi_Host *shost = class_to_shost(dev); 7040 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7041 7042 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); 7043 } 7044 7045 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, 7046 struct device_attribute *attr, const char *buffer, size_t count) 7047 { 7048 struct Scsi_Host *shost = class_to_shost(dev); 7049 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7050 u8 set_r5_writes = 0; 7051 7052 if (kstrtou8(buffer, 0, &set_r5_writes)) 7053 return -EINVAL; 7054 7055 if (set_r5_writes > 0) 7056 set_r5_writes = 1; 7057 7058 ctrl_info->enable_r5_writes = set_r5_writes; 7059 7060 return count; 7061 } 7062 7063 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, 7064 struct device_attribute *attr, char *buffer) 7065 { 7066 struct Scsi_Host *shost = class_to_shost(dev); 7067 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7068 7069 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); 7070 } 7071 7072 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, 7073 struct device_attribute *attr, const char *buffer, size_t count) 7074 { 7075 struct Scsi_Host *shost = class_to_shost(dev); 7076 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7077 u8 set_r6_writes = 0; 7078 7079 if (kstrtou8(buffer, 0, &set_r6_writes)) 7080 return -EINVAL; 7081 7082 if (set_r6_writes > 0) 7083 set_r6_writes = 1; 7084 7085 ctrl_info->enable_r6_writes = set_r6_writes; 7086 7087 return count; 7088 } 7089 7090 static DEVICE_STRING_ATTR_RO(driver_version, 0444, 7091 DRIVER_VERSION BUILD_TIMESTAMP); 7092 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 7093 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 7094 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 7095 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 7096 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 7097 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, 7098 pqi_lockup_action_store); 7099 static DEVICE_ATTR(enable_stream_detection, 0644, 7100 pqi_host_enable_stream_detection_show, 7101 pqi_host_enable_stream_detection_store); 7102 static DEVICE_ATTR(enable_r5_writes, 0644, 7103 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); 7104 static DEVICE_ATTR(enable_r6_writes, 0644, 7105 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 7106 7107 static struct attribute *pqi_shost_attrs[] = { 7108 &dev_attr_driver_version.attr.attr, 7109 &dev_attr_firmware_version.attr, 7110 &dev_attr_model.attr, 7111 &dev_attr_serial_number.attr, 7112 &dev_attr_vendor.attr, 7113 &dev_attr_rescan.attr, 7114 &dev_attr_lockup_action.attr, 7115 &dev_attr_enable_stream_detection.attr, 7116 &dev_attr_enable_r5_writes.attr, 7117 &dev_attr_enable_r6_writes.attr, 7118 NULL 7119 }; 7120 7121 ATTRIBUTE_GROUPS(pqi_shost); 7122 7123 static ssize_t pqi_unique_id_show(struct device *dev, 7124 struct device_attribute *attr, char *buffer) 7125 { 7126 struct pqi_ctrl_info *ctrl_info; 7127 struct scsi_device *sdev; 7128 struct pqi_scsi_dev *device; 7129 unsigned long flags; 7130 u8 unique_id[16]; 7131 7132 sdev = to_scsi_device(dev); 7133 ctrl_info = shost_to_hba(sdev->host); 7134 7135 if (pqi_ctrl_offline(ctrl_info)) 7136 return -ENODEV; 7137 7138 
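/* Hold the device list lock while the WWID/volume ID is copied out so the device cannot be removed underneath us. */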
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7139 7140 device = sdev->hostdata; 7141 if (!device) { 7142 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7143 return -ENODEV; 7144 } 7145 7146 if (device->is_physical_device) 7147 memcpy(unique_id, device->wwid, sizeof(device->wwid)); 7148 else 7149 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 7150 7151 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7152 7153 return scnprintf(buffer, PAGE_SIZE, 7154 "%02X%02X%02X%02X%02X%02X%02X%02X" 7155 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 7156 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 7157 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 7158 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 7159 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 7160 } 7161 7162 static ssize_t pqi_lunid_show(struct device *dev, 7163 struct device_attribute *attr, char *buffer) 7164 { 7165 struct pqi_ctrl_info *ctrl_info; 7166 struct scsi_device *sdev; 7167 struct pqi_scsi_dev *device; 7168 unsigned long flags; 7169 u8 lunid[8]; 7170 7171 sdev = to_scsi_device(dev); 7172 ctrl_info = shost_to_hba(sdev->host); 7173 7174 if (pqi_ctrl_offline(ctrl_info)) 7175 return -ENODEV; 7176 7177 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7178 7179 device = sdev->hostdata; 7180 if (!device) { 7181 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7182 return -ENODEV; 7183 } 7184 7185 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 7186 7187 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7188 7189 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 7190 } 7191 7192 #define MAX_PATHS 8 7193 7194 static ssize_t pqi_path_info_show(struct device *dev, 7195 struct device_attribute *attr, char *buf) 7196 { 7197 struct pqi_ctrl_info *ctrl_info; 7198 struct scsi_device *sdev; 7199 struct pqi_scsi_dev *device; 7200 unsigned long flags; 7201 int i; 7202 int output_len = 0; 7203 u8 box; 7204 u8 bay; 7205 u8 path_map_index; 7206 char *active; 7207 u8 phys_connector[2]; 7208 7209 sdev = to_scsi_device(dev); 7210 ctrl_info = shost_to_hba(sdev->host); 7211 7212 if (pqi_ctrl_offline(ctrl_info)) 7213 return -ENODEV; 7214 7215 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7216 7217 device = sdev->hostdata; 7218 if (!device) { 7219 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7220 return -ENODEV; 7221 } 7222 7223 bay = device->bay; 7224 for (i = 0; i < MAX_PATHS; i++) { 7225 path_map_index = 1 << i; 7226 if (i == device->active_path_index) 7227 active = "Active"; 7228 else if (device->path_map & path_map_index) 7229 active = "Inactive"; 7230 else 7231 continue; 7232 7233 output_len += scnprintf(buf + output_len, 7234 PAGE_SIZE - output_len, 7235 "[%d:%d:%d:%d] %20.20s ", 7236 ctrl_info->scsi_host->host_no, 7237 device->bus, device->target, 7238 device->lun, 7239 scsi_device_type(device->devtype)); 7240 7241 if (device->devtype == TYPE_RAID || 7242 pqi_is_logical_device(device)) 7243 goto end_buffer; 7244 7245 memcpy(&phys_connector, &device->phys_connector[i], 7246 sizeof(phys_connector)); 7247 if (phys_connector[0] < '0') 7248 phys_connector[0] = '0'; 7249 if (phys_connector[1] < '0') 7250 phys_connector[1] = '0'; 7251 7252 output_len += scnprintf(buf + output_len, 7253 PAGE_SIZE - output_len, 7254 "PORT: %.2s ", phys_connector); 7255 7256 box = device->box[i]; 7257 if (box != 0 && box != 0xFF) 7258 output_len += scnprintf(buf + output_len, 7259 
PAGE_SIZE - output_len, 7260 "BOX: %hhu ", box); 7261 7262 if ((device->devtype == TYPE_DISK || 7263 device->devtype == TYPE_ZBC) && 7264 pqi_expose_device(device)) 7265 output_len += scnprintf(buf + output_len, 7266 PAGE_SIZE - output_len, 7267 "BAY: %hhu ", bay); 7268 7269 end_buffer: 7270 output_len += scnprintf(buf + output_len, 7271 PAGE_SIZE - output_len, 7272 "%s\n", active); 7273 } 7274 7275 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7276 7277 return output_len; 7278 } 7279 7280 static ssize_t pqi_sas_address_show(struct device *dev, 7281 struct device_attribute *attr, char *buffer) 7282 { 7283 struct pqi_ctrl_info *ctrl_info; 7284 struct scsi_device *sdev; 7285 struct pqi_scsi_dev *device; 7286 unsigned long flags; 7287 u64 sas_address; 7288 7289 sdev = to_scsi_device(dev); 7290 ctrl_info = shost_to_hba(sdev->host); 7291 7292 if (pqi_ctrl_offline(ctrl_info)) 7293 return -ENODEV; 7294 7295 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7296 7297 device = sdev->hostdata; 7298 if (!device) { 7299 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7300 return -ENODEV; 7301 } 7302 7303 sas_address = device->sas_address; 7304 7305 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7306 7307 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 7308 } 7309 7310 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 7311 struct device_attribute *attr, char *buffer) 7312 { 7313 struct pqi_ctrl_info *ctrl_info; 7314 struct scsi_device *sdev; 7315 struct pqi_scsi_dev *device; 7316 unsigned long flags; 7317 7318 sdev = to_scsi_device(dev); 7319 ctrl_info = shost_to_hba(sdev->host); 7320 7321 if (pqi_ctrl_offline(ctrl_info)) 7322 return -ENODEV; 7323 7324 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7325 7326 device = sdev->hostdata; 7327 if (!device) { 7328 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7329 return -ENODEV; 7330 } 7331 7332 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 7333 buffer[1] = '\n'; 7334 buffer[2] = '\0'; 7335 7336 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7337 7338 return 2; 7339 } 7340 7341 static ssize_t pqi_raid_level_show(struct device *dev, 7342 struct device_attribute *attr, char *buffer) 7343 { 7344 struct pqi_ctrl_info *ctrl_info; 7345 struct scsi_device *sdev; 7346 struct pqi_scsi_dev *device; 7347 unsigned long flags; 7348 char *raid_level; 7349 7350 sdev = to_scsi_device(dev); 7351 ctrl_info = shost_to_hba(sdev->host); 7352 7353 if (pqi_ctrl_offline(ctrl_info)) 7354 return -ENODEV; 7355 7356 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7357 7358 device = sdev->hostdata; 7359 if (!device) { 7360 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7361 return -ENODEV; 7362 } 7363 7364 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 7365 raid_level = pqi_raid_level_to_string(device->raid_level); 7366 else 7367 raid_level = "N/A"; 7368 7369 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7370 7371 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 7372 } 7373 7374 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 7375 struct device_attribute *attr, char *buffer) 7376 { 7377 struct pqi_ctrl_info *ctrl_info; 7378 struct scsi_device *sdev; 7379 struct pqi_scsi_dev *device; 7380 unsigned long flags; 7381 u64 raid_bypass_cnt; 7382 int cpu; 7383 7384 sdev = to_scsi_device(dev); 7385 ctrl_info = shost_to_hba(sdev->host); 7386 7387 if (pqi_ctrl_offline(ctrl_info)) 7388 return -ENODEV; 7389 7390 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7391 7392 device = sdev->hostdata; 7393 if (!device) { 7394 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7395 return -ENODEV; 7396 } 7397 7398 raid_bypass_cnt = 0; 7399 7400 if (device->raid_io_stats) { 7401 for_each_online_cpu(cpu) { 7402 raid_bypass_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->raid_bypass_cnt; 7403 } 7404 } 7405 7406 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7407 7408 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", raid_bypass_cnt); 7409 } 7410 7411 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, 7412 struct device_attribute *attr, char *buf) 7413 { 7414 struct pqi_ctrl_info *ctrl_info; 7415 struct scsi_device *sdev; 7416 struct pqi_scsi_dev *device; 7417 unsigned long flags; 7418 int output_len = 0; 7419 7420 sdev = to_scsi_device(dev); 7421 ctrl_info = shost_to_hba(sdev->host); 7422 7423 if (pqi_ctrl_offline(ctrl_info)) 7424 return -ENODEV; 7425 7426 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7427 7428 device = sdev->hostdata; 7429 if (!device) { 7430 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7431 return -ENODEV; 7432 } 7433 7434 output_len = snprintf(buf, PAGE_SIZE, "%d\n", 7435 device->ncq_prio_enable); 7436 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7437 7438 return output_len; 7439 } 7440 7441 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, 7442 struct device_attribute *attr, 7443 const char *buf, size_t count) 7444 { 7445 struct pqi_ctrl_info *ctrl_info; 7446 struct scsi_device *sdev; 7447 struct pqi_scsi_dev *device; 7448 unsigned long flags; 7449 u8 ncq_prio_enable = 0; 7450 7451 if (kstrtou8(buf, 0, &ncq_prio_enable)) 7452 return -EINVAL; 7453 7454 sdev = to_scsi_device(dev); 7455 ctrl_info = shost_to_hba(sdev->host); 7456 7457 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7458 7459 device = 
sdev->hostdata; 7460 7461 if (!device) { 7462 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7463 return -ENODEV; 7464 } 7465 7466 if (!device->ncq_prio_support) { 7467 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7468 return -EINVAL; 7469 } 7470 7471 device->ncq_prio_enable = ncq_prio_enable; 7472 7473 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7474 7475 return strlen(buf); 7476 } 7477 7478 static ssize_t pqi_numa_node_show(struct device *dev, 7479 struct device_attribute *attr, char *buffer) 7480 { 7481 struct scsi_device *sdev; 7482 struct pqi_ctrl_info *ctrl_info; 7483 7484 sdev = to_scsi_device(dev); 7485 ctrl_info = shost_to_hba(sdev->host); 7486 7487 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); 7488 } 7489 7490 static ssize_t pqi_write_stream_cnt_show(struct device *dev, 7491 struct device_attribute *attr, char *buffer) 7492 { 7493 struct pqi_ctrl_info *ctrl_info; 7494 struct scsi_device *sdev; 7495 struct pqi_scsi_dev *device; 7496 unsigned long flags; 7497 u64 write_stream_cnt; 7498 int cpu; 7499 7500 sdev = to_scsi_device(dev); 7501 ctrl_info = shost_to_hba(sdev->host); 7502 7503 if (pqi_ctrl_offline(ctrl_info)) 7504 return -ENODEV; 7505 7506 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7507 7508 device = sdev->hostdata; 7509 if (!device) { 7510 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7511 return -ENODEV; 7512 } 7513 7514 write_stream_cnt = 0; 7515 7516 if (device->raid_io_stats) { 7517 for_each_online_cpu(cpu) { 7518 write_stream_cnt += per_cpu_ptr(device->raid_io_stats, cpu)->write_stream_cnt; 7519 } 7520 } 7521 7522 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7523 7524 return scnprintf(buffer, PAGE_SIZE, "0x%llx\n", write_stream_cnt); 7525 } 7526 7527 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 7528 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 7529 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 7530 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 7531 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 7532 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7533 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7534 static DEVICE_ATTR(sas_ncq_prio_enable, 0644, 7535 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); 7536 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); 7537 static DEVICE_ATTR(write_stream_cnt, 0444, pqi_write_stream_cnt_show, NULL); 7538 7539 static struct attribute *pqi_sdev_attrs[] = { 7540 &dev_attr_lunid.attr, 7541 &dev_attr_unique_id.attr, 7542 &dev_attr_path_info.attr, 7543 &dev_attr_sas_address.attr, 7544 &dev_attr_ssd_smart_path_enabled.attr, 7545 &dev_attr_raid_level.attr, 7546 &dev_attr_raid_bypass_cnt.attr, 7547 &dev_attr_sas_ncq_prio_enable.attr, 7548 &dev_attr_numa_node.attr, 7549 &dev_attr_write_stream_cnt.attr, 7550 NULL 7551 }; 7552 7553 ATTRIBUTE_GROUPS(pqi_sdev); 7554 7555 static const struct scsi_host_template pqi_driver_template = { 7556 .module = THIS_MODULE, 7557 .name = DRIVER_NAME_SHORT, 7558 .proc_name = DRIVER_NAME_SHORT, 7559 .queuecommand = pqi_scsi_queue_command, 7560 .scan_start = pqi_scan_start, 7561 .scan_finished = pqi_scan_finished, 7562 .this_id = -1, 7563 .eh_device_reset_handler = pqi_eh_device_reset_handler, 7564 .eh_abort_handler = pqi_eh_abort_handler, 7565 .ioctl = pqi_ioctl, 7566 .sdev_init = pqi_sdev_init, 
7567 .sdev_configure = pqi_sdev_configure, 7568 .sdev_destroy = pqi_sdev_destroy, 7569 .map_queues = pqi_map_queues, 7570 .sdev_groups = pqi_sdev_groups, 7571 .shost_groups = pqi_shost_groups, 7572 .cmd_size = sizeof(struct pqi_cmd_priv), 7573 }; 7574 7575 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 7576 { 7577 int rc; 7578 struct Scsi_Host *shost; 7579 7580 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 7581 if (!shost) { 7582 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); 7583 return -ENOMEM; 7584 } 7585 7586 shost->io_port = 0; 7587 shost->n_io_port = 0; 7588 shost->this_id = -1; 7589 shost->max_channel = PQI_MAX_BUS; 7590 shost->max_cmd_len = MAX_COMMAND_SIZE; 7591 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; 7592 shost->max_id = ~0; 7593 shost->max_sectors = ctrl_info->max_sectors; 7594 shost->can_queue = ctrl_info->scsi_ml_can_queue; 7595 shost->cmd_per_lun = shost->can_queue; 7596 shost->sg_tablesize = ctrl_info->sg_tablesize; 7597 shost->transportt = pqi_sas_transport_template; 7598 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 7599 shost->unique_id = shost->irq; 7600 shost->nr_hw_queues = ctrl_info->num_queue_groups; 7601 shost->host_tagset = 1; 7602 shost->hostdata[0] = (unsigned long)ctrl_info; 7603 7604 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 7605 if (rc) { 7606 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); 7607 goto free_host; 7608 } 7609 7610 rc = pqi_add_sas_host(shost, ctrl_info); 7611 if (rc) { 7612 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); 7613 goto remove_host; 7614 } 7615 7616 ctrl_info->scsi_host = shost; 7617 7618 return 0; 7619 7620 remove_host: 7621 scsi_remove_host(shost); 7622 free_host: 7623 scsi_host_put(shost); 7624 7625 return rc; 7626 } 7627 7628 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 7629 { 7630 struct Scsi_Host *shost; 7631 7632 pqi_delete_sas_host(ctrl_info); 7633 7634 shost = ctrl_info->scsi_host; 7635 if (!shost) 7636 return; 7637 7638 scsi_remove_host(shost); 7639 scsi_host_put(shost); 7640 } 7641 7642 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 7643 { 7644 int rc = 0; 7645 struct pqi_device_registers __iomem *pqi_registers; 7646 unsigned long timeout; 7647 unsigned int timeout_msecs; 7648 union pqi_reset_register reset_reg; 7649 7650 pqi_registers = ctrl_info->pqi_registers; 7651 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 7652 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 7653 7654 while (1) { 7655 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 7656 reset_reg.all_bits = readl(&pqi_registers->device_reset); 7657 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 7658 break; 7659 if (!sis_is_firmware_running(ctrl_info)) { 7660 rc = -ENXIO; 7661 break; 7662 } 7663 if (time_after(jiffies, timeout)) { 7664 rc = -ETIMEDOUT; 7665 break; 7666 } 7667 } 7668 7669 return rc; 7670 } 7671 7672 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 7673 { 7674 int rc; 7675 union pqi_reset_register reset_reg; 7676 7677 if (ctrl_info->pqi_reset_quiesce_supported) { 7678 rc = sis_pqi_reset_quiesce(ctrl_info); 7679 if (rc) { 7680 dev_err(&ctrl_info->pci_dev->dev, 7681 "PQI reset failed during quiesce with error %d\n", rc); 7682 return rc; 7683 } 7684 } 7685 7686 reset_reg.all_bits = 0; 7687 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 7688 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 7689 7690 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 
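/*
 * The reset does not complete synchronously. pqi_wait_for_pqi_reset_completion()
 * polls the device_reset register until the firmware reports
 * PQI_RESET_ACTION_COMPLETED, using a timeout derived from the controller's
 * max_reset_timeout register (units of 100 milliseconds).
 */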
7691 7692 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 7693 if (rc) 7694 dev_err(&ctrl_info->pci_dev->dev, 7695 "PQI reset failed with error %d\n", rc); 7696 7697 return rc; 7698 } 7699 7700 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 7701 { 7702 int rc; 7703 struct bmic_sense_subsystem_info *sense_info; 7704 7705 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 7706 if (!sense_info) 7707 return -ENOMEM; 7708 7709 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 7710 if (rc) 7711 goto out; 7712 7713 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 7714 sizeof(sense_info->ctrl_serial_number)); 7715 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 7716 7717 out: 7718 kfree(sense_info); 7719 7720 return rc; 7721 } 7722 7723 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 7724 { 7725 int rc; 7726 struct bmic_identify_controller *identify; 7727 7728 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 7729 if (!identify) 7730 return -ENOMEM; 7731 7732 rc = pqi_identify_controller(ctrl_info, identify); 7733 if (rc) 7734 goto out; 7735 7736 if (get_unaligned_le32(&identify->extra_controller_flags) & 7737 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { 7738 memcpy(ctrl_info->firmware_version, 7739 identify->firmware_version_long, 7740 sizeof(identify->firmware_version_long)); 7741 } else { 7742 memcpy(ctrl_info->firmware_version, 7743 identify->firmware_version_short, 7744 sizeof(identify->firmware_version_short)); 7745 ctrl_info->firmware_version 7746 [sizeof(identify->firmware_version_short)] = '\0'; 7747 snprintf(ctrl_info->firmware_version + 7748 strlen(ctrl_info->firmware_version), 7749 sizeof(ctrl_info->firmware_version) - 7750 sizeof(identify->firmware_version_short), 7751 "-%u", 7752 get_unaligned_le16(&identify->firmware_build_number)); 7753 } 7754 7755 memcpy(ctrl_info->model, identify->product_id, 7756 sizeof(identify->product_id)); 7757 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 7758 7759 memcpy(ctrl_info->vendor, identify->vendor_id, 7760 sizeof(identify->vendor_id)); 7761 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 7762 7763 dev_info(&ctrl_info->pci_dev->dev, 7764 "Firmware version: %s\n", ctrl_info->firmware_version); 7765 7766 out: 7767 kfree(identify); 7768 7769 return rc; 7770 } 7771 7772 struct pqi_config_table_section_info { 7773 struct pqi_ctrl_info *ctrl_info; 7774 void *section; 7775 u32 section_offset; 7776 void __iomem *section_iomem_addr; 7777 }; 7778 7779 static inline bool pqi_is_firmware_feature_supported( 7780 struct pqi_config_table_firmware_features *firmware_features, 7781 unsigned int bit_position) 7782 { 7783 unsigned int byte_index; 7784 7785 byte_index = bit_position / BITS_PER_BYTE; 7786 7787 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 7788 return false; 7789 7790 return firmware_features->features_supported[byte_index] & 7791 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 7792 } 7793 7794 static inline bool pqi_is_firmware_feature_enabled( 7795 struct pqi_config_table_firmware_features *firmware_features, 7796 void __iomem *firmware_features_iomem_addr, 7797 unsigned int bit_position) 7798 { 7799 unsigned int byte_index; 7800 u8 __iomem *features_enabled_iomem_addr; 7801 7802 byte_index = (bit_position / BITS_PER_BYTE) + 7803 (le16_to_cpu(firmware_features->num_elements) * 2); 7804 7805 features_enabled_iomem_addr = firmware_features_iomem_addr + 7806 offsetof(struct pqi_config_table_firmware_features, 7807 features_supported) + byte_index; 7808 7809 return *((__force u8 *)features_enabled_iomem_addr) & 7810 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 7811 } 7812 7813 static inline void pqi_request_firmware_feature( 7814 struct pqi_config_table_firmware_features *firmware_features, 7815 unsigned int bit_position) 7816 { 7817 unsigned int byte_index; 7818 7819 byte_index = (bit_position / BITS_PER_BYTE) + 7820 le16_to_cpu(firmware_features->num_elements); 7821 7822 firmware_features->features_supported[byte_index] |= 7823 (1 << (bit_position % BITS_PER_BYTE)); 7824 } 7825 7826 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 7827 u16 first_section, u16 last_section) 7828 { 7829 struct pqi_vendor_general_request request; 7830 7831 memset(&request, 0, sizeof(request)); 7832 7833 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7834 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7835 &request.header.iu_length); 7836 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 7837 &request.function_code); 7838 put_unaligned_le16(first_section, 7839 &request.data.config_table_update.first_section); 7840 put_unaligned_le16(last_section, 7841 &request.data.config_table_update.last_section); 7842 7843 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 7844 } 7845 7846 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 7847 struct pqi_config_table_firmware_features *firmware_features, 7848 void __iomem *firmware_features_iomem_addr) 7849 { 7850 void *features_requested; 7851 void __iomem *features_requested_iomem_addr; 7852 void __iomem *host_max_known_feature_iomem_addr; 7853 7854 features_requested = firmware_features->features_supported + 7855 le16_to_cpu(firmware_features->num_elements); 7856 7857 features_requested_iomem_addr = firmware_features_iomem_addr + 7858 (features_requested - (void *)firmware_features); 7859 7860 memcpy_toio(features_requested_iomem_addr, features_requested, 7861 le16_to_cpu(firmware_features->num_elements)); 7862 7863 if (pqi_is_firmware_feature_supported(firmware_features, 7864 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { 7865 host_max_known_feature_iomem_addr = 7866 features_requested_iomem_addr + 7867 (le16_to_cpu(firmware_features->num_elements) * 2) + 7868 sizeof(__le16); 7869 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); 7870 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); 7871 } 7872 7873 return pqi_config_table_update(ctrl_info, 7874 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 7875 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 7876 } 7877 7878 struct pqi_firmware_feature { 7879 char *feature_name; 7880 unsigned int feature_bit; 7881 bool supported; 7882 bool enabled; 7883 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 7884 struct pqi_firmware_feature *firmware_feature); 7885 }; 7886 7887 static void 
pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 7888 struct pqi_firmware_feature *firmware_feature) 7889 { 7890 if (!firmware_feature->supported) { 7891 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 7892 firmware_feature->feature_name); 7893 return; 7894 } 7895 7896 if (firmware_feature->enabled) { 7897 dev_info(&ctrl_info->pci_dev->dev, 7898 "%s enabled\n", firmware_feature->feature_name); 7899 return; 7900 } 7901 7902 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 7903 firmware_feature->feature_name); 7904 } 7905 7906 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 7907 struct pqi_firmware_feature *firmware_feature) 7908 { 7909 switch (firmware_feature->feature_bit) { 7910 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: 7911 ctrl_info->enable_r1_writes = firmware_feature->enabled; 7912 break; 7913 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: 7914 ctrl_info->enable_r5_writes = firmware_feature->enabled; 7915 break; 7916 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: 7917 ctrl_info->enable_r6_writes = firmware_feature->enabled; 7918 break; 7919 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 7920 ctrl_info->soft_reset_handshake_supported = 7921 firmware_feature->enabled && 7922 pqi_read_soft_reset_status(ctrl_info); 7923 break; 7924 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 7925 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; 7926 break; 7927 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 7928 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; 7929 break; 7930 case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7931 ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7932 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7933 break; 7934 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7935 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7936 break; 7937 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: 7938 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; 7939 break; 7940 case PQI_FIRMWARE_FEATURE_CTRL_LOGGING: 7941 ctrl_info->ctrl_logging_supported = firmware_feature->enabled; 7942 break; 7943 } 7944 7945 pqi_firmware_feature_status(ctrl_info, firmware_feature); 7946 } 7947 7948 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 7949 struct pqi_firmware_feature *firmware_feature) 7950 { 7951 if (firmware_feature->feature_status) 7952 firmware_feature->feature_status(ctrl_info, firmware_feature); 7953 } 7954 7955 static DEFINE_MUTEX(pqi_firmware_features_mutex); 7956 7957 static struct pqi_firmware_feature pqi_firmware_features[] = { 7958 { 7959 .feature_name = "Online Firmware Activation", 7960 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 7961 .feature_status = pqi_firmware_feature_status, 7962 }, 7963 { 7964 .feature_name = "Serial Management Protocol", 7965 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 7966 .feature_status = pqi_firmware_feature_status, 7967 }, 7968 { 7969 .feature_name = "Maximum Known Feature", 7970 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, 7971 .feature_status = pqi_firmware_feature_status, 7972 }, 7973 { 7974 .feature_name = "RAID 0 Read Bypass", 7975 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, 7976 .feature_status = pqi_firmware_feature_status, 7977 }, 7978 { 7979 .feature_name = "RAID 1 Read Bypass", 7980 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, 7981 .feature_status = pqi_firmware_feature_status, 7982 }, 7983 { 7984 .feature_name = "RAID 
5 Read Bypass", 7985 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, 7986 .feature_status = pqi_firmware_feature_status, 7987 }, 7988 { 7989 .feature_name = "RAID 6 Read Bypass", 7990 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, 7991 .feature_status = pqi_firmware_feature_status, 7992 }, 7993 { 7994 .feature_name = "RAID 0 Write Bypass", 7995 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, 7996 .feature_status = pqi_firmware_feature_status, 7997 }, 7998 { 7999 .feature_name = "RAID 1 Write Bypass", 8000 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, 8001 .feature_status = pqi_ctrl_update_feature_flags, 8002 }, 8003 { 8004 .feature_name = "RAID 5 Write Bypass", 8005 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, 8006 .feature_status = pqi_ctrl_update_feature_flags, 8007 }, 8008 { 8009 .feature_name = "RAID 6 Write Bypass", 8010 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, 8011 .feature_status = pqi_ctrl_update_feature_flags, 8012 }, 8013 { 8014 .feature_name = "New Soft Reset Handshake", 8015 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 8016 .feature_status = pqi_ctrl_update_feature_flags, 8017 }, 8018 { 8019 .feature_name = "RAID IU Timeout", 8020 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 8021 .feature_status = pqi_ctrl_update_feature_flags, 8022 }, 8023 { 8024 .feature_name = "TMF IU Timeout", 8025 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 8026 .feature_status = pqi_ctrl_update_feature_flags, 8027 }, 8028 { 8029 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", 8030 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, 8031 .feature_status = pqi_firmware_feature_status, 8032 }, 8033 { 8034 .feature_name = "Firmware Triage", 8035 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, 8036 .feature_status = pqi_ctrl_update_feature_flags, 8037 }, 8038 { 8039 .feature_name = "RPL Extended Formats 4 and 5", 8040 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 8041 .feature_status = pqi_ctrl_update_feature_flags, 8042 }, 8043 { 8044 .feature_name = "Multi-LUN Target", 8045 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, 8046 .feature_status = pqi_ctrl_update_feature_flags, 8047 }, 8048 { 8049 .feature_name = "Controller Data Logging", 8050 .feature_bit = PQI_FIRMWARE_FEATURE_CTRL_LOGGING, 8051 .feature_status = pqi_ctrl_update_feature_flags, 8052 }, 8053 }; 8054 8055 static void pqi_process_firmware_features( 8056 struct pqi_config_table_section_info *section_info) 8057 { 8058 int rc; 8059 struct pqi_ctrl_info *ctrl_info; 8060 struct pqi_config_table_firmware_features *firmware_features; 8061 void __iomem *firmware_features_iomem_addr; 8062 unsigned int i; 8063 unsigned int num_features_supported; 8064 8065 ctrl_info = section_info->ctrl_info; 8066 firmware_features = section_info->section; 8067 firmware_features_iomem_addr = section_info->section_iomem_addr; 8068 8069 for (i = 0, num_features_supported = 0; 8070 i < ARRAY_SIZE(pqi_firmware_features); i++) { 8071 if (pqi_is_firmware_feature_supported(firmware_features, 8072 pqi_firmware_features[i].feature_bit)) { 8073 pqi_firmware_features[i].supported = true; 8074 num_features_supported++; 8075 } else { 8076 pqi_firmware_feature_update(ctrl_info, 8077 &pqi_firmware_features[i]); 8078 } 8079 } 8080 8081 if (num_features_supported == 0) 8082 return; 8083 8084 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8085 if (!pqi_firmware_features[i].supported) 8086 continue; 8087 
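/* Set the corresponding "requested" bit for every feature that both the driver knows about and the firmware reports as supported. */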
pqi_request_firmware_feature(firmware_features, 8088 pqi_firmware_features[i].feature_bit); 8089 } 8090 8091 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 8092 firmware_features_iomem_addr); 8093 if (rc) { 8094 dev_err(&ctrl_info->pci_dev->dev, 8095 "failed to enable firmware features in PQI configuration table\n"); 8096 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8097 if (!pqi_firmware_features[i].supported) 8098 continue; 8099 pqi_firmware_feature_update(ctrl_info, 8100 &pqi_firmware_features[i]); 8101 } 8102 return; 8103 } 8104 8105 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8106 if (!pqi_firmware_features[i].supported) 8107 continue; 8108 if (pqi_is_firmware_feature_enabled(firmware_features, 8109 firmware_features_iomem_addr, 8110 pqi_firmware_features[i].feature_bit)) { 8111 pqi_firmware_features[i].enabled = true; 8112 } 8113 pqi_firmware_feature_update(ctrl_info, 8114 &pqi_firmware_features[i]); 8115 } 8116 } 8117 8118 static void pqi_init_firmware_features(void) 8119 { 8120 unsigned int i; 8121 8122 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8123 pqi_firmware_features[i].supported = false; 8124 pqi_firmware_features[i].enabled = false; 8125 } 8126 } 8127 8128 static void pqi_process_firmware_features_section( 8129 struct pqi_config_table_section_info *section_info) 8130 { 8131 mutex_lock(&pqi_firmware_features_mutex); 8132 pqi_init_firmware_features(); 8133 pqi_process_firmware_features(section_info); 8134 mutex_unlock(&pqi_firmware_features_mutex); 8135 } 8136 8137 /* 8138 * Reset all controller settings that can be initialized during the processing 8139 * of the PQI Configuration Table. 8140 */ 8141 8142 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) 8143 { 8144 ctrl_info->heartbeat_counter = NULL; 8145 ctrl_info->soft_reset_status = NULL; 8146 ctrl_info->soft_reset_handshake_supported = false; 8147 ctrl_info->enable_r1_writes = false; 8148 ctrl_info->enable_r5_writes = false; 8149 ctrl_info->enable_r6_writes = false; 8150 ctrl_info->raid_iu_timeout_supported = false; 8151 ctrl_info->tmf_iu_timeout_supported = false; 8152 ctrl_info->firmware_triage_supported = false; 8153 ctrl_info->rpl_extended_format_4_5_supported = false; 8154 ctrl_info->multi_lun_device_supported = false; 8155 ctrl_info->ctrl_logging_supported = false; 8156 } 8157 8158 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 8159 { 8160 u32 table_length; 8161 u32 section_offset; 8162 bool firmware_feature_section_present; 8163 void __iomem *table_iomem_addr; 8164 struct pqi_config_table *config_table; 8165 struct pqi_config_table_section_header *section; 8166 struct pqi_config_table_section_info section_info; 8167 struct pqi_config_table_section_info feature_section_info = {0}; 8168 8169 table_length = ctrl_info->config_table_length; 8170 if (table_length == 0) 8171 return 0; 8172 8173 config_table = kmalloc(table_length, GFP_KERNEL); 8174 if (!config_table) { 8175 dev_err(&ctrl_info->pci_dev->dev, 8176 "failed to allocate memory for PQI configuration table\n"); 8177 return -ENOMEM; 8178 } 8179 8180 /* 8181 * Copy the config table contents from I/O memory space into the 8182 * temporary buffer. 
8183 */ 8184 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; 8185 memcpy_fromio(config_table, table_iomem_addr, table_length); 8186 8187 firmware_feature_section_present = false; 8188 section_info.ctrl_info = ctrl_info; 8189 section_offset = get_unaligned_le32(&config_table->first_section_offset); 8190 8191 while (section_offset) { 8192 section = (void *)config_table + section_offset; 8193 8194 section_info.section = section; 8195 section_info.section_offset = section_offset; 8196 section_info.section_iomem_addr = table_iomem_addr + section_offset; 8197 8198 switch (get_unaligned_le16(&section->section_id)) { 8199 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 8200 firmware_feature_section_present = true; 8201 feature_section_info = section_info; 8202 break; 8203 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 8204 if (pqi_disable_heartbeat) 8205 dev_warn(&ctrl_info->pci_dev->dev, 8206 "heartbeat disabled by module parameter\n"); 8207 else 8208 ctrl_info->heartbeat_counter = 8209 table_iomem_addr + 8210 section_offset + 8211 offsetof(struct pqi_config_table_heartbeat, 8212 heartbeat_counter); 8213 break; 8214 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 8215 ctrl_info->soft_reset_status = 8216 table_iomem_addr + 8217 section_offset + 8218 offsetof(struct pqi_config_table_soft_reset, 8219 soft_reset_status); 8220 break; 8221 } 8222 8223 section_offset = get_unaligned_le16(&section->next_section_offset); 8224 } 8225 8226 /* 8227 * We process the firmware feature section after all other sections 8228 * have been processed so that the feature bit callbacks can take 8229 * into account the settings configured by other sections. 8230 */ 8231 if (firmware_feature_section_present) 8232 pqi_process_firmware_features_section(&feature_section_info); 8233 8234 kfree(config_table); 8235 8236 return 0; 8237 } 8238 8239 /* Switches the controller from PQI mode back into SIS mode. */ 8240 8241 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 8242 { 8243 int rc; 8244 8245 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 8246 rc = pqi_reset(ctrl_info); 8247 if (rc) 8248 return rc; 8249 rc = sis_reenable_sis_mode(ctrl_info); 8250 if (rc) { 8251 dev_err(&ctrl_info->pci_dev->dev, 8252 "re-enabling SIS mode failed with error %d\n", rc); 8253 return rc; 8254 } 8255 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8256 8257 return 0; 8258 } 8259 8260 /* 8261 * If the controller isn't already in SIS mode, this function forces it into 8262 * SIS mode.
8263 */ 8264 8265 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 8266 { 8267 if (!sis_is_firmware_running(ctrl_info)) 8268 return -ENXIO; 8269 8270 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 8271 return 0; 8272 8273 if (sis_is_kernel_up(ctrl_info)) { 8274 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8275 return 0; 8276 } 8277 8278 return pqi_revert_to_sis_mode(ctrl_info); 8279 } 8280 8281 static void pqi_perform_lockup_action(void) 8282 { 8283 switch (pqi_lockup_action) { 8284 case PANIC: 8285 panic("FATAL: Smart Family Controller lockup detected"); 8286 break; 8287 case REBOOT: 8288 emergency_restart(); 8289 break; 8290 case NONE: 8291 default: 8292 break; 8293 } 8294 } 8295 8296 #define PQI_CTRL_LOG_TOTAL_SIZE (4 * 1024 * 1024) 8297 #define PQI_CTRL_LOG_MIN_SIZE (PQI_CTRL_LOG_TOTAL_SIZE / PQI_HOST_MAX_SG_DESCRIPTORS) 8298 8299 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 8300 { 8301 int rc; 8302 u32 product_id; 8303 8304 if (reset_devices) { 8305 if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) { 8306 rc = sis_wait_for_fw_triage_completion(ctrl_info); 8307 if (rc) 8308 return rc; 8309 } 8310 if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) { 8311 sis_notify_kdump(ctrl_info); 8312 rc = sis_wait_for_ctrl_logging_completion(ctrl_info); 8313 if (rc) 8314 return rc; 8315 } 8316 sis_soft_reset(ctrl_info); 8317 ssleep(PQI_POST_RESET_DELAY_SECS); 8318 } else { 8319 rc = pqi_force_sis_mode(ctrl_info); 8320 if (rc) 8321 return rc; 8322 } 8323 8324 /* 8325 * Wait until the controller is ready to start accepting SIS 8326 * commands. 8327 */ 8328 rc = sis_wait_for_ctrl_ready(ctrl_info); 8329 if (rc) { 8330 if (reset_devices) { 8331 dev_err(&ctrl_info->pci_dev->dev, 8332 "kdump init failed with error %d\n", rc); 8333 pqi_lockup_action = REBOOT; 8334 pqi_perform_lockup_action(); 8335 } 8336 return rc; 8337 } 8338 8339 /* 8340 * Get the controller properties. This allows us to determine 8341 * whether or not it supports PQI mode. 8342 */ 8343 rc = sis_get_ctrl_properties(ctrl_info); 8344 if (rc) { 8345 dev_err(&ctrl_info->pci_dev->dev, 8346 "error obtaining controller properties\n"); 8347 return rc; 8348 } 8349 8350 rc = sis_get_pqi_capabilities(ctrl_info); 8351 if (rc) { 8352 dev_err(&ctrl_info->pci_dev->dev, 8353 "error obtaining controller capabilities\n"); 8354 return rc; 8355 } 8356 8357 product_id = sis_get_product_id(ctrl_info); 8358 ctrl_info->product_id = (u8)product_id; 8359 ctrl_info->product_revision = (u8)(product_id >> 8); 8360 8361 if (is_kdump_kernel()) { 8362 if (ctrl_info->max_outstanding_requests > 8363 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 8364 ctrl_info->max_outstanding_requests = 8365 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 8366 } else { 8367 if (ctrl_info->max_outstanding_requests > 8368 PQI_MAX_OUTSTANDING_REQUESTS) 8369 ctrl_info->max_outstanding_requests = 8370 PQI_MAX_OUTSTANDING_REQUESTS; 8371 } 8372 8373 pqi_calculate_io_resources(ctrl_info); 8374 8375 rc = pqi_alloc_error_buffer(ctrl_info); 8376 if (rc) { 8377 dev_err(&ctrl_info->pci_dev->dev, 8378 "failed to allocate PQI error buffer\n"); 8379 return rc; 8380 } 8381 8382 /* 8383 * If the function we are about to call succeeds, the 8384 * controller will transition from legacy SIS mode 8385 * into PQI mode. 8386 */ 8387 rc = sis_init_base_struct_addr(ctrl_info); 8388 if (rc) { 8389 dev_err(&ctrl_info->pci_dev->dev, 8390 "error initializing PQI mode\n"); 8391 return rc; 8392 } 8393 8394 /* Wait for the controller to complete the SIS -> PQI transition. 
*/ 8395 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8396 if (rc) { 8397 dev_err(&ctrl_info->pci_dev->dev, 8398 "transition to PQI mode failed\n"); 8399 return rc; 8400 } 8401 8402 /* From here on, we are running in PQI mode. */ 8403 ctrl_info->pqi_mode_enabled = true; 8404 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8405 8406 rc = pqi_alloc_admin_queues(ctrl_info); 8407 if (rc) { 8408 dev_err(&ctrl_info->pci_dev->dev, 8409 "failed to allocate admin queues\n"); 8410 return rc; 8411 } 8412 8413 rc = pqi_create_admin_queues(ctrl_info); 8414 if (rc) { 8415 dev_err(&ctrl_info->pci_dev->dev, 8416 "error creating admin queues\n"); 8417 return rc; 8418 } 8419 8420 rc = pqi_report_device_capability(ctrl_info); 8421 if (rc) { 8422 dev_err(&ctrl_info->pci_dev->dev, 8423 "obtaining device capability failed\n"); 8424 return rc; 8425 } 8426 8427 rc = pqi_validate_device_capability(ctrl_info); 8428 if (rc) 8429 return rc; 8430 8431 pqi_calculate_queue_resources(ctrl_info); 8432 8433 rc = pqi_enable_msix_interrupts(ctrl_info); 8434 if (rc) 8435 return rc; 8436 8437 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 8438 ctrl_info->max_msix_vectors = 8439 ctrl_info->num_msix_vectors_enabled; 8440 pqi_calculate_queue_resources(ctrl_info); 8441 } 8442 8443 rc = pqi_alloc_io_resources(ctrl_info); 8444 if (rc) 8445 return rc; 8446 8447 rc = pqi_alloc_operational_queues(ctrl_info); 8448 if (rc) { 8449 dev_err(&ctrl_info->pci_dev->dev, 8450 "failed to allocate operational queues\n"); 8451 return rc; 8452 } 8453 8454 pqi_init_operational_queues(ctrl_info); 8455 8456 rc = pqi_create_queues(ctrl_info); 8457 if (rc) 8458 return rc; 8459 8460 rc = pqi_request_irqs(ctrl_info); 8461 if (rc) 8462 return rc; 8463 8464 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8465 8466 ctrl_info->controller_online = true; 8467 8468 rc = pqi_process_config_table(ctrl_info); 8469 if (rc) 8470 return rc; 8471 8472 pqi_start_heartbeat_timer(ctrl_info); 8473 8474 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8475 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8476 if (rc) { /* Supported features not returned correctly. */ 8477 dev_err(&ctrl_info->pci_dev->dev, 8478 "error obtaining advanced RAID bypass configuration\n"); 8479 return rc; 8480 } 8481 ctrl_info->ciss_report_log_flags |= 8482 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8483 } 8484 8485 rc = pqi_enable_events(ctrl_info); 8486 if (rc) { 8487 dev_err(&ctrl_info->pci_dev->dev, 8488 "error enabling events\n"); 8489 return rc; 8490 } 8491 8492 /* Register with the SCSI subsystem. 
*/ 8493 rc = pqi_register_scsi(ctrl_info); 8494 if (rc) 8495 return rc; 8496 8497 if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) { 8498 pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE); 8499 pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8500 } 8501 8502 rc = pqi_get_ctrl_product_details(ctrl_info); 8503 if (rc) { 8504 dev_err(&ctrl_info->pci_dev->dev, 8505 "error obtaining product details\n"); 8506 return rc; 8507 } 8508 8509 rc = pqi_get_ctrl_serial_number(ctrl_info); 8510 if (rc) { 8511 dev_err(&ctrl_info->pci_dev->dev, 8512 "error obtaining ctrl serial number\n"); 8513 return rc; 8514 } 8515 8516 rc = pqi_set_diag_rescan(ctrl_info); 8517 if (rc) { 8518 dev_err(&ctrl_info->pci_dev->dev, 8519 "error enabling multi-lun rescan\n"); 8520 return rc; 8521 } 8522 8523 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8524 if (rc) { 8525 dev_err(&ctrl_info->pci_dev->dev, 8526 "error updating host wellness\n"); 8527 return rc; 8528 } 8529 8530 pqi_schedule_update_time_worker(ctrl_info); 8531 8532 pqi_scan_scsi_devices(ctrl_info); 8533 8534 return 0; 8535 } 8536 8537 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 8538 { 8539 unsigned int i; 8540 struct pqi_admin_queues *admin_queues; 8541 struct pqi_event_queue *event_queue; 8542 8543 admin_queues = &ctrl_info->admin_queues; 8544 admin_queues->iq_pi_copy = 0; 8545 admin_queues->oq_ci_copy = 0; 8546 writel(0, admin_queues->oq_pi); 8547 8548 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 8549 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 8550 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 8551 ctrl_info->queue_groups[i].oq_ci_copy = 0; 8552 8553 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 8554 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 8555 writel(0, ctrl_info->queue_groups[i].oq_pi); 8556 } 8557 8558 event_queue = &ctrl_info->event_queue; 8559 writel(0, event_queue->oq_pi); 8560 event_queue->oq_ci_copy = 0; 8561 } 8562 8563 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 8564 { 8565 int rc; 8566 8567 rc = pqi_force_sis_mode(ctrl_info); 8568 if (rc) 8569 return rc; 8570 8571 /* 8572 * Wait until the controller is ready to start accepting SIS 8573 * commands. 8574 */ 8575 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 8576 if (rc) 8577 return rc; 8578 8579 /* 8580 * Get the controller properties. This allows us to determine 8581 * whether or not it supports PQI mode. 8582 */ 8583 rc = sis_get_ctrl_properties(ctrl_info); 8584 if (rc) { 8585 dev_err(&ctrl_info->pci_dev->dev, 8586 "error obtaining controller properties\n"); 8587 return rc; 8588 } 8589 8590 rc = sis_get_pqi_capabilities(ctrl_info); 8591 if (rc) { 8592 dev_err(&ctrl_info->pci_dev->dev, 8593 "error obtaining controller capabilities\n"); 8594 return rc; 8595 } 8596 8597 /* 8598 * If the function we are about to call succeeds, the 8599 * controller will transition from legacy SIS mode 8600 * into PQI mode. 8601 */ 8602 rc = sis_init_base_struct_addr(ctrl_info); 8603 if (rc) { 8604 dev_err(&ctrl_info->pci_dev->dev, 8605 "error initializing PQI mode\n"); 8606 return rc; 8607 } 8608 8609 /* Wait for the controller to complete the SIS -> PQI transition. */ 8610 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8611 if (rc) { 8612 dev_err(&ctrl_info->pci_dev->dev, 8613 "transition to PQI mode failed\n"); 8614 return rc; 8615 } 8616 8617 /* From here on, we are running in PQI mode. 
*/ 8618 ctrl_info->pqi_mode_enabled = true; 8619 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8620 8621 pqi_reinit_queues(ctrl_info); 8622 8623 rc = pqi_create_admin_queues(ctrl_info); 8624 if (rc) { 8625 dev_err(&ctrl_info->pci_dev->dev, 8626 "error creating admin queues\n"); 8627 return rc; 8628 } 8629 8630 rc = pqi_create_queues(ctrl_info); 8631 if (rc) 8632 return rc; 8633 8634 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8635 8636 ctrl_info->controller_online = true; 8637 pqi_ctrl_unblock_requests(ctrl_info); 8638 8639 pqi_ctrl_reset_config(ctrl_info); 8640 8641 rc = pqi_process_config_table(ctrl_info); 8642 if (rc) 8643 return rc; 8644 8645 pqi_start_heartbeat_timer(ctrl_info); 8646 8647 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8648 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8649 if (rc) { 8650 dev_err(&ctrl_info->pci_dev->dev, 8651 "error obtaining advanced RAID bypass configuration\n"); 8652 return rc; 8653 } 8654 ctrl_info->ciss_report_log_flags |= 8655 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8656 } 8657 8658 rc = pqi_enable_events(ctrl_info); 8659 if (rc) { 8660 dev_err(&ctrl_info->pci_dev->dev, 8661 "error enabling events\n"); 8662 return rc; 8663 } 8664 8665 rc = pqi_get_ctrl_product_details(ctrl_info); 8666 if (rc) { 8667 dev_err(&ctrl_info->pci_dev->dev, 8668 "error obtaining product details\n"); 8669 return rc; 8670 } 8671 8672 rc = pqi_set_diag_rescan(ctrl_info); 8673 if (rc) { 8674 dev_err(&ctrl_info->pci_dev->dev, 8675 "error enabling multi-lun rescan\n"); 8676 return rc; 8677 } 8678 8679 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8680 if (rc) { 8681 dev_err(&ctrl_info->pci_dev->dev, 8682 "error updating host wellness\n"); 8683 return rc; 8684 } 8685 8686 if (pqi_ofa_in_progress(ctrl_info)) { 8687 pqi_ctrl_unblock_scan(ctrl_info); 8688 if (ctrl_info->ctrl_logging_supported) { 8689 if (!ctrl_info->ctrl_log_memory.host_memory) 8690 pqi_host_setup_buffer(ctrl_info, 8691 &ctrl_info->ctrl_log_memory, 8692 PQI_CTRL_LOG_TOTAL_SIZE, 8693 PQI_CTRL_LOG_MIN_SIZE); 8694 pqi_host_memory_update(ctrl_info, 8695 &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE); 8696 } else { 8697 if (ctrl_info->ctrl_log_memory.host_memory) 8698 pqi_host_free_buffer(ctrl_info, 8699 &ctrl_info->ctrl_log_memory); 8700 } 8701 } 8702 8703 pqi_scan_scsi_devices(ctrl_info); 8704 8705 return 0; 8706 } 8707 8708 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) 8709 { 8710 int rc; 8711 8712 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 8713 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 8714 8715 return pcibios_err_to_errno(rc); 8716 } 8717 8718 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 8719 { 8720 int rc; 8721 u64 mask; 8722 8723 rc = pci_enable_device(ctrl_info->pci_dev); 8724 if (rc) { 8725 dev_err(&ctrl_info->pci_dev->dev, 8726 "failed to enable PCI device\n"); 8727 return rc; 8728 } 8729 8730 if (sizeof(dma_addr_t) > 4) 8731 mask = DMA_BIT_MASK(64); 8732 else 8733 mask = DMA_BIT_MASK(32); 8734 8735 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 8736 if (rc) { 8737 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 8738 goto disable_device; 8739 } 8740 8741 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 8742 if (rc) { 8743 dev_err(&ctrl_info->pci_dev->dev, 8744 "failed to obtain PCI resources\n"); 8745 goto disable_device; 8746 } 8747 8748 ctrl_info->iomem_base = ioremap(pci_resource_start( 8749 ctrl_info->pci_dev, 0), 8750 
pci_resource_len(ctrl_info->pci_dev, 0)); 8751 if (!ctrl_info->iomem_base) { 8752 dev_err(&ctrl_info->pci_dev->dev, 8753 "failed to map memory for controller registers\n"); 8754 rc = -ENOMEM; 8755 goto release_regions; 8756 } 8757 8758 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 8759 8760 /* Increase the PCIe completion timeout. */ 8761 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 8762 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 8763 if (rc) { 8764 dev_err(&ctrl_info->pci_dev->dev, 8765 "failed to set PCIe completion timeout\n"); 8766 goto release_regions; 8767 } 8768 8769 /* Enable bus mastering. */ 8770 pci_set_master(ctrl_info->pci_dev); 8771 8772 ctrl_info->registers = ctrl_info->iomem_base; 8773 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 8774 8775 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 8776 8777 return 0; 8778 8779 release_regions: 8780 pci_release_regions(ctrl_info->pci_dev); 8781 disable_device: 8782 pci_disable_device(ctrl_info->pci_dev); 8783 8784 return rc; 8785 } 8786 8787 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 8788 { 8789 iounmap(ctrl_info->iomem_base); 8790 pci_release_regions(ctrl_info->pci_dev); 8791 if (pci_is_enabled(ctrl_info->pci_dev)) 8792 pci_disable_device(ctrl_info->pci_dev); 8793 pci_set_drvdata(ctrl_info->pci_dev, NULL); 8794 } 8795 8796 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 8797 { 8798 struct pqi_ctrl_info *ctrl_info; 8799 8800 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 8801 GFP_KERNEL, numa_node); 8802 if (!ctrl_info) 8803 return NULL; 8804 8805 mutex_init(&ctrl_info->scan_mutex); 8806 mutex_init(&ctrl_info->lun_reset_mutex); 8807 mutex_init(&ctrl_info->ofa_mutex); 8808 8809 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 8810 spin_lock_init(&ctrl_info->scsi_device_list_lock); 8811 8812 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 8813 atomic_set(&ctrl_info->num_interrupts, 0); 8814 8815 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 8816 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 8817 8818 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 8819 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 8820 8821 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); 8822 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); 8823 8824 sema_init(&ctrl_info->sync_request_sem, 8825 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 8826 init_waitqueue_head(&ctrl_info->block_requests_wait); 8827 8828 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 8829 ctrl_info->irq_mode = IRQ_MODE_NONE; 8830 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 8831 8832 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; 8833 ctrl_info->max_transfer_encrypted_sas_sata = 8834 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; 8835 ctrl_info->max_transfer_encrypted_nvme = 8836 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; 8837 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; 8838 ctrl_info->max_write_raid_1_10_2drive = ~0; 8839 ctrl_info->max_write_raid_1_10_3drive = ~0; 8840 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; 8841 8842 return ctrl_info; 8843 } 8844 8845 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 8846 { 8847 kfree(ctrl_info); 8848 } 8849 8850 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 8851 { 8852 pqi_free_irqs(ctrl_info); 8853 
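/* Free the registered IRQ handlers first, then release the underlying MSI-X vectors. */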
pqi_disable_msix_interrupts(ctrl_info); 8854 } 8855 8856 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 8857 { 8858 pqi_free_interrupts(ctrl_info); 8859 if (ctrl_info->queue_memory_base) 8860 dma_free_coherent(&ctrl_info->pci_dev->dev, 8861 ctrl_info->queue_memory_length, 8862 ctrl_info->queue_memory_base, 8863 ctrl_info->queue_memory_base_dma_handle); 8864 if (ctrl_info->admin_queue_memory_base) 8865 dma_free_coherent(&ctrl_info->pci_dev->dev, 8866 ctrl_info->admin_queue_memory_length, 8867 ctrl_info->admin_queue_memory_base, 8868 ctrl_info->admin_queue_memory_base_dma_handle); 8869 pqi_free_all_io_requests(ctrl_info); 8870 if (ctrl_info->error_buffer) 8871 dma_free_coherent(&ctrl_info->pci_dev->dev, 8872 ctrl_info->error_buffer_length, 8873 ctrl_info->error_buffer, 8874 ctrl_info->error_buffer_dma_handle); 8875 if (ctrl_info->iomem_base) 8876 pqi_cleanup_pci_init(ctrl_info); 8877 pqi_free_ctrl_info(ctrl_info); 8878 } 8879 8880 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 8881 { 8882 ctrl_info->controller_online = false; 8883 pqi_stop_heartbeat_timer(ctrl_info); 8884 pqi_ctrl_block_requests(ctrl_info); 8885 pqi_cancel_rescan_worker(ctrl_info); 8886 pqi_cancel_update_time_worker(ctrl_info); 8887 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { 8888 pqi_fail_all_outstanding_requests(ctrl_info); 8889 ctrl_info->pqi_mode_enabled = false; 8890 } 8891 pqi_host_free_buffer(ctrl_info, &ctrl_info->ctrl_log_memory); 8892 pqi_unregister_scsi(ctrl_info); 8893 if (ctrl_info->pqi_mode_enabled) 8894 pqi_revert_to_sis_mode(ctrl_info); 8895 pqi_free_ctrl_resources(ctrl_info); 8896 } 8897 8898 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 8899 { 8900 pqi_ctrl_block_scan(ctrl_info); 8901 pqi_scsi_block_requests(ctrl_info); 8902 pqi_ctrl_block_device_reset(ctrl_info); 8903 pqi_ctrl_block_requests(ctrl_info); 8904 pqi_ctrl_wait_until_quiesced(ctrl_info); 8905 pqi_stop_heartbeat_timer(ctrl_info); 8906 } 8907 8908 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 8909 { 8910 pqi_start_heartbeat_timer(ctrl_info); 8911 pqi_ctrl_unblock_requests(ctrl_info); 8912 pqi_ctrl_unblock_device_reset(ctrl_info); 8913 pqi_scsi_unblock_requests(ctrl_info); 8914 pqi_ctrl_unblock_scan(ctrl_info); 8915 } 8916 8917 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) 8918 { 8919 ssleep(delay_secs); 8920 8921 return pqi_ctrl_init_resume(ctrl_info); 8922 } 8923 8924 static int pqi_host_alloc_mem(struct pqi_ctrl_info *ctrl_info, 8925 struct pqi_host_memory_descriptor *host_memory_descriptor, 8926 u32 total_size, u32 chunk_size) 8927 { 8928 int i; 8929 u32 sg_count; 8930 struct device *dev; 8931 struct pqi_host_memory *host_memory; 8932 struct pqi_sg_descriptor *mem_descriptor; 8933 dma_addr_t dma_handle; 8934 8935 sg_count = DIV_ROUND_UP(total_size, chunk_size); 8936 if (sg_count == 0 || sg_count > PQI_HOST_MAX_SG_DESCRIPTORS) 8937 goto out; 8938 8939 host_memory_descriptor->host_chunk_virt_address = kmalloc(sg_count * sizeof(void *), GFP_KERNEL); 8940 if (!host_memory_descriptor->host_chunk_virt_address) 8941 goto out; 8942 8943 dev = &ctrl_info->pci_dev->dev; 8944 host_memory = host_memory_descriptor->host_memory; 8945 8946 for (i = 0; i < sg_count; i++) { 8947 host_memory_descriptor->host_chunk_virt_address[i] = dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); 8948 if (!host_memory_descriptor->host_chunk_virt_address[i]) 8949 goto out_free_chunks; 8950 mem_descriptor = 
&host_memory->sg_descriptor[i]; 8951 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); 8952 put_unaligned_le32(chunk_size, &mem_descriptor->length); 8953 } 8954 8955 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 8956 put_unaligned_le16(sg_count, &host_memory->num_memory_descriptors); 8957 put_unaligned_le32(sg_count * chunk_size, &host_memory->bytes_allocated); 8958 8959 return 0; 8960 8961 out_free_chunks: 8962 while (--i >= 0) { 8963 mem_descriptor = &host_memory->sg_descriptor[i]; 8964 dma_free_coherent(dev, chunk_size, 8965 host_memory_descriptor->host_chunk_virt_address[i], 8966 get_unaligned_le64(&mem_descriptor->address)); 8967 } 8968 kfree(host_memory_descriptor->host_chunk_virt_address); 8969 out: 8970 return -ENOMEM; 8971 } 8972 8973 static int pqi_host_alloc_buffer(struct pqi_ctrl_info *ctrl_info, 8974 struct pqi_host_memory_descriptor *host_memory_descriptor, 8975 u32 total_required_size, u32 min_required_size) 8976 { 8977 u32 chunk_size; 8978 u32 min_chunk_size; 8979 8980 if (total_required_size == 0 || min_required_size == 0) 8981 return 0; 8982 8983 total_required_size = PAGE_ALIGN(total_required_size); 8984 min_required_size = PAGE_ALIGN(min_required_size); 8985 min_chunk_size = DIV_ROUND_UP(total_required_size, PQI_HOST_MAX_SG_DESCRIPTORS); 8986 min_chunk_size = PAGE_ALIGN(min_chunk_size); 8987 8988 while (total_required_size >= min_required_size) { 8989 for (chunk_size = total_required_size; chunk_size >= min_chunk_size;) { 8990 if (pqi_host_alloc_mem(ctrl_info, 8991 host_memory_descriptor, total_required_size, 8992 chunk_size) == 0) 8993 return 0; 8994 chunk_size /= 2; 8995 chunk_size = PAGE_ALIGN(chunk_size); 8996 } 8997 total_required_size /= 2; 8998 total_required_size = PAGE_ALIGN(total_required_size); 8999 } 9000 9001 return -ENOMEM; 9002 } 9003 9004 static void pqi_host_setup_buffer(struct pqi_ctrl_info *ctrl_info, 9005 struct pqi_host_memory_descriptor *host_memory_descriptor, 9006 u32 total_size, u32 min_size) 9007 { 9008 struct device *dev; 9009 struct pqi_host_memory *host_memory; 9010 9011 dev = &ctrl_info->pci_dev->dev; 9012 9013 host_memory = dma_alloc_coherent(dev, sizeof(*host_memory), 9014 &host_memory_descriptor->host_memory_dma_handle, GFP_KERNEL); 9015 if (!host_memory) 9016 return; 9017 9018 host_memory_descriptor->host_memory = host_memory; 9019 9020 if (pqi_host_alloc_buffer(ctrl_info, host_memory_descriptor, 9021 total_size, min_size) < 0) { 9022 dev_err(dev, "failed to allocate firmware usable host buffer\n"); 9023 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9024 host_memory_descriptor->host_memory_dma_handle); 9025 host_memory_descriptor->host_memory = NULL; 9026 return; 9027 } 9028 } 9029 9030 static void pqi_host_free_buffer(struct pqi_ctrl_info *ctrl_info, 9031 struct pqi_host_memory_descriptor *host_memory_descriptor) 9032 { 9033 unsigned int i; 9034 struct device *dev; 9035 struct pqi_host_memory *host_memory; 9036 struct pqi_sg_descriptor *mem_descriptor; 9037 unsigned int num_memory_descriptors; 9038 9039 host_memory = host_memory_descriptor->host_memory; 9040 if (!host_memory) 9041 return; 9042 9043 dev = &ctrl_info->pci_dev->dev; 9044 9045 if (get_unaligned_le32(&host_memory->bytes_allocated) == 0) 9046 goto out; 9047 9048 mem_descriptor = host_memory->sg_descriptor; 9049 num_memory_descriptors = get_unaligned_le16(&host_memory->num_memory_descriptors); 9050 9051 for (i = 0; i < num_memory_descriptors; i++) { 9052 dma_free_coherent(dev, 9053 get_unaligned_le32(&mem_descriptor[i].length), 9054 
host_memory_descriptor->host_chunk_virt_address[i], 9055 get_unaligned_le64(&mem_descriptor[i].address)); 9056 } 9057 kfree(host_memory_descriptor->host_chunk_virt_address); 9058 9059 out: 9060 dma_free_coherent(dev, sizeof(*host_memory), host_memory, 9061 host_memory_descriptor->host_memory_dma_handle); 9062 host_memory_descriptor->host_memory = NULL; 9063 } 9064 9065 static int pqi_host_memory_update(struct pqi_ctrl_info *ctrl_info, 9066 struct pqi_host_memory_descriptor *host_memory_descriptor, 9067 u16 function_code) 9068 { 9069 u32 buffer_length; 9070 struct pqi_vendor_general_request request; 9071 struct pqi_host_memory *host_memory; 9072 9073 memset(&request, 0, sizeof(request)); 9074 9075 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 9076 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 9077 put_unaligned_le16(function_code, &request.function_code); 9078 9079 host_memory = host_memory_descriptor->host_memory; 9080 9081 if (host_memory) { 9082 buffer_length = offsetof(struct pqi_host_memory, sg_descriptor) + get_unaligned_le16(&host_memory->num_memory_descriptors) * sizeof(struct pqi_sg_descriptor); 9083 put_unaligned_le64((u64)host_memory_descriptor->host_memory_dma_handle, &request.data.host_memory_allocation.buffer_address); 9084 put_unaligned_le32(buffer_length, &request.data.host_memory_allocation.buffer_length); 9085 9086 if (function_code == PQI_VENDOR_GENERAL_OFA_MEMORY_UPDATE) { 9087 put_unaligned_le16(PQI_OFA_VERSION, &host_memory->version); 9088 memcpy(&host_memory->signature, PQI_OFA_SIGNATURE, sizeof(host_memory->signature)); 9089 } else if (function_code == PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE) { 9090 put_unaligned_le16(PQI_CTRL_LOG_VERSION, &host_memory->version); 9091 memcpy(&host_memory->signature, PQI_CTRL_LOG_SIGNATURE, sizeof(host_memory->signature)); 9092 } 9093 } 9094 9095 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 9096 } 9097 9098 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 9099 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 9100 .status = SAM_STAT_CHECK_CONDITION, 9101 }; 9102 9103 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 9104 { 9105 unsigned int i; 9106 struct pqi_io_request *io_request; 9107 struct scsi_cmnd *scmd; 9108 struct scsi_device *sdev; 9109 9110 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9111 io_request = &ctrl_info->io_request_pool[i]; 9112 if (atomic_read(&io_request->refcount) == 0) 9113 continue; 9114 9115 scmd = io_request->scmd; 9116 if (scmd) { 9117 sdev = scmd->device; 9118 if (!sdev || !scsi_device_online(sdev)) { 9119 pqi_free_io_request(io_request); 9120 continue; 9121 } else { 9122 set_host_byte(scmd, DID_NO_CONNECT); 9123 } 9124 } else { 9125 io_request->status = -ENXIO; 9126 io_request->error_info = 9127 &pqi_ctrl_offline_raid_error_info; 9128 } 9129 9130 io_request->io_complete_callback(io_request, 9131 io_request->context); 9132 } 9133 } 9134 9135 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 9136 { 9137 pqi_perform_lockup_action(); 9138 pqi_stop_heartbeat_timer(ctrl_info); 9139 pqi_free_interrupts(ctrl_info); 9140 pqi_cancel_rescan_worker(ctrl_info); 9141 pqi_cancel_update_time_worker(ctrl_info); 9142 pqi_ctrl_wait_until_quiesced(ctrl_info); 9143 pqi_fail_all_outstanding_requests(ctrl_info); 9144 pqi_ctrl_unblock_requests(ctrl_info); 9145 pqi_take_ctrl_devices_offline(ctrl_info); 9146 } 9147 9148 static void 
pqi_ctrl_offline_worker(struct work_struct *work) 9149 { 9150 struct pqi_ctrl_info *ctrl_info; 9151 9152 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 9153 pqi_take_ctrl_offline_deferred(ctrl_info); 9154 } 9155 9156 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9157 { 9158 char *string; 9159 9160 switch (ctrl_shutdown_reason) { 9161 case PQI_IQ_NOT_DRAINED_TIMEOUT: 9162 string = "inbound queue not drained timeout"; 9163 break; 9164 case PQI_LUN_RESET_TIMEOUT: 9165 string = "LUN reset timeout"; 9166 break; 9167 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: 9168 string = "I/O pending timeout after LUN reset"; 9169 break; 9170 case PQI_NO_HEARTBEAT: 9171 string = "no controller heartbeat detected"; 9172 break; 9173 case PQI_FIRMWARE_KERNEL_NOT_UP: 9174 string = "firmware kernel not ready"; 9175 break; 9176 case PQI_OFA_RESPONSE_TIMEOUT: 9177 string = "OFA response timeout"; 9178 break; 9179 case PQI_INVALID_REQ_ID: 9180 string = "invalid request ID"; 9181 break; 9182 case PQI_UNMATCHED_REQ_ID: 9183 string = "unmatched request ID"; 9184 break; 9185 case PQI_IO_PI_OUT_OF_RANGE: 9186 string = "I/O queue producer index out of range"; 9187 break; 9188 case PQI_EVENT_PI_OUT_OF_RANGE: 9189 string = "event queue producer index out of range"; 9190 break; 9191 case PQI_UNEXPECTED_IU_TYPE: 9192 string = "unexpected IU type"; 9193 break; 9194 default: 9195 string = "unknown reason"; 9196 break; 9197 } 9198 9199 return string; 9200 } 9201 9202 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 9203 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9204 { 9205 if (!ctrl_info->controller_online) 9206 return; 9207 9208 ctrl_info->controller_online = false; 9209 ctrl_info->pqi_mode_enabled = false; 9210 pqi_ctrl_block_requests(ctrl_info); 9211 if (!pqi_disable_ctrl_shutdown) 9212 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 9213 pci_disable_device(ctrl_info->pci_dev); 9214 dev_err(&ctrl_info->pci_dev->dev, 9215 "controller offline: reason code 0x%x (%s)\n", 9216 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); 9217 schedule_work(&ctrl_info->ctrl_offline_work); 9218 } 9219 9220 static void pqi_take_ctrl_devices_offline(struct pqi_ctrl_info *ctrl_info) 9221 { 9222 int rc; 9223 unsigned long flags; 9224 struct pqi_scsi_dev *device; 9225 9226 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 9227 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 9228 rc = list_is_last(&device->scsi_device_list_entry, &ctrl_info->scsi_device_list); 9229 if (rc) 9230 continue; 9231 9232 /* 9233 * Is the sdev pointer NULL? 
9234 */ 9235 if (device->sdev) 9236 scsi_device_set_state(device->sdev, SDEV_OFFLINE); 9237 } 9238 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 9239 } 9240 9241 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 9242 const struct pci_device_id *id) 9243 { 9244 char *ctrl_description; 9245 9246 if (id->driver_data) 9247 ctrl_description = (char *)id->driver_data; 9248 else 9249 ctrl_description = "Microchip Smart Family Controller"; 9250 9251 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 9252 } 9253 9254 static int pqi_pci_probe(struct pci_dev *pci_dev, 9255 const struct pci_device_id *id) 9256 { 9257 int rc; 9258 int node; 9259 struct pqi_ctrl_info *ctrl_info; 9260 9261 pqi_print_ctrl_info(pci_dev, id); 9262 9263 if (pqi_disable_device_id_wildcards && 9264 id->subvendor == PCI_ANY_ID && 9265 id->subdevice == PCI_ANY_ID) { 9266 dev_warn(&pci_dev->dev, 9267 "controller not probed because device ID wildcards are disabled\n"); 9268 return -ENODEV; 9269 } 9270 9271 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 9272 dev_warn(&pci_dev->dev, 9273 "controller device ID matched using wildcards\n"); 9274 9275 node = dev_to_node(&pci_dev->dev); 9276 if (node == NUMA_NO_NODE) { 9277 node = cpu_to_node(0); 9278 if (node == NUMA_NO_NODE) 9279 node = 0; 9280 set_dev_node(&pci_dev->dev, node); 9281 } 9282 9283 ctrl_info = pqi_alloc_ctrl_info(node); 9284 if (!ctrl_info) { 9285 dev_err(&pci_dev->dev, 9286 "failed to allocate controller info block\n"); 9287 return -ENOMEM; 9288 } 9289 ctrl_info->numa_node = node; 9290 9291 ctrl_info->pci_dev = pci_dev; 9292 9293 rc = pqi_pci_init(ctrl_info); 9294 if (rc) 9295 goto error; 9296 9297 rc = pqi_ctrl_init(ctrl_info); 9298 if (rc) 9299 goto error; 9300 9301 return 0; 9302 9303 error: 9304 pqi_remove_ctrl(ctrl_info); 9305 9306 return rc; 9307 } 9308 9309 static void pqi_pci_remove(struct pci_dev *pci_dev) 9310 { 9311 struct pqi_ctrl_info *ctrl_info; 9312 u16 vendor_id; 9313 int rc; 9314 9315 ctrl_info = pci_get_drvdata(pci_dev); 9316 if (!ctrl_info) 9317 return; 9318 9319 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); 9320 if (vendor_id == 0xffff) 9321 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; 9322 else 9323 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; 9324 9325 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { 9326 rc = pqi_flush_cache(ctrl_info, RESTART); 9327 if (rc) 9328 dev_err(&pci_dev->dev, 9329 "unable to flush controller cache during remove\n"); 9330 } 9331 9332 pqi_remove_ctrl(ctrl_info); 9333 } 9334 9335 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) 9336 { 9337 unsigned int i; 9338 struct pqi_io_request *io_request; 9339 struct scsi_cmnd *scmd; 9340 9341 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9342 io_request = &ctrl_info->io_request_pool[i]; 9343 if (atomic_read(&io_request->refcount) == 0) 9344 continue; 9345 scmd = io_request->scmd; 9346 WARN_ON(scmd != NULL); /* IO command from SML */ 9347 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ 9348 } 9349 } 9350 9351 static void pqi_shutdown(struct pci_dev *pci_dev) 9352 { 9353 int rc; 9354 struct pqi_ctrl_info *ctrl_info; 9355 enum bmic_flush_cache_shutdown_event shutdown_event; 9356 9357 ctrl_info = pci_get_drvdata(pci_dev); 9358 if (!ctrl_info) { 9359 dev_err(&pci_dev->dev, 9360 "cache could not be flushed\n"); 9361 return; 9362 } 9363 9364 pqi_wait_until_ofa_finished(ctrl_info); 9365 9366 pqi_scsi_block_requests(ctrl_info); 
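/*
 * Block device resets and any new requests, then wait for outstanding
 * I/O to drain before deciding how to flush the controller cache.
 */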
9367 pqi_ctrl_block_device_reset(ctrl_info); 9368 pqi_ctrl_block_requests(ctrl_info); 9369 pqi_ctrl_wait_until_quiesced(ctrl_info); 9370 9371 if (system_state == SYSTEM_RESTART) 9372 shutdown_event = RESTART; 9373 else 9374 shutdown_event = SHUTDOWN; 9375 9376 /* 9377 * Write all data in the controller's battery-backed cache to 9378 * storage. 9379 */ 9380 rc = pqi_flush_cache(ctrl_info, shutdown_event); 9381 if (rc) 9382 dev_err(&pci_dev->dev, 9383 "unable to flush controller cache during shutdown\n"); 9384 9385 pqi_crash_if_pending_command(ctrl_info); 9386 pqi_reset(ctrl_info); 9387 } 9388 9389 static void pqi_process_lockup_action_param(void) 9390 { 9391 unsigned int i; 9392 9393 if (!pqi_lockup_action_param) 9394 return; 9395 9396 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 9397 if (strcmp(pqi_lockup_action_param, 9398 pqi_lockup_actions[i].name) == 0) { 9399 pqi_lockup_action = pqi_lockup_actions[i].action; 9400 return; 9401 } 9402 } 9403 9404 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 9405 DRIVER_NAME_SHORT, pqi_lockup_action_param); 9406 } 9407 9408 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 9409 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) 9410 9411 static void pqi_process_ctrl_ready_timeout_param(void) 9412 { 9413 if (pqi_ctrl_ready_timeout_secs == 0) 9414 return; 9415 9416 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { 9417 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", 9418 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); 9419 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; 9420 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { 9421 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", 9422 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); 9423 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; 9424 } 9425 9426 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; 9427 } 9428 9429 static void pqi_process_module_params(void) 9430 { 9431 pqi_process_lockup_action_param(); 9432 pqi_process_ctrl_ready_timeout_param(); 9433 } 9434 9435 #if defined(CONFIG_PM) 9436 9437 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) 9438 { 9439 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) 9440 return RESTART; 9441 9442 return SUSPEND; 9443 } 9444 9445 static int pqi_suspend_or_freeze(struct device *dev, bool suspend) 9446 { 9447 struct pci_dev *pci_dev; 9448 struct pqi_ctrl_info *ctrl_info; 9449 9450 pci_dev = to_pci_dev(dev); 9451 ctrl_info = pci_get_drvdata(pci_dev); 9452 9453 pqi_wait_until_ofa_finished(ctrl_info); 9454 9455 pqi_ctrl_block_scan(ctrl_info); 9456 pqi_scsi_block_requests(ctrl_info); 9457 pqi_ctrl_block_device_reset(ctrl_info); 9458 pqi_ctrl_block_requests(ctrl_info); 9459 pqi_ctrl_wait_until_quiesced(ctrl_info); 9460 9461 if (suspend) { 9462 enum bmic_flush_cache_shutdown_event shutdown_event; 9463 9464 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9465 pqi_flush_cache(ctrl_info, shutdown_event); 9466 } 9467 9468 pqi_stop_heartbeat_timer(ctrl_info); 9469 
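	/*
	 * With the controller quiesced and the heartbeat timer stopped,
	 * warn about any request that is still outstanding, then release
	 * the IRQs before the PM core completes the suspend/freeze
	 * transition.
	 */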
pqi_crash_if_pending_command(ctrl_info); 9470 pqi_free_irqs(ctrl_info); 9471 9472 ctrl_info->controller_online = false; 9473 ctrl_info->pqi_mode_enabled = false; 9474 9475 return 0; 9476 } 9477 9478 static __maybe_unused int pqi_suspend(struct device *dev) 9479 { 9480 return pqi_suspend_or_freeze(dev, true); 9481 } 9482 9483 static int pqi_resume_or_restore(struct device *dev) 9484 { 9485 int rc; 9486 struct pci_dev *pci_dev; 9487 struct pqi_ctrl_info *ctrl_info; 9488 9489 pci_dev = to_pci_dev(dev); 9490 ctrl_info = pci_get_drvdata(pci_dev); 9491 9492 rc = pqi_request_irqs(ctrl_info); 9493 if (rc) 9494 return rc; 9495 9496 pqi_ctrl_unblock_device_reset(ctrl_info); 9497 pqi_ctrl_unblock_requests(ctrl_info); 9498 pqi_scsi_unblock_requests(ctrl_info); 9499 pqi_ctrl_unblock_scan(ctrl_info); 9500 9501 ssleep(PQI_POST_RESET_DELAY_SECS); 9502 9503 return pqi_ctrl_init_resume(ctrl_info); 9504 } 9505 9506 static int pqi_freeze(struct device *dev) 9507 { 9508 return pqi_suspend_or_freeze(dev, false); 9509 } 9510 9511 static int pqi_thaw(struct device *dev) 9512 { 9513 int rc; 9514 struct pci_dev *pci_dev; 9515 struct pqi_ctrl_info *ctrl_info; 9516 9517 pci_dev = to_pci_dev(dev); 9518 ctrl_info = pci_get_drvdata(pci_dev); 9519 9520 rc = pqi_request_irqs(ctrl_info); 9521 if (rc) 9522 return rc; 9523 9524 ctrl_info->controller_online = true; 9525 ctrl_info->pqi_mode_enabled = true; 9526 9527 pqi_ctrl_unblock_device_reset(ctrl_info); 9528 pqi_ctrl_unblock_requests(ctrl_info); 9529 pqi_scsi_unblock_requests(ctrl_info); 9530 pqi_ctrl_unblock_scan(ctrl_info); 9531 9532 return 0; 9533 } 9534 9535 static int pqi_poweroff(struct device *dev) 9536 { 9537 struct pci_dev *pci_dev; 9538 struct pqi_ctrl_info *ctrl_info; 9539 enum bmic_flush_cache_shutdown_event shutdown_event; 9540 9541 pci_dev = to_pci_dev(dev); 9542 ctrl_info = pci_get_drvdata(pci_dev); 9543 9544 shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev); 9545 pqi_flush_cache(ctrl_info, shutdown_event); 9546 9547 return 0; 9548 } 9549 9550 static const struct dev_pm_ops pqi_pm_ops = { 9551 .suspend = pqi_suspend, 9552 .resume = pqi_resume_or_restore, 9553 .freeze = pqi_freeze, 9554 .thaw = pqi_thaw, 9555 .poweroff = pqi_poweroff, 9556 .restore = pqi_resume_or_restore, 9557 }; 9558 9559 #endif /* CONFIG_PM */ 9560 9561 /* Define the PCI IDs for the controllers that we support. 
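 * The table is matched in order, so the trailing PCI_ANY_ID entry acts as a
 * wildcard that claims any other subsystem ID on the 0x9005/0x028f device;
 * pqi_pci_probe() warns when a controller is matched that way.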
*/ 9562 static const struct pci_device_id pqi_pci_id_table[] = { 9563 { 9564 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9565 0x105b, 0x1211) 9566 }, 9567 { 9568 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9569 0x105b, 0x1321) 9570 }, 9571 { 9572 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9573 0x152d, 0x8a22) 9574 }, 9575 { 9576 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9577 0x152d, 0x8a23) 9578 }, 9579 { 9580 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9581 0x152d, 0x8a24) 9582 }, 9583 { 9584 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9585 0x152d, 0x8a36) 9586 }, 9587 { 9588 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9589 0x152d, 0x8a37) 9590 }, 9591 { 9592 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9593 0x193d, 0x0462) 9594 }, 9595 { 9596 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9597 0x193d, 0x1104) 9598 }, 9599 { 9600 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9601 0x193d, 0x1105) 9602 }, 9603 { 9604 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9605 0x193d, 0x1106) 9606 }, 9607 { 9608 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9609 0x193d, 0x1107) 9610 }, 9611 { 9612 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9613 0x193d, 0x1108) 9614 }, 9615 { 9616 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9617 0x193d, 0x1109) 9618 }, 9619 { 9620 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9621 0x193d, 0x110b) 9622 }, 9623 { 9624 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9625 0x193d, 0x1110) 9626 }, 9627 { 9628 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9629 0x193d, 0x8460) 9630 }, 9631 { 9632 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9633 0x193d, 0x8461) 9634 }, 9635 { 9636 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9637 0x193d, 0x8462) 9638 }, 9639 { 9640 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9641 0x193d, 0xc460) 9642 }, 9643 { 9644 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9645 0x193d, 0xc461) 9646 }, 9647 { 9648 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9649 0x193d, 0xf460) 9650 }, 9651 { 9652 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9653 0x193d, 0xf461) 9654 }, 9655 { 9656 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9657 0x1bd4, 0x0045) 9658 }, 9659 { 9660 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9661 0x1bd4, 0x0046) 9662 }, 9663 { 9664 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9665 0x1bd4, 0x0047) 9666 }, 9667 { 9668 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9669 0x1bd4, 0x0048) 9670 }, 9671 { 9672 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9673 0x1bd4, 0x004a) 9674 }, 9675 { 9676 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9677 0x1bd4, 0x004b) 9678 }, 9679 { 9680 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9681 0x1bd4, 0x004c) 9682 }, 9683 { 9684 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9685 0x1bd4, 0x004f) 9686 }, 9687 { 9688 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9689 0x1bd4, 0x0051) 9690 }, 9691 { 9692 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9693 0x1bd4, 0x0052) 9694 }, 9695 { 9696 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9697 0x1bd4, 0x0053) 9698 }, 9699 { 9700 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9701 0x1bd4, 0x0054) 9702 }, 9703 { 9704 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9705 0x1bd4, 0x006b) 9706 }, 9707 { 9708 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9709 0x1bd4, 0x006c) 9710 }, 9711 { 9712 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9713 0x1bd4, 0x006d) 9714 }, 9715 { 9716 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9717 0x1bd4, 0x006f) 9718 }, 9719 { 9720 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 9721 0x1bd4, 0x0070) 9722 }, 9723 { 9724 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9725 0x1bd4, 0x0071) 9726 }, 9727 { 9728 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9729 0x1bd4, 0x0072) 9730 }, 9731 { 9732 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9733 0x1bd4, 0x0086) 9734 }, 9735 { 9736 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9737 0x1bd4, 0x0087) 9738 }, 9739 { 9740 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9741 0x1bd4, 0x0088) 9742 }, 9743 { 9744 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9745 0x1bd4, 0x0089) 9746 }, 9747 { 9748 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9749 0x1bd4, 0x00a3) 9750 }, 9751 { 9752 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9753 0x1ff9, 0x00a1) 9754 }, 9755 { 9756 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9757 0x1f3a, 0x0104) 9758 }, 9759 { 9760 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9761 0x19e5, 0xd227) 9762 }, 9763 { 9764 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9765 0x19e5, 0xd228) 9766 }, 9767 { 9768 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9769 0x19e5, 0xd229) 9770 }, 9771 { 9772 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9773 0x19e5, 0xd22a) 9774 }, 9775 { 9776 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9777 0x19e5, 0xd22b) 9778 }, 9779 { 9780 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9781 0x19e5, 0xd22c) 9782 }, 9783 { 9784 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9785 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 9786 }, 9787 { 9788 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9789 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 9790 }, 9791 { 9792 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9793 PCI_VENDOR_ID_ADAPTEC2, 0x0659) 9794 }, 9795 { 9796 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9797 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 9798 }, 9799 { 9800 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9801 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 9802 }, 9803 { 9804 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9805 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 9806 }, 9807 { 9808 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9809 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 9810 }, 9811 { 9812 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9813 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 9814 }, 9815 { 9816 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9817 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 9818 }, 9819 { 9820 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9821 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 9822 }, 9823 { 9824 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9825 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 9826 }, 9827 { 9828 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9829 PCI_VENDOR_ID_ADAPTEC2, 0x0808) 9830 }, 9831 { 9832 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9833 PCI_VENDOR_ID_ADAPTEC2, 0x0809) 9834 }, 9835 { 9836 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9837 PCI_VENDOR_ID_ADAPTEC2, 0x080a) 9838 }, 9839 { 9840 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9841 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 9842 }, 9843 { 9844 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9845 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 9846 }, 9847 { 9848 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9849 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 9850 }, 9851 { 9852 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9853 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 9854 }, 9855 { 9856 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9857 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 9858 }, 9859 { 9860 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9861 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 9862 }, 9863 { 9864 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9865 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 9866 }, 9867 { 9868 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9869 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 9870 }, 9871 { 9872 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9873 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 9874 }, 9875 { 9876 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9877 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 9878 }, 9879 { 9880 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9881 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 9882 }, 9883 { 9884 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9885 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 9886 }, 9887 { 9888 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9889 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 9890 }, 9891 { 9892 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9893 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 9894 }, 9895 { 9896 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9897 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 9898 }, 9899 { 9900 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9901 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 9902 }, 9903 { 9904 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9905 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 9906 }, 9907 { 9908 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9909 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 9910 }, 9911 { 9912 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9913 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 9914 }, 9915 { 9916 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9917 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 9918 }, 9919 { 9920 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9921 PCI_VENDOR_ID_ADAPTEC2, 0x1304) 9922 }, 9923 { 9924 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9925 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 9926 }, 9927 { 9928 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9929 PCI_VENDOR_ID_ADAPTEC2, 0x1400) 9930 }, 9931 { 9932 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9933 PCI_VENDOR_ID_ADAPTEC2, 0x1402) 9934 }, 9935 { 9936 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9937 PCI_VENDOR_ID_ADAPTEC2, 0x1410) 9938 }, 9939 { 9940 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9941 PCI_VENDOR_ID_ADAPTEC2, 0x1411) 9942 }, 9943 { 9944 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9945 PCI_VENDOR_ID_ADAPTEC2, 0x1412) 9946 }, 9947 { 9948 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9949 PCI_VENDOR_ID_ADAPTEC2, 0x1420) 9950 }, 9951 { 9952 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9953 PCI_VENDOR_ID_ADAPTEC2, 0x1430) 9954 }, 9955 { 9956 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9957 PCI_VENDOR_ID_ADAPTEC2, 0x1440) 9958 }, 9959 { 9960 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9961 PCI_VENDOR_ID_ADAPTEC2, 0x1441) 9962 }, 9963 { 9964 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9965 PCI_VENDOR_ID_ADAPTEC2, 0x1450) 9966 }, 9967 { 9968 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9969 PCI_VENDOR_ID_ADAPTEC2, 0x1452) 9970 }, 9971 { 9972 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9973 PCI_VENDOR_ID_ADAPTEC2, 0x1460) 9974 }, 9975 { 9976 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9977 PCI_VENDOR_ID_ADAPTEC2, 0x1461) 9978 }, 9979 { 9980 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9981 PCI_VENDOR_ID_ADAPTEC2, 0x1462) 9982 }, 9983 { 9984 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9985 PCI_VENDOR_ID_ADAPTEC2, 0x1463) 9986 }, 9987 { 9988 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9989 PCI_VENDOR_ID_ADAPTEC2, 0x1470) 9990 }, 9991 { 9992 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9993 PCI_VENDOR_ID_ADAPTEC2, 0x1471) 9994 }, 9995 { 9996 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 9997 PCI_VENDOR_ID_ADAPTEC2, 0x1472) 9998 }, 9999 { 10000 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10001 PCI_VENDOR_ID_ADAPTEC2, 0x1473) 10002 }, 10003 { 10004 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10005 PCI_VENDOR_ID_ADAPTEC2, 0x1474) 10006 }, 10007 { 10008 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10009 PCI_VENDOR_ID_ADAPTEC2, 0x1475) 10010 }, 10011 { 10012 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10013 PCI_VENDOR_ID_ADAPTEC2, 0x1480) 10014 }, 10015 { 10016 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10017 PCI_VENDOR_ID_ADAPTEC2, 0x1490) 10018 }, 10019 { 10020 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10021 PCI_VENDOR_ID_ADAPTEC2, 0x1491) 10022 }, 10023 { 10024 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10025 PCI_VENDOR_ID_ADAPTEC2, 0x14a0) 10026 }, 10027 { 10028 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10029 PCI_VENDOR_ID_ADAPTEC2, 0x14a1) 10030 }, 10031 { 10032 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10033 PCI_VENDOR_ID_ADAPTEC2, 0x14a2) 10034 }, 10035 { 10036 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10037 PCI_VENDOR_ID_ADAPTEC2, 0x14a4) 10038 }, 10039 { 10040 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10041 PCI_VENDOR_ID_ADAPTEC2, 0x14a5) 10042 }, 10043 { 10044 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10045 PCI_VENDOR_ID_ADAPTEC2, 0x14a6) 10046 }, 10047 { 10048 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10049 PCI_VENDOR_ID_ADAPTEC2, 0x14b0) 10050 }, 10051 { 10052 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10053 PCI_VENDOR_ID_ADAPTEC2, 0x14b1) 10054 }, 10055 { 10056 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10057 PCI_VENDOR_ID_ADAPTEC2, 0x14c0) 10058 }, 10059 { 10060 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10061 PCI_VENDOR_ID_ADAPTEC2, 0x14c1) 10062 }, 10063 { 10064 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10065 PCI_VENDOR_ID_ADAPTEC2, 0x14c2) 10066 }, 10067 { 10068 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10069 PCI_VENDOR_ID_ADAPTEC2, 0x14c3) 10070 }, 10071 { 10072 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10073 PCI_VENDOR_ID_ADAPTEC2, 0x14c4) 10074 }, 10075 { 10076 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10077 PCI_VENDOR_ID_ADAPTEC2, 0x14d0) 10078 }, 10079 { 10080 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10081 PCI_VENDOR_ID_ADAPTEC2, 0x14e0) 10082 }, 10083 { 10084 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10085 PCI_VENDOR_ID_ADAPTEC2, 0x14f0) 10086 }, 10087 { 10088 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10089 0x207d, 0x4044) 10090 }, 10091 { 10092 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10093 0x207d, 0x4054) 10094 }, 10095 { 10096 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10097 0x207d, 0x4084) 10098 }, 10099 { 10100 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10101 0x207d, 0x4094) 10102 }, 10103 { 10104 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10105 0x207d, 0x4140) 10106 }, 10107 { 10108 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10109 0x207d, 0x4240) 10110 }, 10111 { 10112 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10113 PCI_VENDOR_ID_ADVANTECH, 0x8312) 10114 }, 10115 { 10116 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10117 PCI_VENDOR_ID_DELL, 0x1fe0) 10118 }, 10119 { 10120 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10121 PCI_VENDOR_ID_HP, 0x0600) 10122 }, 10123 { 10124 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10125 PCI_VENDOR_ID_HP, 0x0601) 10126 }, 10127 { 10128 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10129 PCI_VENDOR_ID_HP, 0x0602) 10130 }, 10131 { 10132 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10133 PCI_VENDOR_ID_HP, 0x0603) 10134 }, 10135 { 10136 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10137 PCI_VENDOR_ID_HP, 0x0609) 10138 }, 10139 { 10140 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10141 PCI_VENDOR_ID_HP, 0x0650) 10142 }, 10143 { 10144 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10145 PCI_VENDOR_ID_HP, 0x0651) 10146 }, 10147 { 10148 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10149 PCI_VENDOR_ID_HP, 0x0652) 10150 }, 10151 { 10152 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10153 PCI_VENDOR_ID_HP, 0x0653) 10154 }, 10155 { 10156 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10157 PCI_VENDOR_ID_HP, 0x0654) 10158 }, 10159 { 10160 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10161 PCI_VENDOR_ID_HP, 0x0655) 10162 }, 10163 { 10164 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10165 PCI_VENDOR_ID_HP, 0x0700) 10166 }, 10167 { 10168 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10169 PCI_VENDOR_ID_HP, 0x0701) 10170 }, 10171 { 10172 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10173 PCI_VENDOR_ID_HP, 0x1001) 10174 }, 10175 { 10176 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10177 PCI_VENDOR_ID_HP, 0x1002) 10178 }, 10179 { 10180 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10181 PCI_VENDOR_ID_HP, 0x1100) 10182 }, 10183 { 10184 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10185 PCI_VENDOR_ID_HP, 0x1101) 10186 }, 10187 { 10188 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10189 0x1590, 0x0294) 10190 }, 10191 { 10192 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10193 0x1590, 0x02db) 10194 }, 10195 { 10196 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10197 0x1590, 0x02dc) 10198 }, 10199 { 10200 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10201 0x1590, 0x032e) 10202 }, 10203 { 10204 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10205 0x1590, 0x036f) 10206 }, 10207 { 10208 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10209 0x1590, 0x0381) 10210 }, 10211 { 10212 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10213 0x1590, 0x0382) 10214 }, 10215 { 10216 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10217 0x1590, 0x0383) 10218 }, 10219 { 10220 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10221 0x1d8d, 0x0800) 10222 }, 10223 { 10224 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10225 0x1d8d, 0x0908) 10226 }, 10227 { 10228 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10229 0x1d8d, 0x0806) 10230 }, 10231 { 10232 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10233 0x1d8d, 0x0916) 10234 }, 10235 { 10236 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10237 PCI_VENDOR_ID_GIGABYTE, 0x1000) 10238 }, 10239 { 10240 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10241 0x1dfc, 0x3161) 10242 }, 10243 { 10244 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10245 0x1f0c, 0x3161) 10246 }, 10247 { 10248 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10249 0x1cf2, 0x0804) 10250 }, 10251 { 10252 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10253 0x1cf2, 0x0805) 10254 }, 10255 { 10256 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10257 0x1cf2, 0x0806) 10258 }, 10259 { 10260 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10261 0x1cf2, 0x5445) 10262 }, 10263 { 10264 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10265 0x1cf2, 0x5446) 10266 }, 10267 { 10268 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10269 0x1cf2, 0x5447) 10270 }, 10271 { 10272 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10273 0x1cf2, 0x5449) 10274 }, 10275 { 10276 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10277 0x1cf2, 0x544a) 10278 }, 10279 { 10280 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10281 0x1cf2, 0x544b) 10282 }, 10283 { 10284 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10285 0x1cf2, 0x544d) 10286 }, 10287 { 10288 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10289 0x1cf2, 0x544e) 10290 }, 10291 { 10292 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10293 0x1cf2, 0x544f) 10294 }, 10295 { 10296 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10297 0x1cf2, 0x54da) 10298 }, 10299 { 10300 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10301 0x1cf2, 0x54db) 10302 }, 10303 { 10304 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10305 0x1cf2, 0x54dc) 10306 }, 10307 { 10308 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10309 0x1cf2, 0x0b27) 10310 }, 10311 { 10312 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10313 0x1cf2, 0x0b29) 10314 }, 10315 { 10316 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10317 0x1cf2, 0x0b45) 10318 }, 10319 { 10320 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10321 0x1cc4, 0x0101) 10322 }, 10323 { 10324 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10325 0x1cc4, 0x0201) 10326 }, 10327 { 10328 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10329 0x1018, 0x8238) 10330 }, 10331 { 10332 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10333 0x1f3f, 0x0610) 10334 }, 10335 { 10336 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10337 PCI_VENDOR_ID_LENOVO, 0x0220) 10338 }, 10339 { 10340 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10341 PCI_VENDOR_ID_LENOVO, 0x0221) 10342 }, 10343 { 10344 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10345 PCI_VENDOR_ID_LENOVO, 0x0222) 10346 }, 10347 { 10348 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10349 PCI_VENDOR_ID_LENOVO, 0x0223) 10350 }, 10351 { 10352 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10353 PCI_VENDOR_ID_LENOVO, 0x0224) 10354 }, 10355 { 10356 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10357 PCI_VENDOR_ID_LENOVO, 0x0225) 10358 }, 10359 { 10360 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10361 PCI_VENDOR_ID_LENOVO, 0x0520) 10362 }, 10363 { 10364 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10365 PCI_VENDOR_ID_LENOVO, 0x0521) 10366 }, 10367 { 10368 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10369 PCI_VENDOR_ID_LENOVO, 0x0522) 10370 }, 10371 { 10372 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10373 PCI_VENDOR_ID_LENOVO, 0x0620) 10374 }, 10375 { 10376 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10377 PCI_VENDOR_ID_LENOVO, 0x0621) 10378 }, 10379 { 10380 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10381 PCI_VENDOR_ID_LENOVO, 0x0622) 10382 }, 10383 { 10384 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10385 PCI_VENDOR_ID_LENOVO, 0x0623) 10386 }, 10387 { 10388 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10389 PCI_VENDOR_ID_LENOVO, 0x0624) 10390 }, 10391 { 10392 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10393 PCI_VENDOR_ID_LENOVO, 0x0625) 10394 }, 10395 { 10396 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10397 PCI_VENDOR_ID_LENOVO, 0x0626) 10398 }, 10399 { 10400 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10401 PCI_VENDOR_ID_LENOVO, 0x0627) 10402 }, 10403 { 10404 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10405 PCI_VENDOR_ID_LENOVO, 0x0628) 10406 }, 10407 { 10408 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10409 0x1014, 0x0718) 10410 }, 10411 { 10412 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10413 0x1137, 0x02f8) 10414 }, 10415 { 10416 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10417 0x1137, 0x02f9) 10418 }, 10419 { 10420 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10421 0x1137, 0x02fa) 10422 }, 10423 { 10424 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10425 0x1137, 0x02fe) 10426 }, 10427 { 10428 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10429 0x1137, 0x02ff) 10430 }, 
10431 { 10432 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10433 0x1137, 0x0300) 10434 }, 10435 { 10436 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10437 0x1ded, 0x3301) 10438 }, 10439 { 10440 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10441 0x1ff9, 0x0045) 10442 }, 10443 { 10444 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10445 0x1ff9, 0x0046) 10446 }, 10447 { 10448 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10449 0x1ff9, 0x0047) 10450 }, 10451 { 10452 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10453 0x1ff9, 0x0048) 10454 }, 10455 { 10456 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10457 0x1ff9, 0x004a) 10458 }, 10459 { 10460 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10461 0x1ff9, 0x004b) 10462 }, 10463 { 10464 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10465 0x1ff9, 0x004c) 10466 }, 10467 { 10468 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10469 0x1ff9, 0x004f) 10470 }, 10471 { 10472 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10473 0x1ff9, 0x0051) 10474 }, 10475 { 10476 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10477 0x1ff9, 0x0052) 10478 }, 10479 { 10480 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10481 0x1ff9, 0x0053) 10482 }, 10483 { 10484 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10485 0x1ff9, 0x0054) 10486 }, 10487 { 10488 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10489 0x1ff9, 0x006b) 10490 }, 10491 { 10492 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10493 0x1ff9, 0x006c) 10494 }, 10495 { 10496 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10497 0x1ff9, 0x006d) 10498 }, 10499 { 10500 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10501 0x1ff9, 0x006f) 10502 }, 10503 { 10504 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10505 0x1ff9, 0x0070) 10506 }, 10507 { 10508 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10509 0x1ff9, 0x0071) 10510 }, 10511 { 10512 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10513 0x1ff9, 0x0072) 10514 }, 10515 { 10516 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10517 0x1ff9, 0x0086) 10518 }, 10519 { 10520 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10521 0x1ff9, 0x0087) 10522 }, 10523 { 10524 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10525 0x1ff9, 0x0088) 10526 }, 10527 { 10528 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10529 0x1ff9, 0x0089) 10530 }, 10531 { 10532 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10533 0x1e93, 0x1000) 10534 }, 10535 { 10536 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10537 0x1e93, 0x1001) 10538 }, 10539 { 10540 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10541 0x1e93, 0x1002) 10542 }, 10543 { 10544 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10545 0x1e93, 0x1005) 10546 }, 10547 { 10548 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10549 0x1f51, 0x1001) 10550 }, 10551 { 10552 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10553 0x1f51, 0x1002) 10554 }, 10555 { 10556 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10557 0x1f51, 0x1003) 10558 }, 10559 { 10560 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10561 0x1f51, 0x1004) 10562 }, 10563 { 10564 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10565 0x1f51, 0x1005) 10566 }, 10567 { 10568 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10569 0x1f51, 0x1006) 10570 }, 10571 { 10572 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10573 0x1f51, 0x1007) 10574 }, 10575 { 10576 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10577 0x1f51, 0x1008) 10578 }, 10579 { 10580 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10581 0x1f51, 0x1009) 10582 }, 10583 { 10584 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 
0x028f, 10585 0x1f51, 0x100a) 10586 }, 10587 { 10588 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10589 0x1f51, 0x100b) 10590 }, 10591 { 10592 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10593 0x1f51, 0x100e) 10594 }, 10595 { 10596 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10597 0x1f51, 0x100f) 10598 }, 10599 { 10600 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10601 0x1f51, 0x1010) 10602 }, 10603 { 10604 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10605 0x1f51, 0x1011) 10606 }, 10607 { 10608 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10609 0x1f51, 0x1043) 10610 }, 10611 { 10612 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10613 0x1f51, 0x1044) 10614 }, 10615 { 10616 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10617 0x1f51, 0x1045) 10618 }, 10619 { 10620 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10621 0x1ff9, 0x00a3) 10622 }, 10623 { 10624 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 10625 PCI_ANY_ID, PCI_ANY_ID) 10626 }, 10627 { 0 } 10628 }; 10629 10630 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 10631 10632 static struct pci_driver pqi_pci_driver = { 10633 .name = DRIVER_NAME_SHORT, 10634 .id_table = pqi_pci_id_table, 10635 .probe = pqi_pci_probe, 10636 .remove = pqi_pci_remove, 10637 .shutdown = pqi_shutdown, 10638 #if defined(CONFIG_PM) 10639 .driver = { 10640 .pm = &pqi_pm_ops 10641 }, 10642 #endif 10643 }; 10644 10645 static int __init pqi_init(void) 10646 { 10647 int rc; 10648 10649 pr_info(DRIVER_NAME "\n"); 10650 pqi_verify_structures(); 10651 sis_verify_structures(); 10652 10653 pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions); 10654 if (!pqi_sas_transport_template) 10655 return -ENODEV; 10656 10657 pqi_process_module_params(); 10658 10659 rc = pci_register_driver(&pqi_pci_driver); 10660 if (rc) 10661 sas_release_transport(pqi_sas_transport_template); 10662 10663 return rc; 10664 } 10665 10666 static void __exit pqi_cleanup(void) 10667 { 10668 pci_unregister_driver(&pqi_pci_driver); 10669 sas_release_transport(pqi_sas_transport_template); 10670 } 10671 10672 module_init(pqi_init); 10673 module_exit(pqi_cleanup); 10674 10675 static void pqi_verify_structures(void) 10676 { 10677 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10678 sis_host_to_ctrl_doorbell) != 0x20); 10679 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10680 sis_interrupt_mask) != 0x34); 10681 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10682 sis_ctrl_to_host_doorbell) != 0x9c); 10683 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10684 sis_ctrl_to_host_doorbell_clear) != 0xa0); 10685 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10686 sis_driver_scratch) != 0xb0); 10687 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10688 sis_product_identifier) != 0xb4); 10689 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10690 sis_firmware_status) != 0xbc); 10691 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10692 sis_ctrl_shutdown_reason_code) != 0xcc); 10693 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10694 sis_mailbox) != 0x1000); 10695 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 10696 pqi_registers) != 0x4000); 10697 10698 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10699 iu_type) != 0x0); 10700 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10701 iu_length) != 0x2); 10702 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10703 response_queue_id) != 0x4); 10704 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 10705 driver_flags) != 0x6); 10706 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 10707 10708 BUILD_BUG_ON(offsetof(struct 
pqi_aio_error_info, 10709 status) != 0x0); 10710 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10711 service_response) != 0x1); 10712 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10713 data_present) != 0x2); 10714 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10715 reserved) != 0x3); 10716 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10717 residual_count) != 0x4); 10718 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10719 data_length) != 0x8); 10720 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10721 reserved1) != 0xa); 10722 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 10723 data) != 0xc); 10724 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 10725 10726 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10727 data_in_result) != 0x0); 10728 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10729 data_out_result) != 0x1); 10730 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10731 reserved) != 0x2); 10732 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10733 status) != 0x5); 10734 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10735 status_qualifier) != 0x6); 10736 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10737 sense_data_length) != 0x8); 10738 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10739 response_data_length) != 0xa); 10740 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10741 data_in_transferred) != 0xc); 10742 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10743 data_out_transferred) != 0x10); 10744 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 10745 data) != 0x14); 10746 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 10747 10748 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10749 signature) != 0x0); 10750 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10751 function_and_status_code) != 0x8); 10752 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10753 max_admin_iq_elements) != 0x10); 10754 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10755 max_admin_oq_elements) != 0x11); 10756 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10757 admin_iq_element_length) != 0x12); 10758 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10759 admin_oq_element_length) != 0x13); 10760 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10761 max_reset_timeout) != 0x14); 10762 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10763 legacy_intx_status) != 0x18); 10764 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10765 legacy_intx_mask_set) != 0x1c); 10766 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10767 legacy_intx_mask_clear) != 0x20); 10768 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10769 device_status) != 0x40); 10770 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10771 admin_iq_pi_offset) != 0x48); 10772 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10773 admin_oq_ci_offset) != 0x50); 10774 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10775 admin_iq_element_array_addr) != 0x58); 10776 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10777 admin_oq_element_array_addr) != 0x60); 10778 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10779 admin_iq_ci_addr) != 0x68); 10780 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10781 admin_oq_pi_addr) != 0x70); 10782 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10783 admin_iq_num_elements) != 0x78); 10784 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10785 admin_oq_num_elements) != 0x79); 10786 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10787 admin_queue_int_msg_num) != 0x7a); 10788 
BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10789 device_error) != 0x80); 10790 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10791 error_details) != 0x88); 10792 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10793 device_reset) != 0x90); 10794 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 10795 power_action) != 0x94); 10796 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 10797 10798 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10799 header.iu_type) != 0); 10800 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10801 header.iu_length) != 2); 10802 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10803 header.driver_flags) != 6); 10804 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10805 request_id) != 8); 10806 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10807 function_code) != 10); 10808 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10809 data.report_device_capability.buffer_length) != 44); 10810 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10811 data.report_device_capability.sg_descriptor) != 48); 10812 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10813 data.create_operational_iq.queue_id) != 12); 10814 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10815 data.create_operational_iq.element_array_addr) != 16); 10816 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10817 data.create_operational_iq.ci_addr) != 24); 10818 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10819 data.create_operational_iq.num_elements) != 32); 10820 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10821 data.create_operational_iq.element_length) != 34); 10822 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10823 data.create_operational_iq.queue_protocol) != 36); 10824 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10825 data.create_operational_oq.queue_id) != 12); 10826 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10827 data.create_operational_oq.element_array_addr) != 16); 10828 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10829 data.create_operational_oq.pi_addr) != 24); 10830 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10831 data.create_operational_oq.num_elements) != 32); 10832 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10833 data.create_operational_oq.element_length) != 34); 10834 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10835 data.create_operational_oq.queue_protocol) != 36); 10836 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10837 data.create_operational_oq.int_msg_num) != 40); 10838 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10839 data.create_operational_oq.coalescing_count) != 42); 10840 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10841 data.create_operational_oq.min_coalescing_time) != 44); 10842 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10843 data.create_operational_oq.max_coalescing_time) != 48); 10844 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 10845 data.delete_operational_queue.queue_id) != 12); 10846 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 10847 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10848 data.create_operational_iq) != 64 - 11); 10849 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10850 data.create_operational_oq) != 64 - 11); 10851 BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 10852 data.delete_operational_queue) != 64 - 11); 10853 10854 
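	/*
	 * The checks below continue with the admin response and I/O path
	 * request/response IUs.  Because BUILD_BUG_ON() is evaluated at
	 * compile time, e.g.
	 *
	 *	BUILD_BUG_ON(offsetof(struct pqi_iu_header, iu_type) != 0x0);
	 *
	 * any structure change that shifts one of these offsets breaks the
	 * build instead of producing IUs the controller cannot parse.
	 */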
BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10855 header.iu_type) != 0); 10856 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10857 header.iu_length) != 2); 10858 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10859 header.driver_flags) != 6); 10860 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10861 request_id) != 8); 10862 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10863 function_code) != 10); 10864 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10865 status) != 11); 10866 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10867 data.create_operational_iq.status_descriptor) != 12); 10868 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10869 data.create_operational_iq.iq_pi_offset) != 16); 10870 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10871 data.create_operational_oq.status_descriptor) != 12); 10872 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 10873 data.create_operational_oq.oq_ci_offset) != 16); 10874 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 10875 10876 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10877 header.iu_type) != 0); 10878 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10879 header.iu_length) != 2); 10880 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10881 header.response_queue_id) != 4); 10882 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10883 header.driver_flags) != 6); 10884 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10885 request_id) != 8); 10886 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10887 nexus_id) != 10); 10888 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10889 buffer_length) != 12); 10890 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10891 lun_number) != 16); 10892 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10893 protocol_specific) != 24); 10894 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10895 error_index) != 27); 10896 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10897 cdb) != 32); 10898 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10899 timeout) != 60); 10900 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 10901 sg_descriptors) != 64); 10902 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 10903 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10904 10905 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10906 header.iu_type) != 0); 10907 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10908 header.iu_length) != 2); 10909 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10910 header.response_queue_id) != 4); 10911 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10912 header.driver_flags) != 6); 10913 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10914 request_id) != 8); 10915 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10916 nexus_id) != 12); 10917 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10918 buffer_length) != 16); 10919 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10920 data_encryption_key_index) != 22); 10921 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10922 encrypt_tweak_lower) != 24); 10923 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10924 encrypt_tweak_upper) != 28); 10925 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10926 cdb) != 32); 10927 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10928 error_index) != 48); 10929 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10930 num_sg_descriptors) != 50); 10931 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10932 cdb_length) != 51); 
10933 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10934 lun_number) != 52); 10935 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 10936 sg_descriptors) != 64); 10937 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 10938 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 10939 10940 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10941 header.iu_type) != 0); 10942 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10943 header.iu_length) != 2); 10944 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10945 request_id) != 8); 10946 BUILD_BUG_ON(offsetof(struct pqi_io_response, 10947 error_index) != 10); 10948 10949 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10950 header.iu_type) != 0); 10951 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10952 header.iu_length) != 2); 10953 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10954 header.response_queue_id) != 4); 10955 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10956 request_id) != 8); 10957 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10958 data.report_event_configuration.buffer_length) != 12); 10959 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10960 data.report_event_configuration.sg_descriptors) != 16); 10961 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10962 data.set_event_configuration.global_event_oq_id) != 10); 10963 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10964 data.set_event_configuration.buffer_length) != 12); 10965 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 10966 data.set_event_configuration.sg_descriptors) != 16); 10967 10968 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10969 max_inbound_iu_length) != 6); 10970 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 10971 max_outbound_iu_length) != 14); 10972 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 10973 10974 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10975 data_length) != 0); 10976 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10977 iq_arbitration_priority_support_bitmask) != 8); 10978 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10979 maximum_aw_a) != 9); 10980 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10981 maximum_aw_b) != 10); 10982 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10983 maximum_aw_c) != 11); 10984 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10985 max_inbound_queues) != 16); 10986 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10987 max_elements_per_iq) != 18); 10988 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10989 max_iq_element_length) != 24); 10990 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10991 min_iq_element_length) != 26); 10992 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10993 max_outbound_queues) != 30); 10994 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10995 max_elements_per_oq) != 32); 10996 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10997 intr_coalescing_time_granularity) != 34); 10998 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 10999 max_oq_element_length) != 36); 11000 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 11001 min_oq_element_length) != 38); 11002 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 11003 iu_layer_descriptors) != 64); 11004 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 11005 11006 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 11007 event_type) != 0); 11008 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 11009 oq_id) != 2); 11010 
BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 11011 11012 BUILD_BUG_ON(offsetof(struct pqi_event_config, 11013 num_event_descriptors) != 2); 11014 BUILD_BUG_ON(offsetof(struct pqi_event_config, 11015 descriptors) != 4); 11016 11017 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 11018 ARRAY_SIZE(pqi_supported_event_types)); 11019 11020 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11021 header.iu_type) != 0); 11022 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11023 header.iu_length) != 2); 11024 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11025 event_type) != 8); 11026 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11027 event_id) != 10); 11028 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11029 additional_event_id) != 12); 11030 BUILD_BUG_ON(offsetof(struct pqi_event_response, 11031 data) != 16); 11032 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 11033 11034 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11035 header.iu_type) != 0); 11036 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11037 header.iu_length) != 2); 11038 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11039 event_type) != 8); 11040 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11041 event_id) != 10); 11042 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 11043 additional_event_id) != 12); 11044 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 11045 11046 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11047 header.iu_type) != 0); 11048 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11049 header.iu_length) != 2); 11050 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11051 request_id) != 8); 11052 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11053 nexus_id) != 10); 11054 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11055 timeout) != 14); 11056 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11057 lun_number) != 16); 11058 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11059 protocol_specific) != 24); 11060 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11061 outbound_queue_id_to_manage) != 26); 11062 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11063 request_id_to_manage) != 28); 11064 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 11065 task_management_function) != 30); 11066 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 11067 11068 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11069 header.iu_type) != 0); 11070 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11071 header.iu_length) != 2); 11072 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11073 request_id) != 8); 11074 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11075 nexus_id) != 10); 11076 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11077 additional_response_info) != 12); 11078 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 11079 response_code) != 15); 11080 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 11081 11082 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11083 configured_logical_drive_count) != 0); 11084 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11085 configuration_signature) != 1); 11086 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11087 firmware_version_short) != 5); 11088 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11089 extended_logical_unit_count) != 154); 11090 
BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11091 firmware_build_number) != 190); 11092 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11093 vendor_id) != 200); 11094 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11095 product_id) != 208); 11096 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11097 extra_controller_flags) != 286); 11098 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11099 controller_mode) != 292); 11100 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11101 spare_part_number) != 293); 11102 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 11103 firmware_version_long) != 325); 11104 11105 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11106 phys_bay_in_box) != 115); 11107 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11108 device_type) != 120); 11109 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11110 redundant_path_present_map) != 1736); 11111 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11112 active_path_number) != 1738); 11113 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11114 alternate_paths_phys_connector) != 1739); 11115 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11116 alternate_paths_phys_box_on_port) != 1755); 11117 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 11118 current_queue_depth_limit) != 1796); 11119 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 11120 11121 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4); 11122 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11123 page_code) != 0); 11124 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11125 subpage_code) != 1); 11126 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header, 11127 buffer_length) != 2); 11128 11129 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4); 11130 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11131 page_code) != 0); 11132 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11133 subpage_code) != 1); 11134 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header, 11135 page_length) != 2); 11136 11137 BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage) 11138 != 18); 11139 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11140 header) != 0); 11141 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11142 firmware_read_support) != 4); 11143 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11144 driver_read_support) != 5); 11145 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11146 firmware_write_support) != 6); 11147 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11148 driver_write_support) != 7); 11149 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11150 max_transfer_encrypted_sas_sata) != 8); 11151 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11152 max_transfer_encrypted_nvme) != 10); 11153 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11154 max_write_raid_5_6) != 12); 11155 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11156 max_write_raid_1_10_2drive) != 14); 11157 BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage, 11158 max_write_raid_1_10_3drive) != 16); 11159 11160 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 11161 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 11162 
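	/*
	 * Queue element lengths must be multiples of
	 * PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT, and operational queue elements
	 * may not exceed 1048560 bytes (0xffff * 16), as enforced below.
	 */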
BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 11163 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11164 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 11165 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11166 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 11167 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 11168 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11169 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 11170 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 11171 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 11172 11173 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 11174 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 11175 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 11176 } 11177