// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"2.1.26-030"
#define DRIVER_MAJOR		2
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		26
#define DRIVER_REVISION		30

#define DRIVER_NAME		"Microchip SmartPQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

#define PQI_POST_RESET_DELAY_SECS			5
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS	10

#define PQI_NO_COMPLETION	((void *)-1)

MODULE_AUTHOR("Microchip");
MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version "
	DRIVER_VERSION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

struct pqi_cmd_priv {
	int this_residual;
};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{
	return scsi_cmd_priv(cmd);
}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info,
	unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action action;
	char *name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first, "Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep, "Hide the virtual SEP for direct attached drives.");

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC(disable_managed_interrupts,
	"Disable the kernel automatically assigning SMP affinity to IRQs.");

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC(ctrl_ready_timeout,
	"Timeout in seconds for driver to wait for controller ready.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-6",
	"RAID-1(Triple)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_TRIPLE		6	/* also used for RAID 1+0 Triple */
#define SA_RAID_MAX		SA_RAID_TRIPLE
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scsi_done(scmd);
}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info, PQI_FIRMWARE_KERNEL_NOT_UP);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

#define PQI_DRIVER_SCRATCH_PQI_MODE			0x1
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED		0x2

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_PQI_MODE ?
					PQI_MODE : SIS_MODE;
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (mode == PQI_MODE)
		driver_scratch |= PQI_DRIVER_SCRATCH_PQI_MODE;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_PQI_MODE;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{
	return (sis_read_driver_scratch(ctrl_info) & PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED) != 0;
}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{
	u32 driver_scratch;

	driver_scratch = sis_read_driver_scratch(ctrl_info);

	if (is_supported)
		driver_scratch |= PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;
	else
		driver_scratch &= ~PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED;

	sis_write_driver_scratch(ctrl_info, driver_scratch);
}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = true;
	mutex_lock(&ctrl_info->scan_mutex);
}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->scan_blocked = false;
	mutex_unlock(&ctrl_info->scan_mutex);
}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->scan_blocked;
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;
	unsigned int num_loops;
	int msecs_sleep;

	shost = ctrl_info->scsi_host;

	scsi_block_requests(shost);

	num_loops = 0;
	msecs_sleep = 20;
	while (scsi_host_busy(shost)) {
		num_loops++;
		if (num_loops == 10)
			msecs_sleep = 500;
		msleep(msecs_sleep);
	}
}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		return;

	atomic_inc(&ctrl_info->num_blocked_threads);
	wait_event(ctrl_info->block_requests_wait,
		!pqi_ctrl_blocked(ctrl_info));
	atomic_dec(&ctrl_info->num_blocked_threads);
}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS	10

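/*
 * Wait for in-flight driver activity to quiesce: loop until the number of
 * busy threads drops to the number of threads already blocked in
 * pqi_wait_if_ctrl_blocked(), warning every PQI_QUIESCE_WARNING_TIMEOUT_SECS
 * seconds while waiting.
 */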
static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long start_jiffies;
	unsigned long warning_timeout;
	bool displayed_warning;

	displayed_warning = false;
	start_jiffies = jiffies;
	warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + start_jiffies;

	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads)) {
		if (time_after(jiffies, warning_timeout)) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"waiting %u seconds for driver activity to quiesce\n",
				jiffies_to_msecs(jiffies - start_jiffies) / 1000);
			displayed_warning = true;
			warning_timeout = (PQI_QUIESCE_WARNING_TIMEOUT_SECS * HZ) + jiffies;
		}
		usleep_range(1000, 2000);
	}

	if (displayed_warning)
		dev_warn(&ctrl_info->pci_dev->dev,
			"driver activity quiesced after waiting for %u seconds\n",
			jiffies_to_msecs(jiffies - start_jiffies) / 1000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{
	return mutex_is_locked(&ctrl_info->ofa_mutex);
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{
	device->in_reset[lun] = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{
	return device->in_reset[lun];
}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

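/*
 * Read the controller's heartbeat counter; returns 0 if no counter has been
 * mapped for this controller.
 */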
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	u8 status;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~PQI_SOFT_RESET_ABORT;
	writeb(status, ctrl_info->soft_reset_status);
}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{
	bool io_high_prio;
	int priority_class;

	io_high_prio = false;

	if (device->ncq_prio_enable) {
		priority_class =
			IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd)));
		if (priority_class == IOPRIO_CLASS_RT) {
			/* Set NCQ priority for read/write commands. */
			switch (scmd->cmnd[0]) {
			case WRITE_16:
			case READ_16:
			case WRITE_12:
			case READ_12:
			case WRITE_10:
			case READ_10:
			case WRITE_6:
			case READ_6:
				io_high_prio = true;
				break;
			}
		}
	}

	return io_high_prio;
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS) {
			if (ctrl_info->rpl_extended_format_4_5_supported)
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4;
			else
				cdb[1] = CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2;
		} else {
			cdb[1] = ctrl_info->ciss_report_log_flags;
		}
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->header.driver_flags = PQI_DRIVER_NONBLOCKABLE_REQUEST;
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
	case BMIC_SENSE_FEATURE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

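/*
 * Allocate an I/O request slot. Requests originating from the SCSI midlayer
 * reuse the block layer tag as the slot index; IOCTL and driver-internal
 * requests spin over the reserved slots at the end of the pool until one
 * becomes free.
 */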
static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{
	struct pqi_io_request *io_request;
	u16 i;

	if (scmd) { /* SML I/O request */
		u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

		i = blk_mq_unique_tag_to_tag(blk_tag);
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) > 1) {
			atomic_dec(&io_request->refcount);
			return NULL;
		}
	} else { /* IOCTL or driver internal request */
		/*
		 * benignly racy - may have to wait for an open slot.
		 * command slot range is scsi_ml_can_queue -
		 *	[scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)]
		 */
		i = 0;
		while (1) {
			io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i];
			if (atomic_inc_return(&io_request->refcount) == 1)
				break;
			atomic_dec(&io_request->refcount);
			i = (i + 1) % PQI_RESERVED_IO_SLOTS;
		}
	}

	if (io_request)
		pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr,
		buffer, buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, error_info);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL);
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

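/*
 * The BMIC sense-feature limits below are reported in units of 1024 bytes;
 * a value of 0 means no limit.
 */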
static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{
	u32 bytes;

	bytes = get_unaligned_le16(limit);
	if (bytes == 0)
		bytes = ~0;
	else
		bytes *= 1024;

	return bytes;
}

#pragma pack(1)

struct bmic_sense_feature_buffer {
	struct bmic_sense_feature_buffer_header header;
	struct bmic_sense_feature_io_page_aio_subpage aio_subpage;
};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH	\
	offsetofend(struct bmic_sense_feature_buffer, \
		aio_subpage.max_write_raid_1_10_3drive)

#define MINIMUM_AIO_SUBPAGE_LENGTH	\
	(offsetofend(struct bmic_sense_feature_io_page_aio_subpage, \
		max_write_raid_1_10_3drive) - \
		sizeof_field(struct bmic_sense_feature_io_page_aio_subpage, header))

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	enum dma_data_direction dir;
	struct pqi_raid_path_request request;
	struct bmic_sense_feature_buffer *buffer;

	buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request, BMIC_SENSE_FEATURE, RAID_CTLR_LUNID,
		buffer, sizeof(*buffer), 0, &dir);
	if (rc)
		goto error;

	request.cdb[2] = BMIC_SENSE_FEATURE_IO_PAGE;
	request.cdb[3] = BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	if (rc)
		goto error;

	if (buffer->header.page_code != BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->header.buffer_length) <
			MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH ||
		buffer->aio_subpage.header.page_code !=
			BMIC_SENSE_FEATURE_IO_PAGE ||
		buffer->aio_subpage.header.subpage_code !=
			BMIC_SENSE_FEATURE_IO_PAGE_AIO_SUBPAGE ||
		get_unaligned_le16(&buffer->aio_subpage.header.page_length) <
			MINIMUM_AIO_SUBPAGE_LENGTH) {
		goto error;
	}

	ctrl_info->max_transfer_encrypted_sas_sata =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_sas_sata);

	ctrl_info->max_transfer_encrypted_nvme =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_transfer_encrypted_nvme);

	ctrl_info->max_write_raid_5_6 =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_5_6);

	ctrl_info->max_write_raid_1_10_2drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_2drive);

	ctrl_info->max_write_raid_1_10_3drive =
		pqi_aio_limit_to_bytes(
			&buffer->aio_subpage.max_write_raid_1_10_3drive);

error:
	kfree(buffer);

	return rc;
}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}

#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strscpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version));
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

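	/* Encode the time as BCD: hour, minute, second, pad, month, day, century, year. */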
	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length =
		get_unaligned_be32(&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	int rc;
	unsigned int i;
	u8 rpl_response_format;
	u32 num_physicals;
	void *rpl_list;
	struct report_lun_header *rpl_header;
	struct report_phys_lun_8byte_wwid_list *rpl_8byte_wwid_list;
	struct report_phys_lun_16byte_wwid_list *rpl_16byte_wwid_list;

	rc = pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, &rpl_list);
	if (rc)
		return rc;

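	/*
	 * If the controller supports extended RPL format 4/5 and the response
	 * is format 4, hand back the 16-byte WWID list as-is; otherwise fall
	 * through and convert the 8-byte WWID entries to the 16-byte form
	 * used by the rest of the driver.
	 */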
	if (ctrl_info->rpl_extended_format_4_5_supported) {
		rpl_header = rpl_list;
		rpl_response_format = rpl_header->flags & CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_MASK;
		if (rpl_response_format == CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_4) {
			*buffer = rpl_list;
			return 0;
		} else if (rpl_response_format != CISS_REPORT_PHYS_FLAG_EXTENDED_FORMAT_2) {
			dev_err(&ctrl_info->pci_dev->dev,
				"RPL returned unsupported data format %u\n",
				rpl_response_format);
			return -EINVAL;
		} else {
			dev_warn(&ctrl_info->pci_dev->dev,
				"RPL returned extended format 2 instead of 4\n");
		}
	}

	rpl_8byte_wwid_list = rpl_list;
	num_physicals = get_unaligned_be32(&rpl_8byte_wwid_list->header.list_length) / sizeof(rpl_8byte_wwid_list->lun_entries[0]);

	rpl_16byte_wwid_list = kmalloc(struct_size(rpl_16byte_wwid_list, lun_entries,
		num_physicals), GFP_KERNEL);
	if (!rpl_16byte_wwid_list)
		return -ENOMEM;

	put_unaligned_be32(num_physicals * sizeof(struct report_phys_lun_16byte_wwid),
		&rpl_16byte_wwid_list->header.list_length);
	rpl_16byte_wwid_list->header.flags = rpl_8byte_wwid_list->header.flags;

	for (i = 0; i < num_physicals; i++) {
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].lunid, &rpl_8byte_wwid_list->lun_entries[i].lunid, sizeof(rpl_8byte_wwid_list->lun_entries[i].lunid));
		memcpy(&rpl_16byte_wwid_list->lun_entries[i].wwid[0], &rpl_8byte_wwid_list->lun_entries[i].wwid, sizeof(rpl_8byte_wwid_list->lun_entries[i].wwid));
		memset(&rpl_16byte_wwid_list->lun_entries[i].wwid[8], 0, 8);
		rpl_16byte_wwid_list->lun_entries[i].device_type = rpl_8byte_wwid_list->lun_entries[i].device_type;
		rpl_16byte_wwid_list->lun_entries[i].device_flags = rpl_8byte_wwid_list->lun_entries[i].device_flags;
		rpl_16byte_wwid_list->lun_entries[i].lun_count = rpl_8byte_wwid_list->lun_entries[i].lun_count;
		rpl_16byte_wwid_list->lun_entries[i].redundant_paths = rpl_8byte_wwid_list->lun_entries[i].redundant_paths;
		rpl_16byte_wwid_list->lun_entries[i].aio_handle = rpl_8byte_wwid_list->lun_entries[i].aio_handle;
	}

	kfree(rpl_8byte_wwid_list);
	*buffer = rpl_16byte_wwid_list;

	return 0;
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_list *internal_logdev_list;
	struct report_log_lun_list *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list
	 * by adding a list entry that is all zeros.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_list *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_TRIPLE) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(Triple) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map), 0, NULL);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size, 0, NULL);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"requested %u bytes, received %u bytes\n",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			rc = -EINVAL;
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

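/*
 * Set the volume's limit for encrypted transfers based on the drive-type mix
 * reported for the logical volume; the SAS/SATA and NVMe limits come from the
 * controller's AIO subpage (see pqi_get_advanced_raid_bypass_config()).
 */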
static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (!ctrl_info->lv_drive_type_mix_valid) {
		device->max_transfer_encrypted = ~0;
		return;
	}

	switch (LV_GET_DRIVE_TYPE_MIX(device->scsi3addr)) {
	case LV_DRIVE_TYPE_MIX_SAS_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_HDD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_OR_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_SSD_ONLY:
	case LV_DRIVE_TYPE_MIX_SAS_ONLY:
	case LV_DRIVE_TYPE_MIX_SATA_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_sas_sata;
		break;
	case LV_DRIVE_TYPE_MIX_NVME_ONLY:
		device->max_transfer_encrypted =
			ctrl_info->max_transfer_encrypted_nvme;
		break;
	case LV_DRIVE_TYPE_MIX_UNKNOWN:
	case LV_DRIVE_TYPE_MIX_NO_RESTRICTION:
	default:
		device->max_transfer_encrypted =
			min(ctrl_info->max_transfer_encrypted_sas_sata,
				ctrl_info->max_transfer_encrypted_nvme);
		break;
	}
}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0) {
		device->raid_bypass_enabled = true;
		if (get_unaligned_le16(&device->raid_map->flags) &
			RAID_MAP_ENCRYPTION_ENABLED)
			pqi_set_max_transfer_encrypted(ctrl_info, device);
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED	0x01
#define PQI_DEVICE_PHY_MAP_SUPPORTED	0x10
#define PQI_DEVICE_ERASE_IN_PROGRESS	0x10

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
	device->lun_count = id_phys->multi_lun_device_lun_count;
	if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) &&
		id_phys->phy_count)
		device->phy_id =
			id_phys->phy_to_phy_map[device->active_path_index];
	else
		device->phy_id = 0xFF;

	device->ncq_prio_support =
		((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) &
		PQI_DEVICE_NCQ_PRIO_SUPPORTED);

	device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS);

	return 0;
}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 * using the management utility.
 */
static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{
	return device->erase_in_progress;
}

static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys);

	if (rc == 0 && device->lun_count == 0)
		device->lun_count = 1;

	return rc;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS	(20 * 1000)

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{
	int rc;
	int lun;

	for (lun = 0; lun < device->lun_count; lun++) {
		rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun,
			PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS);
		if (rc)
			dev_err(&ctrl_info->pci_dev->dev,
				"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
				ctrl_info->scsi_host->host_no, device->bus,
				device->target, lun,
				atomic_read(&device->scsi_cmds_outstanding[lun]));
	}

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
1921 else 1922 pqi_remove_sas_device(device); 1923 1924 pqi_device_remove_start(device); 1925 } 1926 1927 /* Assumes the SCSI device list lock is held. */ 1928 1929 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1930 int bus, int target, int lun) 1931 { 1932 struct pqi_scsi_dev *device; 1933 1934 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 1935 if (device->bus == bus && device->target == target && device->lun == lun) 1936 return device; 1937 1938 return NULL; 1939 } 1940 1941 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2) 1942 { 1943 if (dev1->is_physical_device != dev2->is_physical_device) 1944 return false; 1945 1946 if (dev1->is_physical_device) 1947 return memcmp(dev1->wwid, dev2->wwid, sizeof(dev1->wwid)) == 0; 1948 1949 return memcmp(dev1->volume_id, dev2->volume_id, sizeof(dev1->volume_id)) == 0; 1950 } 1951 1952 enum pqi_find_result { 1953 DEVICE_NOT_FOUND, 1954 DEVICE_CHANGED, 1955 DEVICE_SAME, 1956 }; 1957 1958 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1959 struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device) 1960 { 1961 struct pqi_scsi_dev *device; 1962 1963 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 1964 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) { 1965 *matching_device = device; 1966 if (pqi_device_equal(device_to_find, device)) { 1967 if (device_to_find->volume_offline) 1968 return DEVICE_CHANGED; 1969 return DEVICE_SAME; 1970 } 1971 return DEVICE_CHANGED; 1972 } 1973 } 1974 1975 return DEVICE_NOT_FOUND; 1976 } 1977 1978 static inline const char *pqi_device_type(struct pqi_scsi_dev *device) 1979 { 1980 if (device->is_expander_smp_device) 1981 return "Enclosure SMP "; 1982 1983 return scsi_device_type(device->devtype); 1984 } 1985 1986 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1987 1988 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1989 char *action, struct pqi_scsi_dev *device) 1990 { 1991 ssize_t count; 1992 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1993 1994 count = scnprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1995 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1996 1997 if (device->target_lun_valid) 1998 count += scnprintf(buffer + count, 1999 PQI_DEV_INFO_BUFFER_LENGTH - count, 2000 "%d:%d", 2001 device->target, 2002 device->lun); 2003 else 2004 count += scnprintf(buffer + count, 2005 PQI_DEV_INFO_BUFFER_LENGTH - count, 2006 "-:-"); 2007 2008 if (pqi_is_logical_device(device)) 2009 count += scnprintf(buffer + count, 2010 PQI_DEV_INFO_BUFFER_LENGTH - count, 2011 " %08x%08x", 2012 *((u32 *)&device->scsi3addr), 2013 *((u32 *)&device->scsi3addr[4])); 2014 else 2015 count += scnprintf(buffer + count, 2016 PQI_DEV_INFO_BUFFER_LENGTH - count, 2017 " %016llx%016llx", 2018 get_unaligned_be64(&device->wwid[0]), 2019 get_unaligned_be64(&device->wwid[8])); 2020 2021 count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 2022 " %s %.8s %.16s ", 2023 pqi_device_type(device), 2024 device->vendor, 2025 device->model); 2026 2027 if (pqi_is_logical_device(device)) { 2028 if (device->devtype == TYPE_DISK) 2029 count += scnprintf(buffer + count, 2030 PQI_DEV_INFO_BUFFER_LENGTH - count, 2031 "SSDSmartPathCap%c En%c %-12s", 2032 device->raid_bypass_configured ? '+' : '-', 2033 device->raid_bypass_enabled ? 
'+' : '-', 2034 pqi_raid_level_to_string(device->raid_level)); 2035 } else { 2036 count += scnprintf(buffer + count, 2037 PQI_DEV_INFO_BUFFER_LENGTH - count, 2038 "AIO%c", device->aio_enabled ? '+' : '-'); 2039 if (device->devtype == TYPE_DISK || 2040 device->devtype == TYPE_ZBC) 2041 count += scnprintf(buffer + count, 2042 PQI_DEV_INFO_BUFFER_LENGTH - count, 2043 " qd=%-6d", device->queue_depth); 2044 } 2045 2046 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 2047 } 2048 2049 static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2) 2050 { 2051 u32 raid_map1_size; 2052 u32 raid_map2_size; 2053 2054 if (raid_map1 == NULL || raid_map2 == NULL) 2055 return raid_map1 == raid_map2; 2056 2057 raid_map1_size = get_unaligned_le32(&raid_map1->structure_size); 2058 raid_map2_size = get_unaligned_le32(&raid_map2->structure_size); 2059 2060 if (raid_map1_size != raid_map2_size) 2061 return false; 2062 2063 return memcmp(raid_map1, raid_map2, raid_map1_size) == 0; 2064 } 2065 2066 /* Assumes the SCSI device list lock is held. */ 2067 2068 static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, 2069 struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device) 2070 { 2071 existing_device->device_type = new_device->device_type; 2072 existing_device->bus = new_device->bus; 2073 if (new_device->target_lun_valid) { 2074 existing_device->target = new_device->target; 2075 existing_device->lun = new_device->lun; 2076 existing_device->target_lun_valid = true; 2077 } 2078 2079 /* By definition, the scsi3addr and wwid fields are already the same. */ 2080 2081 existing_device->is_physical_device = new_device->is_physical_device; 2082 memcpy(existing_device->vendor, new_device->vendor, sizeof(existing_device->vendor)); 2083 memcpy(existing_device->model, new_device->model, sizeof(existing_device->model)); 2084 existing_device->sas_address = new_device->sas_address; 2085 existing_device->queue_depth = new_device->queue_depth; 2086 existing_device->device_offline = false; 2087 existing_device->lun_count = new_device->lun_count; 2088 2089 if (pqi_is_logical_device(existing_device)) { 2090 existing_device->is_external_raid_device = new_device->is_external_raid_device; 2091 2092 if (existing_device->devtype == TYPE_DISK) { 2093 existing_device->raid_level = new_device->raid_level; 2094 existing_device->volume_status = new_device->volume_status; 2095 memset(existing_device->next_bypass_group, 0, sizeof(existing_device->next_bypass_group)); 2096 if (!pqi_raid_maps_equal(existing_device->raid_map, new_device->raid_map)) { 2097 kfree(existing_device->raid_map); 2098 existing_device->raid_map = new_device->raid_map; 2099 /* To prevent this from being freed later. 
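 * (The raid_map is now owned by existing_device; clearing the pointer keeps pqi_free_device() from freeing it when new_device is released.)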
*/ 2100 new_device->raid_map = NULL; 2101 } 2102 existing_device->raid_bypass_configured = new_device->raid_bypass_configured; 2103 existing_device->raid_bypass_enabled = new_device->raid_bypass_enabled; 2104 } 2105 } else { 2106 existing_device->aio_enabled = new_device->aio_enabled; 2107 existing_device->aio_handle = new_device->aio_handle; 2108 existing_device->is_expander_smp_device = new_device->is_expander_smp_device; 2109 existing_device->active_path_index = new_device->active_path_index; 2110 existing_device->phy_id = new_device->phy_id; 2111 existing_device->path_map = new_device->path_map; 2112 existing_device->bay = new_device->bay; 2113 existing_device->box_index = new_device->box_index; 2114 existing_device->phys_box_on_bus = new_device->phys_box_on_bus; 2115 existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; 2116 memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); 2117 memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); 2118 } 2119 } 2120 2121 static inline void pqi_free_device(struct pqi_scsi_dev *device) 2122 { 2123 if (device) { 2124 kfree(device->raid_map); 2125 kfree(device); 2126 } 2127 } 2128 2129 /* 2130 * Called when exposing a new device to the OS fails in order to re-adjust 2131 * our internal SCSI device list to match the SCSI ML's view. 2132 */ 2133 2134 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 2135 struct pqi_scsi_dev *device) 2136 { 2137 unsigned long flags; 2138 2139 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2140 list_del(&device->scsi_device_list_entry); 2141 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2142 2143 /* Allow the device structure to be freed later. */ 2144 device->keep_device = false; 2145 } 2146 2147 static inline bool pqi_is_device_added(struct pqi_scsi_dev *device) 2148 { 2149 if (device->is_expander_smp_device) 2150 return device->sas_port != NULL; 2151 2152 return device->sdev != NULL; 2153 } 2154 2155 static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device) 2156 { 2157 unsigned int lun; 2158 struct pqi_tmf_work *tmf_work; 2159 2160 for (lun = 0, tmf_work = device->tmf_work; lun < PQI_MAX_LUNS_PER_DEVICE; lun++, tmf_work++) 2161 INIT_WORK(&tmf_work->work_struct, pqi_tmf_worker); 2162 } 2163 2164 static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device) 2165 { 2166 if (pqi_device_in_remove(device)) 2167 return false; 2168 2169 if (device->sdev == NULL) 2170 return false; 2171 2172 if (!scsi_device_online(device->sdev)) 2173 return false; 2174 2175 return device->rescan; 2176 } 2177 2178 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 2179 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 2180 { 2181 int rc; 2182 unsigned int i; 2183 unsigned long flags; 2184 enum pqi_find_result find_result; 2185 struct pqi_scsi_dev *device; 2186 struct pqi_scsi_dev *next; 2187 struct pqi_scsi_dev *matching_device; 2188 LIST_HEAD(add_list); 2189 LIST_HEAD(delete_list); 2190 2191 /* 2192 * The idea here is to do as little work as possible while holding the 2193 * spinlock. That's why we go to great pains to defer anything other 2194 * than updating the internal device list until after we release the 2195 * spinlock. 2196 */ 2197 2198 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2199 2200 /* Assume that all devices in the existing list have gone away. 
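 * Devices rediscovered in the loop below have device_gone cleared again; anything still marked once the pass completes is moved to the delete list.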
*/ 2201 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 2202 device->device_gone = true; 2203 2204 for (i = 0; i < num_new_devices; i++) { 2205 device = new_device_list[i]; 2206 2207 find_result = pqi_scsi_find_entry(ctrl_info, device, 2208 &matching_device); 2209 2210 switch (find_result) { 2211 case DEVICE_SAME: 2212 /* 2213 * The newly found device is already in the existing 2214 * device list. 2215 */ 2216 device->new_device = false; 2217 matching_device->device_gone = false; 2218 pqi_scsi_update_device(ctrl_info, matching_device, device); 2219 break; 2220 case DEVICE_NOT_FOUND: 2221 /* 2222 * The newly found device is NOT in the existing device 2223 * list. 2224 */ 2225 device->new_device = true; 2226 break; 2227 case DEVICE_CHANGED: 2228 /* 2229 * The original device has gone away and we need to add 2230 * the new device. 2231 */ 2232 device->new_device = true; 2233 break; 2234 } 2235 } 2236 2237 /* Process all devices that have gone away. */ 2238 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 2239 scsi_device_list_entry) { 2240 if (device->device_gone) { 2241 list_del(&device->scsi_device_list_entry); 2242 list_add_tail(&device->delete_list_entry, &delete_list); 2243 } 2244 } 2245 2246 /* Process all new devices. */ 2247 for (i = 0; i < num_new_devices; i++) { 2248 device = new_device_list[i]; 2249 if (!device->new_device) 2250 continue; 2251 if (device->volume_offline) 2252 continue; 2253 list_add_tail(&device->scsi_device_list_entry, 2254 &ctrl_info->scsi_device_list); 2255 list_add_tail(&device->add_list_entry, &add_list); 2256 /* To prevent this device structure from being freed later. */ 2257 device->keep_device = true; 2258 pqi_init_device_tmf_work(device); 2259 } 2260 2261 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2262 2263 /* 2264 * If OFA is in progress and there are devices that need to be deleted, 2265 * allow any pending reset operations to continue and unblock any SCSI 2266 * requests before removal. 2267 */ 2268 if (pqi_ofa_in_progress(ctrl_info)) { 2269 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) 2270 if (pqi_is_device_added(device)) 2271 pqi_device_remove_start(device); 2272 pqi_ctrl_unblock_device_reset(ctrl_info); 2273 pqi_scsi_unblock_requests(ctrl_info); 2274 } 2275 2276 /* Remove all devices that have gone away. */ 2277 list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) { 2278 if (device->volume_offline) { 2279 pqi_dev_info(ctrl_info, "offline", device); 2280 pqi_show_volume_status(ctrl_info, device); 2281 } else { 2282 pqi_dev_info(ctrl_info, "removed", device); 2283 } 2284 if (pqi_is_device_added(device)) 2285 pqi_remove_device(ctrl_info, device); 2286 list_del(&device->delete_list_entry); 2287 pqi_free_device(device); 2288 } 2289 2290 /* 2291 * Notify the SML of any existing device changes such as; 2292 * queue depth, device size. 
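 * A changed queue depth is applied with scsi_change_queue_depth(), and volumes flagged for rescan are re-probed with scsi_rescan_device() so capacity changes are picked up.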
2293 */ 2294 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 2295 if (device->sdev && device->queue_depth != device->advertised_queue_depth) { 2296 device->advertised_queue_depth = device->queue_depth; 2297 scsi_change_queue_depth(device->sdev, device->advertised_queue_depth); 2298 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 2299 if (pqi_volume_rescan_needed(device)) { 2300 device->rescan = false; 2301 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2302 scsi_rescan_device(device->sdev); 2303 } else { 2304 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 2305 } 2306 } 2307 } 2308 2309 /* Expose any new devices. */ 2310 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 2311 if (!pqi_is_device_added(device)) { 2312 rc = pqi_add_device(ctrl_info, device); 2313 if (rc == 0) { 2314 pqi_dev_info(ctrl_info, "added", device); 2315 } else { 2316 dev_warn(&ctrl_info->pci_dev->dev, 2317 "scsi %d:%d:%d:%d addition failed, device not added\n", 2318 ctrl_info->scsi_host->host_no, 2319 device->bus, device->target, 2320 device->lun); 2321 pqi_fixup_botched_add(ctrl_info, device); 2322 } 2323 } 2324 } 2325 2326 } 2327 2328 static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device) 2329 { 2330 /* 2331 * Only support the HBA controller itself as a RAID 2332 * controller. If it's a RAID controller other than 2333 * the HBA itself (an external RAID controller, for 2334 * example), we don't support it. 2335 */ 2336 if (device->device_type == SA_DEVICE_TYPE_CONTROLLER && 2337 !pqi_is_hba_lunid(device->scsi3addr)) 2338 return false; 2339 2340 return true; 2341 } 2342 2343 static inline bool pqi_skip_device(u8 *scsi3addr) 2344 { 2345 /* Ignore all masked devices. 
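 * (pqi_mask_device() marks a LUN by setting the top two bits of byte 3 of its SCSI3 address; MASKED_DEVICE() tests for that marking.)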
*/ 2346 if (MASKED_DEVICE(scsi3addr)) 2347 return true; 2348 2349 return false; 2350 } 2351 2352 static inline void pqi_mask_device(u8 *scsi3addr) 2353 { 2354 scsi3addr[3] |= 0xc0; 2355 } 2356 2357 static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device) 2358 { 2359 if (pqi_is_logical_device(device)) 2360 return false; 2361 2362 return (device->path_map & (device->path_map - 1)) != 0; 2363 } 2364 2365 static inline bool pqi_expose_device(struct pqi_scsi_dev *device) 2366 { 2367 return !device->is_physical_device || !pqi_skip_device(device->scsi3addr); 2368 } 2369 2370 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2371 { 2372 int i; 2373 int rc; 2374 LIST_HEAD(new_device_list_head); 2375 struct report_phys_lun_16byte_wwid_list *physdev_list = NULL; 2376 struct report_log_lun_list *logdev_list = NULL; 2377 struct report_phys_lun_16byte_wwid *phys_lun; 2378 struct report_log_lun *log_lun; 2379 struct bmic_identify_physical_device *id_phys = NULL; 2380 u32 num_physicals; 2381 u32 num_logicals; 2382 struct pqi_scsi_dev **new_device_list = NULL; 2383 struct pqi_scsi_dev *device; 2384 struct pqi_scsi_dev *next; 2385 unsigned int num_new_devices; 2386 unsigned int num_valid_devices; 2387 bool is_physical_device; 2388 u8 *scsi3addr; 2389 unsigned int physical_index; 2390 unsigned int logical_index; 2391 static char *out_of_memory_msg = 2392 "failed to allocate memory, device discovery stopped"; 2393 2394 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 2395 if (rc) 2396 goto out; 2397 2398 if (physdev_list) 2399 num_physicals = 2400 get_unaligned_be32(&physdev_list->header.list_length) 2401 / sizeof(physdev_list->lun_entries[0]); 2402 else 2403 num_physicals = 0; 2404 2405 if (logdev_list) 2406 num_logicals = 2407 get_unaligned_be32(&logdev_list->header.list_length) 2408 / sizeof(logdev_list->lun_entries[0]); 2409 else 2410 num_logicals = 0; 2411 2412 if (num_physicals) { 2413 /* 2414 * We need this buffer for calls to pqi_get_physical_disk_info() 2415 * below. We allocate it here instead of inside 2416 * pqi_get_physical_disk_info() because it's a fairly large 2417 * buffer. 
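 * The buffer is allocated once here and reused for every physical device in the discovery loop below, where it is passed down through pqi_get_device_info() to pqi_get_physical_device_info().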
2418 */ 2419 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 2420 if (!id_phys) { 2421 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2422 out_of_memory_msg); 2423 rc = -ENOMEM; 2424 goto out; 2425 } 2426 2427 if (pqi_hide_vsep) { 2428 for (i = num_physicals - 1; i >= 0; i--) { 2429 phys_lun = &physdev_list->lun_entries[i]; 2430 if (CISS_GET_DRIVE_NUMBER(phys_lun->lunid) == PQI_VSEP_CISS_BTL) { 2431 pqi_mask_device(phys_lun->lunid); 2432 break; 2433 } 2434 } 2435 } 2436 } 2437 2438 if (num_logicals && 2439 (logdev_list->header.flags & CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX)) 2440 ctrl_info->lv_drive_type_mix_valid = true; 2441 2442 num_new_devices = num_physicals + num_logicals; 2443 2444 new_device_list = kmalloc_array(num_new_devices, 2445 sizeof(*new_device_list), 2446 GFP_KERNEL); 2447 if (!new_device_list) { 2448 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 2449 rc = -ENOMEM; 2450 goto out; 2451 } 2452 2453 for (i = 0; i < num_new_devices; i++) { 2454 device = kzalloc(sizeof(*device), GFP_KERNEL); 2455 if (!device) { 2456 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2457 out_of_memory_msg); 2458 rc = -ENOMEM; 2459 goto out; 2460 } 2461 list_add_tail(&device->new_device_list_entry, 2462 &new_device_list_head); 2463 } 2464 2465 device = NULL; 2466 num_valid_devices = 0; 2467 physical_index = 0; 2468 logical_index = 0; 2469 2470 for (i = 0; i < num_new_devices; i++) { 2471 2472 if ((!pqi_expose_ld_first && i < num_physicals) || 2473 (pqi_expose_ld_first && i >= num_logicals)) { 2474 is_physical_device = true; 2475 phys_lun = &physdev_list->lun_entries[physical_index++]; 2476 log_lun = NULL; 2477 scsi3addr = phys_lun->lunid; 2478 } else { 2479 is_physical_device = false; 2480 phys_lun = NULL; 2481 log_lun = &logdev_list->lun_entries[logical_index++]; 2482 scsi3addr = log_lun->lunid; 2483 } 2484 2485 if (is_physical_device && pqi_skip_device(scsi3addr)) 2486 continue; 2487 2488 if (device) 2489 device = list_next_entry(device, new_device_list_entry); 2490 else 2491 device = list_first_entry(&new_device_list_head, 2492 struct pqi_scsi_dev, new_device_list_entry); 2493 2494 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 2495 device->is_physical_device = is_physical_device; 2496 if (is_physical_device) { 2497 device->device_type = phys_lun->device_type; 2498 if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP) 2499 device->is_expander_smp_device = true; 2500 } else { 2501 device->is_external_raid_device = 2502 pqi_is_external_raid_addr(scsi3addr); 2503 } 2504 2505 if (!pqi_is_supported_device(device)) 2506 continue; 2507 2508 /* Gather information about the device. */ 2509 rc = pqi_get_device_info(ctrl_info, device, id_phys); 2510 if (rc == -ENOMEM) { 2511 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 2512 out_of_memory_msg); 2513 goto out; 2514 } 2515 if (rc) { 2516 if (device->is_physical_device) 2517 dev_warn(&ctrl_info->pci_dev->dev, 2518 "obtaining device info failed, skipping physical device %016llx%016llx\n", 2519 get_unaligned_be64(&phys_lun->wwid[0]), 2520 get_unaligned_be64(&phys_lun->wwid[8])); 2521 else 2522 dev_warn(&ctrl_info->pci_dev->dev, 2523 "obtaining device info failed, skipping logical device %08x%08x\n", 2524 *((u32 *)&device->scsi3addr), 2525 *((u32 *)&device->scsi3addr[4])); 2526 rc = 0; 2527 continue; 2528 } 2529 2530 /* Do not present disks that the OS cannot fully probe. 
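 * (For example, a drive with a sanitize/erase still in progress fails READ/WRITE with a check condition, so partition scanning would stall; see pqi_keep_device_offline() and the comment above it.)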
*/ 2531 if (pqi_keep_device_offline(device)) 2532 continue; 2533 2534 pqi_assign_bus_target_lun(device); 2535 2536 if (device->is_physical_device) { 2537 memcpy(device->wwid, phys_lun->wwid, sizeof(device->wwid)); 2538 if ((phys_lun->device_flags & 2539 CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && 2540 phys_lun->aio_handle) { 2541 device->aio_enabled = true; 2542 device->aio_handle = 2543 phys_lun->aio_handle; 2544 } 2545 } else { 2546 memcpy(device->volume_id, log_lun->volume_id, 2547 sizeof(device->volume_id)); 2548 } 2549 2550 device->sas_address = get_unaligned_be64(&device->wwid[0]); 2551 2552 new_device_list[num_valid_devices++] = device; 2553 } 2554 2555 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 2556 2557 out: 2558 list_for_each_entry_safe(device, next, &new_device_list_head, 2559 new_device_list_entry) { 2560 if (device->keep_device) 2561 continue; 2562 list_del(&device->new_device_list_entry); 2563 pqi_free_device(device); 2564 } 2565 2566 kfree(new_device_list); 2567 kfree(physdev_list); 2568 kfree(logdev_list); 2569 kfree(id_phys); 2570 2571 return rc; 2572 } 2573 2574 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 2575 { 2576 int rc; 2577 int mutex_acquired; 2578 2579 if (pqi_ctrl_offline(ctrl_info)) 2580 return -ENXIO; 2581 2582 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 2583 2584 if (!mutex_acquired) { 2585 if (pqi_ctrl_scan_blocked(ctrl_info)) 2586 return -EBUSY; 2587 pqi_schedule_rescan_worker_delayed(ctrl_info); 2588 return -EINPROGRESS; 2589 } 2590 2591 rc = pqi_update_scsi_devices(ctrl_info); 2592 if (rc && !pqi_ctrl_scan_blocked(ctrl_info)) 2593 pqi_schedule_rescan_worker_delayed(ctrl_info); 2594 2595 mutex_unlock(&ctrl_info->scan_mutex); 2596 2597 return rc; 2598 } 2599 2600 static void pqi_scan_start(struct Scsi_Host *shost) 2601 { 2602 struct pqi_ctrl_info *ctrl_info; 2603 2604 ctrl_info = shost_to_hba(shost); 2605 2606 pqi_scan_scsi_devices(ctrl_info); 2607 } 2608 2609 /* Returns TRUE if scan is finished. */ 2610 2611 static int pqi_scan_finished(struct Scsi_Host *shost, 2612 unsigned long elapsed_time) 2613 { 2614 struct pqi_ctrl_info *ctrl_info; 2615 2616 ctrl_info = shost_priv(shost); 2617 2618 return !mutex_is_locked(&ctrl_info->scan_mutex); 2619 } 2620 2621 static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info, 2622 struct raid_map *raid_map, u64 first_block) 2623 { 2624 u32 volume_blk_size; 2625 2626 /* 2627 * Set the encryption tweak values based on logical block address. 2628 * If the block size is 512, the tweak value is equal to the LBA. 2629 * For other block sizes, tweak value is (LBA * block size) / 512. 2630 */ 2631 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2632 if (volume_blk_size != 512) 2633 first_block = (first_block * volume_blk_size) / 512; 2634 2635 encryption_info->data_encryption_key_index = 2636 get_unaligned_le16(&raid_map->data_encryption_key_index); 2637 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2638 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2639 } 2640 2641 /* 2642 * Attempt to perform RAID bypass mapping for a logical volume I/O. 
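 * Bypass sends the request directly to the member drive(s) computed from the volume's RAID map; any request that cannot be mapped safely returns PQI_RAID_BYPASS_INELIGIBLE and is issued down the normal RAID path instead.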
2643 */ 2644 2645 static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info, 2646 struct pqi_scsi_dev_raid_map_data *rmd) 2647 { 2648 bool is_supported = true; 2649 2650 switch (rmd->raid_level) { 2651 case SA_RAID_0: 2652 break; 2653 case SA_RAID_1: 2654 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2655 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive)) 2656 is_supported = false; 2657 break; 2658 case SA_RAID_TRIPLE: 2659 if (rmd->is_write && (!ctrl_info->enable_r1_writes || 2660 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive)) 2661 is_supported = false; 2662 break; 2663 case SA_RAID_5: 2664 if (rmd->is_write && (!ctrl_info->enable_r5_writes || 2665 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2666 is_supported = false; 2667 break; 2668 case SA_RAID_6: 2669 if (rmd->is_write && (!ctrl_info->enable_r6_writes || 2670 rmd->data_length > ctrl_info->max_write_raid_5_6)) 2671 is_supported = false; 2672 break; 2673 default: 2674 is_supported = false; 2675 break; 2676 } 2677 2678 return is_supported; 2679 } 2680 2681 #define PQI_RAID_BYPASS_INELIGIBLE 1 2682 2683 static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd, 2684 struct pqi_scsi_dev_raid_map_data *rmd) 2685 { 2686 /* Check for valid opcode, get LBA and block count. */ 2687 switch (scmd->cmnd[0]) { 2688 case WRITE_6: 2689 rmd->is_write = true; 2690 fallthrough; 2691 case READ_6: 2692 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2693 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2694 rmd->block_cnt = (u32)scmd->cmnd[4]; 2695 if (rmd->block_cnt == 0) 2696 rmd->block_cnt = 256; 2697 break; 2698 case WRITE_10: 2699 rmd->is_write = true; 2700 fallthrough; 2701 case READ_10: 2702 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2703 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2704 break; 2705 case WRITE_12: 2706 rmd->is_write = true; 2707 fallthrough; 2708 case READ_12: 2709 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2710 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2711 break; 2712 case WRITE_16: 2713 rmd->is_write = true; 2714 fallthrough; 2715 case READ_16: 2716 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]); 2717 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2718 break; 2719 default: 2720 /* Process via normal I/O path. */ 2721 return PQI_RAID_BYPASS_INELIGIBLE; 2722 } 2723 2724 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length); 2725 2726 return 0; 2727 } 2728 2729 static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info, 2730 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map) 2731 { 2732 #if BITS_PER_LONG == 32 2733 u64 tmpdiv; 2734 #endif 2735 2736 rmd->last_block = rmd->first_block + rmd->block_cnt - 1; 2737 2738 /* Check for invalid block or wraparound. */ 2739 if (rmd->last_block >= 2740 get_unaligned_le64(&raid_map->volume_blk_cnt) || 2741 rmd->last_block < rmd->first_block) 2742 return PQI_RAID_BYPASS_INELIGIBLE; 2743 2744 rmd->data_disks_per_row = 2745 get_unaligned_le16(&raid_map->data_disks_per_row); 2746 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size); 2747 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2748 2749 /* Calculate stripe information for the request. 
*/ 2750 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size; 2751 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2752 return PQI_RAID_BYPASS_INELIGIBLE; 2753 #if BITS_PER_LONG == 32 2754 tmpdiv = rmd->first_block; 2755 do_div(tmpdiv, rmd->blocks_per_row); 2756 rmd->first_row = tmpdiv; 2757 tmpdiv = rmd->last_block; 2758 do_div(tmpdiv, rmd->blocks_per_row); 2759 rmd->last_row = tmpdiv; 2760 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row)); 2761 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row)); 2762 tmpdiv = rmd->first_row_offset; 2763 do_div(tmpdiv, rmd->strip_size); 2764 rmd->first_column = tmpdiv; 2765 tmpdiv = rmd->last_row_offset; 2766 do_div(tmpdiv, rmd->strip_size); 2767 rmd->last_column = tmpdiv; 2768 #else 2769 rmd->first_row = rmd->first_block / rmd->blocks_per_row; 2770 rmd->last_row = rmd->last_block / rmd->blocks_per_row; 2771 rmd->first_row_offset = (u32)(rmd->first_block - 2772 (rmd->first_row * rmd->blocks_per_row)); 2773 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * 2774 rmd->blocks_per_row)); 2775 rmd->first_column = rmd->first_row_offset / rmd->strip_size; 2776 rmd->last_column = rmd->last_row_offset / rmd->strip_size; 2777 #endif 2778 2779 /* If this isn't a single row/column then give to the controller. */ 2780 if (rmd->first_row != rmd->last_row || 2781 rmd->first_column != rmd->last_column) 2782 return PQI_RAID_BYPASS_INELIGIBLE; 2783 2784 /* Proceeding with driver mapping. */ 2785 rmd->total_disks_per_row = rmd->data_disks_per_row + 2786 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2787 rmd->map_row = ((u32)(rmd->first_row >> 2788 raid_map->parity_rotation_shift)) % 2789 get_unaligned_le16(&raid_map->row_cnt); 2790 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) + 2791 rmd->first_column; 2792 2793 return 0; 2794 } 2795 2796 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd, 2797 struct raid_map *raid_map) 2798 { 2799 #if BITS_PER_LONG == 32 2800 u64 tmpdiv; 2801 #endif 2802 2803 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */ 2804 return PQI_RAID_BYPASS_INELIGIBLE; 2805 2806 /* RAID 50/60 */ 2807 /* Verify first and last block are in same RAID group. */ 2808 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count; 2809 #if BITS_PER_LONG == 32 2810 tmpdiv = rmd->first_block; 2811 rmd->first_group = do_div(tmpdiv, rmd->stripesize); 2812 tmpdiv = rmd->first_group; 2813 do_div(tmpdiv, rmd->blocks_per_row); 2814 rmd->first_group = tmpdiv; 2815 tmpdiv = rmd->last_block; 2816 rmd->last_group = do_div(tmpdiv, rmd->stripesize); 2817 tmpdiv = rmd->last_group; 2818 do_div(tmpdiv, rmd->blocks_per_row); 2819 rmd->last_group = tmpdiv; 2820 #else 2821 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row; 2822 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row; 2823 #endif 2824 if (rmd->first_group != rmd->last_group) 2825 return PQI_RAID_BYPASS_INELIGIBLE; 2826 2827 /* Verify request is in a single row of RAID 5/6. 
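 * (Here a "row" spans stripesize = blocks_per_row * layout_map_count blocks; a request that straddles two such stripes is left to the controller.)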
*/ 2828 #if BITS_PER_LONG == 32 2829 tmpdiv = rmd->first_block; 2830 do_div(tmpdiv, rmd->stripesize); 2831 rmd->first_row = tmpdiv; 2832 rmd->r5or6_first_row = tmpdiv; 2833 tmpdiv = rmd->last_block; 2834 do_div(tmpdiv, rmd->stripesize); 2835 rmd->r5or6_last_row = tmpdiv; 2836 #else 2837 rmd->first_row = rmd->r5or6_first_row = 2838 rmd->first_block / rmd->stripesize; 2839 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize; 2840 #endif 2841 if (rmd->r5or6_first_row != rmd->r5or6_last_row) 2842 return PQI_RAID_BYPASS_INELIGIBLE; 2843 2844 /* Verify request is in a single column. */ 2845 #if BITS_PER_LONG == 32 2846 tmpdiv = rmd->first_block; 2847 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize); 2848 tmpdiv = rmd->first_row_offset; 2849 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row); 2850 rmd->r5or6_first_row_offset = rmd->first_row_offset; 2851 tmpdiv = rmd->last_block; 2852 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize); 2853 tmpdiv = rmd->r5or6_last_row_offset; 2854 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row); 2855 tmpdiv = rmd->r5or6_first_row_offset; 2856 do_div(tmpdiv, rmd->strip_size); 2857 rmd->first_column = rmd->r5or6_first_column = tmpdiv; 2858 tmpdiv = rmd->r5or6_last_row_offset; 2859 do_div(tmpdiv, rmd->strip_size); 2860 rmd->r5or6_last_column = tmpdiv; 2861 #else 2862 rmd->first_row_offset = rmd->r5or6_first_row_offset = 2863 (u32)((rmd->first_block % rmd->stripesize) % 2864 rmd->blocks_per_row); 2865 2866 rmd->r5or6_last_row_offset = 2867 (u32)((rmd->last_block % rmd->stripesize) % 2868 rmd->blocks_per_row); 2869 2870 rmd->first_column = 2871 rmd->r5or6_first_row_offset / rmd->strip_size; 2872 rmd->r5or6_first_column = rmd->first_column; 2873 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size; 2874 #endif 2875 if (rmd->r5or6_first_column != rmd->r5or6_last_column) 2876 return PQI_RAID_BYPASS_INELIGIBLE; 2877 2878 /* Request is eligible. */ 2879 rmd->map_row = 2880 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) % 2881 get_unaligned_le16(&raid_map->row_cnt); 2882 2883 rmd->map_index = (rmd->first_group * 2884 (get_unaligned_le16(&raid_map->row_cnt) * 2885 rmd->total_disks_per_row)) + 2886 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column; 2887 2888 if (rmd->is_write) { 2889 u32 index; 2890 2891 /* 2892 * p_parity_it_nexus and q_parity_it_nexus are pointers to the 2893 * parity entries inside the device's raid_map. 2894 * 2895 * A device's RAID map is bounded by: number of RAID disks squared. 2896 * 2897 * The devices RAID map size is checked during device 2898 * initialization. 2899 */ 2900 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row); 2901 index *= rmd->total_disks_per_row; 2902 index -= get_unaligned_le16(&raid_map->metadata_disks_per_row); 2903 2904 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle; 2905 if (rmd->raid_level == SA_RAID_6) { 2906 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle; 2907 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1]; 2908 } 2909 #if BITS_PER_LONG == 32 2910 tmpdiv = rmd->first_block; 2911 do_div(tmpdiv, rmd->blocks_per_row); 2912 rmd->row = tmpdiv; 2913 #else 2914 rmd->row = rmd->first_block / rmd->blocks_per_row; 2915 #endif 2916 } 2917 2918 return 0; 2919 } 2920 2921 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd) 2922 { 2923 /* Build the new CDB for the physical disk I/O. */ 2924 if (rmd->disk_block > 0xffffffff) { 2925 rmd->cdb[0] = rmd->is_write ? 
WRITE_16 : READ_16; 2926 rmd->cdb[1] = 0; 2927 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]); 2928 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]); 2929 rmd->cdb[14] = 0; 2930 rmd->cdb[15] = 0; 2931 rmd->cdb_length = 16; 2932 } else { 2933 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10; 2934 rmd->cdb[1] = 0; 2935 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]); 2936 rmd->cdb[6] = 0; 2937 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]); 2938 rmd->cdb[9] = 0; 2939 rmd->cdb_length = 10; 2940 } 2941 } 2942 2943 static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map, 2944 struct pqi_scsi_dev_raid_map_data *rmd) 2945 { 2946 u32 index; 2947 u32 group; 2948 2949 group = rmd->map_index / rmd->data_disks_per_row; 2950 2951 index = rmd->map_index - (group * rmd->data_disks_per_row); 2952 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle; 2953 index += rmd->data_disks_per_row; 2954 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle; 2955 if (rmd->layout_map_count > 2) { 2956 index += rmd->data_disks_per_row; 2957 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle; 2958 } 2959 2960 rmd->num_it_nexus_entries = rmd->layout_map_count; 2961 } 2962 2963 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2964 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2965 struct pqi_queue_group *queue_group) 2966 { 2967 int rc; 2968 struct raid_map *raid_map; 2969 u32 group; 2970 u32 next_bypass_group; 2971 struct pqi_encryption_info *encryption_info_ptr; 2972 struct pqi_encryption_info encryption_info; 2973 struct pqi_scsi_dev_raid_map_data rmd = { 0 }; 2974 2975 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 2976 if (rc) 2977 return PQI_RAID_BYPASS_INELIGIBLE; 2978 2979 rmd.raid_level = device->raid_level; 2980 2981 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd)) 2982 return PQI_RAID_BYPASS_INELIGIBLE; 2983 2984 if (unlikely(rmd.block_cnt == 0)) 2985 return PQI_RAID_BYPASS_INELIGIBLE; 2986 2987 raid_map = device->raid_map; 2988 2989 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map); 2990 if (rc) 2991 return PQI_RAID_BYPASS_INELIGIBLE; 2992 2993 if (device->raid_level == SA_RAID_1 || 2994 device->raid_level == SA_RAID_TRIPLE) { 2995 if (rmd.is_write) { 2996 pqi_calc_aio_r1_nexus(raid_map, &rmd); 2997 } else { 2998 group = device->next_bypass_group[rmd.map_index]; 2999 next_bypass_group = group + 1; 3000 if (next_bypass_group >= rmd.layout_map_count) 3001 next_bypass_group = 0; 3002 device->next_bypass_group[rmd.map_index] = next_bypass_group; 3003 rmd.map_index += group * rmd.data_disks_per_row; 3004 } 3005 } else if ((device->raid_level == SA_RAID_5 || 3006 device->raid_level == SA_RAID_6) && 3007 (rmd.layout_map_count > 1 || rmd.is_write)) { 3008 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map); 3009 if (rc) 3010 return PQI_RAID_BYPASS_INELIGIBLE; 3011 } 3012 3013 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES)) 3014 return PQI_RAID_BYPASS_INELIGIBLE; 3015 3016 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle; 3017 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 3018 rmd.first_row * rmd.strip_size + 3019 (rmd.first_row_offset - rmd.first_column * rmd.strip_size); 3020 rmd.disk_block_cnt = rmd.block_cnt; 3021 3022 /* Handle differing logical/physical block sizes. 
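 * (phys_blk_shift converts volume blocks to drive blocks; illustratively, a 4 KiB volume block size on 512-byte drive sectors gives a shift of 3, so the starting block and the count are both multiplied by 8.)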
*/ 3023 if (raid_map->phys_blk_shift) { 3024 rmd.disk_block <<= raid_map->phys_blk_shift; 3025 rmd.disk_block_cnt <<= raid_map->phys_blk_shift; 3026 } 3027 3028 if (unlikely(rmd.disk_block_cnt > 0xffff)) 3029 return PQI_RAID_BYPASS_INELIGIBLE; 3030 3031 pqi_set_aio_cdb(&rmd); 3032 3033 if (get_unaligned_le16(&raid_map->flags) & RAID_MAP_ENCRYPTION_ENABLED) { 3034 if (rmd.data_length > device->max_transfer_encrypted) 3035 return PQI_RAID_BYPASS_INELIGIBLE; 3036 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block); 3037 encryption_info_ptr = &encryption_info; 3038 } else { 3039 encryption_info_ptr = NULL; 3040 } 3041 3042 if (rmd.is_write) { 3043 switch (device->raid_level) { 3044 case SA_RAID_1: 3045 case SA_RAID_TRIPLE: 3046 return pqi_aio_submit_r1_write_io(ctrl_info, scmd, queue_group, 3047 encryption_info_ptr, device, &rmd); 3048 case SA_RAID_5: 3049 case SA_RAID_6: 3050 return pqi_aio_submit_r56_write_io(ctrl_info, scmd, queue_group, 3051 encryption_info_ptr, device, &rmd); 3052 } 3053 } 3054 3055 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle, 3056 rmd.cdb, rmd.cdb_length, queue_group, 3057 encryption_info_ptr, true, false); 3058 } 3059 3060 #define PQI_STATUS_IDLE 0x0 3061 3062 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 3063 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 3064 3065 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 3066 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 3067 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 3068 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 3069 #define PQI_DEVICE_STATE_ERROR 0x4 3070 3071 #define PQI_MODE_READY_TIMEOUT_SECS 30 3072 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 3073 3074 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 3075 { 3076 struct pqi_device_registers __iomem *pqi_registers; 3077 unsigned long timeout; 3078 u64 signature; 3079 u8 status; 3080 3081 pqi_registers = ctrl_info->pqi_registers; 3082 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 3083 3084 while (1) { 3085 signature = readq(&pqi_registers->signature); 3086 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 3087 sizeof(signature)) == 0) 3088 break; 3089 if (time_after(jiffies, timeout)) { 3090 dev_err(&ctrl_info->pci_dev->dev, 3091 "timed out waiting for PQI signature\n"); 3092 return -ETIMEDOUT; 3093 } 3094 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3095 } 3096 3097 while (1) { 3098 status = readb(&pqi_registers->function_and_status_code); 3099 if (status == PQI_STATUS_IDLE) 3100 break; 3101 if (time_after(jiffies, timeout)) { 3102 dev_err(&ctrl_info->pci_dev->dev, 3103 "timed out waiting for PQI IDLE\n"); 3104 return -ETIMEDOUT; 3105 } 3106 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3107 } 3108 3109 while (1) { 3110 if (readl(&pqi_registers->device_status) == 3111 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 3112 break; 3113 if (time_after(jiffies, timeout)) { 3114 dev_err(&ctrl_info->pci_dev->dev, 3115 "timed out waiting for PQI all registers ready\n"); 3116 return -ETIMEDOUT; 3117 } 3118 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 3119 } 3120 3121 return 0; 3122 } 3123 3124 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 3125 { 3126 struct pqi_scsi_dev *device; 3127 3128 device = io_request->scmd->device->hostdata; 3129 device->raid_bypass_enabled = false; 3130 device->aio_enabled = false; 3131 } 3132 3133 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 3134 { 3135 struct pqi_ctrl_info *ctrl_info; 3136 struct pqi_scsi_dev *device; 3137 3138 device = 
sdev->hostdata; 3139 if (device->device_offline) 3140 return; 3141 3142 device->device_offline = true; 3143 ctrl_info = shost_to_hba(sdev->host); 3144 pqi_schedule_rescan_worker(ctrl_info); 3145 dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n", 3146 path, ctrl_info->scsi_host->host_no, device->bus, 3147 device->target, device->lun); 3148 } 3149 3150 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 3151 { 3152 u8 scsi_status; 3153 u8 host_byte; 3154 struct scsi_cmnd *scmd; 3155 struct pqi_raid_error_info *error_info; 3156 size_t sense_data_length; 3157 int residual_count; 3158 int xfer_count; 3159 struct scsi_sense_hdr sshdr; 3160 3161 scmd = io_request->scmd; 3162 if (!scmd) 3163 return; 3164 3165 error_info = io_request->error_info; 3166 scsi_status = error_info->status; 3167 host_byte = DID_OK; 3168 3169 switch (error_info->data_out_result) { 3170 case PQI_DATA_IN_OUT_GOOD: 3171 break; 3172 case PQI_DATA_IN_OUT_UNDERFLOW: 3173 xfer_count = 3174 get_unaligned_le32(&error_info->data_out_transferred); 3175 residual_count = scsi_bufflen(scmd) - xfer_count; 3176 scsi_set_resid(scmd, residual_count); 3177 if (xfer_count < scmd->underflow) 3178 host_byte = DID_SOFT_ERROR; 3179 break; 3180 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 3181 case PQI_DATA_IN_OUT_ABORTED: 3182 host_byte = DID_ABORT; 3183 break; 3184 case PQI_DATA_IN_OUT_TIMEOUT: 3185 host_byte = DID_TIME_OUT; 3186 break; 3187 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 3188 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 3189 case PQI_DATA_IN_OUT_BUFFER_ERROR: 3190 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 3191 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 3192 case PQI_DATA_IN_OUT_ERROR: 3193 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 3194 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 3195 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 3196 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 3197 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 3198 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 3199 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 3200 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 3201 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 3202 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 3203 default: 3204 host_byte = DID_ERROR; 3205 break; 3206 } 3207 3208 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 3209 if (sense_data_length == 0) 3210 sense_data_length = 3211 get_unaligned_le16(&error_info->response_data_length); 3212 if (sense_data_length) { 3213 if (sense_data_length > sizeof(error_info->data)) 3214 sense_data_length = sizeof(error_info->data); 3215 3216 if (scsi_status == SAM_STAT_CHECK_CONDITION && 3217 scsi_normalize_sense(error_info->data, 3218 sense_data_length, &sshdr) && 3219 sshdr.sense_key == HARDWARE_ERROR && 3220 sshdr.asc == 0x3e) { 3221 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host); 3222 struct pqi_scsi_dev *device = scmd->device->hostdata; 3223 3224 switch (sshdr.ascq) { 3225 case 0x1: /* LOGICAL UNIT FAILURE */ 3226 if (printk_ratelimit()) 3227 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n", 3228 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3229 pqi_take_device_offline(scmd->device, "RAID"); 3230 host_byte = DID_NO_CONNECT; 3231 break; 3232 3233 default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */ 3234 if (printk_ratelimit()) 3235 scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n", 3236 
sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun); 3237 break; 3238 } 3239 } 3240 3241 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3242 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3243 memcpy(scmd->sense_buffer, error_info->data, 3244 sense_data_length); 3245 } 3246 3247 scmd->result = scsi_status; 3248 set_host_byte(scmd, host_byte); 3249 } 3250 3251 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 3252 { 3253 u8 scsi_status; 3254 u8 host_byte; 3255 struct scsi_cmnd *scmd; 3256 struct pqi_aio_error_info *error_info; 3257 size_t sense_data_length; 3258 int residual_count; 3259 int xfer_count; 3260 bool device_offline; 3261 struct pqi_scsi_dev *device; 3262 3263 scmd = io_request->scmd; 3264 error_info = io_request->error_info; 3265 host_byte = DID_OK; 3266 sense_data_length = 0; 3267 device_offline = false; 3268 device = scmd->device->hostdata; 3269 3270 switch (error_info->service_response) { 3271 case PQI_AIO_SERV_RESPONSE_COMPLETE: 3272 scsi_status = error_info->status; 3273 break; 3274 case PQI_AIO_SERV_RESPONSE_FAILURE: 3275 switch (error_info->status) { 3276 case PQI_AIO_STATUS_IO_ABORTED: 3277 scsi_status = SAM_STAT_TASK_ABORTED; 3278 break; 3279 case PQI_AIO_STATUS_UNDERRUN: 3280 scsi_status = SAM_STAT_GOOD; 3281 residual_count = get_unaligned_le32( 3282 &error_info->residual_count); 3283 scsi_set_resid(scmd, residual_count); 3284 xfer_count = scsi_bufflen(scmd) - residual_count; 3285 if (xfer_count < scmd->underflow) 3286 host_byte = DID_SOFT_ERROR; 3287 break; 3288 case PQI_AIO_STATUS_OVERRUN: 3289 scsi_status = SAM_STAT_GOOD; 3290 break; 3291 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 3292 pqi_aio_path_disabled(io_request); 3293 if (pqi_is_multipath_device(device)) { 3294 pqi_device_remove_start(device); 3295 host_byte = DID_NO_CONNECT; 3296 scsi_status = SAM_STAT_CHECK_CONDITION; 3297 } else { 3298 scsi_status = SAM_STAT_GOOD; 3299 io_request->status = -EAGAIN; 3300 } 3301 break; 3302 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 3303 case PQI_AIO_STATUS_INVALID_DEVICE: 3304 if (!io_request->raid_bypass) { 3305 device_offline = true; 3306 pqi_take_device_offline(scmd->device, "AIO"); 3307 host_byte = DID_NO_CONNECT; 3308 } 3309 scsi_status = SAM_STAT_CHECK_CONDITION; 3310 break; 3311 case PQI_AIO_STATUS_IO_ERROR: 3312 default: 3313 scsi_status = SAM_STAT_CHECK_CONDITION; 3314 break; 3315 } 3316 break; 3317 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 3318 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 3319 scsi_status = SAM_STAT_GOOD; 3320 break; 3321 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 3322 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 3323 default: 3324 scsi_status = SAM_STAT_CHECK_CONDITION; 3325 break; 3326 } 3327 3328 if (error_info->data_present) { 3329 sense_data_length = 3330 get_unaligned_le16(&error_info->data_length); 3331 if (sense_data_length) { 3332 if (sense_data_length > sizeof(error_info->data)) 3333 sense_data_length = sizeof(error_info->data); 3334 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 3335 sense_data_length = SCSI_SENSE_BUFFERSIZE; 3336 memcpy(scmd->sense_buffer, error_info->data, 3337 sense_data_length); 3338 } 3339 } 3340 3341 if (device_offline && sense_data_length == 0) 3342 scsi_build_sense(scmd, 0, HARDWARE_ERROR, 0x3e, 0x1); 3343 3344 scmd->result = scsi_status; 3345 set_host_byte(scmd, host_byte); 3346 } 3347 3348 static void pqi_process_io_error(unsigned int iu_type, 3349 struct pqi_io_request *io_request) 3350 { 3351 switch (iu_type) { 3352 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3353 
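/* RAID-path and AIO-path error responses use different error-info layouts (struct pqi_raid_error_info vs. struct pqi_aio_error_info), so each IU type gets its own decode routine. */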
pqi_process_raid_io_error(io_request); 3354 break; 3355 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3356 pqi_process_aio_io_error(io_request); 3357 break; 3358 } 3359 } 3360 3361 static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info, 3362 struct pqi_task_management_response *response) 3363 { 3364 int rc; 3365 3366 switch (response->response_code) { 3367 case SOP_TMF_COMPLETE: 3368 case SOP_TMF_FUNCTION_SUCCEEDED: 3369 rc = 0; 3370 break; 3371 case SOP_TMF_REJECTED: 3372 rc = -EAGAIN; 3373 break; 3374 case SOP_TMF_INCORRECT_LOGICAL_UNIT: 3375 rc = -ENODEV; 3376 break; 3377 default: 3378 rc = -EIO; 3379 break; 3380 } 3381 3382 if (rc) 3383 dev_err(&ctrl_info->pci_dev->dev, 3384 "Task Management Function error: %d (response code: %u)\n", rc, response->response_code); 3385 3386 return rc; 3387 } 3388 3389 static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info, 3390 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 3391 { 3392 pqi_take_ctrl_offline(ctrl_info, ctrl_shutdown_reason); 3393 } 3394 3395 static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group) 3396 { 3397 int num_responses; 3398 pqi_index_t oq_pi; 3399 pqi_index_t oq_ci; 3400 struct pqi_io_request *io_request; 3401 struct pqi_io_response *response; 3402 u16 request_id; 3403 3404 num_responses = 0; 3405 oq_ci = queue_group->oq_ci_copy; 3406 3407 while (1) { 3408 oq_pi = readl(queue_group->oq_pi); 3409 if (oq_pi >= ctrl_info->num_elements_per_oq) { 3410 pqi_invalid_response(ctrl_info, PQI_IO_PI_OUT_OF_RANGE); 3411 dev_err(&ctrl_info->pci_dev->dev, 3412 "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3413 oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci); 3414 return -1; 3415 } 3416 if (oq_pi == oq_ci) 3417 break; 3418 3419 num_responses++; 3420 response = queue_group->oq_element_array + 3421 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3422 3423 request_id = get_unaligned_le16(&response->request_id); 3424 if (request_id >= ctrl_info->max_io_slots) { 3425 pqi_invalid_response(ctrl_info, PQI_INVALID_REQ_ID); 3426 dev_err(&ctrl_info->pci_dev->dev, 3427 "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n", 3428 request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci); 3429 return -1; 3430 } 3431 3432 io_request = &ctrl_info->io_request_pool[request_id]; 3433 if (atomic_read(&io_request->refcount) == 0) { 3434 pqi_invalid_response(ctrl_info, PQI_UNMATCHED_REQ_ID); 3435 dev_err(&ctrl_info->pci_dev->dev, 3436 "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n", 3437 request_id, oq_pi, oq_ci); 3438 return -1; 3439 } 3440 3441 switch (response->header.iu_type) { 3442 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 3443 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 3444 if (io_request->scmd) 3445 io_request->scmd->result = 0; 3446 fallthrough; 3447 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 3448 break; 3449 case PQI_RESPONSE_IU_VENDOR_GENERAL: 3450 io_request->status = 3451 get_unaligned_le16( 3452 &((struct pqi_vendor_general_response *)response)->status); 3453 break; 3454 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 3455 io_request->status = pqi_interpret_task_management_response(ctrl_info, 3456 (void *)response); 3457 break; 3458 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 3459 pqi_aio_path_disabled(io_request); 3460 io_request->status = -EAGAIN; 3461 break; 3462 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 3463 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 3464 
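/* The error_index in the response selects this request's slot in the controller error buffer; each slot is PQI_ERROR_BUFFER_ELEMENT_LENGTH bytes, so e.g. index 3 starts 3 * PQI_ERROR_BUFFER_ELEMENT_LENGTH bytes into the buffer. */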
io_request->error_info = ctrl_info->error_buffer + 3465 (get_unaligned_le16(&response->error_index) * 3466 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 3467 pqi_process_io_error(response->header.iu_type, io_request); 3468 break; 3469 default: 3470 pqi_invalid_response(ctrl_info, PQI_UNEXPECTED_IU_TYPE); 3471 dev_err(&ctrl_info->pci_dev->dev, 3472 "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n", 3473 response->header.iu_type, oq_pi, oq_ci); 3474 return -1; 3475 } 3476 3477 io_request->io_complete_callback(io_request, io_request->context); 3478 3479 /* 3480 * Note that the I/O request structure CANNOT BE TOUCHED after 3481 * returning from the I/O completion callback! 3482 */ 3483 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 3484 } 3485 3486 if (num_responses) { 3487 queue_group->oq_ci_copy = oq_ci; 3488 writel(oq_ci, queue_group->oq_ci); 3489 } 3490 3491 return num_responses; 3492 } 3493 3494 static inline unsigned int pqi_num_elements_free(unsigned int pi, 3495 unsigned int ci, unsigned int elements_in_queue) 3496 { 3497 unsigned int num_elements_used; 3498 3499 if (pi >= ci) 3500 num_elements_used = pi - ci; 3501 else 3502 num_elements_used = elements_in_queue - ci + pi; 3503 3504 return elements_in_queue - num_elements_used - 1; 3505 } 3506 3507 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 3508 struct pqi_event_acknowledge_request *iu, size_t iu_length) 3509 { 3510 pqi_index_t iq_pi; 3511 pqi_index_t iq_ci; 3512 unsigned long flags; 3513 void *next_element; 3514 struct pqi_queue_group *queue_group; 3515 3516 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 3517 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 3518 3519 while (1) { 3520 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 3521 3522 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 3523 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 3524 3525 if (pqi_num_elements_free(iq_pi, iq_ci, 3526 ctrl_info->num_elements_per_iq)) 3527 break; 3528 3529 spin_unlock_irqrestore( 3530 &queue_group->submit_lock[RAID_PATH], flags); 3531 3532 if (pqi_ctrl_offline(ctrl_info)) 3533 return; 3534 } 3535 3536 next_element = queue_group->iq_element_array[RAID_PATH] + 3537 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3538 3539 memcpy(next_element, iu, iu_length); 3540 3541 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 3542 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 3543 3544 /* 3545 * This write notifies the controller that an IU is available to be 3546 * processed. 
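 * (The producer index was advanced modulo num_elements_per_iq above; the cached value in iq_pi_copy avoids having to read the register back.)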
3547 */ 3548 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 3549 3550 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 3551 } 3552 3553 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 3554 struct pqi_event *event) 3555 { 3556 struct pqi_event_acknowledge_request request; 3557 3558 memset(&request, 0, sizeof(request)); 3559 3560 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 3561 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 3562 &request.header.iu_length); 3563 request.event_type = event->event_type; 3564 put_unaligned_le16(event->event_id, &request.event_id); 3565 put_unaligned_le32(event->additional_event_id, &request.additional_event_id); 3566 3567 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 3568 } 3569 3570 #define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS 30 3571 #define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS 1 3572 3573 static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( 3574 struct pqi_ctrl_info *ctrl_info) 3575 { 3576 u8 status; 3577 unsigned long timeout; 3578 3579 timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * HZ) + jiffies; 3580 3581 while (1) { 3582 status = pqi_read_soft_reset_status(ctrl_info); 3583 if (status & PQI_SOFT_RESET_INITIATE) 3584 return RESET_INITIATE_DRIVER; 3585 3586 if (status & PQI_SOFT_RESET_ABORT) 3587 return RESET_ABORT; 3588 3589 if (!sis_is_firmware_running(ctrl_info)) 3590 return RESET_NORESPONSE; 3591 3592 if (time_after(jiffies, timeout)) { 3593 dev_warn(&ctrl_info->pci_dev->dev, 3594 "timed out waiting for soft reset status\n"); 3595 return RESET_TIMEDOUT; 3596 } 3597 3598 ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS); 3599 } 3600 } 3601 3602 static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info) 3603 { 3604 int rc; 3605 unsigned int delay_secs; 3606 enum pqi_soft_reset_status reset_status; 3607 3608 if (ctrl_info->soft_reset_handshake_supported) 3609 reset_status = pqi_poll_for_soft_reset_status(ctrl_info); 3610 else 3611 reset_status = RESET_INITIATE_FIRMWARE; 3612 3613 delay_secs = PQI_POST_RESET_DELAY_SECS; 3614 3615 switch (reset_status) { 3616 case RESET_TIMEDOUT: 3617 delay_secs = PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS; 3618 fallthrough; 3619 case RESET_INITIATE_DRIVER: 3620 dev_info(&ctrl_info->pci_dev->dev, 3621 "Online Firmware Activation: resetting controller\n"); 3622 sis_soft_reset(ctrl_info); 3623 fallthrough; 3624 case RESET_INITIATE_FIRMWARE: 3625 ctrl_info->pqi_mode_enabled = false; 3626 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 3627 rc = pqi_ofa_ctrl_restart(ctrl_info, delay_secs); 3628 pqi_ofa_free_host_buffer(ctrl_info); 3629 pqi_ctrl_ofa_done(ctrl_info); 3630 dev_info(&ctrl_info->pci_dev->dev, 3631 "Online Firmware Activation: %s\n", 3632 rc == 0 ? 
"SUCCESS" : "FAILED"); 3633 break; 3634 case RESET_ABORT: 3635 dev_info(&ctrl_info->pci_dev->dev, 3636 "Online Firmware Activation ABORTED\n"); 3637 if (ctrl_info->soft_reset_handshake_supported) 3638 pqi_clear_soft_reset_status(ctrl_info); 3639 pqi_ofa_free_host_buffer(ctrl_info); 3640 pqi_ctrl_ofa_done(ctrl_info); 3641 pqi_ofa_ctrl_unquiesce(ctrl_info); 3642 break; 3643 case RESET_NORESPONSE: 3644 fallthrough; 3645 default: 3646 dev_err(&ctrl_info->pci_dev->dev, 3647 "unexpected Online Firmware Activation reset status: 0x%x\n", 3648 reset_status); 3649 pqi_ofa_free_host_buffer(ctrl_info); 3650 pqi_ctrl_ofa_done(ctrl_info); 3651 pqi_ofa_ctrl_unquiesce(ctrl_info); 3652 pqi_take_ctrl_offline(ctrl_info, PQI_OFA_RESPONSE_TIMEOUT); 3653 break; 3654 } 3655 } 3656 3657 static void pqi_ofa_memory_alloc_worker(struct work_struct *work) 3658 { 3659 struct pqi_ctrl_info *ctrl_info; 3660 3661 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_memory_alloc_work); 3662 3663 pqi_ctrl_ofa_start(ctrl_info); 3664 pqi_ofa_setup_host_buffer(ctrl_info); 3665 pqi_ofa_host_memory_update(ctrl_info); 3666 } 3667 3668 static void pqi_ofa_quiesce_worker(struct work_struct *work) 3669 { 3670 struct pqi_ctrl_info *ctrl_info; 3671 struct pqi_event *event; 3672 3673 ctrl_info = container_of(work, struct pqi_ctrl_info, ofa_quiesce_work); 3674 3675 event = &ctrl_info->events[pqi_event_type_to_event_index(PQI_EVENT_TYPE_OFA)]; 3676 3677 pqi_ofa_ctrl_quiesce(ctrl_info); 3678 pqi_acknowledge_event(ctrl_info, event); 3679 pqi_process_soft_reset(ctrl_info); 3680 } 3681 3682 static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, 3683 struct pqi_event *event) 3684 { 3685 bool ack_event; 3686 3687 ack_event = true; 3688 3689 switch (event->event_id) { 3690 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3691 dev_info(&ctrl_info->pci_dev->dev, 3692 "received Online Firmware Activation memory allocation request\n"); 3693 schedule_work(&ctrl_info->ofa_memory_alloc_work); 3694 break; 3695 case PQI_EVENT_OFA_QUIESCE: 3696 dev_info(&ctrl_info->pci_dev->dev, 3697 "received Online Firmware Activation quiesce request\n"); 3698 schedule_work(&ctrl_info->ofa_quiesce_work); 3699 ack_event = false; 3700 break; 3701 case PQI_EVENT_OFA_CANCELED: 3702 dev_info(&ctrl_info->pci_dev->dev, 3703 "received Online Firmware Activation cancel request: reason: %u\n", 3704 ctrl_info->ofa_cancel_reason); 3705 pqi_ofa_free_host_buffer(ctrl_info); 3706 pqi_ctrl_ofa_done(ctrl_info); 3707 break; 3708 default: 3709 dev_err(&ctrl_info->pci_dev->dev, 3710 "received unknown Online Firmware Activation request: event ID: %u\n", 3711 event->event_id); 3712 break; 3713 } 3714 3715 return ack_event; 3716 } 3717 3718 static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info) 3719 { 3720 unsigned long flags; 3721 struct pqi_scsi_dev *device; 3722 3723 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3724 3725 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) { 3726 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 3727 device->rescan = true; 3728 } 3729 3730 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3731 } 3732 3733 static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info) 3734 { 3735 unsigned long flags; 3736 struct pqi_scsi_dev *device; 3737 3738 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 3739 3740 list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) 3741 if (device->raid_bypass_enabled) 3742 
device->raid_bypass_enabled = false; 3743 3744 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 3745 } 3746 3747 static void pqi_event_worker(struct work_struct *work) 3748 { 3749 unsigned int i; 3750 bool rescan_needed; 3751 struct pqi_ctrl_info *ctrl_info; 3752 struct pqi_event *event; 3753 bool ack_event; 3754 3755 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 3756 3757 pqi_ctrl_busy(ctrl_info); 3758 pqi_wait_if_ctrl_blocked(ctrl_info); 3759 if (pqi_ctrl_offline(ctrl_info)) 3760 goto out; 3761 3762 rescan_needed = false; 3763 event = ctrl_info->events; 3764 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 3765 if (event->pending) { 3766 event->pending = false; 3767 if (event->event_type == PQI_EVENT_TYPE_OFA) { 3768 ack_event = pqi_ofa_process_event(ctrl_info, event); 3769 } else { 3770 ack_event = true; 3771 rescan_needed = true; 3772 if (event->event_type == PQI_EVENT_TYPE_LOGICAL_DEVICE) 3773 pqi_mark_volumes_for_rescan(ctrl_info); 3774 else if (event->event_type == PQI_EVENT_TYPE_AIO_STATE_CHANGE) 3775 pqi_disable_raid_bypass(ctrl_info); 3776 } 3777 if (ack_event) 3778 pqi_acknowledge_event(ctrl_info, event); 3779 } 3780 event++; 3781 } 3782 3783 #define PQI_RESCAN_WORK_FOR_EVENT_DELAY (5 * HZ) 3784 3785 if (rescan_needed) 3786 pqi_schedule_rescan_worker_with_delay(ctrl_info, 3787 PQI_RESCAN_WORK_FOR_EVENT_DELAY); 3788 3789 out: 3790 pqi_ctrl_unbusy(ctrl_info); 3791 } 3792 3793 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 3794 3795 static void pqi_heartbeat_timer_handler(struct timer_list *t) 3796 { 3797 int num_interrupts; 3798 u32 heartbeat_count; 3799 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, heartbeat_timer); 3800 3801 pqi_check_ctrl_health(ctrl_info); 3802 if (pqi_ctrl_offline(ctrl_info)) 3803 return; 3804 3805 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 3806 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 3807 3808 if (num_interrupts == ctrl_info->previous_num_interrupts) { 3809 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 3810 dev_err(&ctrl_info->pci_dev->dev, 3811 "no heartbeat detected - last heartbeat count: %u\n", 3812 heartbeat_count); 3813 pqi_take_ctrl_offline(ctrl_info, PQI_NO_HEARTBEAT); 3814 return; 3815 } 3816 } else { 3817 ctrl_info->previous_num_interrupts = num_interrupts; 3818 } 3819 3820 ctrl_info->previous_heartbeat_count = heartbeat_count; 3821 mod_timer(&ctrl_info->heartbeat_timer, 3822 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 3823 } 3824 3825 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3826 { 3827 if (!ctrl_info->heartbeat_counter) 3828 return; 3829 3830 ctrl_info->previous_num_interrupts = 3831 atomic_read(&ctrl_info->num_interrupts); 3832 ctrl_info->previous_heartbeat_count = 3833 pqi_read_heartbeat_counter(ctrl_info); 3834 3835 ctrl_info->heartbeat_timer.expires = 3836 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 3837 add_timer(&ctrl_info->heartbeat_timer); 3838 } 3839 3840 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 3841 { 3842 del_timer_sync(&ctrl_info->heartbeat_timer); 3843 } 3844 3845 static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info, 3846 struct pqi_event *event, struct pqi_event_response *response) 3847 { 3848 switch (event->event_id) { 3849 case PQI_EVENT_OFA_MEMORY_ALLOCATION: 3850 ctrl_info->ofa_bytes_requested = 3851 get_unaligned_le32(&response->data.ofa_memory_allocation.bytes_requested); 3852 break; 3853 case PQI_EVENT_OFA_CANCELED: 3854 ctrl_info->ofa_cancel_reason = 
3855 get_unaligned_le16(&response->data.ofa_cancelled.reason); 3856 break; 3857 } 3858 } 3859 3860 static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 3861 { 3862 int num_events; 3863 pqi_index_t oq_pi; 3864 pqi_index_t oq_ci; 3865 struct pqi_event_queue *event_queue; 3866 struct pqi_event_response *response; 3867 struct pqi_event *event; 3868 int event_index; 3869 3870 event_queue = &ctrl_info->event_queue; 3871 num_events = 0; 3872 oq_ci = event_queue->oq_ci_copy; 3873 3874 while (1) { 3875 oq_pi = readl(event_queue->oq_pi); 3876 if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) { 3877 pqi_invalid_response(ctrl_info, PQI_EVENT_PI_OUT_OF_RANGE); 3878 dev_err(&ctrl_info->pci_dev->dev, 3879 "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n", 3880 oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci); 3881 return -1; 3882 } 3883 3884 if (oq_pi == oq_ci) 3885 break; 3886 3887 num_events++; 3888 response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 3889 3890 event_index = pqi_event_type_to_event_index(response->event_type); 3891 3892 if (event_index >= 0 && response->request_acknowledge) { 3893 event = &ctrl_info->events[event_index]; 3894 event->pending = true; 3895 event->event_type = response->event_type; 3896 event->event_id = get_unaligned_le16(&response->event_id); 3897 event->additional_event_id = 3898 get_unaligned_le32(&response->additional_event_id); 3899 if (event->event_type == PQI_EVENT_TYPE_OFA) 3900 pqi_ofa_capture_event_payload(ctrl_info, event, response); 3901 } 3902 3903 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 3904 } 3905 3906 if (num_events) { 3907 event_queue->oq_ci_copy = oq_ci; 3908 writel(oq_ci, event_queue->oq_ci); 3909 schedule_work(&ctrl_info->event_work); 3910 } 3911 3912 return num_events; 3913 } 3914 3915 #define PQI_LEGACY_INTX_MASK 0x1 3916 3917 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx) 3918 { 3919 u32 intx_mask; 3920 struct pqi_device_registers __iomem *pqi_registers; 3921 volatile void __iomem *register_addr; 3922 3923 pqi_registers = ctrl_info->pqi_registers; 3924 3925 if (enable_intx) 3926 register_addr = &pqi_registers->legacy_intx_mask_clear; 3927 else 3928 register_addr = &pqi_registers->legacy_intx_mask_set; 3929 3930 intx_mask = readl(register_addr); 3931 intx_mask |= PQI_LEGACY_INTX_MASK; 3932 writel(intx_mask, register_addr); 3933 } 3934 3935 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3936 enum pqi_irq_mode new_mode) 3937 { 3938 switch (ctrl_info->irq_mode) { 3939 case IRQ_MODE_MSIX: 3940 switch (new_mode) { 3941 case IRQ_MODE_MSIX: 3942 break; 3943 case IRQ_MODE_INTX: 3944 pqi_configure_legacy_intx(ctrl_info, true); 3945 sis_enable_intx(ctrl_info); 3946 break; 3947 case IRQ_MODE_NONE: 3948 break; 3949 } 3950 break; 3951 case IRQ_MODE_INTX: 3952 switch (new_mode) { 3953 case IRQ_MODE_MSIX: 3954 pqi_configure_legacy_intx(ctrl_info, false); 3955 sis_enable_msix(ctrl_info); 3956 break; 3957 case IRQ_MODE_INTX: 3958 break; 3959 case IRQ_MODE_NONE: 3960 pqi_configure_legacy_intx(ctrl_info, false); 3961 break; 3962 } 3963 break; 3964 case IRQ_MODE_NONE: 3965 switch (new_mode) { 3966 case IRQ_MODE_MSIX: 3967 sis_enable_msix(ctrl_info); 3968 break; 3969 case IRQ_MODE_INTX: 3970 pqi_configure_legacy_intx(ctrl_info, true); 3971 sis_enable_intx(ctrl_info); 3972 break; 3973 case IRQ_MODE_NONE: 3974 break; 3975 } 3976 break; 3977 } 3978 3979 ctrl_info->irq_mode = new_mode; 3980 } 3981 3982 #define 
PQI_LEGACY_INTX_PENDING 0x1 3983 3984 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3985 { 3986 bool valid_irq; 3987 u32 intx_status; 3988 3989 switch (ctrl_info->irq_mode) { 3990 case IRQ_MODE_MSIX: 3991 valid_irq = true; 3992 break; 3993 case IRQ_MODE_INTX: 3994 intx_status = readl(&ctrl_info->pqi_registers->legacy_intx_status); 3995 if (intx_status & PQI_LEGACY_INTX_PENDING) 3996 valid_irq = true; 3997 else 3998 valid_irq = false; 3999 break; 4000 case IRQ_MODE_NONE: 4001 default: 4002 valid_irq = false; 4003 break; 4004 } 4005 4006 return valid_irq; 4007 } 4008 4009 static irqreturn_t pqi_irq_handler(int irq, void *data) 4010 { 4011 struct pqi_ctrl_info *ctrl_info; 4012 struct pqi_queue_group *queue_group; 4013 int num_io_responses_handled; 4014 int num_events_handled; 4015 4016 queue_group = data; 4017 ctrl_info = queue_group->ctrl_info; 4018 4019 if (!pqi_is_valid_irq(ctrl_info)) 4020 return IRQ_NONE; 4021 4022 num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 4023 if (num_io_responses_handled < 0) 4024 goto out; 4025 4026 if (irq == ctrl_info->event_irq) { 4027 num_events_handled = pqi_process_event_intr(ctrl_info); 4028 if (num_events_handled < 0) 4029 goto out; 4030 } else { 4031 num_events_handled = 0; 4032 } 4033 4034 if (num_io_responses_handled + num_events_handled > 0) 4035 atomic_inc(&ctrl_info->num_interrupts); 4036 4037 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 4038 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 4039 4040 out: 4041 return IRQ_HANDLED; 4042 } 4043 4044 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 4045 { 4046 struct pci_dev *pci_dev = ctrl_info->pci_dev; 4047 int i; 4048 int rc; 4049 4050 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 4051 4052 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 4053 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 4054 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 4055 if (rc) { 4056 dev_err(&pci_dev->dev, 4057 "irq %u init failed with error %d\n", 4058 pci_irq_vector(pci_dev, i), rc); 4059 return rc; 4060 } 4061 ctrl_info->num_msix_vectors_initialized++; 4062 } 4063 4064 return 0; 4065 } 4066 4067 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 4068 { 4069 int i; 4070 4071 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 4072 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 4073 &ctrl_info->queue_groups[i]); 4074 4075 ctrl_info->num_msix_vectors_initialized = 0; 4076 } 4077 4078 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4079 { 4080 int num_vectors_enabled; 4081 unsigned int flags = PCI_IRQ_MSIX; 4082 4083 if (!pqi_disable_managed_interrupts) 4084 flags |= PCI_IRQ_AFFINITY; 4085 4086 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 4087 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 4088 flags); 4089 if (num_vectors_enabled < 0) { 4090 dev_err(&ctrl_info->pci_dev->dev, 4091 "MSI-X init failed with error %d\n", 4092 num_vectors_enabled); 4093 return num_vectors_enabled; 4094 } 4095 4096 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 4097 ctrl_info->irq_mode = IRQ_MODE_MSIX; 4098 return 0; 4099 } 4100 4101 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 4102 { 4103 if (ctrl_info->num_msix_vectors_enabled) { 4104 pci_free_irq_vectors(ctrl_info->pci_dev); 4105 ctrl_info->num_msix_vectors_enabled = 0; 4106 } 4107 } 4108 4109 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 4110 { 4111 
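	/*
	 * All of the operational queue element arrays, the event queue
	 * element array, and the queue index (CI/PI) words are carved out
	 * of a single coherent DMA allocation.  The first pass below only
	 * walks a NULL-based pointer through the layout, honoring the
	 * required alignments, in order to compute the total allocation
	 * length.
	 */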
unsigned int i; 4112 size_t alloc_length; 4113 size_t element_array_length_per_iq; 4114 size_t element_array_length_per_oq; 4115 void *element_array; 4116 void __iomem *next_queue_index; 4117 void *aligned_pointer; 4118 unsigned int num_inbound_queues; 4119 unsigned int num_outbound_queues; 4120 unsigned int num_queue_indexes; 4121 struct pqi_queue_group *queue_group; 4122 4123 element_array_length_per_iq = 4124 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 4125 ctrl_info->num_elements_per_iq; 4126 element_array_length_per_oq = 4127 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 4128 ctrl_info->num_elements_per_oq; 4129 num_inbound_queues = ctrl_info->num_queue_groups * 2; 4130 num_outbound_queues = ctrl_info->num_queue_groups; 4131 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 4132 4133 aligned_pointer = NULL; 4134 4135 for (i = 0; i < num_inbound_queues; i++) { 4136 aligned_pointer = PTR_ALIGN(aligned_pointer, 4137 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4138 aligned_pointer += element_array_length_per_iq; 4139 } 4140 4141 for (i = 0; i < num_outbound_queues; i++) { 4142 aligned_pointer = PTR_ALIGN(aligned_pointer, 4143 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4144 aligned_pointer += element_array_length_per_oq; 4145 } 4146 4147 aligned_pointer = PTR_ALIGN(aligned_pointer, 4148 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4149 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4150 PQI_EVENT_OQ_ELEMENT_LENGTH; 4151 4152 for (i = 0; i < num_queue_indexes; i++) { 4153 aligned_pointer = PTR_ALIGN(aligned_pointer, 4154 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4155 aligned_pointer += sizeof(pqi_index_t); 4156 } 4157 4158 alloc_length = (size_t)aligned_pointer + 4159 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4160 4161 alloc_length += PQI_EXTRA_SGL_MEMORY; 4162 4163 ctrl_info->queue_memory_base = 4164 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4165 &ctrl_info->queue_memory_base_dma_handle, 4166 GFP_KERNEL); 4167 4168 if (!ctrl_info->queue_memory_base) 4169 return -ENOMEM; 4170 4171 ctrl_info->queue_memory_length = alloc_length; 4172 4173 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 4174 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4175 4176 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4177 queue_group = &ctrl_info->queue_groups[i]; 4178 queue_group->iq_element_array[RAID_PATH] = element_array; 4179 queue_group->iq_element_array_bus_addr[RAID_PATH] = 4180 ctrl_info->queue_memory_base_dma_handle + 4181 (element_array - ctrl_info->queue_memory_base); 4182 element_array += element_array_length_per_iq; 4183 element_array = PTR_ALIGN(element_array, 4184 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4185 queue_group->iq_element_array[AIO_PATH] = element_array; 4186 queue_group->iq_element_array_bus_addr[AIO_PATH] = 4187 ctrl_info->queue_memory_base_dma_handle + 4188 (element_array - ctrl_info->queue_memory_base); 4189 element_array += element_array_length_per_iq; 4190 element_array = PTR_ALIGN(element_array, 4191 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4192 } 4193 4194 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4195 queue_group = &ctrl_info->queue_groups[i]; 4196 queue_group->oq_element_array = element_array; 4197 queue_group->oq_element_array_bus_addr = 4198 ctrl_info->queue_memory_base_dma_handle + 4199 (element_array - ctrl_info->queue_memory_base); 4200 element_array += element_array_length_per_oq; 4201 element_array = PTR_ALIGN(element_array, 4202 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4203 } 4204 4205 ctrl_info->event_queue.oq_element_array = element_array; 4206 ctrl_info->event_queue.oq_element_array_bus_addr = 4207 
ctrl_info->queue_memory_base_dma_handle + 4208 (element_array - ctrl_info->queue_memory_base); 4209 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 4210 PQI_EVENT_OQ_ELEMENT_LENGTH; 4211 4212 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 4213 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4214 4215 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4216 queue_group = &ctrl_info->queue_groups[i]; 4217 queue_group->iq_ci[RAID_PATH] = next_queue_index; 4218 queue_group->iq_ci_bus_addr[RAID_PATH] = 4219 ctrl_info->queue_memory_base_dma_handle + 4220 (next_queue_index - 4221 (void __iomem *)ctrl_info->queue_memory_base); 4222 next_queue_index += sizeof(pqi_index_t); 4223 next_queue_index = PTR_ALIGN(next_queue_index, 4224 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4225 queue_group->iq_ci[AIO_PATH] = next_queue_index; 4226 queue_group->iq_ci_bus_addr[AIO_PATH] = 4227 ctrl_info->queue_memory_base_dma_handle + 4228 (next_queue_index - 4229 (void __iomem *)ctrl_info->queue_memory_base); 4230 next_queue_index += sizeof(pqi_index_t); 4231 next_queue_index = PTR_ALIGN(next_queue_index, 4232 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4233 queue_group->oq_pi = next_queue_index; 4234 queue_group->oq_pi_bus_addr = 4235 ctrl_info->queue_memory_base_dma_handle + 4236 (next_queue_index - 4237 (void __iomem *)ctrl_info->queue_memory_base); 4238 next_queue_index += sizeof(pqi_index_t); 4239 next_queue_index = PTR_ALIGN(next_queue_index, 4240 PQI_OPERATIONAL_INDEX_ALIGNMENT); 4241 } 4242 4243 ctrl_info->event_queue.oq_pi = next_queue_index; 4244 ctrl_info->event_queue.oq_pi_bus_addr = 4245 ctrl_info->queue_memory_base_dma_handle + 4246 (next_queue_index - 4247 (void __iomem *)ctrl_info->queue_memory_base); 4248 4249 return 0; 4250 } 4251 4252 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 4253 { 4254 unsigned int i; 4255 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4256 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 4257 4258 /* 4259 * Initialize the backpointers to the controller structure in 4260 * each operational queue group structure. 4261 */ 4262 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4263 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 4264 4265 /* 4266 * Assign IDs to all operational queues. Note that the IDs 4267 * assigned to operational IQs are independent of the IDs 4268 * assigned to operational OQs. 4269 */ 4270 ctrl_info->event_queue.oq_id = next_oq_id++; 4271 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4272 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 4273 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 4274 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 4275 } 4276 4277 /* 4278 * Assign MSI-X table entry indexes to all queues. Note that the 4279 * interrupt for the event queue is shared with the first queue group. 
4280 */ 4281 ctrl_info->event_queue.int_msg_num = 0; 4282 for (i = 0; i < ctrl_info->num_queue_groups; i++) 4283 ctrl_info->queue_groups[i].int_msg_num = i; 4284 4285 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4286 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 4287 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 4288 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 4289 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 4290 } 4291 } 4292 4293 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 4294 { 4295 size_t alloc_length; 4296 struct pqi_admin_queues_aligned *admin_queues_aligned; 4297 struct pqi_admin_queues *admin_queues; 4298 4299 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 4300 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 4301 4302 ctrl_info->admin_queue_memory_base = 4303 dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length, 4304 &ctrl_info->admin_queue_memory_base_dma_handle, 4305 GFP_KERNEL); 4306 4307 if (!ctrl_info->admin_queue_memory_base) 4308 return -ENOMEM; 4309 4310 ctrl_info->admin_queue_memory_length = alloc_length; 4311 4312 admin_queues = &ctrl_info->admin_queues; 4313 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 4314 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 4315 admin_queues->iq_element_array = 4316 &admin_queues_aligned->iq_element_array; 4317 admin_queues->oq_element_array = 4318 &admin_queues_aligned->oq_element_array; 4319 admin_queues->iq_ci = 4320 (pqi_index_t __iomem *)&admin_queues_aligned->iq_ci; 4321 admin_queues->oq_pi = 4322 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 4323 4324 admin_queues->iq_element_array_bus_addr = 4325 ctrl_info->admin_queue_memory_base_dma_handle + 4326 (admin_queues->iq_element_array - 4327 ctrl_info->admin_queue_memory_base); 4328 admin_queues->oq_element_array_bus_addr = 4329 ctrl_info->admin_queue_memory_base_dma_handle + 4330 (admin_queues->oq_element_array - 4331 ctrl_info->admin_queue_memory_base); 4332 admin_queues->iq_ci_bus_addr = 4333 ctrl_info->admin_queue_memory_base_dma_handle + 4334 ((void __iomem *)admin_queues->iq_ci - 4335 (void __iomem *)ctrl_info->admin_queue_memory_base); 4336 admin_queues->oq_pi_bus_addr = 4337 ctrl_info->admin_queue_memory_base_dma_handle + 4338 ((void __iomem *)admin_queues->oq_pi - 4339 (void __iomem *)ctrl_info->admin_queue_memory_base); 4340 4341 return 0; 4342 } 4343 4344 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 4345 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 4346 4347 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 4348 { 4349 struct pqi_device_registers __iomem *pqi_registers; 4350 struct pqi_admin_queues *admin_queues; 4351 unsigned long timeout; 4352 u8 status; 4353 u32 reg; 4354 4355 pqi_registers = ctrl_info->pqi_registers; 4356 admin_queues = &ctrl_info->admin_queues; 4357 4358 writeq((u64)admin_queues->iq_element_array_bus_addr, 4359 &pqi_registers->admin_iq_element_array_addr); 4360 writeq((u64)admin_queues->oq_element_array_bus_addr, 4361 &pqi_registers->admin_oq_element_array_addr); 4362 writeq((u64)admin_queues->iq_ci_bus_addr, 4363 &pqi_registers->admin_iq_ci_addr); 4364 writeq((u64)admin_queues->oq_pi_bus_addr, 4365 &pqi_registers->admin_oq_pi_addr); 4366 4367 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 4368 (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | 4369 (admin_queues->int_msg_num << 16); 4370 writel(reg, &pqi_registers->admin_iq_num_elements); 4371 4372 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 4373 &pqi_registers->function_and_status_code); 4374 4375 
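	/*
	 * Writing PQI_CREATE_ADMIN_QUEUE_PAIR above starts admin queue
	 * pair creation in firmware; poll the function and status code
	 * register until it reports PQI_STATUS_IDLE or the timeout
	 * expires.
	 */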
timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies; 4376 while (1) { 4377 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 4378 status = readb(&pqi_registers->function_and_status_code); 4379 if (status == PQI_STATUS_IDLE) 4380 break; 4381 if (time_after(jiffies, timeout)) 4382 return -ETIMEDOUT; 4383 } 4384 4385 /* 4386 * The offset registers are not initialized to the correct 4387 * offsets until *after* the create admin queue pair command 4388 * completes successfully. 4389 */ 4390 admin_queues->iq_pi = ctrl_info->iomem_base + 4391 PQI_DEVICE_REGISTERS_OFFSET + 4392 readq(&pqi_registers->admin_iq_pi_offset); 4393 admin_queues->oq_ci = ctrl_info->iomem_base + 4394 PQI_DEVICE_REGISTERS_OFFSET + 4395 readq(&pqi_registers->admin_oq_ci_offset); 4396 4397 return 0; 4398 } 4399 4400 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 4401 struct pqi_general_admin_request *request) 4402 { 4403 struct pqi_admin_queues *admin_queues; 4404 void *next_element; 4405 pqi_index_t iq_pi; 4406 4407 admin_queues = &ctrl_info->admin_queues; 4408 iq_pi = admin_queues->iq_pi_copy; 4409 4410 next_element = admin_queues->iq_element_array + 4411 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 4412 4413 memcpy(next_element, request, sizeof(*request)); 4414 4415 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 4416 admin_queues->iq_pi_copy = iq_pi; 4417 4418 /* 4419 * This write notifies the controller that an IU is available to be 4420 * processed. 4421 */ 4422 writel(iq_pi, admin_queues->iq_pi); 4423 } 4424 4425 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 4426 4427 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 4428 struct pqi_general_admin_response *response) 4429 { 4430 struct pqi_admin_queues *admin_queues; 4431 pqi_index_t oq_pi; 4432 pqi_index_t oq_ci; 4433 unsigned long timeout; 4434 4435 admin_queues = &ctrl_info->admin_queues; 4436 oq_ci = admin_queues->oq_ci_copy; 4437 4438 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 4439 4440 while (1) { 4441 oq_pi = readl(admin_queues->oq_pi); 4442 if (oq_pi != oq_ci) 4443 break; 4444 if (time_after(jiffies, timeout)) { 4445 dev_err(&ctrl_info->pci_dev->dev, 4446 "timed out waiting for admin response\n"); 4447 return -ETIMEDOUT; 4448 } 4449 if (!sis_is_firmware_running(ctrl_info)) 4450 return -ENXIO; 4451 usleep_range(1000, 2000); 4452 } 4453 4454 memcpy(response, admin_queues->oq_element_array + 4455 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 4456 4457 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 4458 admin_queues->oq_ci_copy = oq_ci; 4459 writel(oq_ci, admin_queues->oq_ci); 4460 4461 return 0; 4462 } 4463 4464 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 4465 struct pqi_queue_group *queue_group, enum pqi_io_path path, 4466 struct pqi_io_request *io_request) 4467 { 4468 struct pqi_io_request *next; 4469 void *next_element; 4470 pqi_index_t iq_pi; 4471 pqi_index_t iq_ci; 4472 size_t iu_length; 4473 unsigned long flags; 4474 unsigned int num_elements_needed; 4475 unsigned int num_elements_to_end_of_queue; 4476 size_t copy_count; 4477 struct pqi_iu_header *request; 4478 4479 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 4480 4481 if (io_request) { 4482 io_request->queue_group = queue_group; 4483 list_add_tail(&io_request->request_list_entry, 4484 &queue_group->request_list[path]); 4485 } 4486 4487 iq_pi = queue_group->iq_pi_copy[path]; 4488 4489 list_for_each_entry_safe(io_request, next, 4490 &queue_group->request_list[path], request_list_entry) { 4491 4492 request = 
io_request->iu; 4493 4494 iu_length = get_unaligned_le16(&request->iu_length) + 4495 PQI_REQUEST_HEADER_LENGTH; 4496 num_elements_needed = 4497 DIV_ROUND_UP(iu_length, 4498 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4499 4500 iq_ci = readl(queue_group->iq_ci[path]); 4501 4502 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 4503 ctrl_info->num_elements_per_iq)) 4504 break; 4505 4506 put_unaligned_le16(queue_group->oq_id, 4507 &request->response_queue_id); 4508 4509 next_element = queue_group->iq_element_array[path] + 4510 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4511 4512 num_elements_to_end_of_queue = 4513 ctrl_info->num_elements_per_iq - iq_pi; 4514 4515 if (num_elements_needed <= num_elements_to_end_of_queue) { 4516 memcpy(next_element, request, iu_length); 4517 } else { 4518 copy_count = num_elements_to_end_of_queue * 4519 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4520 memcpy(next_element, request, copy_count); 4521 memcpy(queue_group->iq_element_array[path], 4522 (u8 *)request + copy_count, 4523 iu_length - copy_count); 4524 } 4525 4526 iq_pi = (iq_pi + num_elements_needed) % 4527 ctrl_info->num_elements_per_iq; 4528 4529 list_del(&io_request->request_list_entry); 4530 } 4531 4532 if (iq_pi != queue_group->iq_pi_copy[path]) { 4533 queue_group->iq_pi_copy[path] = iq_pi; 4534 /* 4535 * This write notifies the controller that one or more IUs are 4536 * available to be processed. 4537 */ 4538 writel(iq_pi, queue_group->iq_pi[path]); 4539 } 4540 4541 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 4542 } 4543 4544 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 4545 4546 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 4547 struct completion *wait) 4548 { 4549 int rc; 4550 4551 while (1) { 4552 if (wait_for_completion_io_timeout(wait, 4553 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 4554 rc = 0; 4555 break; 4556 } 4557 4558 pqi_check_ctrl_health(ctrl_info); 4559 if (pqi_ctrl_offline(ctrl_info)) { 4560 rc = -ENXIO; 4561 break; 4562 } 4563 } 4564 4565 return rc; 4566 } 4567 4568 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 4569 void *context) 4570 { 4571 struct completion *waiting = context; 4572 4573 complete(waiting); 4574 } 4575 4576 static int pqi_process_raid_io_error_synchronous( 4577 struct pqi_raid_error_info *error_info) 4578 { 4579 int rc = -EIO; 4580 4581 switch (error_info->data_out_result) { 4582 case PQI_DATA_IN_OUT_GOOD: 4583 if (error_info->status == SAM_STAT_GOOD) 4584 rc = 0; 4585 break; 4586 case PQI_DATA_IN_OUT_UNDERFLOW: 4587 if (error_info->status == SAM_STAT_GOOD || 4588 error_info->status == SAM_STAT_CHECK_CONDITION) 4589 rc = 0; 4590 break; 4591 case PQI_DATA_IN_OUT_ABORTED: 4592 rc = PQI_CMD_STATUS_ABORTED; 4593 break; 4594 } 4595 4596 return rc; 4597 } 4598 4599 static inline bool pqi_is_blockable_request(struct pqi_iu_header *request) 4600 { 4601 return (request->driver_flags & PQI_DRIVER_NONBLOCKABLE_REQUEST) == 0; 4602 } 4603 4604 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 4605 struct pqi_iu_header *request, unsigned int flags, 4606 struct pqi_raid_error_info *error_info) 4607 { 4608 int rc = 0; 4609 struct pqi_io_request *io_request; 4610 size_t iu_length; 4611 DECLARE_COMPLETION_ONSTACK(wait); 4612 4613 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 4614 if (down_interruptible(&ctrl_info->sync_request_sem)) 4615 return -ERESTARTSYS; 4616 } else { 4617 down(&ctrl_info->sync_request_sem); 4618 } 4619 4620 pqi_ctrl_busy(ctrl_info); 4621 /* 4622 * 
Wait for other admin queue updates such as; 4623 * config table changes, OFA memory updates, ... 4624 */ 4625 if (pqi_is_blockable_request(request)) 4626 pqi_wait_if_ctrl_blocked(ctrl_info); 4627 4628 if (pqi_ctrl_offline(ctrl_info)) { 4629 rc = -ENXIO; 4630 goto out; 4631 } 4632 4633 io_request = pqi_alloc_io_request(ctrl_info, NULL); 4634 4635 put_unaligned_le16(io_request->index, 4636 &(((struct pqi_raid_path_request *)request)->request_id)); 4637 4638 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 4639 ((struct pqi_raid_path_request *)request)->error_index = 4640 ((struct pqi_raid_path_request *)request)->request_id; 4641 4642 iu_length = get_unaligned_le16(&request->iu_length) + 4643 PQI_REQUEST_HEADER_LENGTH; 4644 memcpy(io_request->iu, request, iu_length); 4645 4646 io_request->io_complete_callback = pqi_raid_synchronous_complete; 4647 io_request->context = &wait; 4648 4649 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 4650 io_request); 4651 4652 pqi_wait_for_completion_io(ctrl_info, &wait); 4653 4654 if (error_info) { 4655 if (io_request->error_info) 4656 memcpy(error_info, io_request->error_info, sizeof(*error_info)); 4657 else 4658 memset(error_info, 0, sizeof(*error_info)); 4659 } else if (rc == 0 && io_request->error_info) { 4660 rc = pqi_process_raid_io_error_synchronous(io_request->error_info); 4661 } 4662 4663 pqi_free_io_request(io_request); 4664 4665 out: 4666 pqi_ctrl_unbusy(ctrl_info); 4667 up(&ctrl_info->sync_request_sem); 4668 4669 return rc; 4670 } 4671 4672 static int pqi_validate_admin_response( 4673 struct pqi_general_admin_response *response, u8 expected_function_code) 4674 { 4675 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 4676 return -EINVAL; 4677 4678 if (get_unaligned_le16(&response->header.iu_length) != 4679 PQI_GENERAL_ADMIN_IU_LENGTH) 4680 return -EINVAL; 4681 4682 if (response->function_code != expected_function_code) 4683 return -EINVAL; 4684 4685 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 4686 return -EINVAL; 4687 4688 return 0; 4689 } 4690 4691 static int pqi_submit_admin_request_synchronous( 4692 struct pqi_ctrl_info *ctrl_info, 4693 struct pqi_general_admin_request *request, 4694 struct pqi_general_admin_response *response) 4695 { 4696 int rc; 4697 4698 pqi_submit_admin_request(ctrl_info, request); 4699 4700 rc = pqi_poll_for_admin_response(ctrl_info, response); 4701 4702 if (rc == 0) 4703 rc = pqi_validate_admin_response(response, request->function_code); 4704 4705 return rc; 4706 } 4707 4708 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 4709 { 4710 int rc; 4711 struct pqi_general_admin_request request; 4712 struct pqi_general_admin_response response; 4713 struct pqi_device_capability *capability; 4714 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 4715 4716 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 4717 if (!capability) 4718 return -ENOMEM; 4719 4720 memset(&request, 0, sizeof(request)); 4721 4722 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4723 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4724 &request.header.iu_length); 4725 request.function_code = 4726 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 4727 put_unaligned_le32(sizeof(*capability), 4728 &request.data.report_device_capability.buffer_length); 4729 4730 rc = pqi_map_single(ctrl_info->pci_dev, 4731 &request.data.report_device_capability.sg_descriptor, 4732 capability, sizeof(*capability), 4733 DMA_FROM_DEVICE); 4734 if (rc) 4735 goto out; 
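	/*
	 * The capability buffer is now mapped for DMA; issue the REPORT
	 * DEVICE CAPABILITY admin request synchronously, then unmap the
	 * buffer whether or not the request succeeded.
	 */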
4736 4737 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, &response); 4738 4739 pqi_pci_unmap(ctrl_info->pci_dev, 4740 &request.data.report_device_capability.sg_descriptor, 1, 4741 DMA_FROM_DEVICE); 4742 4743 if (rc) 4744 goto out; 4745 4746 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 4747 rc = -EIO; 4748 goto out; 4749 } 4750 4751 ctrl_info->max_inbound_queues = 4752 get_unaligned_le16(&capability->max_inbound_queues); 4753 ctrl_info->max_elements_per_iq = 4754 get_unaligned_le16(&capability->max_elements_per_iq); 4755 ctrl_info->max_iq_element_length = 4756 get_unaligned_le16(&capability->max_iq_element_length) 4757 * 16; 4758 ctrl_info->max_outbound_queues = 4759 get_unaligned_le16(&capability->max_outbound_queues); 4760 ctrl_info->max_elements_per_oq = 4761 get_unaligned_le16(&capability->max_elements_per_oq); 4762 ctrl_info->max_oq_element_length = 4763 get_unaligned_le16(&capability->max_oq_element_length) 4764 * 16; 4765 4766 sop_iu_layer_descriptor = 4767 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 4768 4769 ctrl_info->max_inbound_iu_length_per_firmware = 4770 get_unaligned_le16( 4771 &sop_iu_layer_descriptor->max_inbound_iu_length); 4772 ctrl_info->inbound_spanning_supported = 4773 sop_iu_layer_descriptor->inbound_spanning_supported; 4774 ctrl_info->outbound_spanning_supported = 4775 sop_iu_layer_descriptor->outbound_spanning_supported; 4776 4777 out: 4778 kfree(capability); 4779 4780 return rc; 4781 } 4782 4783 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 4784 { 4785 if (ctrl_info->max_iq_element_length < 4786 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4787 dev_err(&ctrl_info->pci_dev->dev, 4788 "max. inbound queue element length of %d is less than the required length of %d\n", 4789 ctrl_info->max_iq_element_length, 4790 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4791 return -EINVAL; 4792 } 4793 4794 if (ctrl_info->max_oq_element_length < 4795 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 4796 dev_err(&ctrl_info->pci_dev->dev, 4797 "max. outbound queue element length of %d is less than the required length of %d\n", 4798 ctrl_info->max_oq_element_length, 4799 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 4800 return -EINVAL; 4801 } 4802 4803 if (ctrl_info->max_inbound_iu_length_per_firmware < 4804 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 4805 dev_err(&ctrl_info->pci_dev->dev, 4806 "max. inbound IU length of %u is less than the min. required length of %d\n", 4807 ctrl_info->max_inbound_iu_length_per_firmware, 4808 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4809 return -EINVAL; 4810 } 4811 4812 if (!ctrl_info->inbound_spanning_supported) { 4813 dev_err(&ctrl_info->pci_dev->dev, 4814 "the controller does not support inbound spanning\n"); 4815 return -EINVAL; 4816 } 4817 4818 if (ctrl_info->outbound_spanning_supported) { 4819 dev_err(&ctrl_info->pci_dev->dev, 4820 "the controller supports outbound spanning but this driver does not\n"); 4821 return -EINVAL; 4822 } 4823 4824 return 0; 4825 } 4826 4827 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 4828 { 4829 int rc; 4830 struct pqi_event_queue *event_queue; 4831 struct pqi_general_admin_request request; 4832 struct pqi_general_admin_response response; 4833 4834 event_queue = &ctrl_info->event_queue; 4835 4836 /* 4837 * Create OQ (Outbound Queue - device to host queue) to dedicate 4838 * to events. 
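 * The element array and PI address programmed here were laid out by
 * pqi_alloc_operational_queues(); int_msg_num ties the event queue to
 * MSI-X vector 0, which it shares with the first queue group.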
4839 */ 4840 memset(&request, 0, sizeof(request)); 4841 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4842 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4843 &request.header.iu_length); 4844 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4845 put_unaligned_le16(event_queue->oq_id, 4846 &request.data.create_operational_oq.queue_id); 4847 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 4848 &request.data.create_operational_oq.element_array_addr); 4849 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 4850 &request.data.create_operational_oq.pi_addr); 4851 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 4852 &request.data.create_operational_oq.num_elements); 4853 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 4854 &request.data.create_operational_oq.element_length); 4855 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4856 put_unaligned_le16(event_queue->int_msg_num, 4857 &request.data.create_operational_oq.int_msg_num); 4858 4859 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4860 &response); 4861 if (rc) 4862 return rc; 4863 4864 event_queue->oq_ci = ctrl_info->iomem_base + 4865 PQI_DEVICE_REGISTERS_OFFSET + 4866 get_unaligned_le64( 4867 &response.data.create_operational_oq.oq_ci_offset); 4868 4869 return 0; 4870 } 4871 4872 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 4873 unsigned int group_number) 4874 { 4875 int rc; 4876 struct pqi_queue_group *queue_group; 4877 struct pqi_general_admin_request request; 4878 struct pqi_general_admin_response response; 4879 4880 queue_group = &ctrl_info->queue_groups[group_number]; 4881 4882 /* 4883 * Create IQ (Inbound Queue - host to device queue) for 4884 * RAID path. 4885 */ 4886 memset(&request, 0, sizeof(request)); 4887 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4888 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4889 &request.header.iu_length); 4890 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4891 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 4892 &request.data.create_operational_iq.queue_id); 4893 put_unaligned_le64( 4894 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 4895 &request.data.create_operational_iq.element_array_addr); 4896 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 4897 &request.data.create_operational_iq.ci_addr); 4898 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4899 &request.data.create_operational_iq.num_elements); 4900 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4901 &request.data.create_operational_iq.element_length); 4902 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4903 4904 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4905 &response); 4906 if (rc) { 4907 dev_err(&ctrl_info->pci_dev->dev, 4908 "error creating inbound RAID queue\n"); 4909 return rc; 4910 } 4911 4912 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 4913 PQI_DEVICE_REGISTERS_OFFSET + 4914 get_unaligned_le64( 4915 &response.data.create_operational_iq.iq_pi_offset); 4916 4917 /* 4918 * Create IQ (Inbound Queue - host to device queue) for 4919 * Advanced I/O (AIO) path. 
4920 */ 4921 memset(&request, 0, sizeof(request)); 4922 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4923 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4924 &request.header.iu_length); 4925 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4926 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4927 &request.data.create_operational_iq.queue_id); 4928 put_unaligned_le64((u64)queue_group-> 4929 iq_element_array_bus_addr[AIO_PATH], 4930 &request.data.create_operational_iq.element_array_addr); 4931 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4932 &request.data.create_operational_iq.ci_addr); 4933 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4934 &request.data.create_operational_iq.num_elements); 4935 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4936 &request.data.create_operational_iq.element_length); 4937 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4938 4939 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4940 &response); 4941 if (rc) { 4942 dev_err(&ctrl_info->pci_dev->dev, 4943 "error creating inbound AIO queue\n"); 4944 return rc; 4945 } 4946 4947 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4948 PQI_DEVICE_REGISTERS_OFFSET + 4949 get_unaligned_le64( 4950 &response.data.create_operational_iq.iq_pi_offset); 4951 4952 /* 4953 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4954 * assumed to be for RAID path I/O unless we change the queue's 4955 * property. 4956 */ 4957 memset(&request, 0, sizeof(request)); 4958 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4959 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4960 &request.header.iu_length); 4961 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4962 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4963 &request.data.change_operational_iq_properties.queue_id); 4964 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4965 &request.data.change_operational_iq_properties.vendor_specific); 4966 4967 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4968 &response); 4969 if (rc) { 4970 dev_err(&ctrl_info->pci_dev->dev, 4971 "error changing queue property\n"); 4972 return rc; 4973 } 4974 4975 /* 4976 * Create OQ (Outbound Queue - device to host queue). 
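 * Each queue group has a single OQ; responses for both the RAID and
 * AIO inbound queues of the group are posted to it.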
4977 */ 4978 memset(&request, 0, sizeof(request)); 4979 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4980 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4981 &request.header.iu_length); 4982 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4983 put_unaligned_le16(queue_group->oq_id, 4984 &request.data.create_operational_oq.queue_id); 4985 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4986 &request.data.create_operational_oq.element_array_addr); 4987 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4988 &request.data.create_operational_oq.pi_addr); 4989 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4990 &request.data.create_operational_oq.num_elements); 4991 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4992 &request.data.create_operational_oq.element_length); 4993 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4994 put_unaligned_le16(queue_group->int_msg_num, 4995 &request.data.create_operational_oq.int_msg_num); 4996 4997 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4998 &response); 4999 if (rc) { 5000 dev_err(&ctrl_info->pci_dev->dev, 5001 "error creating outbound queue\n"); 5002 return rc; 5003 } 5004 5005 queue_group->oq_ci = ctrl_info->iomem_base + 5006 PQI_DEVICE_REGISTERS_OFFSET + 5007 get_unaligned_le64( 5008 &response.data.create_operational_oq.oq_ci_offset); 5009 5010 return 0; 5011 } 5012 5013 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 5014 { 5015 int rc; 5016 unsigned int i; 5017 5018 rc = pqi_create_event_queue(ctrl_info); 5019 if (rc) { 5020 dev_err(&ctrl_info->pci_dev->dev, 5021 "error creating event queue\n"); 5022 return rc; 5023 } 5024 5025 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5026 rc = pqi_create_queue_group(ctrl_info, i); 5027 if (rc) { 5028 dev_err(&ctrl_info->pci_dev->dev, 5029 "error creating queue group number %u/%u\n", 5030 i, ctrl_info->num_queue_groups); 5031 return rc; 5032 } 5033 } 5034 5035 return 0; 5036 } 5037 5038 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 5039 struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS) 5040 5041 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 5042 bool enable_events) 5043 { 5044 int rc; 5045 unsigned int i; 5046 struct pqi_event_config *event_config; 5047 struct pqi_event_descriptor *event_descriptor; 5048 struct pqi_general_management_request request; 5049 5050 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5051 GFP_KERNEL); 5052 if (!event_config) 5053 return -ENOMEM; 5054 5055 memset(&request, 0, sizeof(request)); 5056 5057 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 5058 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5059 data.report_event_configuration.sg_descriptors[1]) - 5060 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5061 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5062 &request.data.report_event_configuration.buffer_length); 5063 5064 rc = pqi_map_single(ctrl_info->pci_dev, 5065 request.data.report_event_configuration.sg_descriptors, 5066 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5067 DMA_FROM_DEVICE); 5068 if (rc) 5069 goto out; 5070 5071 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5072 5073 pqi_pci_unmap(ctrl_info->pci_dev, 5074 request.data.report_event_configuration.sg_descriptors, 1, 5075 DMA_FROM_DEVICE); 5076 5077 if (rc) 5078 goto out; 5079 5080 for (i = 0; i < event_config->num_event_descriptors; 
i++) { 5081 event_descriptor = &event_config->descriptors[i]; 5082 if (enable_events && 5083 pqi_is_supported_event(event_descriptor->event_type)) 5084 put_unaligned_le16(ctrl_info->event_queue.oq_id, 5085 &event_descriptor->oq_id); 5086 else 5087 put_unaligned_le16(0, &event_descriptor->oq_id); 5088 } 5089 5090 memset(&request, 0, sizeof(request)); 5091 5092 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 5093 put_unaligned_le16(offsetof(struct pqi_general_management_request, 5094 data.report_event_configuration.sg_descriptors[1]) - 5095 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 5096 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5097 &request.data.report_event_configuration.buffer_length); 5098 5099 rc = pqi_map_single(ctrl_info->pci_dev, 5100 request.data.report_event_configuration.sg_descriptors, 5101 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 5102 DMA_TO_DEVICE); 5103 if (rc) 5104 goto out; 5105 5106 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 5107 5108 pqi_pci_unmap(ctrl_info->pci_dev, 5109 request.data.report_event_configuration.sg_descriptors, 1, 5110 DMA_TO_DEVICE); 5111 5112 out: 5113 kfree(event_config); 5114 5115 return rc; 5116 } 5117 5118 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 5119 { 5120 return pqi_configure_events(ctrl_info, true); 5121 } 5122 5123 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 5124 { 5125 unsigned int i; 5126 struct device *dev; 5127 size_t sg_chain_buffer_length; 5128 struct pqi_io_request *io_request; 5129 5130 if (!ctrl_info->io_request_pool) 5131 return; 5132 5133 dev = &ctrl_info->pci_dev->dev; 5134 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5135 io_request = ctrl_info->io_request_pool; 5136 5137 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5138 kfree(io_request->iu); 5139 if (!io_request->sg_chain_buffer) 5140 break; 5141 dma_free_coherent(dev, sg_chain_buffer_length, 5142 io_request->sg_chain_buffer, 5143 io_request->sg_chain_buffer_dma_handle); 5144 io_request++; 5145 } 5146 5147 kfree(ctrl_info->io_request_pool); 5148 ctrl_info->io_request_pool = NULL; 5149 } 5150 5151 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 5152 { 5153 ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, 5154 ctrl_info->error_buffer_length, 5155 &ctrl_info->error_buffer_dma_handle, 5156 GFP_KERNEL); 5157 if (!ctrl_info->error_buffer) 5158 return -ENOMEM; 5159 5160 return 0; 5161 } 5162 5163 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 5164 { 5165 unsigned int i; 5166 void *sg_chain_buffer; 5167 size_t sg_chain_buffer_length; 5168 dma_addr_t sg_chain_buffer_dma_handle; 5169 struct device *dev; 5170 struct pqi_io_request *io_request; 5171 5172 ctrl_info->io_request_pool = kcalloc(ctrl_info->max_io_slots, 5173 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 5174 5175 if (!ctrl_info->io_request_pool) { 5176 dev_err(&ctrl_info->pci_dev->dev, 5177 "failed to allocate I/O request pool\n"); 5178 goto error; 5179 } 5180 5181 dev = &ctrl_info->pci_dev->dev; 5182 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 5183 io_request = ctrl_info->io_request_pool; 5184 5185 for (i = 0; i < ctrl_info->max_io_slots; i++) { 5186 io_request->iu = kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 5187 5188 if (!io_request->iu) { 5189 dev_err(&ctrl_info->pci_dev->dev, 5190 "failed to allocate IU buffers\n"); 5191 goto error; 5192 } 5193 5194 
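		/*
		 * Each I/O request also gets its own coherent SG chain
		 * buffer, used when a command needs more SG descriptors
		 * than fit in the IU itself.
		 */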
sg_chain_buffer = dma_alloc_coherent(dev, 5195 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 5196 GFP_KERNEL); 5197 5198 if (!sg_chain_buffer) { 5199 dev_err(&ctrl_info->pci_dev->dev, 5200 "failed to allocate PQI scatter-gather chain buffers\n"); 5201 goto error; 5202 } 5203 5204 io_request->index = i; 5205 io_request->sg_chain_buffer = sg_chain_buffer; 5206 io_request->sg_chain_buffer_dma_handle = sg_chain_buffer_dma_handle; 5207 io_request++; 5208 } 5209 5210 return 0; 5211 5212 error: 5213 pqi_free_all_io_requests(ctrl_info); 5214 5215 return -ENOMEM; 5216 } 5217 5218 /* 5219 * Calculate required resources that are sized based on max. outstanding 5220 * requests and max. transfer size. 5221 */ 5222 5223 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 5224 { 5225 u32 max_transfer_size; 5226 u32 max_sg_entries; 5227 5228 ctrl_info->scsi_ml_can_queue = 5229 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 5230 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 5231 5232 ctrl_info->error_buffer_length = 5233 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 5234 5235 if (reset_devices) 5236 max_transfer_size = min(ctrl_info->max_transfer_size, 5237 PQI_MAX_TRANSFER_SIZE_KDUMP); 5238 else 5239 max_transfer_size = min(ctrl_info->max_transfer_size, 5240 PQI_MAX_TRANSFER_SIZE); 5241 5242 max_sg_entries = max_transfer_size / PAGE_SIZE; 5243 5244 /* +1 to cover when the buffer is not page-aligned. */ 5245 max_sg_entries++; 5246 5247 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 5248 5249 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 5250 5251 ctrl_info->sg_chain_buffer_length = 5252 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 5253 PQI_EXTRA_SGL_MEMORY; 5254 ctrl_info->sg_tablesize = max_sg_entries; 5255 ctrl_info->max_sectors = max_transfer_size / 512; 5256 } 5257 5258 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 5259 { 5260 int num_queue_groups; 5261 u16 num_elements_per_iq; 5262 u16 num_elements_per_oq; 5263 5264 if (reset_devices) { 5265 num_queue_groups = 1; 5266 } else { 5267 int num_cpus; 5268 int max_queue_groups; 5269 5270 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 5271 ctrl_info->max_outbound_queues - 1); 5272 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 5273 5274 num_cpus = num_online_cpus(); 5275 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 5276 num_queue_groups = min(num_queue_groups, max_queue_groups); 5277 } 5278 5279 ctrl_info->num_queue_groups = num_queue_groups; 5280 5281 /* 5282 * Make sure that the max. inbound IU length is an even multiple 5283 * of our inbound element length. 5284 */ 5285 ctrl_info->max_inbound_iu_length = 5286 (ctrl_info->max_inbound_iu_length_per_firmware / 5287 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 5288 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 5289 5290 num_elements_per_iq = 5291 (ctrl_info->max_inbound_iu_length / 5292 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 5293 5294 /* Add one because one element in each queue is unusable. 
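 * (A circular queue with N elements can hold at most N - 1 IUs, since
 * PI == CI must mean the queue is empty.)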
*/ 5295 num_elements_per_iq++; 5296 5297 num_elements_per_iq = min(num_elements_per_iq, 5298 ctrl_info->max_elements_per_iq); 5299 5300 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 5301 num_elements_per_oq = min(num_elements_per_oq, 5302 ctrl_info->max_elements_per_oq); 5303 5304 ctrl_info->num_elements_per_iq = num_elements_per_iq; 5305 ctrl_info->num_elements_per_oq = num_elements_per_oq; 5306 5307 ctrl_info->max_sg_per_iu = 5308 ((ctrl_info->max_inbound_iu_length - 5309 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5310 sizeof(struct pqi_sg_descriptor)) + 5311 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 5312 5313 ctrl_info->max_sg_per_r56_iu = 5314 ((ctrl_info->max_inbound_iu_length - 5315 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 5316 sizeof(struct pqi_sg_descriptor)) + 5317 PQI_MAX_EMBEDDED_R56_SG_DESCRIPTORS; 5318 } 5319 5320 static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor, 5321 struct scatterlist *sg) 5322 { 5323 u64 address = (u64)sg_dma_address(sg); 5324 unsigned int length = sg_dma_len(sg); 5325 5326 put_unaligned_le64(address, &sg_descriptor->address); 5327 put_unaligned_le32(length, &sg_descriptor->length); 5328 put_unaligned_le32(0, &sg_descriptor->flags); 5329 } 5330 5331 static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor, 5332 struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request, 5333 int max_sg_per_iu, bool *chained) 5334 { 5335 int i; 5336 unsigned int num_sg_in_iu; 5337 5338 *chained = false; 5339 i = 0; 5340 num_sg_in_iu = 0; 5341 max_sg_per_iu--; /* Subtract 1 to leave room for chain marker. */ 5342 5343 while (1) { 5344 pqi_set_sg_descriptor(sg_descriptor, sg); 5345 if (!*chained) 5346 num_sg_in_iu++; 5347 i++; 5348 if (i == sg_count) 5349 break; 5350 sg_descriptor++; 5351 if (i == max_sg_per_iu) { 5352 put_unaligned_le64((u64)io_request->sg_chain_buffer_dma_handle, 5353 &sg_descriptor->address); 5354 put_unaligned_le32((sg_count - num_sg_in_iu) * sizeof(*sg_descriptor), 5355 &sg_descriptor->length); 5356 put_unaligned_le32(CISS_SG_CHAIN, &sg_descriptor->flags); 5357 *chained = true; 5358 num_sg_in_iu++; 5359 sg_descriptor = io_request->sg_chain_buffer; 5360 } 5361 sg = sg_next(sg); 5362 } 5363 5364 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 5365 5366 return num_sg_in_iu; 5367 } 5368 5369 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 5370 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 5371 struct pqi_io_request *io_request) 5372 { 5373 u16 iu_length; 5374 int sg_count; 5375 bool chained; 5376 unsigned int num_sg_in_iu; 5377 struct scatterlist *sg; 5378 struct pqi_sg_descriptor *sg_descriptor; 5379 5380 sg_count = scsi_dma_map(scmd); 5381 if (sg_count < 0) 5382 return sg_count; 5383 5384 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5385 PQI_REQUEST_HEADER_LENGTH; 5386 5387 if (sg_count == 0) 5388 goto out; 5389 5390 sg = scsi_sglist(scmd); 5391 sg_descriptor = request->sg_descriptors; 5392 5393 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5394 ctrl_info->max_sg_per_iu, &chained); 5395 5396 request->partial = chained; 5397 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5398 5399 out: 5400 put_unaligned_le16(iu_length, &request->header.iu_length); 5401 5402 return 0; 5403 } 5404 5405 static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info, 5406 struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd, 5407 struct pqi_io_request *io_request) 5408 { 5409 u16 iu_length; 5410 int 
sg_count; 5411 bool chained; 5412 unsigned int num_sg_in_iu; 5413 struct scatterlist *sg; 5414 struct pqi_sg_descriptor *sg_descriptor; 5415 5416 sg_count = scsi_dma_map(scmd); 5417 if (sg_count < 0) 5418 return sg_count; 5419 5420 iu_length = offsetof(struct pqi_aio_r1_path_request, sg_descriptors) - 5421 PQI_REQUEST_HEADER_LENGTH; 5422 num_sg_in_iu = 0; 5423 5424 if (sg_count == 0) 5425 goto out; 5426 5427 sg = scsi_sglist(scmd); 5428 sg_descriptor = request->sg_descriptors; 5429 5430 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5431 ctrl_info->max_sg_per_iu, &chained); 5432 5433 request->partial = chained; 5434 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5435 5436 out: 5437 put_unaligned_le16(iu_length, &request->header.iu_length); 5438 request->num_sg_descriptors = num_sg_in_iu; 5439 5440 return 0; 5441 } 5442 5443 static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info, 5444 struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd, 5445 struct pqi_io_request *io_request) 5446 { 5447 u16 iu_length; 5448 int sg_count; 5449 bool chained; 5450 unsigned int num_sg_in_iu; 5451 struct scatterlist *sg; 5452 struct pqi_sg_descriptor *sg_descriptor; 5453 5454 sg_count = scsi_dma_map(scmd); 5455 if (sg_count < 0) 5456 return sg_count; 5457 5458 iu_length = offsetof(struct pqi_aio_r56_path_request, sg_descriptors) - 5459 PQI_REQUEST_HEADER_LENGTH; 5460 num_sg_in_iu = 0; 5461 5462 if (sg_count != 0) { 5463 sg = scsi_sglist(scmd); 5464 sg_descriptor = request->sg_descriptors; 5465 5466 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5467 ctrl_info->max_sg_per_r56_iu, &chained); 5468 5469 request->partial = chained; 5470 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5471 } 5472 5473 put_unaligned_le16(iu_length, &request->header.iu_length); 5474 request->num_sg_descriptors = num_sg_in_iu; 5475 5476 return 0; 5477 } 5478 5479 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 5480 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 5481 struct pqi_io_request *io_request) 5482 { 5483 u16 iu_length; 5484 int sg_count; 5485 bool chained; 5486 unsigned int num_sg_in_iu; 5487 struct scatterlist *sg; 5488 struct pqi_sg_descriptor *sg_descriptor; 5489 5490 sg_count = scsi_dma_map(scmd); 5491 if (sg_count < 0) 5492 return sg_count; 5493 5494 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 5495 PQI_REQUEST_HEADER_LENGTH; 5496 num_sg_in_iu = 0; 5497 5498 if (sg_count == 0) 5499 goto out; 5500 5501 sg = scsi_sglist(scmd); 5502 sg_descriptor = request->sg_descriptors; 5503 5504 num_sg_in_iu = pqi_build_sg_list(sg_descriptor, sg, sg_count, io_request, 5505 ctrl_info->max_sg_per_iu, &chained); 5506 5507 request->partial = chained; 5508 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 5509 5510 out: 5511 put_unaligned_le16(iu_length, &request->header.iu_length); 5512 request->num_sg_descriptors = num_sg_in_iu; 5513 5514 return 0; 5515 } 5516 5517 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 5518 void *context) 5519 { 5520 struct scsi_cmnd *scmd; 5521 5522 scmd = io_request->scmd; 5523 pqi_free_io_request(io_request); 5524 scsi_dma_unmap(scmd); 5525 pqi_scsi_done(scmd); 5526 } 5527 5528 static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, 5529 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5530 struct pqi_queue_group *queue_group, bool io_high_prio) 5531 { 5532 int rc; 5533 size_t cdb_length; 5534 struct pqi_io_request *io_request; 5535 
struct pqi_raid_path_request *request; 5536 5537 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5538 if (!io_request) 5539 return SCSI_MLQUEUE_HOST_BUSY; 5540 5541 io_request->io_complete_callback = pqi_raid_io_complete; 5542 io_request->scmd = scmd; 5543 5544 request = io_request->iu; 5545 memset(request, 0, offsetof(struct pqi_raid_path_request, sg_descriptors)); 5546 5547 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5548 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5549 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5550 request->command_priority = io_high_prio; 5551 put_unaligned_le16(io_request->index, &request->request_id); 5552 request->error_index = request->request_id; 5553 memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); 5554 request->ml_device_lun_number = (u8)scmd->device->lun; 5555 5556 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 5557 memcpy(request->cdb, scmd->cmnd, cdb_length); 5558 5559 switch (cdb_length) { 5560 case 6: 5561 case 10: 5562 case 12: 5563 case 16: 5564 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5565 break; 5566 case 20: 5567 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_4; 5568 break; 5569 case 24: 5570 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_8; 5571 break; 5572 case 28: 5573 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_12; 5574 break; 5575 case 32: 5576 default: 5577 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_16; 5578 break; 5579 } 5580 5581 switch (scmd->sc_data_direction) { 5582 case DMA_FROM_DEVICE: 5583 request->data_direction = SOP_READ_FLAG; 5584 break; 5585 case DMA_TO_DEVICE: 5586 request->data_direction = SOP_WRITE_FLAG; 5587 break; 5588 case DMA_NONE: 5589 request->data_direction = SOP_NO_DIRECTION_FLAG; 5590 break; 5591 case DMA_BIDIRECTIONAL: 5592 request->data_direction = SOP_BIDIRECTIONAL; 5593 break; 5594 default: 5595 dev_err(&ctrl_info->pci_dev->dev, 5596 "unknown data direction: %d\n", 5597 scmd->sc_data_direction); 5598 break; 5599 } 5600 5601 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 5602 if (rc) { 5603 pqi_free_io_request(io_request); 5604 return SCSI_MLQUEUE_HOST_BUSY; 5605 } 5606 5607 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 5608 5609 return 0; 5610 } 5611 5612 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5613 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5614 struct pqi_queue_group *queue_group) 5615 { 5616 bool io_high_prio; 5617 5618 io_high_prio = pqi_is_io_high_priority(device, scmd); 5619 5620 return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); 5621 } 5622 5623 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 5624 { 5625 struct scsi_cmnd *scmd; 5626 struct pqi_scsi_dev *device; 5627 struct pqi_ctrl_info *ctrl_info; 5628 5629 if (!io_request->raid_bypass) 5630 return false; 5631 5632 scmd = io_request->scmd; 5633 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 5634 return false; 5635 if (host_byte(scmd->result) == DID_NO_CONNECT) 5636 return false; 5637 5638 device = scmd->device->hostdata; 5639 if (pqi_device_offline(device) || pqi_device_in_remove(device)) 5640 return false; 5641 5642 ctrl_info = shost_to_hba(scmd->device->host); 5643 if (pqi_ctrl_offline(ctrl_info)) 5644 return false; 5645 5646 return true; 5647 } 5648 5649 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 5650 void *context) 
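/*
 * Completion callback for AIO path requests. A request that completed
 * with -EAGAIN, or a RAID bypass request that failed while the device
 * and controller are still usable, is finished with DID_IMM_RETRY so
 * the SML resubmits it; this_residual is incremented so the retried
 * command is no longer eligible for the bypass path and is sent down
 * the RAID path instead.
 */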
5651 { 5652 struct scsi_cmnd *scmd; 5653 5654 scmd = io_request->scmd; 5655 scsi_dma_unmap(scmd); 5656 if (io_request->status == -EAGAIN || pqi_raid_bypass_retry_needed(io_request)) { 5657 set_host_byte(scmd, DID_IMM_RETRY); 5658 pqi_cmd_priv(scmd)->this_residual++; 5659 } 5660 5661 pqi_free_io_request(io_request); 5662 pqi_scsi_done(scmd); 5663 } 5664 5665 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 5666 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 5667 struct pqi_queue_group *queue_group) 5668 { 5669 bool io_high_prio; 5670 5671 io_high_prio = pqi_is_io_high_priority(device, scmd); 5672 5673 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 5674 scmd->cmnd, scmd->cmd_len, queue_group, NULL, 5675 false, io_high_prio); 5676 } 5677 5678 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 5679 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 5680 unsigned int cdb_length, struct pqi_queue_group *queue_group, 5681 struct pqi_encryption_info *encryption_info, bool raid_bypass, 5682 bool io_high_prio) 5683 { 5684 int rc; 5685 struct pqi_io_request *io_request; 5686 struct pqi_aio_path_request *request; 5687 5688 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5689 if (!io_request) 5690 return SCSI_MLQUEUE_HOST_BUSY; 5691 5692 io_request->io_complete_callback = pqi_aio_io_complete; 5693 io_request->scmd = scmd; 5694 io_request->raid_bypass = raid_bypass; 5695 5696 request = io_request->iu; 5697 memset(request, 0, offsetof(struct pqi_aio_path_request, sg_descriptors)); 5698 5699 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 5700 put_unaligned_le32(aio_handle, &request->nexus_id); 5701 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 5702 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5703 request->command_priority = io_high_prio; 5704 put_unaligned_le16(io_request->index, &request->request_id); 5705 request->error_index = request->request_id; 5706 if (!raid_bypass && ctrl_info->multi_lun_device_supported) 5707 put_unaligned_le64(scmd->device->lun << 8, &request->lun_number); 5708 if (cdb_length > sizeof(request->cdb)) 5709 cdb_length = sizeof(request->cdb); 5710 request->cdb_length = cdb_length; 5711 memcpy(request->cdb, cdb, cdb_length); 5712 5713 switch (scmd->sc_data_direction) { 5714 case DMA_TO_DEVICE: 5715 request->data_direction = SOP_READ_FLAG; 5716 break; 5717 case DMA_FROM_DEVICE: 5718 request->data_direction = SOP_WRITE_FLAG; 5719 break; 5720 case DMA_NONE: 5721 request->data_direction = SOP_NO_DIRECTION_FLAG; 5722 break; 5723 case DMA_BIDIRECTIONAL: 5724 request->data_direction = SOP_BIDIRECTIONAL; 5725 break; 5726 default: 5727 dev_err(&ctrl_info->pci_dev->dev, 5728 "unknown data direction: %d\n", 5729 scmd->sc_data_direction); 5730 break; 5731 } 5732 5733 if (encryption_info) { 5734 request->encryption_enable = true; 5735 put_unaligned_le16(encryption_info->data_encryption_key_index, 5736 &request->data_encryption_key_index); 5737 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5738 &request->encrypt_tweak_lower); 5739 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5740 &request->encrypt_tweak_upper); 5741 } 5742 5743 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 5744 if (rc) { 5745 pqi_free_io_request(io_request); 5746 return SCSI_MLQUEUE_HOST_BUSY; 5747 } 5748 5749 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5750 5751 return 0; 5752 } 5753 5754 static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, 5755 struct 
scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5756 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5757 struct pqi_scsi_dev_raid_map_data *rmd) 5758 { 5759 int rc; 5760 struct pqi_io_request *io_request; 5761 struct pqi_aio_r1_path_request *r1_request; 5762 5763 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5764 if (!io_request) 5765 return SCSI_MLQUEUE_HOST_BUSY; 5766 5767 io_request->io_complete_callback = pqi_aio_io_complete; 5768 io_request->scmd = scmd; 5769 io_request->raid_bypass = true; 5770 5771 r1_request = io_request->iu; 5772 memset(r1_request, 0, offsetof(struct pqi_aio_r1_path_request, sg_descriptors)); 5773 5774 r1_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID1_IO; 5775 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r1_request->volume_id); 5776 r1_request->num_drives = rmd->num_it_nexus_entries; 5777 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1); 5778 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2); 5779 if (rmd->num_it_nexus_entries == 3) 5780 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3); 5781 5782 put_unaligned_le32(scsi_bufflen(scmd), &r1_request->data_length); 5783 r1_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5784 put_unaligned_le16(io_request->index, &r1_request->request_id); 5785 r1_request->error_index = r1_request->request_id; 5786 if (rmd->cdb_length > sizeof(r1_request->cdb)) 5787 rmd->cdb_length = sizeof(r1_request->cdb); 5788 r1_request->cdb_length = rmd->cdb_length; 5789 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length); 5790 5791 /* The direction is always write. */ 5792 r1_request->data_direction = SOP_READ_FLAG; 5793 5794 if (encryption_info) { 5795 r1_request->encryption_enable = true; 5796 put_unaligned_le16(encryption_info->data_encryption_key_index, 5797 &r1_request->data_encryption_key_index); 5798 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5799 &r1_request->encrypt_tweak_lower); 5800 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5801 &r1_request->encrypt_tweak_upper); 5802 } 5803 5804 rc = pqi_build_aio_r1_sg_list(ctrl_info, r1_request, scmd, io_request); 5805 if (rc) { 5806 pqi_free_io_request(io_request); 5807 return SCSI_MLQUEUE_HOST_BUSY; 5808 } 5809 5810 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5811 5812 return 0; 5813 } 5814 5815 static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, 5816 struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group, 5817 struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device, 5818 struct pqi_scsi_dev_raid_map_data *rmd) 5819 { 5820 int rc; 5821 struct pqi_io_request *io_request; 5822 struct pqi_aio_r56_path_request *r56_request; 5823 5824 io_request = pqi_alloc_io_request(ctrl_info, scmd); 5825 if (!io_request) 5826 return SCSI_MLQUEUE_HOST_BUSY; 5827 io_request->io_complete_callback = pqi_aio_io_complete; 5828 io_request->scmd = scmd; 5829 io_request->raid_bypass = true; 5830 5831 r56_request = io_request->iu; 5832 memset(r56_request, 0, offsetof(struct pqi_aio_r56_path_request, sg_descriptors)); 5833 5834 if (device->raid_level == SA_RAID_5 || device->raid_level == SA_RAID_51) 5835 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID5_IO; 5836 else 5837 r56_request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_RAID6_IO; 5838 5839 put_unaligned_le16(*(u16 *)device->scsi3addr & 0x3fff, &r56_request->volume_id); 5840 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus); 5841 
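	/*
	 * rmd->aio_handle is the IT nexus of the data drive for this
	 * request; the P parity nexus (and, for RAID 6, the Q parity
	 * nexus and XOR multiplier) filled in below also come from the
	 * RAID map data.
	 */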
put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus); 5842 if (rmd->raid_level == SA_RAID_6) { 5843 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus); 5844 r56_request->xor_multiplier = rmd->xor_mult; 5845 } 5846 put_unaligned_le32(scsi_bufflen(scmd), &r56_request->data_length); 5847 r56_request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5848 put_unaligned_le64(rmd->row, &r56_request->row); 5849 5850 put_unaligned_le16(io_request->index, &r56_request->request_id); 5851 r56_request->error_index = r56_request->request_id; 5852 5853 if (rmd->cdb_length > sizeof(r56_request->cdb)) 5854 rmd->cdb_length = sizeof(r56_request->cdb); 5855 r56_request->cdb_length = rmd->cdb_length; 5856 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length); 5857 5858 /* The direction is always write. */ 5859 r56_request->data_direction = SOP_READ_FLAG; 5860 5861 if (encryption_info) { 5862 r56_request->encryption_enable = true; 5863 put_unaligned_le16(encryption_info->data_encryption_key_index, 5864 &r56_request->data_encryption_key_index); 5865 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 5866 &r56_request->encrypt_tweak_lower); 5867 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 5868 &r56_request->encrypt_tweak_upper); 5869 } 5870 5871 rc = pqi_build_aio_r56_sg_list(ctrl_info, r56_request, scmd, io_request); 5872 if (rc) { 5873 pqi_free_io_request(io_request); 5874 return SCSI_MLQUEUE_HOST_BUSY; 5875 } 5876 5877 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 5878 5879 return 0; 5880 } 5881 5882 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 5883 struct scsi_cmnd *scmd) 5884 { 5885 /* 5886 * We are setting host_tagset = 1 during init. 5887 */ 5888 return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); 5889 } 5890 5891 static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) 5892 { 5893 if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) 5894 return false; 5895 5896 return pqi_cmd_priv(scmd)->this_residual == 0; 5897 } 5898 5899 /* 5900 * This function gets called just before we hand the completed SCSI request 5901 * back to the SML. 5902 */ 5903 5904 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 5905 { 5906 struct pqi_scsi_dev *device; 5907 struct completion *wait; 5908 5909 if (!scmd->device) { 5910 set_host_byte(scmd, DID_NO_CONNECT); 5911 return; 5912 } 5913 5914 device = scmd->device->hostdata; 5915 if (!device) { 5916 set_host_byte(scmd, DID_NO_CONNECT); 5917 return; 5918 } 5919 5920 atomic_dec(&device->scsi_cmds_outstanding[scmd->device->lun]); 5921 5922 wait = (struct completion *)xchg(&scmd->host_scribble, NULL); 5923 if (wait != PQI_NO_COMPLETION) 5924 complete(wait); 5925 } 5926 5927 static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info, 5928 struct scsi_cmnd *scmd) 5929 { 5930 u32 oldest_jiffies; 5931 u8 lru_index; 5932 int i; 5933 int rc; 5934 struct pqi_scsi_dev *device; 5935 struct pqi_stream_data *pqi_stream_data; 5936 struct pqi_scsi_dev_raid_map_data rmd; 5937 5938 if (!ctrl_info->enable_stream_detection) 5939 return false; 5940 5941 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd); 5942 if (rc) 5943 return false; 5944 5945 /* Check writes only. */ 5946 if (!rmd.is_write) 5947 return false; 5948 5949 device = scmd->device->hostdata; 5950 5951 /* Check for RAID 5/6 streams. 
*/ 5952 if (device->raid_level != SA_RAID_5 && device->raid_level != SA_RAID_6) 5953 return false; 5954 5955 /* 5956 * If controller does not support AIO RAID{5,6} writes, need to send 5957 * requests down non-AIO path. 5958 */ 5959 if ((device->raid_level == SA_RAID_5 && !ctrl_info->enable_r5_writes) || 5960 (device->raid_level == SA_RAID_6 && !ctrl_info->enable_r6_writes)) 5961 return true; 5962 5963 lru_index = 0; 5964 oldest_jiffies = INT_MAX; 5965 for (i = 0; i < NUM_STREAMS_PER_LUN; i++) { 5966 pqi_stream_data = &device->stream_data[i]; 5967 /* 5968 * Check for adjacent request or request is within 5969 * the previous request. 5970 */ 5971 if ((pqi_stream_data->next_lba && 5972 rmd.first_block >= pqi_stream_data->next_lba) && 5973 rmd.first_block <= pqi_stream_data->next_lba + 5974 rmd.block_cnt) { 5975 pqi_stream_data->next_lba = rmd.first_block + 5976 rmd.block_cnt; 5977 pqi_stream_data->last_accessed = jiffies; 5978 return true; 5979 } 5980 5981 /* unused entry */ 5982 if (pqi_stream_data->last_accessed == 0) { 5983 lru_index = i; 5984 break; 5985 } 5986 5987 /* Find entry with oldest last accessed time. */ 5988 if (pqi_stream_data->last_accessed <= oldest_jiffies) { 5989 oldest_jiffies = pqi_stream_data->last_accessed; 5990 lru_index = i; 5991 } 5992 } 5993 5994 /* Set LRU entry. */ 5995 pqi_stream_data = &device->stream_data[lru_index]; 5996 pqi_stream_data->last_accessed = jiffies; 5997 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt; 5998 5999 return false; 6000 } 6001 6002 static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 6003 { 6004 int rc; 6005 struct pqi_ctrl_info *ctrl_info; 6006 struct pqi_scsi_dev *device; 6007 u16 hw_queue; 6008 struct pqi_queue_group *queue_group; 6009 bool raid_bypassed; 6010 u8 lun; 6011 6012 scmd->host_scribble = PQI_NO_COMPLETION; 6013 6014 device = scmd->device->hostdata; 6015 6016 if (!device) { 6017 set_host_byte(scmd, DID_NO_CONNECT); 6018 pqi_scsi_done(scmd); 6019 return 0; 6020 } 6021 6022 lun = (u8)scmd->device->lun; 6023 6024 atomic_inc(&device->scsi_cmds_outstanding[lun]); 6025 6026 ctrl_info = shost_to_hba(shost); 6027 6028 if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) { 6029 set_host_byte(scmd, DID_NO_CONNECT); 6030 pqi_scsi_done(scmd); 6031 return 0; 6032 } 6033 6034 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device, lun)) { 6035 rc = SCSI_MLQUEUE_HOST_BUSY; 6036 goto out; 6037 } 6038 6039 /* 6040 * This is necessary because the SML doesn't zero out this field during 6041 * error recovery. 
6042 */ 6043 scmd->result = 0; 6044 6045 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 6046 queue_group = &ctrl_info->queue_groups[hw_queue]; 6047 6048 if (pqi_is_logical_device(device)) { 6049 raid_bypassed = false; 6050 if (device->raid_bypass_enabled && 6051 pqi_is_bypass_eligible_request(scmd) && 6052 !pqi_is_parity_write_stream(ctrl_info, scmd)) { 6053 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6054 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { 6055 raid_bypassed = true; 6056 device->raid_bypass_cnt++; 6057 } 6058 } 6059 if (!raid_bypassed) 6060 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6061 } else { 6062 if (device->aio_enabled) 6063 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6064 else 6065 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); 6066 } 6067 6068 out: 6069 if (rc) { 6070 scmd->host_scribble = NULL; 6071 atomic_dec(&device->scsi_cmds_outstanding[lun]); 6072 } 6073 6074 return rc; 6075 } 6076 6077 static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info) 6078 { 6079 unsigned int i; 6080 unsigned int path; 6081 unsigned long flags; 6082 unsigned int queued_io_count; 6083 struct pqi_queue_group *queue_group; 6084 struct pqi_io_request *io_request; 6085 6086 queued_io_count = 0; 6087 6088 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6089 queue_group = &ctrl_info->queue_groups[i]; 6090 for (path = 0; path < 2; path++) { 6091 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 6092 list_for_each_entry(io_request, &queue_group->request_list[path], request_list_entry) 6093 queued_io_count++; 6094 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 6095 } 6096 } 6097 6098 return queued_io_count; 6099 } 6100 6101 static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info) 6102 { 6103 unsigned int i; 6104 unsigned int path; 6105 unsigned int nonempty_inbound_queue_count; 6106 struct pqi_queue_group *queue_group; 6107 pqi_index_t iq_pi; 6108 pqi_index_t iq_ci; 6109 6110 nonempty_inbound_queue_count = 0; 6111 6112 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6113 queue_group = &ctrl_info->queue_groups[i]; 6114 for (path = 0; path < 2; path++) { 6115 iq_pi = queue_group->iq_pi_copy[path]; 6116 iq_ci = readl(queue_group->iq_ci[path]); 6117 if (iq_ci != iq_pi) 6118 nonempty_inbound_queue_count++; 6119 } 6120 } 6121 6122 return nonempty_inbound_queue_count; 6123 } 6124 6125 #define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS 10 6126 6127 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 6128 { 6129 unsigned long start_jiffies; 6130 unsigned long warning_timeout; 6131 unsigned int queued_io_count; 6132 unsigned int nonempty_inbound_queue_count; 6133 bool displayed_warning; 6134 6135 displayed_warning = false; 6136 start_jiffies = jiffies; 6137 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6138 6139 while (1) { 6140 queued_io_count = pqi_queued_io_count(ctrl_info); 6141 nonempty_inbound_queue_count = pqi_nonempty_inbound_queue_count(ctrl_info); 6142 if (queued_io_count == 0 && nonempty_inbound_queue_count == 0) 6143 break; 6144 pqi_check_ctrl_health(ctrl_info); 6145 if (pqi_ctrl_offline(ctrl_info)) 6146 return -ENXIO; 6147 if (time_after(jiffies, warning_timeout)) { 6148 dev_warn(&ctrl_info->pci_dev->dev, 6149 "waiting %u seconds for queued I/O to drain (queued I/O count: %u; non-empty inbound queue count: %u)\n", 6150 
jiffies_to_msecs(jiffies - start_jiffies) / 1000, queued_io_count, nonempty_inbound_queue_count); 6151 displayed_warning = true; 6152 warning_timeout = (PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6153 } 6154 usleep_range(1000, 2000); 6155 } 6156 6157 if (displayed_warning) 6158 dev_warn(&ctrl_info->pci_dev->dev, 6159 "queued I/O drained after waiting for %u seconds\n", 6160 jiffies_to_msecs(jiffies - start_jiffies) / 1000); 6161 6162 return 0; 6163 } 6164 6165 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 6166 struct pqi_scsi_dev *device, u8 lun) 6167 { 6168 unsigned int i; 6169 unsigned int path; 6170 struct pqi_queue_group *queue_group; 6171 unsigned long flags; 6172 struct pqi_io_request *io_request; 6173 struct pqi_io_request *next; 6174 struct scsi_cmnd *scmd; 6175 struct pqi_scsi_dev *scsi_device; 6176 6177 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6178 queue_group = &ctrl_info->queue_groups[i]; 6179 6180 for (path = 0; path < 2; path++) { 6181 spin_lock_irqsave( 6182 &queue_group->submit_lock[path], flags); 6183 6184 list_for_each_entry_safe(io_request, next, 6185 &queue_group->request_list[path], 6186 request_list_entry) { 6187 6188 scmd = io_request->scmd; 6189 if (!scmd) 6190 continue; 6191 6192 scsi_device = scmd->device->hostdata; 6193 if (scsi_device != device) 6194 continue; 6195 6196 if ((u8)scmd->device->lun != lun) 6197 continue; 6198 6199 list_del(&io_request->request_list_entry); 6200 set_host_byte(scmd, DID_RESET); 6201 pqi_free_io_request(io_request); 6202 scsi_dma_unmap(scmd); 6203 pqi_scsi_done(scmd); 6204 } 6205 6206 spin_unlock_irqrestore( 6207 &queue_group->submit_lock[path], flags); 6208 } 6209 } 6210 } 6211 6212 #define PQI_PENDING_IO_WARNING_TIMEOUT_SECS 10 6213 6214 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 6215 struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs) 6216 { 6217 int cmds_outstanding; 6218 unsigned long start_jiffies; 6219 unsigned long warning_timeout; 6220 unsigned long msecs_waiting; 6221 6222 start_jiffies = jiffies; 6223 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + start_jiffies; 6224 6225 while ((cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun])) > 0) { 6226 if (ctrl_info->ctrl_removal_state != PQI_CTRL_GRACEFUL_REMOVAL) { 6227 pqi_check_ctrl_health(ctrl_info); 6228 if (pqi_ctrl_offline(ctrl_info)) 6229 return -ENXIO; 6230 } 6231 msecs_waiting = jiffies_to_msecs(jiffies - start_jiffies); 6232 if (msecs_waiting >= timeout_msecs) { 6233 dev_err(&ctrl_info->pci_dev->dev, 6234 "scsi %d:%d:%d:%d: timed out after %lu seconds waiting for %d outstanding command(s)\n", 6235 ctrl_info->scsi_host->host_no, device->bus, device->target, 6236 lun, msecs_waiting / 1000, cmds_outstanding); 6237 return -ETIMEDOUT; 6238 } 6239 if (time_after(jiffies, warning_timeout)) { 6240 dev_warn(&ctrl_info->pci_dev->dev, 6241 "scsi %d:%d:%d:%d: waiting %lu seconds for %d outstanding command(s)\n", 6242 ctrl_info->scsi_host->host_no, device->bus, device->target, 6243 lun, msecs_waiting / 1000, cmds_outstanding); 6244 warning_timeout = (PQI_PENDING_IO_WARNING_TIMEOUT_SECS * HZ) + jiffies; 6245 } 6246 usleep_range(1000, 2000); 6247 } 6248 6249 return 0; 6250 } 6251 6252 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 6253 void *context) 6254 { 6255 struct completion *waiting = context; 6256 6257 complete(waiting); 6258 } 6259 6260 #define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 6261 6262 static int 
pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 6263 struct pqi_scsi_dev *device, u8 lun, struct completion *wait) 6264 { 6265 int rc; 6266 unsigned int wait_secs; 6267 int cmds_outstanding; 6268 6269 wait_secs = 0; 6270 6271 while (1) { 6272 if (wait_for_completion_io_timeout(wait, 6273 PQI_LUN_RESET_POLL_COMPLETION_SECS * HZ)) { 6274 rc = 0; 6275 break; 6276 } 6277 6278 pqi_check_ctrl_health(ctrl_info); 6279 if (pqi_ctrl_offline(ctrl_info)) { 6280 rc = -ENXIO; 6281 break; 6282 } 6283 6284 wait_secs += PQI_LUN_RESET_POLL_COMPLETION_SECS; 6285 cmds_outstanding = atomic_read(&device->scsi_cmds_outstanding[lun]); 6286 dev_warn(&ctrl_info->pci_dev->dev, 6287 "scsi %d:%d:%d:%d: waiting %u seconds for LUN reset to complete (%d command(s) outstanding)\n", 6288 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, wait_secs, cmds_outstanding); 6289 } 6290 6291 return rc; 6292 } 6293 6294 #define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS 30 6295 6296 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6297 { 6298 int rc; 6299 struct pqi_io_request *io_request; 6300 DECLARE_COMPLETION_ONSTACK(wait); 6301 struct pqi_task_management_request *request; 6302 6303 io_request = pqi_alloc_io_request(ctrl_info, NULL); 6304 io_request->io_complete_callback = pqi_lun_reset_complete; 6305 io_request->context = &wait; 6306 6307 request = io_request->iu; 6308 memset(request, 0, sizeof(*request)); 6309 6310 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT; 6311 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 6312 &request->header.iu_length); 6313 put_unaligned_le16(io_request->index, &request->request_id); 6314 memcpy(request->lun_number, device->scsi3addr, 6315 sizeof(request->lun_number)); 6316 if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) 6317 request->ml_device_lun_number = lun; 6318 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 6319 if (ctrl_info->tmf_iu_timeout_supported) 6320 put_unaligned_le16(PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS, &request->timeout); 6321 6322 pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 6323 io_request); 6324 6325 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, lun, &wait); 6326 if (rc == 0) 6327 rc = io_request->status; 6328 6329 pqi_free_io_request(io_request); 6330 6331 return rc; 6332 } 6333 6334 #define PQI_LUN_RESET_RETRIES 3 6335 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS (10 * 1000) 6336 #define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS (10 * 60 * 1000) 6337 #define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS (2 * 60 * 1000) 6338 6339 static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6340 { 6341 int reset_rc; 6342 int wait_rc; 6343 unsigned int retries; 6344 unsigned long timeout_msecs; 6345 6346 for (retries = 0;;) { 6347 reset_rc = pqi_lun_reset(ctrl_info, device, lun); 6348 if (reset_rc == 0 || reset_rc == -ENODEV || reset_rc == -ENXIO || ++retries > PQI_LUN_RESET_RETRIES) 6349 break; 6350 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 6351 } 6352 6353 timeout_msecs = reset_rc ? PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS : 6354 PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS; 6355 6356 wait_rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, timeout_msecs); 6357 if (wait_rc && reset_rc == 0) 6358 reset_rc = wait_rc; 6359 6360 return reset_rc == 0 ? 
SUCCESS : FAILED; 6361 } 6362 6363 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun) 6364 { 6365 int rc; 6366 6367 pqi_ctrl_block_requests(ctrl_info); 6368 pqi_ctrl_wait_until_quiesced(ctrl_info); 6369 pqi_fail_io_queued_for_device(ctrl_info, device, lun); 6370 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 6371 pqi_device_reset_start(device, lun); 6372 pqi_ctrl_unblock_requests(ctrl_info); 6373 if (rc) 6374 rc = FAILED; 6375 else 6376 rc = pqi_lun_reset_with_retries(ctrl_info, device, lun); 6377 pqi_device_reset_done(device, lun); 6378 6379 return rc; 6380 } 6381 6382 static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode) 6383 { 6384 int rc; 6385 6386 mutex_lock(&ctrl_info->lun_reset_mutex); 6387 6388 dev_err(&ctrl_info->pci_dev->dev, 6389 "resetting scsi %d:%d:%d:%u SCSI cmd at %p due to cmd opcode 0x%02x\n", 6390 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, scmd, scsi_opcode); 6391 6392 pqi_check_ctrl_health(ctrl_info); 6393 if (pqi_ctrl_offline(ctrl_info)) 6394 rc = FAILED; 6395 else 6396 rc = pqi_device_reset(ctrl_info, device, lun); 6397 6398 dev_err(&ctrl_info->pci_dev->dev, 6399 "reset of scsi %d:%d:%d:%u: %s\n", 6400 ctrl_info->scsi_host->host_no, device->bus, device->target, lun, 6401 rc == SUCCESS ? "SUCCESS" : "FAILED"); 6402 6403 mutex_unlock(&ctrl_info->lun_reset_mutex); 6404 6405 return rc; 6406 } 6407 6408 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 6409 { 6410 struct Scsi_Host *shost; 6411 struct pqi_ctrl_info *ctrl_info; 6412 struct pqi_scsi_dev *device; 6413 u8 scsi_opcode; 6414 6415 shost = scmd->device->host; 6416 ctrl_info = shost_to_hba(shost); 6417 device = scmd->device->hostdata; 6418 scsi_opcode = scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff; 6419 6420 return pqi_device_reset_handler(ctrl_info, device, (u8)scmd->device->lun, scmd, scsi_opcode); 6421 } 6422 6423 static void pqi_tmf_worker(struct work_struct *work) 6424 { 6425 struct pqi_tmf_work *tmf_work; 6426 struct scsi_cmnd *scmd; 6427 6428 tmf_work = container_of(work, struct pqi_tmf_work, work_struct); 6429 scmd = (struct scsi_cmnd *)xchg(&tmf_work->scmd, NULL); 6430 6431 pqi_device_reset_handler(tmf_work->ctrl_info, tmf_work->device, tmf_work->lun, scmd, tmf_work->scsi_opcode); 6432 } 6433 6434 static int pqi_eh_abort_handler(struct scsi_cmnd *scmd) 6435 { 6436 struct Scsi_Host *shost; 6437 struct pqi_ctrl_info *ctrl_info; 6438 struct pqi_scsi_dev *device; 6439 struct pqi_tmf_work *tmf_work; 6440 DECLARE_COMPLETION_ONSTACK(wait); 6441 6442 shost = scmd->device->host; 6443 ctrl_info = shost_to_hba(shost); 6444 device = scmd->device->hostdata; 6445 6446 dev_err(&ctrl_info->pci_dev->dev, 6447 "attempting TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p\n", 6448 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6449 6450 if (cmpxchg(&scmd->host_scribble, PQI_NO_COMPLETION, (void *)&wait) == NULL) { 6451 dev_err(&ctrl_info->pci_dev->dev, 6452 "scsi %d:%d:%d:%d for SCSI cmd at %p already completed\n", 6453 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6454 scmd->result = DID_RESET << 16; 6455 goto out; 6456 } 6457 6458 tmf_work = &device->tmf_work[scmd->device->lun]; 6459 6460 if (cmpxchg(&tmf_work->scmd, NULL, scmd) == NULL) { 6461 tmf_work->ctrl_info = ctrl_info; 6462 tmf_work->device = device; 6463 tmf_work->lun = (u8)scmd->device->lun; 6464 tmf_work->scsi_opcode = scmd->cmd_len > 0 ? scmd->cmnd[0] : 0xff; 6465 schedule_work(&tmf_work->work_struct); 6466 } 6467 6468 wait_for_completion(&wait); 6469 6470 dev_err(&ctrl_info->pci_dev->dev, 6471 "TASK ABORT on scsi %d:%d:%d:%d for SCSI cmd at %p: SUCCESS\n", 6472 shost->host_no, device->bus, device->target, (int)scmd->device->lun, scmd); 6473 6474 out: 6475 6476 return SUCCESS; 6477 } 6478 6479 static int pqi_slave_alloc(struct scsi_device *sdev) 6480 { 6481 struct pqi_scsi_dev *device; 6482 unsigned long flags; 6483 struct pqi_ctrl_info *ctrl_info; 6484 struct scsi_target *starget; 6485 struct sas_rphy *rphy; 6486 6487 ctrl_info = shost_to_hba(sdev->host); 6488 6489 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6490 6491 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 6492 starget = scsi_target(sdev); 6493 rphy = target_to_rphy(starget); 6494 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 6495 if (device) { 6496 if (device->target_lun_valid) { 6497 device->ignore_device = true; 6498 } else { 6499 device->target = sdev_id(sdev); 6500 device->lun = sdev->lun; 6501 device->target_lun_valid = true; 6502 } 6503 } 6504 } else { 6505 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 6506 sdev_id(sdev), sdev->lun); 6507 } 6508 6509 if (device) { 6510 sdev->hostdata = device; 6511 device->sdev = sdev; 6512 if (device->queue_depth) { 6513 device->advertised_queue_depth = device->queue_depth; 6514 scsi_change_queue_depth(sdev, 6515 device->advertised_queue_depth); 6516 } 6517 if (pqi_is_logical_device(device)) { 6518 pqi_disable_write_same(sdev); 6519 } else { 6520 sdev->allow_restart = 1; 6521 if (device->device_type == SA_DEVICE_TYPE_NVME) 6522 pqi_disable_write_same(sdev); 6523 } 6524 } 6525 6526 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6527 6528 return 0; 6529 } 6530 6531 static void 
pqi_map_queues(struct Scsi_Host *shost) 6532 { 6533 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6534 6535 if (!ctrl_info->disable_managed_interrupts) 6536 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 6537 ctrl_info->pci_dev, 0); 6538 else 6539 return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]); 6540 } 6541 6542 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device) 6543 { 6544 return device->devtype == TYPE_TAPE || device->devtype == TYPE_MEDIUM_CHANGER; 6545 } 6546 6547 static int pqi_slave_configure(struct scsi_device *sdev) 6548 { 6549 int rc = 0; 6550 struct pqi_scsi_dev *device; 6551 6552 device = sdev->hostdata; 6553 device->devtype = sdev->type; 6554 6555 if (pqi_is_tape_changer_device(device) && device->ignore_device) { 6556 rc = -ENXIO; 6557 device->ignore_device = false; 6558 } 6559 6560 return rc; 6561 } 6562 6563 static void pqi_slave_destroy(struct scsi_device *sdev) 6564 { 6565 struct pqi_ctrl_info *ctrl_info; 6566 struct pqi_scsi_dev *device; 6567 int mutex_acquired; 6568 unsigned long flags; 6569 6570 ctrl_info = shost_to_hba(sdev->host); 6571 6572 mutex_acquired = mutex_trylock(&ctrl_info->scan_mutex); 6573 if (!mutex_acquired) 6574 return; 6575 6576 device = sdev->hostdata; 6577 if (!device) { 6578 mutex_unlock(&ctrl_info->scan_mutex); 6579 return; 6580 } 6581 6582 device->lun_count--; 6583 if (device->lun_count > 0) { 6584 mutex_unlock(&ctrl_info->scan_mutex); 6585 return; 6586 } 6587 6588 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 6589 list_del(&device->scsi_device_list_entry); 6590 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 6591 6592 mutex_unlock(&ctrl_info->scan_mutex); 6593 6594 pqi_dev_info(ctrl_info, "removed", device); 6595 pqi_free_device(device); 6596 } 6597 6598 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6599 { 6600 struct pci_dev *pci_dev; 6601 u32 subsystem_vendor; 6602 u32 subsystem_device; 6603 cciss_pci_info_struct pci_info; 6604 6605 if (!arg) 6606 return -EINVAL; 6607 6608 pci_dev = ctrl_info->pci_dev; 6609 6610 pci_info.domain = pci_domain_nr(pci_dev->bus); 6611 pci_info.bus = pci_dev->bus->number; 6612 pci_info.dev_fn = pci_dev->devfn; 6613 subsystem_vendor = pci_dev->subsystem_vendor; 6614 subsystem_device = pci_dev->subsystem_device; 6615 pci_info.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor; 6616 6617 if (copy_to_user(arg, &pci_info, sizeof(pci_info))) 6618 return -EFAULT; 6619 6620 return 0; 6621 } 6622 6623 static int pqi_getdrivver_ioctl(void __user *arg) 6624 { 6625 u32 version; 6626 6627 if (!arg) 6628 return -EINVAL; 6629 6630 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 6631 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 6632 6633 if (copy_to_user(arg, &version, sizeof(version))) 6634 return -EFAULT; 6635 6636 return 0; 6637 } 6638 6639 struct ciss_error_info { 6640 u8 scsi_status; 6641 int command_status; 6642 size_t sense_data_length; 6643 }; 6644 6645 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 6646 struct ciss_error_info *ciss_error_info) 6647 { 6648 int ciss_cmd_status; 6649 size_t sense_data_length; 6650 6651 switch (pqi_error_info->data_out_result) { 6652 case PQI_DATA_IN_OUT_GOOD: 6653 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 6654 break; 6655 case PQI_DATA_IN_OUT_UNDERFLOW: 6656 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 6657 break; 6658 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 6659 ciss_cmd_status = 
CISS_CMD_STATUS_DATA_OVERRUN; 6660 break; 6661 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 6662 case PQI_DATA_IN_OUT_BUFFER_ERROR: 6663 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 6664 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 6665 case PQI_DATA_IN_OUT_ERROR: 6666 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 6667 break; 6668 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 6669 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 6670 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 6671 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 6672 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 6673 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 6674 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 6675 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 6676 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 6677 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 6678 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 6679 break; 6680 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 6681 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 6682 break; 6683 case PQI_DATA_IN_OUT_ABORTED: 6684 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 6685 break; 6686 case PQI_DATA_IN_OUT_TIMEOUT: 6687 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 6688 break; 6689 default: 6690 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 6691 break; 6692 } 6693 6694 sense_data_length = 6695 get_unaligned_le16(&pqi_error_info->sense_data_length); 6696 if (sense_data_length == 0) 6697 sense_data_length = 6698 get_unaligned_le16(&pqi_error_info->response_data_length); 6699 if (sense_data_length) 6700 if (sense_data_length > sizeof(pqi_error_info->data)) 6701 sense_data_length = sizeof(pqi_error_info->data); 6702 6703 ciss_error_info->scsi_status = pqi_error_info->status; 6704 ciss_error_info->command_status = ciss_cmd_status; 6705 ciss_error_info->sense_data_length = sense_data_length; 6706 } 6707 6708 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 6709 { 6710 int rc; 6711 char *kernel_buffer = NULL; 6712 u16 iu_length; 6713 size_t sense_data_length; 6714 IOCTL_Command_struct iocommand; 6715 struct pqi_raid_path_request request; 6716 struct pqi_raid_error_info pqi_error_info; 6717 struct ciss_error_info ciss_error_info; 6718 6719 if (pqi_ctrl_offline(ctrl_info)) 6720 return -ENXIO; 6721 if (pqi_ofa_in_progress(ctrl_info) && pqi_ctrl_blocked(ctrl_info)) 6722 return -EBUSY; 6723 if (!arg) 6724 return -EINVAL; 6725 if (!capable(CAP_SYS_RAWIO)) 6726 return -EPERM; 6727 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 6728 return -EFAULT; 6729 if (iocommand.buf_size < 1 && 6730 iocommand.Request.Type.Direction != XFER_NONE) 6731 return -EINVAL; 6732 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 6733 return -EINVAL; 6734 if (iocommand.Request.Type.Type != TYPE_CMD) 6735 return -EINVAL; 6736 6737 switch (iocommand.Request.Type.Direction) { 6738 case XFER_NONE: 6739 case XFER_WRITE: 6740 case XFER_READ: 6741 case XFER_READ | XFER_WRITE: 6742 break; 6743 default: 6744 return -EINVAL; 6745 } 6746 6747 if (iocommand.buf_size > 0) { 6748 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 6749 if (!kernel_buffer) 6750 return -ENOMEM; 6751 if (iocommand.Request.Type.Direction & XFER_WRITE) { 6752 if (copy_from_user(kernel_buffer, iocommand.buf, 6753 iocommand.buf_size)) { 6754 rc = -EFAULT; 6755 goto out; 6756 } 6757 } else { 6758 memset(kernel_buffer, 0, iocommand.buf_size); 6759 } 6760 } 6761 6762 memset(&request, 0, sizeof(request)); 6763 6764 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 6765 iu_length = offsetof(struct 
pqi_raid_path_request, sg_descriptors) - 6766 PQI_REQUEST_HEADER_LENGTH; 6767 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 6768 sizeof(request.lun_number)); 6769 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 6770 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 6771 6772 switch (iocommand.Request.Type.Direction) { 6773 case XFER_NONE: 6774 request.data_direction = SOP_NO_DIRECTION_FLAG; 6775 break; 6776 case XFER_WRITE: 6777 request.data_direction = SOP_WRITE_FLAG; 6778 break; 6779 case XFER_READ: 6780 request.data_direction = SOP_READ_FLAG; 6781 break; 6782 case XFER_READ | XFER_WRITE: 6783 request.data_direction = SOP_BIDIRECTIONAL; 6784 break; 6785 } 6786 6787 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 6788 6789 if (iocommand.buf_size > 0) { 6790 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 6791 6792 rc = pqi_map_single(ctrl_info->pci_dev, 6793 &request.sg_descriptors[0], kernel_buffer, 6794 iocommand.buf_size, DMA_BIDIRECTIONAL); 6795 if (rc) 6796 goto out; 6797 6798 iu_length += sizeof(request.sg_descriptors[0]); 6799 } 6800 6801 put_unaligned_le16(iu_length, &request.header.iu_length); 6802 6803 if (ctrl_info->raid_iu_timeout_supported) 6804 put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); 6805 6806 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6807 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info); 6808 6809 if (iocommand.buf_size > 0) 6810 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 6811 DMA_BIDIRECTIONAL); 6812 6813 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 6814 6815 if (rc == 0) { 6816 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 6817 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 6818 iocommand.error_info.CommandStatus = 6819 ciss_error_info.command_status; 6820 sense_data_length = ciss_error_info.sense_data_length; 6821 if (sense_data_length) { 6822 if (sense_data_length > 6823 sizeof(iocommand.error_info.SenseInfo)) 6824 sense_data_length = 6825 sizeof(iocommand.error_info.SenseInfo); 6826 memcpy(iocommand.error_info.SenseInfo, 6827 pqi_error_info.data, sense_data_length); 6828 iocommand.error_info.SenseLen = sense_data_length; 6829 } 6830 } 6831 6832 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 6833 rc = -EFAULT; 6834 goto out; 6835 } 6836 6837 if (rc == 0 && iocommand.buf_size > 0 && 6838 (iocommand.Request.Type.Direction & XFER_READ)) { 6839 if (copy_to_user(iocommand.buf, kernel_buffer, 6840 iocommand.buf_size)) { 6841 rc = -EFAULT; 6842 } 6843 } 6844 6845 out: 6846 kfree(kernel_buffer); 6847 6848 return rc; 6849 } 6850 6851 static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, 6852 void __user *arg) 6853 { 6854 int rc; 6855 struct pqi_ctrl_info *ctrl_info; 6856 6857 ctrl_info = shost_to_hba(sdev->host); 6858 6859 switch (cmd) { 6860 case CCISS_DEREGDISK: 6861 case CCISS_REGNEWDISK: 6862 case CCISS_REGNEWD: 6863 rc = pqi_scan_scsi_devices(ctrl_info); 6864 break; 6865 case CCISS_GETPCIINFO: 6866 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 6867 break; 6868 case CCISS_GETDRIVVER: 6869 rc = pqi_getdrivver_ioctl(arg); 6870 break; 6871 case CCISS_PASSTHRU: 6872 rc = pqi_passthru_ioctl(ctrl_info, arg); 6873 break; 6874 default: 6875 rc = -EINVAL; 6876 break; 6877 } 6878 6879 return rc; 6880 } 6881 6882 static ssize_t pqi_firmware_version_show(struct device *dev, 6883 struct device_attribute *attr, char *buffer) 6884 { 6885 struct Scsi_Host *shost; 6886 struct pqi_ctrl_info 
*ctrl_info; 6887 6888 shost = class_to_shost(dev); 6889 ctrl_info = shost_to_hba(shost); 6890 6891 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); 6892 } 6893 6894 static ssize_t pqi_serial_number_show(struct device *dev, 6895 struct device_attribute *attr, char *buffer) 6896 { 6897 struct Scsi_Host *shost; 6898 struct pqi_ctrl_info *ctrl_info; 6899 6900 shost = class_to_shost(dev); 6901 ctrl_info = shost_to_hba(shost); 6902 6903 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); 6904 } 6905 6906 static ssize_t pqi_model_show(struct device *dev, 6907 struct device_attribute *attr, char *buffer) 6908 { 6909 struct Scsi_Host *shost; 6910 struct pqi_ctrl_info *ctrl_info; 6911 6912 shost = class_to_shost(dev); 6913 ctrl_info = shost_to_hba(shost); 6914 6915 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); 6916 } 6917 6918 static ssize_t pqi_vendor_show(struct device *dev, 6919 struct device_attribute *attr, char *buffer) 6920 { 6921 struct Scsi_Host *shost; 6922 struct pqi_ctrl_info *ctrl_info; 6923 6924 shost = class_to_shost(dev); 6925 ctrl_info = shost_to_hba(shost); 6926 6927 return scnprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); 6928 } 6929 6930 static ssize_t pqi_host_rescan_store(struct device *dev, 6931 struct device_attribute *attr, const char *buffer, size_t count) 6932 { 6933 struct Scsi_Host *shost = class_to_shost(dev); 6934 6935 pqi_scan_start(shost); 6936 6937 return count; 6938 } 6939 6940 static ssize_t pqi_lockup_action_show(struct device *dev, 6941 struct device_attribute *attr, char *buffer) 6942 { 6943 int count = 0; 6944 unsigned int i; 6945 6946 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6947 if (pqi_lockup_actions[i].action == pqi_lockup_action) 6948 count += scnprintf(buffer + count, PAGE_SIZE - count, 6949 "[%s] ", pqi_lockup_actions[i].name); 6950 else 6951 count += scnprintf(buffer + count, PAGE_SIZE - count, 6952 "%s ", pqi_lockup_actions[i].name); 6953 } 6954 6955 count += scnprintf(buffer + count, PAGE_SIZE - count, "\n"); 6956 6957 return count; 6958 } 6959 6960 static ssize_t pqi_lockup_action_store(struct device *dev, 6961 struct device_attribute *attr, const char *buffer, size_t count) 6962 { 6963 unsigned int i; 6964 char *action_name; 6965 char action_name_buffer[32]; 6966 6967 strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 6968 action_name = strstrip(action_name_buffer); 6969 6970 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6971 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 6972 pqi_lockup_action = pqi_lockup_actions[i].action; 6973 return count; 6974 } 6975 } 6976 6977 return -EINVAL; 6978 } 6979 6980 static ssize_t pqi_host_enable_stream_detection_show(struct device *dev, 6981 struct device_attribute *attr, char *buffer) 6982 { 6983 struct Scsi_Host *shost = class_to_shost(dev); 6984 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6985 6986 return scnprintf(buffer, 10, "%x\n", 6987 ctrl_info->enable_stream_detection); 6988 } 6989 6990 static ssize_t pqi_host_enable_stream_detection_store(struct device *dev, 6991 struct device_attribute *attr, const char *buffer, size_t count) 6992 { 6993 struct Scsi_Host *shost = class_to_shost(dev); 6994 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 6995 u8 set_stream_detection = 0; 6996 6997 if (kstrtou8(buffer, 0, &set_stream_detection)) 6998 return -EINVAL; 6999 7000 if (set_stream_detection > 0) 7001 set_stream_detection = 1; 7002 7003 ctrl_info->enable_stream_detection = 
set_stream_detection; 7004 7005 return count; 7006 } 7007 7008 static ssize_t pqi_host_enable_r5_writes_show(struct device *dev, 7009 struct device_attribute *attr, char *buffer) 7010 { 7011 struct Scsi_Host *shost = class_to_shost(dev); 7012 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7013 7014 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r5_writes); 7015 } 7016 7017 static ssize_t pqi_host_enable_r5_writes_store(struct device *dev, 7018 struct device_attribute *attr, const char *buffer, size_t count) 7019 { 7020 struct Scsi_Host *shost = class_to_shost(dev); 7021 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7022 u8 set_r5_writes = 0; 7023 7024 if (kstrtou8(buffer, 0, &set_r5_writes)) 7025 return -EINVAL; 7026 7027 if (set_r5_writes > 0) 7028 set_r5_writes = 1; 7029 7030 ctrl_info->enable_r5_writes = set_r5_writes; 7031 7032 return count; 7033 } 7034 7035 static ssize_t pqi_host_enable_r6_writes_show(struct device *dev, 7036 struct device_attribute *attr, char *buffer) 7037 { 7038 struct Scsi_Host *shost = class_to_shost(dev); 7039 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7040 7041 return scnprintf(buffer, 10, "%x\n", ctrl_info->enable_r6_writes); 7042 } 7043 7044 static ssize_t pqi_host_enable_r6_writes_store(struct device *dev, 7045 struct device_attribute *attr, const char *buffer, size_t count) 7046 { 7047 struct Scsi_Host *shost = class_to_shost(dev); 7048 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 7049 u8 set_r6_writes = 0; 7050 7051 if (kstrtou8(buffer, 0, &set_r6_writes)) 7052 return -EINVAL; 7053 7054 if (set_r6_writes > 0) 7055 set_r6_writes = 1; 7056 7057 ctrl_info->enable_r6_writes = set_r6_writes; 7058 7059 return count; 7060 } 7061 7062 static DEVICE_STRING_ATTR_RO(driver_version, 0444, 7063 DRIVER_VERSION BUILD_TIMESTAMP); 7064 static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); 7065 static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); 7066 static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); 7067 static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); 7068 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 7069 static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, 7070 pqi_lockup_action_store); 7071 static DEVICE_ATTR(enable_stream_detection, 0644, 7072 pqi_host_enable_stream_detection_show, 7073 pqi_host_enable_stream_detection_store); 7074 static DEVICE_ATTR(enable_r5_writes, 0644, 7075 pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store); 7076 static DEVICE_ATTR(enable_r6_writes, 0644, 7077 pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store); 7078 7079 static struct attribute *pqi_shost_attrs[] = { 7080 &dev_attr_driver_version.attr.attr, 7081 &dev_attr_firmware_version.attr, 7082 &dev_attr_model.attr, 7083 &dev_attr_serial_number.attr, 7084 &dev_attr_vendor.attr, 7085 &dev_attr_rescan.attr, 7086 &dev_attr_lockup_action.attr, 7087 &dev_attr_enable_stream_detection.attr, 7088 &dev_attr_enable_r5_writes.attr, 7089 &dev_attr_enable_r6_writes.attr, 7090 NULL 7091 }; 7092 7093 ATTRIBUTE_GROUPS(pqi_shost); 7094 7095 static ssize_t pqi_unique_id_show(struct device *dev, 7096 struct device_attribute *attr, char *buffer) 7097 { 7098 struct pqi_ctrl_info *ctrl_info; 7099 struct scsi_device *sdev; 7100 struct pqi_scsi_dev *device; 7101 unsigned long flags; 7102 u8 unique_id[16]; 7103 7104 sdev = to_scsi_device(dev); 7105 ctrl_info = shost_to_hba(sdev->host); 7106 7107 if (pqi_ctrl_offline(ctrl_info)) 7108 return -ENODEV; 7109 7110 
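	/*
	 * The unique ID is the WWID for physical devices and the volume
	 * ID for logical devices, formatted below as 16 hex bytes.
	 */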
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7111 7112 device = sdev->hostdata; 7113 if (!device) { 7114 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7115 return -ENODEV; 7116 } 7117 7118 if (device->is_physical_device) 7119 memcpy(unique_id, device->wwid, sizeof(device->wwid)); 7120 else 7121 memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); 7122 7123 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7124 7125 return scnprintf(buffer, PAGE_SIZE, 7126 "%02X%02X%02X%02X%02X%02X%02X%02X" 7127 "%02X%02X%02X%02X%02X%02X%02X%02X\n", 7128 unique_id[0], unique_id[1], unique_id[2], unique_id[3], 7129 unique_id[4], unique_id[5], unique_id[6], unique_id[7], 7130 unique_id[8], unique_id[9], unique_id[10], unique_id[11], 7131 unique_id[12], unique_id[13], unique_id[14], unique_id[15]); 7132 } 7133 7134 static ssize_t pqi_lunid_show(struct device *dev, 7135 struct device_attribute *attr, char *buffer) 7136 { 7137 struct pqi_ctrl_info *ctrl_info; 7138 struct scsi_device *sdev; 7139 struct pqi_scsi_dev *device; 7140 unsigned long flags; 7141 u8 lunid[8]; 7142 7143 sdev = to_scsi_device(dev); 7144 ctrl_info = shost_to_hba(sdev->host); 7145 7146 if (pqi_ctrl_offline(ctrl_info)) 7147 return -ENODEV; 7148 7149 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7150 7151 device = sdev->hostdata; 7152 if (!device) { 7153 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7154 return -ENODEV; 7155 } 7156 7157 memcpy(lunid, device->scsi3addr, sizeof(lunid)); 7158 7159 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7160 7161 return scnprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); 7162 } 7163 7164 #define MAX_PATHS 8 7165 7166 static ssize_t pqi_path_info_show(struct device *dev, 7167 struct device_attribute *attr, char *buf) 7168 { 7169 struct pqi_ctrl_info *ctrl_info; 7170 struct scsi_device *sdev; 7171 struct pqi_scsi_dev *device; 7172 unsigned long flags; 7173 int i; 7174 int output_len = 0; 7175 u8 box; 7176 u8 bay; 7177 u8 path_map_index; 7178 char *active; 7179 u8 phys_connector[2]; 7180 7181 sdev = to_scsi_device(dev); 7182 ctrl_info = shost_to_hba(sdev->host); 7183 7184 if (pqi_ctrl_offline(ctrl_info)) 7185 return -ENODEV; 7186 7187 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7188 7189 device = sdev->hostdata; 7190 if (!device) { 7191 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7192 return -ENODEV; 7193 } 7194 7195 bay = device->bay; 7196 for (i = 0; i < MAX_PATHS; i++) { 7197 path_map_index = 1 << i; 7198 if (i == device->active_path_index) 7199 active = "Active"; 7200 else if (device->path_map & path_map_index) 7201 active = "Inactive"; 7202 else 7203 continue; 7204 7205 output_len += scnprintf(buf + output_len, 7206 PAGE_SIZE - output_len, 7207 "[%d:%d:%d:%d] %20.20s ", 7208 ctrl_info->scsi_host->host_no, 7209 device->bus, device->target, 7210 device->lun, 7211 scsi_device_type(device->devtype)); 7212 7213 if (device->devtype == TYPE_RAID || 7214 pqi_is_logical_device(device)) 7215 goto end_buffer; 7216 7217 memcpy(&phys_connector, &device->phys_connector[i], 7218 sizeof(phys_connector)); 7219 if (phys_connector[0] < '0') 7220 phys_connector[0] = '0'; 7221 if (phys_connector[1] < '0') 7222 phys_connector[1] = '0'; 7223 7224 output_len += scnprintf(buf + output_len, 7225 PAGE_SIZE - output_len, 7226 "PORT: %.2s ", phys_connector); 7227 7228 box = device->box[i]; 7229 if (box != 0 && box != 0xFF) 7230 output_len += scnprintf(buf + output_len, 7231 
PAGE_SIZE - output_len, 7232 "BOX: %hhu ", box); 7233 7234 if ((device->devtype == TYPE_DISK || 7235 device->devtype == TYPE_ZBC) && 7236 pqi_expose_device(device)) 7237 output_len += scnprintf(buf + output_len, 7238 PAGE_SIZE - output_len, 7239 "BAY: %hhu ", bay); 7240 7241 end_buffer: 7242 output_len += scnprintf(buf + output_len, 7243 PAGE_SIZE - output_len, 7244 "%s\n", active); 7245 } 7246 7247 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7248 7249 return output_len; 7250 } 7251 7252 static ssize_t pqi_sas_address_show(struct device *dev, 7253 struct device_attribute *attr, char *buffer) 7254 { 7255 struct pqi_ctrl_info *ctrl_info; 7256 struct scsi_device *sdev; 7257 struct pqi_scsi_dev *device; 7258 unsigned long flags; 7259 u64 sas_address; 7260 7261 sdev = to_scsi_device(dev); 7262 ctrl_info = shost_to_hba(sdev->host); 7263 7264 if (pqi_ctrl_offline(ctrl_info)) 7265 return -ENODEV; 7266 7267 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7268 7269 device = sdev->hostdata; 7270 if (!device) { 7271 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7272 return -ENODEV; 7273 } 7274 7275 sas_address = device->sas_address; 7276 7277 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7278 7279 return scnprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 7280 } 7281 7282 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 7283 struct device_attribute *attr, char *buffer) 7284 { 7285 struct pqi_ctrl_info *ctrl_info; 7286 struct scsi_device *sdev; 7287 struct pqi_scsi_dev *device; 7288 unsigned long flags; 7289 7290 sdev = to_scsi_device(dev); 7291 ctrl_info = shost_to_hba(sdev->host); 7292 7293 if (pqi_ctrl_offline(ctrl_info)) 7294 return -ENODEV; 7295 7296 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7297 7298 device = sdev->hostdata; 7299 if (!device) { 7300 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7301 return -ENODEV; 7302 } 7303 7304 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 7305 buffer[1] = '\n'; 7306 buffer[2] = '\0'; 7307 7308 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7309 7310 return 2; 7311 } 7312 7313 static ssize_t pqi_raid_level_show(struct device *dev, 7314 struct device_attribute *attr, char *buffer) 7315 { 7316 struct pqi_ctrl_info *ctrl_info; 7317 struct scsi_device *sdev; 7318 struct pqi_scsi_dev *device; 7319 unsigned long flags; 7320 char *raid_level; 7321 7322 sdev = to_scsi_device(dev); 7323 ctrl_info = shost_to_hba(sdev->host); 7324 7325 if (pqi_ctrl_offline(ctrl_info)) 7326 return -ENODEV; 7327 7328 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7329 7330 device = sdev->hostdata; 7331 if (!device) { 7332 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7333 return -ENODEV; 7334 } 7335 7336 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) 7337 raid_level = pqi_raid_level_to_string(device->raid_level); 7338 else 7339 raid_level = "N/A"; 7340 7341 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7342 7343 return scnprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 7344 } 7345 7346 static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, 7347 struct device_attribute *attr, char *buffer) 7348 { 7349 struct pqi_ctrl_info *ctrl_info; 7350 struct scsi_device *sdev; 7351 struct pqi_scsi_dev *device; 7352 unsigned long flags; 7353 unsigned int raid_bypass_cnt; 7354 7355 sdev = to_scsi_device(dev); 7356 ctrl_info = shost_to_hba(sdev->host); 7357 7358 if (pqi_ctrl_offline(ctrl_info)) 7359 return -ENODEV; 7360 7361 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7362 7363 device = sdev->hostdata; 7364 if (!device) { 7365 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7366 return -ENODEV; 7367 } 7368 7369 raid_bypass_cnt = device->raid_bypass_cnt; 7370 7371 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7372 7373 return scnprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt); 7374 } 7375 7376 static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev, 7377 struct device_attribute *attr, char *buf) 7378 { 7379 struct pqi_ctrl_info *ctrl_info; 7380 struct scsi_device *sdev; 7381 struct pqi_scsi_dev *device; 7382 unsigned long flags; 7383 int output_len = 0; 7384 7385 sdev = to_scsi_device(dev); 7386 ctrl_info = shost_to_hba(sdev->host); 7387 7388 if (pqi_ctrl_offline(ctrl_info)) 7389 return -ENODEV; 7390 7391 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7392 7393 device = sdev->hostdata; 7394 if (!device) { 7395 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7396 return -ENODEV; 7397 } 7398 7399 output_len = snprintf(buf, PAGE_SIZE, "%d\n", 7400 device->ncq_prio_enable); 7401 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7402 7403 return output_len; 7404 } 7405 7406 static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, 7407 struct device_attribute *attr, 7408 const char *buf, size_t count) 7409 { 7410 struct pqi_ctrl_info *ctrl_info; 7411 struct scsi_device *sdev; 7412 struct pqi_scsi_dev *device; 7413 unsigned long flags; 7414 u8 ncq_prio_enable = 0; 7415 7416 if (kstrtou8(buf, 0, &ncq_prio_enable)) 7417 return -EINVAL; 7418 7419 sdev = to_scsi_device(dev); 7420 ctrl_info = shost_to_hba(sdev->host); 7421 7422 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 7423 7424 device = sdev->hostdata; 7425 7426 if (!device) { 7427 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7428 return -ENODEV; 7429 } 7430 7431 if 
(!device->ncq_prio_support) { 7432 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7433 return -EINVAL; 7434 } 7435 7436 device->ncq_prio_enable = ncq_prio_enable; 7437 7438 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 7439 7440 return strlen(buf); 7441 } 7442 7443 static ssize_t pqi_numa_node_show(struct device *dev, 7444 struct device_attribute *attr, char *buffer) 7445 { 7446 struct scsi_device *sdev; 7447 struct pqi_ctrl_info *ctrl_info; 7448 7449 sdev = to_scsi_device(dev); 7450 ctrl_info = shost_to_hba(sdev->host); 7451 7452 return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); 7453 } 7454 7455 static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); 7456 static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); 7457 static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); 7458 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 7459 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL); 7460 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 7461 static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); 7462 static DEVICE_ATTR(sas_ncq_prio_enable, 0644, 7463 pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); 7464 static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); 7465 7466 static struct attribute *pqi_sdev_attrs[] = { 7467 &dev_attr_lunid.attr, 7468 &dev_attr_unique_id.attr, 7469 &dev_attr_path_info.attr, 7470 &dev_attr_sas_address.attr, 7471 &dev_attr_ssd_smart_path_enabled.attr, 7472 &dev_attr_raid_level.attr, 7473 &dev_attr_raid_bypass_cnt.attr, 7474 &dev_attr_sas_ncq_prio_enable.attr, 7475 &dev_attr_numa_node.attr, 7476 NULL 7477 }; 7478 7479 ATTRIBUTE_GROUPS(pqi_sdev); 7480 7481 static const struct scsi_host_template pqi_driver_template = { 7482 .module = THIS_MODULE, 7483 .name = DRIVER_NAME_SHORT, 7484 .proc_name = DRIVER_NAME_SHORT, 7485 .queuecommand = pqi_scsi_queue_command, 7486 .scan_start = pqi_scan_start, 7487 .scan_finished = pqi_scan_finished, 7488 .this_id = -1, 7489 .eh_device_reset_handler = pqi_eh_device_reset_handler, 7490 .eh_abort_handler = pqi_eh_abort_handler, 7491 .ioctl = pqi_ioctl, 7492 .slave_alloc = pqi_slave_alloc, 7493 .slave_configure = pqi_slave_configure, 7494 .slave_destroy = pqi_slave_destroy, 7495 .map_queues = pqi_map_queues, 7496 .sdev_groups = pqi_sdev_groups, 7497 .shost_groups = pqi_shost_groups, 7498 .cmd_size = sizeof(struct pqi_cmd_priv), 7499 }; 7500 7501 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 7502 { 7503 int rc; 7504 struct Scsi_Host *shost; 7505 7506 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 7507 if (!shost) { 7508 dev_err(&ctrl_info->pci_dev->dev, "scsi_host_alloc failed\n"); 7509 return -ENOMEM; 7510 } 7511 7512 shost->io_port = 0; 7513 shost->n_io_port = 0; 7514 shost->this_id = -1; 7515 shost->max_channel = PQI_MAX_BUS; 7516 shost->max_cmd_len = MAX_COMMAND_SIZE; 7517 shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; 7518 shost->max_id = ~0; 7519 shost->max_sectors = ctrl_info->max_sectors; 7520 shost->can_queue = ctrl_info->scsi_ml_can_queue; 7521 shost->cmd_per_lun = shost->can_queue; 7522 shost->sg_tablesize = ctrl_info->sg_tablesize; 7523 shost->transportt = pqi_sas_transport_template; 7524 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 7525 shost->unique_id = shost->irq; 7526 shost->nr_hw_queues = ctrl_info->num_queue_groups; 7527 shost->host_tagset = 1; 7528 shost->hostdata[0] = (unsigned long)ctrl_info; 7529 7530 rc = 
scsi_add_host(shost, &ctrl_info->pci_dev->dev); 7531 if (rc) { 7532 dev_err(&ctrl_info->pci_dev->dev, "scsi_add_host failed\n"); 7533 goto free_host; 7534 } 7535 7536 rc = pqi_add_sas_host(shost, ctrl_info); 7537 if (rc) { 7538 dev_err(&ctrl_info->pci_dev->dev, "add SAS host failed\n"); 7539 goto remove_host; 7540 } 7541 7542 ctrl_info->scsi_host = shost; 7543 7544 return 0; 7545 7546 remove_host: 7547 scsi_remove_host(shost); 7548 free_host: 7549 scsi_host_put(shost); 7550 7551 return rc; 7552 } 7553 7554 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 7555 { 7556 struct Scsi_Host *shost; 7557 7558 pqi_delete_sas_host(ctrl_info); 7559 7560 shost = ctrl_info->scsi_host; 7561 if (!shost) 7562 return; 7563 7564 scsi_remove_host(shost); 7565 scsi_host_put(shost); 7566 } 7567 7568 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 7569 { 7570 int rc = 0; 7571 struct pqi_device_registers __iomem *pqi_registers; 7572 unsigned long timeout; 7573 unsigned int timeout_msecs; 7574 union pqi_reset_register reset_reg; 7575 7576 pqi_registers = ctrl_info->pqi_registers; 7577 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 7578 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 7579 7580 while (1) { 7581 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 7582 reset_reg.all_bits = readl(&pqi_registers->device_reset); 7583 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 7584 break; 7585 if (!sis_is_firmware_running(ctrl_info)) { 7586 rc = -ENXIO; 7587 break; 7588 } 7589 if (time_after(jiffies, timeout)) { 7590 rc = -ETIMEDOUT; 7591 break; 7592 } 7593 } 7594 7595 return rc; 7596 } 7597 7598 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 7599 { 7600 int rc; 7601 union pqi_reset_register reset_reg; 7602 7603 if (ctrl_info->pqi_reset_quiesce_supported) { 7604 rc = sis_pqi_reset_quiesce(ctrl_info); 7605 if (rc) { 7606 dev_err(&ctrl_info->pci_dev->dev, 7607 "PQI reset failed during quiesce with error %d\n", rc); 7608 return rc; 7609 } 7610 } 7611 7612 reset_reg.all_bits = 0; 7613 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 7614 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 7615 7616 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 7617 7618 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 7619 if (rc) 7620 dev_err(&ctrl_info->pci_dev->dev, 7621 "PQI reset failed with error %d\n", rc); 7622 7623 return rc; 7624 } 7625 7626 static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) 7627 { 7628 int rc; 7629 struct bmic_sense_subsystem_info *sense_info; 7630 7631 sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); 7632 if (!sense_info) 7633 return -ENOMEM; 7634 7635 rc = pqi_sense_subsystem_info(ctrl_info, sense_info); 7636 if (rc) 7637 goto out; 7638 7639 memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, 7640 sizeof(sense_info->ctrl_serial_number)); 7641 ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; 7642 7643 out: 7644 kfree(sense_info); 7645 7646 return rc; 7647 } 7648 7649 static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) 7650 { 7651 int rc; 7652 struct bmic_identify_controller *identify; 7653 7654 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 7655 if (!identify) 7656 return -ENOMEM; 7657 7658 rc = pqi_identify_controller(ctrl_info, identify); 7659 if (rc) 7660 goto out; 7661 7662 if (get_unaligned_le32(&identify->extra_controller_flags) & 7663 BMIC_IDENTIFY_EXTRA_FLAGS_LONG_FW_VERSION_SUPPORTED) { 7664 
memcpy(ctrl_info->firmware_version, 7665 identify->firmware_version_long, 7666 sizeof(identify->firmware_version_long)); 7667 } else { 7668 memcpy(ctrl_info->firmware_version, 7669 identify->firmware_version_short, 7670 sizeof(identify->firmware_version_short)); 7671 ctrl_info->firmware_version 7672 [sizeof(identify->firmware_version_short)] = '\0'; 7673 snprintf(ctrl_info->firmware_version + 7674 strlen(ctrl_info->firmware_version), 7675 sizeof(ctrl_info->firmware_version) - 7676 sizeof(identify->firmware_version_short), 7677 "-%u", 7678 get_unaligned_le16(&identify->firmware_build_number)); 7679 } 7680 7681 memcpy(ctrl_info->model, identify->product_id, 7682 sizeof(identify->product_id)); 7683 ctrl_info->model[sizeof(identify->product_id)] = '\0'; 7684 7685 memcpy(ctrl_info->vendor, identify->vendor_id, 7686 sizeof(identify->vendor_id)); 7687 ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; 7688 7689 dev_info(&ctrl_info->pci_dev->dev, 7690 "Firmware version: %s\n", ctrl_info->firmware_version); 7691 7692 out: 7693 kfree(identify); 7694 7695 return rc; 7696 } 7697 7698 struct pqi_config_table_section_info { 7699 struct pqi_ctrl_info *ctrl_info; 7700 void *section; 7701 u32 section_offset; 7702 void __iomem *section_iomem_addr; 7703 }; 7704 7705 static inline bool pqi_is_firmware_feature_supported( 7706 struct pqi_config_table_firmware_features *firmware_features, 7707 unsigned int bit_position) 7708 { 7709 unsigned int byte_index; 7710 7711 byte_index = bit_position / BITS_PER_BYTE; 7712 7713 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 7714 return false; 7715 7716 return firmware_features->features_supported[byte_index] & 7717 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 7718 } 7719 7720 static inline bool pqi_is_firmware_feature_enabled( 7721 struct pqi_config_table_firmware_features *firmware_features, 7722 void __iomem *firmware_features_iomem_addr, 7723 unsigned int bit_position) 7724 { 7725 unsigned int byte_index; 7726 u8 __iomem *features_enabled_iomem_addr; 7727 7728 byte_index = (bit_position / BITS_PER_BYTE) + 7729 (le16_to_cpu(firmware_features->num_elements) * 2); 7730 7731 features_enabled_iomem_addr = firmware_features_iomem_addr + 7732 offsetof(struct pqi_config_table_firmware_features, 7733 features_supported) + byte_index; 7734 7735 return *((__force u8 *)features_enabled_iomem_addr) & 7736 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 7737 } 7738 7739 static inline void pqi_request_firmware_feature( 7740 struct pqi_config_table_firmware_features *firmware_features, 7741 unsigned int bit_position) 7742 { 7743 unsigned int byte_index; 7744 7745 byte_index = (bit_position / BITS_PER_BYTE) + 7746 le16_to_cpu(firmware_features->num_elements); 7747 7748 firmware_features->features_supported[byte_index] |= 7749 (1 << (bit_position % BITS_PER_BYTE)); 7750 } 7751 7752 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 7753 u16 first_section, u16 last_section) 7754 { 7755 struct pqi_vendor_general_request request; 7756 7757 memset(&request, 0, sizeof(request)); 7758 7759 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 7760 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 7761 &request.header.iu_length); 7762 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 7763 &request.function_code); 7764 put_unaligned_le16(first_section, 7765 &request.data.config_table_update.first_section); 7766 put_unaligned_le16(last_section, 7767 &request.data.config_table_update.last_section); 7768 7769 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, NULL); 7770 } 7771 7772 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 7773 struct pqi_config_table_firmware_features *firmware_features, 7774 void __iomem *firmware_features_iomem_addr) 7775 { 7776 void *features_requested; 7777 void __iomem *features_requested_iomem_addr; 7778 void __iomem *host_max_known_feature_iomem_addr; 7779 7780 features_requested = firmware_features->features_supported + 7781 le16_to_cpu(firmware_features->num_elements); 7782 7783 features_requested_iomem_addr = firmware_features_iomem_addr + 7784 (features_requested - (void *)firmware_features); 7785 7786 memcpy_toio(features_requested_iomem_addr, features_requested, 7787 le16_to_cpu(firmware_features->num_elements)); 7788 7789 if (pqi_is_firmware_feature_supported(firmware_features, 7790 PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE)) { 7791 host_max_known_feature_iomem_addr = 7792 features_requested_iomem_addr + 7793 (le16_to_cpu(firmware_features->num_elements) * 2) + 7794 sizeof(__le16); 7795 writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); 7796 writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); 7797 } 7798 7799 return pqi_config_table_update(ctrl_info, 7800 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 7801 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 7802 } 7803 7804 struct pqi_firmware_feature { 7805 char *feature_name; 7806 unsigned int feature_bit; 7807 bool supported; 7808 bool enabled; 7809 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 7810 struct pqi_firmware_feature *firmware_feature); 7811 }; 7812 7813 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 7814 struct pqi_firmware_feature *firmware_feature) 7815 { 7816 if (!firmware_feature->supported) { 7817 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 7818 firmware_feature->feature_name); 7819 return; 7820 } 7821 7822 if (firmware_feature->enabled) { 7823 dev_info(&ctrl_info->pci_dev->dev, 7824 "%s enabled\n", firmware_feature->feature_name); 7825 return; 7826 } 7827 7828 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 7829 firmware_feature->feature_name); 7830 } 7831 7832 static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, 7833 struct pqi_firmware_feature *firmware_feature) 7834 { 7835 switch 
(firmware_feature->feature_bit) { 7836 case PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS: 7837 ctrl_info->enable_r1_writes = firmware_feature->enabled; 7838 break; 7839 case PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS: 7840 ctrl_info->enable_r5_writes = firmware_feature->enabled; 7841 break; 7842 case PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS: 7843 ctrl_info->enable_r6_writes = firmware_feature->enabled; 7844 break; 7845 case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: 7846 ctrl_info->soft_reset_handshake_supported = 7847 firmware_feature->enabled && 7848 pqi_read_soft_reset_status(ctrl_info); 7849 break; 7850 case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: 7851 ctrl_info->raid_iu_timeout_supported = firmware_feature->enabled; 7852 break; 7853 case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: 7854 ctrl_info->tmf_iu_timeout_supported = firmware_feature->enabled; 7855 break; 7856 case PQI_FIRMWARE_FEATURE_FW_TRIAGE: 7857 ctrl_info->firmware_triage_supported = firmware_feature->enabled; 7858 pqi_save_fw_triage_setting(ctrl_info, firmware_feature->enabled); 7859 break; 7860 case PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5: 7861 ctrl_info->rpl_extended_format_4_5_supported = firmware_feature->enabled; 7862 break; 7863 case PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT: 7864 ctrl_info->multi_lun_device_supported = firmware_feature->enabled; 7865 break; 7866 } 7867 7868 pqi_firmware_feature_status(ctrl_info, firmware_feature); 7869 } 7870 7871 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 7872 struct pqi_firmware_feature *firmware_feature) 7873 { 7874 if (firmware_feature->feature_status) 7875 firmware_feature->feature_status(ctrl_info, firmware_feature); 7876 } 7877 7878 static DEFINE_MUTEX(pqi_firmware_features_mutex); 7879 7880 static struct pqi_firmware_feature pqi_firmware_features[] = { 7881 { 7882 .feature_name = "Online Firmware Activation", 7883 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 7884 .feature_status = pqi_firmware_feature_status, 7885 }, 7886 { 7887 .feature_name = "Serial Management Protocol", 7888 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 7889 .feature_status = pqi_firmware_feature_status, 7890 }, 7891 { 7892 .feature_name = "Maximum Known Feature", 7893 .feature_bit = PQI_FIRMWARE_FEATURE_MAX_KNOWN_FEATURE, 7894 .feature_status = pqi_firmware_feature_status, 7895 }, 7896 { 7897 .feature_name = "RAID 0 Read Bypass", 7898 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_READ_BYPASS, 7899 .feature_status = pqi_firmware_feature_status, 7900 }, 7901 { 7902 .feature_name = "RAID 1 Read Bypass", 7903 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_READ_BYPASS, 7904 .feature_status = pqi_firmware_feature_status, 7905 }, 7906 { 7907 .feature_name = "RAID 5 Read Bypass", 7908 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_READ_BYPASS, 7909 .feature_status = pqi_firmware_feature_status, 7910 }, 7911 { 7912 .feature_name = "RAID 6 Read Bypass", 7913 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_READ_BYPASS, 7914 .feature_status = pqi_firmware_feature_status, 7915 }, 7916 { 7917 .feature_name = "RAID 0 Write Bypass", 7918 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_0_WRITE_BYPASS, 7919 .feature_status = pqi_firmware_feature_status, 7920 }, 7921 { 7922 .feature_name = "RAID 1 Write Bypass", 7923 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_1_WRITE_BYPASS, 7924 .feature_status = pqi_ctrl_update_feature_flags, 7925 }, 7926 { 7927 .feature_name = "RAID 5 Write Bypass", 7928 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_5_WRITE_BYPASS, 7929 .feature_status = pqi_ctrl_update_feature_flags, 7930 }, 7931 { 7932 
.feature_name = "RAID 6 Write Bypass", 7933 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_6_WRITE_BYPASS, 7934 .feature_status = pqi_ctrl_update_feature_flags, 7935 }, 7936 { 7937 .feature_name = "New Soft Reset Handshake", 7938 .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, 7939 .feature_status = pqi_ctrl_update_feature_flags, 7940 }, 7941 { 7942 .feature_name = "RAID IU Timeout", 7943 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, 7944 .feature_status = pqi_ctrl_update_feature_flags, 7945 }, 7946 { 7947 .feature_name = "TMF IU Timeout", 7948 .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, 7949 .feature_status = pqi_ctrl_update_feature_flags, 7950 }, 7951 { 7952 .feature_name = "RAID Bypass on encrypted logical volumes on NVMe", 7953 .feature_bit = PQI_FIRMWARE_FEATURE_RAID_BYPASS_ON_ENCRYPTED_NVME, 7954 .feature_status = pqi_firmware_feature_status, 7955 }, 7956 { 7957 .feature_name = "Firmware Triage", 7958 .feature_bit = PQI_FIRMWARE_FEATURE_FW_TRIAGE, 7959 .feature_status = pqi_ctrl_update_feature_flags, 7960 }, 7961 { 7962 .feature_name = "RPL Extended Formats 4 and 5", 7963 .feature_bit = PQI_FIRMWARE_FEATURE_RPL_EXTENDED_FORMAT_4_5, 7964 .feature_status = pqi_ctrl_update_feature_flags, 7965 }, 7966 { 7967 .feature_name = "Multi-LUN Target", 7968 .feature_bit = PQI_FIRMWARE_FEATURE_MULTI_LUN_DEVICE_SUPPORT, 7969 .feature_status = pqi_ctrl_update_feature_flags, 7970 }, 7971 }; 7972 7973 static void pqi_process_firmware_features( 7974 struct pqi_config_table_section_info *section_info) 7975 { 7976 int rc; 7977 struct pqi_ctrl_info *ctrl_info; 7978 struct pqi_config_table_firmware_features *firmware_features; 7979 void __iomem *firmware_features_iomem_addr; 7980 unsigned int i; 7981 unsigned int num_features_supported; 7982 7983 ctrl_info = section_info->ctrl_info; 7984 firmware_features = section_info->section; 7985 firmware_features_iomem_addr = section_info->section_iomem_addr; 7986 7987 for (i = 0, num_features_supported = 0; 7988 i < ARRAY_SIZE(pqi_firmware_features); i++) { 7989 if (pqi_is_firmware_feature_supported(firmware_features, 7990 pqi_firmware_features[i].feature_bit)) { 7991 pqi_firmware_features[i].supported = true; 7992 num_features_supported++; 7993 } else { 7994 pqi_firmware_feature_update(ctrl_info, 7995 &pqi_firmware_features[i]); 7996 } 7997 } 7998 7999 if (num_features_supported == 0) 8000 return; 8001 8002 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8003 if (!pqi_firmware_features[i].supported) 8004 continue; 8005 pqi_request_firmware_feature(firmware_features, 8006 pqi_firmware_features[i].feature_bit); 8007 } 8008 8009 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 8010 firmware_features_iomem_addr); 8011 if (rc) { 8012 dev_err(&ctrl_info->pci_dev->dev, 8013 "failed to enable firmware features in PQI configuration table\n"); 8014 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8015 if (!pqi_firmware_features[i].supported) 8016 continue; 8017 pqi_firmware_feature_update(ctrl_info, 8018 &pqi_firmware_features[i]); 8019 } 8020 return; 8021 } 8022 8023 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8024 if (!pqi_firmware_features[i].supported) 8025 continue; 8026 if (pqi_is_firmware_feature_enabled(firmware_features, 8027 firmware_features_iomem_addr, 8028 pqi_firmware_features[i].feature_bit)) { 8029 pqi_firmware_features[i].enabled = true; 8030 } 8031 pqi_firmware_feature_update(ctrl_info, 8032 &pqi_firmware_features[i]); 8033 } 8034 } 8035 8036 static void pqi_init_firmware_features(void) 8037 
{ 8038 unsigned int i; 8039 8040 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 8041 pqi_firmware_features[i].supported = false; 8042 pqi_firmware_features[i].enabled = false; 8043 } 8044 } 8045 8046 static void pqi_process_firmware_features_section( 8047 struct pqi_config_table_section_info *section_info) 8048 { 8049 mutex_lock(&pqi_firmware_features_mutex); 8050 pqi_init_firmware_features(); 8051 pqi_process_firmware_features(section_info); 8052 mutex_unlock(&pqi_firmware_features_mutex); 8053 } 8054 8055 /* 8056 * Reset all controller settings that can be initialized during the processing 8057 * of the PQI Configuration Table. 8058 */ 8059 8060 static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info) 8061 { 8062 ctrl_info->heartbeat_counter = NULL; 8063 ctrl_info->soft_reset_status = NULL; 8064 ctrl_info->soft_reset_handshake_supported = false; 8065 ctrl_info->enable_r1_writes = false; 8066 ctrl_info->enable_r5_writes = false; 8067 ctrl_info->enable_r6_writes = false; 8068 ctrl_info->raid_iu_timeout_supported = false; 8069 ctrl_info->tmf_iu_timeout_supported = false; 8070 ctrl_info->firmware_triage_supported = false; 8071 ctrl_info->rpl_extended_format_4_5_supported = false; 8072 ctrl_info->multi_lun_device_supported = false; 8073 } 8074 8075 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 8076 { 8077 u32 table_length; 8078 u32 section_offset; 8079 bool firmware_feature_section_present; 8080 void __iomem *table_iomem_addr; 8081 struct pqi_config_table *config_table; 8082 struct pqi_config_table_section_header *section; 8083 struct pqi_config_table_section_info section_info; 8084 struct pqi_config_table_section_info feature_section_info = {0}; 8085 8086 table_length = ctrl_info->config_table_length; 8087 if (table_length == 0) 8088 return 0; 8089 8090 config_table = kmalloc(table_length, GFP_KERNEL); 8091 if (!config_table) { 8092 dev_err(&ctrl_info->pci_dev->dev, 8093 "failed to allocate memory for PQI configuration table\n"); 8094 return -ENOMEM; 8095 } 8096 8097 /* 8098 * Copy the config table contents from I/O memory space into the 8099 * temporary buffer. 
8100 */ 8101 table_iomem_addr = ctrl_info->iomem_base + ctrl_info->config_table_offset; 8102 memcpy_fromio(config_table, table_iomem_addr, table_length); 8103 8104 firmware_feature_section_present = false; 8105 section_info.ctrl_info = ctrl_info; 8106 section_offset = get_unaligned_le32(&config_table->first_section_offset); 8107 8108 while (section_offset) { 8109 section = (void *)config_table + section_offset; 8110 8111 section_info.section = section; 8112 section_info.section_offset = section_offset; 8113 section_info.section_iomem_addr = table_iomem_addr + section_offset; 8114 8115 switch (get_unaligned_le16(&section->section_id)) { 8116 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 8117 firmware_feature_section_present = true; 8118 feature_section_info = section_info; 8119 break; 8120 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 8121 if (pqi_disable_heartbeat) 8122 dev_warn(&ctrl_info->pci_dev->dev, 8123 "heartbeat disabled by module parameter\n"); 8124 else 8125 ctrl_info->heartbeat_counter = 8126 table_iomem_addr + 8127 section_offset + 8128 offsetof(struct pqi_config_table_heartbeat, 8129 heartbeat_counter); 8130 break; 8131 case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: 8132 ctrl_info->soft_reset_status = 8133 table_iomem_addr + 8134 section_offset + 8135 offsetof(struct pqi_config_table_soft_reset, 8136 soft_reset_status); 8137 break; 8138 } 8139 8140 section_offset = get_unaligned_le16(&section->next_section_offset); 8141 } 8142 8143 /* 8144 * We process the firmware feature section after all other sections 8145 * have been processed so that the feature bit callbacks can take 8146 * into account the settings configured by other sections. 8147 */ 8148 if (firmware_feature_section_present) 8149 pqi_process_firmware_features_section(&feature_section_info); 8150 8151 kfree(config_table); 8152 8153 return 0; 8154 } 8155 8156 /* Switches the controller from PQI mode back into SIS mode. */ 8157 8158 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 8159 { 8160 int rc; 8161 8162 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 8163 rc = pqi_reset(ctrl_info); 8164 if (rc) 8165 return rc; 8166 rc = sis_reenable_sis_mode(ctrl_info); 8167 if (rc) { 8168 dev_err(&ctrl_info->pci_dev->dev, 8169 "re-enabling SIS mode failed with error %d\n", rc); 8170 return rc; 8171 } 8172 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8173 8174 return 0; 8175 } 8176 8177 /* 8178 * If the controller isn't already in SIS mode, this function forces it into 8179 * SIS mode.
8180 */ 8181 8182 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 8183 { 8184 if (!sis_is_firmware_running(ctrl_info)) 8185 return -ENXIO; 8186 8187 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 8188 return 0; 8189 8190 if (sis_is_kernel_up(ctrl_info)) { 8191 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 8192 return 0; 8193 } 8194 8195 return pqi_revert_to_sis_mode(ctrl_info); 8196 } 8197 8198 static void pqi_perform_lockup_action(void) 8199 { 8200 switch (pqi_lockup_action) { 8201 case PANIC: 8202 panic("FATAL: Smart Family Controller lockup detected"); 8203 break; 8204 case REBOOT: 8205 emergency_restart(); 8206 break; 8207 case NONE: 8208 default: 8209 break; 8210 } 8211 } 8212 8213 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 8214 { 8215 int rc; 8216 u32 product_id; 8217 8218 if (reset_devices) { 8219 if (pqi_is_fw_triage_supported(ctrl_info)) { 8220 rc = sis_wait_for_fw_triage_completion(ctrl_info); 8221 if (rc) 8222 return rc; 8223 } 8224 sis_soft_reset(ctrl_info); 8225 ssleep(PQI_POST_RESET_DELAY_SECS); 8226 } else { 8227 rc = pqi_force_sis_mode(ctrl_info); 8228 if (rc) 8229 return rc; 8230 } 8231 8232 /* 8233 * Wait until the controller is ready to start accepting SIS 8234 * commands. 8235 */ 8236 rc = sis_wait_for_ctrl_ready(ctrl_info); 8237 if (rc) { 8238 if (reset_devices) { 8239 dev_err(&ctrl_info->pci_dev->dev, 8240 "kdump init failed with error %d\n", rc); 8241 pqi_lockup_action = REBOOT; 8242 pqi_perform_lockup_action(); 8243 } 8244 return rc; 8245 } 8246 8247 /* 8248 * Get the controller properties. This allows us to determine 8249 * whether or not it supports PQI mode. 8250 */ 8251 rc = sis_get_ctrl_properties(ctrl_info); 8252 if (rc) { 8253 dev_err(&ctrl_info->pci_dev->dev, 8254 "error obtaining controller properties\n"); 8255 return rc; 8256 } 8257 8258 rc = sis_get_pqi_capabilities(ctrl_info); 8259 if (rc) { 8260 dev_err(&ctrl_info->pci_dev->dev, 8261 "error obtaining controller capabilities\n"); 8262 return rc; 8263 } 8264 8265 product_id = sis_get_product_id(ctrl_info); 8266 ctrl_info->product_id = (u8)product_id; 8267 ctrl_info->product_revision = (u8)(product_id >> 8); 8268 8269 if (reset_devices) { 8270 if (ctrl_info->max_outstanding_requests > 8271 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 8272 ctrl_info->max_outstanding_requests = 8273 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 8274 } else { 8275 if (ctrl_info->max_outstanding_requests > 8276 PQI_MAX_OUTSTANDING_REQUESTS) 8277 ctrl_info->max_outstanding_requests = 8278 PQI_MAX_OUTSTANDING_REQUESTS; 8279 } 8280 8281 pqi_calculate_io_resources(ctrl_info); 8282 8283 rc = pqi_alloc_error_buffer(ctrl_info); 8284 if (rc) { 8285 dev_err(&ctrl_info->pci_dev->dev, 8286 "failed to allocate PQI error buffer\n"); 8287 return rc; 8288 } 8289 8290 /* 8291 * If the function we are about to call succeeds, the 8292 * controller will transition from legacy SIS mode 8293 * into PQI mode. 8294 */ 8295 rc = sis_init_base_struct_addr(ctrl_info); 8296 if (rc) { 8297 dev_err(&ctrl_info->pci_dev->dev, 8298 "error initializing PQI mode\n"); 8299 return rc; 8300 } 8301 8302 /* Wait for the controller to complete the SIS -> PQI transition. */ 8303 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8304 if (rc) { 8305 dev_err(&ctrl_info->pci_dev->dev, 8306 "transition to PQI mode failed\n"); 8307 return rc; 8308 } 8309 8310 /* From here on, we are running in PQI mode. 
*/ 8311 ctrl_info->pqi_mode_enabled = true; 8312 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8313 8314 rc = pqi_alloc_admin_queues(ctrl_info); 8315 if (rc) { 8316 dev_err(&ctrl_info->pci_dev->dev, 8317 "failed to allocate admin queues\n"); 8318 return rc; 8319 } 8320 8321 rc = pqi_create_admin_queues(ctrl_info); 8322 if (rc) { 8323 dev_err(&ctrl_info->pci_dev->dev, 8324 "error creating admin queues\n"); 8325 return rc; 8326 } 8327 8328 rc = pqi_report_device_capability(ctrl_info); 8329 if (rc) { 8330 dev_err(&ctrl_info->pci_dev->dev, 8331 "obtaining device capability failed\n"); 8332 return rc; 8333 } 8334 8335 rc = pqi_validate_device_capability(ctrl_info); 8336 if (rc) 8337 return rc; 8338 8339 pqi_calculate_queue_resources(ctrl_info); 8340 8341 rc = pqi_enable_msix_interrupts(ctrl_info); 8342 if (rc) 8343 return rc; 8344 8345 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 8346 ctrl_info->max_msix_vectors = 8347 ctrl_info->num_msix_vectors_enabled; 8348 pqi_calculate_queue_resources(ctrl_info); 8349 } 8350 8351 rc = pqi_alloc_io_resources(ctrl_info); 8352 if (rc) 8353 return rc; 8354 8355 rc = pqi_alloc_operational_queues(ctrl_info); 8356 if (rc) { 8357 dev_err(&ctrl_info->pci_dev->dev, 8358 "failed to allocate operational queues\n"); 8359 return rc; 8360 } 8361 8362 pqi_init_operational_queues(ctrl_info); 8363 8364 rc = pqi_create_queues(ctrl_info); 8365 if (rc) 8366 return rc; 8367 8368 rc = pqi_request_irqs(ctrl_info); 8369 if (rc) 8370 return rc; 8371 8372 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8373 8374 ctrl_info->controller_online = true; 8375 8376 rc = pqi_process_config_table(ctrl_info); 8377 if (rc) 8378 return rc; 8379 8380 pqi_start_heartbeat_timer(ctrl_info); 8381 8382 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8383 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8384 if (rc) { /* Supported features not returned correctly. */ 8385 dev_err(&ctrl_info->pci_dev->dev, 8386 "error obtaining advanced RAID bypass configuration\n"); 8387 return rc; 8388 } 8389 ctrl_info->ciss_report_log_flags |= 8390 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8391 } 8392 8393 rc = pqi_enable_events(ctrl_info); 8394 if (rc) { 8395 dev_err(&ctrl_info->pci_dev->dev, 8396 "error enabling events\n"); 8397 return rc; 8398 } 8399 8400 /* Register with the SCSI subsystem. 
*/ 8401 rc = pqi_register_scsi(ctrl_info); 8402 if (rc) 8403 return rc; 8404 8405 rc = pqi_get_ctrl_product_details(ctrl_info); 8406 if (rc) { 8407 dev_err(&ctrl_info->pci_dev->dev, 8408 "error obtaining product details\n"); 8409 return rc; 8410 } 8411 8412 rc = pqi_get_ctrl_serial_number(ctrl_info); 8413 if (rc) { 8414 dev_err(&ctrl_info->pci_dev->dev, 8415 "error obtaining ctrl serial number\n"); 8416 return rc; 8417 } 8418 8419 rc = pqi_set_diag_rescan(ctrl_info); 8420 if (rc) { 8421 dev_err(&ctrl_info->pci_dev->dev, 8422 "error enabling multi-lun rescan\n"); 8423 return rc; 8424 } 8425 8426 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8427 if (rc) { 8428 dev_err(&ctrl_info->pci_dev->dev, 8429 "error updating host wellness\n"); 8430 return rc; 8431 } 8432 8433 pqi_schedule_update_time_worker(ctrl_info); 8434 8435 pqi_scan_scsi_devices(ctrl_info); 8436 8437 return 0; 8438 } 8439 8440 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 8441 { 8442 unsigned int i; 8443 struct pqi_admin_queues *admin_queues; 8444 struct pqi_event_queue *event_queue; 8445 8446 admin_queues = &ctrl_info->admin_queues; 8447 admin_queues->iq_pi_copy = 0; 8448 admin_queues->oq_ci_copy = 0; 8449 writel(0, admin_queues->oq_pi); 8450 8451 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 8452 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 8453 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 8454 ctrl_info->queue_groups[i].oq_ci_copy = 0; 8455 8456 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 8457 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 8458 writel(0, ctrl_info->queue_groups[i].oq_pi); 8459 } 8460 8461 event_queue = &ctrl_info->event_queue; 8462 writel(0, event_queue->oq_pi); 8463 event_queue->oq_ci_copy = 0; 8464 } 8465 8466 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 8467 { 8468 int rc; 8469 8470 rc = pqi_force_sis_mode(ctrl_info); 8471 if (rc) 8472 return rc; 8473 8474 /* 8475 * Wait until the controller is ready to start accepting SIS 8476 * commands. 8477 */ 8478 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 8479 if (rc) 8480 return rc; 8481 8482 /* 8483 * Get the controller properties. This allows us to determine 8484 * whether or not it supports PQI mode. 8485 */ 8486 rc = sis_get_ctrl_properties(ctrl_info); 8487 if (rc) { 8488 dev_err(&ctrl_info->pci_dev->dev, 8489 "error obtaining controller properties\n"); 8490 return rc; 8491 } 8492 8493 rc = sis_get_pqi_capabilities(ctrl_info); 8494 if (rc) { 8495 dev_err(&ctrl_info->pci_dev->dev, 8496 "error obtaining controller capabilities\n"); 8497 return rc; 8498 } 8499 8500 /* 8501 * If the function we are about to call succeeds, the 8502 * controller will transition from legacy SIS mode 8503 * into PQI mode. 8504 */ 8505 rc = sis_init_base_struct_addr(ctrl_info); 8506 if (rc) { 8507 dev_err(&ctrl_info->pci_dev->dev, 8508 "error initializing PQI mode\n"); 8509 return rc; 8510 } 8511 8512 /* Wait for the controller to complete the SIS -> PQI transition. */ 8513 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 8514 if (rc) { 8515 dev_err(&ctrl_info->pci_dev->dev, 8516 "transition to PQI mode failed\n"); 8517 return rc; 8518 } 8519 8520 /* From here on, we are running in PQI mode. 
*/ 8521 ctrl_info->pqi_mode_enabled = true; 8522 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 8523 8524 pqi_reinit_queues(ctrl_info); 8525 8526 rc = pqi_create_admin_queues(ctrl_info); 8527 if (rc) { 8528 dev_err(&ctrl_info->pci_dev->dev, 8529 "error creating admin queues\n"); 8530 return rc; 8531 } 8532 8533 rc = pqi_create_queues(ctrl_info); 8534 if (rc) 8535 return rc; 8536 8537 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 8538 8539 ctrl_info->controller_online = true; 8540 pqi_ctrl_unblock_requests(ctrl_info); 8541 8542 pqi_ctrl_reset_config(ctrl_info); 8543 8544 rc = pqi_process_config_table(ctrl_info); 8545 if (rc) 8546 return rc; 8547 8548 pqi_start_heartbeat_timer(ctrl_info); 8549 8550 if (ctrl_info->enable_r5_writes || ctrl_info->enable_r6_writes) { 8551 rc = pqi_get_advanced_raid_bypass_config(ctrl_info); 8552 if (rc) { 8553 dev_err(&ctrl_info->pci_dev->dev, 8554 "error obtaining advanced RAID bypass configuration\n"); 8555 return rc; 8556 } 8557 ctrl_info->ciss_report_log_flags |= 8558 CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX; 8559 } 8560 8561 rc = pqi_enable_events(ctrl_info); 8562 if (rc) { 8563 dev_err(&ctrl_info->pci_dev->dev, 8564 "error enabling events\n"); 8565 return rc; 8566 } 8567 8568 rc = pqi_get_ctrl_product_details(ctrl_info); 8569 if (rc) { 8570 dev_err(&ctrl_info->pci_dev->dev, 8571 "error obtaining product details\n"); 8572 return rc; 8573 } 8574 8575 rc = pqi_set_diag_rescan(ctrl_info); 8576 if (rc) { 8577 dev_err(&ctrl_info->pci_dev->dev, 8578 "error enabling multi-lun rescan\n"); 8579 return rc; 8580 } 8581 8582 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 8583 if (rc) { 8584 dev_err(&ctrl_info->pci_dev->dev, 8585 "error updating host wellness\n"); 8586 return rc; 8587 } 8588 8589 if (pqi_ofa_in_progress(ctrl_info)) 8590 pqi_ctrl_unblock_scan(ctrl_info); 8591 8592 pqi_scan_scsi_devices(ctrl_info); 8593 8594 return 0; 8595 } 8596 8597 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) 8598 { 8599 int rc; 8600 8601 rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 8602 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 8603 8604 return pcibios_err_to_errno(rc); 8605 } 8606 8607 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 8608 { 8609 int rc; 8610 u64 mask; 8611 8612 rc = pci_enable_device(ctrl_info->pci_dev); 8613 if (rc) { 8614 dev_err(&ctrl_info->pci_dev->dev, 8615 "failed to enable PCI device\n"); 8616 return rc; 8617 } 8618 8619 if (sizeof(dma_addr_t) > 4) 8620 mask = DMA_BIT_MASK(64); 8621 else 8622 mask = DMA_BIT_MASK(32); 8623 8624 rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask); 8625 if (rc) { 8626 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 8627 goto disable_device; 8628 } 8629 8630 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 8631 if (rc) { 8632 dev_err(&ctrl_info->pci_dev->dev, 8633 "failed to obtain PCI resources\n"); 8634 goto disable_device; 8635 } 8636 8637 ctrl_info->iomem_base = ioremap(pci_resource_start( 8638 ctrl_info->pci_dev, 0), 8639 pci_resource_len(ctrl_info->pci_dev, 0)); 8640 if (!ctrl_info->iomem_base) { 8641 dev_err(&ctrl_info->pci_dev->dev, 8642 "failed to map memory for controller registers\n"); 8643 rc = -ENOMEM; 8644 goto release_regions; 8645 } 8646 8647 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 8648 8649 /* Increase the PCIe completion timeout. 
*/ 8650 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 8651 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 8652 if (rc) { 8653 dev_err(&ctrl_info->pci_dev->dev, 8654 "failed to set PCIe completion timeout\n"); 8655 goto release_regions; 8656 } 8657 8658 /* Enable bus mastering. */ 8659 pci_set_master(ctrl_info->pci_dev); 8660 8661 ctrl_info->registers = ctrl_info->iomem_base; 8662 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 8663 8664 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 8665 8666 return 0; 8667 8668 release_regions: 8669 pci_release_regions(ctrl_info->pci_dev); 8670 disable_device: 8671 pci_disable_device(ctrl_info->pci_dev); 8672 8673 return rc; 8674 } 8675 8676 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 8677 { 8678 iounmap(ctrl_info->iomem_base); 8679 pci_release_regions(ctrl_info->pci_dev); 8680 if (pci_is_enabled(ctrl_info->pci_dev)) 8681 pci_disable_device(ctrl_info->pci_dev); 8682 pci_set_drvdata(ctrl_info->pci_dev, NULL); 8683 } 8684 8685 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 8686 { 8687 struct pqi_ctrl_info *ctrl_info; 8688 8689 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 8690 GFP_KERNEL, numa_node); 8691 if (!ctrl_info) 8692 return NULL; 8693 8694 mutex_init(&ctrl_info->scan_mutex); 8695 mutex_init(&ctrl_info->lun_reset_mutex); 8696 mutex_init(&ctrl_info->ofa_mutex); 8697 8698 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 8699 spin_lock_init(&ctrl_info->scsi_device_list_lock); 8700 8701 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 8702 atomic_set(&ctrl_info->num_interrupts, 0); 8703 8704 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 8705 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 8706 8707 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 8708 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 8709 8710 INIT_WORK(&ctrl_info->ofa_memory_alloc_work, pqi_ofa_memory_alloc_worker); 8711 INIT_WORK(&ctrl_info->ofa_quiesce_work, pqi_ofa_quiesce_worker); 8712 8713 sema_init(&ctrl_info->sync_request_sem, 8714 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 8715 init_waitqueue_head(&ctrl_info->block_requests_wait); 8716 8717 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 8718 ctrl_info->irq_mode = IRQ_MODE_NONE; 8719 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 8720 8721 ctrl_info->ciss_report_log_flags = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; 8722 ctrl_info->max_transfer_encrypted_sas_sata = 8723 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_SAS_SATA; 8724 ctrl_info->max_transfer_encrypted_nvme = 8725 PQI_DEFAULT_MAX_TRANSFER_ENCRYPTED_NVME; 8726 ctrl_info->max_write_raid_5_6 = PQI_DEFAULT_MAX_WRITE_RAID_5_6; 8727 ctrl_info->max_write_raid_1_10_2drive = ~0; 8728 ctrl_info->max_write_raid_1_10_3drive = ~0; 8729 ctrl_info->disable_managed_interrupts = pqi_disable_managed_interrupts; 8730 8731 return ctrl_info; 8732 } 8733 8734 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 8735 { 8736 kfree(ctrl_info); 8737 } 8738 8739 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 8740 { 8741 pqi_free_irqs(ctrl_info); 8742 pqi_disable_msix_interrupts(ctrl_info); 8743 } 8744 8745 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 8746 { 8747 pqi_free_interrupts(ctrl_info); 8748 if (ctrl_info->queue_memory_base) 8749 dma_free_coherent(&ctrl_info->pci_dev->dev, 8750 ctrl_info->queue_memory_length, 8751 ctrl_info->queue_memory_base, 8752 
ctrl_info->queue_memory_base_dma_handle); 8753 if (ctrl_info->admin_queue_memory_base) 8754 dma_free_coherent(&ctrl_info->pci_dev->dev, 8755 ctrl_info->admin_queue_memory_length, 8756 ctrl_info->admin_queue_memory_base, 8757 ctrl_info->admin_queue_memory_base_dma_handle); 8758 pqi_free_all_io_requests(ctrl_info); 8759 if (ctrl_info->error_buffer) 8760 dma_free_coherent(&ctrl_info->pci_dev->dev, 8761 ctrl_info->error_buffer_length, 8762 ctrl_info->error_buffer, 8763 ctrl_info->error_buffer_dma_handle); 8764 if (ctrl_info->iomem_base) 8765 pqi_cleanup_pci_init(ctrl_info); 8766 pqi_free_ctrl_info(ctrl_info); 8767 } 8768 8769 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 8770 { 8771 ctrl_info->controller_online = false; 8772 pqi_stop_heartbeat_timer(ctrl_info); 8773 pqi_ctrl_block_requests(ctrl_info); 8774 pqi_cancel_rescan_worker(ctrl_info); 8775 pqi_cancel_update_time_worker(ctrl_info); 8776 if (ctrl_info->ctrl_removal_state == PQI_CTRL_SURPRISE_REMOVAL) { 8777 pqi_fail_all_outstanding_requests(ctrl_info); 8778 ctrl_info->pqi_mode_enabled = false; 8779 } 8780 pqi_unregister_scsi(ctrl_info); 8781 if (ctrl_info->pqi_mode_enabled) 8782 pqi_revert_to_sis_mode(ctrl_info); 8783 pqi_free_ctrl_resources(ctrl_info); 8784 } 8785 8786 static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) 8787 { 8788 pqi_ctrl_block_scan(ctrl_info); 8789 pqi_scsi_block_requests(ctrl_info); 8790 pqi_ctrl_block_device_reset(ctrl_info); 8791 pqi_ctrl_block_requests(ctrl_info); 8792 pqi_ctrl_wait_until_quiesced(ctrl_info); 8793 pqi_stop_heartbeat_timer(ctrl_info); 8794 } 8795 8796 static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) 8797 { 8798 pqi_start_heartbeat_timer(ctrl_info); 8799 pqi_ctrl_unblock_requests(ctrl_info); 8800 pqi_ctrl_unblock_device_reset(ctrl_info); 8801 pqi_scsi_unblock_requests(ctrl_info); 8802 pqi_ctrl_unblock_scan(ctrl_info); 8803 } 8804 8805 static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size) 8806 { 8807 int i; 8808 u32 sg_count; 8809 struct device *dev; 8810 struct pqi_ofa_memory *ofap; 8811 struct pqi_sg_descriptor *mem_descriptor; 8812 dma_addr_t dma_handle; 8813 8814 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 8815 8816 sg_count = DIV_ROUND_UP(total_size, chunk_size); 8817 if (sg_count == 0 || sg_count > PQI_OFA_MAX_SG_DESCRIPTORS) 8818 goto out; 8819 8820 ctrl_info->pqi_ofa_chunk_virt_addr = kmalloc_array(sg_count, sizeof(void *), GFP_KERNEL); 8821 if (!ctrl_info->pqi_ofa_chunk_virt_addr) 8822 goto out; 8823 8824 dev = &ctrl_info->pci_dev->dev; 8825 8826 for (i = 0; i < sg_count; i++) { 8827 ctrl_info->pqi_ofa_chunk_virt_addr[i] = 8828 dma_alloc_coherent(dev, chunk_size, &dma_handle, GFP_KERNEL); 8829 if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) 8830 goto out_free_chunks; 8831 mem_descriptor = &ofap->sg_descriptor[i]; 8832 put_unaligned_le64((u64)dma_handle, &mem_descriptor->address); 8833 put_unaligned_le32(chunk_size, &mem_descriptor->length); 8834 } 8835 8836 put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); 8837 put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); 8838 put_unaligned_le32(sg_count * chunk_size, &ofap->bytes_allocated); 8839 8840 return 0; 8841 8842 out_free_chunks: 8843 while (--i >= 0) { 8844 mem_descriptor = &ofap->sg_descriptor[i]; 8845 dma_free_coherent(dev, chunk_size, 8846 ctrl_info->pqi_ofa_chunk_virt_addr[i], 8847 get_unaligned_le64(&mem_descriptor->address)); 8848 } 8849 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 8850 8851 out: 8852 return -ENOMEM; 8853 } 8854 8855 static 
int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) 8856 { 8857 u32 total_size; 8858 u32 chunk_size; 8859 u32 min_chunk_size; 8860 8861 if (ctrl_info->ofa_bytes_requested == 0) 8862 return 0; 8863 8864 total_size = PAGE_ALIGN(ctrl_info->ofa_bytes_requested); 8865 min_chunk_size = DIV_ROUND_UP(total_size, PQI_OFA_MAX_SG_DESCRIPTORS); 8866 min_chunk_size = PAGE_ALIGN(min_chunk_size); 8867 8868 for (chunk_size = total_size; chunk_size >= min_chunk_size;) { 8869 if (pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_size) == 0) 8870 return 0; 8871 chunk_size /= 2; 8872 chunk_size = PAGE_ALIGN(chunk_size); 8873 } 8874 8875 return -ENOMEM; 8876 } 8877 8878 static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info) 8879 { 8880 struct device *dev; 8881 struct pqi_ofa_memory *ofap; 8882 8883 dev = &ctrl_info->pci_dev->dev; 8884 8885 ofap = dma_alloc_coherent(dev, sizeof(*ofap), 8886 &ctrl_info->pqi_ofa_mem_dma_handle, GFP_KERNEL); 8887 if (!ofap) 8888 return; 8889 8890 ctrl_info->pqi_ofa_mem_virt_addr = ofap; 8891 8892 if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { 8893 dev_err(dev, 8894 "failed to allocate host buffer for Online Firmware Activation\n"); 8895 dma_free_coherent(dev, sizeof(*ofap), ofap, ctrl_info->pqi_ofa_mem_dma_handle); 8896 ctrl_info->pqi_ofa_mem_virt_addr = NULL; 8897 return; 8898 } 8899 8900 put_unaligned_le16(PQI_OFA_VERSION, &ofap->version); 8901 memcpy(&ofap->signature, PQI_OFA_SIGNATURE, sizeof(ofap->signature)); 8902 } 8903 8904 static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) 8905 { 8906 unsigned int i; 8907 struct device *dev; 8908 struct pqi_ofa_memory *ofap; 8909 struct pqi_sg_descriptor *mem_descriptor; 8910 unsigned int num_memory_descriptors; 8911 8912 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 8913 if (!ofap) 8914 return; 8915 8916 dev = &ctrl_info->pci_dev->dev; 8917 8918 if (get_unaligned_le32(&ofap->bytes_allocated) == 0) 8919 goto out; 8920 8921 mem_descriptor = ofap->sg_descriptor; 8922 num_memory_descriptors = 8923 get_unaligned_le16(&ofap->num_memory_descriptors); 8924 8925 for (i = 0; i < num_memory_descriptors; i++) { 8926 dma_free_coherent(dev, 8927 get_unaligned_le32(&mem_descriptor[i].length), 8928 ctrl_info->pqi_ofa_chunk_virt_addr[i], 8929 get_unaligned_le64(&mem_descriptor[i].address)); 8930 } 8931 kfree(ctrl_info->pqi_ofa_chunk_virt_addr); 8932 8933 out: 8934 dma_free_coherent(dev, sizeof(*ofap), ofap, 8935 ctrl_info->pqi_ofa_mem_dma_handle); 8936 ctrl_info->pqi_ofa_mem_virt_addr = NULL; 8937 } 8938 8939 static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) 8940 { 8941 u32 buffer_length; 8942 struct pqi_vendor_general_request request; 8943 struct pqi_ofa_memory *ofap; 8944 8945 memset(&request, 0, sizeof(request)); 8946 8947 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 8948 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 8949 &request.header.iu_length); 8950 put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, 8951 &request.function_code); 8952 8953 ofap = ctrl_info->pqi_ofa_mem_virt_addr; 8954 8955 if (ofap) { 8956 buffer_length = offsetof(struct pqi_ofa_memory, sg_descriptor) + 8957 get_unaligned_le16(&ofap->num_memory_descriptors) * 8958 sizeof(struct pqi_sg_descriptor); 8959 8960 put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, 8961 &request.data.ofa_memory_allocation.buffer_address); 8962 put_unaligned_le32(buffer_length, 8963 &request.data.ofa_memory_allocation.buffer_length); 8964 } 8965 8966 return pqi_submit_raid_request_synchronous(ctrl_info, 
&request.header, 0, NULL); 8967 } 8968 8969 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs) 8970 { 8971 ssleep(delay_secs); 8972 8973 return pqi_ctrl_init_resume(ctrl_info); 8974 } 8975 8976 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 8977 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 8978 .status = SAM_STAT_CHECK_CONDITION, 8979 }; 8980 8981 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 8982 { 8983 unsigned int i; 8984 struct pqi_io_request *io_request; 8985 struct scsi_cmnd *scmd; 8986 struct scsi_device *sdev; 8987 8988 for (i = 0; i < ctrl_info->max_io_slots; i++) { 8989 io_request = &ctrl_info->io_request_pool[i]; 8990 if (atomic_read(&io_request->refcount) == 0) 8991 continue; 8992 8993 scmd = io_request->scmd; 8994 if (scmd) { 8995 sdev = scmd->device; 8996 if (!sdev || !scsi_device_online(sdev)) { 8997 pqi_free_io_request(io_request); 8998 continue; 8999 } else { 9000 set_host_byte(scmd, DID_NO_CONNECT); 9001 } 9002 } else { 9003 io_request->status = -ENXIO; 9004 io_request->error_info = 9005 &pqi_ctrl_offline_raid_error_info; 9006 } 9007 9008 io_request->io_complete_callback(io_request, 9009 io_request->context); 9010 } 9011 } 9012 9013 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 9014 { 9015 pqi_perform_lockup_action(); 9016 pqi_stop_heartbeat_timer(ctrl_info); 9017 pqi_free_interrupts(ctrl_info); 9018 pqi_cancel_rescan_worker(ctrl_info); 9019 pqi_cancel_update_time_worker(ctrl_info); 9020 pqi_ctrl_wait_until_quiesced(ctrl_info); 9021 pqi_fail_all_outstanding_requests(ctrl_info); 9022 pqi_ctrl_unblock_requests(ctrl_info); 9023 } 9024 9025 static void pqi_ctrl_offline_worker(struct work_struct *work) 9026 { 9027 struct pqi_ctrl_info *ctrl_info; 9028 9029 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 9030 pqi_take_ctrl_offline_deferred(ctrl_info); 9031 } 9032 9033 static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9034 { 9035 char *string; 9036 9037 switch (ctrl_shutdown_reason) { 9038 case PQI_IQ_NOT_DRAINED_TIMEOUT: 9039 string = "inbound queue not drained timeout"; 9040 break; 9041 case PQI_LUN_RESET_TIMEOUT: 9042 string = "LUN reset timeout"; 9043 break; 9044 case PQI_IO_PENDING_POST_LUN_RESET_TIMEOUT: 9045 string = "I/O pending timeout after LUN reset"; 9046 break; 9047 case PQI_NO_HEARTBEAT: 9048 string = "no controller heartbeat detected"; 9049 break; 9050 case PQI_FIRMWARE_KERNEL_NOT_UP: 9051 string = "firmware kernel not ready"; 9052 break; 9053 case PQI_OFA_RESPONSE_TIMEOUT: 9054 string = "OFA response timeout"; 9055 break; 9056 case PQI_INVALID_REQ_ID: 9057 string = "invalid request ID"; 9058 break; 9059 case PQI_UNMATCHED_REQ_ID: 9060 string = "unmatched request ID"; 9061 break; 9062 case PQI_IO_PI_OUT_OF_RANGE: 9063 string = "I/O queue producer index out of range"; 9064 break; 9065 case PQI_EVENT_PI_OUT_OF_RANGE: 9066 string = "event queue producer index out of range"; 9067 break; 9068 case PQI_UNEXPECTED_IU_TYPE: 9069 string = "unexpected IU type"; 9070 break; 9071 default: 9072 string = "unknown reason"; 9073 break; 9074 } 9075 9076 return string; 9077 } 9078 9079 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info, 9080 enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason) 9081 { 9082 if (!ctrl_info->controller_online) 9083 return; 9084 9085 ctrl_info->controller_online = false; 9086 ctrl_info->pqi_mode_enabled = false; 9087 
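/* Block new requests, optionally report the shutdown reason to the firmware, then defer the remaining teardown (failing outstanding requests, freeing interrupts) to the ctrl_offline worker. */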
pqi_ctrl_block_requests(ctrl_info); 9088 if (!pqi_disable_ctrl_shutdown) 9089 sis_shutdown_ctrl(ctrl_info, ctrl_shutdown_reason); 9090 pci_disable_device(ctrl_info->pci_dev); 9091 dev_err(&ctrl_info->pci_dev->dev, 9092 "controller offline: reason code 0x%x (%s)\n", 9093 ctrl_shutdown_reason, pqi_ctrl_shutdown_reason_to_string(ctrl_shutdown_reason)); 9094 schedule_work(&ctrl_info->ctrl_offline_work); 9095 } 9096 9097 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 9098 const struct pci_device_id *id) 9099 { 9100 char *ctrl_description; 9101 9102 if (id->driver_data) 9103 ctrl_description = (char *)id->driver_data; 9104 else 9105 ctrl_description = "Microchip Smart Family Controller"; 9106 9107 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 9108 } 9109 9110 static int pqi_pci_probe(struct pci_dev *pci_dev, 9111 const struct pci_device_id *id) 9112 { 9113 int rc; 9114 int node; 9115 struct pqi_ctrl_info *ctrl_info; 9116 9117 pqi_print_ctrl_info(pci_dev, id); 9118 9119 if (pqi_disable_device_id_wildcards && 9120 id->subvendor == PCI_ANY_ID && 9121 id->subdevice == PCI_ANY_ID) { 9122 dev_warn(&pci_dev->dev, 9123 "controller not probed because device ID wildcards are disabled\n"); 9124 return -ENODEV; 9125 } 9126 9127 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID) 9128 dev_warn(&pci_dev->dev, 9129 "controller device ID matched using wildcards\n"); 9130 9131 node = dev_to_node(&pci_dev->dev); 9132 if (node == NUMA_NO_NODE) { 9133 node = cpu_to_node(0); 9134 if (node == NUMA_NO_NODE) 9135 node = 0; 9136 set_dev_node(&pci_dev->dev, node); 9137 } 9138 9139 ctrl_info = pqi_alloc_ctrl_info(node); 9140 if (!ctrl_info) { 9141 dev_err(&pci_dev->dev, 9142 "failed to allocate controller info block\n"); 9143 return -ENOMEM; 9144 } 9145 ctrl_info->numa_node = node; 9146 9147 ctrl_info->pci_dev = pci_dev; 9148 9149 rc = pqi_pci_init(ctrl_info); 9150 if (rc) 9151 goto error; 9152 9153 rc = pqi_ctrl_init(ctrl_info); 9154 if (rc) 9155 goto error; 9156 9157 return 0; 9158 9159 error: 9160 pqi_remove_ctrl(ctrl_info); 9161 9162 return rc; 9163 } 9164 9165 static void pqi_pci_remove(struct pci_dev *pci_dev) 9166 { 9167 struct pqi_ctrl_info *ctrl_info; 9168 u16 vendor_id; 9169 int rc; 9170 9171 ctrl_info = pci_get_drvdata(pci_dev); 9172 if (!ctrl_info) 9173 return; 9174 9175 pci_read_config_word(ctrl_info->pci_dev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id); 9176 if (vendor_id == 0xffff) 9177 ctrl_info->ctrl_removal_state = PQI_CTRL_SURPRISE_REMOVAL; 9178 else 9179 ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; 9180 9181 if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { 9182 rc = pqi_flush_cache(ctrl_info, RESTART); 9183 if (rc) 9184 dev_err(&pci_dev->dev, 9185 "unable to flush controller cache during remove\n"); 9186 } 9187 9188 pqi_remove_ctrl(ctrl_info); 9189 } 9190 9191 static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) 9192 { 9193 unsigned int i; 9194 struct pqi_io_request *io_request; 9195 struct scsi_cmnd *scmd; 9196 9197 for (i = 0; i < ctrl_info->max_io_slots; i++) { 9198 io_request = &ctrl_info->io_request_pool[i]; 9199 if (atomic_read(&io_request->refcount) == 0) 9200 continue; 9201 scmd = io_request->scmd; 9202 WARN_ON(scmd != NULL); /* IO command from SML */ 9203 WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ 9204 } 9205 } 9206 9207 static void pqi_shutdown(struct pci_dev *pci_dev) 9208 { 9209 int rc; 9210 struct pqi_ctrl_info *ctrl_info; 9211 enum bmic_flush_cache_shutdown_event shutdown_event; 9212 9213 
ctrl_info = pci_get_drvdata(pci_dev); 9214 if (!ctrl_info) { 9215 dev_err(&pci_dev->dev, 9216 "cache could not be flushed\n"); 9217 return; 9218 } 9219 9220 pqi_wait_until_ofa_finished(ctrl_info); 9221 9222 pqi_scsi_block_requests(ctrl_info); 9223 pqi_ctrl_block_device_reset(ctrl_info); 9224 pqi_ctrl_block_requests(ctrl_info); 9225 pqi_ctrl_wait_until_quiesced(ctrl_info); 9226 9227 if (system_state == SYSTEM_RESTART) 9228 shutdown_event = RESTART; 9229 else 9230 shutdown_event = SHUTDOWN; 9231 9232 /* 9233 * Write all data in the controller's battery-backed cache to 9234 * storage. 9235 */ 9236 rc = pqi_flush_cache(ctrl_info, shutdown_event); 9237 if (rc) 9238 dev_err(&pci_dev->dev, 9239 "unable to flush controller cache during shutdown\n"); 9240 9241 pqi_crash_if_pending_command(ctrl_info); 9242 pqi_reset(ctrl_info); 9243 } 9244 9245 static void pqi_process_lockup_action_param(void) 9246 { 9247 unsigned int i; 9248 9249 if (!pqi_lockup_action_param) 9250 return; 9251 9252 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 9253 if (strcmp(pqi_lockup_action_param, 9254 pqi_lockup_actions[i].name) == 0) { 9255 pqi_lockup_action = pqi_lockup_actions[i].action; 9256 return; 9257 } 9258 } 9259 9260 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 9261 DRIVER_NAME_SHORT, pqi_lockup_action_param); 9262 } 9263 9264 #define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS 30 9265 #define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS (30 * 60) 9266 9267 static void pqi_process_ctrl_ready_timeout_param(void) 9268 { 9269 if (pqi_ctrl_ready_timeout_secs == 0) 9270 return; 9271 9272 if (pqi_ctrl_ready_timeout_secs < PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS) { 9273 pr_warn("%s: ctrl_ready_timeout parm of %u second(s) is less than minimum timeout of %d seconds - setting timeout to %d seconds\n", 9274 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS); 9275 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS; 9276 } else if (pqi_ctrl_ready_timeout_secs > PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS) { 9277 pr_warn("%s: ctrl_ready_timeout parm of %u seconds is greater than maximum timeout of %d seconds - setting timeout to %d seconds\n", 9278 DRIVER_NAME_SHORT, pqi_ctrl_ready_timeout_secs, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS, PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS); 9279 pqi_ctrl_ready_timeout_secs = PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS; 9280 } 9281 9282 sis_ctrl_ready_timeout_secs = pqi_ctrl_ready_timeout_secs; 9283 } 9284 9285 static void pqi_process_module_params(void) 9286 { 9287 pqi_process_lockup_action_param(); 9288 pqi_process_ctrl_ready_timeout_param(); 9289 } 9290 9291 #if defined(CONFIG_PM) 9292 9293 static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev) 9294 { 9295 if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304) 9296 return RESTART; 9297 9298 return SUSPEND; 9299 } 9300 9301 static int pqi_suspend_or_freeze(struct device *dev, bool suspend) 9302 { 9303 struct pci_dev *pci_dev; 9304 struct pqi_ctrl_info *ctrl_info; 9305 9306 pci_dev = to_pci_dev(dev); 9307 ctrl_info = pci_get_drvdata(pci_dev); 9308 9309 pqi_wait_until_ofa_finished(ctrl_info); 9310 9311 pqi_ctrl_block_scan(ctrl_info); 9312 pqi_scsi_block_requests(ctrl_info); 9313 pqi_ctrl_block_device_reset(ctrl_info); 9314 pqi_ctrl_block_requests(ctrl_info); 9315 pqi_ctrl_wait_until_quiesced(ctrl_info); 9316 9317 if (suspend) { 
#if defined(CONFIG_PM)

static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{
	if (pci_dev->subsystem_vendor == PCI_VENDOR_ID_ADAPTEC2 && pci_dev->subsystem_device == 0x1304)
		return RESTART;

	return SUSPEND;
}

static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_wait_until_ofa_finished(ctrl_info);

	pqi_ctrl_block_scan(ctrl_info);
	pqi_scsi_block_requests(ctrl_info);
	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	if (suspend) {
		enum bmic_flush_cache_shutdown_event shutdown_event;

		shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
		pqi_flush_cache(ctrl_info, shutdown_event);
	}

	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_crash_if_pending_command(ctrl_info);
	pqi_free_irqs(ctrl_info);

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}

static __maybe_unused int pqi_suspend(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, true);
}

static int pqi_resume_or_restore(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	ssleep(PQI_POST_RESET_DELAY_SECS);

	return pqi_ctrl_init_resume(ctrl_info);
}

static int pqi_freeze(struct device *dev)
{
	return pqi_suspend_or_freeze(dev, false);
}

static int pqi_thaw(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	ctrl_info->controller_online = true;
	ctrl_info->pqi_mode_enabled = true;

	pqi_ctrl_unblock_device_reset(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_scsi_unblock_requests(ctrl_info);
	pqi_ctrl_unblock_scan(ctrl_info);

	return 0;
}

static int pqi_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev;
	struct pqi_ctrl_info *ctrl_info;
	enum bmic_flush_cache_shutdown_event shutdown_event;

	pci_dev = to_pci_dev(dev);
	ctrl_info = pci_get_drvdata(pci_dev);

	shutdown_event = pqi_get_flush_cache_shutdown_event(pci_dev);
	pqi_flush_cache(ctrl_info, shutdown_event);

	return 0;
}

static const struct dev_pm_ops pqi_pm_ops = {
	.suspend = pqi_suspend,
	.resume = pqi_resume_or_restore,
	.freeze = pqi_freeze,
	.thaw = pqi_thaw,
	.poweroff = pqi_poweroff,
	.restore = pqi_resume_or_restore,
};

#endif /* CONFIG_PM */
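
/*
 * Power-management summary for the block above: suspend and freeze share
 * pqi_suspend_or_freeze(), but only the suspend path flushes the controller
 * cache (the hibernation path relies on pqi_poweroff() for its final flush).
 * thaw just re-enables interrupts and unblocks I/O, while resume and restore
 * both go through pqi_resume_or_restore(), which waits
 * PQI_POST_RESET_DELAY_SECS and then re-initializes the controller with
 * pqi_ctrl_init_resume().
 */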
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x105b, 0x1211)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x105b, 0x1321)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a22)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a23)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a24)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a36)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x152d, 0x8a37)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1104)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1105)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1106)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1107)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1108)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x1109)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x110b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x8460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0x8461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xc460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xc461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xf460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x193d, 0xf461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0046)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0047)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0048)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x004f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0051)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0052)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0053)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0054)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x006b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x006c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x006d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x006f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0070)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0071)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0072)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0086)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0087)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0088)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1bd4, 0x0089)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd227)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd228)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd229)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x19e5, 0xd22c)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0608)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0659)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0807)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0808)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0809)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x080a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0907)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x090a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1200)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1202)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1280)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1281)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1282)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1300)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1301)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1302)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1303)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1304)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1380)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1400)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1402)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1410)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1411)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1412)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1420)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1430)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1440)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1441)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1450)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1452)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1460)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1461)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1462)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1463)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1470)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1471)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1472)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1473)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1474)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1475)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1480)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1490)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x1491)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a5)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14a6)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14b0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14b1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14c0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14c1)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14c2)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14e0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADAPTEC2, 0x14f0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_ADVANTECH, 0x8312)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_DELL, 0x1fe0)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0609)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x0294)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x02db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x02dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x032e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x036f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x0381)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x0382)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1590, 0x0383)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0908)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1d8d, 0x0916)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_GIGABYTE, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1dfc, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f0c, 0x3161)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0806)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x5445)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x5446)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x5447)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x5449)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x544a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x544b)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x544d)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x544e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x544f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x54da)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x54db)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x54dc)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0b27)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0b29)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cf2, 0x0b45)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cc4, 0x0101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1cc4, 0x0201)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0220)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0221)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0520)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0522)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0620)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0621)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0622)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_VENDOR_ID_LENOVO, 0x0623)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1014, 0x0718)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1137, 0x02f8)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1137, 0x02f9)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1137, 0x02fa)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1e93, 0x1000)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1e93, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1e93, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1e93, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1002)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1003)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1004)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1005)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1006)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1007)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1008)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1009)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x100a)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x100e)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x100f)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1010)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1011)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1043)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1044)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			0x1f51, 0x1045)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
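
/*
 * The last real entry above (PCI_ANY_ID subvendor/subdevice) is a catch-all
 * that claims any PCI_VENDOR_ID_ADAPTEC2/0x028f controller not matched by an
 * explicit subsystem ID.  pqi_pci_probe() warns when a controller is claimed
 * through that wildcard entry and refuses to probe it entirely if
 * pqi_disable_device_id_wildcards is set.
 */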
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.driver = {
		.pm = &pqi_pm_ops
	},
#endif
};

static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");
	pqi_verify_structures();
	sis_verify_structures();

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
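
/*
 * Compile-time layout checks: every offsetof()/sizeof() assertion below pins
 * a driver structure to the byte layout the controller firmware expects
 * (SIS/PQI registers, IU headers, BMIC buffers, and the admin/operational
 * queue geometry limits).  BUILD_BUG_ON() fails the build if any of these
 * invariants is broken, so layout regressions are caught before the driver
 * ever runs.
 */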
static void pqi_verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_product_identifier) != 0xb4);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_shutdown_reason_code) != 0xcc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		driver_flags) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.driver_flags) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_short) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		vendor_id) != 200);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		product_id) != 208);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extra_controller_flags) != 286);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		spare_part_number) != 293);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version_long) != 325);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_buffer_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_buffer_header,
		buffer_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_page_header) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_code) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		subpage_code) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_page_header,
		page_length) != 2);

	BUILD_BUG_ON(sizeof(struct bmic_sense_feature_io_page_aio_subpage)
		!= 18);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		header) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_read_support) != 4);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_read_support) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		firmware_write_support) != 6);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		driver_write_support) != 7);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_sas_sata) != 8);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_transfer_encrypted_nvme) != 10);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_5_6) != 12);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_2drive) != 14);
	BUILD_BUG_ON(offsetof(struct bmic_sense_feature_io_page_aio_subpage,
		max_write_raid_1_10_3drive) != 16);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}