1 /* 2 * driver for Microsemi PQI-based storage controllers 3 * Copyright (c) 2016-2017 Microsemi Corporation 4 * Copyright (c) 2016 PMC-Sierra, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; version 2 of the License. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 13 * NON INFRINGEMENT. See the GNU General Public License for more details. 14 * 15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com 16 * 17 */ 18 19 #include <linux/module.h> 20 #include <linux/kernel.h> 21 #include <linux/pci.h> 22 #include <linux/delay.h> 23 #include <linux/interrupt.h> 24 #include <linux/sched.h> 25 #include <linux/rtc.h> 26 #include <linux/bcd.h> 27 #include <linux/reboot.h> 28 #include <linux/cciss_ioctl.h> 29 #include <linux/blk-mq-pci.h> 30 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_cmnd.h> 32 #include <scsi/scsi_device.h> 33 #include <scsi/scsi_eh.h> 34 #include <scsi/scsi_transport_sas.h> 35 #include <asm/unaligned.h> 36 #include "smartpqi.h" 37 #include "smartpqi_sis.h" 38 39 #if !defined(BUILD_TIMESTAMP) 40 #define BUILD_TIMESTAMP 41 #endif 42 43 #define DRIVER_VERSION "1.1.4-130" 44 #define DRIVER_MAJOR 1 45 #define DRIVER_MINOR 1 46 #define DRIVER_RELEASE 4 47 #define DRIVER_REVISION 130 48 49 #define DRIVER_NAME "Microsemi PQI Driver (v" \ 50 DRIVER_VERSION BUILD_TIMESTAMP ")" 51 #define DRIVER_NAME_SHORT "smartpqi" 52 53 #define PQI_EXTRA_SGL_MEMORY (12 * sizeof(struct pqi_sg_descriptor)) 54 55 MODULE_AUTHOR("Microsemi"); 56 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " 57 DRIVER_VERSION); 58 MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers"); 59 MODULE_VERSION(DRIVER_VERSION); 60 MODULE_LICENSE("GPL"); 61 62 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info); 63 static void pqi_ctrl_offline_worker(struct work_struct *work); 64 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info); 65 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info); 66 static void pqi_scan_start(struct Scsi_Host *shost); 67 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 68 struct pqi_queue_group *queue_group, enum pqi_io_path path, 69 struct pqi_io_request *io_request); 70 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 71 struct pqi_iu_header *request, unsigned int flags, 72 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs); 73 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 74 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 75 unsigned int cdb_length, struct pqi_queue_group *queue_group, 76 struct pqi_encryption_info *encryption_info, bool raid_bypass); 77 78 /* for flags argument to pqi_submit_raid_request_synchronous() */ 79 #define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1 80 81 static struct scsi_transport_template *pqi_sas_transport_template; 82 83 static atomic_t pqi_controller_count = ATOMIC_INIT(0); 84 85 enum pqi_lockup_action { 86 NONE, 87 REBOOT, 88 PANIC 89 }; 90 91 static enum pqi_lockup_action pqi_lockup_action = NONE; 92 93 static struct { 94 enum pqi_lockup_action action; 95 char *name; 96 } pqi_lockup_actions[] = { 97 { 98 .action = NONE, 99 .name = "none", 100 }, 101 { 102 .action = REBOOT, 103 .name = "reboot", 104 }, 105 { 
106 .action = PANIC, 107 .name = "panic", 108 }, 109 }; 110 111 static unsigned int pqi_supported_event_types[] = { 112 PQI_EVENT_TYPE_HOTPLUG, 113 PQI_EVENT_TYPE_HARDWARE, 114 PQI_EVENT_TYPE_PHYSICAL_DEVICE, 115 PQI_EVENT_TYPE_LOGICAL_DEVICE, 116 PQI_EVENT_TYPE_AIO_STATE_CHANGE, 117 PQI_EVENT_TYPE_AIO_CONFIG_CHANGE, 118 }; 119 120 static int pqi_disable_device_id_wildcards; 121 module_param_named(disable_device_id_wildcards, 122 pqi_disable_device_id_wildcards, int, 0644); 123 MODULE_PARM_DESC(disable_device_id_wildcards, 124 "Disable device ID wildcards."); 125 126 static int pqi_disable_heartbeat; 127 module_param_named(disable_heartbeat, 128 pqi_disable_heartbeat, int, 0644); 129 MODULE_PARM_DESC(disable_heartbeat, 130 "Disable heartbeat."); 131 132 static int pqi_disable_ctrl_shutdown; 133 module_param_named(disable_ctrl_shutdown, 134 pqi_disable_ctrl_shutdown, int, 0644); 135 MODULE_PARM_DESC(disable_ctrl_shutdown, 136 "Disable controller shutdown when controller locked up."); 137 138 static char *pqi_lockup_action_param; 139 module_param_named(lockup_action, 140 pqi_lockup_action_param, charp, 0644); 141 MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" 142 "\t\tSupported: none, reboot, panic\n" 143 "\t\tDefault: none"); 144 145 static char *raid_levels[] = { 146 "RAID-0", 147 "RAID-4", 148 "RAID-1(1+0)", 149 "RAID-5", 150 "RAID-5+1", 151 "RAID-ADG", 152 "RAID-1(ADM)", 153 }; 154 155 static char *pqi_raid_level_to_string(u8 raid_level) 156 { 157 if (raid_level < ARRAY_SIZE(raid_levels)) 158 return raid_levels[raid_level]; 159 160 return "RAID UNKNOWN"; 161 } 162 163 #define SA_RAID_0 0 164 #define SA_RAID_4 1 165 #define SA_RAID_1 2 /* also used for RAID 10 */ 166 #define SA_RAID_5 3 /* also used for RAID 50 */ 167 #define SA_RAID_51 4 168 #define SA_RAID_6 5 /* also used for RAID 60 */ 169 #define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */ 170 #define SA_RAID_MAX SA_RAID_ADM 171 #define SA_RAID_UNKNOWN 0xff 172 173 static inline void pqi_scsi_done(struct scsi_cmnd *scmd) 174 { 175 pqi_prep_for_scsi_done(scmd); 176 scmd->scsi_done(scmd); 177 } 178 179 static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2) 180 { 181 return memcmp(scsi3addr1, scsi3addr2, 8) == 0; 182 } 183 184 static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) 185 { 186 void *hostdata = shost_priv(shost); 187 188 return *((struct pqi_ctrl_info **)hostdata); 189 } 190 191 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device) 192 { 193 return !device->is_physical_device; 194 } 195 196 static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) 197 { 198 return scsi3addr[2] != 0; 199 } 200 201 static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 202 { 203 return !ctrl_info->controller_online; 204 } 205 206 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) 207 { 208 if (ctrl_info->controller_online) 209 if (!sis_is_firmware_running(ctrl_info)) 210 pqi_take_ctrl_offline(ctrl_info); 211 } 212 213 static inline bool pqi_is_hba_lunid(u8 *scsi3addr) 214 { 215 return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID); 216 } 217 218 static inline enum pqi_ctrl_mode pqi_get_ctrl_mode( 219 struct pqi_ctrl_info *ctrl_info) 220 { 221 return sis_read_driver_scratch(ctrl_info); 222 } 223 224 static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, 225 enum pqi_ctrl_mode mode) 226 { 227 sis_write_driver_scratch(ctrl_info, mode); 228 } 229 230 static inline void pqi_ctrl_block_requests(struct 
pqi_ctrl_info *ctrl_info) 231 { 232 ctrl_info->block_requests = true; 233 scsi_block_requests(ctrl_info->scsi_host); 234 } 235 236 static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info) 237 { 238 ctrl_info->block_requests = false; 239 wake_up_all(&ctrl_info->block_requests_wait); 240 pqi_retry_raid_bypass_requests(ctrl_info); 241 scsi_unblock_requests(ctrl_info->scsi_host); 242 } 243 244 static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) 245 { 246 return ctrl_info->block_requests; 247 } 248 249 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info, 250 unsigned long timeout_msecs) 251 { 252 unsigned long remaining_msecs; 253 254 if (!pqi_ctrl_blocked(ctrl_info)) 255 return timeout_msecs; 256 257 atomic_inc(&ctrl_info->num_blocked_threads); 258 259 if (timeout_msecs == NO_TIMEOUT) { 260 wait_event(ctrl_info->block_requests_wait, 261 !pqi_ctrl_blocked(ctrl_info)); 262 remaining_msecs = timeout_msecs; 263 } else { 264 unsigned long remaining_jiffies; 265 266 remaining_jiffies = 267 wait_event_timeout(ctrl_info->block_requests_wait, 268 !pqi_ctrl_blocked(ctrl_info), 269 msecs_to_jiffies(timeout_msecs)); 270 remaining_msecs = jiffies_to_msecs(remaining_jiffies); 271 } 272 273 atomic_dec(&ctrl_info->num_blocked_threads); 274 275 return remaining_msecs; 276 } 277 278 static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) 279 { 280 atomic_inc(&ctrl_info->num_busy_threads); 281 } 282 283 static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) 284 { 285 atomic_dec(&ctrl_info->num_busy_threads); 286 } 287 288 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info) 289 { 290 while (atomic_read(&ctrl_info->num_busy_threads) > 291 atomic_read(&ctrl_info->num_blocked_threads)) 292 usleep_range(1000, 2000); 293 } 294 295 static inline bool pqi_device_offline(struct pqi_scsi_dev *device) 296 { 297 return device->device_offline; 298 } 299 300 static inline void pqi_device_reset_start(struct pqi_scsi_dev *device) 301 { 302 device->in_reset = true; 303 } 304 305 static inline void pqi_device_reset_done(struct pqi_scsi_dev *device) 306 { 307 device->in_reset = false; 308 } 309 310 static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device) 311 { 312 return device->in_reset; 313 } 314 315 static inline void pqi_schedule_rescan_worker_with_delay( 316 struct pqi_ctrl_info *ctrl_info, unsigned long delay) 317 { 318 if (pqi_ctrl_offline(ctrl_info)) 319 return; 320 321 schedule_delayed_work(&ctrl_info->rescan_work, delay); 322 } 323 324 static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info) 325 { 326 pqi_schedule_rescan_worker_with_delay(ctrl_info, 0); 327 } 328 329 #define PQI_RESCAN_WORK_DELAY (10 * HZ) 330 331 static inline void pqi_schedule_rescan_worker_delayed( 332 struct pqi_ctrl_info *ctrl_info) 333 { 334 pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY); 335 } 336 337 static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) 338 { 339 cancel_delayed_work_sync(&ctrl_info->rescan_work); 340 } 341 342 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) 343 { 344 if (!ctrl_info->heartbeat_counter) 345 return 0; 346 347 return readl(ctrl_info->heartbeat_counter); 348 } 349 350 static int pqi_map_single(struct pci_dev *pci_dev, 351 struct pqi_sg_descriptor *sg_descriptor, void *buffer, 352 size_t buffer_length, enum dma_data_direction data_direction) 353 { 354 dma_addr_t bus_address; 355 356 if (!buffer 
|| buffer_length == 0 || data_direction == DMA_NONE) 357 return 0; 358 359 bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length, 360 data_direction); 361 if (dma_mapping_error(&pci_dev->dev, bus_address)) 362 return -ENOMEM; 363 364 put_unaligned_le64((u64)bus_address, &sg_descriptor->address); 365 put_unaligned_le32(buffer_length, &sg_descriptor->length); 366 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 367 368 return 0; 369 } 370 371 static void pqi_pci_unmap(struct pci_dev *pci_dev, 372 struct pqi_sg_descriptor *descriptors, int num_descriptors, 373 enum dma_data_direction data_direction) 374 { 375 int i; 376 377 if (data_direction == DMA_NONE) 378 return; 379 380 for (i = 0; i < num_descriptors; i++) 381 dma_unmap_single(&pci_dev->dev, 382 (dma_addr_t)get_unaligned_le64(&descriptors[i].address), 383 get_unaligned_le32(&descriptors[i].length), 384 data_direction); 385 } 386 387 static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, 388 struct pqi_raid_path_request *request, u8 cmd, 389 u8 *scsi3addr, void *buffer, size_t buffer_length, 390 u16 vpd_page, enum dma_data_direction *dir) 391 { 392 u8 *cdb; 393 394 memset(request, 0, sizeof(*request)); 395 396 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 397 put_unaligned_le16(offsetof(struct pqi_raid_path_request, 398 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH, 399 &request->header.iu_length); 400 put_unaligned_le32(buffer_length, &request->buffer_length); 401 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number)); 402 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 403 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 404 405 cdb = request->cdb; 406 407 switch (cmd) { 408 case INQUIRY: 409 request->data_direction = SOP_READ_FLAG; 410 cdb[0] = INQUIRY; 411 if (vpd_page & VPD_PAGE) { 412 cdb[1] = 0x1; 413 cdb[2] = (u8)vpd_page; 414 } 415 cdb[4] = (u8)buffer_length; 416 break; 417 case CISS_REPORT_LOG: 418 case CISS_REPORT_PHYS: 419 request->data_direction = SOP_READ_FLAG; 420 cdb[0] = cmd; 421 if (cmd == CISS_REPORT_PHYS) 422 cdb[1] = CISS_REPORT_PHYS_EXTENDED; 423 else 424 cdb[1] = CISS_REPORT_LOG_EXTENDED; 425 put_unaligned_be32(buffer_length, &cdb[6]); 426 break; 427 case CISS_GET_RAID_MAP: 428 request->data_direction = SOP_READ_FLAG; 429 cdb[0] = CISS_READ; 430 cdb[1] = CISS_GET_RAID_MAP; 431 put_unaligned_be32(buffer_length, &cdb[6]); 432 break; 433 case SA_FLUSH_CACHE: 434 request->data_direction = SOP_WRITE_FLAG; 435 cdb[0] = BMIC_WRITE; 436 cdb[6] = BMIC_FLUSH_CACHE; 437 put_unaligned_be16(buffer_length, &cdb[7]); 438 break; 439 case BMIC_IDENTIFY_CONTROLLER: 440 case BMIC_IDENTIFY_PHYSICAL_DEVICE: 441 request->data_direction = SOP_READ_FLAG; 442 cdb[0] = BMIC_READ; 443 cdb[6] = cmd; 444 put_unaligned_be16(buffer_length, &cdb[7]); 445 break; 446 case BMIC_WRITE_HOST_WELLNESS: 447 request->data_direction = SOP_WRITE_FLAG; 448 cdb[0] = BMIC_WRITE; 449 cdb[6] = cmd; 450 put_unaligned_be16(buffer_length, &cdb[7]); 451 break; 452 default: 453 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n", 454 cmd); 455 break; 456 } 457 458 switch (request->data_direction) { 459 case SOP_READ_FLAG: 460 *dir = DMA_FROM_DEVICE; 461 break; 462 case SOP_WRITE_FLAG: 463 *dir = DMA_TO_DEVICE; 464 break; 465 case SOP_NO_DIRECTION_FLAG: 466 *dir = DMA_NONE; 467 break; 468 default: 469 *dir = DMA_BIDIRECTIONAL; 470 break; 471 } 472 473 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0], 474 buffer, buffer_length, *dir); 475 } 476 477 static
inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 478 { 479 io_request->scmd = NULL; 480 io_request->status = 0; 481 io_request->error_info = NULL; 482 io_request->raid_bypass = false; 483 } 484 485 static struct pqi_io_request *pqi_alloc_io_request( 486 struct pqi_ctrl_info *ctrl_info) 487 { 488 struct pqi_io_request *io_request; 489 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ 490 491 while (1) { 492 io_request = &ctrl_info->io_request_pool[i]; 493 if (atomic_inc_return(&io_request->refcount) == 1) 494 break; 495 atomic_dec(&io_request->refcount); 496 i = (i + 1) % ctrl_info->max_io_slots; 497 } 498 499 /* benignly racy */ 500 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; 501 502 pqi_reinit_io_request(io_request); 503 504 return io_request; 505 } 506 507 static void pqi_free_io_request(struct pqi_io_request *io_request) 508 { 509 atomic_dec(&io_request->refcount); 510 } 511 512 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 513 struct bmic_identify_controller *buffer) 514 { 515 int rc; 516 enum dma_data_direction dir; 517 struct pqi_raid_path_request request; 518 519 rc = pqi_build_raid_path_request(ctrl_info, &request, 520 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 521 sizeof(*buffer), 0, &dir); 522 if (rc) 523 return rc; 524 525 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 526 NULL, NO_TIMEOUT); 527 528 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 529 return rc; 530 } 531 532 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 533 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 534 { 535 int rc; 536 enum dma_data_direction dir; 537 struct pqi_raid_path_request request; 538 539 rc = pqi_build_raid_path_request(ctrl_info, &request, 540 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 541 &dir); 542 if (rc) 543 return rc; 544 545 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 546 NULL, NO_TIMEOUT); 547 548 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 549 return rc; 550 } 551 552 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 553 struct pqi_scsi_dev *device, 554 struct bmic_identify_physical_device *buffer, 555 size_t buffer_length) 556 { 557 int rc; 558 enum dma_data_direction dir; 559 u16 bmic_device_index; 560 struct pqi_raid_path_request request; 561 562 rc = pqi_build_raid_path_request(ctrl_info, &request, 563 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 564 buffer_length, 0, &dir); 565 if (rc) 566 return rc; 567 568 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 569 request.cdb[2] = (u8)bmic_device_index; 570 request.cdb[9] = (u8)(bmic_device_index >> 8); 571 572 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 573 0, NULL, NO_TIMEOUT); 574 575 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 576 return rc; 577 } 578 579 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 580 enum bmic_flush_cache_shutdown_event shutdown_event) 581 { 582 int rc; 583 struct pqi_raid_path_request request; 584 struct bmic_flush_cache *flush_cache; 585 enum dma_data_direction dir; 586 587 /* 588 * Don't bother trying to flush the cache if the controller is 589 * locked up. 
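 * A locked-up controller cannot service the SA_FLUSH_CACHE BMIC
 * request built below, so bail out with -ENXIO rather than issue a
 * request the firmware can no longer complete.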
590 */ 591 if (pqi_ctrl_offline(ctrl_info)) 592 return -ENXIO; 593 594 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 595 if (!flush_cache) 596 return -ENOMEM; 597 598 flush_cache->shutdown_event = shutdown_event; 599 600 rc = pqi_build_raid_path_request(ctrl_info, &request, 601 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, 602 sizeof(*flush_cache), 0, &dir); 603 if (rc) 604 goto out; 605 606 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 607 0, NULL, NO_TIMEOUT); 608 609 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 610 out: 611 kfree(flush_cache); 612 613 return rc; 614 } 615 616 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 617 void *buffer, size_t buffer_length) 618 { 619 int rc; 620 struct pqi_raid_path_request request; 621 enum dma_data_direction dir; 622 623 rc = pqi_build_raid_path_request(ctrl_info, &request, 624 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 625 buffer_length, 0, &dir); 626 if (rc) 627 return rc; 628 629 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 630 0, NULL, NO_TIMEOUT); 631 632 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 633 return rc; 634 } 635 636 #pragma pack(1) 637 638 struct bmic_host_wellness_driver_version { 639 u8 start_tag[4]; 640 u8 driver_version_tag[2]; 641 __le16 driver_version_length; 642 char driver_version[32]; 643 u8 end_tag[2]; 644 }; 645 646 #pragma pack() 647 648 static int pqi_write_driver_version_to_host_wellness( 649 struct pqi_ctrl_info *ctrl_info) 650 { 651 int rc; 652 struct bmic_host_wellness_driver_version *buffer; 653 size_t buffer_length; 654 655 buffer_length = sizeof(*buffer); 656 657 buffer = kmalloc(buffer_length, GFP_KERNEL); 658 if (!buffer) 659 return -ENOMEM; 660 661 buffer->start_tag[0] = '<'; 662 buffer->start_tag[1] = 'H'; 663 buffer->start_tag[2] = 'W'; 664 buffer->start_tag[3] = '>'; 665 buffer->driver_version_tag[0] = 'D'; 666 buffer->driver_version_tag[1] = 'V'; 667 put_unaligned_le16(sizeof(buffer->driver_version), 668 &buffer->driver_version_length); 669 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 670 sizeof(buffer->driver_version) - 1); 671 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 672 buffer->end_tag[0] = 'Z'; 673 buffer->end_tag[1] = 'Z'; 674 675 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 676 677 kfree(buffer); 678 679 return rc; 680 } 681 682 #pragma pack(1) 683 684 struct bmic_host_wellness_time { 685 u8 start_tag[4]; 686 u8 time_tag[2]; 687 __le16 time_length; 688 u8 time[8]; 689 u8 dont_write_tag[2]; 690 u8 end_tag[2]; 691 }; 692 693 #pragma pack() 694 695 static int pqi_write_current_time_to_host_wellness( 696 struct pqi_ctrl_info *ctrl_info) 697 { 698 int rc; 699 struct bmic_host_wellness_time *buffer; 700 size_t buffer_length; 701 time64_t local_time; 702 unsigned int year; 703 struct tm tm; 704 705 buffer_length = sizeof(*buffer); 706 707 buffer = kmalloc(buffer_length, GFP_KERNEL); 708 if (!buffer) 709 return -ENOMEM; 710 711 buffer->start_tag[0] = '<'; 712 buffer->start_tag[1] = 'H'; 713 buffer->start_tag[2] = 'W'; 714 buffer->start_tag[3] = '>'; 715 buffer->time_tag[0] = 'T'; 716 buffer->time_tag[1] = 'D'; 717 put_unaligned_le16(sizeof(buffer->time), 718 &buffer->time_length); 719 720 local_time = ktime_get_real_seconds(); 721 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 722 year = tm.tm_year + 1900; 723 724 buffer->time[0] = bin2bcd(tm.tm_hour); 725 buffer->time[1] = bin2bcd(tm.tm_min); 726 
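/*
 * The eight time bytes are BCD encoded: hour, minute, second, a zero
 * byte, month, day, century and two-digit year.  For example,
 * 2017-06-30 14:05:09 is sent as 14 05 09 00 06 30 20 17
 * (bin2bcd(30) == 0x30, and so on).
 */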
buffer->time[2] = bin2bcd(tm.tm_sec); 727 buffer->time[3] = 0; 728 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 729 buffer->time[5] = bin2bcd(tm.tm_mday); 730 buffer->time[6] = bin2bcd(year / 100); 731 buffer->time[7] = bin2bcd(year % 100); 732 733 buffer->dont_write_tag[0] = 'D'; 734 buffer->dont_write_tag[1] = 'W'; 735 buffer->end_tag[0] = 'Z'; 736 buffer->end_tag[1] = 'Z'; 737 738 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 739 740 kfree(buffer); 741 742 return rc; 743 } 744 745 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 746 747 static void pqi_update_time_worker(struct work_struct *work) 748 { 749 int rc; 750 struct pqi_ctrl_info *ctrl_info; 751 752 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 753 update_time_work); 754 755 if (pqi_ctrl_offline(ctrl_info)) 756 return; 757 758 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 759 if (rc) 760 dev_warn(&ctrl_info->pci_dev->dev, 761 "error updating time on controller\n"); 762 763 schedule_delayed_work(&ctrl_info->update_time_work, 764 PQI_UPDATE_TIME_WORK_INTERVAL); 765 } 766 767 static inline void pqi_schedule_update_time_worker( 768 struct pqi_ctrl_info *ctrl_info) 769 { 770 schedule_delayed_work(&ctrl_info->update_time_work, 0); 771 } 772 773 static inline void pqi_cancel_update_time_worker( 774 struct pqi_ctrl_info *ctrl_info) 775 { 776 cancel_delayed_work_sync(&ctrl_info->update_time_work); 777 } 778 779 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 780 void *buffer, size_t buffer_length) 781 { 782 int rc; 783 enum dma_data_direction dir; 784 struct pqi_raid_path_request request; 785 786 rc = pqi_build_raid_path_request(ctrl_info, &request, 787 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir); 788 if (rc) 789 return rc; 790 791 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 792 NULL, NO_TIMEOUT); 793 794 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 795 return rc; 796 } 797 798 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 799 void **buffer) 800 { 801 int rc; 802 size_t lun_list_length; 803 size_t lun_data_length; 804 size_t new_lun_list_length; 805 void *lun_data = NULL; 806 struct report_lun_header *report_lun_header; 807 808 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 809 if (!report_lun_header) { 810 rc = -ENOMEM; 811 goto out; 812 } 813 814 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, 815 sizeof(*report_lun_header)); 816 if (rc) 817 goto out; 818 819 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 820 821 again: 822 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 823 824 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 825 if (!lun_data) { 826 rc = -ENOMEM; 827 goto out; 828 } 829 830 if (lun_list_length == 0) { 831 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 832 goto out; 833 } 834 835 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 836 if (rc) 837 goto out; 838 839 new_lun_list_length = get_unaligned_be32( 840 &((struct report_lun_header *)lun_data)->list_length); 841 842 if (new_lun_list_length > lun_list_length) { 843 lun_list_length = new_lun_list_length; 844 kfree(lun_data); 845 goto again; 846 } 847 848 out: 849 kfree(report_lun_header); 850 851 if (rc) { 852 kfree(lun_data); 853 lun_data = NULL; 854 } 855 856 *buffer = lun_data; 857 858 return rc; 859 } 860 861 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, 
862 void **buffer) 863 { 864 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, 865 buffer); 866 } 867 868 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, 869 void **buffer) 870 { 871 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 872 } 873 874 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 875 struct report_phys_lun_extended **physdev_list, 876 struct report_log_lun_extended **logdev_list) 877 { 878 int rc; 879 size_t logdev_list_length; 880 size_t logdev_data_length; 881 struct report_log_lun_extended *internal_logdev_list; 882 struct report_log_lun_extended *logdev_data; 883 struct report_lun_header report_lun_header; 884 885 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 886 if (rc) 887 dev_err(&ctrl_info->pci_dev->dev, 888 "report physical LUNs failed\n"); 889 890 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 891 if (rc) 892 dev_err(&ctrl_info->pci_dev->dev, 893 "report logical LUNs failed\n"); 894 895 /* 896 * Tack the controller itself onto the end of the logical device list. 897 */ 898 899 logdev_data = *logdev_list; 900 901 if (logdev_data) { 902 logdev_list_length = 903 get_unaligned_be32(&logdev_data->header.list_length); 904 } else { 905 memset(&report_lun_header, 0, sizeof(report_lun_header)); 906 logdev_data = 907 (struct report_log_lun_extended *)&report_lun_header; 908 logdev_list_length = 0; 909 } 910 911 logdev_data_length = sizeof(struct report_lun_header) + 912 logdev_list_length; 913 914 internal_logdev_list = kmalloc(logdev_data_length + 915 sizeof(struct report_log_lun_extended), GFP_KERNEL); 916 if (!internal_logdev_list) { 917 kfree(*logdev_list); 918 *logdev_list = NULL; 919 return -ENOMEM; 920 } 921 922 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 923 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 924 sizeof(struct report_log_lun_extended_entry)); 925 put_unaligned_be32(logdev_list_length + 926 sizeof(struct report_log_lun_extended_entry), 927 &internal_logdev_list->header.list_length); 928 929 kfree(*logdev_list); 930 *logdev_list = internal_logdev_list; 931 932 return 0; 933 } 934 935 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 936 int bus, int target, int lun) 937 { 938 device->bus = bus; 939 device->target = target; 940 device->lun = lun; 941 } 942 943 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 944 { 945 u8 *scsi3addr; 946 u32 lunid; 947 int bus; 948 int target; 949 int lun; 950 951 scsi3addr = device->scsi3addr; 952 lunid = get_unaligned_le32(scsi3addr); 953 954 if (pqi_is_hba_lunid(scsi3addr)) { 955 /* The specified device is the controller. */ 956 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 957 device->target_lun_valid = true; 958 return; 959 } 960 961 if (pqi_is_logical_device(device)) { 962 if (device->is_external_raid_device) { 963 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 964 target = (lunid >> 16) & 0x3fff; 965 lun = lunid & 0xff; 966 } else { 967 bus = PQI_RAID_VOLUME_BUS; 968 target = 0; 969 lun = lunid & 0x3fff; 970 } 971 pqi_set_bus_target_lun(device, bus, target, lun); 972 device->target_lun_valid = true; 973 return; 974 } 975 976 /* 977 * Defer target and LUN assignment for non-controller physical devices 978 * because the SAS transport layer will make these assignments later. 
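 * Until then the device is parked on PQI_PHYSICAL_DEVICE_BUS with
 * target 0 and LUN 0, and target_lun_valid stays false.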
979 */ 980 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 981 } 982 983 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 984 struct pqi_scsi_dev *device) 985 { 986 int rc; 987 u8 raid_level; 988 u8 *buffer; 989 990 raid_level = SA_RAID_UNKNOWN; 991 992 buffer = kmalloc(64, GFP_KERNEL); 993 if (buffer) { 994 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 995 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 996 if (rc == 0) { 997 raid_level = buffer[8]; 998 if (raid_level > SA_RAID_MAX) 999 raid_level = SA_RAID_UNKNOWN; 1000 } 1001 kfree(buffer); 1002 } 1003 1004 device->raid_level = raid_level; 1005 } 1006 1007 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1008 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1009 { 1010 char *err_msg; 1011 u32 raid_map_size; 1012 u32 r5or6_blocks_per_row; 1013 unsigned int num_phys_disks; 1014 unsigned int num_raid_map_entries; 1015 1016 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1017 1018 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1019 err_msg = "RAID map too small"; 1020 goto bad_raid_map; 1021 } 1022 1023 if (raid_map_size > sizeof(*raid_map)) { 1024 err_msg = "RAID map too large"; 1025 goto bad_raid_map; 1026 } 1027 1028 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) * 1029 (get_unaligned_le16(&raid_map->data_disks_per_row) + 1030 get_unaligned_le16(&raid_map->metadata_disks_per_row)); 1031 num_raid_map_entries = num_phys_disks * 1032 get_unaligned_le16(&raid_map->row_cnt); 1033 1034 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) { 1035 err_msg = "invalid number of map entries in RAID map"; 1036 goto bad_raid_map; 1037 } 1038 1039 if (device->raid_level == SA_RAID_1) { 1040 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1041 err_msg = "invalid RAID-1 map"; 1042 goto bad_raid_map; 1043 } 1044 } else if (device->raid_level == SA_RAID_ADM) { 1045 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1046 err_msg = "invalid RAID-1(ADM) map"; 1047 goto bad_raid_map; 1048 } 1049 } else if ((device->raid_level == SA_RAID_5 || 1050 device->raid_level == SA_RAID_6) && 1051 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1052 /* RAID 50/60 */ 1053 r5or6_blocks_per_row = 1054 get_unaligned_le16(&raid_map->strip_size) * 1055 get_unaligned_le16(&raid_map->data_disks_per_row); 1056 if (r5or6_blocks_per_row == 0) { 1057 err_msg = "invalid RAID-5 or RAID-6 map"; 1058 goto bad_raid_map; 1059 } 1060 } 1061 1062 return 0; 1063 1064 bad_raid_map: 1065 dev_warn(&ctrl_info->pci_dev->dev, 1066 "logical device %08x%08x %s\n", 1067 *((u32 *)&device->scsi3addr), 1068 *((u32 *)&device->scsi3addr[4]), err_msg); 1069 1070 return -EINVAL; 1071 } 1072 1073 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1074 struct pqi_scsi_dev *device) 1075 { 1076 int rc; 1077 enum dma_data_direction dir; 1078 struct pqi_raid_path_request request; 1079 struct raid_map *raid_map; 1080 1081 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1082 if (!raid_map) 1083 return -ENOMEM; 1084 1085 rc = pqi_build_raid_path_request(ctrl_info, &request, 1086 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1087 sizeof(*raid_map), 0, &dir); 1088 if (rc) 1089 goto error; 1090 1091 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1092 NULL, NO_TIMEOUT); 1093 1094 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 1095 1096 if (rc) 1097 goto error; 1098 1099 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 
1100 if (rc) 1101 goto error; 1102 1103 device->raid_map = raid_map; 1104 1105 return 0; 1106 1107 error: 1108 kfree(raid_map); 1109 1110 return rc; 1111 } 1112 1113 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1114 struct pqi_scsi_dev *device) 1115 { 1116 int rc; 1117 u8 *buffer; 1118 u8 bypass_status; 1119 1120 buffer = kmalloc(64, GFP_KERNEL); 1121 if (!buffer) 1122 return; 1123 1124 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1125 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1126 if (rc) 1127 goto out; 1128 1129 #define RAID_BYPASS_STATUS 4 1130 #define RAID_BYPASS_CONFIGURED 0x1 1131 #define RAID_BYPASS_ENABLED 0x2 1132 1133 bypass_status = buffer[RAID_BYPASS_STATUS]; 1134 device->raid_bypass_configured = 1135 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1136 if (device->raid_bypass_configured && 1137 (bypass_status & RAID_BYPASS_ENABLED) && 1138 pqi_get_raid_map(ctrl_info, device) == 0) 1139 device->raid_bypass_enabled = true; 1140 1141 out: 1142 kfree(buffer); 1143 } 1144 1145 /* 1146 * Use vendor-specific VPD to determine online/offline status of a volume. 1147 */ 1148 1149 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1150 struct pqi_scsi_dev *device) 1151 { 1152 int rc; 1153 size_t page_length; 1154 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1155 bool volume_offline = true; 1156 u32 volume_flags; 1157 struct ciss_vpd_logical_volume_status *vpd; 1158 1159 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1160 if (!vpd) 1161 goto no_buffer; 1162 1163 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1164 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1165 if (rc) 1166 goto out; 1167 1168 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1169 volume_status) + vpd->page_length; 1170 if (page_length < sizeof(*vpd)) 1171 goto out; 1172 1173 volume_status = vpd->volume_status; 1174 volume_flags = get_unaligned_be32(&vpd->flags); 1175 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1176 1177 out: 1178 kfree(vpd); 1179 no_buffer: 1180 device->volume_status = volume_status; 1181 device->volume_offline = volume_offline; 1182 } 1183 1184 #define PQI_INQUIRY_PAGE0_RETRIES 3 1185 1186 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1187 struct pqi_scsi_dev *device) 1188 { 1189 int rc; 1190 u8 *buffer; 1191 unsigned int retries; 1192 1193 buffer = kmalloc(64, GFP_KERNEL); 1194 if (!buffer) 1195 return -ENOMEM; 1196 1197 /* Send an inquiry to the device to see what it is. 
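 * A physical device may fail the INQUIRY with PQI_CMD_STATUS_ABORTED;
 * retry it up to PQI_INQUIRY_PAGE0_RETRIES times before giving up.
 * Logical devices and any other error are not retried.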
*/ 1198 for (retries = 0;;) { 1199 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, 1200 buffer, 64); 1201 if (rc == 0) 1202 break; 1203 if (pqi_is_logical_device(device) || 1204 rc != PQI_CMD_STATUS_ABORTED || 1205 ++retries > PQI_INQUIRY_PAGE0_RETRIES) 1206 goto out; 1207 } 1208 1209 scsi_sanitize_inquiry_string(&buffer[8], 8); 1210 scsi_sanitize_inquiry_string(&buffer[16], 16); 1211 1212 device->devtype = buffer[0] & 0x1f; 1213 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1214 memcpy(device->model, &buffer[16], sizeof(device->model)); 1215 1216 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { 1217 if (device->is_external_raid_device) { 1218 device->raid_level = SA_RAID_UNKNOWN; 1219 device->volume_status = CISS_LV_OK; 1220 device->volume_offline = false; 1221 } else { 1222 pqi_get_raid_level(ctrl_info, device); 1223 pqi_get_raid_bypass_status(ctrl_info, device); 1224 pqi_get_volume_status(ctrl_info, device); 1225 } 1226 } 1227 1228 out: 1229 kfree(buffer); 1230 1231 return rc; 1232 } 1233 1234 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, 1235 struct pqi_scsi_dev *device, 1236 struct bmic_identify_physical_device *id_phys) 1237 { 1238 int rc; 1239 1240 memset(id_phys, 0, sizeof(*id_phys)); 1241 1242 rc = pqi_identify_physical_device(ctrl_info, device, 1243 id_phys, sizeof(*id_phys)); 1244 if (rc) { 1245 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1246 return; 1247 } 1248 1249 device->queue_depth = 1250 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1251 device->device_type = id_phys->device_type; 1252 device->active_path_index = id_phys->active_path_number; 1253 device->path_map = id_phys->redundant_path_present_map; 1254 memcpy(&device->box, 1255 &id_phys->alternate_paths_phys_box_on_port, 1256 sizeof(device->box)); 1257 memcpy(&device->phys_connector, 1258 &id_phys->alternate_paths_phys_connector, 1259 sizeof(device->phys_connector)); 1260 device->bay = id_phys->phys_bay_in_box; 1261 } 1262 1263 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1264 struct pqi_scsi_dev *device) 1265 { 1266 char *status; 1267 static const char unknown_state_str[] = 1268 "Volume is in an unknown state (%u)"; 1269 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1270 1271 switch (device->volume_status) { 1272 case CISS_LV_OK: 1273 status = "Volume online"; 1274 break; 1275 case CISS_LV_FAILED: 1276 status = "Volume failed"; 1277 break; 1278 case CISS_LV_NOT_CONFIGURED: 1279 status = "Volume not configured"; 1280 break; 1281 case CISS_LV_DEGRADED: 1282 status = "Volume degraded"; 1283 break; 1284 case CISS_LV_READY_FOR_RECOVERY: 1285 status = "Volume ready for recovery operation"; 1286 break; 1287 case CISS_LV_UNDERGOING_RECOVERY: 1288 status = "Volume undergoing recovery"; 1289 break; 1290 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1291 status = "Wrong physical drive was replaced"; 1292 break; 1293 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1294 status = "A physical drive not properly connected"; 1295 break; 1296 case CISS_LV_HARDWARE_OVERHEATING: 1297 status = "Hardware is overheating"; 1298 break; 1299 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1300 status = "Hardware has overheated"; 1301 break; 1302 case CISS_LV_UNDERGOING_EXPANSION: 1303 status = "Volume undergoing expansion"; 1304 break; 1305 case CISS_LV_NOT_AVAILABLE: 1306 status = "Volume waiting for transforming volume"; 1307 break; 1308 case CISS_LV_QUEUED_FOR_EXPANSION: 1309 status = "Volume queued for expansion"; 
1310 break; 1311 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1312 status = "Volume disabled due to SCSI ID conflict"; 1313 break; 1314 case CISS_LV_EJECTED: 1315 status = "Volume has been ejected"; 1316 break; 1317 case CISS_LV_UNDERGOING_ERASE: 1318 status = "Volume undergoing background erase"; 1319 break; 1320 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1321 status = "Volume ready for predictive spare rebuild"; 1322 break; 1323 case CISS_LV_UNDERGOING_RPI: 1324 status = "Volume undergoing rapid parity initialization"; 1325 break; 1326 case CISS_LV_PENDING_RPI: 1327 status = "Volume queued for rapid parity initialization"; 1328 break; 1329 case CISS_LV_ENCRYPTED_NO_KEY: 1330 status = "Encrypted volume inaccessible - key not present"; 1331 break; 1332 case CISS_LV_UNDERGOING_ENCRYPTION: 1333 status = "Volume undergoing encryption process"; 1334 break; 1335 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1336 status = "Volume undergoing encryption re-keying process"; 1337 break; 1338 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1339 status = "Volume encrypted but encryption is disabled"; 1340 break; 1341 case CISS_LV_PENDING_ENCRYPTION: 1342 status = "Volume pending migration to encrypted state"; 1343 break; 1344 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1345 status = "Volume pending encryption rekeying"; 1346 break; 1347 case CISS_LV_NOT_SUPPORTED: 1348 status = "Volume not supported on this controller"; 1349 break; 1350 case CISS_LV_STATUS_UNAVAILABLE: 1351 status = "Volume status not available"; 1352 break; 1353 default: 1354 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1355 unknown_state_str, device->volume_status); 1356 status = unknown_state_buffer; 1357 break; 1358 } 1359 1360 dev_info(&ctrl_info->pci_dev->dev, 1361 "scsi %d:%d:%d:%d %s\n", 1362 ctrl_info->scsi_host->host_no, 1363 device->bus, device->target, device->lun, status); 1364 } 1365 1366 static void pqi_rescan_worker(struct work_struct *work) 1367 { 1368 struct pqi_ctrl_info *ctrl_info; 1369 1370 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1371 rescan_work); 1372 1373 pqi_scan_scsi_devices(ctrl_info); 1374 } 1375 1376 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1377 struct pqi_scsi_dev *device) 1378 { 1379 int rc; 1380 1381 if (pqi_is_logical_device(device)) 1382 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1383 device->target, device->lun); 1384 else 1385 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1386 1387 return rc; 1388 } 1389 1390 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1391 struct pqi_scsi_dev *device) 1392 { 1393 if (pqi_is_logical_device(device)) 1394 scsi_remove_device(device->sdev); 1395 else 1396 pqi_remove_sas_device(device); 1397 } 1398 1399 /* Assumes the SCSI device list lock is held. 
*/ 1400 1401 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1402 int bus, int target, int lun) 1403 { 1404 struct pqi_scsi_dev *device; 1405 1406 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1407 scsi_device_list_entry) 1408 if (device->bus == bus && device->target == target && 1409 device->lun == lun) 1410 return device; 1411 1412 return NULL; 1413 } 1414 1415 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, 1416 struct pqi_scsi_dev *dev2) 1417 { 1418 if (dev1->is_physical_device != dev2->is_physical_device) 1419 return false; 1420 1421 if (dev1->is_physical_device) 1422 return dev1->wwid == dev2->wwid; 1423 1424 return memcmp(dev1->volume_id, dev2->volume_id, 1425 sizeof(dev1->volume_id)) == 0; 1426 } 1427 1428 enum pqi_find_result { 1429 DEVICE_NOT_FOUND, 1430 DEVICE_CHANGED, 1431 DEVICE_SAME, 1432 }; 1433 1434 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1435 struct pqi_scsi_dev *device_to_find, 1436 struct pqi_scsi_dev **matching_device) 1437 { 1438 struct pqi_scsi_dev *device; 1439 1440 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1441 scsi_device_list_entry) { 1442 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, 1443 device->scsi3addr)) { 1444 *matching_device = device; 1445 if (pqi_device_equal(device_to_find, device)) { 1446 if (device_to_find->volume_offline) 1447 return DEVICE_CHANGED; 1448 return DEVICE_SAME; 1449 } 1450 return DEVICE_CHANGED; 1451 } 1452 } 1453 1454 return DEVICE_NOT_FOUND; 1455 } 1456 1457 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1458 1459 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1460 char *action, struct pqi_scsi_dev *device) 1461 { 1462 ssize_t count; 1463 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1464 1465 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1466 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1467 1468 if (device->target_lun_valid) 1469 count += snprintf(buffer + count, 1470 PQI_DEV_INFO_BUFFER_LENGTH - count, 1471 "%d:%d", 1472 device->target, 1473 device->lun); 1474 else 1475 count += snprintf(buffer + count, 1476 PQI_DEV_INFO_BUFFER_LENGTH - count, 1477 "-:-"); 1478 1479 if (pqi_is_logical_device(device)) 1480 count += snprintf(buffer + count, 1481 PQI_DEV_INFO_BUFFER_LENGTH - count, 1482 " %08x%08x", 1483 *((u32 *)&device->scsi3addr), 1484 *((u32 *)&device->scsi3addr[4])); 1485 else 1486 count += snprintf(buffer + count, 1487 PQI_DEV_INFO_BUFFER_LENGTH - count, 1488 " %016llx", device->sas_address); 1489 1490 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1491 " %s %.8s %.16s ", 1492 scsi_device_type(device->devtype), 1493 device->vendor, 1494 device->model); 1495 1496 if (pqi_is_logical_device(device)) { 1497 if (device->devtype == TYPE_DISK) 1498 count += snprintf(buffer + count, 1499 PQI_DEV_INFO_BUFFER_LENGTH - count, 1500 "SSDSmartPathCap%c En%c %-12s", 1501 device->raid_bypass_configured ? '+' : '-', 1502 device->raid_bypass_enabled ? '+' : '-', 1503 pqi_raid_level_to_string(device->raid_level)); 1504 } else { 1505 count += snprintf(buffer + count, 1506 PQI_DEV_INFO_BUFFER_LENGTH - count, 1507 "AIO%c", device->aio_enabled ? '+' : '-'); 1508 if (device->devtype == TYPE_DISK || 1509 device->devtype == TYPE_ZBC) 1510 count += snprintf(buffer + count, 1511 PQI_DEV_INFO_BUFFER_LENGTH - count, 1512 " qd=%-6d", device->queue_depth); 1513 } 1514 1515 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 1516 } 1517 1518 /* Assumes the SCSI device list lock is held. 
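 * pqi_scsi_update_device() copies the attributes that can change
 * across rescans from new_device into existing_device and takes
 * ownership of new_device's raid_map so only one copy of the map
 * survives.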
*/ 1519 1520 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, 1521 struct pqi_scsi_dev *new_device) 1522 { 1523 existing_device->devtype = new_device->devtype; 1524 existing_device->device_type = new_device->device_type; 1525 existing_device->bus = new_device->bus; 1526 if (new_device->target_lun_valid) { 1527 existing_device->target = new_device->target; 1528 existing_device->lun = new_device->lun; 1529 existing_device->target_lun_valid = true; 1530 } 1531 1532 /* By definition, the scsi3addr and wwid fields are already the same. */ 1533 1534 existing_device->is_physical_device = new_device->is_physical_device; 1535 existing_device->is_external_raid_device = 1536 new_device->is_external_raid_device; 1537 existing_device->aio_enabled = new_device->aio_enabled; 1538 memcpy(existing_device->vendor, new_device->vendor, 1539 sizeof(existing_device->vendor)); 1540 memcpy(existing_device->model, new_device->model, 1541 sizeof(existing_device->model)); 1542 existing_device->sas_address = new_device->sas_address; 1543 existing_device->raid_level = new_device->raid_level; 1544 existing_device->queue_depth = new_device->queue_depth; 1545 existing_device->aio_handle = new_device->aio_handle; 1546 existing_device->volume_status = new_device->volume_status; 1547 existing_device->active_path_index = new_device->active_path_index; 1548 existing_device->path_map = new_device->path_map; 1549 existing_device->bay = new_device->bay; 1550 memcpy(existing_device->box, new_device->box, 1551 sizeof(existing_device->box)); 1552 memcpy(existing_device->phys_connector, new_device->phys_connector, 1553 sizeof(existing_device->phys_connector)); 1554 existing_device->offload_to_mirror = 0; 1555 kfree(existing_device->raid_map); 1556 existing_device->raid_map = new_device->raid_map; 1557 existing_device->raid_bypass_configured = 1558 new_device->raid_bypass_configured; 1559 existing_device->raid_bypass_enabled = 1560 new_device->raid_bypass_enabled; 1561 1562 /* To prevent this from being freed later. */ 1563 new_device->raid_map = NULL; 1564 } 1565 1566 static inline void pqi_free_device(struct pqi_scsi_dev *device) 1567 { 1568 if (device) { 1569 kfree(device->raid_map); 1570 kfree(device); 1571 } 1572 } 1573 1574 /* 1575 * Called when exposing a new device to the OS fails in order to re-adjust 1576 * our internal SCSI device list to match the SCSI ML's view. 1577 */ 1578 1579 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 1580 struct pqi_scsi_dev *device) 1581 { 1582 unsigned long flags; 1583 1584 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1585 list_del(&device->scsi_device_list_entry); 1586 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1587 1588 /* Allow the device structure to be freed later. */ 1589 device->keep_device = false; 1590 } 1591 1592 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1593 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1594 { 1595 int rc; 1596 unsigned int i; 1597 unsigned long flags; 1598 enum pqi_find_result find_result; 1599 struct pqi_scsi_dev *device; 1600 struct pqi_scsi_dev *next; 1601 struct pqi_scsi_dev *matching_device; 1602 LIST_HEAD(add_list); 1603 LIST_HEAD(delete_list); 1604 1605 /* 1606 * The idea here is to do as little work as possible while holding the 1607 * spinlock. That's why we go to great pains to defer anything other 1608 * than updating the internal device list until after we release the 1609 * spinlock. 
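 * The update itself is a mark-and-sweep pass: every device on
 * scsi_device_list is first marked gone; devices reported by the
 * controller either clear that mark (DEVICE_SAME) or are flagged as
 * new (DEVICE_NOT_FOUND/DEVICE_CHANGED); whatever is still marked
 * gone moves to delete_list and the new devices move to add_list.
 * The SCSI midlayer add/remove calls are issued only after the
 * spinlock is dropped.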
1610 */ 1611 1612 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1613 1614 /* Assume that all devices in the existing list have gone away. */ 1615 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1616 scsi_device_list_entry) 1617 device->device_gone = true; 1618 1619 for (i = 0; i < num_new_devices; i++) { 1620 device = new_device_list[i]; 1621 1622 find_result = pqi_scsi_find_entry(ctrl_info, device, 1623 &matching_device); 1624 1625 switch (find_result) { 1626 case DEVICE_SAME: 1627 /* 1628 * The newly found device is already in the existing 1629 * device list. 1630 */ 1631 device->new_device = false; 1632 matching_device->device_gone = false; 1633 pqi_scsi_update_device(matching_device, device); 1634 break; 1635 case DEVICE_NOT_FOUND: 1636 /* 1637 * The newly found device is NOT in the existing device 1638 * list. 1639 */ 1640 device->new_device = true; 1641 break; 1642 case DEVICE_CHANGED: 1643 /* 1644 * The original device has gone away and we need to add 1645 * the new device. 1646 */ 1647 device->new_device = true; 1648 break; 1649 } 1650 } 1651 1652 /* Process all devices that have gone away. */ 1653 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 1654 scsi_device_list_entry) { 1655 if (device->device_gone) { 1656 list_del(&device->scsi_device_list_entry); 1657 list_add_tail(&device->delete_list_entry, &delete_list); 1658 } 1659 } 1660 1661 /* Process all new devices. */ 1662 for (i = 0; i < num_new_devices; i++) { 1663 device = new_device_list[i]; 1664 if (!device->new_device) 1665 continue; 1666 if (device->volume_offline) 1667 continue; 1668 list_add_tail(&device->scsi_device_list_entry, 1669 &ctrl_info->scsi_device_list); 1670 list_add_tail(&device->add_list_entry, &add_list); 1671 /* To prevent this device structure from being freed later. */ 1672 device->keep_device = true; 1673 } 1674 1675 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1676 1677 /* Remove all devices that have gone away. */ 1678 list_for_each_entry_safe(device, next, &delete_list, 1679 delete_list_entry) { 1680 if (device->volume_offline) { 1681 pqi_dev_info(ctrl_info, "offline", device); 1682 pqi_show_volume_status(ctrl_info, device); 1683 } else { 1684 pqi_dev_info(ctrl_info, "removed", device); 1685 } 1686 if (device->sdev) 1687 pqi_remove_device(ctrl_info, device); 1688 list_del(&device->delete_list_entry); 1689 pqi_free_device(device); 1690 } 1691 1692 /* 1693 * Notify the SCSI ML if the queue depth of any existing device has 1694 * changed. 1695 */ 1696 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1697 scsi_device_list_entry) { 1698 if (device->sdev && device->queue_depth != 1699 device->advertised_queue_depth) { 1700 device->advertised_queue_depth = device->queue_depth; 1701 scsi_change_queue_depth(device->sdev, 1702 device->advertised_queue_depth); 1703 } 1704 } 1705 1706 /* Expose any new devices. 
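 * If exposing a device to the SCSI midlayer fails, the botched entry
 * is taken back off the internal list via pqi_fixup_botched_add() so
 * it can be freed.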
*/ 1707 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1708 if (!device->sdev) { 1709 pqi_dev_info(ctrl_info, "added", device); 1710 rc = pqi_add_device(ctrl_info, device); 1711 if (rc) { 1712 dev_warn(&ctrl_info->pci_dev->dev, 1713 "scsi %d:%d:%d:%d addition failed, device not added\n", 1714 ctrl_info->scsi_host->host_no, 1715 device->bus, device->target, 1716 device->lun); 1717 pqi_fixup_botched_add(ctrl_info, device); 1718 } 1719 } 1720 } 1721 } 1722 1723 static bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1724 { 1725 bool is_supported = false; 1726 1727 switch (device->devtype) { 1728 case TYPE_DISK: 1729 case TYPE_ZBC: 1730 case TYPE_TAPE: 1731 case TYPE_MEDIUM_CHANGER: 1732 case TYPE_ENCLOSURE: 1733 is_supported = true; 1734 break; 1735 case TYPE_RAID: 1736 /* 1737 * Only support the HBA controller itself as a RAID 1738 * controller. If it's a RAID controller other than 1739 * the HBA itself (an external RAID controller, for 1740 * example), we don't support it. 1741 */ 1742 if (pqi_is_hba_lunid(device->scsi3addr)) 1743 is_supported = true; 1744 break; 1745 } 1746 1747 return is_supported; 1748 } 1749 1750 static inline bool pqi_skip_device(u8 *scsi3addr) 1751 { 1752 /* Ignore all masked devices. */ 1753 if (MASKED_DEVICE(scsi3addr)) 1754 return true; 1755 1756 return false; 1757 } 1758 1759 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1760 { 1761 int i; 1762 int rc; 1763 LIST_HEAD(new_device_list_head); 1764 struct report_phys_lun_extended *physdev_list = NULL; 1765 struct report_log_lun_extended *logdev_list = NULL; 1766 struct report_phys_lun_extended_entry *phys_lun_ext_entry; 1767 struct report_log_lun_extended_entry *log_lun_ext_entry; 1768 struct bmic_identify_physical_device *id_phys = NULL; 1769 u32 num_physicals; 1770 u32 num_logicals; 1771 struct pqi_scsi_dev **new_device_list = NULL; 1772 struct pqi_scsi_dev *device; 1773 struct pqi_scsi_dev *next; 1774 unsigned int num_new_devices; 1775 unsigned int num_valid_devices; 1776 bool is_physical_device; 1777 u8 *scsi3addr; 1778 static char *out_of_memory_msg = 1779 "failed to allocate memory, device discovery stopped"; 1780 1781 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 1782 if (rc) 1783 goto out; 1784 1785 if (physdev_list) 1786 num_physicals = 1787 get_unaligned_be32(&physdev_list->header.list_length) 1788 / sizeof(physdev_list->lun_entries[0]); 1789 else 1790 num_physicals = 0; 1791 1792 if (logdev_list) 1793 num_logicals = 1794 get_unaligned_be32(&logdev_list->header.list_length) 1795 / sizeof(logdev_list->lun_entries[0]); 1796 else 1797 num_logicals = 0; 1798 1799 if (num_physicals) { 1800 /* 1801 * We need this buffer for calls to pqi_get_physical_disk_info() 1802 * below. We allocate it here instead of inside 1803 * pqi_get_physical_disk_info() because it's a fairly large 1804 * buffer. 
1805 */ 1806 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 1807 if (!id_phys) { 1808 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1809 out_of_memory_msg); 1810 rc = -ENOMEM; 1811 goto out; 1812 } 1813 } 1814 1815 num_new_devices = num_physicals + num_logicals; 1816 1817 new_device_list = kmalloc_array(num_new_devices, 1818 sizeof(*new_device_list), 1819 GFP_KERNEL); 1820 if (!new_device_list) { 1821 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 1822 rc = -ENOMEM; 1823 goto out; 1824 } 1825 1826 for (i = 0; i < num_new_devices; i++) { 1827 device = kzalloc(sizeof(*device), GFP_KERNEL); 1828 if (!device) { 1829 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1830 out_of_memory_msg); 1831 rc = -ENOMEM; 1832 goto out; 1833 } 1834 list_add_tail(&device->new_device_list_entry, 1835 &new_device_list_head); 1836 } 1837 1838 device = NULL; 1839 num_valid_devices = 0; 1840 1841 for (i = 0; i < num_new_devices; i++) { 1842 1843 if (i < num_physicals) { 1844 is_physical_device = true; 1845 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 1846 log_lun_ext_entry = NULL; 1847 scsi3addr = phys_lun_ext_entry->lunid; 1848 } else { 1849 is_physical_device = false; 1850 phys_lun_ext_entry = NULL; 1851 log_lun_ext_entry = 1852 &logdev_list->lun_entries[i - num_physicals]; 1853 scsi3addr = log_lun_ext_entry->lunid; 1854 } 1855 1856 if (is_physical_device && pqi_skip_device(scsi3addr)) 1857 continue; 1858 1859 if (device) 1860 device = list_next_entry(device, new_device_list_entry); 1861 else 1862 device = list_first_entry(&new_device_list_head, 1863 struct pqi_scsi_dev, new_device_list_entry); 1864 1865 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1866 device->is_physical_device = is_physical_device; 1867 if (!is_physical_device) 1868 device->is_external_raid_device = 1869 pqi_is_external_raid_addr(scsi3addr); 1870 1871 /* Gather information about the device. 
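 * This issues a standard INQUIRY and, for local (non-external)
 * logical volumes, also fetches the RAID level, bypass status and
 * volume status from vendor-specific VPD pages.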
*/ 1872 rc = pqi_get_device_info(ctrl_info, device); 1873 if (rc == -ENOMEM) { 1874 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1875 out_of_memory_msg); 1876 goto out; 1877 } 1878 if (rc) { 1879 if (device->is_physical_device) 1880 dev_warn(&ctrl_info->pci_dev->dev, 1881 "obtaining device info failed, skipping physical device %016llx\n", 1882 get_unaligned_be64( 1883 &phys_lun_ext_entry->wwid)); 1884 else 1885 dev_warn(&ctrl_info->pci_dev->dev, 1886 "obtaining device info failed, skipping logical device %08x%08x\n", 1887 *((u32 *)&device->scsi3addr), 1888 *((u32 *)&device->scsi3addr[4])); 1889 rc = 0; 1890 continue; 1891 } 1892 1893 if (!pqi_is_supported_device(device)) 1894 continue; 1895 1896 pqi_assign_bus_target_lun(device); 1897 1898 if (device->is_physical_device) { 1899 device->wwid = phys_lun_ext_entry->wwid; 1900 if ((phys_lun_ext_entry->device_flags & 1901 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 1902 phys_lun_ext_entry->aio_handle) 1903 device->aio_enabled = true; 1904 } else { 1905 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 1906 sizeof(device->volume_id)); 1907 } 1908 1909 switch (device->devtype) { 1910 case TYPE_DISK: 1911 case TYPE_ZBC: 1912 case TYPE_ENCLOSURE: 1913 if (device->is_physical_device) { 1914 device->sas_address = 1915 get_unaligned_be64(&device->wwid); 1916 if (device->devtype == TYPE_DISK || 1917 device->devtype == TYPE_ZBC) { 1918 device->aio_handle = 1919 phys_lun_ext_entry->aio_handle; 1920 pqi_get_physical_disk_info(ctrl_info, 1921 device, id_phys); 1922 } 1923 } 1924 break; 1925 } 1926 1927 new_device_list[num_valid_devices++] = device; 1928 } 1929 1930 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 1931 1932 out: 1933 list_for_each_entry_safe(device, next, &new_device_list_head, 1934 new_device_list_entry) { 1935 if (device->keep_device) 1936 continue; 1937 list_del(&device->new_device_list_entry); 1938 pqi_free_device(device); 1939 } 1940 1941 kfree(new_device_list); 1942 kfree(physdev_list); 1943 kfree(logdev_list); 1944 kfree(id_phys); 1945 1946 return rc; 1947 } 1948 1949 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1950 { 1951 unsigned long flags; 1952 struct pqi_scsi_dev *device; 1953 1954 while (1) { 1955 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1956 1957 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 1958 struct pqi_scsi_dev, scsi_device_list_entry); 1959 if (device) 1960 list_del(&device->scsi_device_list_entry); 1961 1962 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 1963 flags); 1964 1965 if (!device) 1966 break; 1967 1968 if (device->sdev) 1969 pqi_remove_device(ctrl_info, device); 1970 pqi_free_device(device); 1971 } 1972 } 1973 1974 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1975 { 1976 int rc; 1977 1978 if (pqi_ctrl_offline(ctrl_info)) 1979 return -ENXIO; 1980 1981 mutex_lock(&ctrl_info->scan_mutex); 1982 1983 rc = pqi_update_scsi_devices(ctrl_info); 1984 if (rc) 1985 pqi_schedule_rescan_worker_delayed(ctrl_info); 1986 1987 mutex_unlock(&ctrl_info->scan_mutex); 1988 1989 return rc; 1990 } 1991 1992 static void pqi_scan_start(struct Scsi_Host *shost) 1993 { 1994 pqi_scan_scsi_devices(shost_to_hba(shost)); 1995 } 1996 1997 /* Returns TRUE if scan is finished. 
*/ 1998 1999 static int pqi_scan_finished(struct Scsi_Host *shost, 2000 unsigned long elapsed_time) 2001 { 2002 struct pqi_ctrl_info *ctrl_info; 2003 2004 ctrl_info = shost_priv(shost); 2005 2006 return !mutex_is_locked(&ctrl_info->scan_mutex); 2007 } 2008 2009 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2010 { 2011 mutex_lock(&ctrl_info->scan_mutex); 2012 mutex_unlock(&ctrl_info->scan_mutex); 2013 } 2014 2015 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2016 { 2017 mutex_lock(&ctrl_info->lun_reset_mutex); 2018 mutex_unlock(&ctrl_info->lun_reset_mutex); 2019 } 2020 2021 static inline void pqi_set_encryption_info( 2022 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2023 u64 first_block) 2024 { 2025 u32 volume_blk_size; 2026 2027 /* 2028 * Set the encryption tweak values based on logical block address. 2029 * If the block size is 512, the tweak value is equal to the LBA. 2030 * For other block sizes, tweak value is (LBA * block size) / 512. 2031 */ 2032 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2033 if (volume_blk_size != 512) 2034 first_block = (first_block * volume_blk_size) / 512; 2035 2036 encryption_info->data_encryption_key_index = 2037 get_unaligned_le16(&raid_map->data_encryption_key_index); 2038 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2039 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2040 } 2041 2042 /* 2043 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2044 */ 2045 2046 #define PQI_RAID_BYPASS_INELIGIBLE 1 2047 2048 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2049 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2050 struct pqi_queue_group *queue_group) 2051 { 2052 struct raid_map *raid_map; 2053 bool is_write = false; 2054 u32 map_index; 2055 u64 first_block; 2056 u64 last_block; 2057 u32 block_cnt; 2058 u32 blocks_per_row; 2059 u64 first_row; 2060 u64 last_row; 2061 u32 first_row_offset; 2062 u32 last_row_offset; 2063 u32 first_column; 2064 u32 last_column; 2065 u64 r0_first_row; 2066 u64 r0_last_row; 2067 u32 r5or6_blocks_per_row; 2068 u64 r5or6_first_row; 2069 u64 r5or6_last_row; 2070 u32 r5or6_first_row_offset; 2071 u32 r5or6_last_row_offset; 2072 u32 r5or6_first_column; 2073 u32 r5or6_last_column; 2074 u16 data_disks_per_row; 2075 u32 total_disks_per_row; 2076 u16 layout_map_count; 2077 u32 stripesize; 2078 u16 strip_size; 2079 u32 first_group; 2080 u32 last_group; 2081 u32 current_group; 2082 u32 map_row; 2083 u32 aio_handle; 2084 u64 disk_block; 2085 u32 disk_block_cnt; 2086 u8 cdb[16]; 2087 u8 cdb_length; 2088 int offload_to_mirror; 2089 struct pqi_encryption_info *encryption_info_ptr; 2090 struct pqi_encryption_info encryption_info; 2091 #if BITS_PER_LONG == 32 2092 u64 tmpdiv; 2093 #endif 2094 2095 /* Check for valid opcode, get LBA and block count. 
*/ 2096 switch (scmd->cmnd[0]) { 2097 case WRITE_6: 2098 is_write = true; 2099 /* fall through */ 2100 case READ_6: 2101 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2102 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2103 block_cnt = (u32)scmd->cmnd[4]; 2104 if (block_cnt == 0) 2105 block_cnt = 256; 2106 break; 2107 case WRITE_10: 2108 is_write = true; 2109 /* fall through */ 2110 case READ_10: 2111 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2112 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2113 break; 2114 case WRITE_12: 2115 is_write = true; 2116 /* fall through */ 2117 case READ_12: 2118 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2119 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2120 break; 2121 case WRITE_16: 2122 is_write = true; 2123 /* fall through */ 2124 case READ_16: 2125 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2126 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2127 break; 2128 default: 2129 /* Process via normal I/O path. */ 2130 return PQI_RAID_BYPASS_INELIGIBLE; 2131 } 2132 2133 /* Check for write to non-RAID-0. */ 2134 if (is_write && device->raid_level != SA_RAID_0) 2135 return PQI_RAID_BYPASS_INELIGIBLE; 2136 2137 if (unlikely(block_cnt == 0)) 2138 return PQI_RAID_BYPASS_INELIGIBLE; 2139 2140 last_block = first_block + block_cnt - 1; 2141 raid_map = device->raid_map; 2142 2143 /* Check for invalid block or wraparound. */ 2144 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2145 last_block < first_block) 2146 return PQI_RAID_BYPASS_INELIGIBLE; 2147 2148 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2149 strip_size = get_unaligned_le16(&raid_map->strip_size); 2150 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2151 2152 /* Calculate stripe information for the request. */ 2153 blocks_per_row = data_disks_per_row * strip_size; 2154 #if BITS_PER_LONG == 32 2155 tmpdiv = first_block; 2156 do_div(tmpdiv, blocks_per_row); 2157 first_row = tmpdiv; 2158 tmpdiv = last_block; 2159 do_div(tmpdiv, blocks_per_row); 2160 last_row = tmpdiv; 2161 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2162 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2163 tmpdiv = first_row_offset; 2164 do_div(tmpdiv, strip_size); 2165 first_column = tmpdiv; 2166 tmpdiv = last_row_offset; 2167 do_div(tmpdiv, strip_size); 2168 last_column = tmpdiv; 2169 #else 2170 first_row = first_block / blocks_per_row; 2171 last_row = last_block / blocks_per_row; 2172 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2173 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2174 first_column = first_row_offset / strip_size; 2175 last_column = last_row_offset / strip_size; 2176 #endif 2177 2178 /* If this isn't a single row/column then give to the controller. */ 2179 if (first_row != last_row || first_column != last_column) 2180 return PQI_RAID_BYPASS_INELIGIBLE; 2181 2182 /* Proceeding with driver mapping. 
*/ 2183 total_disks_per_row = data_disks_per_row + 2184 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2185 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2186 get_unaligned_le16(&raid_map->row_cnt); 2187 map_index = (map_row * total_disks_per_row) + first_column; 2188 2189 /* RAID 1 */ 2190 if (device->raid_level == SA_RAID_1) { 2191 if (device->offload_to_mirror) 2192 map_index += data_disks_per_row; 2193 device->offload_to_mirror = !device->offload_to_mirror; 2194 } else if (device->raid_level == SA_RAID_ADM) { 2195 /* RAID ADM */ 2196 /* 2197 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2198 * divisible by 3. 2199 */ 2200 offload_to_mirror = device->offload_to_mirror; 2201 if (offload_to_mirror == 0) { 2202 /* use physical disk in the first mirrored group. */ 2203 map_index %= data_disks_per_row; 2204 } else { 2205 do { 2206 /* 2207 * Determine mirror group that map_index 2208 * indicates. 2209 */ 2210 current_group = map_index / data_disks_per_row; 2211 2212 if (offload_to_mirror != current_group) { 2213 if (current_group < 2214 layout_map_count - 1) { 2215 /* 2216 * Select raid index from 2217 * next group. 2218 */ 2219 map_index += data_disks_per_row; 2220 current_group++; 2221 } else { 2222 /* 2223 * Select raid index from first 2224 * group. 2225 */ 2226 map_index %= data_disks_per_row; 2227 current_group = 0; 2228 } 2229 } 2230 } while (offload_to_mirror != current_group); 2231 } 2232 2233 /* Set mirror group to use next time. */ 2234 offload_to_mirror = 2235 (offload_to_mirror >= layout_map_count - 1) ? 2236 0 : offload_to_mirror + 1; 2237 WARN_ON(offload_to_mirror >= layout_map_count); 2238 device->offload_to_mirror = offload_to_mirror; 2239 /* 2240 * Avoid direct use of device->offload_to_mirror within this 2241 * function since multiple threads might simultaneously 2242 * increment it beyond the range of device->layout_map_count -1. 
2243 */ 2244 } else if ((device->raid_level == SA_RAID_5 || 2245 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2246 /* RAID 50/60 */ 2247 /* Verify first and last block are in same RAID group */ 2248 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2249 stripesize = r5or6_blocks_per_row * layout_map_count; 2250 #if BITS_PER_LONG == 32 2251 tmpdiv = first_block; 2252 first_group = do_div(tmpdiv, stripesize); 2253 tmpdiv = first_group; 2254 do_div(tmpdiv, r5or6_blocks_per_row); 2255 first_group = tmpdiv; 2256 tmpdiv = last_block; 2257 last_group = do_div(tmpdiv, stripesize); 2258 tmpdiv = last_group; 2259 do_div(tmpdiv, r5or6_blocks_per_row); 2260 last_group = tmpdiv; 2261 #else 2262 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2263 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2264 #endif 2265 if (first_group != last_group) 2266 return PQI_RAID_BYPASS_INELIGIBLE; 2267 2268 /* Verify request is in a single row of RAID 5/6 */ 2269 #if BITS_PER_LONG == 32 2270 tmpdiv = first_block; 2271 do_div(tmpdiv, stripesize); 2272 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2273 tmpdiv = last_block; 2274 do_div(tmpdiv, stripesize); 2275 r5or6_last_row = r0_last_row = tmpdiv; 2276 #else 2277 first_row = r5or6_first_row = r0_first_row = 2278 first_block / stripesize; 2279 r5or6_last_row = r0_last_row = last_block / stripesize; 2280 #endif 2281 if (r5or6_first_row != r5or6_last_row) 2282 return PQI_RAID_BYPASS_INELIGIBLE; 2283 2284 /* Verify request is in a single column */ 2285 #if BITS_PER_LONG == 32 2286 tmpdiv = first_block; 2287 first_row_offset = do_div(tmpdiv, stripesize); 2288 tmpdiv = first_row_offset; 2289 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2290 r5or6_first_row_offset = first_row_offset; 2291 tmpdiv = last_block; 2292 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2293 tmpdiv = r5or6_last_row_offset; 2294 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2295 tmpdiv = r5or6_first_row_offset; 2296 do_div(tmpdiv, strip_size); 2297 first_column = r5or6_first_column = tmpdiv; 2298 tmpdiv = r5or6_last_row_offset; 2299 do_div(tmpdiv, strip_size); 2300 r5or6_last_column = tmpdiv; 2301 #else 2302 first_row_offset = r5or6_first_row_offset = 2303 (u32)((first_block % stripesize) % 2304 r5or6_blocks_per_row); 2305 2306 r5or6_last_row_offset = 2307 (u32)((last_block % stripesize) % 2308 r5or6_blocks_per_row); 2309 2310 first_column = r5or6_first_row_offset / strip_size; 2311 r5or6_first_column = first_column; 2312 r5or6_last_column = r5or6_last_row_offset / strip_size; 2313 #endif 2314 if (r5or6_first_column != r5or6_last_column) 2315 return PQI_RAID_BYPASS_INELIGIBLE; 2316 2317 /* Request is eligible */ 2318 map_row = 2319 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2320 get_unaligned_le16(&raid_map->row_cnt); 2321 2322 map_index = (first_group * 2323 (get_unaligned_le16(&raid_map->row_cnt) * 2324 total_disks_per_row)) + 2325 (map_row * total_disks_per_row) + first_column; 2326 } 2327 2328 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 2329 return PQI_RAID_BYPASS_INELIGIBLE; 2330 2331 aio_handle = raid_map->disk_data[map_index].aio_handle; 2332 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2333 first_row * strip_size + 2334 (first_row_offset - first_column * strip_size); 2335 disk_block_cnt = block_cnt; 2336 2337 /* Handle differing logical/physical block sizes. 
*/ 2338 if (raid_map->phys_blk_shift) { 2339 disk_block <<= raid_map->phys_blk_shift; 2340 disk_block_cnt <<= raid_map->phys_blk_shift; 2341 } 2342 2343 if (unlikely(disk_block_cnt > 0xffff)) 2344 return PQI_RAID_BYPASS_INELIGIBLE; 2345 2346 /* Build the new CDB for the physical disk I/O. */ 2347 if (disk_block > 0xffffffff) { 2348 cdb[0] = is_write ? WRITE_16 : READ_16; 2349 cdb[1] = 0; 2350 put_unaligned_be64(disk_block, &cdb[2]); 2351 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2352 cdb[14] = 0; 2353 cdb[15] = 0; 2354 cdb_length = 16; 2355 } else { 2356 cdb[0] = is_write ? WRITE_10 : READ_10; 2357 cdb[1] = 0; 2358 put_unaligned_be32((u32)disk_block, &cdb[2]); 2359 cdb[6] = 0; 2360 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2361 cdb[9] = 0; 2362 cdb_length = 10; 2363 } 2364 2365 if (get_unaligned_le16(&raid_map->flags) & 2366 RAID_MAP_ENCRYPTION_ENABLED) { 2367 pqi_set_encryption_info(&encryption_info, raid_map, 2368 first_block); 2369 encryption_info_ptr = &encryption_info; 2370 } else { 2371 encryption_info_ptr = NULL; 2372 } 2373 2374 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2375 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2376 } 2377 2378 #define PQI_STATUS_IDLE 0x0 2379 2380 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2381 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2382 2383 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2384 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2385 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2386 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2387 #define PQI_DEVICE_STATE_ERROR 0x4 2388 2389 #define PQI_MODE_READY_TIMEOUT_SECS 30 2390 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2391 2392 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2393 { 2394 struct pqi_device_registers __iomem *pqi_registers; 2395 unsigned long timeout; 2396 u64 signature; 2397 u8 status; 2398 2399 pqi_registers = ctrl_info->pqi_registers; 2400 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2401 2402 while (1) { 2403 signature = readq(&pqi_registers->signature); 2404 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2405 sizeof(signature)) == 0) 2406 break; 2407 if (time_after(jiffies, timeout)) { 2408 dev_err(&ctrl_info->pci_dev->dev, 2409 "timed out waiting for PQI signature\n"); 2410 return -ETIMEDOUT; 2411 } 2412 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2413 } 2414 2415 while (1) { 2416 status = readb(&pqi_registers->function_and_status_code); 2417 if (status == PQI_STATUS_IDLE) 2418 break; 2419 if (time_after(jiffies, timeout)) { 2420 dev_err(&ctrl_info->pci_dev->dev, 2421 "timed out waiting for PQI IDLE\n"); 2422 return -ETIMEDOUT; 2423 } 2424 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2425 } 2426 2427 while (1) { 2428 if (readl(&pqi_registers->device_status) == 2429 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2430 break; 2431 if (time_after(jiffies, timeout)) { 2432 dev_err(&ctrl_info->pci_dev->dev, 2433 "timed out waiting for PQI all registers ready\n"); 2434 return -ETIMEDOUT; 2435 } 2436 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2437 } 2438 2439 return 0; 2440 } 2441 2442 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2443 { 2444 struct pqi_scsi_dev *device; 2445 2446 device = io_request->scmd->device->hostdata; 2447 device->raid_bypass_enabled = false; 2448 device->aio_enabled = false; 2449 } 2450 2451 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2452 { 2453 struct pqi_ctrl_info *ctrl_info; 2454 struct pqi_scsi_dev *device; 2455 2456 device = 
sdev->hostdata; 2457 if (device->device_offline) 2458 return; 2459 2460 device->device_offline = true; 2461 scsi_device_set_state(sdev, SDEV_OFFLINE); 2462 ctrl_info = shost_to_hba(sdev->host); 2463 pqi_schedule_rescan_worker(ctrl_info); 2464 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2465 path, ctrl_info->scsi_host->host_no, device->bus, 2466 device->target, device->lun); 2467 } 2468 2469 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2470 { 2471 u8 scsi_status; 2472 u8 host_byte; 2473 struct scsi_cmnd *scmd; 2474 struct pqi_raid_error_info *error_info; 2475 size_t sense_data_length; 2476 int residual_count; 2477 int xfer_count; 2478 struct scsi_sense_hdr sshdr; 2479 2480 scmd = io_request->scmd; 2481 if (!scmd) 2482 return; 2483 2484 error_info = io_request->error_info; 2485 scsi_status = error_info->status; 2486 host_byte = DID_OK; 2487 2488 switch (error_info->data_out_result) { 2489 case PQI_DATA_IN_OUT_GOOD: 2490 break; 2491 case PQI_DATA_IN_OUT_UNDERFLOW: 2492 xfer_count = 2493 get_unaligned_le32(&error_info->data_out_transferred); 2494 residual_count = scsi_bufflen(scmd) - xfer_count; 2495 scsi_set_resid(scmd, residual_count); 2496 if (xfer_count < scmd->underflow) 2497 host_byte = DID_SOFT_ERROR; 2498 break; 2499 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2500 case PQI_DATA_IN_OUT_ABORTED: 2501 host_byte = DID_ABORT; 2502 break; 2503 case PQI_DATA_IN_OUT_TIMEOUT: 2504 host_byte = DID_TIME_OUT; 2505 break; 2506 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2507 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2508 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2509 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2510 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2511 case PQI_DATA_IN_OUT_ERROR: 2512 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2513 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2514 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2515 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2516 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2517 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2518 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2519 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2520 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2521 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2522 default: 2523 host_byte = DID_ERROR; 2524 break; 2525 } 2526 2527 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2528 if (sense_data_length == 0) 2529 sense_data_length = 2530 get_unaligned_le16(&error_info->response_data_length); 2531 if (sense_data_length) { 2532 if (sense_data_length > sizeof(error_info->data)) 2533 sense_data_length = sizeof(error_info->data); 2534 2535 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2536 scsi_normalize_sense(error_info->data, 2537 sense_data_length, &sshdr) && 2538 sshdr.sense_key == HARDWARE_ERROR && 2539 sshdr.asc == 0x3e && 2540 sshdr.ascq == 0x1) { 2541 pqi_take_device_offline(scmd->device, "RAID"); 2542 host_byte = DID_NO_CONNECT; 2543 } 2544 2545 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2546 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2547 memcpy(scmd->sense_buffer, error_info->data, 2548 sense_data_length); 2549 } 2550 2551 scmd->result = scsi_status; 2552 set_host_byte(scmd, host_byte); 2553 } 2554 2555 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2556 { 2557 u8 scsi_status; 2558 u8 host_byte; 2559 struct scsi_cmnd *scmd; 2560 struct pqi_aio_error_info *error_info; 2561 size_t sense_data_length; 2562 int residual_count; 2563 int xfer_count; 2564 bool device_offline; 
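/*
 * The firmware reports an AIO error as a (service_response, status)
 * pair: for PQI_AIO_SERV_RESPONSE_COMPLETE the status field already
 * holds a SAM status, while for PQI_AIO_SERV_RESPONSE_FAILURE it is a
 * PQI_AIO_STATUS_* code that is translated below.  For example, an
 * underrun on a 4096-byte transfer with residual_count = 512 yields
 * (illustrative values only):
 *
 *	xfer_count = 4096 - 512 = 3584
 *	host_byte = (3584 < scmd->underflow) ? DID_SOFT_ERROR : DID_OK
 */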
2565 2566 scmd = io_request->scmd; 2567 error_info = io_request->error_info; 2568 host_byte = DID_OK; 2569 sense_data_length = 0; 2570 device_offline = false; 2571 2572 switch (error_info->service_response) { 2573 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2574 scsi_status = error_info->status; 2575 break; 2576 case PQI_AIO_SERV_RESPONSE_FAILURE: 2577 switch (error_info->status) { 2578 case PQI_AIO_STATUS_IO_ABORTED: 2579 scsi_status = SAM_STAT_TASK_ABORTED; 2580 break; 2581 case PQI_AIO_STATUS_UNDERRUN: 2582 scsi_status = SAM_STAT_GOOD; 2583 residual_count = get_unaligned_le32( 2584 &error_info->residual_count); 2585 scsi_set_resid(scmd, residual_count); 2586 xfer_count = scsi_bufflen(scmd) - residual_count; 2587 if (xfer_count < scmd->underflow) 2588 host_byte = DID_SOFT_ERROR; 2589 break; 2590 case PQI_AIO_STATUS_OVERRUN: 2591 scsi_status = SAM_STAT_GOOD; 2592 break; 2593 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2594 pqi_aio_path_disabled(io_request); 2595 scsi_status = SAM_STAT_GOOD; 2596 io_request->status = -EAGAIN; 2597 break; 2598 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2599 case PQI_AIO_STATUS_INVALID_DEVICE: 2600 if (!io_request->raid_bypass) { 2601 device_offline = true; 2602 pqi_take_device_offline(scmd->device, "AIO"); 2603 host_byte = DID_NO_CONNECT; 2604 } 2605 scsi_status = SAM_STAT_CHECK_CONDITION; 2606 break; 2607 case PQI_AIO_STATUS_IO_ERROR: 2608 default: 2609 scsi_status = SAM_STAT_CHECK_CONDITION; 2610 break; 2611 } 2612 break; 2613 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2614 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2615 scsi_status = SAM_STAT_GOOD; 2616 break; 2617 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2618 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2619 default: 2620 scsi_status = SAM_STAT_CHECK_CONDITION; 2621 break; 2622 } 2623 2624 if (error_info->data_present) { 2625 sense_data_length = 2626 get_unaligned_le16(&error_info->data_length); 2627 if (sense_data_length) { 2628 if (sense_data_length > sizeof(error_info->data)) 2629 sense_data_length = sizeof(error_info->data); 2630 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2631 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2632 memcpy(scmd->sense_buffer, error_info->data, 2633 sense_data_length); 2634 } 2635 } 2636 2637 if (device_offline && sense_data_length == 0) 2638 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2639 0x3e, 0x1); 2640 2641 scmd->result = scsi_status; 2642 set_host_byte(scmd, host_byte); 2643 } 2644 2645 static void pqi_process_io_error(unsigned int iu_type, 2646 struct pqi_io_request *io_request) 2647 { 2648 switch (iu_type) { 2649 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2650 pqi_process_raid_io_error(io_request); 2651 break; 2652 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2653 pqi_process_aio_io_error(io_request); 2654 break; 2655 } 2656 } 2657 2658 static int pqi_interpret_task_management_response( 2659 struct pqi_task_management_response *response) 2660 { 2661 int rc; 2662 2663 switch (response->response_code) { 2664 case SOP_TMF_COMPLETE: 2665 case SOP_TMF_FUNCTION_SUCCEEDED: 2666 rc = 0; 2667 break; 2668 default: 2669 rc = -EIO; 2670 break; 2671 } 2672 2673 return rc; 2674 } 2675 2676 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2677 struct pqi_queue_group *queue_group) 2678 { 2679 unsigned int num_responses; 2680 pqi_index_t oq_pi; 2681 pqi_index_t oq_ci; 2682 struct pqi_io_request *io_request; 2683 struct pqi_io_response *response; 2684 u16 request_id; 2685 2686 num_responses = 0; 2687 oq_ci = queue_group->oq_ci_copy; 2688 2689 while (1) { 2690 
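/*
 * Standard PQI producer/consumer walk: re-read the producer index,
 * then consume response elements until the local consumer index
 * catches up.  For example, with num_elements_per_oq = 16, oq_ci = 14
 * and oq_pi = 2, elements 14, 15, 0 and 1 are processed (4 responses)
 * before oq_ci is written back to return the slots to the controller.
 */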
oq_pi = readl(queue_group->oq_pi); 2691 if (oq_pi == oq_ci) 2692 break; 2693 2694 num_responses++; 2695 response = queue_group->oq_element_array + 2696 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2697 2698 request_id = get_unaligned_le16(&response->request_id); 2699 WARN_ON(request_id >= ctrl_info->max_io_slots); 2700 2701 io_request = &ctrl_info->io_request_pool[request_id]; 2702 WARN_ON(atomic_read(&io_request->refcount) == 0); 2703 2704 switch (response->header.iu_type) { 2705 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2706 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2707 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2708 break; 2709 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2710 io_request->status = 2711 pqi_interpret_task_management_response( 2712 (void *)response); 2713 break; 2714 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2715 pqi_aio_path_disabled(io_request); 2716 io_request->status = -EAGAIN; 2717 break; 2718 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2719 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2720 io_request->error_info = ctrl_info->error_buffer + 2721 (get_unaligned_le16(&response->error_index) * 2722 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2723 pqi_process_io_error(response->header.iu_type, 2724 io_request); 2725 break; 2726 default: 2727 dev_err(&ctrl_info->pci_dev->dev, 2728 "unexpected IU type: 0x%x\n", 2729 response->header.iu_type); 2730 break; 2731 } 2732 2733 io_request->io_complete_callback(io_request, 2734 io_request->context); 2735 2736 /* 2737 * Note that the I/O request structure CANNOT BE TOUCHED after 2738 * returning from the I/O completion callback! 2739 */ 2740 2741 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2742 } 2743 2744 if (num_responses) { 2745 queue_group->oq_ci_copy = oq_ci; 2746 writel(oq_ci, queue_group->oq_ci); 2747 } 2748 2749 return num_responses; 2750 } 2751 2752 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2753 unsigned int ci, unsigned int elements_in_queue) 2754 { 2755 unsigned int num_elements_used; 2756 2757 if (pi >= ci) 2758 num_elements_used = pi - ci; 2759 else 2760 num_elements_used = elements_in_queue - ci + pi; 2761 2762 return elements_in_queue - num_elements_used - 1; 2763 } 2764 2765 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 2766 struct pqi_event_acknowledge_request *iu, size_t iu_length) 2767 { 2768 pqi_index_t iq_pi; 2769 pqi_index_t iq_ci; 2770 unsigned long flags; 2771 void *next_element; 2772 struct pqi_queue_group *queue_group; 2773 2774 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 2775 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 2776 2777 while (1) { 2778 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 2779 2780 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 2781 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 2782 2783 if (pqi_num_elements_free(iq_pi, iq_ci, 2784 ctrl_info->num_elements_per_iq)) 2785 break; 2786 2787 spin_unlock_irqrestore( 2788 &queue_group->submit_lock[RAID_PATH], flags); 2789 2790 if (pqi_ctrl_offline(ctrl_info)) 2791 return; 2792 } 2793 2794 next_element = queue_group->iq_element_array[RAID_PATH] + 2795 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 2796 2797 memcpy(next_element, iu, iu_length); 2798 2799 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 2800 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 2801 2802 /* 2803 * This write notifies the controller that an IU is available to be 2804 * processed. 
2805 */ 2806 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 2807 2808 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 2809 } 2810 2811 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 2812 struct pqi_event *event) 2813 { 2814 struct pqi_event_acknowledge_request request; 2815 2816 memset(&request, 0, sizeof(request)); 2817 2818 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 2819 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 2820 &request.header.iu_length); 2821 request.event_type = event->event_type; 2822 request.event_id = event->event_id; 2823 request.additional_event_id = event->additional_event_id; 2824 2825 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 2826 } 2827 2828 static void pqi_event_worker(struct work_struct *work) 2829 { 2830 unsigned int i; 2831 struct pqi_ctrl_info *ctrl_info; 2832 struct pqi_event *event; 2833 2834 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 2835 2836 pqi_ctrl_busy(ctrl_info); 2837 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 2838 if (pqi_ctrl_offline(ctrl_info)) 2839 goto out; 2840 2841 pqi_schedule_rescan_worker_delayed(ctrl_info); 2842 2843 event = ctrl_info->events; 2844 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 2845 if (event->pending) { 2846 event->pending = false; 2847 pqi_acknowledge_event(ctrl_info, event); 2848 } 2849 event++; 2850 } 2851 2852 out: 2853 pqi_ctrl_unbusy(ctrl_info); 2854 } 2855 2856 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2857 2858 static void pqi_heartbeat_timer_handler(struct timer_list *t) 2859 { 2860 int num_interrupts; 2861 u32 heartbeat_count; 2862 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 2863 heartbeat_timer); 2864 2865 pqi_check_ctrl_health(ctrl_info); 2866 if (pqi_ctrl_offline(ctrl_info)) 2867 return; 2868 2869 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 2870 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 2871 2872 if (num_interrupts == ctrl_info->previous_num_interrupts) { 2873 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 2874 dev_err(&ctrl_info->pci_dev->dev, 2875 "no heartbeat detected - last heartbeat count: %u\n", 2876 heartbeat_count); 2877 pqi_take_ctrl_offline(ctrl_info); 2878 return; 2879 } 2880 } else { 2881 ctrl_info->previous_num_interrupts = num_interrupts; 2882 } 2883 2884 ctrl_info->previous_heartbeat_count = heartbeat_count; 2885 mod_timer(&ctrl_info->heartbeat_timer, 2886 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 2887 } 2888 2889 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2890 { 2891 if (!ctrl_info->heartbeat_counter) 2892 return; 2893 2894 ctrl_info->previous_num_interrupts = 2895 atomic_read(&ctrl_info->num_interrupts); 2896 ctrl_info->previous_heartbeat_count = 2897 pqi_read_heartbeat_counter(ctrl_info); 2898 2899 ctrl_info->heartbeat_timer.expires = 2900 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2901 add_timer(&ctrl_info->heartbeat_timer); 2902 } 2903 2904 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2905 { 2906 del_timer_sync(&ctrl_info->heartbeat_timer); 2907 } 2908 2909 static inline int pqi_event_type_to_event_index(unsigned int event_type) 2910 { 2911 int index; 2912 2913 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 2914 if (event_type == pqi_supported_event_types[index]) 2915 return index; 2916 2917 return -1; 2918 } 2919 2920 static inline bool pqi_is_supported_event(unsigned int event_type) 2921 { 2922 return 
pqi_event_type_to_event_index(event_type) != -1; 2923 } 2924 2925 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 2926 { 2927 unsigned int num_events; 2928 pqi_index_t oq_pi; 2929 pqi_index_t oq_ci; 2930 struct pqi_event_queue *event_queue; 2931 struct pqi_event_response *response; 2932 struct pqi_event *event; 2933 int event_index; 2934 2935 event_queue = &ctrl_info->event_queue; 2936 num_events = 0; 2937 oq_ci = event_queue->oq_ci_copy; 2938 2939 while (1) { 2940 oq_pi = readl(event_queue->oq_pi); 2941 if (oq_pi == oq_ci) 2942 break; 2943 2944 num_events++; 2945 response = event_queue->oq_element_array + 2946 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 2947 2948 event_index = 2949 pqi_event_type_to_event_index(response->event_type); 2950 2951 if (event_index >= 0) { 2952 if (response->request_acknowlege) { 2953 event = &ctrl_info->events[event_index]; 2954 event->pending = true; 2955 event->event_type = response->event_type; 2956 event->event_id = response->event_id; 2957 event->additional_event_id = 2958 response->additional_event_id; 2959 } 2960 } 2961 2962 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 2963 } 2964 2965 if (num_events) { 2966 event_queue->oq_ci_copy = oq_ci; 2967 writel(oq_ci, event_queue->oq_ci); 2968 schedule_work(&ctrl_info->event_work); 2969 } 2970 2971 return num_events; 2972 } 2973 2974 #define PQI_LEGACY_INTX_MASK 0x1 2975 2976 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 2977 bool enable_intx) 2978 { 2979 u32 intx_mask; 2980 struct pqi_device_registers __iomem *pqi_registers; 2981 volatile void __iomem *register_addr; 2982 2983 pqi_registers = ctrl_info->pqi_registers; 2984 2985 if (enable_intx) 2986 register_addr = &pqi_registers->legacy_intx_mask_clear; 2987 else 2988 register_addr = &pqi_registers->legacy_intx_mask_set; 2989 2990 intx_mask = readl(register_addr); 2991 intx_mask |= PQI_LEGACY_INTX_MASK; 2992 writel(intx_mask, register_addr); 2993 } 2994 2995 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 2996 enum pqi_irq_mode new_mode) 2997 { 2998 switch (ctrl_info->irq_mode) { 2999 case IRQ_MODE_MSIX: 3000 switch (new_mode) { 3001 case IRQ_MODE_MSIX: 3002 break; 3003 case IRQ_MODE_INTX: 3004 pqi_configure_legacy_intx(ctrl_info, true); 3005 sis_enable_intx(ctrl_info); 3006 break; 3007 case IRQ_MODE_NONE: 3008 break; 3009 } 3010 break; 3011 case IRQ_MODE_INTX: 3012 switch (new_mode) { 3013 case IRQ_MODE_MSIX: 3014 pqi_configure_legacy_intx(ctrl_info, false); 3015 sis_enable_msix(ctrl_info); 3016 break; 3017 case IRQ_MODE_INTX: 3018 break; 3019 case IRQ_MODE_NONE: 3020 pqi_configure_legacy_intx(ctrl_info, false); 3021 break; 3022 } 3023 break; 3024 case IRQ_MODE_NONE: 3025 switch (new_mode) { 3026 case IRQ_MODE_MSIX: 3027 sis_enable_msix(ctrl_info); 3028 break; 3029 case IRQ_MODE_INTX: 3030 pqi_configure_legacy_intx(ctrl_info, true); 3031 sis_enable_intx(ctrl_info); 3032 break; 3033 case IRQ_MODE_NONE: 3034 break; 3035 } 3036 break; 3037 } 3038 3039 ctrl_info->irq_mode = new_mode; 3040 } 3041 3042 #define PQI_LEGACY_INTX_PENDING 0x1 3043 3044 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3045 { 3046 bool valid_irq; 3047 u32 intx_status; 3048 3049 switch (ctrl_info->irq_mode) { 3050 case IRQ_MODE_MSIX: 3051 valid_irq = true; 3052 break; 3053 case IRQ_MODE_INTX: 3054 intx_status = 3055 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3056 if (intx_status & PQI_LEGACY_INTX_PENDING) 3057 valid_irq = true; 3058 else 3059 valid_irq = false; 3060 break; 3061 
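/*
 * While the driver is switching interrupt modes (IRQ_MODE_NONE) no
 * interrupt is claimed, so pqi_irq_handler() returns IRQ_NONE for
 * anything that arrives in that window.
 */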
case IRQ_MODE_NONE: 3062 default: 3063 valid_irq = false; 3064 break; 3065 } 3066 3067 return valid_irq; 3068 } 3069 3070 static irqreturn_t pqi_irq_handler(int irq, void *data) 3071 { 3072 struct pqi_ctrl_info *ctrl_info; 3073 struct pqi_queue_group *queue_group; 3074 unsigned int num_responses_handled; 3075 3076 queue_group = data; 3077 ctrl_info = queue_group->ctrl_info; 3078 3079 if (!pqi_is_valid_irq(ctrl_info)) 3080 return IRQ_NONE; 3081 3082 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3083 3084 if (irq == ctrl_info->event_irq) 3085 num_responses_handled += pqi_process_event_intr(ctrl_info); 3086 3087 if (num_responses_handled) 3088 atomic_inc(&ctrl_info->num_interrupts); 3089 3090 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3091 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3092 3093 return IRQ_HANDLED; 3094 } 3095 3096 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3097 { 3098 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3099 int i; 3100 int rc; 3101 3102 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3103 3104 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3105 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3106 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3107 if (rc) { 3108 dev_err(&pci_dev->dev, 3109 "irq %u init failed with error %d\n", 3110 pci_irq_vector(pci_dev, i), rc); 3111 return rc; 3112 } 3113 ctrl_info->num_msix_vectors_initialized++; 3114 } 3115 3116 return 0; 3117 } 3118 3119 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3120 { 3121 int i; 3122 3123 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3124 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3125 &ctrl_info->queue_groups[i]); 3126 3127 ctrl_info->num_msix_vectors_initialized = 0; 3128 } 3129 3130 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3131 { 3132 int num_vectors_enabled; 3133 3134 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3135 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3136 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3137 if (num_vectors_enabled < 0) { 3138 dev_err(&ctrl_info->pci_dev->dev, 3139 "MSI-X init failed with error %d\n", 3140 num_vectors_enabled); 3141 return num_vectors_enabled; 3142 } 3143 3144 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3145 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3146 return 0; 3147 } 3148 3149 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3150 { 3151 if (ctrl_info->num_msix_vectors_enabled) { 3152 pci_free_irq_vectors(ctrl_info->pci_dev); 3153 ctrl_info->num_msix_vectors_enabled = 0; 3154 } 3155 } 3156 3157 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3158 { 3159 unsigned int i; 3160 size_t alloc_length; 3161 size_t element_array_length_per_iq; 3162 size_t element_array_length_per_oq; 3163 void *element_array; 3164 void __iomem *next_queue_index; 3165 void *aligned_pointer; 3166 unsigned int num_inbound_queues; 3167 unsigned int num_outbound_queues; 3168 unsigned int num_queue_indexes; 3169 struct pqi_queue_group *queue_group; 3170 3171 element_array_length_per_iq = 3172 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3173 ctrl_info->num_elements_per_iq; 3174 element_array_length_per_oq = 3175 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3176 ctrl_info->num_elements_per_oq; 3177 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3178 num_outbound_queues = ctrl_info->num_queue_groups; 3179 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3180 
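/*
 * Two-pass layout: the loops below first walk a NULL-based
 * aligned_pointer through the full layout (RAID/AIO IQ element
 * arrays, OQ element arrays, the event queue elements, then the
 * queue index words) solely to compute alloc_length.  Once the
 * single coherent allocation succeeds, the identical walk is
 * repeated against queue_memory_base to hand out the real
 * addresses.  PTR_ALIGN() rounds up to the next multiple of the
 * alignment, e.g. PTR_ALIGN(p, 64) turns an offset of 100 into 128.
 */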
3181 aligned_pointer = NULL; 3182 3183 for (i = 0; i < num_inbound_queues; i++) { 3184 aligned_pointer = PTR_ALIGN(aligned_pointer, 3185 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3186 aligned_pointer += element_array_length_per_iq; 3187 } 3188 3189 for (i = 0; i < num_outbound_queues; i++) { 3190 aligned_pointer = PTR_ALIGN(aligned_pointer, 3191 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3192 aligned_pointer += element_array_length_per_oq; 3193 } 3194 3195 aligned_pointer = PTR_ALIGN(aligned_pointer, 3196 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3197 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3198 PQI_EVENT_OQ_ELEMENT_LENGTH; 3199 3200 for (i = 0; i < num_queue_indexes; i++) { 3201 aligned_pointer = PTR_ALIGN(aligned_pointer, 3202 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3203 aligned_pointer += sizeof(pqi_index_t); 3204 } 3205 3206 alloc_length = (size_t)aligned_pointer + 3207 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3208 3209 alloc_length += PQI_EXTRA_SGL_MEMORY; 3210 3211 ctrl_info->queue_memory_base = 3212 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3213 alloc_length, 3214 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3215 3216 if (!ctrl_info->queue_memory_base) 3217 return -ENOMEM; 3218 3219 ctrl_info->queue_memory_length = alloc_length; 3220 3221 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3222 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3223 3224 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3225 queue_group = &ctrl_info->queue_groups[i]; 3226 queue_group->iq_element_array[RAID_PATH] = element_array; 3227 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3228 ctrl_info->queue_memory_base_dma_handle + 3229 (element_array - ctrl_info->queue_memory_base); 3230 element_array += element_array_length_per_iq; 3231 element_array = PTR_ALIGN(element_array, 3232 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3233 queue_group->iq_element_array[AIO_PATH] = element_array; 3234 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3235 ctrl_info->queue_memory_base_dma_handle + 3236 (element_array - ctrl_info->queue_memory_base); 3237 element_array += element_array_length_per_iq; 3238 element_array = PTR_ALIGN(element_array, 3239 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3240 } 3241 3242 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3243 queue_group = &ctrl_info->queue_groups[i]; 3244 queue_group->oq_element_array = element_array; 3245 queue_group->oq_element_array_bus_addr = 3246 ctrl_info->queue_memory_base_dma_handle + 3247 (element_array - ctrl_info->queue_memory_base); 3248 element_array += element_array_length_per_oq; 3249 element_array = PTR_ALIGN(element_array, 3250 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3251 } 3252 3253 ctrl_info->event_queue.oq_element_array = element_array; 3254 ctrl_info->event_queue.oq_element_array_bus_addr = 3255 ctrl_info->queue_memory_base_dma_handle + 3256 (element_array - ctrl_info->queue_memory_base); 3257 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3258 PQI_EVENT_OQ_ELEMENT_LENGTH; 3259 3260 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3261 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3262 3263 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3264 queue_group = &ctrl_info->queue_groups[i]; 3265 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3266 queue_group->iq_ci_bus_addr[RAID_PATH] = 3267 ctrl_info->queue_memory_base_dma_handle + 3268 (next_queue_index - 3269 (void __iomem *)ctrl_info->queue_memory_base); 3270 next_queue_index += sizeof(pqi_index_t); 3271 next_queue_index = PTR_ALIGN(next_queue_index, 3272 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3273 
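/*
 * The AIO-path iq_ci and the group's oq_pi follow the RAID-path
 * iq_ci carved out above, giving the three index words per queue
 * group counted in num_queue_indexes; the event queue's oq_pi is
 * placed after this loop.
 */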
queue_group->iq_ci[AIO_PATH] = next_queue_index; 3274 queue_group->iq_ci_bus_addr[AIO_PATH] = 3275 ctrl_info->queue_memory_base_dma_handle + 3276 (next_queue_index - 3277 (void __iomem *)ctrl_info->queue_memory_base); 3278 next_queue_index += sizeof(pqi_index_t); 3279 next_queue_index = PTR_ALIGN(next_queue_index, 3280 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3281 queue_group->oq_pi = next_queue_index; 3282 queue_group->oq_pi_bus_addr = 3283 ctrl_info->queue_memory_base_dma_handle + 3284 (next_queue_index - 3285 (void __iomem *)ctrl_info->queue_memory_base); 3286 next_queue_index += sizeof(pqi_index_t); 3287 next_queue_index = PTR_ALIGN(next_queue_index, 3288 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3289 } 3290 3291 ctrl_info->event_queue.oq_pi = next_queue_index; 3292 ctrl_info->event_queue.oq_pi_bus_addr = 3293 ctrl_info->queue_memory_base_dma_handle + 3294 (next_queue_index - 3295 (void __iomem *)ctrl_info->queue_memory_base); 3296 3297 return 0; 3298 } 3299 3300 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3301 { 3302 unsigned int i; 3303 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3304 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3305 3306 /* 3307 * Initialize the backpointers to the controller structure in 3308 * each operational queue group structure. 3309 */ 3310 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3311 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3312 3313 /* 3314 * Assign IDs to all operational queues. Note that the IDs 3315 * assigned to operational IQs are independent of the IDs 3316 * assigned to operational OQs. 3317 */ 3318 ctrl_info->event_queue.oq_id = next_oq_id++; 3319 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3320 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3321 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3322 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3323 } 3324 3325 /* 3326 * Assign MSI-X table entry indexes to all queues. Note that the 3327 * interrupt for the event queue is shared with the first queue group. 
3328 */ 3329 ctrl_info->event_queue.int_msg_num = 0; 3330 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3331 ctrl_info->queue_groups[i].int_msg_num = i; 3332 3333 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3334 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3335 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3336 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3337 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3338 } 3339 } 3340 3341 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3342 { 3343 size_t alloc_length; 3344 struct pqi_admin_queues_aligned *admin_queues_aligned; 3345 struct pqi_admin_queues *admin_queues; 3346 3347 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3348 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3349 3350 ctrl_info->admin_queue_memory_base = 3351 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3352 alloc_length, 3353 &ctrl_info->admin_queue_memory_base_dma_handle, 3354 GFP_KERNEL); 3355 3356 if (!ctrl_info->admin_queue_memory_base) 3357 return -ENOMEM; 3358 3359 ctrl_info->admin_queue_memory_length = alloc_length; 3360 3361 admin_queues = &ctrl_info->admin_queues; 3362 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3363 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3364 admin_queues->iq_element_array = 3365 &admin_queues_aligned->iq_element_array; 3366 admin_queues->oq_element_array = 3367 &admin_queues_aligned->oq_element_array; 3368 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3369 admin_queues->oq_pi = 3370 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3371 3372 admin_queues->iq_element_array_bus_addr = 3373 ctrl_info->admin_queue_memory_base_dma_handle + 3374 (admin_queues->iq_element_array - 3375 ctrl_info->admin_queue_memory_base); 3376 admin_queues->oq_element_array_bus_addr = 3377 ctrl_info->admin_queue_memory_base_dma_handle + 3378 (admin_queues->oq_element_array - 3379 ctrl_info->admin_queue_memory_base); 3380 admin_queues->iq_ci_bus_addr = 3381 ctrl_info->admin_queue_memory_base_dma_handle + 3382 ((void *)admin_queues->iq_ci - 3383 ctrl_info->admin_queue_memory_base); 3384 admin_queues->oq_pi_bus_addr = 3385 ctrl_info->admin_queue_memory_base_dma_handle + 3386 ((void __iomem *)admin_queues->oq_pi - 3387 (void __iomem *)ctrl_info->admin_queue_memory_base); 3388 3389 return 0; 3390 } 3391 3392 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3393 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3394 3395 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3396 { 3397 struct pqi_device_registers __iomem *pqi_registers; 3398 struct pqi_admin_queues *admin_queues; 3399 unsigned long timeout; 3400 u8 status; 3401 u32 reg; 3402 3403 pqi_registers = ctrl_info->pqi_registers; 3404 admin_queues = &ctrl_info->admin_queues; 3405 3406 writeq((u64)admin_queues->iq_element_array_bus_addr, 3407 &pqi_registers->admin_iq_element_array_addr); 3408 writeq((u64)admin_queues->oq_element_array_bus_addr, 3409 &pqi_registers->admin_oq_element_array_addr); 3410 writeq((u64)admin_queues->iq_ci_bus_addr, 3411 &pqi_registers->admin_iq_ci_addr); 3412 writeq((u64)admin_queues->oq_pi_bus_addr, 3413 &pqi_registers->admin_oq_pi_addr); 3414 3415 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3416 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3417 (admin_queues->int_msg_num << 16); 3418 writel(reg, &pqi_registers->admin_iq_num_elements); 3419 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3420 &pqi_registers->function_and_status_code); 3421 3422 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + 
jiffies; 3423 while (1) { 3424 status = readb(&pqi_registers->function_and_status_code); 3425 if (status == PQI_STATUS_IDLE) 3426 break; 3427 if (time_after(jiffies, timeout)) 3428 return -ETIMEDOUT; 3429 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3430 } 3431 3432 /* 3433 * The offset registers are not initialized to the correct 3434 * offsets until *after* the create admin queue pair command 3435 * completes successfully. 3436 */ 3437 admin_queues->iq_pi = ctrl_info->iomem_base + 3438 PQI_DEVICE_REGISTERS_OFFSET + 3439 readq(&pqi_registers->admin_iq_pi_offset); 3440 admin_queues->oq_ci = ctrl_info->iomem_base + 3441 PQI_DEVICE_REGISTERS_OFFSET + 3442 readq(&pqi_registers->admin_oq_ci_offset); 3443 3444 return 0; 3445 } 3446 3447 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3448 struct pqi_general_admin_request *request) 3449 { 3450 struct pqi_admin_queues *admin_queues; 3451 void *next_element; 3452 pqi_index_t iq_pi; 3453 3454 admin_queues = &ctrl_info->admin_queues; 3455 iq_pi = admin_queues->iq_pi_copy; 3456 3457 next_element = admin_queues->iq_element_array + 3458 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3459 3460 memcpy(next_element, request, sizeof(*request)); 3461 3462 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3463 admin_queues->iq_pi_copy = iq_pi; 3464 3465 /* 3466 * This write notifies the controller that an IU is available to be 3467 * processed. 3468 */ 3469 writel(iq_pi, admin_queues->iq_pi); 3470 } 3471 3472 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3473 3474 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3475 struct pqi_general_admin_response *response) 3476 { 3477 struct pqi_admin_queues *admin_queues; 3478 pqi_index_t oq_pi; 3479 pqi_index_t oq_ci; 3480 unsigned long timeout; 3481 3482 admin_queues = &ctrl_info->admin_queues; 3483 oq_ci = admin_queues->oq_ci_copy; 3484 3485 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3486 3487 while (1) { 3488 oq_pi = readl(admin_queues->oq_pi); 3489 if (oq_pi != oq_ci) 3490 break; 3491 if (time_after(jiffies, timeout)) { 3492 dev_err(&ctrl_info->pci_dev->dev, 3493 "timed out waiting for admin response\n"); 3494 return -ETIMEDOUT; 3495 } 3496 if (!sis_is_firmware_running(ctrl_info)) 3497 return -ENXIO; 3498 usleep_range(1000, 2000); 3499 } 3500 3501 memcpy(response, admin_queues->oq_element_array + 3502 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3503 3504 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3505 admin_queues->oq_ci_copy = oq_ci; 3506 writel(oq_ci, admin_queues->oq_ci); 3507 3508 return 0; 3509 } 3510 3511 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3512 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3513 struct pqi_io_request *io_request) 3514 { 3515 struct pqi_io_request *next; 3516 void *next_element; 3517 pqi_index_t iq_pi; 3518 pqi_index_t iq_ci; 3519 size_t iu_length; 3520 unsigned long flags; 3521 unsigned int num_elements_needed; 3522 unsigned int num_elements_to_end_of_queue; 3523 size_t copy_count; 3524 struct pqi_iu_header *request; 3525 3526 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3527 3528 if (io_request) { 3529 io_request->queue_group = queue_group; 3530 list_add_tail(&io_request->request_list_entry, 3531 &queue_group->request_list[path]); 3532 } 3533 3534 iq_pi = queue_group->iq_pi_copy[path]; 3535 3536 list_for_each_entry_safe(io_request, next, 3537 &queue_group->request_list[path], request_list_entry) { 3538 3539 request = io_request->iu; 3540 3541 iu_length = 
get_unaligned_le16(&request->iu_length) + 3542 PQI_REQUEST_HEADER_LENGTH; 3543 num_elements_needed = 3544 DIV_ROUND_UP(iu_length, 3545 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3546 3547 iq_ci = readl(queue_group->iq_ci[path]); 3548 3549 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3550 ctrl_info->num_elements_per_iq)) 3551 break; 3552 3553 put_unaligned_le16(queue_group->oq_id, 3554 &request->response_queue_id); 3555 3556 next_element = queue_group->iq_element_array[path] + 3557 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3558 3559 num_elements_to_end_of_queue = 3560 ctrl_info->num_elements_per_iq - iq_pi; 3561 3562 if (num_elements_needed <= num_elements_to_end_of_queue) { 3563 memcpy(next_element, request, iu_length); 3564 } else { 3565 copy_count = num_elements_to_end_of_queue * 3566 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3567 memcpy(next_element, request, copy_count); 3568 memcpy(queue_group->iq_element_array[path], 3569 (u8 *)request + copy_count, 3570 iu_length - copy_count); 3571 } 3572 3573 iq_pi = (iq_pi + num_elements_needed) % 3574 ctrl_info->num_elements_per_iq; 3575 3576 list_del(&io_request->request_list_entry); 3577 } 3578 3579 if (iq_pi != queue_group->iq_pi_copy[path]) { 3580 queue_group->iq_pi_copy[path] = iq_pi; 3581 /* 3582 * This write notifies the controller that one or more IUs are 3583 * available to be processed. 3584 */ 3585 writel(iq_pi, queue_group->iq_pi[path]); 3586 } 3587 3588 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3589 } 3590 3591 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3592 3593 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3594 struct completion *wait) 3595 { 3596 int rc; 3597 3598 while (1) { 3599 if (wait_for_completion_io_timeout(wait, 3600 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3601 rc = 0; 3602 break; 3603 } 3604 3605 pqi_check_ctrl_health(ctrl_info); 3606 if (pqi_ctrl_offline(ctrl_info)) { 3607 rc = -ENXIO; 3608 break; 3609 } 3610 } 3611 3612 return rc; 3613 } 3614 3615 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3616 void *context) 3617 { 3618 struct completion *waiting = context; 3619 3620 complete(waiting); 3621 } 3622 3623 static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info 3624 *error_info) 3625 { 3626 int rc = -EIO; 3627 3628 switch (error_info->data_out_result) { 3629 case PQI_DATA_IN_OUT_GOOD: 3630 if (error_info->status == SAM_STAT_GOOD) 3631 rc = 0; 3632 break; 3633 case PQI_DATA_IN_OUT_UNDERFLOW: 3634 if (error_info->status == SAM_STAT_GOOD || 3635 error_info->status == SAM_STAT_CHECK_CONDITION) 3636 rc = 0; 3637 break; 3638 case PQI_DATA_IN_OUT_ABORTED: 3639 rc = PQI_CMD_STATUS_ABORTED; 3640 break; 3641 } 3642 3643 return rc; 3644 } 3645 3646 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 3647 struct pqi_iu_header *request, unsigned int flags, 3648 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 3649 { 3650 int rc = 0; 3651 struct pqi_io_request *io_request; 3652 unsigned long start_jiffies; 3653 unsigned long msecs_blocked; 3654 size_t iu_length; 3655 DECLARE_COMPLETION_ONSTACK(wait); 3656 3657 /* 3658 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 3659 * are mutually exclusive. 
3660 */ 3661 3662 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 3663 if (down_interruptible(&ctrl_info->sync_request_sem)) 3664 return -ERESTARTSYS; 3665 } else { 3666 if (timeout_msecs == NO_TIMEOUT) { 3667 down(&ctrl_info->sync_request_sem); 3668 } else { 3669 start_jiffies = jiffies; 3670 if (down_timeout(&ctrl_info->sync_request_sem, 3671 msecs_to_jiffies(timeout_msecs))) 3672 return -ETIMEDOUT; 3673 msecs_blocked = 3674 jiffies_to_msecs(jiffies - start_jiffies); 3675 if (msecs_blocked >= timeout_msecs) 3676 return -ETIMEDOUT; 3677 timeout_msecs -= msecs_blocked; 3678 } 3679 } 3680 3681 pqi_ctrl_busy(ctrl_info); 3682 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 3683 if (timeout_msecs == 0) { 3684 pqi_ctrl_unbusy(ctrl_info); 3685 rc = -ETIMEDOUT; 3686 goto out; 3687 } 3688 3689 if (pqi_ctrl_offline(ctrl_info)) { 3690 pqi_ctrl_unbusy(ctrl_info); 3691 rc = -ENXIO; 3692 goto out; 3693 } 3694 3695 io_request = pqi_alloc_io_request(ctrl_info); 3696 3697 put_unaligned_le16(io_request->index, 3698 &(((struct pqi_raid_path_request *)request)->request_id)); 3699 3700 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 3701 ((struct pqi_raid_path_request *)request)->error_index = 3702 ((struct pqi_raid_path_request *)request)->request_id; 3703 3704 iu_length = get_unaligned_le16(&request->iu_length) + 3705 PQI_REQUEST_HEADER_LENGTH; 3706 memcpy(io_request->iu, request, iu_length); 3707 3708 io_request->io_complete_callback = pqi_raid_synchronous_complete; 3709 io_request->context = &wait; 3710 3711 pqi_start_io(ctrl_info, 3712 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 3713 io_request); 3714 3715 pqi_ctrl_unbusy(ctrl_info); 3716 3717 if (timeout_msecs == NO_TIMEOUT) { 3718 pqi_wait_for_completion_io(ctrl_info, &wait); 3719 } else { 3720 if (!wait_for_completion_io_timeout(&wait, 3721 msecs_to_jiffies(timeout_msecs))) { 3722 dev_warn(&ctrl_info->pci_dev->dev, 3723 "command timed out\n"); 3724 rc = -ETIMEDOUT; 3725 } 3726 } 3727 3728 if (error_info) { 3729 if (io_request->error_info) 3730 memcpy(error_info, io_request->error_info, 3731 sizeof(*error_info)); 3732 else 3733 memset(error_info, 0, sizeof(*error_info)); 3734 } else if (rc == 0 && io_request->error_info) { 3735 rc = pqi_process_raid_io_error_synchronous( 3736 io_request->error_info); 3737 } 3738 3739 pqi_free_io_request(io_request); 3740 3741 out: 3742 up(&ctrl_info->sync_request_sem); 3743 3744 return rc; 3745 } 3746 3747 static int pqi_validate_admin_response( 3748 struct pqi_general_admin_response *response, u8 expected_function_code) 3749 { 3750 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 3751 return -EINVAL; 3752 3753 if (get_unaligned_le16(&response->header.iu_length) != 3754 PQI_GENERAL_ADMIN_IU_LENGTH) 3755 return -EINVAL; 3756 3757 if (response->function_code != expected_function_code) 3758 return -EINVAL; 3759 3760 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 3761 return -EINVAL; 3762 3763 return 0; 3764 } 3765 3766 static int pqi_submit_admin_request_synchronous( 3767 struct pqi_ctrl_info *ctrl_info, 3768 struct pqi_general_admin_request *request, 3769 struct pqi_general_admin_response *response) 3770 { 3771 int rc; 3772 3773 pqi_submit_admin_request(ctrl_info, request); 3774 3775 rc = pqi_poll_for_admin_response(ctrl_info, response); 3776 3777 if (rc == 0) 3778 rc = pqi_validate_admin_response(response, 3779 request->function_code); 3780 3781 return rc; 3782 } 3783 3784 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 3785 { 3786 
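/*
 * REPORT DEVICE CAPABILITY is a general admin command: the capability
 * buffer is DMA-mapped with pqi_map_single(), the request is submitted
 * synchronously through the admin queue pair, and the returned limits
 * (queue counts, element lengths scaled up from 16-byte units, and the
 * maximum inbound IU length from the SOP IU layer descriptor) are
 * cached in ctrl_info for the operational queue setup that follows.
 */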
int rc; 3787 struct pqi_general_admin_request request; 3788 struct pqi_general_admin_response response; 3789 struct pqi_device_capability *capability; 3790 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 3791 3792 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 3793 if (!capability) 3794 return -ENOMEM; 3795 3796 memset(&request, 0, sizeof(request)); 3797 3798 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3799 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3800 &request.header.iu_length); 3801 request.function_code = 3802 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 3803 put_unaligned_le32(sizeof(*capability), 3804 &request.data.report_device_capability.buffer_length); 3805 3806 rc = pqi_map_single(ctrl_info->pci_dev, 3807 &request.data.report_device_capability.sg_descriptor, 3808 capability, sizeof(*capability), 3809 DMA_FROM_DEVICE); 3810 if (rc) 3811 goto out; 3812 3813 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3814 &response); 3815 3816 pqi_pci_unmap(ctrl_info->pci_dev, 3817 &request.data.report_device_capability.sg_descriptor, 1, 3818 DMA_FROM_DEVICE); 3819 3820 if (rc) 3821 goto out; 3822 3823 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 3824 rc = -EIO; 3825 goto out; 3826 } 3827 3828 ctrl_info->max_inbound_queues = 3829 get_unaligned_le16(&capability->max_inbound_queues); 3830 ctrl_info->max_elements_per_iq = 3831 get_unaligned_le16(&capability->max_elements_per_iq); 3832 ctrl_info->max_iq_element_length = 3833 get_unaligned_le16(&capability->max_iq_element_length) 3834 * 16; 3835 ctrl_info->max_outbound_queues = 3836 get_unaligned_le16(&capability->max_outbound_queues); 3837 ctrl_info->max_elements_per_oq = 3838 get_unaligned_le16(&capability->max_elements_per_oq); 3839 ctrl_info->max_oq_element_length = 3840 get_unaligned_le16(&capability->max_oq_element_length) 3841 * 16; 3842 3843 sop_iu_layer_descriptor = 3844 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 3845 3846 ctrl_info->max_inbound_iu_length_per_firmware = 3847 get_unaligned_le16( 3848 &sop_iu_layer_descriptor->max_inbound_iu_length); 3849 ctrl_info->inbound_spanning_supported = 3850 sop_iu_layer_descriptor->inbound_spanning_supported; 3851 ctrl_info->outbound_spanning_supported = 3852 sop_iu_layer_descriptor->outbound_spanning_supported; 3853 3854 out: 3855 kfree(capability); 3856 3857 return rc; 3858 } 3859 3860 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 3861 { 3862 if (ctrl_info->max_iq_element_length < 3863 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3864 dev_err(&ctrl_info->pci_dev->dev, 3865 "max. inbound queue element length of %d is less than the required length of %d\n", 3866 ctrl_info->max_iq_element_length, 3867 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3868 return -EINVAL; 3869 } 3870 3871 if (ctrl_info->max_oq_element_length < 3872 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 3873 dev_err(&ctrl_info->pci_dev->dev, 3874 "max. outbound queue element length of %d is less than the required length of %d\n", 3875 ctrl_info->max_oq_element_length, 3876 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3877 return -EINVAL; 3878 } 3879 3880 if (ctrl_info->max_inbound_iu_length_per_firmware < 3881 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3882 dev_err(&ctrl_info->pci_dev->dev, 3883 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 3884 ctrl_info->max_inbound_iu_length_per_firmware, 3885 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3886 return -EINVAL; 3887 } 3888 3889 if (!ctrl_info->inbound_spanning_supported) { 3890 dev_err(&ctrl_info->pci_dev->dev, 3891 "the controller does not support inbound spanning\n"); 3892 return -EINVAL; 3893 } 3894 3895 if (ctrl_info->outbound_spanning_supported) { 3896 dev_err(&ctrl_info->pci_dev->dev, 3897 "the controller supports outbound spanning but this driver does not\n"); 3898 return -EINVAL; 3899 } 3900 3901 return 0; 3902 } 3903 3904 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 3905 { 3906 int rc; 3907 struct pqi_event_queue *event_queue; 3908 struct pqi_general_admin_request request; 3909 struct pqi_general_admin_response response; 3910 3911 event_queue = &ctrl_info->event_queue; 3912 3913 /* 3914 * Create OQ (Outbound Queue - device to host queue) to dedicate 3915 * to events. 3916 */ 3917 memset(&request, 0, sizeof(request)); 3918 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3919 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3920 &request.header.iu_length); 3921 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 3922 put_unaligned_le16(event_queue->oq_id, 3923 &request.data.create_operational_oq.queue_id); 3924 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 3925 &request.data.create_operational_oq.element_array_addr); 3926 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 3927 &request.data.create_operational_oq.pi_addr); 3928 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 3929 &request.data.create_operational_oq.num_elements); 3930 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 3931 &request.data.create_operational_oq.element_length); 3932 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 3933 put_unaligned_le16(event_queue->int_msg_num, 3934 &request.data.create_operational_oq.int_msg_num); 3935 3936 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3937 &response); 3938 if (rc) 3939 return rc; 3940 3941 event_queue->oq_ci = ctrl_info->iomem_base + 3942 PQI_DEVICE_REGISTERS_OFFSET + 3943 get_unaligned_le64( 3944 &response.data.create_operational_oq.oq_ci_offset); 3945 3946 return 0; 3947 } 3948 3949 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 3950 unsigned int group_number) 3951 { 3952 int rc; 3953 struct pqi_queue_group *queue_group; 3954 struct pqi_general_admin_request request; 3955 struct pqi_general_admin_response response; 3956 3957 queue_group = &ctrl_info->queue_groups[group_number]; 3958 3959 /* 3960 * Create IQ (Inbound Queue - host to device queue) for 3961 * RAID path. 
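	 * The queue ID, element array address, and CI address used here
	 * were presumably filled in earlier when the queue group's memory
	 * was allocated; the firmware's response below supplies the offset
	 * of the register where the driver must write this queue's
	 * producer index (iq_pi).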
3962 */ 3963 memset(&request, 0, sizeof(request)); 3964 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3965 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3966 &request.header.iu_length); 3967 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 3968 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 3969 &request.data.create_operational_iq.queue_id); 3970 put_unaligned_le64( 3971 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 3972 &request.data.create_operational_iq.element_array_addr); 3973 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 3974 &request.data.create_operational_iq.ci_addr); 3975 put_unaligned_le16(ctrl_info->num_elements_per_iq, 3976 &request.data.create_operational_iq.num_elements); 3977 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 3978 &request.data.create_operational_iq.element_length); 3979 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 3980 3981 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3982 &response); 3983 if (rc) { 3984 dev_err(&ctrl_info->pci_dev->dev, 3985 "error creating inbound RAID queue\n"); 3986 return rc; 3987 } 3988 3989 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 3990 PQI_DEVICE_REGISTERS_OFFSET + 3991 get_unaligned_le64( 3992 &response.data.create_operational_iq.iq_pi_offset); 3993 3994 /* 3995 * Create IQ (Inbound Queue - host to device queue) for 3996 * Advanced I/O (AIO) path. 3997 */ 3998 memset(&request, 0, sizeof(request)); 3999 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4000 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4001 &request.header.iu_length); 4002 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4003 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4004 &request.data.create_operational_iq.queue_id); 4005 put_unaligned_le64((u64)queue_group-> 4006 iq_element_array_bus_addr[AIO_PATH], 4007 &request.data.create_operational_iq.element_array_addr); 4008 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4009 &request.data.create_operational_iq.ci_addr); 4010 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4011 &request.data.create_operational_iq.num_elements); 4012 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4013 &request.data.create_operational_iq.element_length); 4014 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4015 4016 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4017 &response); 4018 if (rc) { 4019 dev_err(&ctrl_info->pci_dev->dev, 4020 "error creating inbound AIO queue\n"); 4021 return rc; 4022 } 4023 4024 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4025 PQI_DEVICE_REGISTERS_OFFSET + 4026 get_unaligned_le64( 4027 &response.data.create_operational_iq.iq_pi_offset); 4028 4029 /* 4030 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4031 * assumed to be for RAID path I/O unless we change the queue's 4032 * property. 
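	 * The switch is done with a CHANGE_IQ_PROPERTY admin request that
	 * writes the vendor-specific PQI_IQ_PROPERTY_IS_AIO_QUEUE value
	 * for the second inbound queue of this group.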
4033 */ 4034 memset(&request, 0, sizeof(request)); 4035 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4036 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4037 &request.header.iu_length); 4038 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4039 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4040 &request.data.change_operational_iq_properties.queue_id); 4041 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4042 &request.data.change_operational_iq_properties.vendor_specific); 4043 4044 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4045 &response); 4046 if (rc) { 4047 dev_err(&ctrl_info->pci_dev->dev, 4048 "error changing queue property\n"); 4049 return rc; 4050 } 4051 4052 /* 4053 * Create OQ (Outbound Queue - device to host queue). 4054 */ 4055 memset(&request, 0, sizeof(request)); 4056 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4057 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4058 &request.header.iu_length); 4059 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4060 put_unaligned_le16(queue_group->oq_id, 4061 &request.data.create_operational_oq.queue_id); 4062 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4063 &request.data.create_operational_oq.element_array_addr); 4064 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4065 &request.data.create_operational_oq.pi_addr); 4066 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4067 &request.data.create_operational_oq.num_elements); 4068 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4069 &request.data.create_operational_oq.element_length); 4070 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4071 put_unaligned_le16(queue_group->int_msg_num, 4072 &request.data.create_operational_oq.int_msg_num); 4073 4074 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4075 &response); 4076 if (rc) { 4077 dev_err(&ctrl_info->pci_dev->dev, 4078 "error creating outbound queue\n"); 4079 return rc; 4080 } 4081 4082 queue_group->oq_ci = ctrl_info->iomem_base + 4083 PQI_DEVICE_REGISTERS_OFFSET + 4084 get_unaligned_le64( 4085 &response.data.create_operational_oq.oq_ci_offset); 4086 4087 return 0; 4088 } 4089 4090 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4091 { 4092 int rc; 4093 unsigned int i; 4094 4095 rc = pqi_create_event_queue(ctrl_info); 4096 if (rc) { 4097 dev_err(&ctrl_info->pci_dev->dev, 4098 "error creating event queue\n"); 4099 return rc; 4100 } 4101 4102 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4103 rc = pqi_create_queue_group(ctrl_info, i); 4104 if (rc) { 4105 dev_err(&ctrl_info->pci_dev->dev, 4106 "error creating queue group number %u/%u\n", 4107 i, ctrl_info->num_queue_groups); 4108 return rc; 4109 } 4110 } 4111 4112 return 0; 4113 } 4114 4115 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4116 (offsetof(struct pqi_event_config, descriptors) + \ 4117 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4118 4119 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4120 bool enable_events) 4121 { 4122 int rc; 4123 unsigned int i; 4124 struct pqi_event_config *event_config; 4125 struct pqi_event_descriptor *event_descriptor; 4126 struct pqi_general_management_request request; 4127 4128 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4129 GFP_KERNEL); 4130 if (!event_config) 4131 return -ENOMEM; 4132 4133 memset(&request, 0, sizeof(request)); 4134 4135 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4136 
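	/*
	 * Event configuration is a two-step exchange: first read the
	 * controller's current event configuration (DMA_FROM_DEVICE), then
	 * point each supported event descriptor at the driver's event queue
	 * (or at OQ ID 0 to disable it) and write the table back with a
	 * SET_VENDOR_EVENT_CONFIG request (DMA_TO_DEVICE).
	 */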
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4137 data.report_event_configuration.sg_descriptors[1]) - 4138 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4139 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4140 &request.data.report_event_configuration.buffer_length); 4141 4142 rc = pqi_map_single(ctrl_info->pci_dev, 4143 request.data.report_event_configuration.sg_descriptors, 4144 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4145 DMA_FROM_DEVICE); 4146 if (rc) 4147 goto out; 4148 4149 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4150 0, NULL, NO_TIMEOUT); 4151 4152 pqi_pci_unmap(ctrl_info->pci_dev, 4153 request.data.report_event_configuration.sg_descriptors, 1, 4154 DMA_FROM_DEVICE); 4155 4156 if (rc) 4157 goto out; 4158 4159 for (i = 0; i < event_config->num_event_descriptors; i++) { 4160 event_descriptor = &event_config->descriptors[i]; 4161 if (enable_events && 4162 pqi_is_supported_event(event_descriptor->event_type)) 4163 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4164 &event_descriptor->oq_id); 4165 else 4166 put_unaligned_le16(0, &event_descriptor->oq_id); 4167 } 4168 4169 memset(&request, 0, sizeof(request)); 4170 4171 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4172 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4173 data.report_event_configuration.sg_descriptors[1]) - 4174 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4175 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4176 &request.data.report_event_configuration.buffer_length); 4177 4178 rc = pqi_map_single(ctrl_info->pci_dev, 4179 request.data.report_event_configuration.sg_descriptors, 4180 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4181 DMA_TO_DEVICE); 4182 if (rc) 4183 goto out; 4184 4185 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4186 NULL, NO_TIMEOUT); 4187 4188 pqi_pci_unmap(ctrl_info->pci_dev, 4189 request.data.report_event_configuration.sg_descriptors, 1, 4190 DMA_TO_DEVICE); 4191 4192 out: 4193 kfree(event_config); 4194 4195 return rc; 4196 } 4197 4198 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4199 { 4200 return pqi_configure_events(ctrl_info, true); 4201 } 4202 4203 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4204 { 4205 return pqi_configure_events(ctrl_info, false); 4206 } 4207 4208 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4209 { 4210 unsigned int i; 4211 struct device *dev; 4212 size_t sg_chain_buffer_length; 4213 struct pqi_io_request *io_request; 4214 4215 if (!ctrl_info->io_request_pool) 4216 return; 4217 4218 dev = &ctrl_info->pci_dev->dev; 4219 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4220 io_request = ctrl_info->io_request_pool; 4221 4222 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4223 kfree(io_request->iu); 4224 if (!io_request->sg_chain_buffer) 4225 break; 4226 dma_free_coherent(dev, sg_chain_buffer_length, 4227 io_request->sg_chain_buffer, 4228 io_request->sg_chain_buffer_dma_handle); 4229 io_request++; 4230 } 4231 4232 kfree(ctrl_info->io_request_pool); 4233 ctrl_info->io_request_pool = NULL; 4234 } 4235 4236 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4237 { 4238 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4239 ctrl_info->error_buffer_length, 4240 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4241 4242 if (!ctrl_info->error_buffer) 4243 return -ENOMEM; 4244 4245 
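	/*
	 * error_buffer_length was sized in pqi_calculate_io_resources() as
	 * one PQI_ERROR_BUFFER_ELEMENT_LENGTH slot per I/O slot, so every
	 * outstanding request has its own error entry in this DMA-coherent
	 * buffer.
	 */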
return 0; 4246 } 4247 4248 static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info) 4249 { 4250 unsigned int i; 4251 void *sg_chain_buffer; 4252 size_t sg_chain_buffer_length; 4253 dma_addr_t sg_chain_buffer_dma_handle; 4254 struct device *dev; 4255 struct pqi_io_request *io_request; 4256 4257 ctrl_info->io_request_pool = 4258 kcalloc(ctrl_info->max_io_slots, 4259 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL); 4260 4261 if (!ctrl_info->io_request_pool) { 4262 dev_err(&ctrl_info->pci_dev->dev, 4263 "failed to allocate I/O request pool\n"); 4264 goto error; 4265 } 4266 4267 dev = &ctrl_info->pci_dev->dev; 4268 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4269 io_request = ctrl_info->io_request_pool; 4270 4271 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4272 io_request->iu = 4273 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL); 4274 4275 if (!io_request->iu) { 4276 dev_err(&ctrl_info->pci_dev->dev, 4277 "failed to allocate IU buffers\n"); 4278 goto error; 4279 } 4280 4281 sg_chain_buffer = dma_alloc_coherent(dev, 4282 sg_chain_buffer_length, &sg_chain_buffer_dma_handle, 4283 GFP_KERNEL); 4284 4285 if (!sg_chain_buffer) { 4286 dev_err(&ctrl_info->pci_dev->dev, 4287 "failed to allocate PQI scatter-gather chain buffers\n"); 4288 goto error; 4289 } 4290 4291 io_request->index = i; 4292 io_request->sg_chain_buffer = sg_chain_buffer; 4293 io_request->sg_chain_buffer_dma_handle = 4294 sg_chain_buffer_dma_handle; 4295 io_request++; 4296 } 4297 4298 return 0; 4299 4300 error: 4301 pqi_free_all_io_requests(ctrl_info); 4302 4303 return -ENOMEM; 4304 } 4305 4306 /* 4307 * Calculate required resources that are sized based on max. outstanding 4308 * requests and max. transfer size. 4309 */ 4310 4311 static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info) 4312 { 4313 u32 max_transfer_size; 4314 u32 max_sg_entries; 4315 4316 ctrl_info->scsi_ml_can_queue = 4317 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS; 4318 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests; 4319 4320 ctrl_info->error_buffer_length = 4321 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH; 4322 4323 if (reset_devices) 4324 max_transfer_size = min(ctrl_info->max_transfer_size, 4325 PQI_MAX_TRANSFER_SIZE_KDUMP); 4326 else 4327 max_transfer_size = min(ctrl_info->max_transfer_size, 4328 PQI_MAX_TRANSFER_SIZE); 4329 4330 max_sg_entries = max_transfer_size / PAGE_SIZE; 4331 4332 /* +1 to cover when the buffer is not page-aligned. 
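	 * A transfer of max_transfer_size bytes that does not start on a
	 * page boundary can straddle one extra page: for example, with
	 * 4 KiB pages a 1 MiB transfer may need 257 entries instead of 256.
	 * After clamping to the controller's limit, the usable transfer
	 * size is recomputed below as (max_sg_entries - 1) * PAGE_SIZE.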
*/ 4333 max_sg_entries++; 4334 4335 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4336 4337 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4338 4339 ctrl_info->sg_chain_buffer_length = 4340 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4341 PQI_EXTRA_SGL_MEMORY; 4342 ctrl_info->sg_tablesize = max_sg_entries; 4343 ctrl_info->max_sectors = max_transfer_size / 512; 4344 } 4345 4346 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4347 { 4348 int num_queue_groups; 4349 u16 num_elements_per_iq; 4350 u16 num_elements_per_oq; 4351 4352 if (reset_devices) { 4353 num_queue_groups = 1; 4354 } else { 4355 int num_cpus; 4356 int max_queue_groups; 4357 4358 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4359 ctrl_info->max_outbound_queues - 1); 4360 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4361 4362 num_cpus = num_online_cpus(); 4363 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4364 num_queue_groups = min(num_queue_groups, max_queue_groups); 4365 } 4366 4367 ctrl_info->num_queue_groups = num_queue_groups; 4368 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4369 4370 /* 4371 * Make sure that the max. inbound IU length is an even multiple 4372 * of our inbound element length. 4373 */ 4374 ctrl_info->max_inbound_iu_length = 4375 (ctrl_info->max_inbound_iu_length_per_firmware / 4376 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4377 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4378 4379 num_elements_per_iq = 4380 (ctrl_info->max_inbound_iu_length / 4381 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4382 4383 /* Add one because one element in each queue is unusable. */ 4384 num_elements_per_iq++; 4385 4386 num_elements_per_iq = min(num_elements_per_iq, 4387 ctrl_info->max_elements_per_iq); 4388 4389 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4390 num_elements_per_oq = min(num_elements_per_oq, 4391 ctrl_info->max_elements_per_oq); 4392 4393 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4394 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4395 4396 ctrl_info->max_sg_per_iu = 4397 ((ctrl_info->max_inbound_iu_length - 4398 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4399 sizeof(struct pqi_sg_descriptor)) + 4400 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4401 } 4402 4403 static inline void pqi_set_sg_descriptor( 4404 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4405 { 4406 u64 address = (u64)sg_dma_address(sg); 4407 unsigned int length = sg_dma_len(sg); 4408 4409 put_unaligned_le64(address, &sg_descriptor->address); 4410 put_unaligned_le32(length, &sg_descriptor->length); 4411 put_unaligned_le32(0, &sg_descriptor->flags); 4412 } 4413 4414 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4415 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4416 struct pqi_io_request *io_request) 4417 { 4418 int i; 4419 u16 iu_length; 4420 int sg_count; 4421 bool chained; 4422 unsigned int num_sg_in_iu; 4423 unsigned int max_sg_per_iu; 4424 struct scatterlist *sg; 4425 struct pqi_sg_descriptor *sg_descriptor; 4426 4427 sg_count = scsi_dma_map(scmd); 4428 if (sg_count < 0) 4429 return sg_count; 4430 4431 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4432 PQI_REQUEST_HEADER_LENGTH; 4433 4434 if (sg_count == 0) 4435 goto out; 4436 4437 sg = scsi_sglist(scmd); 4438 sg_descriptor = request->sg_descriptors; 4439 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4440 chained = false; 4441 num_sg_in_iu = 0; 4442 i = 0; 4443 4444 while (1) { 4445 
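		/*
		 * Fill the SG descriptors embedded in the IU.  Once the last
		 * embedded slot is reached, turn it into a CISS_SG_CHAIN
		 * descriptor that points at this request's preallocated
		 * sg_chain_buffer and keep writing the remaining entries
		 * there.  The final descriptor is flagged CISS_SG_LAST after
		 * the loop.
		 */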
pqi_set_sg_descriptor(sg_descriptor, sg); 4446 if (!chained) 4447 num_sg_in_iu++; 4448 i++; 4449 if (i == sg_count) 4450 break; 4451 sg_descriptor++; 4452 if (i == max_sg_per_iu) { 4453 put_unaligned_le64( 4454 (u64)io_request->sg_chain_buffer_dma_handle, 4455 &sg_descriptor->address); 4456 put_unaligned_le32((sg_count - num_sg_in_iu) 4457 * sizeof(*sg_descriptor), 4458 &sg_descriptor->length); 4459 put_unaligned_le32(CISS_SG_CHAIN, 4460 &sg_descriptor->flags); 4461 chained = true; 4462 num_sg_in_iu++; 4463 sg_descriptor = io_request->sg_chain_buffer; 4464 } 4465 sg = sg_next(sg); 4466 } 4467 4468 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4469 request->partial = chained; 4470 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4471 4472 out: 4473 put_unaligned_le16(iu_length, &request->header.iu_length); 4474 4475 return 0; 4476 } 4477 4478 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4479 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4480 struct pqi_io_request *io_request) 4481 { 4482 int i; 4483 u16 iu_length; 4484 int sg_count; 4485 bool chained; 4486 unsigned int num_sg_in_iu; 4487 unsigned int max_sg_per_iu; 4488 struct scatterlist *sg; 4489 struct pqi_sg_descriptor *sg_descriptor; 4490 4491 sg_count = scsi_dma_map(scmd); 4492 if (sg_count < 0) 4493 return sg_count; 4494 4495 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4496 PQI_REQUEST_HEADER_LENGTH; 4497 num_sg_in_iu = 0; 4498 4499 if (sg_count == 0) 4500 goto out; 4501 4502 sg = scsi_sglist(scmd); 4503 sg_descriptor = request->sg_descriptors; 4504 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4505 chained = false; 4506 i = 0; 4507 4508 while (1) { 4509 pqi_set_sg_descriptor(sg_descriptor, sg); 4510 if (!chained) 4511 num_sg_in_iu++; 4512 i++; 4513 if (i == sg_count) 4514 break; 4515 sg_descriptor++; 4516 if (i == max_sg_per_iu) { 4517 put_unaligned_le64( 4518 (u64)io_request->sg_chain_buffer_dma_handle, 4519 &sg_descriptor->address); 4520 put_unaligned_le32((sg_count - num_sg_in_iu) 4521 * sizeof(*sg_descriptor), 4522 &sg_descriptor->length); 4523 put_unaligned_le32(CISS_SG_CHAIN, 4524 &sg_descriptor->flags); 4525 chained = true; 4526 num_sg_in_iu++; 4527 sg_descriptor = io_request->sg_chain_buffer; 4528 } 4529 sg = sg_next(sg); 4530 } 4531 4532 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4533 request->partial = chained; 4534 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4535 4536 out: 4537 put_unaligned_le16(iu_length, &request->header.iu_length); 4538 request->num_sg_descriptors = num_sg_in_iu; 4539 4540 return 0; 4541 } 4542 4543 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4544 void *context) 4545 { 4546 struct scsi_cmnd *scmd; 4547 4548 scmd = io_request->scmd; 4549 pqi_free_io_request(io_request); 4550 scsi_dma_unmap(scmd); 4551 pqi_scsi_done(scmd); 4552 } 4553 4554 static int pqi_raid_submit_scsi_cmd_with_io_request( 4555 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4556 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4557 struct pqi_queue_group *queue_group) 4558 { 4559 int rc; 4560 size_t cdb_length; 4561 struct pqi_raid_path_request *request; 4562 4563 io_request->io_complete_callback = pqi_raid_io_complete; 4564 io_request->scmd = scmd; 4565 4566 request = io_request->iu; 4567 memset(request, 0, 4568 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4569 4570 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4571 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4572 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4573 put_unaligned_le16(io_request->index, &request->request_id); 4574 request->error_index = request->request_id; 4575 memcpy(request->lun_number, device->scsi3addr, 4576 sizeof(request->lun_number)); 4577 4578 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4579 memcpy(request->cdb, scmd->cmnd, cdb_length); 4580 4581 switch (cdb_length) { 4582 case 6: 4583 case 10: 4584 case 12: 4585 case 16: 4586 /* No bytes in the Additional CDB bytes field */ 4587 request->additional_cdb_bytes_usage = 4588 SOP_ADDITIONAL_CDB_BYTES_0; 4589 break; 4590 case 20: 4591 /* 4 bytes in the Additional cdb field */ 4592 request->additional_cdb_bytes_usage = 4593 SOP_ADDITIONAL_CDB_BYTES_4; 4594 break; 4595 case 24: 4596 /* 8 bytes in the Additional cdb field */ 4597 request->additional_cdb_bytes_usage = 4598 SOP_ADDITIONAL_CDB_BYTES_8; 4599 break; 4600 case 28: 4601 /* 12 bytes in the Additional cdb field */ 4602 request->additional_cdb_bytes_usage = 4603 SOP_ADDITIONAL_CDB_BYTES_12; 4604 break; 4605 case 32: 4606 default: 4607 /* 16 bytes in the Additional cdb field */ 4608 request->additional_cdb_bytes_usage = 4609 SOP_ADDITIONAL_CDB_BYTES_16; 4610 break; 4611 } 4612 4613 switch (scmd->sc_data_direction) { 4614 case DMA_TO_DEVICE: 4615 request->data_direction = SOP_READ_FLAG; 4616 break; 4617 case DMA_FROM_DEVICE: 4618 request->data_direction = SOP_WRITE_FLAG; 4619 break; 4620 case DMA_NONE: 4621 request->data_direction = SOP_NO_DIRECTION_FLAG; 4622 break; 4623 case DMA_BIDIRECTIONAL: 4624 request->data_direction = SOP_BIDIRECTIONAL; 4625 break; 4626 default: 4627 dev_err(&ctrl_info->pci_dev->dev, 4628 "unknown data direction: %d\n", 4629 scmd->sc_data_direction); 4630 break; 4631 } 4632 4633 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 4634 if (rc) { 4635 pqi_free_io_request(io_request); 4636 return SCSI_MLQUEUE_HOST_BUSY; 4637 } 4638 4639 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 4640 4641 return 0; 4642 } 4643 4644 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4645 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4646 struct pqi_queue_group *queue_group) 4647 { 4648 struct pqi_io_request *io_request; 4649 4650 io_request = pqi_alloc_io_request(ctrl_info); 4651 4652 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4653 device, scmd, queue_group); 4654 } 4655 4656 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 4657 { 4658 if (!pqi_ctrl_blocked(ctrl_info)) 4659 schedule_work(&ctrl_info->raid_bypass_retry_work); 4660 } 4661 4662 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 4663 { 4664 struct scsi_cmnd *scmd; 4665 struct pqi_scsi_dev *device; 4666 struct pqi_ctrl_info *ctrl_info; 4667 4668 if (!io_request->raid_bypass) 4669 return false; 4670 4671 scmd = io_request->scmd; 4672 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 4673 return false; 4674 if (host_byte(scmd->result) == DID_NO_CONNECT) 4675 return false; 4676 4677 device = scmd->device->hostdata; 4678 if (pqi_device_offline(device)) 4679 return false; 4680 4681 ctrl_info = shost_to_hba(scmd->device->host); 4682 if (pqi_ctrl_offline(ctrl_info)) 4683 return false; 4684 4685 return true; 4686 } 4687 4688 static inline void pqi_add_to_raid_bypass_retry_list( 4689 struct pqi_ctrl_info *ctrl_info, 4690 struct pqi_io_request *io_request, bool at_head) 4691 { 4692 unsigned long flags; 4693 4694 
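	/*
	 * The retry list is protected by raid_bypass_retry_list_lock.
	 * at_head is set when a resubmission attempt fails so that the
	 * request goes back to the front of the list and keeps its place.
	 */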
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4695 if (at_head) 4696 list_add(&io_request->request_list_entry, 4697 &ctrl_info->raid_bypass_retry_list); 4698 else 4699 list_add_tail(&io_request->request_list_entry, 4700 &ctrl_info->raid_bypass_retry_list); 4701 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4702 } 4703 4704 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 4705 void *context) 4706 { 4707 struct scsi_cmnd *scmd; 4708 4709 scmd = io_request->scmd; 4710 pqi_free_io_request(io_request); 4711 pqi_scsi_done(scmd); 4712 } 4713 4714 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 4715 { 4716 struct scsi_cmnd *scmd; 4717 struct pqi_ctrl_info *ctrl_info; 4718 4719 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 4720 scmd = io_request->scmd; 4721 scmd->result = 0; 4722 ctrl_info = shost_to_hba(scmd->device->host); 4723 4724 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 4725 pqi_schedule_bypass_retry(ctrl_info); 4726 } 4727 4728 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 4729 { 4730 struct scsi_cmnd *scmd; 4731 struct pqi_scsi_dev *device; 4732 struct pqi_ctrl_info *ctrl_info; 4733 struct pqi_queue_group *queue_group; 4734 4735 scmd = io_request->scmd; 4736 device = scmd->device->hostdata; 4737 if (pqi_device_in_reset(device)) { 4738 pqi_free_io_request(io_request); 4739 set_host_byte(scmd, DID_RESET); 4740 pqi_scsi_done(scmd); 4741 return 0; 4742 } 4743 4744 ctrl_info = shost_to_hba(scmd->device->host); 4745 queue_group = io_request->queue_group; 4746 4747 pqi_reinit_io_request(io_request); 4748 4749 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4750 device, scmd, queue_group); 4751 } 4752 4753 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 4754 struct pqi_ctrl_info *ctrl_info) 4755 { 4756 unsigned long flags; 4757 struct pqi_io_request *io_request; 4758 4759 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4760 io_request = list_first_entry_or_null( 4761 &ctrl_info->raid_bypass_retry_list, 4762 struct pqi_io_request, request_list_entry); 4763 if (io_request) 4764 list_del(&io_request->request_list_entry); 4765 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4766 4767 return io_request; 4768 } 4769 4770 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 4771 { 4772 int rc; 4773 struct pqi_io_request *io_request; 4774 4775 pqi_ctrl_busy(ctrl_info); 4776 4777 while (1) { 4778 if (pqi_ctrl_blocked(ctrl_info)) 4779 break; 4780 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 4781 if (!io_request) 4782 break; 4783 rc = pqi_retry_raid_bypass(io_request); 4784 if (rc) { 4785 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 4786 true); 4787 pqi_schedule_bypass_retry(ctrl_info); 4788 break; 4789 } 4790 } 4791 4792 pqi_ctrl_unbusy(ctrl_info); 4793 } 4794 4795 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 4796 { 4797 struct pqi_ctrl_info *ctrl_info; 4798 4799 ctrl_info = container_of(work, struct pqi_ctrl_info, 4800 raid_bypass_retry_work); 4801 pqi_retry_raid_bypass_requests(ctrl_info); 4802 } 4803 4804 static void pqi_clear_all_queued_raid_bypass_retries( 4805 struct pqi_ctrl_info *ctrl_info) 4806 { 4807 unsigned long flags; 4808 4809 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4810 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 4811 
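	/*
	 * Re-initializing the list head detaches every queued bypass retry
	 * in one step; the lock prevents new entries from being added while
	 * the list is cleared.  The detached requests are presumably failed
	 * or completed by the caller's offline/teardown handling.
	 */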
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4812 } 4813 4814 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 4815 void *context) 4816 { 4817 struct scsi_cmnd *scmd; 4818 4819 scmd = io_request->scmd; 4820 scsi_dma_unmap(scmd); 4821 if (io_request->status == -EAGAIN) 4822 set_host_byte(scmd, DID_IMM_RETRY); 4823 else if (pqi_raid_bypass_retry_needed(io_request)) { 4824 pqi_queue_raid_bypass_retry(io_request); 4825 return; 4826 } 4827 pqi_free_io_request(io_request); 4828 pqi_scsi_done(scmd); 4829 } 4830 4831 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4832 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4833 struct pqi_queue_group *queue_group) 4834 { 4835 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 4836 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 4837 } 4838 4839 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 4840 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 4841 unsigned int cdb_length, struct pqi_queue_group *queue_group, 4842 struct pqi_encryption_info *encryption_info, bool raid_bypass) 4843 { 4844 int rc; 4845 struct pqi_io_request *io_request; 4846 struct pqi_aio_path_request *request; 4847 4848 io_request = pqi_alloc_io_request(ctrl_info); 4849 io_request->io_complete_callback = pqi_aio_io_complete; 4850 io_request->scmd = scmd; 4851 io_request->raid_bypass = raid_bypass; 4852 4853 request = io_request->iu; 4854 memset(request, 0, 4855 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4856 4857 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 4858 put_unaligned_le32(aio_handle, &request->nexus_id); 4859 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 4860 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4861 put_unaligned_le16(io_request->index, &request->request_id); 4862 request->error_index = request->request_id; 4863 if (cdb_length > sizeof(request->cdb)) 4864 cdb_length = sizeof(request->cdb); 4865 request->cdb_length = cdb_length; 4866 memcpy(request->cdb, cdb, cdb_length); 4867 4868 switch (scmd->sc_data_direction) { 4869 case DMA_TO_DEVICE: 4870 request->data_direction = SOP_READ_FLAG; 4871 break; 4872 case DMA_FROM_DEVICE: 4873 request->data_direction = SOP_WRITE_FLAG; 4874 break; 4875 case DMA_NONE: 4876 request->data_direction = SOP_NO_DIRECTION_FLAG; 4877 break; 4878 case DMA_BIDIRECTIONAL: 4879 request->data_direction = SOP_BIDIRECTIONAL; 4880 break; 4881 default: 4882 dev_err(&ctrl_info->pci_dev->dev, 4883 "unknown data direction: %d\n", 4884 scmd->sc_data_direction); 4885 break; 4886 } 4887 4888 if (encryption_info) { 4889 request->encryption_enable = true; 4890 put_unaligned_le16(encryption_info->data_encryption_key_index, 4891 &request->data_encryption_key_index); 4892 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 4893 &request->encrypt_tweak_lower); 4894 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 4895 &request->encrypt_tweak_upper); 4896 } 4897 4898 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 4899 if (rc) { 4900 pqi_free_io_request(io_request); 4901 return SCSI_MLQUEUE_HOST_BUSY; 4902 } 4903 4904 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 4905 4906 return 0; 4907 } 4908 4909 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 4910 struct scsi_cmnd *scmd) 4911 { 4912 u16 hw_queue; 4913 4914 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 4915 if (hw_queue > ctrl_info->max_hw_queue_index) 4916 hw_queue = 
0; 4917 4918 return hw_queue; 4919 } 4920 4921 /* 4922 * This function gets called just before we hand the completed SCSI request 4923 * back to the SML. 4924 */ 4925 4926 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 4927 { 4928 struct pqi_scsi_dev *device; 4929 4930 device = scmd->device->hostdata; 4931 atomic_dec(&device->scsi_cmds_outstanding); 4932 } 4933 4934 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 4935 struct scsi_cmnd *scmd) 4936 { 4937 int rc; 4938 struct pqi_ctrl_info *ctrl_info; 4939 struct pqi_scsi_dev *device; 4940 u16 hw_queue; 4941 struct pqi_queue_group *queue_group; 4942 bool raid_bypassed; 4943 4944 device = scmd->device->hostdata; 4945 ctrl_info = shost_to_hba(shost); 4946 4947 atomic_inc(&device->scsi_cmds_outstanding); 4948 4949 if (pqi_ctrl_offline(ctrl_info)) { 4950 set_host_byte(scmd, DID_NO_CONNECT); 4951 pqi_scsi_done(scmd); 4952 return 0; 4953 } 4954 4955 pqi_ctrl_busy(ctrl_info); 4956 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 4957 rc = SCSI_MLQUEUE_HOST_BUSY; 4958 goto out; 4959 } 4960 4961 /* 4962 * This is necessary because the SML doesn't zero out this field during 4963 * error recovery. 4964 */ 4965 scmd->result = 0; 4966 4967 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 4968 queue_group = &ctrl_info->queue_groups[hw_queue]; 4969 4970 if (pqi_is_logical_device(device)) { 4971 raid_bypassed = false; 4972 if (device->raid_bypass_enabled && 4973 !blk_rq_is_passthrough(scmd->request)) { 4974 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 4975 scmd, queue_group); 4976 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 4977 raid_bypassed = true; 4978 } 4979 if (!raid_bypassed) 4980 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4981 queue_group); 4982 } else { 4983 if (device->aio_enabled) 4984 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 4985 queue_group); 4986 else 4987 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4988 queue_group); 4989 } 4990 4991 out: 4992 pqi_ctrl_unbusy(ctrl_info); 4993 if (rc) 4994 atomic_dec(&device->scsi_cmds_outstanding); 4995 4996 return rc; 4997 } 4998 4999 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5000 struct pqi_queue_group *queue_group) 5001 { 5002 unsigned int path; 5003 unsigned long flags; 5004 bool list_is_empty; 5005 5006 for (path = 0; path < 2; path++) { 5007 while (1) { 5008 spin_lock_irqsave( 5009 &queue_group->submit_lock[path], flags); 5010 list_is_empty = 5011 list_empty(&queue_group->request_list[path]); 5012 spin_unlock_irqrestore( 5013 &queue_group->submit_lock[path], flags); 5014 if (list_is_empty) 5015 break; 5016 pqi_check_ctrl_health(ctrl_info); 5017 if (pqi_ctrl_offline(ctrl_info)) 5018 return -ENXIO; 5019 usleep_range(1000, 2000); 5020 } 5021 } 5022 5023 return 0; 5024 } 5025 5026 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5027 { 5028 int rc; 5029 unsigned int i; 5030 unsigned int path; 5031 struct pqi_queue_group *queue_group; 5032 pqi_index_t iq_pi; 5033 pqi_index_t iq_ci; 5034 5035 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5036 queue_group = &ctrl_info->queue_groups[i]; 5037 5038 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5039 if (rc) 5040 return rc; 5041 5042 for (path = 0; path < 2; path++) { 5043 iq_pi = queue_group->iq_pi_copy[path]; 5044 5045 while (1) { 5046 iq_ci = readl(queue_group->iq_ci[path]); 5047 if (iq_ci == iq_pi) 5048 break; 5049 pqi_check_ctrl_health(ctrl_info); 5050 if (pqi_ctrl_offline(ctrl_info)) 5051 return 
-ENXIO; 5052 usleep_range(1000, 2000); 5053 } 5054 } 5055 } 5056 5057 return 0; 5058 } 5059 5060 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5061 struct pqi_scsi_dev *device) 5062 { 5063 unsigned int i; 5064 unsigned int path; 5065 struct pqi_queue_group *queue_group; 5066 unsigned long flags; 5067 struct pqi_io_request *io_request; 5068 struct pqi_io_request *next; 5069 struct scsi_cmnd *scmd; 5070 struct pqi_scsi_dev *scsi_device; 5071 5072 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5073 queue_group = &ctrl_info->queue_groups[i]; 5074 5075 for (path = 0; path < 2; path++) { 5076 spin_lock_irqsave( 5077 &queue_group->submit_lock[path], flags); 5078 5079 list_for_each_entry_safe(io_request, next, 5080 &queue_group->request_list[path], 5081 request_list_entry) { 5082 scmd = io_request->scmd; 5083 if (!scmd) 5084 continue; 5085 5086 scsi_device = scmd->device->hostdata; 5087 if (scsi_device != device) 5088 continue; 5089 5090 list_del(&io_request->request_list_entry); 5091 set_host_byte(scmd, DID_RESET); 5092 pqi_scsi_done(scmd); 5093 } 5094 5095 spin_unlock_irqrestore( 5096 &queue_group->submit_lock[path], flags); 5097 } 5098 } 5099 } 5100 5101 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5102 struct pqi_scsi_dev *device) 5103 { 5104 while (atomic_read(&device->scsi_cmds_outstanding)) { 5105 pqi_check_ctrl_health(ctrl_info); 5106 if (pqi_ctrl_offline(ctrl_info)) 5107 return -ENXIO; 5108 usleep_range(1000, 2000); 5109 } 5110 5111 return 0; 5112 } 5113 5114 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5115 { 5116 bool io_pending; 5117 unsigned long flags; 5118 struct pqi_scsi_dev *device; 5119 5120 while (1) { 5121 io_pending = false; 5122 5123 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5124 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5125 scsi_device_list_entry) { 5126 if (atomic_read(&device->scsi_cmds_outstanding)) { 5127 io_pending = true; 5128 break; 5129 } 5130 } 5131 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5132 flags); 5133 5134 if (!io_pending) 5135 break; 5136 5137 pqi_check_ctrl_health(ctrl_info); 5138 if (pqi_ctrl_offline(ctrl_info)) 5139 return -ENXIO; 5140 5141 usleep_range(1000, 2000); 5142 } 5143 5144 return 0; 5145 } 5146 5147 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5148 void *context) 5149 { 5150 struct completion *waiting = context; 5151 5152 complete(waiting); 5153 } 5154 5155 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5156 5157 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5158 struct pqi_scsi_dev *device, struct completion *wait) 5159 { 5160 int rc; 5161 5162 while (1) { 5163 if (wait_for_completion_io_timeout(wait, 5164 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5165 rc = 0; 5166 break; 5167 } 5168 5169 pqi_check_ctrl_health(ctrl_info); 5170 if (pqi_ctrl_offline(ctrl_info)) { 5171 rc = -ENXIO; 5172 break; 5173 } 5174 } 5175 5176 return rc; 5177 } 5178 5179 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5180 struct pqi_scsi_dev *device) 5181 { 5182 int rc; 5183 struct pqi_io_request *io_request; 5184 DECLARE_COMPLETION_ONSTACK(wait); 5185 struct pqi_task_management_request *request; 5186 5187 io_request = pqi_alloc_io_request(ctrl_info); 5188 io_request->io_complete_callback = pqi_lun_reset_complete; 5189 io_request->context = &wait; 5190 5191 request = io_request->iu; 5192 memset(request, 0, sizeof(*request)); 5193 5194 request->header.iu_type = 
PQI_REQUEST_IU_TASK_MANAGEMENT; 5195 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5196 &request->header.iu_length); 5197 put_unaligned_le16(io_request->index, &request->request_id); 5198 memcpy(request->lun_number, device->scsi3addr, 5199 sizeof(request->lun_number)); 5200 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5201 5202 pqi_start_io(ctrl_info, 5203 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5204 io_request); 5205 5206 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5207 if (rc == 0) 5208 rc = io_request->status; 5209 5210 pqi_free_io_request(io_request); 5211 5212 return rc; 5213 } 5214 5215 /* Performs a reset at the LUN level. */ 5216 5217 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5218 struct pqi_scsi_dev *device) 5219 { 5220 int rc; 5221 5222 rc = pqi_lun_reset(ctrl_info, device); 5223 if (rc == 0) 5224 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5225 5226 return rc == 0 ? SUCCESS : FAILED; 5227 } 5228 5229 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5230 { 5231 int rc; 5232 struct Scsi_Host *shost; 5233 struct pqi_ctrl_info *ctrl_info; 5234 struct pqi_scsi_dev *device; 5235 5236 shost = scmd->device->host; 5237 ctrl_info = shost_to_hba(shost); 5238 device = scmd->device->hostdata; 5239 5240 dev_err(&ctrl_info->pci_dev->dev, 5241 "resetting scsi %d:%d:%d:%d\n", 5242 shost->host_no, device->bus, device->target, device->lun); 5243 5244 pqi_check_ctrl_health(ctrl_info); 5245 if (pqi_ctrl_offline(ctrl_info)) { 5246 rc = FAILED; 5247 goto out; 5248 } 5249 5250 mutex_lock(&ctrl_info->lun_reset_mutex); 5251 5252 pqi_ctrl_block_requests(ctrl_info); 5253 pqi_ctrl_wait_until_quiesced(ctrl_info); 5254 pqi_fail_io_queued_for_device(ctrl_info, device); 5255 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5256 pqi_device_reset_start(device); 5257 pqi_ctrl_unblock_requests(ctrl_info); 5258 5259 if (rc) 5260 rc = FAILED; 5261 else 5262 rc = pqi_device_reset(ctrl_info, device); 5263 5264 pqi_device_reset_done(device); 5265 5266 mutex_unlock(&ctrl_info->lun_reset_mutex); 5267 5268 out: 5269 dev_err(&ctrl_info->pci_dev->dev, 5270 "reset of scsi %d:%d:%d:%d: %s\n", 5271 shost->host_no, device->bus, device->target, device->lun, 5272 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5273 5274 return rc; 5275 } 5276 5277 static int pqi_slave_alloc(struct scsi_device *sdev) 5278 { 5279 struct pqi_scsi_dev *device; 5280 unsigned long flags; 5281 struct pqi_ctrl_info *ctrl_info; 5282 struct scsi_target *starget; 5283 struct sas_rphy *rphy; 5284 5285 ctrl_info = shost_to_hba(sdev->host); 5286 5287 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5288 5289 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5290 starget = scsi_target(sdev); 5291 rphy = target_to_rphy(starget); 5292 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5293 if (device) { 5294 device->target = sdev_id(sdev); 5295 device->lun = sdev->lun; 5296 device->target_lun_valid = true; 5297 } 5298 } else { 5299 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5300 sdev_id(sdev), sdev->lun); 5301 } 5302 5303 if (device) { 5304 sdev->hostdata = device; 5305 device->sdev = sdev; 5306 if (device->queue_depth) { 5307 device->advertised_queue_depth = device->queue_depth; 5308 scsi_change_queue_depth(sdev, 5309 device->advertised_queue_depth); 5310 } 5311 } 5312 5313 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5314 5315 return 0; 5316 } 5317 5318 static int pqi_map_queues(struct Scsi_Host *shost) 5319 { 5320 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5321 5322 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0); 5323 } 5324 5325 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5326 void __user *arg) 5327 { 5328 struct pci_dev *pci_dev; 5329 u32 subsystem_vendor; 5330 u32 subsystem_device; 5331 cciss_pci_info_struct pciinfo; 5332 5333 if (!arg) 5334 return -EINVAL; 5335 5336 pci_dev = ctrl_info->pci_dev; 5337 5338 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5339 pciinfo.bus = pci_dev->bus->number; 5340 pciinfo.dev_fn = pci_dev->devfn; 5341 subsystem_vendor = pci_dev->subsystem_vendor; 5342 subsystem_device = pci_dev->subsystem_device; 5343 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5344 subsystem_vendor; 5345 5346 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5347 return -EFAULT; 5348 5349 return 0; 5350 } 5351 5352 static int pqi_getdrivver_ioctl(void __user *arg) 5353 { 5354 u32 version; 5355 5356 if (!arg) 5357 return -EINVAL; 5358 5359 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5360 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5361 5362 if (copy_to_user(arg, &version, sizeof(version))) 5363 return -EFAULT; 5364 5365 return 0; 5366 } 5367 5368 struct ciss_error_info { 5369 u8 scsi_status; 5370 int command_status; 5371 size_t sense_data_length; 5372 }; 5373 5374 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5375 struct ciss_error_info *ciss_error_info) 5376 { 5377 int ciss_cmd_status; 5378 size_t sense_data_length; 5379 5380 switch (pqi_error_info->data_out_result) { 5381 case PQI_DATA_IN_OUT_GOOD: 5382 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5383 break; 5384 case PQI_DATA_IN_OUT_UNDERFLOW: 5385 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5386 break; 5387 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5388 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5389 break; 5390 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5391 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5392 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5393 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5394 case PQI_DATA_IN_OUT_ERROR: 5395 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5396 break; 5397 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5398 case 
PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5399 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5400 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5401 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5402 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5403 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5404 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5405 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5406 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5407 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5408 break; 5409 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5410 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5411 break; 5412 case PQI_DATA_IN_OUT_ABORTED: 5413 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5414 break; 5415 case PQI_DATA_IN_OUT_TIMEOUT: 5416 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5417 break; 5418 default: 5419 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5420 break; 5421 } 5422 5423 sense_data_length = 5424 get_unaligned_le16(&pqi_error_info->sense_data_length); 5425 if (sense_data_length == 0) 5426 sense_data_length = 5427 get_unaligned_le16(&pqi_error_info->response_data_length); 5428 if (sense_data_length) 5429 if (sense_data_length > sizeof(pqi_error_info->data)) 5430 sense_data_length = sizeof(pqi_error_info->data); 5431 5432 ciss_error_info->scsi_status = pqi_error_info->status; 5433 ciss_error_info->command_status = ciss_cmd_status; 5434 ciss_error_info->sense_data_length = sense_data_length; 5435 } 5436 5437 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5438 { 5439 int rc; 5440 char *kernel_buffer = NULL; 5441 u16 iu_length; 5442 size_t sense_data_length; 5443 IOCTL_Command_struct iocommand; 5444 struct pqi_raid_path_request request; 5445 struct pqi_raid_error_info pqi_error_info; 5446 struct ciss_error_info ciss_error_info; 5447 5448 if (pqi_ctrl_offline(ctrl_info)) 5449 return -ENXIO; 5450 if (!arg) 5451 return -EINVAL; 5452 if (!capable(CAP_SYS_RAWIO)) 5453 return -EPERM; 5454 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5455 return -EFAULT; 5456 if (iocommand.buf_size < 1 && 5457 iocommand.Request.Type.Direction != XFER_NONE) 5458 return -EINVAL; 5459 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5460 return -EINVAL; 5461 if (iocommand.Request.Type.Type != TYPE_CMD) 5462 return -EINVAL; 5463 5464 switch (iocommand.Request.Type.Direction) { 5465 case XFER_NONE: 5466 case XFER_WRITE: 5467 case XFER_READ: 5468 case XFER_READ | XFER_WRITE: 5469 break; 5470 default: 5471 return -EINVAL; 5472 } 5473 5474 if (iocommand.buf_size > 0) { 5475 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5476 if (!kernel_buffer) 5477 return -ENOMEM; 5478 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5479 if (copy_from_user(kernel_buffer, iocommand.buf, 5480 iocommand.buf_size)) { 5481 rc = -EFAULT; 5482 goto out; 5483 } 5484 } else { 5485 memset(kernel_buffer, 0, iocommand.buf_size); 5486 } 5487 } 5488 5489 memset(&request, 0, sizeof(request)); 5490 5491 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5492 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5493 PQI_REQUEST_HEADER_LENGTH; 5494 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5495 sizeof(request.lun_number)); 5496 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5497 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5498 5499 switch (iocommand.Request.Type.Direction) { 5500 case XFER_NONE: 5501 request.data_direction = SOP_NO_DIRECTION_FLAG; 5502 break; 5503 case 
XFER_WRITE: 5504 request.data_direction = SOP_WRITE_FLAG; 5505 break; 5506 case XFER_READ: 5507 request.data_direction = SOP_READ_FLAG; 5508 break; 5509 case XFER_READ | XFER_WRITE: 5510 request.data_direction = SOP_BIDIRECTIONAL; 5511 break; 5512 } 5513 5514 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5515 5516 if (iocommand.buf_size > 0) { 5517 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5518 5519 rc = pqi_map_single(ctrl_info->pci_dev, 5520 &request.sg_descriptors[0], kernel_buffer, 5521 iocommand.buf_size, DMA_BIDIRECTIONAL); 5522 if (rc) 5523 goto out; 5524 5525 iu_length += sizeof(request.sg_descriptors[0]); 5526 } 5527 5528 put_unaligned_le16(iu_length, &request.header.iu_length); 5529 5530 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 5531 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 5532 5533 if (iocommand.buf_size > 0) 5534 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5535 DMA_BIDIRECTIONAL); 5536 5537 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5538 5539 if (rc == 0) { 5540 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 5541 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 5542 iocommand.error_info.CommandStatus = 5543 ciss_error_info.command_status; 5544 sense_data_length = ciss_error_info.sense_data_length; 5545 if (sense_data_length) { 5546 if (sense_data_length > 5547 sizeof(iocommand.error_info.SenseInfo)) 5548 sense_data_length = 5549 sizeof(iocommand.error_info.SenseInfo); 5550 memcpy(iocommand.error_info.SenseInfo, 5551 pqi_error_info.data, sense_data_length); 5552 iocommand.error_info.SenseLen = sense_data_length; 5553 } 5554 } 5555 5556 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 5557 rc = -EFAULT; 5558 goto out; 5559 } 5560 5561 if (rc == 0 && iocommand.buf_size > 0 && 5562 (iocommand.Request.Type.Direction & XFER_READ)) { 5563 if (copy_to_user(iocommand.buf, kernel_buffer, 5564 iocommand.buf_size)) { 5565 rc = -EFAULT; 5566 } 5567 } 5568 5569 out: 5570 kfree(kernel_buffer); 5571 5572 return rc; 5573 } 5574 5575 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 5576 { 5577 int rc; 5578 struct pqi_ctrl_info *ctrl_info; 5579 5580 ctrl_info = shost_to_hba(sdev->host); 5581 5582 switch (cmd) { 5583 case CCISS_DEREGDISK: 5584 case CCISS_REGNEWDISK: 5585 case CCISS_REGNEWD: 5586 rc = pqi_scan_scsi_devices(ctrl_info); 5587 break; 5588 case CCISS_GETPCIINFO: 5589 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 5590 break; 5591 case CCISS_GETDRIVVER: 5592 rc = pqi_getdrivver_ioctl(arg); 5593 break; 5594 case CCISS_PASSTHRU: 5595 rc = pqi_passthru_ioctl(ctrl_info, arg); 5596 break; 5597 default: 5598 rc = -EINVAL; 5599 break; 5600 } 5601 5602 return rc; 5603 } 5604 5605 static ssize_t pqi_version_show(struct device *dev, 5606 struct device_attribute *attr, char *buffer) 5607 { 5608 ssize_t count = 0; 5609 struct Scsi_Host *shost; 5610 struct pqi_ctrl_info *ctrl_info; 5611 5612 shost = class_to_shost(dev); 5613 ctrl_info = shost_to_hba(shost); 5614 5615 count += snprintf(buffer + count, PAGE_SIZE - count, 5616 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 5617 5618 count += snprintf(buffer + count, PAGE_SIZE - count, 5619 "firmware: %s\n", ctrl_info->firmware_version); 5620 5621 return count; 5622 } 5623 5624 static ssize_t pqi_host_rescan_store(struct device *dev, 5625 struct device_attribute *attr, const char *buffer, size_t count) 5626 { 5627 struct Scsi_Host *shost = class_to_shost(dev); 5628 5629 
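	/*
	 * Writing any value to this sysfs attribute triggers a rescan of
	 * the controller's devices via pqi_scan_start(), e.g. (assuming the
	 * usual scsi_host class path, not taken from this file):
	 *
	 *	echo 1 > /sys/class/scsi_host/host<N>/rescan
	 */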
pqi_scan_start(shost); 5630 5631 return count; 5632 } 5633 5634 static ssize_t pqi_lockup_action_show(struct device *dev, 5635 struct device_attribute *attr, char *buffer) 5636 { 5637 int count = 0; 5638 unsigned int i; 5639 5640 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5641 if (pqi_lockup_actions[i].action == pqi_lockup_action) 5642 count += snprintf(buffer + count, PAGE_SIZE - count, 5643 "[%s] ", pqi_lockup_actions[i].name); 5644 else 5645 count += snprintf(buffer + count, PAGE_SIZE - count, 5646 "%s ", pqi_lockup_actions[i].name); 5647 } 5648 5649 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 5650 5651 return count; 5652 } 5653 5654 static ssize_t pqi_lockup_action_store(struct device *dev, 5655 struct device_attribute *attr, const char *buffer, size_t count) 5656 { 5657 unsigned int i; 5658 char *action_name; 5659 char action_name_buffer[32]; 5660 5661 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 5662 action_name = strstrip(action_name_buffer); 5663 5664 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5665 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 5666 pqi_lockup_action = pqi_lockup_actions[i].action; 5667 return count; 5668 } 5669 } 5670 5671 return -EINVAL; 5672 } 5673 5674 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 5675 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 5676 static DEVICE_ATTR(lockup_action, 0644, 5677 pqi_lockup_action_show, pqi_lockup_action_store); 5678 5679 static struct device_attribute *pqi_shost_attrs[] = { 5680 &dev_attr_version, 5681 &dev_attr_rescan, 5682 &dev_attr_lockup_action, 5683 NULL 5684 }; 5685 5686 static ssize_t pqi_sas_address_show(struct device *dev, 5687 struct device_attribute *attr, char *buffer) 5688 { 5689 struct pqi_ctrl_info *ctrl_info; 5690 struct scsi_device *sdev; 5691 struct pqi_scsi_dev *device; 5692 unsigned long flags; 5693 u64 sas_address; 5694 5695 sdev = to_scsi_device(dev); 5696 ctrl_info = shost_to_hba(sdev->host); 5697 5698 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5699 5700 device = sdev->hostdata; 5701 if (pqi_is_logical_device(device)) { 5702 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5703 flags); 5704 return -ENODEV; 5705 } 5706 sas_address = device->sas_address; 5707 5708 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5709 5710 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 5711 } 5712 5713 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 5714 struct device_attribute *attr, char *buffer) 5715 { 5716 struct pqi_ctrl_info *ctrl_info; 5717 struct scsi_device *sdev; 5718 struct pqi_scsi_dev *device; 5719 unsigned long flags; 5720 5721 sdev = to_scsi_device(dev); 5722 ctrl_info = shost_to_hba(sdev->host); 5723 5724 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5725 5726 device = sdev->hostdata; 5727 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 5728 buffer[1] = '\n'; 5729 buffer[2] = '\0'; 5730 5731 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5732 5733 return 2; 5734 } 5735 5736 static ssize_t pqi_raid_level_show(struct device *dev, 5737 struct device_attribute *attr, char *buffer) 5738 { 5739 struct pqi_ctrl_info *ctrl_info; 5740 struct scsi_device *sdev; 5741 struct pqi_scsi_dev *device; 5742 unsigned long flags; 5743 char *raid_level; 5744 5745 sdev = to_scsi_device(dev); 5746 ctrl_info = shost_to_hba(sdev->host); 5747 5748 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5749 5750 device = sdev->hostdata; 5751 5752 if (pqi_is_logical_device(device)) 5753 raid_level = pqi_raid_level_to_string(device->raid_level); 5754 else 5755 raid_level = "N/A"; 5756 5757 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5758 5759 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 5760 } 5761 5762 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 5763 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 5764 pqi_ssd_smart_path_enabled_show, NULL); 5765 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 5766 5767 static struct device_attribute *pqi_sdev_attrs[] = { 5768 &dev_attr_sas_address, 5769 &dev_attr_ssd_smart_path_enabled, 5770 &dev_attr_raid_level, 5771 NULL 5772 }; 5773 5774 static struct scsi_host_template pqi_driver_template = { 5775 .module = THIS_MODULE, 5776 .name = DRIVER_NAME_SHORT, 5777 .proc_name = DRIVER_NAME_SHORT, 5778 .queuecommand = pqi_scsi_queue_command, 5779 .scan_start = pqi_scan_start, 5780 .scan_finished = pqi_scan_finished, 5781 .this_id = -1, 5782 .use_clustering = ENABLE_CLUSTERING, 5783 .eh_device_reset_handler = pqi_eh_device_reset_handler, 5784 .ioctl = pqi_ioctl, 5785 .slave_alloc = pqi_slave_alloc, 5786 .map_queues = pqi_map_queues, 5787 .sdev_attrs = pqi_sdev_attrs, 5788 .shost_attrs = pqi_shost_attrs, 5789 }; 5790 5791 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 5792 { 5793 int rc; 5794 struct Scsi_Host *shost; 5795 5796 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 5797 if (!shost) { 5798 dev_err(&ctrl_info->pci_dev->dev, 5799 "scsi_host_alloc failed for controller %u\n", 5800 ctrl_info->ctrl_id); 5801 return -ENOMEM; 5802 } 5803 5804 shost->io_port = 0; 5805 shost->n_io_port = 0; 5806 shost->this_id = -1; 5807 shost->max_channel = PQI_MAX_BUS; 5808 shost->max_cmd_len = MAX_COMMAND_SIZE; 5809 shost->max_lun = ~0; 5810 shost->max_id = ~0; 5811 shost->max_sectors = ctrl_info->max_sectors; 5812 shost->can_queue = ctrl_info->scsi_ml_can_queue; 5813 shost->cmd_per_lun = shost->can_queue; 5814 shost->sg_tablesize = ctrl_info->sg_tablesize; 5815 shost->transportt = pqi_sas_transport_template; 5816 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 5817 shost->unique_id = shost->irq; 5818 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5819 shost->hostdata[0] = (unsigned long)ctrl_info; 5820 5821 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 5822 if (rc) { 5823 dev_err(&ctrl_info->pci_dev->dev, 5824 "scsi_add_host failed for controller %u\n", 5825 ctrl_info->ctrl_id); 5826 goto free_host; 5827 } 5828 5829 rc = pqi_add_sas_host(shost, ctrl_info); 5830 if (rc) { 5831 dev_err(&ctrl_info->pci_dev->dev, 5832 "add SAS host failed for controller %u\n", 5833 ctrl_info->ctrl_id); 5834 goto remove_host; 5835 } 5836 5837 ctrl_info->scsi_host = shost; 5838 5839 return 0; 5840 5841 remove_host: 5842 scsi_remove_host(shost); 5843 free_host: 5844 scsi_host_put(shost); 5845 5846 return 
rc; 5847 } 5848 5849 static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 5850 { 5851 struct Scsi_Host *shost; 5852 5853 pqi_delete_sas_host(ctrl_info); 5854 5855 shost = ctrl_info->scsi_host; 5856 if (!shost) 5857 return; 5858 5859 scsi_remove_host(shost); 5860 scsi_host_put(shost); 5861 } 5862 5863 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 5864 { 5865 int rc = 0; 5866 struct pqi_device_registers __iomem *pqi_registers; 5867 unsigned long timeout; 5868 unsigned int timeout_msecs; 5869 union pqi_reset_register reset_reg; 5870 5871 pqi_registers = ctrl_info->pqi_registers; 5872 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 5873 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 5874 5875 while (1) { 5876 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 5877 reset_reg.all_bits = readl(&pqi_registers->device_reset); 5878 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 5879 break; 5880 pqi_check_ctrl_health(ctrl_info); 5881 if (pqi_ctrl_offline(ctrl_info)) { 5882 rc = -ENXIO; 5883 break; 5884 } 5885 if (time_after(jiffies, timeout)) { 5886 rc = -ETIMEDOUT; 5887 break; 5888 } 5889 } 5890 5891 return rc; 5892 } 5893 5894 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 5895 { 5896 int rc; 5897 union pqi_reset_register reset_reg; 5898 5899 if (ctrl_info->pqi_reset_quiesce_supported) { 5900 rc = sis_pqi_reset_quiesce(ctrl_info); 5901 if (rc) { 5902 dev_err(&ctrl_info->pci_dev->dev, 5903 "PQI reset failed during quiesce with error %d\n", 5904 rc); 5905 return rc; 5906 } 5907 } 5908 5909 reset_reg.all_bits = 0; 5910 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 5911 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 5912 5913 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 5914 5915 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 5916 if (rc) 5917 dev_err(&ctrl_info->pci_dev->dev, 5918 "PQI reset failed with error %d\n", rc); 5919 5920 return rc; 5921 } 5922 5923 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 5924 { 5925 int rc; 5926 struct bmic_identify_controller *identify; 5927 5928 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 5929 if (!identify) 5930 return -ENOMEM; 5931 5932 rc = pqi_identify_controller(ctrl_info, identify); 5933 if (rc) 5934 goto out; 5935 5936 memcpy(ctrl_info->firmware_version, identify->firmware_version, 5937 sizeof(identify->firmware_version)); 5938 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 5939 snprintf(ctrl_info->firmware_version + 5940 strlen(ctrl_info->firmware_version), 5941 sizeof(ctrl_info->firmware_version), 5942 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 5943 5944 out: 5945 kfree(identify); 5946 5947 return rc; 5948 } 5949 5950 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 5951 { 5952 u32 table_length; 5953 u32 section_offset; 5954 void __iomem *table_iomem_addr; 5955 struct pqi_config_table *config_table; 5956 struct pqi_config_table_section_header *section; 5957 5958 table_length = ctrl_info->config_table_length; 5959 5960 config_table = kmalloc(table_length, GFP_KERNEL); 5961 if (!config_table) { 5962 dev_err(&ctrl_info->pci_dev->dev, 5963 "failed to allocate memory for PQI configuration table\n"); 5964 return -ENOMEM; 5965 } 5966 5967 /* 5968 * Copy the config table contents from I/O memory space into the 5969 * temporary buffer. 
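 * The copy is then walked section by section starting at
 * first_section_offset.  Only the heartbeat section is consumed here,
 * and the saved heartbeat_counter pointer refers back into the live
 * table mapped from BAR 0, not into this temporary copy.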
5970 */ 5971 table_iomem_addr = ctrl_info->iomem_base + 5972 ctrl_info->config_table_offset; 5973 memcpy_fromio(config_table, table_iomem_addr, table_length); 5974 5975 section_offset = 5976 get_unaligned_le32(&config_table->first_section_offset); 5977 5978 while (section_offset) { 5979 section = (void *)config_table + section_offset; 5980 5981 switch (get_unaligned_le16(&section->section_id)) { 5982 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 5983 if (pqi_disable_heartbeat) 5984 dev_warn(&ctrl_info->pci_dev->dev, 5985 "heartbeat disabled by module parameter\n"); 5986 else 5987 ctrl_info->heartbeat_counter = 5988 table_iomem_addr + 5989 section_offset + 5990 offsetof( 5991 struct pqi_config_table_heartbeat, 5992 heartbeat_counter); 5993 break; 5994 } 5995 5996 section_offset = 5997 get_unaligned_le16(&section->next_section_offset); 5998 } 5999 6000 kfree(config_table); 6001 6002 return 0; 6003 } 6004 6005 /* Switches the controller from PQI mode back into SIS mode. */ 6006 6007 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6008 { 6009 int rc; 6010 6011 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6012 rc = pqi_reset(ctrl_info); 6013 if (rc) 6014 return rc; 6015 rc = sis_reenable_sis_mode(ctrl_info); 6016 if (rc) { 6017 dev_err(&ctrl_info->pci_dev->dev, 6018 "re-enabling SIS mode failed with error %d\n", rc); 6019 return rc; 6020 } 6021 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6022 6023 return 0; 6024 } 6025 6026 /* 6027 * If the controller isn't already in SIS mode, this function forces it into 6028 * SIS mode. 6029 */ 6030 6031 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6032 { 6033 if (!sis_is_firmware_running(ctrl_info)) 6034 return -ENXIO; 6035 6036 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6037 return 0; 6038 6039 if (sis_is_kernel_up(ctrl_info)) { 6040 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6041 return 0; 6042 } 6043 6044 return pqi_revert_to_sis_mode(ctrl_info); 6045 } 6046 6047 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6048 { 6049 int rc; 6050 6051 rc = pqi_force_sis_mode(ctrl_info); 6052 if (rc) 6053 return rc; 6054 6055 /* 6056 * Wait until the controller is ready to start accepting SIS 6057 * commands. 6058 */ 6059 rc = sis_wait_for_ctrl_ready(ctrl_info); 6060 if (rc) 6061 return rc; 6062 6063 /* 6064 * Get the controller properties. This allows us to determine 6065 * whether or not it supports PQI mode. 6066 */ 6067 rc = sis_get_ctrl_properties(ctrl_info); 6068 if (rc) { 6069 dev_err(&ctrl_info->pci_dev->dev, 6070 "error obtaining controller properties\n"); 6071 return rc; 6072 } 6073 6074 rc = sis_get_pqi_capabilities(ctrl_info); 6075 if (rc) { 6076 dev_err(&ctrl_info->pci_dev->dev, 6077 "error obtaining controller capabilities\n"); 6078 return rc; 6079 } 6080 6081 if (reset_devices) { 6082 if (ctrl_info->max_outstanding_requests > 6083 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6084 ctrl_info->max_outstanding_requests = 6085 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6086 } else { 6087 if (ctrl_info->max_outstanding_requests > 6088 PQI_MAX_OUTSTANDING_REQUESTS) 6089 ctrl_info->max_outstanding_requests = 6090 PQI_MAX_OUTSTANDING_REQUESTS; 6091 } 6092 6093 pqi_calculate_io_resources(ctrl_info); 6094 6095 rc = pqi_alloc_error_buffer(ctrl_info); 6096 if (rc) { 6097 dev_err(&ctrl_info->pci_dev->dev, 6098 "failed to allocate PQI error buffer\n"); 6099 return rc; 6100 } 6101 6102 /* 6103 * If the function we are about to call succeeds, the 6104 * controller will transition from legacy SIS mode 6105 * into PQI mode. 
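 * The transition is confirmed below by pqi_wait_for_pqi_mode_ready();
 * if either step fails, controller initialization is aborted.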
6106 */ 6107 rc = sis_init_base_struct_addr(ctrl_info); 6108 if (rc) { 6109 dev_err(&ctrl_info->pci_dev->dev, 6110 "error initializing PQI mode\n"); 6111 return rc; 6112 } 6113 6114 /* Wait for the controller to complete the SIS -> PQI transition. */ 6115 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6116 if (rc) { 6117 dev_err(&ctrl_info->pci_dev->dev, 6118 "transition to PQI mode failed\n"); 6119 return rc; 6120 } 6121 6122 /* From here on, we are running in PQI mode. */ 6123 ctrl_info->pqi_mode_enabled = true; 6124 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6125 6126 rc = pqi_process_config_table(ctrl_info); 6127 if (rc) 6128 return rc; 6129 6130 rc = pqi_alloc_admin_queues(ctrl_info); 6131 if (rc) { 6132 dev_err(&ctrl_info->pci_dev->dev, 6133 "failed to allocate admin queues\n"); 6134 return rc; 6135 } 6136 6137 rc = pqi_create_admin_queues(ctrl_info); 6138 if (rc) { 6139 dev_err(&ctrl_info->pci_dev->dev, 6140 "error creating admin queues\n"); 6141 return rc; 6142 } 6143 6144 rc = pqi_report_device_capability(ctrl_info); 6145 if (rc) { 6146 dev_err(&ctrl_info->pci_dev->dev, 6147 "obtaining device capability failed\n"); 6148 return rc; 6149 } 6150 6151 rc = pqi_validate_device_capability(ctrl_info); 6152 if (rc) 6153 return rc; 6154 6155 pqi_calculate_queue_resources(ctrl_info); 6156 6157 rc = pqi_enable_msix_interrupts(ctrl_info); 6158 if (rc) 6159 return rc; 6160 6161 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 6162 ctrl_info->max_msix_vectors = 6163 ctrl_info->num_msix_vectors_enabled; 6164 pqi_calculate_queue_resources(ctrl_info); 6165 } 6166 6167 rc = pqi_alloc_io_resources(ctrl_info); 6168 if (rc) 6169 return rc; 6170 6171 rc = pqi_alloc_operational_queues(ctrl_info); 6172 if (rc) { 6173 dev_err(&ctrl_info->pci_dev->dev, 6174 "failed to allocate operational queues\n"); 6175 return rc; 6176 } 6177 6178 pqi_init_operational_queues(ctrl_info); 6179 6180 rc = pqi_request_irqs(ctrl_info); 6181 if (rc) 6182 return rc; 6183 6184 rc = pqi_create_queues(ctrl_info); 6185 if (rc) 6186 return rc; 6187 6188 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6189 6190 ctrl_info->controller_online = true; 6191 pqi_start_heartbeat_timer(ctrl_info); 6192 6193 rc = pqi_enable_events(ctrl_info); 6194 if (rc) { 6195 dev_err(&ctrl_info->pci_dev->dev, 6196 "error enabling events\n"); 6197 return rc; 6198 } 6199 6200 /* Register with the SCSI subsystem. 
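 * This is done only after the admin/operational queues, MSI-X
 * interrupts, and event notification have been set up above, so the
 * host can service I/O as soon as it becomes visible.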
*/ 6201 rc = pqi_register_scsi(ctrl_info); 6202 if (rc) 6203 return rc; 6204 6205 rc = pqi_get_ctrl_firmware_version(ctrl_info); 6206 if (rc) { 6207 dev_err(&ctrl_info->pci_dev->dev, 6208 "error obtaining firmware version\n"); 6209 return rc; 6210 } 6211 6212 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6213 if (rc) { 6214 dev_err(&ctrl_info->pci_dev->dev, 6215 "error updating host wellness\n"); 6216 return rc; 6217 } 6218 6219 pqi_schedule_update_time_worker(ctrl_info); 6220 6221 pqi_scan_scsi_devices(ctrl_info); 6222 6223 return 0; 6224 } 6225 6226 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 6227 { 6228 unsigned int i; 6229 struct pqi_admin_queues *admin_queues; 6230 struct pqi_event_queue *event_queue; 6231 6232 admin_queues = &ctrl_info->admin_queues; 6233 admin_queues->iq_pi_copy = 0; 6234 admin_queues->oq_ci_copy = 0; 6235 writel(0, admin_queues->oq_pi); 6236 6237 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6238 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 6239 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 6240 ctrl_info->queue_groups[i].oq_ci_copy = 0; 6241 6242 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 6243 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 6244 writel(0, ctrl_info->queue_groups[i].oq_pi); 6245 } 6246 6247 event_queue = &ctrl_info->event_queue; 6248 writel(0, event_queue->oq_pi); 6249 event_queue->oq_ci_copy = 0; 6250 } 6251 6252 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 6253 { 6254 int rc; 6255 6256 rc = pqi_force_sis_mode(ctrl_info); 6257 if (rc) 6258 return rc; 6259 6260 /* 6261 * Wait until the controller is ready to start accepting SIS 6262 * commands. 6263 */ 6264 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 6265 if (rc) 6266 return rc; 6267 6268 /* 6269 * If the function we are about to call succeeds, the 6270 * controller will transition from legacy SIS mode 6271 * into PQI mode. 6272 */ 6273 rc = sis_init_base_struct_addr(ctrl_info); 6274 if (rc) { 6275 dev_err(&ctrl_info->pci_dev->dev, 6276 "error initializing PQI mode\n"); 6277 return rc; 6278 } 6279 6280 /* Wait for the controller to complete the SIS -> PQI transition. */ 6281 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6282 if (rc) { 6283 dev_err(&ctrl_info->pci_dev->dev, 6284 "transition to PQI mode failed\n"); 6285 return rc; 6286 } 6287 6288 /* From here on, we are running in PQI mode. 
*/ 6289 ctrl_info->pqi_mode_enabled = true; 6290 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6291 6292 pqi_reinit_queues(ctrl_info); 6293 6294 rc = pqi_create_admin_queues(ctrl_info); 6295 if (rc) { 6296 dev_err(&ctrl_info->pci_dev->dev, 6297 "error creating admin queues\n"); 6298 return rc; 6299 } 6300 6301 rc = pqi_create_queues(ctrl_info); 6302 if (rc) 6303 return rc; 6304 6305 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6306 6307 ctrl_info->controller_online = true; 6308 pqi_start_heartbeat_timer(ctrl_info); 6309 pqi_ctrl_unblock_requests(ctrl_info); 6310 6311 rc = pqi_enable_events(ctrl_info); 6312 if (rc) { 6313 dev_err(&ctrl_info->pci_dev->dev, 6314 "error enabling events\n"); 6315 return rc; 6316 } 6317 6318 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6319 if (rc) { 6320 dev_err(&ctrl_info->pci_dev->dev, 6321 "error updating host wellness\n"); 6322 return rc; 6323 } 6324 6325 pqi_schedule_update_time_worker(ctrl_info); 6326 6327 pqi_scan_scsi_devices(ctrl_info); 6328 6329 return 0; 6330 } 6331 6332 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 6333 u16 timeout) 6334 { 6335 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 6336 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 6337 } 6338 6339 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 6340 { 6341 int rc; 6342 u64 mask; 6343 6344 rc = pci_enable_device(ctrl_info->pci_dev); 6345 if (rc) { 6346 dev_err(&ctrl_info->pci_dev->dev, 6347 "failed to enable PCI device\n"); 6348 return rc; 6349 } 6350 6351 if (sizeof(dma_addr_t) > 4) 6352 mask = DMA_BIT_MASK(64); 6353 else 6354 mask = DMA_BIT_MASK(32); 6355 6356 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 6357 if (rc) { 6358 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 6359 goto disable_device; 6360 } 6361 6362 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 6363 if (rc) { 6364 dev_err(&ctrl_info->pci_dev->dev, 6365 "failed to obtain PCI resources\n"); 6366 goto disable_device; 6367 } 6368 6369 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 6370 ctrl_info->pci_dev, 0), 6371 sizeof(struct pqi_ctrl_registers)); 6372 if (!ctrl_info->iomem_base) { 6373 dev_err(&ctrl_info->pci_dev->dev, 6374 "failed to map memory for controller registers\n"); 6375 rc = -ENOMEM; 6376 goto release_regions; 6377 } 6378 6379 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 6380 6381 /* Increase the PCIe completion timeout. */ 6382 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 6383 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 6384 if (rc) { 6385 dev_err(&ctrl_info->pci_dev->dev, 6386 "failed to set PCIe completion timeout\n"); 6387 goto release_regions; 6388 } 6389 6390 /* Enable bus mastering. 
*/ 6391 pci_set_master(ctrl_info->pci_dev); 6392 6393 ctrl_info->registers = ctrl_info->iomem_base; 6394 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 6395 6396 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 6397 6398 return 0; 6399 6400 release_regions: 6401 pci_release_regions(ctrl_info->pci_dev); 6402 disable_device: 6403 pci_disable_device(ctrl_info->pci_dev); 6404 6405 return rc; 6406 } 6407 6408 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 6409 { 6410 iounmap(ctrl_info->iomem_base); 6411 pci_release_regions(ctrl_info->pci_dev); 6412 if (pci_is_enabled(ctrl_info->pci_dev)) 6413 pci_disable_device(ctrl_info->pci_dev); 6414 pci_set_drvdata(ctrl_info->pci_dev, NULL); 6415 } 6416 6417 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 6418 { 6419 struct pqi_ctrl_info *ctrl_info; 6420 6421 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 6422 GFP_KERNEL, numa_node); 6423 if (!ctrl_info) 6424 return NULL; 6425 6426 mutex_init(&ctrl_info->scan_mutex); 6427 mutex_init(&ctrl_info->lun_reset_mutex); 6428 6429 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 6430 spin_lock_init(&ctrl_info->scsi_device_list_lock); 6431 6432 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 6433 atomic_set(&ctrl_info->num_interrupts, 0); 6434 6435 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6436 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6437 6438 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 6439 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6440 6441 sema_init(&ctrl_info->sync_request_sem, 6442 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 6443 init_waitqueue_head(&ctrl_info->block_requests_wait); 6444 6445 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 6446 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 6447 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 6448 pqi_raid_bypass_retry_worker); 6449 6450 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 6451 ctrl_info->irq_mode = IRQ_MODE_NONE; 6452 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 6453 6454 return ctrl_info; 6455 } 6456 6457 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 6458 { 6459 kfree(ctrl_info); 6460 } 6461 6462 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 6463 { 6464 pqi_free_irqs(ctrl_info); 6465 pqi_disable_msix_interrupts(ctrl_info); 6466 } 6467 6468 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 6469 { 6470 pqi_stop_heartbeat_timer(ctrl_info); 6471 pqi_free_interrupts(ctrl_info); 6472 if (ctrl_info->queue_memory_base) 6473 dma_free_coherent(&ctrl_info->pci_dev->dev, 6474 ctrl_info->queue_memory_length, 6475 ctrl_info->queue_memory_base, 6476 ctrl_info->queue_memory_base_dma_handle); 6477 if (ctrl_info->admin_queue_memory_base) 6478 dma_free_coherent(&ctrl_info->pci_dev->dev, 6479 ctrl_info->admin_queue_memory_length, 6480 ctrl_info->admin_queue_memory_base, 6481 ctrl_info->admin_queue_memory_base_dma_handle); 6482 pqi_free_all_io_requests(ctrl_info); 6483 if (ctrl_info->error_buffer) 6484 dma_free_coherent(&ctrl_info->pci_dev->dev, 6485 ctrl_info->error_buffer_length, 6486 ctrl_info->error_buffer, 6487 ctrl_info->error_buffer_dma_handle); 6488 if (ctrl_info->iomem_base) 6489 pqi_cleanup_pci_init(ctrl_info); 6490 pqi_free_ctrl_info(ctrl_info); 6491 } 6492 6493 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 6494 { 6495 pqi_cancel_rescan_worker(ctrl_info); 6496 
pqi_cancel_update_time_worker(ctrl_info); 6497 pqi_remove_all_scsi_devices(ctrl_info); 6498 pqi_unregister_scsi(ctrl_info); 6499 if (ctrl_info->pqi_mode_enabled) 6500 pqi_revert_to_sis_mode(ctrl_info); 6501 pqi_free_ctrl_resources(ctrl_info); 6502 } 6503 6504 static void pqi_perform_lockup_action(void) 6505 { 6506 switch (pqi_lockup_action) { 6507 case PANIC: 6508 panic("FATAL: Smart Family Controller lockup detected"); 6509 break; 6510 case REBOOT: 6511 emergency_restart(); 6512 break; 6513 case NONE: 6514 default: 6515 break; 6516 } 6517 } 6518 6519 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 6520 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 6521 .status = SAM_STAT_CHECK_CONDITION, 6522 }; 6523 6524 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 6525 { 6526 unsigned int i; 6527 struct pqi_io_request *io_request; 6528 struct scsi_cmnd *scmd; 6529 6530 for (i = 0; i < ctrl_info->max_io_slots; i++) { 6531 io_request = &ctrl_info->io_request_pool[i]; 6532 if (atomic_read(&io_request->refcount) == 0) 6533 continue; 6534 6535 scmd = io_request->scmd; 6536 if (scmd) { 6537 set_host_byte(scmd, DID_NO_CONNECT); 6538 } else { 6539 io_request->status = -ENXIO; 6540 io_request->error_info = 6541 &pqi_ctrl_offline_raid_error_info; 6542 } 6543 6544 io_request->io_complete_callback(io_request, 6545 io_request->context); 6546 } 6547 } 6548 6549 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 6550 { 6551 pqi_perform_lockup_action(); 6552 pqi_stop_heartbeat_timer(ctrl_info); 6553 pqi_free_interrupts(ctrl_info); 6554 pqi_cancel_rescan_worker(ctrl_info); 6555 pqi_cancel_update_time_worker(ctrl_info); 6556 pqi_ctrl_wait_until_quiesced(ctrl_info); 6557 pqi_fail_all_outstanding_requests(ctrl_info); 6558 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 6559 pqi_ctrl_unblock_requests(ctrl_info); 6560 } 6561 6562 static void pqi_ctrl_offline_worker(struct work_struct *work) 6563 { 6564 struct pqi_ctrl_info *ctrl_info; 6565 6566 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 6567 pqi_take_ctrl_offline_deferred(ctrl_info); 6568 } 6569 6570 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 6571 { 6572 if (!ctrl_info->controller_online) 6573 return; 6574 6575 ctrl_info->controller_online = false; 6576 ctrl_info->pqi_mode_enabled = false; 6577 pqi_ctrl_block_requests(ctrl_info); 6578 if (!pqi_disable_ctrl_shutdown) 6579 sis_shutdown_ctrl(ctrl_info); 6580 pci_disable_device(ctrl_info->pci_dev); 6581 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 6582 schedule_work(&ctrl_info->ctrl_offline_work); 6583 } 6584 6585 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 6586 const struct pci_device_id *id) 6587 { 6588 char *ctrl_description; 6589 6590 if (id->driver_data) 6591 ctrl_description = (char *)id->driver_data; 6592 else 6593 ctrl_description = "Microsemi Smart Family Controller"; 6594 6595 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 6596 } 6597 6598 static int pqi_pci_probe(struct pci_dev *pci_dev, 6599 const struct pci_device_id *id) 6600 { 6601 int rc; 6602 int node; 6603 struct pqi_ctrl_info *ctrl_info; 6604 6605 pqi_print_ctrl_info(pci_dev, id); 6606 6607 if (pqi_disable_device_id_wildcards && 6608 id->subvendor == PCI_ANY_ID && 6609 id->subdevice == PCI_ANY_ID) { 6610 dev_warn(&pci_dev->dev, 6611 "controller not probed because device ID wildcards are disabled\n"); 6612 return -ENODEV; 6613 } 6614 6615 if (id->subvendor == PCI_ANY_ID || 
id->subdevice == PCI_ANY_ID) 6616 dev_warn(&pci_dev->dev, 6617 "controller device ID matched using wildcards\n"); 6618 6619 node = dev_to_node(&pci_dev->dev); 6620 if (node == NUMA_NO_NODE) 6621 set_dev_node(&pci_dev->dev, 0); 6622 6623 ctrl_info = pqi_alloc_ctrl_info(node); 6624 if (!ctrl_info) { 6625 dev_err(&pci_dev->dev, 6626 "failed to allocate controller info block\n"); 6627 return -ENOMEM; 6628 } 6629 6630 ctrl_info->pci_dev = pci_dev; 6631 6632 rc = pqi_pci_init(ctrl_info); 6633 if (rc) 6634 goto error; 6635 6636 rc = pqi_ctrl_init(ctrl_info); 6637 if (rc) 6638 goto error; 6639 6640 return 0; 6641 6642 error: 6643 pqi_remove_ctrl(ctrl_info); 6644 6645 return rc; 6646 } 6647 6648 static void pqi_pci_remove(struct pci_dev *pci_dev) 6649 { 6650 struct pqi_ctrl_info *ctrl_info; 6651 6652 ctrl_info = pci_get_drvdata(pci_dev); 6653 if (!ctrl_info) 6654 return; 6655 6656 pqi_remove_ctrl(ctrl_info); 6657 } 6658 6659 static void pqi_shutdown(struct pci_dev *pci_dev) 6660 { 6661 int rc; 6662 struct pqi_ctrl_info *ctrl_info; 6663 6664 ctrl_info = pci_get_drvdata(pci_dev); 6665 if (!ctrl_info) 6666 goto error; 6667 6668 /* 6669 * Write all data in the controller's battery-backed cache to 6670 * storage. 6671 */ 6672 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 6673 pqi_reset(ctrl_info); 6674 if (rc == 0) 6675 return; 6676 6677 error: 6678 dev_warn(&pci_dev->dev, 6679 "unable to flush controller cache\n"); 6680 } 6681 6682 static void pqi_process_lockup_action_param(void) 6683 { 6684 unsigned int i; 6685 6686 if (!pqi_lockup_action_param) 6687 return; 6688 6689 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6690 if (strcmp(pqi_lockup_action_param, 6691 pqi_lockup_actions[i].name) == 0) { 6692 pqi_lockup_action = pqi_lockup_actions[i].action; 6693 return; 6694 } 6695 } 6696 6697 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 6698 DRIVER_NAME_SHORT, pqi_lockup_action_param); 6699 } 6700 6701 static void pqi_process_module_params(void) 6702 { 6703 pqi_process_lockup_action_param(); 6704 } 6705 6706 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 6707 { 6708 struct pqi_ctrl_info *ctrl_info; 6709 6710 ctrl_info = pci_get_drvdata(pci_dev); 6711 6712 pqi_disable_events(ctrl_info); 6713 pqi_cancel_update_time_worker(ctrl_info); 6714 pqi_cancel_rescan_worker(ctrl_info); 6715 pqi_wait_until_scan_finished(ctrl_info); 6716 pqi_wait_until_lun_reset_finished(ctrl_info); 6717 pqi_flush_cache(ctrl_info, SUSPEND); 6718 pqi_ctrl_block_requests(ctrl_info); 6719 pqi_ctrl_wait_until_quiesced(ctrl_info); 6720 pqi_wait_until_inbound_queues_empty(ctrl_info); 6721 pqi_ctrl_wait_for_pending_io(ctrl_info); 6722 pqi_stop_heartbeat_timer(ctrl_info); 6723 6724 if (state.event == PM_EVENT_FREEZE) 6725 return 0; 6726 6727 pci_save_state(pci_dev); 6728 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 6729 6730 ctrl_info->controller_online = false; 6731 ctrl_info->pqi_mode_enabled = false; 6732 6733 return 0; 6734 } 6735 6736 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 6737 { 6738 int rc; 6739 struct pqi_ctrl_info *ctrl_info; 6740 6741 ctrl_info = pci_get_drvdata(pci_dev); 6742 6743 if (pci_dev->current_state != PCI_D0) { 6744 ctrl_info->max_hw_queue_index = 0; 6745 pqi_free_interrupts(ctrl_info); 6746 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 6747 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 6748 IRQF_SHARED, DRIVER_NAME_SHORT, 6749 &ctrl_info->queue_groups[0]); 6750 if (rc) { 6751 
dev_err(&ctrl_info->pci_dev->dev, 6752 "irq %u init failed with error %d\n", 6753 pci_dev->irq, rc); 6754 return rc; 6755 } 6756 pqi_start_heartbeat_timer(ctrl_info); 6757 pqi_ctrl_unblock_requests(ctrl_info); 6758 return 0; 6759 } 6760 6761 pci_set_power_state(pci_dev, PCI_D0); 6762 pci_restore_state(pci_dev); 6763 6764 return pqi_ctrl_init_resume(ctrl_info); 6765 } 6766 6767 /* Define the PCI IDs for the controllers that we support. */ 6768 static const struct pci_device_id pqi_pci_id_table[] = { 6769 { 6770 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6771 0x105b, 0x1211) 6772 }, 6773 { 6774 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6775 0x105b, 0x1321) 6776 }, 6777 { 6778 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6779 0x152d, 0x8a22) 6780 }, 6781 { 6782 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6783 0x152d, 0x8a23) 6784 }, 6785 { 6786 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6787 0x152d, 0x8a24) 6788 }, 6789 { 6790 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6791 0x152d, 0x8a36) 6792 }, 6793 { 6794 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6795 0x152d, 0x8a37) 6796 }, 6797 { 6798 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6799 0x193d, 0x8460) 6800 }, 6801 { 6802 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6803 0x193d, 0x8461) 6804 }, 6805 { 6806 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6807 0x193d, 0xf460) 6808 }, 6809 { 6810 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6811 0x193d, 0xf461) 6812 }, 6813 { 6814 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6815 0x1bd4, 0x0045) 6816 }, 6817 { 6818 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6819 0x1bd4, 0x0046) 6820 }, 6821 { 6822 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6823 0x1bd4, 0x0047) 6824 }, 6825 { 6826 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6827 0x1bd4, 0x0048) 6828 }, 6829 { 6830 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6831 0x1bd4, 0x004a) 6832 }, 6833 { 6834 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6835 0x1bd4, 0x004b) 6836 }, 6837 { 6838 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6839 0x1bd4, 0x004c) 6840 }, 6841 { 6842 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6843 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 6844 }, 6845 { 6846 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6847 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 6848 }, 6849 { 6850 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6851 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 6852 }, 6853 { 6854 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6855 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 6856 }, 6857 { 6858 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6859 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 6860 }, 6861 { 6862 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6863 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 6864 }, 6865 { 6866 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6867 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 6868 }, 6869 { 6870 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6871 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 6872 }, 6873 { 6874 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6875 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 6876 }, 6877 { 6878 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6879 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 6880 }, 6881 { 6882 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6883 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 6884 }, 6885 { 6886 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6887 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 6888 }, 6889 { 6890 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6891 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 6892 }, 6893 { 6894 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6895 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 6896 }, 6897 { 
6898 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6899 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 6900 }, 6901 { 6902 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6903 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 6904 }, 6905 { 6906 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6907 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 6908 }, 6909 { 6910 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6911 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 6912 }, 6913 { 6914 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6915 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 6916 }, 6917 { 6918 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6919 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 6920 }, 6921 { 6922 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6923 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 6924 }, 6925 { 6926 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6927 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 6928 }, 6929 { 6930 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6931 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 6932 }, 6933 { 6934 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6935 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 6936 }, 6937 { 6938 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6939 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 6940 }, 6941 { 6942 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6943 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 6944 }, 6945 { 6946 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6947 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 6948 }, 6949 { 6950 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6951 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 6952 }, 6953 { 6954 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6955 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 6956 }, 6957 { 6958 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6959 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 6960 }, 6961 { 6962 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6963 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 6964 }, 6965 { 6966 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6967 PCI_VENDOR_ID_ADVANTECH, 0x8312) 6968 }, 6969 { 6970 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6971 PCI_VENDOR_ID_DELL, 0x1fe0) 6972 }, 6973 { 6974 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6975 PCI_VENDOR_ID_HP, 0x0600) 6976 }, 6977 { 6978 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6979 PCI_VENDOR_ID_HP, 0x0601) 6980 }, 6981 { 6982 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6983 PCI_VENDOR_ID_HP, 0x0602) 6984 }, 6985 { 6986 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6987 PCI_VENDOR_ID_HP, 0x0603) 6988 }, 6989 { 6990 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6991 PCI_VENDOR_ID_HP, 0x0609) 6992 }, 6993 { 6994 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6995 PCI_VENDOR_ID_HP, 0x0650) 6996 }, 6997 { 6998 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 6999 PCI_VENDOR_ID_HP, 0x0651) 7000 }, 7001 { 7002 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7003 PCI_VENDOR_ID_HP, 0x0652) 7004 }, 7005 { 7006 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7007 PCI_VENDOR_ID_HP, 0x0653) 7008 }, 7009 { 7010 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7011 PCI_VENDOR_ID_HP, 0x0654) 7012 }, 7013 { 7014 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7015 PCI_VENDOR_ID_HP, 0x0655) 7016 }, 7017 { 7018 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7019 PCI_VENDOR_ID_HP, 0x0700) 7020 }, 7021 { 7022 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7023 PCI_VENDOR_ID_HP, 0x0701) 7024 }, 7025 { 7026 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7027 PCI_VENDOR_ID_HP, 0x1001) 7028 }, 7029 { 7030 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7031 PCI_VENDOR_ID_HP, 0x1100) 7032 }, 7033 { 7034 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7035 PCI_VENDOR_ID_HP, 0x1101) 7036 }, 7037 { 7038 
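		/*
		 * Catch-all entry: matches any subsystem vendor/device ID on
		 * the Adaptec 0x028f device.  pqi_pci_probe() warns when a
		 * wildcard match occurs and refuses it entirely if the
		 * disable_device_id_wildcards module parameter is set.
		 */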
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7039 PCI_ANY_ID, PCI_ANY_ID) 7040 }, 7041 { 0 } 7042 }; 7043 7044 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 7045 7046 static struct pci_driver pqi_pci_driver = { 7047 .name = DRIVER_NAME_SHORT, 7048 .id_table = pqi_pci_id_table, 7049 .probe = pqi_pci_probe, 7050 .remove = pqi_pci_remove, 7051 .shutdown = pqi_shutdown, 7052 #if defined(CONFIG_PM) 7053 .suspend = pqi_suspend, 7054 .resume = pqi_resume, 7055 #endif 7056 }; 7057 7058 static int __init pqi_init(void) 7059 { 7060 int rc; 7061 7062 pr_info(DRIVER_NAME "\n"); 7063 7064 pqi_sas_transport_template = 7065 sas_attach_transport(&pqi_sas_transport_functions); 7066 if (!pqi_sas_transport_template) 7067 return -ENODEV; 7068 7069 pqi_process_module_params(); 7070 7071 rc = pci_register_driver(&pqi_pci_driver); 7072 if (rc) 7073 sas_release_transport(pqi_sas_transport_template); 7074 7075 return rc; 7076 } 7077 7078 static void __exit pqi_cleanup(void) 7079 { 7080 pci_unregister_driver(&pqi_pci_driver); 7081 sas_release_transport(pqi_sas_transport_template); 7082 } 7083 7084 module_init(pqi_init); 7085 module_exit(pqi_cleanup); 7086 7087 static void __attribute__((unused)) verify_structures(void) 7088 { 7089 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7090 sis_host_to_ctrl_doorbell) != 0x20); 7091 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7092 sis_interrupt_mask) != 0x34); 7093 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7094 sis_ctrl_to_host_doorbell) != 0x9c); 7095 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7096 sis_ctrl_to_host_doorbell_clear) != 0xa0); 7097 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7098 sis_driver_scratch) != 0xb0); 7099 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7100 sis_firmware_status) != 0xbc); 7101 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7102 sis_mailbox) != 0x1000); 7103 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7104 pqi_registers) != 0x4000); 7105 7106 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7107 iu_type) != 0x0); 7108 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7109 iu_length) != 0x2); 7110 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7111 response_queue_id) != 0x4); 7112 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7113 work_area) != 0x6); 7114 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 7115 7116 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7117 status) != 0x0); 7118 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7119 service_response) != 0x1); 7120 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7121 data_present) != 0x2); 7122 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7123 reserved) != 0x3); 7124 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7125 residual_count) != 0x4); 7126 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7127 data_length) != 0x8); 7128 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7129 reserved1) != 0xa); 7130 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7131 data) != 0xc); 7132 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 7133 7134 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7135 data_in_result) != 0x0); 7136 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7137 data_out_result) != 0x1); 7138 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7139 reserved) != 0x2); 7140 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7141 status) != 0x5); 7142 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7143 status_qualifier) != 0x6); 7144 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7145 sense_data_length) != 0x8); 7146 
BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7147 response_data_length) != 0xa); 7148 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7149 data_in_transferred) != 0xc); 7150 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7151 data_out_transferred) != 0x10); 7152 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7153 data) != 0x14); 7154 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 7155 7156 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7157 signature) != 0x0); 7158 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7159 function_and_status_code) != 0x8); 7160 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7161 max_admin_iq_elements) != 0x10); 7162 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7163 max_admin_oq_elements) != 0x11); 7164 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7165 admin_iq_element_length) != 0x12); 7166 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7167 admin_oq_element_length) != 0x13); 7168 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7169 max_reset_timeout) != 0x14); 7170 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7171 legacy_intx_status) != 0x18); 7172 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7173 legacy_intx_mask_set) != 0x1c); 7174 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7175 legacy_intx_mask_clear) != 0x20); 7176 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7177 device_status) != 0x40); 7178 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7179 admin_iq_pi_offset) != 0x48); 7180 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7181 admin_oq_ci_offset) != 0x50); 7182 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7183 admin_iq_element_array_addr) != 0x58); 7184 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7185 admin_oq_element_array_addr) != 0x60); 7186 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7187 admin_iq_ci_addr) != 0x68); 7188 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7189 admin_oq_pi_addr) != 0x70); 7190 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7191 admin_iq_num_elements) != 0x78); 7192 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7193 admin_oq_num_elements) != 0x79); 7194 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7195 admin_queue_int_msg_num) != 0x7a); 7196 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7197 device_error) != 0x80); 7198 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7199 error_details) != 0x88); 7200 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7201 device_reset) != 0x90); 7202 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7203 power_action) != 0x94); 7204 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 7205 7206 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7207 header.iu_type) != 0); 7208 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7209 header.iu_length) != 2); 7210 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7211 header.work_area) != 6); 7212 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7213 request_id) != 8); 7214 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7215 function_code) != 10); 7216 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7217 data.report_device_capability.buffer_length) != 44); 7218 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7219 data.report_device_capability.sg_descriptor) != 48); 7220 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7221 data.create_operational_iq.queue_id) != 12); 7222 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7223 
data.create_operational_iq.element_array_addr) != 16); 7224 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7225 data.create_operational_iq.ci_addr) != 24); 7226 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7227 data.create_operational_iq.num_elements) != 32); 7228 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7229 data.create_operational_iq.element_length) != 34); 7230 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7231 data.create_operational_iq.queue_protocol) != 36); 7232 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7233 data.create_operational_oq.queue_id) != 12); 7234 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7235 data.create_operational_oq.element_array_addr) != 16); 7236 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7237 data.create_operational_oq.pi_addr) != 24); 7238 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7239 data.create_operational_oq.num_elements) != 32); 7240 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7241 data.create_operational_oq.element_length) != 34); 7242 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7243 data.create_operational_oq.queue_protocol) != 36); 7244 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7245 data.create_operational_oq.int_msg_num) != 40); 7246 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7247 data.create_operational_oq.coalescing_count) != 42); 7248 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7249 data.create_operational_oq.min_coalescing_time) != 44); 7250 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7251 data.create_operational_oq.max_coalescing_time) != 48); 7252 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7253 data.delete_operational_queue.queue_id) != 12); 7254 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 7255 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7256 data.create_operational_iq) != 64 - 11); 7257 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7258 data.create_operational_oq) != 64 - 11); 7259 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7260 data.delete_operational_queue) != 64 - 11); 7261 7262 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7263 header.iu_type) != 0); 7264 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7265 header.iu_length) != 2); 7266 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7267 header.work_area) != 6); 7268 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7269 request_id) != 8); 7270 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7271 function_code) != 10); 7272 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7273 status) != 11); 7274 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7275 data.create_operational_iq.status_descriptor) != 12); 7276 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7277 data.create_operational_iq.iq_pi_offset) != 16); 7278 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7279 data.create_operational_oq.status_descriptor) != 12); 7280 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7281 data.create_operational_oq.oq_ci_offset) != 16); 7282 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 7283 7284 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7285 header.iu_type) != 0); 7286 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7287 header.iu_length) != 2); 7288 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7289 header.response_queue_id) != 4); 
7290 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7291 header.work_area) != 6); 7292 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7293 request_id) != 8); 7294 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7295 nexus_id) != 10); 7296 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7297 buffer_length) != 12); 7298 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7299 lun_number) != 16); 7300 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7301 protocol_specific) != 24); 7302 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7303 error_index) != 27); 7304 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7305 cdb) != 32); 7306 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7307 sg_descriptors) != 64); 7308 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 7309 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7310 7311 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7312 header.iu_type) != 0); 7313 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7314 header.iu_length) != 2); 7315 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7316 header.response_queue_id) != 4); 7317 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7318 header.work_area) != 6); 7319 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7320 request_id) != 8); 7321 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7322 nexus_id) != 12); 7323 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7324 buffer_length) != 16); 7325 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7326 data_encryption_key_index) != 22); 7327 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7328 encrypt_tweak_lower) != 24); 7329 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7330 encrypt_tweak_upper) != 28); 7331 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7332 cdb) != 32); 7333 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7334 error_index) != 48); 7335 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7336 num_sg_descriptors) != 50); 7337 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7338 cdb_length) != 51); 7339 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7340 lun_number) != 52); 7341 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7342 sg_descriptors) != 64); 7343 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 7344 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7345 7346 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7347 header.iu_type) != 0); 7348 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7349 header.iu_length) != 2); 7350 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7351 request_id) != 8); 7352 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7353 error_index) != 10); 7354 7355 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7356 header.iu_type) != 0); 7357 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7358 header.iu_length) != 2); 7359 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7360 header.response_queue_id) != 4); 7361 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7362 request_id) != 8); 7363 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7364 data.report_event_configuration.buffer_length) != 12); 7365 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7366 data.report_event_configuration.sg_descriptors) != 16); 7367 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7368 data.set_event_configuration.global_event_oq_id) != 10); 7369 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7370 data.set_event_configuration.buffer_length) != 12); 7371 BUILD_BUG_ON(offsetof(struct 
pqi_general_management_request, 7372 data.set_event_configuration.sg_descriptors) != 16); 7373 7374 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7375 max_inbound_iu_length) != 6); 7376 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor, 7377 max_outbound_iu_length) != 14); 7378 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16); 7379 7380 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7381 data_length) != 0); 7382 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7383 iq_arbitration_priority_support_bitmask) != 8); 7384 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7385 maximum_aw_a) != 9); 7386 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7387 maximum_aw_b) != 10); 7388 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7389 maximum_aw_c) != 11); 7390 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7391 max_inbound_queues) != 16); 7392 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7393 max_elements_per_iq) != 18); 7394 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7395 max_iq_element_length) != 24); 7396 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7397 min_iq_element_length) != 26); 7398 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7399 max_outbound_queues) != 30); 7400 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7401 max_elements_per_oq) != 32); 7402 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7403 intr_coalescing_time_granularity) != 34); 7404 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7405 max_oq_element_length) != 36); 7406 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7407 min_oq_element_length) != 38); 7408 BUILD_BUG_ON(offsetof(struct pqi_device_capability, 7409 iu_layer_descriptors) != 64); 7410 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576); 7411 7412 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7413 event_type) != 0); 7414 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor, 7415 oq_id) != 2); 7416 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4); 7417 7418 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7419 num_event_descriptors) != 2); 7420 BUILD_BUG_ON(offsetof(struct pqi_event_config, 7421 descriptors) != 4); 7422 7423 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS != 7424 ARRAY_SIZE(pqi_supported_event_types)); 7425 7426 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7427 header.iu_type) != 0); 7428 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7429 header.iu_length) != 2); 7430 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7431 event_type) != 8); 7432 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7433 event_id) != 10); 7434 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7435 additional_event_id) != 12); 7436 BUILD_BUG_ON(offsetof(struct pqi_event_response, 7437 data) != 16); 7438 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32); 7439 7440 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7441 header.iu_type) != 0); 7442 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7443 header.iu_length) != 2); 7444 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7445 event_type) != 8); 7446 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7447 event_id) != 10); 7448 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request, 7449 additional_event_id) != 12); 7450 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16); 7451 7452 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7453 header.iu_type) != 0); 7454 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7455 header.iu_length) != 2); 
7456 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7457 request_id) != 8); 7458 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7459 nexus_id) != 10); 7460 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7461 lun_number) != 16); 7462 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7463 protocol_specific) != 24); 7464 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7465 outbound_queue_id_to_manage) != 26); 7466 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7467 request_id_to_manage) != 28); 7468 BUILD_BUG_ON(offsetof(struct pqi_task_management_request, 7469 task_management_function) != 30); 7470 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32); 7471 7472 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7473 header.iu_type) != 0); 7474 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7475 header.iu_length) != 2); 7476 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7477 request_id) != 8); 7478 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7479 nexus_id) != 10); 7480 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7481 additional_response_info) != 12); 7482 BUILD_BUG_ON(offsetof(struct pqi_task_management_response, 7483 response_code) != 15); 7484 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16); 7485 7486 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7487 configured_logical_drive_count) != 0); 7488 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7489 configuration_signature) != 1); 7490 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7491 firmware_version) != 5); 7492 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7493 extended_logical_unit_count) != 154); 7494 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7495 firmware_build_number) != 190); 7496 BUILD_BUG_ON(offsetof(struct bmic_identify_controller, 7497 controller_mode) != 292); 7498 7499 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7500 phys_bay_in_box) != 115); 7501 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7502 device_type) != 120); 7503 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7504 redundant_path_present_map) != 1736); 7505 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7506 active_path_number) != 1738); 7507 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7508 alternate_paths_phys_connector) != 1739); 7509 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7510 alternate_paths_phys_box_on_port) != 1755); 7511 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device, 7512 current_queue_depth_limit) != 1796); 7513 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560); 7514 7515 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255); 7516 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255); 7517 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH % 7518 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7519 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH % 7520 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7521 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560); 7522 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH % 7523 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7524 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560); 7525 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH % 7526 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0); 7527 7528 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS); 7529 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= 7530 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP); 7531 } 
7532