/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.1.4-130"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		1
#define DRIVER_RELEASE		4
#define DRIVER_REVISION		130

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer
	    || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

/*
 * Build a RAID path (CISS/BMIC pass-through) request for the given command
 * and DMA-map the single data buffer into its first SG descriptor.
 */
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}

static
inline void pqi_reinit_io_request(struct pqi_io_request *io_request) 478 { 479 io_request->scmd = NULL; 480 io_request->status = 0; 481 io_request->error_info = NULL; 482 io_request->raid_bypass = false; 483 } 484 485 static struct pqi_io_request *pqi_alloc_io_request( 486 struct pqi_ctrl_info *ctrl_info) 487 { 488 struct pqi_io_request *io_request; 489 u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ 490 491 while (1) { 492 io_request = &ctrl_info->io_request_pool[i]; 493 if (atomic_inc_return(&io_request->refcount) == 1) 494 break; 495 atomic_dec(&io_request->refcount); 496 i = (i + 1) % ctrl_info->max_io_slots; 497 } 498 499 /* benignly racy */ 500 ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; 501 502 pqi_reinit_io_request(io_request); 503 504 return io_request; 505 } 506 507 static void pqi_free_io_request(struct pqi_io_request *io_request) 508 { 509 atomic_dec(&io_request->refcount); 510 } 511 512 static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, 513 struct bmic_identify_controller *buffer) 514 { 515 int rc; 516 enum dma_data_direction dir; 517 struct pqi_raid_path_request request; 518 519 rc = pqi_build_raid_path_request(ctrl_info, &request, 520 BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer, 521 sizeof(*buffer), 0, &dir); 522 if (rc) 523 return rc; 524 525 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 526 NULL, NO_TIMEOUT); 527 528 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 529 return rc; 530 } 531 532 static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, 533 u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length) 534 { 535 int rc; 536 enum dma_data_direction dir; 537 struct pqi_raid_path_request request; 538 539 rc = pqi_build_raid_path_request(ctrl_info, &request, 540 INQUIRY, scsi3addr, buffer, buffer_length, vpd_page, 541 &dir); 542 if (rc) 543 return rc; 544 545 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 546 NULL, NO_TIMEOUT); 547 548 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 549 return rc; 550 } 551 552 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 553 struct pqi_scsi_dev *device, 554 struct bmic_identify_physical_device *buffer, 555 size_t buffer_length) 556 { 557 int rc; 558 enum dma_data_direction dir; 559 u16 bmic_device_index; 560 struct pqi_raid_path_request request; 561 562 rc = pqi_build_raid_path_request(ctrl_info, &request, 563 BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer, 564 buffer_length, 0, &dir); 565 if (rc) 566 return rc; 567 568 bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr); 569 request.cdb[2] = (u8)bmic_device_index; 570 request.cdb[9] = (u8)(bmic_device_index >> 8); 571 572 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 573 0, NULL, NO_TIMEOUT); 574 575 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 576 return rc; 577 } 578 579 static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info, 580 enum bmic_flush_cache_shutdown_event shutdown_event) 581 { 582 int rc; 583 struct pqi_raid_path_request request; 584 struct bmic_flush_cache *flush_cache; 585 enum dma_data_direction dir; 586 587 /* 588 * Don't bother trying to flush the cache if the controller is 589 * locked up. 
590 */ 591 if (pqi_ctrl_offline(ctrl_info)) 592 return -ENXIO; 593 594 flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL); 595 if (!flush_cache) 596 return -ENOMEM; 597 598 flush_cache->shutdown_event = shutdown_event; 599 600 rc = pqi_build_raid_path_request(ctrl_info, &request, 601 SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache, 602 sizeof(*flush_cache), 0, &dir); 603 if (rc) 604 goto out; 605 606 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 607 0, NULL, NO_TIMEOUT); 608 609 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 610 out: 611 kfree(flush_cache); 612 613 return rc; 614 } 615 616 static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, 617 void *buffer, size_t buffer_length) 618 { 619 int rc; 620 struct pqi_raid_path_request request; 621 enum dma_data_direction dir; 622 623 rc = pqi_build_raid_path_request(ctrl_info, &request, 624 BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer, 625 buffer_length, 0, &dir); 626 if (rc) 627 return rc; 628 629 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 630 0, NULL, NO_TIMEOUT); 631 632 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 633 return rc; 634 } 635 636 #pragma pack(1) 637 638 struct bmic_host_wellness_driver_version { 639 u8 start_tag[4]; 640 u8 driver_version_tag[2]; 641 __le16 driver_version_length; 642 char driver_version[32]; 643 u8 end_tag[2]; 644 }; 645 646 #pragma pack() 647 648 static int pqi_write_driver_version_to_host_wellness( 649 struct pqi_ctrl_info *ctrl_info) 650 { 651 int rc; 652 struct bmic_host_wellness_driver_version *buffer; 653 size_t buffer_length; 654 655 buffer_length = sizeof(*buffer); 656 657 buffer = kmalloc(buffer_length, GFP_KERNEL); 658 if (!buffer) 659 return -ENOMEM; 660 661 buffer->start_tag[0] = '<'; 662 buffer->start_tag[1] = 'H'; 663 buffer->start_tag[2] = 'W'; 664 buffer->start_tag[3] = '>'; 665 buffer->driver_version_tag[0] = 'D'; 666 buffer->driver_version_tag[1] = 'V'; 667 put_unaligned_le16(sizeof(buffer->driver_version), 668 &buffer->driver_version_length); 669 strncpy(buffer->driver_version, "Linux " DRIVER_VERSION, 670 sizeof(buffer->driver_version) - 1); 671 buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0'; 672 buffer->end_tag[0] = 'Z'; 673 buffer->end_tag[1] = 'Z'; 674 675 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 676 677 kfree(buffer); 678 679 return rc; 680 } 681 682 #pragma pack(1) 683 684 struct bmic_host_wellness_time { 685 u8 start_tag[4]; 686 u8 time_tag[2]; 687 __le16 time_length; 688 u8 time[8]; 689 u8 dont_write_tag[2]; 690 u8 end_tag[2]; 691 }; 692 693 #pragma pack() 694 695 static int pqi_write_current_time_to_host_wellness( 696 struct pqi_ctrl_info *ctrl_info) 697 { 698 int rc; 699 struct bmic_host_wellness_time *buffer; 700 size_t buffer_length; 701 time64_t local_time; 702 unsigned int year; 703 struct tm tm; 704 705 buffer_length = sizeof(*buffer); 706 707 buffer = kmalloc(buffer_length, GFP_KERNEL); 708 if (!buffer) 709 return -ENOMEM; 710 711 buffer->start_tag[0] = '<'; 712 buffer->start_tag[1] = 'H'; 713 buffer->start_tag[2] = 'W'; 714 buffer->start_tag[3] = '>'; 715 buffer->time_tag[0] = 'T'; 716 buffer->time_tag[1] = 'D'; 717 put_unaligned_le16(sizeof(buffer->time), 718 &buffer->time_length); 719 720 local_time = ktime_get_real_seconds(); 721 time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm); 722 year = tm.tm_year + 1900; 723 724 buffer->time[0] = bin2bcd(tm.tm_hour); 725 buffer->time[1] = bin2bcd(tm.tm_min); 726 
buffer->time[2] = bin2bcd(tm.tm_sec); 727 buffer->time[3] = 0; 728 buffer->time[4] = bin2bcd(tm.tm_mon + 1); 729 buffer->time[5] = bin2bcd(tm.tm_mday); 730 buffer->time[6] = bin2bcd(year / 100); 731 buffer->time[7] = bin2bcd(year % 100); 732 733 buffer->dont_write_tag[0] = 'D'; 734 buffer->dont_write_tag[1] = 'W'; 735 buffer->end_tag[0] = 'Z'; 736 buffer->end_tag[1] = 'Z'; 737 738 rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length); 739 740 kfree(buffer); 741 742 return rc; 743 } 744 745 #define PQI_UPDATE_TIME_WORK_INTERVAL (24UL * 60 * 60 * HZ) 746 747 static void pqi_update_time_worker(struct work_struct *work) 748 { 749 int rc; 750 struct pqi_ctrl_info *ctrl_info; 751 752 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 753 update_time_work); 754 755 if (pqi_ctrl_offline(ctrl_info)) 756 return; 757 758 rc = pqi_write_current_time_to_host_wellness(ctrl_info); 759 if (rc) 760 dev_warn(&ctrl_info->pci_dev->dev, 761 "error updating time on controller\n"); 762 763 schedule_delayed_work(&ctrl_info->update_time_work, 764 PQI_UPDATE_TIME_WORK_INTERVAL); 765 } 766 767 static inline void pqi_schedule_update_time_worker( 768 struct pqi_ctrl_info *ctrl_info) 769 { 770 schedule_delayed_work(&ctrl_info->update_time_work, 0); 771 } 772 773 static inline void pqi_cancel_update_time_worker( 774 struct pqi_ctrl_info *ctrl_info) 775 { 776 cancel_delayed_work_sync(&ctrl_info->update_time_work); 777 } 778 779 static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 780 void *buffer, size_t buffer_length) 781 { 782 int rc; 783 enum dma_data_direction dir; 784 struct pqi_raid_path_request request; 785 786 rc = pqi_build_raid_path_request(ctrl_info, &request, 787 cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir); 788 if (rc) 789 return rc; 790 791 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 792 NULL, NO_TIMEOUT); 793 794 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 795 return rc; 796 } 797 798 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, 799 void **buffer) 800 { 801 int rc; 802 size_t lun_list_length; 803 size_t lun_data_length; 804 size_t new_lun_list_length; 805 void *lun_data = NULL; 806 struct report_lun_header *report_lun_header; 807 808 report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL); 809 if (!report_lun_header) { 810 rc = -ENOMEM; 811 goto out; 812 } 813 814 rc = pqi_report_luns(ctrl_info, cmd, report_lun_header, 815 sizeof(*report_lun_header)); 816 if (rc) 817 goto out; 818 819 lun_list_length = get_unaligned_be32(&report_lun_header->list_length); 820 821 again: 822 lun_data_length = sizeof(struct report_lun_header) + lun_list_length; 823 824 lun_data = kmalloc(lun_data_length, GFP_KERNEL); 825 if (!lun_data) { 826 rc = -ENOMEM; 827 goto out; 828 } 829 830 if (lun_list_length == 0) { 831 memcpy(lun_data, report_lun_header, sizeof(*report_lun_header)); 832 goto out; 833 } 834 835 rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length); 836 if (rc) 837 goto out; 838 839 new_lun_list_length = get_unaligned_be32( 840 &((struct report_lun_header *)lun_data)->list_length); 841 842 if (new_lun_list_length > lun_list_length) { 843 lun_list_length = new_lun_list_length; 844 kfree(lun_data); 845 goto again; 846 } 847 848 out: 849 kfree(report_lun_header); 850 851 if (rc) { 852 kfree(lun_data); 853 lun_data = NULL; 854 } 855 856 *buffer = lun_data; 857 858 return rc; 859 } 860 861 static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, 
862 void **buffer) 863 { 864 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS, 865 buffer); 866 } 867 868 static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, 869 void **buffer) 870 { 871 return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer); 872 } 873 874 static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info, 875 struct report_phys_lun_extended **physdev_list, 876 struct report_log_lun_extended **logdev_list) 877 { 878 int rc; 879 size_t logdev_list_length; 880 size_t logdev_data_length; 881 struct report_log_lun_extended *internal_logdev_list; 882 struct report_log_lun_extended *logdev_data; 883 struct report_lun_header report_lun_header; 884 885 rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list); 886 if (rc) 887 dev_err(&ctrl_info->pci_dev->dev, 888 "report physical LUNs failed\n"); 889 890 rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list); 891 if (rc) 892 dev_err(&ctrl_info->pci_dev->dev, 893 "report logical LUNs failed\n"); 894 895 /* 896 * Tack the controller itself onto the end of the logical device list. 897 */ 898 899 logdev_data = *logdev_list; 900 901 if (logdev_data) { 902 logdev_list_length = 903 get_unaligned_be32(&logdev_data->header.list_length); 904 } else { 905 memset(&report_lun_header, 0, sizeof(report_lun_header)); 906 logdev_data = 907 (struct report_log_lun_extended *)&report_lun_header; 908 logdev_list_length = 0; 909 } 910 911 logdev_data_length = sizeof(struct report_lun_header) + 912 logdev_list_length; 913 914 internal_logdev_list = kmalloc(logdev_data_length + 915 sizeof(struct report_log_lun_extended), GFP_KERNEL); 916 if (!internal_logdev_list) { 917 kfree(*logdev_list); 918 *logdev_list = NULL; 919 return -ENOMEM; 920 } 921 922 memcpy(internal_logdev_list, logdev_data, logdev_data_length); 923 memset((u8 *)internal_logdev_list + logdev_data_length, 0, 924 sizeof(struct report_log_lun_extended_entry)); 925 put_unaligned_be32(logdev_list_length + 926 sizeof(struct report_log_lun_extended_entry), 927 &internal_logdev_list->header.list_length); 928 929 kfree(*logdev_list); 930 *logdev_list = internal_logdev_list; 931 932 return 0; 933 } 934 935 static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device, 936 int bus, int target, int lun) 937 { 938 device->bus = bus; 939 device->target = target; 940 device->lun = lun; 941 } 942 943 static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device) 944 { 945 u8 *scsi3addr; 946 u32 lunid; 947 int bus; 948 int target; 949 int lun; 950 951 scsi3addr = device->scsi3addr; 952 lunid = get_unaligned_le32(scsi3addr); 953 954 if (pqi_is_hba_lunid(scsi3addr)) { 955 /* The specified device is the controller. */ 956 pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff); 957 device->target_lun_valid = true; 958 return; 959 } 960 961 if (pqi_is_logical_device(device)) { 962 if (device->is_external_raid_device) { 963 bus = PQI_EXTERNAL_RAID_VOLUME_BUS; 964 target = (lunid >> 16) & 0x3fff; 965 lun = lunid & 0xff; 966 } else { 967 bus = PQI_RAID_VOLUME_BUS; 968 target = 0; 969 lun = lunid & 0x3fff; 970 } 971 pqi_set_bus_target_lun(device, bus, target, lun); 972 device->target_lun_valid = true; 973 return; 974 } 975 976 /* 977 * Defer target and LUN assignment for non-controller physical devices 978 * because the SAS transport layer will make these assignments later. 
979 */ 980 pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0); 981 } 982 983 static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info, 984 struct pqi_scsi_dev *device) 985 { 986 int rc; 987 u8 raid_level; 988 u8 *buffer; 989 990 raid_level = SA_RAID_UNKNOWN; 991 992 buffer = kmalloc(64, GFP_KERNEL); 993 if (buffer) { 994 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 995 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64); 996 if (rc == 0) { 997 raid_level = buffer[8]; 998 if (raid_level > SA_RAID_MAX) 999 raid_level = SA_RAID_UNKNOWN; 1000 } 1001 kfree(buffer); 1002 } 1003 1004 device->raid_level = raid_level; 1005 } 1006 1007 static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info, 1008 struct pqi_scsi_dev *device, struct raid_map *raid_map) 1009 { 1010 char *err_msg; 1011 u32 raid_map_size; 1012 u32 r5or6_blocks_per_row; 1013 unsigned int num_phys_disks; 1014 unsigned int num_raid_map_entries; 1015 1016 raid_map_size = get_unaligned_le32(&raid_map->structure_size); 1017 1018 if (raid_map_size < offsetof(struct raid_map, disk_data)) { 1019 err_msg = "RAID map too small"; 1020 goto bad_raid_map; 1021 } 1022 1023 if (raid_map_size > sizeof(*raid_map)) { 1024 err_msg = "RAID map too large"; 1025 goto bad_raid_map; 1026 } 1027 1028 num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) * 1029 (get_unaligned_le16(&raid_map->data_disks_per_row) + 1030 get_unaligned_le16(&raid_map->metadata_disks_per_row)); 1031 num_raid_map_entries = num_phys_disks * 1032 get_unaligned_le16(&raid_map->row_cnt); 1033 1034 if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) { 1035 err_msg = "invalid number of map entries in RAID map"; 1036 goto bad_raid_map; 1037 } 1038 1039 if (device->raid_level == SA_RAID_1) { 1040 if (get_unaligned_le16(&raid_map->layout_map_count) != 2) { 1041 err_msg = "invalid RAID-1 map"; 1042 goto bad_raid_map; 1043 } 1044 } else if (device->raid_level == SA_RAID_ADM) { 1045 if (get_unaligned_le16(&raid_map->layout_map_count) != 3) { 1046 err_msg = "invalid RAID-1(ADM) map"; 1047 goto bad_raid_map; 1048 } 1049 } else if ((device->raid_level == SA_RAID_5 || 1050 device->raid_level == SA_RAID_6) && 1051 get_unaligned_le16(&raid_map->layout_map_count) > 1) { 1052 /* RAID 50/60 */ 1053 r5or6_blocks_per_row = 1054 get_unaligned_le16(&raid_map->strip_size) * 1055 get_unaligned_le16(&raid_map->data_disks_per_row); 1056 if (r5or6_blocks_per_row == 0) { 1057 err_msg = "invalid RAID-5 or RAID-6 map"; 1058 goto bad_raid_map; 1059 } 1060 } 1061 1062 return 0; 1063 1064 bad_raid_map: 1065 dev_warn(&ctrl_info->pci_dev->dev, 1066 "logical device %08x%08x %s\n", 1067 *((u32 *)&device->scsi3addr), 1068 *((u32 *)&device->scsi3addr[4]), err_msg); 1069 1070 return -EINVAL; 1071 } 1072 1073 static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info, 1074 struct pqi_scsi_dev *device) 1075 { 1076 int rc; 1077 enum dma_data_direction dir; 1078 struct pqi_raid_path_request request; 1079 struct raid_map *raid_map; 1080 1081 raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL); 1082 if (!raid_map) 1083 return -ENOMEM; 1084 1085 rc = pqi_build_raid_path_request(ctrl_info, &request, 1086 CISS_GET_RAID_MAP, device->scsi3addr, raid_map, 1087 sizeof(*raid_map), 0, &dir); 1088 if (rc) 1089 goto error; 1090 1091 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 1092 NULL, NO_TIMEOUT); 1093 1094 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); 1095 1096 if (rc) 1097 goto error; 1098 1099 rc = pqi_validate_raid_map(ctrl_info, device, raid_map); 
1100 if (rc) 1101 goto error; 1102 1103 device->raid_map = raid_map; 1104 1105 return 0; 1106 1107 error: 1108 kfree(raid_map); 1109 1110 return rc; 1111 } 1112 1113 static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, 1114 struct pqi_scsi_dev *device) 1115 { 1116 int rc; 1117 u8 *buffer; 1118 u8 bypass_status; 1119 1120 buffer = kmalloc(64, GFP_KERNEL); 1121 if (!buffer) 1122 return; 1123 1124 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1125 VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64); 1126 if (rc) 1127 goto out; 1128 1129 #define RAID_BYPASS_STATUS 4 1130 #define RAID_BYPASS_CONFIGURED 0x1 1131 #define RAID_BYPASS_ENABLED 0x2 1132 1133 bypass_status = buffer[RAID_BYPASS_STATUS]; 1134 device->raid_bypass_configured = 1135 (bypass_status & RAID_BYPASS_CONFIGURED) != 0; 1136 if (device->raid_bypass_configured && 1137 (bypass_status & RAID_BYPASS_ENABLED) && 1138 pqi_get_raid_map(ctrl_info, device) == 0) 1139 device->raid_bypass_enabled = true; 1140 1141 out: 1142 kfree(buffer); 1143 } 1144 1145 /* 1146 * Use vendor-specific VPD to determine online/offline status of a volume. 1147 */ 1148 1149 static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info, 1150 struct pqi_scsi_dev *device) 1151 { 1152 int rc; 1153 size_t page_length; 1154 u8 volume_status = CISS_LV_STATUS_UNAVAILABLE; 1155 bool volume_offline = true; 1156 u32 volume_flags; 1157 struct ciss_vpd_logical_volume_status *vpd; 1158 1159 vpd = kmalloc(sizeof(*vpd), GFP_KERNEL); 1160 if (!vpd) 1161 goto no_buffer; 1162 1163 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 1164 VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd)); 1165 if (rc) 1166 goto out; 1167 1168 page_length = offsetof(struct ciss_vpd_logical_volume_status, 1169 volume_status) + vpd->page_length; 1170 if (page_length < sizeof(*vpd)) 1171 goto out; 1172 1173 volume_status = vpd->volume_status; 1174 volume_flags = get_unaligned_be32(&vpd->flags); 1175 volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0; 1176 1177 out: 1178 kfree(vpd); 1179 no_buffer: 1180 device->volume_status = volume_status; 1181 device->volume_offline = volume_offline; 1182 } 1183 1184 #define PQI_INQUIRY_PAGE0_RETRIES 3 1185 1186 static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, 1187 struct pqi_scsi_dev *device) 1188 { 1189 int rc; 1190 u8 *buffer; 1191 unsigned int retries; 1192 1193 buffer = kmalloc(64, GFP_KERNEL); 1194 if (!buffer) 1195 return -ENOMEM; 1196 1197 /* Send an inquiry to the device to see what it is. 
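 * Standard INQUIRY data is parsed below: byte 0 (bits 4:0) carries the
 * peripheral device type, bytes 8-15 the vendor ID, and bytes 16-31 the
 * product ID.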
*/ 1198 for (retries = 0;;) { 1199 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, 1200 buffer, 64); 1201 if (rc == 0) 1202 break; 1203 if (pqi_is_logical_device(device) || 1204 rc != PQI_CMD_STATUS_ABORTED || 1205 ++retries > PQI_INQUIRY_PAGE0_RETRIES) 1206 goto out; 1207 } 1208 1209 scsi_sanitize_inquiry_string(&buffer[8], 8); 1210 scsi_sanitize_inquiry_string(&buffer[16], 16); 1211 1212 device->devtype = buffer[0] & 0x1f; 1213 memcpy(device->vendor, &buffer[8], sizeof(device->vendor)); 1214 memcpy(device->model, &buffer[16], sizeof(device->model)); 1215 1216 if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) { 1217 if (device->is_external_raid_device) { 1218 device->raid_level = SA_RAID_UNKNOWN; 1219 device->volume_status = CISS_LV_OK; 1220 device->volume_offline = false; 1221 } else { 1222 pqi_get_raid_level(ctrl_info, device); 1223 pqi_get_raid_bypass_status(ctrl_info, device); 1224 pqi_get_volume_status(ctrl_info, device); 1225 } 1226 } 1227 1228 out: 1229 kfree(buffer); 1230 1231 return rc; 1232 } 1233 1234 static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, 1235 struct pqi_scsi_dev *device, 1236 struct bmic_identify_physical_device *id_phys) 1237 { 1238 int rc; 1239 1240 memset(id_phys, 0, sizeof(*id_phys)); 1241 1242 rc = pqi_identify_physical_device(ctrl_info, device, 1243 id_phys, sizeof(*id_phys)); 1244 if (rc) { 1245 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH; 1246 return; 1247 } 1248 1249 device->queue_depth = 1250 get_unaligned_le16(&id_phys->current_queue_depth_limit); 1251 device->device_type = id_phys->device_type; 1252 device->active_path_index = id_phys->active_path_number; 1253 device->path_map = id_phys->redundant_path_present_map; 1254 memcpy(&device->box, 1255 &id_phys->alternate_paths_phys_box_on_port, 1256 sizeof(device->box)); 1257 memcpy(&device->phys_connector, 1258 &id_phys->alternate_paths_phys_connector, 1259 sizeof(device->phys_connector)); 1260 device->bay = id_phys->phys_bay_in_box; 1261 } 1262 1263 static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, 1264 struct pqi_scsi_dev *device) 1265 { 1266 char *status; 1267 static const char unknown_state_str[] = 1268 "Volume is in an unknown state (%u)"; 1269 char unknown_state_buffer[sizeof(unknown_state_str) + 10]; 1270 1271 switch (device->volume_status) { 1272 case CISS_LV_OK: 1273 status = "Volume online"; 1274 break; 1275 case CISS_LV_FAILED: 1276 status = "Volume failed"; 1277 break; 1278 case CISS_LV_NOT_CONFIGURED: 1279 status = "Volume not configured"; 1280 break; 1281 case CISS_LV_DEGRADED: 1282 status = "Volume degraded"; 1283 break; 1284 case CISS_LV_READY_FOR_RECOVERY: 1285 status = "Volume ready for recovery operation"; 1286 break; 1287 case CISS_LV_UNDERGOING_RECOVERY: 1288 status = "Volume undergoing recovery"; 1289 break; 1290 case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED: 1291 status = "Wrong physical drive was replaced"; 1292 break; 1293 case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM: 1294 status = "A physical drive not properly connected"; 1295 break; 1296 case CISS_LV_HARDWARE_OVERHEATING: 1297 status = "Hardware is overheating"; 1298 break; 1299 case CISS_LV_HARDWARE_HAS_OVERHEATED: 1300 status = "Hardware has overheated"; 1301 break; 1302 case CISS_LV_UNDERGOING_EXPANSION: 1303 status = "Volume undergoing expansion"; 1304 break; 1305 case CISS_LV_NOT_AVAILABLE: 1306 status = "Volume waiting for transforming volume"; 1307 break; 1308 case CISS_LV_QUEUED_FOR_EXPANSION: 1309 status = "Volume queued for expansion"; 
1310 break; 1311 case CISS_LV_DISABLED_SCSI_ID_CONFLICT: 1312 status = "Volume disabled due to SCSI ID conflict"; 1313 break; 1314 case CISS_LV_EJECTED: 1315 status = "Volume has been ejected"; 1316 break; 1317 case CISS_LV_UNDERGOING_ERASE: 1318 status = "Volume undergoing background erase"; 1319 break; 1320 case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD: 1321 status = "Volume ready for predictive spare rebuild"; 1322 break; 1323 case CISS_LV_UNDERGOING_RPI: 1324 status = "Volume undergoing rapid parity initialization"; 1325 break; 1326 case CISS_LV_PENDING_RPI: 1327 status = "Volume queued for rapid parity initialization"; 1328 break; 1329 case CISS_LV_ENCRYPTED_NO_KEY: 1330 status = "Encrypted volume inaccessible - key not present"; 1331 break; 1332 case CISS_LV_UNDERGOING_ENCRYPTION: 1333 status = "Volume undergoing encryption process"; 1334 break; 1335 case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING: 1336 status = "Volume undergoing encryption re-keying process"; 1337 break; 1338 case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: 1339 status = "Volume encrypted but encryption is disabled"; 1340 break; 1341 case CISS_LV_PENDING_ENCRYPTION: 1342 status = "Volume pending migration to encrypted state"; 1343 break; 1344 case CISS_LV_PENDING_ENCRYPTION_REKEYING: 1345 status = "Volume pending encryption rekeying"; 1346 break; 1347 case CISS_LV_NOT_SUPPORTED: 1348 status = "Volume not supported on this controller"; 1349 break; 1350 case CISS_LV_STATUS_UNAVAILABLE: 1351 status = "Volume status not available"; 1352 break; 1353 default: 1354 snprintf(unknown_state_buffer, sizeof(unknown_state_buffer), 1355 unknown_state_str, device->volume_status); 1356 status = unknown_state_buffer; 1357 break; 1358 } 1359 1360 dev_info(&ctrl_info->pci_dev->dev, 1361 "scsi %d:%d:%d:%d %s\n", 1362 ctrl_info->scsi_host->host_no, 1363 device->bus, device->target, device->lun, status); 1364 } 1365 1366 static void pqi_rescan_worker(struct work_struct *work) 1367 { 1368 struct pqi_ctrl_info *ctrl_info; 1369 1370 ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info, 1371 rescan_work); 1372 1373 pqi_scan_scsi_devices(ctrl_info); 1374 } 1375 1376 static int pqi_add_device(struct pqi_ctrl_info *ctrl_info, 1377 struct pqi_scsi_dev *device) 1378 { 1379 int rc; 1380 1381 if (pqi_is_logical_device(device)) 1382 rc = scsi_add_device(ctrl_info->scsi_host, device->bus, 1383 device->target, device->lun); 1384 else 1385 rc = pqi_add_sas_device(ctrl_info->sas_host, device); 1386 1387 return rc; 1388 } 1389 1390 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, 1391 struct pqi_scsi_dev *device) 1392 { 1393 if (pqi_is_logical_device(device)) 1394 scsi_remove_device(device->sdev); 1395 else 1396 pqi_remove_sas_device(device); 1397 } 1398 1399 /* Assumes the SCSI device list lock is held. 
*/ 1400 1401 static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info, 1402 int bus, int target, int lun) 1403 { 1404 struct pqi_scsi_dev *device; 1405 1406 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1407 scsi_device_list_entry) 1408 if (device->bus == bus && device->target == target && 1409 device->lun == lun) 1410 return device; 1411 1412 return NULL; 1413 } 1414 1415 static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, 1416 struct pqi_scsi_dev *dev2) 1417 { 1418 if (dev1->is_physical_device != dev2->is_physical_device) 1419 return false; 1420 1421 if (dev1->is_physical_device) 1422 return dev1->wwid == dev2->wwid; 1423 1424 return memcmp(dev1->volume_id, dev2->volume_id, 1425 sizeof(dev1->volume_id)) == 0; 1426 } 1427 1428 enum pqi_find_result { 1429 DEVICE_NOT_FOUND, 1430 DEVICE_CHANGED, 1431 DEVICE_SAME, 1432 }; 1433 1434 static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info, 1435 struct pqi_scsi_dev *device_to_find, 1436 struct pqi_scsi_dev **matching_device) 1437 { 1438 struct pqi_scsi_dev *device; 1439 1440 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1441 scsi_device_list_entry) { 1442 if (pqi_scsi3addr_equal(device_to_find->scsi3addr, 1443 device->scsi3addr)) { 1444 *matching_device = device; 1445 if (pqi_device_equal(device_to_find, device)) { 1446 if (device_to_find->volume_offline) 1447 return DEVICE_CHANGED; 1448 return DEVICE_SAME; 1449 } 1450 return DEVICE_CHANGED; 1451 } 1452 } 1453 1454 return DEVICE_NOT_FOUND; 1455 } 1456 1457 #define PQI_DEV_INFO_BUFFER_LENGTH 128 1458 1459 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info, 1460 char *action, struct pqi_scsi_dev *device) 1461 { 1462 ssize_t count; 1463 char buffer[PQI_DEV_INFO_BUFFER_LENGTH]; 1464 1465 count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH, 1466 "%d:%d:", ctrl_info->scsi_host->host_no, device->bus); 1467 1468 if (device->target_lun_valid) 1469 count += snprintf(buffer + count, 1470 PQI_DEV_INFO_BUFFER_LENGTH - count, 1471 "%d:%d", 1472 device->target, 1473 device->lun); 1474 else 1475 count += snprintf(buffer + count, 1476 PQI_DEV_INFO_BUFFER_LENGTH - count, 1477 "-:-"); 1478 1479 if (pqi_is_logical_device(device)) 1480 count += snprintf(buffer + count, 1481 PQI_DEV_INFO_BUFFER_LENGTH - count, 1482 " %08x%08x", 1483 *((u32 *)&device->scsi3addr), 1484 *((u32 *)&device->scsi3addr[4])); 1485 else 1486 count += snprintf(buffer + count, 1487 PQI_DEV_INFO_BUFFER_LENGTH - count, 1488 " %016llx", device->sas_address); 1489 1490 count += snprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count, 1491 " %s %.8s %.16s ", 1492 scsi_device_type(device->devtype), 1493 device->vendor, 1494 device->model); 1495 1496 if (pqi_is_logical_device(device)) { 1497 if (device->devtype == TYPE_DISK) 1498 count += snprintf(buffer + count, 1499 PQI_DEV_INFO_BUFFER_LENGTH - count, 1500 "SSDSmartPathCap%c En%c %-12s", 1501 device->raid_bypass_configured ? '+' : '-', 1502 device->raid_bypass_enabled ? '+' : '-', 1503 pqi_raid_level_to_string(device->raid_level)); 1504 } else { 1505 count += snprintf(buffer + count, 1506 PQI_DEV_INFO_BUFFER_LENGTH - count, 1507 "AIO%c", device->aio_enabled ? '+' : '-'); 1508 if (device->devtype == TYPE_DISK || 1509 device->devtype == TYPE_ZBC) 1510 count += snprintf(buffer + count, 1511 PQI_DEV_INFO_BUFFER_LENGTH - count, 1512 " qd=%-6d", device->queue_depth); 1513 } 1514 1515 dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer); 1516 } 1517 1518 /* Assumes the SCSI device list lock is held. 
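 * (scsi_device_list_lock, acquired by the caller, pqi_update_device_list().)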
*/ 1519 1520 static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, 1521 struct pqi_scsi_dev *new_device) 1522 { 1523 existing_device->devtype = new_device->devtype; 1524 existing_device->device_type = new_device->device_type; 1525 existing_device->bus = new_device->bus; 1526 if (new_device->target_lun_valid) { 1527 existing_device->target = new_device->target; 1528 existing_device->lun = new_device->lun; 1529 existing_device->target_lun_valid = true; 1530 } 1531 1532 /* By definition, the scsi3addr and wwid fields are already the same. */ 1533 1534 existing_device->is_physical_device = new_device->is_physical_device; 1535 existing_device->is_external_raid_device = 1536 new_device->is_external_raid_device; 1537 existing_device->aio_enabled = new_device->aio_enabled; 1538 memcpy(existing_device->vendor, new_device->vendor, 1539 sizeof(existing_device->vendor)); 1540 memcpy(existing_device->model, new_device->model, 1541 sizeof(existing_device->model)); 1542 existing_device->sas_address = new_device->sas_address; 1543 existing_device->raid_level = new_device->raid_level; 1544 existing_device->queue_depth = new_device->queue_depth; 1545 existing_device->aio_handle = new_device->aio_handle; 1546 existing_device->volume_status = new_device->volume_status; 1547 existing_device->active_path_index = new_device->active_path_index; 1548 existing_device->path_map = new_device->path_map; 1549 existing_device->bay = new_device->bay; 1550 memcpy(existing_device->box, new_device->box, 1551 sizeof(existing_device->box)); 1552 memcpy(existing_device->phys_connector, new_device->phys_connector, 1553 sizeof(existing_device->phys_connector)); 1554 existing_device->offload_to_mirror = 0; 1555 kfree(existing_device->raid_map); 1556 existing_device->raid_map = new_device->raid_map; 1557 existing_device->raid_bypass_configured = 1558 new_device->raid_bypass_configured; 1559 existing_device->raid_bypass_enabled = 1560 new_device->raid_bypass_enabled; 1561 1562 /* To prevent this from being freed later. */ 1563 new_device->raid_map = NULL; 1564 } 1565 1566 static inline void pqi_free_device(struct pqi_scsi_dev *device) 1567 { 1568 if (device) { 1569 kfree(device->raid_map); 1570 kfree(device); 1571 } 1572 } 1573 1574 /* 1575 * Called when exposing a new device to the OS fails in order to re-adjust 1576 * our internal SCSI device list to match the SCSI ML's view. 1577 */ 1578 1579 static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info, 1580 struct pqi_scsi_dev *device) 1581 { 1582 unsigned long flags; 1583 1584 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1585 list_del(&device->scsi_device_list_entry); 1586 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1587 1588 /* Allow the device structure to be freed later. */ 1589 device->keep_device = false; 1590 } 1591 1592 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, 1593 struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices) 1594 { 1595 int rc; 1596 unsigned int i; 1597 unsigned long flags; 1598 enum pqi_find_result find_result; 1599 struct pqi_scsi_dev *device; 1600 struct pqi_scsi_dev *next; 1601 struct pqi_scsi_dev *matching_device; 1602 LIST_HEAD(add_list); 1603 LIST_HEAD(delete_list); 1604 1605 /* 1606 * The idea here is to do as little work as possible while holding the 1607 * spinlock. That's why we go to great pains to defer anything other 1608 * than updating the internal device list until after we release the 1609 * spinlock. 
1610 */ 1611 1612 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1613 1614 /* Assume that all devices in the existing list have gone away. */ 1615 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1616 scsi_device_list_entry) 1617 device->device_gone = true; 1618 1619 for (i = 0; i < num_new_devices; i++) { 1620 device = new_device_list[i]; 1621 1622 find_result = pqi_scsi_find_entry(ctrl_info, device, 1623 &matching_device); 1624 1625 switch (find_result) { 1626 case DEVICE_SAME: 1627 /* 1628 * The newly found device is already in the existing 1629 * device list. 1630 */ 1631 device->new_device = false; 1632 matching_device->device_gone = false; 1633 pqi_scsi_update_device(matching_device, device); 1634 break; 1635 case DEVICE_NOT_FOUND: 1636 /* 1637 * The newly found device is NOT in the existing device 1638 * list. 1639 */ 1640 device->new_device = true; 1641 break; 1642 case DEVICE_CHANGED: 1643 /* 1644 * The original device has gone away and we need to add 1645 * the new device. 1646 */ 1647 device->new_device = true; 1648 break; 1649 } 1650 } 1651 1652 /* Process all devices that have gone away. */ 1653 list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list, 1654 scsi_device_list_entry) { 1655 if (device->device_gone) { 1656 list_del(&device->scsi_device_list_entry); 1657 list_add_tail(&device->delete_list_entry, &delete_list); 1658 } 1659 } 1660 1661 /* Process all new devices. */ 1662 for (i = 0; i < num_new_devices; i++) { 1663 device = new_device_list[i]; 1664 if (!device->new_device) 1665 continue; 1666 if (device->volume_offline) 1667 continue; 1668 list_add_tail(&device->scsi_device_list_entry, 1669 &ctrl_info->scsi_device_list); 1670 list_add_tail(&device->add_list_entry, &add_list); 1671 /* To prevent this device structure from being freed later. */ 1672 device->keep_device = true; 1673 } 1674 1675 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 1676 1677 /* Remove all devices that have gone away. */ 1678 list_for_each_entry_safe(device, next, &delete_list, 1679 delete_list_entry) { 1680 if (device->volume_offline) { 1681 pqi_dev_info(ctrl_info, "offline", device); 1682 pqi_show_volume_status(ctrl_info, device); 1683 } else { 1684 pqi_dev_info(ctrl_info, "removed", device); 1685 } 1686 if (device->sdev) 1687 pqi_remove_device(ctrl_info, device); 1688 list_del(&device->delete_list_entry); 1689 pqi_free_device(device); 1690 } 1691 1692 /* 1693 * Notify the SCSI ML if the queue depth of any existing device has 1694 * changed. 1695 */ 1696 list_for_each_entry(device, &ctrl_info->scsi_device_list, 1697 scsi_device_list_entry) { 1698 if (device->sdev && device->queue_depth != 1699 device->advertised_queue_depth) { 1700 device->advertised_queue_depth = device->queue_depth; 1701 scsi_change_queue_depth(device->sdev, 1702 device->advertised_queue_depth); 1703 } 1704 } 1705 1706 /* Expose any new devices. 
*/ 1707 list_for_each_entry_safe(device, next, &add_list, add_list_entry) { 1708 if (!device->sdev) { 1709 pqi_dev_info(ctrl_info, "added", device); 1710 rc = pqi_add_device(ctrl_info, device); 1711 if (rc) { 1712 dev_warn(&ctrl_info->pci_dev->dev, 1713 "scsi %d:%d:%d:%d addition failed, device not added\n", 1714 ctrl_info->scsi_host->host_no, 1715 device->bus, device->target, 1716 device->lun); 1717 pqi_fixup_botched_add(ctrl_info, device); 1718 } 1719 } 1720 } 1721 } 1722 1723 static bool pqi_is_supported_device(struct pqi_scsi_dev *device) 1724 { 1725 bool is_supported = false; 1726 1727 switch (device->devtype) { 1728 case TYPE_DISK: 1729 case TYPE_ZBC: 1730 case TYPE_TAPE: 1731 case TYPE_MEDIUM_CHANGER: 1732 case TYPE_ENCLOSURE: 1733 is_supported = true; 1734 break; 1735 case TYPE_RAID: 1736 /* 1737 * Only support the HBA controller itself as a RAID 1738 * controller. If it's a RAID controller other than 1739 * the HBA itself (an external RAID controller, for 1740 * example), we don't support it. 1741 */ 1742 if (pqi_is_hba_lunid(device->scsi3addr)) 1743 is_supported = true; 1744 break; 1745 } 1746 1747 return is_supported; 1748 } 1749 1750 static inline bool pqi_skip_device(u8 *scsi3addr) 1751 { 1752 /* Ignore all masked devices. */ 1753 if (MASKED_DEVICE(scsi3addr)) 1754 return true; 1755 1756 return false; 1757 } 1758 1759 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1760 { 1761 int i; 1762 int rc; 1763 LIST_HEAD(new_device_list_head); 1764 struct report_phys_lun_extended *physdev_list = NULL; 1765 struct report_log_lun_extended *logdev_list = NULL; 1766 struct report_phys_lun_extended_entry *phys_lun_ext_entry; 1767 struct report_log_lun_extended_entry *log_lun_ext_entry; 1768 struct bmic_identify_physical_device *id_phys = NULL; 1769 u32 num_physicals; 1770 u32 num_logicals; 1771 struct pqi_scsi_dev **new_device_list = NULL; 1772 struct pqi_scsi_dev *device; 1773 struct pqi_scsi_dev *next; 1774 unsigned int num_new_devices; 1775 unsigned int num_valid_devices; 1776 bool is_physical_device; 1777 u8 *scsi3addr; 1778 static char *out_of_memory_msg = 1779 "failed to allocate memory, device discovery stopped"; 1780 1781 rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list); 1782 if (rc) 1783 goto out; 1784 1785 if (physdev_list) 1786 num_physicals = 1787 get_unaligned_be32(&physdev_list->header.list_length) 1788 / sizeof(physdev_list->lun_entries[0]); 1789 else 1790 num_physicals = 0; 1791 1792 if (logdev_list) 1793 num_logicals = 1794 get_unaligned_be32(&logdev_list->header.list_length) 1795 / sizeof(logdev_list->lun_entries[0]); 1796 else 1797 num_logicals = 0; 1798 1799 if (num_physicals) { 1800 /* 1801 * We need this buffer for calls to pqi_get_physical_disk_info() 1802 * below. We allocate it here instead of inside 1803 * pqi_get_physical_disk_info() because it's a fairly large 1804 * buffer. 
1805 */ 1806 id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL); 1807 if (!id_phys) { 1808 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1809 out_of_memory_msg); 1810 rc = -ENOMEM; 1811 goto out; 1812 } 1813 } 1814 1815 num_new_devices = num_physicals + num_logicals; 1816 1817 new_device_list = kmalloc_array(num_new_devices, 1818 sizeof(*new_device_list), 1819 GFP_KERNEL); 1820 if (!new_device_list) { 1821 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg); 1822 rc = -ENOMEM; 1823 goto out; 1824 } 1825 1826 for (i = 0; i < num_new_devices; i++) { 1827 device = kzalloc(sizeof(*device), GFP_KERNEL); 1828 if (!device) { 1829 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1830 out_of_memory_msg); 1831 rc = -ENOMEM; 1832 goto out; 1833 } 1834 list_add_tail(&device->new_device_list_entry, 1835 &new_device_list_head); 1836 } 1837 1838 device = NULL; 1839 num_valid_devices = 0; 1840 1841 for (i = 0; i < num_new_devices; i++) { 1842 1843 if (i < num_physicals) { 1844 is_physical_device = true; 1845 phys_lun_ext_entry = &physdev_list->lun_entries[i]; 1846 log_lun_ext_entry = NULL; 1847 scsi3addr = phys_lun_ext_entry->lunid; 1848 } else { 1849 is_physical_device = false; 1850 phys_lun_ext_entry = NULL; 1851 log_lun_ext_entry = 1852 &logdev_list->lun_entries[i - num_physicals]; 1853 scsi3addr = log_lun_ext_entry->lunid; 1854 } 1855 1856 if (is_physical_device && pqi_skip_device(scsi3addr)) 1857 continue; 1858 1859 if (device) 1860 device = list_next_entry(device, new_device_list_entry); 1861 else 1862 device = list_first_entry(&new_device_list_head, 1863 struct pqi_scsi_dev, new_device_list_entry); 1864 1865 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); 1866 device->is_physical_device = is_physical_device; 1867 if (!is_physical_device) 1868 device->is_external_raid_device = 1869 pqi_is_external_raid_addr(scsi3addr); 1870 1871 /* Gather information about the device. 
*/ 1872 rc = pqi_get_device_info(ctrl_info, device); 1873 if (rc == -ENOMEM) { 1874 dev_warn(&ctrl_info->pci_dev->dev, "%s\n", 1875 out_of_memory_msg); 1876 goto out; 1877 } 1878 if (rc) { 1879 if (device->is_physical_device) 1880 dev_warn(&ctrl_info->pci_dev->dev, 1881 "obtaining device info failed, skipping physical device %016llx\n", 1882 get_unaligned_be64( 1883 &phys_lun_ext_entry->wwid)); 1884 else 1885 dev_warn(&ctrl_info->pci_dev->dev, 1886 "obtaining device info failed, skipping logical device %08x%08x\n", 1887 *((u32 *)&device->scsi3addr), 1888 *((u32 *)&device->scsi3addr[4])); 1889 rc = 0; 1890 continue; 1891 } 1892 1893 if (!pqi_is_supported_device(device)) 1894 continue; 1895 1896 pqi_assign_bus_target_lun(device); 1897 1898 if (device->is_physical_device) { 1899 device->wwid = phys_lun_ext_entry->wwid; 1900 if ((phys_lun_ext_entry->device_flags & 1901 REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && 1902 phys_lun_ext_entry->aio_handle) 1903 device->aio_enabled = true; 1904 } else { 1905 memcpy(device->volume_id, log_lun_ext_entry->volume_id, 1906 sizeof(device->volume_id)); 1907 } 1908 1909 switch (device->devtype) { 1910 case TYPE_DISK: 1911 case TYPE_ZBC: 1912 case TYPE_ENCLOSURE: 1913 if (device->is_physical_device) { 1914 device->sas_address = 1915 get_unaligned_be64(&device->wwid); 1916 if (device->devtype == TYPE_DISK || 1917 device->devtype == TYPE_ZBC) { 1918 device->aio_handle = 1919 phys_lun_ext_entry->aio_handle; 1920 pqi_get_physical_disk_info(ctrl_info, 1921 device, id_phys); 1922 } 1923 } 1924 break; 1925 } 1926 1927 new_device_list[num_valid_devices++] = device; 1928 } 1929 1930 pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices); 1931 1932 out: 1933 list_for_each_entry_safe(device, next, &new_device_list_head, 1934 new_device_list_entry) { 1935 if (device->keep_device) 1936 continue; 1937 list_del(&device->new_device_list_entry); 1938 pqi_free_device(device); 1939 } 1940 1941 kfree(new_device_list); 1942 kfree(physdev_list); 1943 kfree(logdev_list); 1944 kfree(id_phys); 1945 1946 return rc; 1947 } 1948 1949 static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1950 { 1951 unsigned long flags; 1952 struct pqi_scsi_dev *device; 1953 1954 while (1) { 1955 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 1956 1957 device = list_first_entry_or_null(&ctrl_info->scsi_device_list, 1958 struct pqi_scsi_dev, scsi_device_list_entry); 1959 if (device) 1960 list_del(&device->scsi_device_list_entry); 1961 1962 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 1963 flags); 1964 1965 if (!device) 1966 break; 1967 1968 if (device->sdev) 1969 pqi_remove_device(ctrl_info, device); 1970 pqi_free_device(device); 1971 } 1972 } 1973 1974 static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) 1975 { 1976 int rc; 1977 1978 if (pqi_ctrl_offline(ctrl_info)) 1979 return -ENXIO; 1980 1981 mutex_lock(&ctrl_info->scan_mutex); 1982 1983 rc = pqi_update_scsi_devices(ctrl_info); 1984 if (rc) 1985 pqi_schedule_rescan_worker_delayed(ctrl_info); 1986 1987 mutex_unlock(&ctrl_info->scan_mutex); 1988 1989 return rc; 1990 } 1991 1992 static void pqi_scan_start(struct Scsi_Host *shost) 1993 { 1994 pqi_scan_scsi_devices(shost_to_hba(shost)); 1995 } 1996 1997 /* Returns TRUE if scan is finished. 
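 * The scan mutex is held by pqi_scan_scsi_devices() for the duration of
 * device discovery, so an unlocked mutex means the scan has completed.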
*/ 1998 1999 static int pqi_scan_finished(struct Scsi_Host *shost, 2000 unsigned long elapsed_time) 2001 { 2002 struct pqi_ctrl_info *ctrl_info; 2003 2004 ctrl_info = shost_priv(shost); 2005 2006 return !mutex_is_locked(&ctrl_info->scan_mutex); 2007 } 2008 2009 static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info) 2010 { 2011 mutex_lock(&ctrl_info->scan_mutex); 2012 mutex_unlock(&ctrl_info->scan_mutex); 2013 } 2014 2015 static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info) 2016 { 2017 mutex_lock(&ctrl_info->lun_reset_mutex); 2018 mutex_unlock(&ctrl_info->lun_reset_mutex); 2019 } 2020 2021 static inline void pqi_set_encryption_info( 2022 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map, 2023 u64 first_block) 2024 { 2025 u32 volume_blk_size; 2026 2027 /* 2028 * Set the encryption tweak values based on logical block address. 2029 * If the block size is 512, the tweak value is equal to the LBA. 2030 * For other block sizes, tweak value is (LBA * block size) / 512. 2031 */ 2032 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size); 2033 if (volume_blk_size != 512) 2034 first_block = (first_block * volume_blk_size) / 512; 2035 2036 encryption_info->data_encryption_key_index = 2037 get_unaligned_le16(&raid_map->data_encryption_key_index); 2038 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block); 2039 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block); 2040 } 2041 2042 /* 2043 * Attempt to perform RAID bypass mapping for a logical volume I/O. 2044 */ 2045 2046 #define PQI_RAID_BYPASS_INELIGIBLE 1 2047 2048 static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 2049 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 2050 struct pqi_queue_group *queue_group) 2051 { 2052 struct raid_map *raid_map; 2053 bool is_write = false; 2054 u32 map_index; 2055 u64 first_block; 2056 u64 last_block; 2057 u32 block_cnt; 2058 u32 blocks_per_row; 2059 u64 first_row; 2060 u64 last_row; 2061 u32 first_row_offset; 2062 u32 last_row_offset; 2063 u32 first_column; 2064 u32 last_column; 2065 u64 r0_first_row; 2066 u64 r0_last_row; 2067 u32 r5or6_blocks_per_row; 2068 u64 r5or6_first_row; 2069 u64 r5or6_last_row; 2070 u32 r5or6_first_row_offset; 2071 u32 r5or6_last_row_offset; 2072 u32 r5or6_first_column; 2073 u32 r5or6_last_column; 2074 u16 data_disks_per_row; 2075 u32 total_disks_per_row; 2076 u16 layout_map_count; 2077 u32 stripesize; 2078 u16 strip_size; 2079 u32 first_group; 2080 u32 last_group; 2081 u32 current_group; 2082 u32 map_row; 2083 u32 aio_handle; 2084 u64 disk_block; 2085 u32 disk_block_cnt; 2086 u8 cdb[16]; 2087 u8 cdb_length; 2088 int offload_to_mirror; 2089 struct pqi_encryption_info *encryption_info_ptr; 2090 struct pqi_encryption_info encryption_info; 2091 #if BITS_PER_LONG == 32 2092 u64 tmpdiv; 2093 #endif 2094 2095 /* Check for valid opcode, get LBA and block count. 
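 * Only READ/WRITE 6, 10, 12 and 16 CDBs are eligible for RAID bypass.
 * Note the 6-byte CDB convention: a transfer length of 0 means 256 blocks.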
*/ 2096 switch (scmd->cmnd[0]) { 2097 case WRITE_6: 2098 is_write = true; 2099 /* fall through */ 2100 case READ_6: 2101 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) | 2102 (scmd->cmnd[2] << 8) | scmd->cmnd[3]); 2103 block_cnt = (u32)scmd->cmnd[4]; 2104 if (block_cnt == 0) 2105 block_cnt = 256; 2106 break; 2107 case WRITE_10: 2108 is_write = true; 2109 /* fall through */ 2110 case READ_10: 2111 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2112 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]); 2113 break; 2114 case WRITE_12: 2115 is_write = true; 2116 /* fall through */ 2117 case READ_12: 2118 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]); 2119 block_cnt = get_unaligned_be32(&scmd->cmnd[6]); 2120 break; 2121 case WRITE_16: 2122 is_write = true; 2123 /* fall through */ 2124 case READ_16: 2125 first_block = get_unaligned_be64(&scmd->cmnd[2]); 2126 block_cnt = get_unaligned_be32(&scmd->cmnd[10]); 2127 break; 2128 default: 2129 /* Process via normal I/O path. */ 2130 return PQI_RAID_BYPASS_INELIGIBLE; 2131 } 2132 2133 /* Check for write to non-RAID-0. */ 2134 if (is_write && device->raid_level != SA_RAID_0) 2135 return PQI_RAID_BYPASS_INELIGIBLE; 2136 2137 if (unlikely(block_cnt == 0)) 2138 return PQI_RAID_BYPASS_INELIGIBLE; 2139 2140 last_block = first_block + block_cnt - 1; 2141 raid_map = device->raid_map; 2142 2143 /* Check for invalid block or wraparound. */ 2144 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) || 2145 last_block < first_block) 2146 return PQI_RAID_BYPASS_INELIGIBLE; 2147 2148 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row); 2149 strip_size = get_unaligned_le16(&raid_map->strip_size); 2150 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count); 2151 2152 /* Calculate stripe information for the request. */ 2153 blocks_per_row = data_disks_per_row * strip_size; 2154 #if BITS_PER_LONG == 32 2155 tmpdiv = first_block; 2156 do_div(tmpdiv, blocks_per_row); 2157 first_row = tmpdiv; 2158 tmpdiv = last_block; 2159 do_div(tmpdiv, blocks_per_row); 2160 last_row = tmpdiv; 2161 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2162 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2163 tmpdiv = first_row_offset; 2164 do_div(tmpdiv, strip_size); 2165 first_column = tmpdiv; 2166 tmpdiv = last_row_offset; 2167 do_div(tmpdiv, strip_size); 2168 last_column = tmpdiv; 2169 #else 2170 first_row = first_block / blocks_per_row; 2171 last_row = last_block / blocks_per_row; 2172 first_row_offset = (u32)(first_block - (first_row * blocks_per_row)); 2173 last_row_offset = (u32)(last_block - (last_row * blocks_per_row)); 2174 first_column = first_row_offset / strip_size; 2175 last_column = last_row_offset / strip_size; 2176 #endif 2177 2178 /* If this isn't a single row/column then give to the controller. */ 2179 if (first_row != last_row || first_column != last_column) 2180 return PQI_RAID_BYPASS_INELIGIBLE; 2181 2182 /* Proceeding with driver mapping. 
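 * map_index is the starting index into raid_map->disk_data[]; it is
 * adjusted below for RAID 1, RAID ADM and RAID 50/60 before the AIO
 * handle is looked up.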
*/ 2183 total_disks_per_row = data_disks_per_row + 2184 get_unaligned_le16(&raid_map->metadata_disks_per_row); 2185 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2186 get_unaligned_le16(&raid_map->row_cnt); 2187 map_index = (map_row * total_disks_per_row) + first_column; 2188 2189 /* RAID 1 */ 2190 if (device->raid_level == SA_RAID_1) { 2191 if (device->offload_to_mirror) 2192 map_index += data_disks_per_row; 2193 device->offload_to_mirror = !device->offload_to_mirror; 2194 } else if (device->raid_level == SA_RAID_ADM) { 2195 /* RAID ADM */ 2196 /* 2197 * Handles N-way mirrors (R1-ADM) and R10 with # of drives 2198 * divisible by 3. 2199 */ 2200 offload_to_mirror = device->offload_to_mirror; 2201 if (offload_to_mirror == 0) { 2202 /* use physical disk in the first mirrored group. */ 2203 map_index %= data_disks_per_row; 2204 } else { 2205 do { 2206 /* 2207 * Determine mirror group that map_index 2208 * indicates. 2209 */ 2210 current_group = map_index / data_disks_per_row; 2211 2212 if (offload_to_mirror != current_group) { 2213 if (current_group < 2214 layout_map_count - 1) { 2215 /* 2216 * Select raid index from 2217 * next group. 2218 */ 2219 map_index += data_disks_per_row; 2220 current_group++; 2221 } else { 2222 /* 2223 * Select raid index from first 2224 * group. 2225 */ 2226 map_index %= data_disks_per_row; 2227 current_group = 0; 2228 } 2229 } 2230 } while (offload_to_mirror != current_group); 2231 } 2232 2233 /* Set mirror group to use next time. */ 2234 offload_to_mirror = 2235 (offload_to_mirror >= layout_map_count - 1) ? 2236 0 : offload_to_mirror + 1; 2237 WARN_ON(offload_to_mirror >= layout_map_count); 2238 device->offload_to_mirror = offload_to_mirror; 2239 /* 2240 * Avoid direct use of device->offload_to_mirror within this 2241 * function since multiple threads might simultaneously 2242 * increment it beyond the range of device->layout_map_count -1. 
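 * The local copy is rotated through 0 .. layout_map_count - 1 and then
 * written back with a single assignment above.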
2243 */ 2244 } else if ((device->raid_level == SA_RAID_5 || 2245 device->raid_level == SA_RAID_6) && layout_map_count > 1) { 2246 /* RAID 50/60 */ 2247 /* Verify first and last block are in same RAID group */ 2248 r5or6_blocks_per_row = strip_size * data_disks_per_row; 2249 stripesize = r5or6_blocks_per_row * layout_map_count; 2250 #if BITS_PER_LONG == 32 2251 tmpdiv = first_block; 2252 first_group = do_div(tmpdiv, stripesize); 2253 tmpdiv = first_group; 2254 do_div(tmpdiv, r5or6_blocks_per_row); 2255 first_group = tmpdiv; 2256 tmpdiv = last_block; 2257 last_group = do_div(tmpdiv, stripesize); 2258 tmpdiv = last_group; 2259 do_div(tmpdiv, r5or6_blocks_per_row); 2260 last_group = tmpdiv; 2261 #else 2262 first_group = (first_block % stripesize) / r5or6_blocks_per_row; 2263 last_group = (last_block % stripesize) / r5or6_blocks_per_row; 2264 #endif 2265 if (first_group != last_group) 2266 return PQI_RAID_BYPASS_INELIGIBLE; 2267 2268 /* Verify request is in a single row of RAID 5/6 */ 2269 #if BITS_PER_LONG == 32 2270 tmpdiv = first_block; 2271 do_div(tmpdiv, stripesize); 2272 first_row = r5or6_first_row = r0_first_row = tmpdiv; 2273 tmpdiv = last_block; 2274 do_div(tmpdiv, stripesize); 2275 r5or6_last_row = r0_last_row = tmpdiv; 2276 #else 2277 first_row = r5or6_first_row = r0_first_row = 2278 first_block / stripesize; 2279 r5or6_last_row = r0_last_row = last_block / stripesize; 2280 #endif 2281 if (r5or6_first_row != r5or6_last_row) 2282 return PQI_RAID_BYPASS_INELIGIBLE; 2283 2284 /* Verify request is in a single column */ 2285 #if BITS_PER_LONG == 32 2286 tmpdiv = first_block; 2287 first_row_offset = do_div(tmpdiv, stripesize); 2288 tmpdiv = first_row_offset; 2289 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row); 2290 r5or6_first_row_offset = first_row_offset; 2291 tmpdiv = last_block; 2292 r5or6_last_row_offset = do_div(tmpdiv, stripesize); 2293 tmpdiv = r5or6_last_row_offset; 2294 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); 2295 tmpdiv = r5or6_first_row_offset; 2296 do_div(tmpdiv, strip_size); 2297 first_column = r5or6_first_column = tmpdiv; 2298 tmpdiv = r5or6_last_row_offset; 2299 do_div(tmpdiv, strip_size); 2300 r5or6_last_column = tmpdiv; 2301 #else 2302 first_row_offset = r5or6_first_row_offset = 2303 (u32)((first_block % stripesize) % 2304 r5or6_blocks_per_row); 2305 2306 r5or6_last_row_offset = 2307 (u32)((last_block % stripesize) % 2308 r5or6_blocks_per_row); 2309 2310 first_column = r5or6_first_row_offset / strip_size; 2311 r5or6_first_column = first_column; 2312 r5or6_last_column = r5or6_last_row_offset / strip_size; 2313 #endif 2314 if (r5or6_first_column != r5or6_last_column) 2315 return PQI_RAID_BYPASS_INELIGIBLE; 2316 2317 /* Request is eligible */ 2318 map_row = 2319 ((u32)(first_row >> raid_map->parity_rotation_shift)) % 2320 get_unaligned_le16(&raid_map->row_cnt); 2321 2322 map_index = (first_group * 2323 (get_unaligned_le16(&raid_map->row_cnt) * 2324 total_disks_per_row)) + 2325 (map_row * total_disks_per_row) + first_column; 2326 } 2327 2328 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES)) 2329 return PQI_RAID_BYPASS_INELIGIBLE; 2330 2331 aio_handle = raid_map->disk_data[map_index].aio_handle; 2332 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) + 2333 first_row * strip_size + 2334 (first_row_offset - first_column * strip_size); 2335 disk_block_cnt = block_cnt; 2336 2337 /* Handle differing logical/physical block sizes. 
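 * raid_map->phys_blk_shift scales both the starting block and the block
 * count by 2^phys_blk_shift to convert volume (logical) blocks into
 * physical-disk blocks; a shift of 3, for example, multiplies both by 8.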
*/ 2338 if (raid_map->phys_blk_shift) { 2339 disk_block <<= raid_map->phys_blk_shift; 2340 disk_block_cnt <<= raid_map->phys_blk_shift; 2341 } 2342 2343 if (unlikely(disk_block_cnt > 0xffff)) 2344 return PQI_RAID_BYPASS_INELIGIBLE; 2345 2346 /* Build the new CDB for the physical disk I/O. */ 2347 if (disk_block > 0xffffffff) { 2348 cdb[0] = is_write ? WRITE_16 : READ_16; 2349 cdb[1] = 0; 2350 put_unaligned_be64(disk_block, &cdb[2]); 2351 put_unaligned_be32(disk_block_cnt, &cdb[10]); 2352 cdb[14] = 0; 2353 cdb[15] = 0; 2354 cdb_length = 16; 2355 } else { 2356 cdb[0] = is_write ? WRITE_10 : READ_10; 2357 cdb[1] = 0; 2358 put_unaligned_be32((u32)disk_block, &cdb[2]); 2359 cdb[6] = 0; 2360 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]); 2361 cdb[9] = 0; 2362 cdb_length = 10; 2363 } 2364 2365 if (get_unaligned_le16(&raid_map->flags) & 2366 RAID_MAP_ENCRYPTION_ENABLED) { 2367 pqi_set_encryption_info(&encryption_info, raid_map, 2368 first_block); 2369 encryption_info_ptr = &encryption_info; 2370 } else { 2371 encryption_info_ptr = NULL; 2372 } 2373 2374 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle, 2375 cdb, cdb_length, queue_group, encryption_info_ptr, true); 2376 } 2377 2378 #define PQI_STATUS_IDLE 0x0 2379 2380 #define PQI_CREATE_ADMIN_QUEUE_PAIR 1 2381 #define PQI_DELETE_ADMIN_QUEUE_PAIR 2 2382 2383 #define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0 2384 #define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1 2385 #define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2 2386 #define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3 2387 #define PQI_DEVICE_STATE_ERROR 0x4 2388 2389 #define PQI_MODE_READY_TIMEOUT_SECS 30 2390 #define PQI_MODE_READY_POLL_INTERVAL_MSECS 1 2391 2392 static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info) 2393 { 2394 struct pqi_device_registers __iomem *pqi_registers; 2395 unsigned long timeout; 2396 u64 signature; 2397 u8 status; 2398 2399 pqi_registers = ctrl_info->pqi_registers; 2400 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies; 2401 2402 while (1) { 2403 signature = readq(&pqi_registers->signature); 2404 if (memcmp(&signature, PQI_DEVICE_SIGNATURE, 2405 sizeof(signature)) == 0) 2406 break; 2407 if (time_after(jiffies, timeout)) { 2408 dev_err(&ctrl_info->pci_dev->dev, 2409 "timed out waiting for PQI signature\n"); 2410 return -ETIMEDOUT; 2411 } 2412 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2413 } 2414 2415 while (1) { 2416 status = readb(&pqi_registers->function_and_status_code); 2417 if (status == PQI_STATUS_IDLE) 2418 break; 2419 if (time_after(jiffies, timeout)) { 2420 dev_err(&ctrl_info->pci_dev->dev, 2421 "timed out waiting for PQI IDLE\n"); 2422 return -ETIMEDOUT; 2423 } 2424 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2425 } 2426 2427 while (1) { 2428 if (readl(&pqi_registers->device_status) == 2429 PQI_DEVICE_STATE_ALL_REGISTERS_READY) 2430 break; 2431 if (time_after(jiffies, timeout)) { 2432 dev_err(&ctrl_info->pci_dev->dev, 2433 "timed out waiting for PQI all registers ready\n"); 2434 return -ETIMEDOUT; 2435 } 2436 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS); 2437 } 2438 2439 return 0; 2440 } 2441 2442 static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request) 2443 { 2444 struct pqi_scsi_dev *device; 2445 2446 device = io_request->scmd->device->hostdata; 2447 device->raid_bypass_enabled = false; 2448 device->aio_enabled = false; 2449 } 2450 2451 static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path) 2452 { 2453 struct pqi_ctrl_info *ctrl_info; 2454 struct pqi_scsi_dev *device; 2455 2456 device = 
sdev->hostdata; 2457 if (device->device_offline) 2458 return; 2459 2460 device->device_offline = true; 2461 scsi_device_set_state(sdev, SDEV_OFFLINE); 2462 ctrl_info = shost_to_hba(sdev->host); 2463 pqi_schedule_rescan_worker(ctrl_info); 2464 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n", 2465 path, ctrl_info->scsi_host->host_no, device->bus, 2466 device->target, device->lun); 2467 } 2468 2469 static void pqi_process_raid_io_error(struct pqi_io_request *io_request) 2470 { 2471 u8 scsi_status; 2472 u8 host_byte; 2473 struct scsi_cmnd *scmd; 2474 struct pqi_raid_error_info *error_info; 2475 size_t sense_data_length; 2476 int residual_count; 2477 int xfer_count; 2478 struct scsi_sense_hdr sshdr; 2479 2480 scmd = io_request->scmd; 2481 if (!scmd) 2482 return; 2483 2484 error_info = io_request->error_info; 2485 scsi_status = error_info->status; 2486 host_byte = DID_OK; 2487 2488 switch (error_info->data_out_result) { 2489 case PQI_DATA_IN_OUT_GOOD: 2490 break; 2491 case PQI_DATA_IN_OUT_UNDERFLOW: 2492 xfer_count = 2493 get_unaligned_le32(&error_info->data_out_transferred); 2494 residual_count = scsi_bufflen(scmd) - xfer_count; 2495 scsi_set_resid(scmd, residual_count); 2496 if (xfer_count < scmd->underflow) 2497 host_byte = DID_SOFT_ERROR; 2498 break; 2499 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 2500 case PQI_DATA_IN_OUT_ABORTED: 2501 host_byte = DID_ABORT; 2502 break; 2503 case PQI_DATA_IN_OUT_TIMEOUT: 2504 host_byte = DID_TIME_OUT; 2505 break; 2506 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 2507 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 2508 case PQI_DATA_IN_OUT_BUFFER_ERROR: 2509 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 2510 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 2511 case PQI_DATA_IN_OUT_ERROR: 2512 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 2513 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 2514 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 2515 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 2516 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 2517 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 2518 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 2519 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 2520 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 2521 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 2522 default: 2523 host_byte = DID_ERROR; 2524 break; 2525 } 2526 2527 sense_data_length = get_unaligned_le16(&error_info->sense_data_length); 2528 if (sense_data_length == 0) 2529 sense_data_length = 2530 get_unaligned_le16(&error_info->response_data_length); 2531 if (sense_data_length) { 2532 if (sense_data_length > sizeof(error_info->data)) 2533 sense_data_length = sizeof(error_info->data); 2534 2535 if (scsi_status == SAM_STAT_CHECK_CONDITION && 2536 scsi_normalize_sense(error_info->data, 2537 sense_data_length, &sshdr) && 2538 sshdr.sense_key == HARDWARE_ERROR && 2539 sshdr.asc == 0x3e && 2540 sshdr.ascq == 0x1) { 2541 pqi_take_device_offline(scmd->device, "RAID"); 2542 host_byte = DID_NO_CONNECT; 2543 } 2544 2545 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2546 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2547 memcpy(scmd->sense_buffer, error_info->data, 2548 sense_data_length); 2549 } 2550 2551 scmd->result = scsi_status; 2552 set_host_byte(scmd, host_byte); 2553 } 2554 2555 static void pqi_process_aio_io_error(struct pqi_io_request *io_request) 2556 { 2557 u8 scsi_status; 2558 u8 host_byte; 2559 struct scsi_cmnd *scmd; 2560 struct pqi_aio_error_info *error_info; 2561 size_t sense_data_length; 2562 int residual_count; 2563 int xfer_count; 2564 bool device_offline; 
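	/*
	 * Translate the AIO service response and status reported by the
	 * firmware into a SCSI status and host byte for the midlayer.  Some
	 * failures also take the device offline or flag the request for a
	 * retry down the RAID path (-EAGAIN).
	 */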
2565 2566 scmd = io_request->scmd; 2567 error_info = io_request->error_info; 2568 host_byte = DID_OK; 2569 sense_data_length = 0; 2570 device_offline = false; 2571 2572 switch (error_info->service_response) { 2573 case PQI_AIO_SERV_RESPONSE_COMPLETE: 2574 scsi_status = error_info->status; 2575 break; 2576 case PQI_AIO_SERV_RESPONSE_FAILURE: 2577 switch (error_info->status) { 2578 case PQI_AIO_STATUS_IO_ABORTED: 2579 scsi_status = SAM_STAT_TASK_ABORTED; 2580 break; 2581 case PQI_AIO_STATUS_UNDERRUN: 2582 scsi_status = SAM_STAT_GOOD; 2583 residual_count = get_unaligned_le32( 2584 &error_info->residual_count); 2585 scsi_set_resid(scmd, residual_count); 2586 xfer_count = scsi_bufflen(scmd) - residual_count; 2587 if (xfer_count < scmd->underflow) 2588 host_byte = DID_SOFT_ERROR; 2589 break; 2590 case PQI_AIO_STATUS_OVERRUN: 2591 scsi_status = SAM_STAT_GOOD; 2592 break; 2593 case PQI_AIO_STATUS_AIO_PATH_DISABLED: 2594 pqi_aio_path_disabled(io_request); 2595 scsi_status = SAM_STAT_GOOD; 2596 io_request->status = -EAGAIN; 2597 break; 2598 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE: 2599 case PQI_AIO_STATUS_INVALID_DEVICE: 2600 if (!io_request->raid_bypass) { 2601 device_offline = true; 2602 pqi_take_device_offline(scmd->device, "AIO"); 2603 host_byte = DID_NO_CONNECT; 2604 } 2605 scsi_status = SAM_STAT_CHECK_CONDITION; 2606 break; 2607 case PQI_AIO_STATUS_IO_ERROR: 2608 default: 2609 scsi_status = SAM_STAT_CHECK_CONDITION; 2610 break; 2611 } 2612 break; 2613 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE: 2614 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED: 2615 scsi_status = SAM_STAT_GOOD; 2616 break; 2617 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED: 2618 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN: 2619 default: 2620 scsi_status = SAM_STAT_CHECK_CONDITION; 2621 break; 2622 } 2623 2624 if (error_info->data_present) { 2625 sense_data_length = 2626 get_unaligned_le16(&error_info->data_length); 2627 if (sense_data_length) { 2628 if (sense_data_length > sizeof(error_info->data)) 2629 sense_data_length = sizeof(error_info->data); 2630 if (sense_data_length > SCSI_SENSE_BUFFERSIZE) 2631 sense_data_length = SCSI_SENSE_BUFFERSIZE; 2632 memcpy(scmd->sense_buffer, error_info->data, 2633 sense_data_length); 2634 } 2635 } 2636 2637 if (device_offline && sense_data_length == 0) 2638 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR, 2639 0x3e, 0x1); 2640 2641 scmd->result = scsi_status; 2642 set_host_byte(scmd, host_byte); 2643 } 2644 2645 static void pqi_process_io_error(unsigned int iu_type, 2646 struct pqi_io_request *io_request) 2647 { 2648 switch (iu_type) { 2649 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2650 pqi_process_raid_io_error(io_request); 2651 break; 2652 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2653 pqi_process_aio_io_error(io_request); 2654 break; 2655 } 2656 } 2657 2658 static int pqi_interpret_task_management_response( 2659 struct pqi_task_management_response *response) 2660 { 2661 int rc; 2662 2663 switch (response->response_code) { 2664 case SOP_TMF_COMPLETE: 2665 case SOP_TMF_FUNCTION_SUCCEEDED: 2666 rc = 0; 2667 break; 2668 case SOP_TMF_REJECTED: 2669 rc = -EAGAIN; 2670 break; 2671 default: 2672 rc = -EIO; 2673 break; 2674 } 2675 2676 return rc; 2677 } 2678 2679 static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, 2680 struct pqi_queue_group *queue_group) 2681 { 2682 unsigned int num_responses; 2683 pqi_index_t oq_pi; 2684 pqi_index_t oq_ci; 2685 struct pqi_io_request *io_request; 2686 struct pqi_io_response *response; 2687 u16 request_id; 2688 2689 num_responses = 0; 2690 oq_ci 
= queue_group->oq_ci_copy; 2691 2692 while (1) { 2693 oq_pi = readl(queue_group->oq_pi); 2694 if (oq_pi == oq_ci) 2695 break; 2696 2697 num_responses++; 2698 response = queue_group->oq_element_array + 2699 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 2700 2701 request_id = get_unaligned_le16(&response->request_id); 2702 WARN_ON(request_id >= ctrl_info->max_io_slots); 2703 2704 io_request = &ctrl_info->io_request_pool[request_id]; 2705 WARN_ON(atomic_read(&io_request->refcount) == 0); 2706 2707 switch (response->header.iu_type) { 2708 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: 2709 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: 2710 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: 2711 break; 2712 case PQI_RESPONSE_IU_VENDOR_GENERAL: 2713 io_request->status = 2714 get_unaligned_le16( 2715 &((struct pqi_vendor_general_response *) 2716 response)->status); 2717 break; 2718 case PQI_RESPONSE_IU_TASK_MANAGEMENT: 2719 io_request->status = 2720 pqi_interpret_task_management_response( 2721 (void *)response); 2722 break; 2723 case PQI_RESPONSE_IU_AIO_PATH_DISABLED: 2724 pqi_aio_path_disabled(io_request); 2725 io_request->status = -EAGAIN; 2726 break; 2727 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: 2728 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: 2729 io_request->error_info = ctrl_info->error_buffer + 2730 (get_unaligned_le16(&response->error_index) * 2731 PQI_ERROR_BUFFER_ELEMENT_LENGTH); 2732 pqi_process_io_error(response->header.iu_type, 2733 io_request); 2734 break; 2735 default: 2736 dev_err(&ctrl_info->pci_dev->dev, 2737 "unexpected IU type: 0x%x\n", 2738 response->header.iu_type); 2739 break; 2740 } 2741 2742 io_request->io_complete_callback(io_request, 2743 io_request->context); 2744 2745 /* 2746 * Note that the I/O request structure CANNOT BE TOUCHED after 2747 * returning from the I/O completion callback! 
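 * (the owner of the callback is free to re-queue or free the request
 * from inside the callback)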
2748 */ 2749 2750 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq; 2751 } 2752 2753 if (num_responses) { 2754 queue_group->oq_ci_copy = oq_ci; 2755 writel(oq_ci, queue_group->oq_ci); 2756 } 2757 2758 return num_responses; 2759 } 2760 2761 static inline unsigned int pqi_num_elements_free(unsigned int pi, 2762 unsigned int ci, unsigned int elements_in_queue) 2763 { 2764 unsigned int num_elements_used; 2765 2766 if (pi >= ci) 2767 num_elements_used = pi - ci; 2768 else 2769 num_elements_used = elements_in_queue - ci + pi; 2770 2771 return elements_in_queue - num_elements_used - 1; 2772 } 2773 2774 static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, 2775 struct pqi_event_acknowledge_request *iu, size_t iu_length) 2776 { 2777 pqi_index_t iq_pi; 2778 pqi_index_t iq_ci; 2779 unsigned long flags; 2780 void *next_element; 2781 struct pqi_queue_group *queue_group; 2782 2783 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP]; 2784 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id); 2785 2786 while (1) { 2787 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); 2788 2789 iq_pi = queue_group->iq_pi_copy[RAID_PATH]; 2790 iq_ci = readl(queue_group->iq_ci[RAID_PATH]); 2791 2792 if (pqi_num_elements_free(iq_pi, iq_ci, 2793 ctrl_info->num_elements_per_iq)) 2794 break; 2795 2796 spin_unlock_irqrestore( 2797 &queue_group->submit_lock[RAID_PATH], flags); 2798 2799 if (pqi_ctrl_offline(ctrl_info)) 2800 return; 2801 } 2802 2803 next_element = queue_group->iq_element_array[RAID_PATH] + 2804 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 2805 2806 memcpy(next_element, iu, iu_length); 2807 2808 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq; 2809 queue_group->iq_pi_copy[RAID_PATH] = iq_pi; 2810 2811 /* 2812 * This write notifies the controller that an IU is available to be 2813 * processed. 
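 * The controller consumes inbound queue elements up to, but not
 * including, the producer index written here.  pqi_num_elements_free()
 * above always leaves one element unused so that a full queue can be
 * distinguished from an empty one; e.g. with 32 elements, pi == 5 and
 * ci == 2 leave 28 free slots.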
2814 */ 2815 writel(iq_pi, queue_group->iq_pi[RAID_PATH]); 2816 2817 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags); 2818 } 2819 2820 static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info, 2821 struct pqi_event *event) 2822 { 2823 struct pqi_event_acknowledge_request request; 2824 2825 memset(&request, 0, sizeof(request)); 2826 2827 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT; 2828 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 2829 &request.header.iu_length); 2830 request.event_type = event->event_type; 2831 request.event_id = event->event_id; 2832 request.additional_event_id = event->additional_event_id; 2833 2834 pqi_send_event_ack(ctrl_info, &request, sizeof(request)); 2835 } 2836 2837 static void pqi_event_worker(struct work_struct *work) 2838 { 2839 unsigned int i; 2840 struct pqi_ctrl_info *ctrl_info; 2841 struct pqi_event *event; 2842 2843 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work); 2844 2845 pqi_ctrl_busy(ctrl_info); 2846 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT); 2847 if (pqi_ctrl_offline(ctrl_info)) 2848 goto out; 2849 2850 pqi_schedule_rescan_worker_delayed(ctrl_info); 2851 2852 event = ctrl_info->events; 2853 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) { 2854 if (event->pending) { 2855 event->pending = false; 2856 pqi_acknowledge_event(ctrl_info, event); 2857 } 2858 event++; 2859 } 2860 2861 out: 2862 pqi_ctrl_unbusy(ctrl_info); 2863 } 2864 2865 #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) 2866 2867 static void pqi_heartbeat_timer_handler(struct timer_list *t) 2868 { 2869 int num_interrupts; 2870 u32 heartbeat_count; 2871 struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, 2872 heartbeat_timer); 2873 2874 pqi_check_ctrl_health(ctrl_info); 2875 if (pqi_ctrl_offline(ctrl_info)) 2876 return; 2877 2878 num_interrupts = atomic_read(&ctrl_info->num_interrupts); 2879 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info); 2880 2881 if (num_interrupts == ctrl_info->previous_num_interrupts) { 2882 if (heartbeat_count == ctrl_info->previous_heartbeat_count) { 2883 dev_err(&ctrl_info->pci_dev->dev, 2884 "no heartbeat detected - last heartbeat count: %u\n", 2885 heartbeat_count); 2886 pqi_take_ctrl_offline(ctrl_info); 2887 return; 2888 } 2889 } else { 2890 ctrl_info->previous_num_interrupts = num_interrupts; 2891 } 2892 2893 ctrl_info->previous_heartbeat_count = heartbeat_count; 2894 mod_timer(&ctrl_info->heartbeat_timer, 2895 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL); 2896 } 2897 2898 static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2899 { 2900 if (!ctrl_info->heartbeat_counter) 2901 return; 2902 2903 ctrl_info->previous_num_interrupts = 2904 atomic_read(&ctrl_info->num_interrupts); 2905 ctrl_info->previous_heartbeat_count = 2906 pqi_read_heartbeat_counter(ctrl_info); 2907 2908 ctrl_info->heartbeat_timer.expires = 2909 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; 2910 add_timer(&ctrl_info->heartbeat_timer); 2911 } 2912 2913 static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) 2914 { 2915 del_timer_sync(&ctrl_info->heartbeat_timer); 2916 } 2917 2918 static inline int pqi_event_type_to_event_index(unsigned int event_type) 2919 { 2920 int index; 2921 2922 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++) 2923 if (event_type == pqi_supported_event_types[index]) 2924 return index; 2925 2926 return -1; 2927 } 2928 2929 static inline bool pqi_is_supported_event(unsigned int event_type) 2930 { 2931 return 
pqi_event_type_to_event_index(event_type) != -1; 2932 } 2933 2934 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) 2935 { 2936 unsigned int num_events; 2937 pqi_index_t oq_pi; 2938 pqi_index_t oq_ci; 2939 struct pqi_event_queue *event_queue; 2940 struct pqi_event_response *response; 2941 struct pqi_event *event; 2942 int event_index; 2943 2944 event_queue = &ctrl_info->event_queue; 2945 num_events = 0; 2946 oq_ci = event_queue->oq_ci_copy; 2947 2948 while (1) { 2949 oq_pi = readl(event_queue->oq_pi); 2950 if (oq_pi == oq_ci) 2951 break; 2952 2953 num_events++; 2954 response = event_queue->oq_element_array + 2955 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH); 2956 2957 event_index = 2958 pqi_event_type_to_event_index(response->event_type); 2959 2960 if (event_index >= 0) { 2961 if (response->request_acknowlege) { 2962 event = &ctrl_info->events[event_index]; 2963 event->pending = true; 2964 event->event_type = response->event_type; 2965 event->event_id = response->event_id; 2966 event->additional_event_id = 2967 response->additional_event_id; 2968 } 2969 } 2970 2971 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS; 2972 } 2973 2974 if (num_events) { 2975 event_queue->oq_ci_copy = oq_ci; 2976 writel(oq_ci, event_queue->oq_ci); 2977 schedule_work(&ctrl_info->event_work); 2978 } 2979 2980 return num_events; 2981 } 2982 2983 #define PQI_LEGACY_INTX_MASK 0x1 2984 2985 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, 2986 bool enable_intx) 2987 { 2988 u32 intx_mask; 2989 struct pqi_device_registers __iomem *pqi_registers; 2990 volatile void __iomem *register_addr; 2991 2992 pqi_registers = ctrl_info->pqi_registers; 2993 2994 if (enable_intx) 2995 register_addr = &pqi_registers->legacy_intx_mask_clear; 2996 else 2997 register_addr = &pqi_registers->legacy_intx_mask_set; 2998 2999 intx_mask = readl(register_addr); 3000 intx_mask |= PQI_LEGACY_INTX_MASK; 3001 writel(intx_mask, register_addr); 3002 } 3003 3004 static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info, 3005 enum pqi_irq_mode new_mode) 3006 { 3007 switch (ctrl_info->irq_mode) { 3008 case IRQ_MODE_MSIX: 3009 switch (new_mode) { 3010 case IRQ_MODE_MSIX: 3011 break; 3012 case IRQ_MODE_INTX: 3013 pqi_configure_legacy_intx(ctrl_info, true); 3014 sis_enable_intx(ctrl_info); 3015 break; 3016 case IRQ_MODE_NONE: 3017 break; 3018 } 3019 break; 3020 case IRQ_MODE_INTX: 3021 switch (new_mode) { 3022 case IRQ_MODE_MSIX: 3023 pqi_configure_legacy_intx(ctrl_info, false); 3024 sis_enable_msix(ctrl_info); 3025 break; 3026 case IRQ_MODE_INTX: 3027 break; 3028 case IRQ_MODE_NONE: 3029 pqi_configure_legacy_intx(ctrl_info, false); 3030 break; 3031 } 3032 break; 3033 case IRQ_MODE_NONE: 3034 switch (new_mode) { 3035 case IRQ_MODE_MSIX: 3036 sis_enable_msix(ctrl_info); 3037 break; 3038 case IRQ_MODE_INTX: 3039 pqi_configure_legacy_intx(ctrl_info, true); 3040 sis_enable_intx(ctrl_info); 3041 break; 3042 case IRQ_MODE_NONE: 3043 break; 3044 } 3045 break; 3046 } 3047 3048 ctrl_info->irq_mode = new_mode; 3049 } 3050 3051 #define PQI_LEGACY_INTX_PENDING 0x1 3052 3053 static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info) 3054 { 3055 bool valid_irq; 3056 u32 intx_status; 3057 3058 switch (ctrl_info->irq_mode) { 3059 case IRQ_MODE_MSIX: 3060 valid_irq = true; 3061 break; 3062 case IRQ_MODE_INTX: 3063 intx_status = 3064 readl(&ctrl_info->pqi_registers->legacy_intx_status); 3065 if (intx_status & PQI_LEGACY_INTX_PENDING) 3066 valid_irq = true; 3067 else 3068 valid_irq = false; 3069 break; 3070 
case IRQ_MODE_NONE: 3071 default: 3072 valid_irq = false; 3073 break; 3074 } 3075 3076 return valid_irq; 3077 } 3078 3079 static irqreturn_t pqi_irq_handler(int irq, void *data) 3080 { 3081 struct pqi_ctrl_info *ctrl_info; 3082 struct pqi_queue_group *queue_group; 3083 unsigned int num_responses_handled; 3084 3085 queue_group = data; 3086 ctrl_info = queue_group->ctrl_info; 3087 3088 if (!pqi_is_valid_irq(ctrl_info)) 3089 return IRQ_NONE; 3090 3091 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group); 3092 3093 if (irq == ctrl_info->event_irq) 3094 num_responses_handled += pqi_process_event_intr(ctrl_info); 3095 3096 if (num_responses_handled) 3097 atomic_inc(&ctrl_info->num_interrupts); 3098 3099 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL); 3100 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL); 3101 3102 return IRQ_HANDLED; 3103 } 3104 3105 static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info) 3106 { 3107 struct pci_dev *pci_dev = ctrl_info->pci_dev; 3108 int i; 3109 int rc; 3110 3111 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0); 3112 3113 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) { 3114 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0, 3115 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]); 3116 if (rc) { 3117 dev_err(&pci_dev->dev, 3118 "irq %u init failed with error %d\n", 3119 pci_irq_vector(pci_dev, i), rc); 3120 return rc; 3121 } 3122 ctrl_info->num_msix_vectors_initialized++; 3123 } 3124 3125 return 0; 3126 } 3127 3128 static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info) 3129 { 3130 int i; 3131 3132 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) 3133 free_irq(pci_irq_vector(ctrl_info->pci_dev, i), 3134 &ctrl_info->queue_groups[i]); 3135 3136 ctrl_info->num_msix_vectors_initialized = 0; 3137 } 3138 3139 static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3140 { 3141 int num_vectors_enabled; 3142 3143 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev, 3144 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups, 3145 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); 3146 if (num_vectors_enabled < 0) { 3147 dev_err(&ctrl_info->pci_dev->dev, 3148 "MSI-X init failed with error %d\n", 3149 num_vectors_enabled); 3150 return num_vectors_enabled; 3151 } 3152 3153 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled; 3154 ctrl_info->irq_mode = IRQ_MODE_MSIX; 3155 return 0; 3156 } 3157 3158 static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info) 3159 { 3160 if (ctrl_info->num_msix_vectors_enabled) { 3161 pci_free_irq_vectors(ctrl_info->pci_dev); 3162 ctrl_info->num_msix_vectors_enabled = 0; 3163 } 3164 } 3165 3166 static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) 3167 { 3168 unsigned int i; 3169 size_t alloc_length; 3170 size_t element_array_length_per_iq; 3171 size_t element_array_length_per_oq; 3172 void *element_array; 3173 void __iomem *next_queue_index; 3174 void *aligned_pointer; 3175 unsigned int num_inbound_queues; 3176 unsigned int num_outbound_queues; 3177 unsigned int num_queue_indexes; 3178 struct pqi_queue_group *queue_group; 3179 3180 element_array_length_per_iq = 3181 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH * 3182 ctrl_info->num_elements_per_iq; 3183 element_array_length_per_oq = 3184 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH * 3185 ctrl_info->num_elements_per_oq; 3186 num_inbound_queues = ctrl_info->num_queue_groups * 2; 3187 num_outbound_queues = ctrl_info->num_queue_groups; 3188 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1; 3189 
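	/*
	 * First pass: walk the queue layout with a NULL base pointer purely
	 * to compute the total, alignment-padded allocation size.  The
	 * second pass below repeats the walk over the real DMA buffer to
	 * assign the element arrays and queue indexes.
	 */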
3190 aligned_pointer = NULL; 3191 3192 for (i = 0; i < num_inbound_queues; i++) { 3193 aligned_pointer = PTR_ALIGN(aligned_pointer, 3194 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3195 aligned_pointer += element_array_length_per_iq; 3196 } 3197 3198 for (i = 0; i < num_outbound_queues; i++) { 3199 aligned_pointer = PTR_ALIGN(aligned_pointer, 3200 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3201 aligned_pointer += element_array_length_per_oq; 3202 } 3203 3204 aligned_pointer = PTR_ALIGN(aligned_pointer, 3205 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3206 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3207 PQI_EVENT_OQ_ELEMENT_LENGTH; 3208 3209 for (i = 0; i < num_queue_indexes; i++) { 3210 aligned_pointer = PTR_ALIGN(aligned_pointer, 3211 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3212 aligned_pointer += sizeof(pqi_index_t); 3213 } 3214 3215 alloc_length = (size_t)aligned_pointer + 3216 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3217 3218 alloc_length += PQI_EXTRA_SGL_MEMORY; 3219 3220 ctrl_info->queue_memory_base = 3221 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3222 alloc_length, 3223 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL); 3224 3225 if (!ctrl_info->queue_memory_base) 3226 return -ENOMEM; 3227 3228 ctrl_info->queue_memory_length = alloc_length; 3229 3230 element_array = PTR_ALIGN(ctrl_info->queue_memory_base, 3231 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3232 3233 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3234 queue_group = &ctrl_info->queue_groups[i]; 3235 queue_group->iq_element_array[RAID_PATH] = element_array; 3236 queue_group->iq_element_array_bus_addr[RAID_PATH] = 3237 ctrl_info->queue_memory_base_dma_handle + 3238 (element_array - ctrl_info->queue_memory_base); 3239 element_array += element_array_length_per_iq; 3240 element_array = PTR_ALIGN(element_array, 3241 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3242 queue_group->iq_element_array[AIO_PATH] = element_array; 3243 queue_group->iq_element_array_bus_addr[AIO_PATH] = 3244 ctrl_info->queue_memory_base_dma_handle + 3245 (element_array - ctrl_info->queue_memory_base); 3246 element_array += element_array_length_per_iq; 3247 element_array = PTR_ALIGN(element_array, 3248 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3249 } 3250 3251 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3252 queue_group = &ctrl_info->queue_groups[i]; 3253 queue_group->oq_element_array = element_array; 3254 queue_group->oq_element_array_bus_addr = 3255 ctrl_info->queue_memory_base_dma_handle + 3256 (element_array - ctrl_info->queue_memory_base); 3257 element_array += element_array_length_per_oq; 3258 element_array = PTR_ALIGN(element_array, 3259 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3260 } 3261 3262 ctrl_info->event_queue.oq_element_array = element_array; 3263 ctrl_info->event_queue.oq_element_array_bus_addr = 3264 ctrl_info->queue_memory_base_dma_handle + 3265 (element_array - ctrl_info->queue_memory_base); 3266 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * 3267 PQI_EVENT_OQ_ELEMENT_LENGTH; 3268 3269 next_queue_index = (void __iomem *)PTR_ALIGN(element_array, 3270 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3271 3272 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3273 queue_group = &ctrl_info->queue_groups[i]; 3274 queue_group->iq_ci[RAID_PATH] = next_queue_index; 3275 queue_group->iq_ci_bus_addr[RAID_PATH] = 3276 ctrl_info->queue_memory_base_dma_handle + 3277 (next_queue_index - 3278 (void __iomem *)ctrl_info->queue_memory_base); 3279 next_queue_index += sizeof(pqi_index_t); 3280 next_queue_index = PTR_ALIGN(next_queue_index, 3281 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3282 
queue_group->iq_ci[AIO_PATH] = next_queue_index; 3283 queue_group->iq_ci_bus_addr[AIO_PATH] = 3284 ctrl_info->queue_memory_base_dma_handle + 3285 (next_queue_index - 3286 (void __iomem *)ctrl_info->queue_memory_base); 3287 next_queue_index += sizeof(pqi_index_t); 3288 next_queue_index = PTR_ALIGN(next_queue_index, 3289 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3290 queue_group->oq_pi = next_queue_index; 3291 queue_group->oq_pi_bus_addr = 3292 ctrl_info->queue_memory_base_dma_handle + 3293 (next_queue_index - 3294 (void __iomem *)ctrl_info->queue_memory_base); 3295 next_queue_index += sizeof(pqi_index_t); 3296 next_queue_index = PTR_ALIGN(next_queue_index, 3297 PQI_OPERATIONAL_INDEX_ALIGNMENT); 3298 } 3299 3300 ctrl_info->event_queue.oq_pi = next_queue_index; 3301 ctrl_info->event_queue.oq_pi_bus_addr = 3302 ctrl_info->queue_memory_base_dma_handle + 3303 (next_queue_index - 3304 (void __iomem *)ctrl_info->queue_memory_base); 3305 3306 return 0; 3307 } 3308 3309 static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info) 3310 { 3311 unsigned int i; 3312 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3313 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID; 3314 3315 /* 3316 * Initialize the backpointers to the controller structure in 3317 * each operational queue group structure. 3318 */ 3319 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3320 ctrl_info->queue_groups[i].ctrl_info = ctrl_info; 3321 3322 /* 3323 * Assign IDs to all operational queues. Note that the IDs 3324 * assigned to operational IQs are independent of the IDs 3325 * assigned to operational OQs. 3326 */ 3327 ctrl_info->event_queue.oq_id = next_oq_id++; 3328 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3329 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++; 3330 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++; 3331 ctrl_info->queue_groups[i].oq_id = next_oq_id++; 3332 } 3333 3334 /* 3335 * Assign MSI-X table entry indexes to all queues. Note that the 3336 * interrupt for the event queue is shared with the first queue group. 
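 * (both are given MSI-X message number 0 below)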
3337 */ 3338 ctrl_info->event_queue.int_msg_num = 0; 3339 for (i = 0; i < ctrl_info->num_queue_groups; i++) 3340 ctrl_info->queue_groups[i].int_msg_num = i; 3341 3342 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 3343 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]); 3344 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]); 3345 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]); 3346 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]); 3347 } 3348 } 3349 3350 static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) 3351 { 3352 size_t alloc_length; 3353 struct pqi_admin_queues_aligned *admin_queues_aligned; 3354 struct pqi_admin_queues *admin_queues; 3355 3356 alloc_length = sizeof(struct pqi_admin_queues_aligned) + 3357 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT; 3358 3359 ctrl_info->admin_queue_memory_base = 3360 dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 3361 alloc_length, 3362 &ctrl_info->admin_queue_memory_base_dma_handle, 3363 GFP_KERNEL); 3364 3365 if (!ctrl_info->admin_queue_memory_base) 3366 return -ENOMEM; 3367 3368 ctrl_info->admin_queue_memory_length = alloc_length; 3369 3370 admin_queues = &ctrl_info->admin_queues; 3371 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base, 3372 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT); 3373 admin_queues->iq_element_array = 3374 &admin_queues_aligned->iq_element_array; 3375 admin_queues->oq_element_array = 3376 &admin_queues_aligned->oq_element_array; 3377 admin_queues->iq_ci = &admin_queues_aligned->iq_ci; 3378 admin_queues->oq_pi = 3379 (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; 3380 3381 admin_queues->iq_element_array_bus_addr = 3382 ctrl_info->admin_queue_memory_base_dma_handle + 3383 (admin_queues->iq_element_array - 3384 ctrl_info->admin_queue_memory_base); 3385 admin_queues->oq_element_array_bus_addr = 3386 ctrl_info->admin_queue_memory_base_dma_handle + 3387 (admin_queues->oq_element_array - 3388 ctrl_info->admin_queue_memory_base); 3389 admin_queues->iq_ci_bus_addr = 3390 ctrl_info->admin_queue_memory_base_dma_handle + 3391 ((void *)admin_queues->iq_ci - 3392 ctrl_info->admin_queue_memory_base); 3393 admin_queues->oq_pi_bus_addr = 3394 ctrl_info->admin_queue_memory_base_dma_handle + 3395 ((void __iomem *)admin_queues->oq_pi - 3396 (void __iomem *)ctrl_info->admin_queue_memory_base); 3397 3398 return 0; 3399 } 3400 3401 #define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ 3402 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1 3403 3404 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) 3405 { 3406 struct pqi_device_registers __iomem *pqi_registers; 3407 struct pqi_admin_queues *admin_queues; 3408 unsigned long timeout; 3409 u8 status; 3410 u32 reg; 3411 3412 pqi_registers = ctrl_info->pqi_registers; 3413 admin_queues = &ctrl_info->admin_queues; 3414 3415 writeq((u64)admin_queues->iq_element_array_bus_addr, 3416 &pqi_registers->admin_iq_element_array_addr); 3417 writeq((u64)admin_queues->oq_element_array_bus_addr, 3418 &pqi_registers->admin_oq_element_array_addr); 3419 writeq((u64)admin_queues->iq_ci_bus_addr, 3420 &pqi_registers->admin_iq_ci_addr); 3421 writeq((u64)admin_queues->oq_pi_bus_addr, 3422 &pqi_registers->admin_oq_pi_addr); 3423 3424 reg = PQI_ADMIN_IQ_NUM_ELEMENTS | 3425 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | 3426 (admin_queues->int_msg_num << 16); 3427 writel(reg, &pqi_registers->admin_iq_num_elements); 3428 writel(PQI_CREATE_ADMIN_QUEUE_PAIR, 3429 &pqi_registers->function_and_status_code); 3430 3431 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + 
jiffies; 3432 while (1) { 3433 status = readb(&pqi_registers->function_and_status_code); 3434 if (status == PQI_STATUS_IDLE) 3435 break; 3436 if (time_after(jiffies, timeout)) 3437 return -ETIMEDOUT; 3438 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS); 3439 } 3440 3441 /* 3442 * The offset registers are not initialized to the correct 3443 * offsets until *after* the create admin queue pair command 3444 * completes successfully. 3445 */ 3446 admin_queues->iq_pi = ctrl_info->iomem_base + 3447 PQI_DEVICE_REGISTERS_OFFSET + 3448 readq(&pqi_registers->admin_iq_pi_offset); 3449 admin_queues->oq_ci = ctrl_info->iomem_base + 3450 PQI_DEVICE_REGISTERS_OFFSET + 3451 readq(&pqi_registers->admin_oq_ci_offset); 3452 3453 return 0; 3454 } 3455 3456 static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info, 3457 struct pqi_general_admin_request *request) 3458 { 3459 struct pqi_admin_queues *admin_queues; 3460 void *next_element; 3461 pqi_index_t iq_pi; 3462 3463 admin_queues = &ctrl_info->admin_queues; 3464 iq_pi = admin_queues->iq_pi_copy; 3465 3466 next_element = admin_queues->iq_element_array + 3467 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH); 3468 3469 memcpy(next_element, request, sizeof(*request)); 3470 3471 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS; 3472 admin_queues->iq_pi_copy = iq_pi; 3473 3474 /* 3475 * This write notifies the controller that an IU is available to be 3476 * processed. 3477 */ 3478 writel(iq_pi, admin_queues->iq_pi); 3479 } 3480 3481 #define PQI_ADMIN_REQUEST_TIMEOUT_SECS 60 3482 3483 static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, 3484 struct pqi_general_admin_response *response) 3485 { 3486 struct pqi_admin_queues *admin_queues; 3487 pqi_index_t oq_pi; 3488 pqi_index_t oq_ci; 3489 unsigned long timeout; 3490 3491 admin_queues = &ctrl_info->admin_queues; 3492 oq_ci = admin_queues->oq_ci_copy; 3493 3494 timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; 3495 3496 while (1) { 3497 oq_pi = readl(admin_queues->oq_pi); 3498 if (oq_pi != oq_ci) 3499 break; 3500 if (time_after(jiffies, timeout)) { 3501 dev_err(&ctrl_info->pci_dev->dev, 3502 "timed out waiting for admin response\n"); 3503 return -ETIMEDOUT; 3504 } 3505 if (!sis_is_firmware_running(ctrl_info)) 3506 return -ENXIO; 3507 usleep_range(1000, 2000); 3508 } 3509 3510 memcpy(response, admin_queues->oq_element_array + 3511 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response)); 3512 3513 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS; 3514 admin_queues->oq_ci_copy = oq_ci; 3515 writel(oq_ci, admin_queues->oq_ci); 3516 3517 return 0; 3518 } 3519 3520 static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, 3521 struct pqi_queue_group *queue_group, enum pqi_io_path path, 3522 struct pqi_io_request *io_request) 3523 { 3524 struct pqi_io_request *next; 3525 void *next_element; 3526 pqi_index_t iq_pi; 3527 pqi_index_t iq_ci; 3528 size_t iu_length; 3529 unsigned long flags; 3530 unsigned int num_elements_needed; 3531 unsigned int num_elements_to_end_of_queue; 3532 size_t copy_count; 3533 struct pqi_iu_header *request; 3534 3535 spin_lock_irqsave(&queue_group->submit_lock[path], flags); 3536 3537 if (io_request) { 3538 io_request->queue_group = queue_group; 3539 list_add_tail(&io_request->request_list_entry, 3540 &queue_group->request_list[path]); 3541 } 3542 3543 iq_pi = queue_group->iq_pi_copy[path]; 3544 3545 list_for_each_entry_safe(io_request, next, 3546 &queue_group->request_list[path], request_list_entry) { 3547 3548 request = io_request->iu; 3549 3550 iu_length = 
get_unaligned_le16(&request->iu_length) + 3551 PQI_REQUEST_HEADER_LENGTH; 3552 num_elements_needed = 3553 DIV_ROUND_UP(iu_length, 3554 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3555 3556 iq_ci = readl(queue_group->iq_ci[path]); 3557 3558 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, 3559 ctrl_info->num_elements_per_iq)) 3560 break; 3561 3562 put_unaligned_le16(queue_group->oq_id, 3563 &request->response_queue_id); 3564 3565 next_element = queue_group->iq_element_array[path] + 3566 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3567 3568 num_elements_to_end_of_queue = 3569 ctrl_info->num_elements_per_iq - iq_pi; 3570 3571 if (num_elements_needed <= num_elements_to_end_of_queue) { 3572 memcpy(next_element, request, iu_length); 3573 } else { 3574 copy_count = num_elements_to_end_of_queue * 3575 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 3576 memcpy(next_element, request, copy_count); 3577 memcpy(queue_group->iq_element_array[path], 3578 (u8 *)request + copy_count, 3579 iu_length - copy_count); 3580 } 3581 3582 iq_pi = (iq_pi + num_elements_needed) % 3583 ctrl_info->num_elements_per_iq; 3584 3585 list_del(&io_request->request_list_entry); 3586 } 3587 3588 if (iq_pi != queue_group->iq_pi_copy[path]) { 3589 queue_group->iq_pi_copy[path] = iq_pi; 3590 /* 3591 * This write notifies the controller that one or more IUs are 3592 * available to be processed. 3593 */ 3594 writel(iq_pi, queue_group->iq_pi[path]); 3595 } 3596 3597 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags); 3598 } 3599 3600 #define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10 3601 3602 static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info, 3603 struct completion *wait) 3604 { 3605 int rc; 3606 3607 while (1) { 3608 if (wait_for_completion_io_timeout(wait, 3609 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) { 3610 rc = 0; 3611 break; 3612 } 3613 3614 pqi_check_ctrl_health(ctrl_info); 3615 if (pqi_ctrl_offline(ctrl_info)) { 3616 rc = -ENXIO; 3617 break; 3618 } 3619 } 3620 3621 return rc; 3622 } 3623 3624 static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, 3625 void *context) 3626 { 3627 struct completion *waiting = context; 3628 3629 complete(waiting); 3630 } 3631 3632 static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info 3633 *error_info) 3634 { 3635 int rc = -EIO; 3636 3637 switch (error_info->data_out_result) { 3638 case PQI_DATA_IN_OUT_GOOD: 3639 if (error_info->status == SAM_STAT_GOOD) 3640 rc = 0; 3641 break; 3642 case PQI_DATA_IN_OUT_UNDERFLOW: 3643 if (error_info->status == SAM_STAT_GOOD || 3644 error_info->status == SAM_STAT_CHECK_CONDITION) 3645 rc = 0; 3646 break; 3647 case PQI_DATA_IN_OUT_ABORTED: 3648 rc = PQI_CMD_STATUS_ABORTED; 3649 break; 3650 } 3651 3652 return rc; 3653 } 3654 3655 static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, 3656 struct pqi_iu_header *request, unsigned int flags, 3657 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) 3658 { 3659 int rc = 0; 3660 struct pqi_io_request *io_request; 3661 unsigned long start_jiffies; 3662 unsigned long msecs_blocked; 3663 size_t iu_length; 3664 DECLARE_COMPLETION_ONSTACK(wait); 3665 3666 /* 3667 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value 3668 * are mutually exclusive. 
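 * Callers that pass PQI_SYNC_FLAGS_INTERRUPTABLE are expected to use
 * NO_TIMEOUT; only the uninterruptible branch applies the timeout to
 * the semaphore acquisition below.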
3669 */ 3670 3671 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) { 3672 if (down_interruptible(&ctrl_info->sync_request_sem)) 3673 return -ERESTARTSYS; 3674 } else { 3675 if (timeout_msecs == NO_TIMEOUT) { 3676 down(&ctrl_info->sync_request_sem); 3677 } else { 3678 start_jiffies = jiffies; 3679 if (down_timeout(&ctrl_info->sync_request_sem, 3680 msecs_to_jiffies(timeout_msecs))) 3681 return -ETIMEDOUT; 3682 msecs_blocked = 3683 jiffies_to_msecs(jiffies - start_jiffies); 3684 if (msecs_blocked >= timeout_msecs) 3685 return -ETIMEDOUT; 3686 timeout_msecs -= msecs_blocked; 3687 } 3688 } 3689 3690 pqi_ctrl_busy(ctrl_info); 3691 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); 3692 if (timeout_msecs == 0) { 3693 pqi_ctrl_unbusy(ctrl_info); 3694 rc = -ETIMEDOUT; 3695 goto out; 3696 } 3697 3698 if (pqi_ctrl_offline(ctrl_info)) { 3699 pqi_ctrl_unbusy(ctrl_info); 3700 rc = -ENXIO; 3701 goto out; 3702 } 3703 3704 io_request = pqi_alloc_io_request(ctrl_info); 3705 3706 put_unaligned_le16(io_request->index, 3707 &(((struct pqi_raid_path_request *)request)->request_id)); 3708 3709 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) 3710 ((struct pqi_raid_path_request *)request)->error_index = 3711 ((struct pqi_raid_path_request *)request)->request_id; 3712 3713 iu_length = get_unaligned_le16(&request->iu_length) + 3714 PQI_REQUEST_HEADER_LENGTH; 3715 memcpy(io_request->iu, request, iu_length); 3716 3717 io_request->io_complete_callback = pqi_raid_synchronous_complete; 3718 io_request->context = &wait; 3719 3720 pqi_start_io(ctrl_info, 3721 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 3722 io_request); 3723 3724 pqi_ctrl_unbusy(ctrl_info); 3725 3726 if (timeout_msecs == NO_TIMEOUT) { 3727 pqi_wait_for_completion_io(ctrl_info, &wait); 3728 } else { 3729 if (!wait_for_completion_io_timeout(&wait, 3730 msecs_to_jiffies(timeout_msecs))) { 3731 dev_warn(&ctrl_info->pci_dev->dev, 3732 "command timed out\n"); 3733 rc = -ETIMEDOUT; 3734 } 3735 } 3736 3737 if (error_info) { 3738 if (io_request->error_info) 3739 memcpy(error_info, io_request->error_info, 3740 sizeof(*error_info)); 3741 else 3742 memset(error_info, 0, sizeof(*error_info)); 3743 } else if (rc == 0 && io_request->error_info) { 3744 rc = pqi_process_raid_io_error_synchronous( 3745 io_request->error_info); 3746 } 3747 3748 pqi_free_io_request(io_request); 3749 3750 out: 3751 up(&ctrl_info->sync_request_sem); 3752 3753 return rc; 3754 } 3755 3756 static int pqi_validate_admin_response( 3757 struct pqi_general_admin_response *response, u8 expected_function_code) 3758 { 3759 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN) 3760 return -EINVAL; 3761 3762 if (get_unaligned_le16(&response->header.iu_length) != 3763 PQI_GENERAL_ADMIN_IU_LENGTH) 3764 return -EINVAL; 3765 3766 if (response->function_code != expected_function_code) 3767 return -EINVAL; 3768 3769 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) 3770 return -EINVAL; 3771 3772 return 0; 3773 } 3774 3775 static int pqi_submit_admin_request_synchronous( 3776 struct pqi_ctrl_info *ctrl_info, 3777 struct pqi_general_admin_request *request, 3778 struct pqi_general_admin_response *response) 3779 { 3780 int rc; 3781 3782 pqi_submit_admin_request(ctrl_info, request); 3783 3784 rc = pqi_poll_for_admin_response(ctrl_info, response); 3785 3786 if (rc == 0) 3787 rc = pqi_validate_admin_response(response, 3788 request->function_code); 3789 3790 return rc; 3791 } 3792 3793 static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info) 3794 { 3795 
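	/*
	 * Issue a REPORT DEVICE CAPABILITY admin request into a DMA-mapped
	 * buffer and cache the controller's queue and IU limits in
	 * ctrl_info for later validation and queue creation.
	 */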
int rc; 3796 struct pqi_general_admin_request request; 3797 struct pqi_general_admin_response response; 3798 struct pqi_device_capability *capability; 3799 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor; 3800 3801 capability = kmalloc(sizeof(*capability), GFP_KERNEL); 3802 if (!capability) 3803 return -ENOMEM; 3804 3805 memset(&request, 0, sizeof(request)); 3806 3807 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3808 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3809 &request.header.iu_length); 3810 request.function_code = 3811 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY; 3812 put_unaligned_le32(sizeof(*capability), 3813 &request.data.report_device_capability.buffer_length); 3814 3815 rc = pqi_map_single(ctrl_info->pci_dev, 3816 &request.data.report_device_capability.sg_descriptor, 3817 capability, sizeof(*capability), 3818 DMA_FROM_DEVICE); 3819 if (rc) 3820 goto out; 3821 3822 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3823 &response); 3824 3825 pqi_pci_unmap(ctrl_info->pci_dev, 3826 &request.data.report_device_capability.sg_descriptor, 1, 3827 DMA_FROM_DEVICE); 3828 3829 if (rc) 3830 goto out; 3831 3832 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) { 3833 rc = -EIO; 3834 goto out; 3835 } 3836 3837 ctrl_info->max_inbound_queues = 3838 get_unaligned_le16(&capability->max_inbound_queues); 3839 ctrl_info->max_elements_per_iq = 3840 get_unaligned_le16(&capability->max_elements_per_iq); 3841 ctrl_info->max_iq_element_length = 3842 get_unaligned_le16(&capability->max_iq_element_length) 3843 * 16; 3844 ctrl_info->max_outbound_queues = 3845 get_unaligned_le16(&capability->max_outbound_queues); 3846 ctrl_info->max_elements_per_oq = 3847 get_unaligned_le16(&capability->max_elements_per_oq); 3848 ctrl_info->max_oq_element_length = 3849 get_unaligned_le16(&capability->max_oq_element_length) 3850 * 16; 3851 3852 sop_iu_layer_descriptor = 3853 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP]; 3854 3855 ctrl_info->max_inbound_iu_length_per_firmware = 3856 get_unaligned_le16( 3857 &sop_iu_layer_descriptor->max_inbound_iu_length); 3858 ctrl_info->inbound_spanning_supported = 3859 sop_iu_layer_descriptor->inbound_spanning_supported; 3860 ctrl_info->outbound_spanning_supported = 3861 sop_iu_layer_descriptor->outbound_spanning_supported; 3862 3863 out: 3864 kfree(capability); 3865 3866 return rc; 3867 } 3868 3869 static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info) 3870 { 3871 if (ctrl_info->max_iq_element_length < 3872 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3873 dev_err(&ctrl_info->pci_dev->dev, 3874 "max. inbound queue element length of %d is less than the required length of %d\n", 3875 ctrl_info->max_iq_element_length, 3876 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3877 return -EINVAL; 3878 } 3879 3880 if (ctrl_info->max_oq_element_length < 3881 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) { 3882 dev_err(&ctrl_info->pci_dev->dev, 3883 "max. outbound queue element length of %d is less than the required length of %d\n", 3884 ctrl_info->max_oq_element_length, 3885 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH); 3886 return -EINVAL; 3887 } 3888 3889 if (ctrl_info->max_inbound_iu_length_per_firmware < 3890 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) { 3891 dev_err(&ctrl_info->pci_dev->dev, 3892 "max. inbound IU length of %u is less than the min. 
required length of %d\n", 3893 ctrl_info->max_inbound_iu_length_per_firmware, 3894 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 3895 return -EINVAL; 3896 } 3897 3898 if (!ctrl_info->inbound_spanning_supported) { 3899 dev_err(&ctrl_info->pci_dev->dev, 3900 "the controller does not support inbound spanning\n"); 3901 return -EINVAL; 3902 } 3903 3904 if (ctrl_info->outbound_spanning_supported) { 3905 dev_err(&ctrl_info->pci_dev->dev, 3906 "the controller supports outbound spanning but this driver does not\n"); 3907 return -EINVAL; 3908 } 3909 3910 return 0; 3911 } 3912 3913 static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info) 3914 { 3915 int rc; 3916 struct pqi_event_queue *event_queue; 3917 struct pqi_general_admin_request request; 3918 struct pqi_general_admin_response response; 3919 3920 event_queue = &ctrl_info->event_queue; 3921 3922 /* 3923 * Create OQ (Outbound Queue - device to host queue) to dedicate 3924 * to events. 3925 */ 3926 memset(&request, 0, sizeof(request)); 3927 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3928 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3929 &request.header.iu_length); 3930 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 3931 put_unaligned_le16(event_queue->oq_id, 3932 &request.data.create_operational_oq.queue_id); 3933 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr, 3934 &request.data.create_operational_oq.element_array_addr); 3935 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr, 3936 &request.data.create_operational_oq.pi_addr); 3937 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS, 3938 &request.data.create_operational_oq.num_elements); 3939 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16, 3940 &request.data.create_operational_oq.element_length); 3941 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 3942 put_unaligned_le16(event_queue->int_msg_num, 3943 &request.data.create_operational_oq.int_msg_num); 3944 3945 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3946 &response); 3947 if (rc) 3948 return rc; 3949 3950 event_queue->oq_ci = ctrl_info->iomem_base + 3951 PQI_DEVICE_REGISTERS_OFFSET + 3952 get_unaligned_le64( 3953 &response.data.create_operational_oq.oq_ci_offset); 3954 3955 return 0; 3956 } 3957 3958 static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info, 3959 unsigned int group_number) 3960 { 3961 int rc; 3962 struct pqi_queue_group *queue_group; 3963 struct pqi_general_admin_request request; 3964 struct pqi_general_admin_response response; 3965 3966 queue_group = &ctrl_info->queue_groups[group_number]; 3967 3968 /* 3969 * Create IQ (Inbound Queue - host to device queue) for 3970 * RAID path. 
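	 * Each queue group gets its own RAID-path IQ, its own AIO-path IQ,
	 * and a single OQ.  All of them are created with general admin
	 * CREATE_IQ/CREATE_OQ requests, and the iq_pi_offset/oq_ci_offset
	 * values returned by the firmware are added to
	 * PQI_DEVICE_REGISTERS_OFFSET on top of iomem_base to locate the
	 * queue index registers.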
3971 */ 3972 memset(&request, 0, sizeof(request)); 3973 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 3974 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 3975 &request.header.iu_length); 3976 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 3977 put_unaligned_le16(queue_group->iq_id[RAID_PATH], 3978 &request.data.create_operational_iq.queue_id); 3979 put_unaligned_le64( 3980 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH], 3981 &request.data.create_operational_iq.element_array_addr); 3982 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH], 3983 &request.data.create_operational_iq.ci_addr); 3984 put_unaligned_le16(ctrl_info->num_elements_per_iq, 3985 &request.data.create_operational_iq.num_elements); 3986 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 3987 &request.data.create_operational_iq.element_length); 3988 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 3989 3990 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 3991 &response); 3992 if (rc) { 3993 dev_err(&ctrl_info->pci_dev->dev, 3994 "error creating inbound RAID queue\n"); 3995 return rc; 3996 } 3997 3998 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base + 3999 PQI_DEVICE_REGISTERS_OFFSET + 4000 get_unaligned_le64( 4001 &response.data.create_operational_iq.iq_pi_offset); 4002 4003 /* 4004 * Create IQ (Inbound Queue - host to device queue) for 4005 * Advanced I/O (AIO) path. 4006 */ 4007 memset(&request, 0, sizeof(request)); 4008 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4009 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4010 &request.header.iu_length); 4011 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ; 4012 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4013 &request.data.create_operational_iq.queue_id); 4014 put_unaligned_le64((u64)queue_group-> 4015 iq_element_array_bus_addr[AIO_PATH], 4016 &request.data.create_operational_iq.element_array_addr); 4017 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH], 4018 &request.data.create_operational_iq.ci_addr); 4019 put_unaligned_le16(ctrl_info->num_elements_per_iq, 4020 &request.data.create_operational_iq.num_elements); 4021 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16, 4022 &request.data.create_operational_iq.element_length); 4023 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP; 4024 4025 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4026 &response); 4027 if (rc) { 4028 dev_err(&ctrl_info->pci_dev->dev, 4029 "error creating inbound AIO queue\n"); 4030 return rc; 4031 } 4032 4033 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base + 4034 PQI_DEVICE_REGISTERS_OFFSET + 4035 get_unaligned_le64( 4036 &response.data.create_operational_iq.iq_pi_offset); 4037 4038 /* 4039 * Designate the 2nd IQ as the AIO path. By default, all IQs are 4040 * assumed to be for RAID path I/O unless we change the queue's 4041 * property. 
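	 * This is done with a CHANGE_IQ_PROPERTY admin request that sets the
	 * vendor-specific PQI_IQ_PROPERTY_IS_AIO_QUEUE bit on that queue.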
4042 */ 4043 memset(&request, 0, sizeof(request)); 4044 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4045 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4046 &request.header.iu_length); 4047 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY; 4048 put_unaligned_le16(queue_group->iq_id[AIO_PATH], 4049 &request.data.change_operational_iq_properties.queue_id); 4050 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE, 4051 &request.data.change_operational_iq_properties.vendor_specific); 4052 4053 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4054 &response); 4055 if (rc) { 4056 dev_err(&ctrl_info->pci_dev->dev, 4057 "error changing queue property\n"); 4058 return rc; 4059 } 4060 4061 /* 4062 * Create OQ (Outbound Queue - device to host queue). 4063 */ 4064 memset(&request, 0, sizeof(request)); 4065 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN; 4066 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH, 4067 &request.header.iu_length); 4068 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ; 4069 put_unaligned_le16(queue_group->oq_id, 4070 &request.data.create_operational_oq.queue_id); 4071 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr, 4072 &request.data.create_operational_oq.element_array_addr); 4073 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr, 4074 &request.data.create_operational_oq.pi_addr); 4075 put_unaligned_le16(ctrl_info->num_elements_per_oq, 4076 &request.data.create_operational_oq.num_elements); 4077 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16, 4078 &request.data.create_operational_oq.element_length); 4079 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP; 4080 put_unaligned_le16(queue_group->int_msg_num, 4081 &request.data.create_operational_oq.int_msg_num); 4082 4083 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request, 4084 &response); 4085 if (rc) { 4086 dev_err(&ctrl_info->pci_dev->dev, 4087 "error creating outbound queue\n"); 4088 return rc; 4089 } 4090 4091 queue_group->oq_ci = ctrl_info->iomem_base + 4092 PQI_DEVICE_REGISTERS_OFFSET + 4093 get_unaligned_le64( 4094 &response.data.create_operational_oq.oq_ci_offset); 4095 4096 return 0; 4097 } 4098 4099 static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info) 4100 { 4101 int rc; 4102 unsigned int i; 4103 4104 rc = pqi_create_event_queue(ctrl_info); 4105 if (rc) { 4106 dev_err(&ctrl_info->pci_dev->dev, 4107 "error creating event queue\n"); 4108 return rc; 4109 } 4110 4111 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 4112 rc = pqi_create_queue_group(ctrl_info, i); 4113 if (rc) { 4114 dev_err(&ctrl_info->pci_dev->dev, 4115 "error creating queue group number %u/%u\n", 4116 i, ctrl_info->num_queue_groups); 4117 return rc; 4118 } 4119 } 4120 4121 return 0; 4122 } 4123 4124 #define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \ 4125 (offsetof(struct pqi_event_config, descriptors) + \ 4126 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor))) 4127 4128 static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info, 4129 bool enable_events) 4130 { 4131 int rc; 4132 unsigned int i; 4133 struct pqi_event_config *event_config; 4134 struct pqi_event_descriptor *event_descriptor; 4135 struct pqi_general_management_request request; 4136 4137 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4138 GFP_KERNEL); 4139 if (!event_config) 4140 return -ENOMEM; 4141 4142 memset(&request, 0, sizeof(request)); 4143 4144 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG; 4145 
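	/*
	 * The IU length set below covers the request fields plus exactly one
	 * SG descriptor: offsetof(..., sg_descriptors[1]) is the offset of
	 * the second descriptor, i.e. the end of the first one, and
	 * PQI_REQUEST_HEADER_LENGTH is subtracted because the IU length
	 * field does not include the common header.
	 */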
put_unaligned_le16(offsetof(struct pqi_general_management_request, 4146 data.report_event_configuration.sg_descriptors[1]) - 4147 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4148 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4149 &request.data.report_event_configuration.buffer_length); 4150 4151 rc = pqi_map_single(ctrl_info->pci_dev, 4152 request.data.report_event_configuration.sg_descriptors, 4153 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4154 DMA_FROM_DEVICE); 4155 if (rc) 4156 goto out; 4157 4158 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 4159 0, NULL, NO_TIMEOUT); 4160 4161 pqi_pci_unmap(ctrl_info->pci_dev, 4162 request.data.report_event_configuration.sg_descriptors, 1, 4163 DMA_FROM_DEVICE); 4164 4165 if (rc) 4166 goto out; 4167 4168 for (i = 0; i < event_config->num_event_descriptors; i++) { 4169 event_descriptor = &event_config->descriptors[i]; 4170 if (enable_events && 4171 pqi_is_supported_event(event_descriptor->event_type)) 4172 put_unaligned_le16(ctrl_info->event_queue.oq_id, 4173 &event_descriptor->oq_id); 4174 else 4175 put_unaligned_le16(0, &event_descriptor->oq_id); 4176 } 4177 4178 memset(&request, 0, sizeof(request)); 4179 4180 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG; 4181 put_unaligned_le16(offsetof(struct pqi_general_management_request, 4182 data.report_event_configuration.sg_descriptors[1]) - 4183 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length); 4184 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4185 &request.data.report_event_configuration.buffer_length); 4186 4187 rc = pqi_map_single(ctrl_info->pci_dev, 4188 request.data.report_event_configuration.sg_descriptors, 4189 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH, 4190 DMA_TO_DEVICE); 4191 if (rc) 4192 goto out; 4193 4194 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, 4195 NULL, NO_TIMEOUT); 4196 4197 pqi_pci_unmap(ctrl_info->pci_dev, 4198 request.data.report_event_configuration.sg_descriptors, 1, 4199 DMA_TO_DEVICE); 4200 4201 out: 4202 kfree(event_config); 4203 4204 return rc; 4205 } 4206 4207 static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info) 4208 { 4209 return pqi_configure_events(ctrl_info, true); 4210 } 4211 4212 static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info) 4213 { 4214 return pqi_configure_events(ctrl_info, false); 4215 } 4216 4217 static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) 4218 { 4219 unsigned int i; 4220 struct device *dev; 4221 size_t sg_chain_buffer_length; 4222 struct pqi_io_request *io_request; 4223 4224 if (!ctrl_info->io_request_pool) 4225 return; 4226 4227 dev = &ctrl_info->pci_dev->dev; 4228 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length; 4229 io_request = ctrl_info->io_request_pool; 4230 4231 for (i = 0; i < ctrl_info->max_io_slots; i++) { 4232 kfree(io_request->iu); 4233 if (!io_request->sg_chain_buffer) 4234 break; 4235 dma_free_coherent(dev, sg_chain_buffer_length, 4236 io_request->sg_chain_buffer, 4237 io_request->sg_chain_buffer_dma_handle); 4238 io_request++; 4239 } 4240 4241 kfree(ctrl_info->io_request_pool); 4242 ctrl_info->io_request_pool = NULL; 4243 } 4244 4245 static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) 4246 { 4247 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev, 4248 ctrl_info->error_buffer_length, 4249 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL); 4250 4251 if (!ctrl_info->error_buffer) 4252 return -ENOMEM; 4253 4254 
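	/*
	 * ctrl_info->error_buffer_length is sized in
	 * pqi_calculate_io_resources() as one PQI_ERROR_BUFFER_ELEMENT_LENGTH
	 * slot per I/O slot, so every outstanding request has a fixed place
	 * for returned error information.
	 */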
	return 0;
}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool =
		kcalloc(ctrl_info->max_io_slots,
			sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */

static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned.
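	 * Purely illustrative example: with 4 KiB pages and a 1 MiB transfer
	 * cap, max_transfer_size / PAGE_SIZE = 256, and the +1 for a
	 * misaligned buffer gives 257 SG entries.  After clamping to the
	 * controller's max_sg_entries, max_transfer_size is recomputed as
	 * (max_sg_entries - 1) * PAGE_SIZE so the advertised limit never
	 * needs more SG entries than were budgeted.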
*/ 4342 max_sg_entries++; 4343 4344 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries); 4345 4346 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE; 4347 4348 ctrl_info->sg_chain_buffer_length = 4349 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) + 4350 PQI_EXTRA_SGL_MEMORY; 4351 ctrl_info->sg_tablesize = max_sg_entries; 4352 ctrl_info->max_sectors = max_transfer_size / 512; 4353 } 4354 4355 static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) 4356 { 4357 int num_queue_groups; 4358 u16 num_elements_per_iq; 4359 u16 num_elements_per_oq; 4360 4361 if (reset_devices) { 4362 num_queue_groups = 1; 4363 } else { 4364 int num_cpus; 4365 int max_queue_groups; 4366 4367 max_queue_groups = min(ctrl_info->max_inbound_queues / 2, 4368 ctrl_info->max_outbound_queues - 1); 4369 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS); 4370 4371 num_cpus = num_online_cpus(); 4372 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors); 4373 num_queue_groups = min(num_queue_groups, max_queue_groups); 4374 } 4375 4376 ctrl_info->num_queue_groups = num_queue_groups; 4377 ctrl_info->max_hw_queue_index = num_queue_groups - 1; 4378 4379 /* 4380 * Make sure that the max. inbound IU length is an even multiple 4381 * of our inbound element length. 4382 */ 4383 ctrl_info->max_inbound_iu_length = 4384 (ctrl_info->max_inbound_iu_length_per_firmware / 4385 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) * 4386 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH; 4387 4388 num_elements_per_iq = 4389 (ctrl_info->max_inbound_iu_length / 4390 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 4391 4392 /* Add one because one element in each queue is unusable. */ 4393 num_elements_per_iq++; 4394 4395 num_elements_per_iq = min(num_elements_per_iq, 4396 ctrl_info->max_elements_per_iq); 4397 4398 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1; 4399 num_elements_per_oq = min(num_elements_per_oq, 4400 ctrl_info->max_elements_per_oq); 4401 4402 ctrl_info->num_elements_per_iq = num_elements_per_iq; 4403 ctrl_info->num_elements_per_oq = num_elements_per_oq; 4404 4405 ctrl_info->max_sg_per_iu = 4406 ((ctrl_info->max_inbound_iu_length - 4407 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) / 4408 sizeof(struct pqi_sg_descriptor)) + 4409 PQI_MAX_EMBEDDED_SG_DESCRIPTORS; 4410 } 4411 4412 static inline void pqi_set_sg_descriptor( 4413 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg) 4414 { 4415 u64 address = (u64)sg_dma_address(sg); 4416 unsigned int length = sg_dma_len(sg); 4417 4418 put_unaligned_le64(address, &sg_descriptor->address); 4419 put_unaligned_le32(length, &sg_descriptor->length); 4420 put_unaligned_le32(0, &sg_descriptor->flags); 4421 } 4422 4423 static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info, 4424 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd, 4425 struct pqi_io_request *io_request) 4426 { 4427 int i; 4428 u16 iu_length; 4429 int sg_count; 4430 bool chained; 4431 unsigned int num_sg_in_iu; 4432 unsigned int max_sg_per_iu; 4433 struct scatterlist *sg; 4434 struct pqi_sg_descriptor *sg_descriptor; 4435 4436 sg_count = scsi_dma_map(scmd); 4437 if (sg_count < 0) 4438 return sg_count; 4439 4440 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 4441 PQI_REQUEST_HEADER_LENGTH; 4442 4443 if (sg_count == 0) 4444 goto out; 4445 4446 sg = scsi_sglist(scmd); 4447 sg_descriptor = request->sg_descriptors; 4448 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4449 chained = false; 4450 num_sg_in_iu = 0; 4451 i = 0; 4452 4453 while (1) { 4454 
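		/*
		 * Walk the mapped scatterlist and fill the descriptors
		 * embedded in the request.  When the embedded space runs out
		 * (i == max_sg_per_iu), the final embedded slot (reserved by
		 * the "- 1" on max_sg_per_iu above) is used for a
		 * CISS_SG_CHAIN descriptor pointing at this request's
		 * preallocated sg_chain_buffer, and the remaining entries are
		 * written there; the last descriptor is flagged CISS_SG_LAST
		 * after the loop.
		 */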
pqi_set_sg_descriptor(sg_descriptor, sg); 4455 if (!chained) 4456 num_sg_in_iu++; 4457 i++; 4458 if (i == sg_count) 4459 break; 4460 sg_descriptor++; 4461 if (i == max_sg_per_iu) { 4462 put_unaligned_le64( 4463 (u64)io_request->sg_chain_buffer_dma_handle, 4464 &sg_descriptor->address); 4465 put_unaligned_le32((sg_count - num_sg_in_iu) 4466 * sizeof(*sg_descriptor), 4467 &sg_descriptor->length); 4468 put_unaligned_le32(CISS_SG_CHAIN, 4469 &sg_descriptor->flags); 4470 chained = true; 4471 num_sg_in_iu++; 4472 sg_descriptor = io_request->sg_chain_buffer; 4473 } 4474 sg = sg_next(sg); 4475 } 4476 4477 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4478 request->partial = chained; 4479 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4480 4481 out: 4482 put_unaligned_le16(iu_length, &request->header.iu_length); 4483 4484 return 0; 4485 } 4486 4487 static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info, 4488 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd, 4489 struct pqi_io_request *io_request) 4490 { 4491 int i; 4492 u16 iu_length; 4493 int sg_count; 4494 bool chained; 4495 unsigned int num_sg_in_iu; 4496 unsigned int max_sg_per_iu; 4497 struct scatterlist *sg; 4498 struct pqi_sg_descriptor *sg_descriptor; 4499 4500 sg_count = scsi_dma_map(scmd); 4501 if (sg_count < 0) 4502 return sg_count; 4503 4504 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) - 4505 PQI_REQUEST_HEADER_LENGTH; 4506 num_sg_in_iu = 0; 4507 4508 if (sg_count == 0) 4509 goto out; 4510 4511 sg = scsi_sglist(scmd); 4512 sg_descriptor = request->sg_descriptors; 4513 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1; 4514 chained = false; 4515 i = 0; 4516 4517 while (1) { 4518 pqi_set_sg_descriptor(sg_descriptor, sg); 4519 if (!chained) 4520 num_sg_in_iu++; 4521 i++; 4522 if (i == sg_count) 4523 break; 4524 sg_descriptor++; 4525 if (i == max_sg_per_iu) { 4526 put_unaligned_le64( 4527 (u64)io_request->sg_chain_buffer_dma_handle, 4528 &sg_descriptor->address); 4529 put_unaligned_le32((sg_count - num_sg_in_iu) 4530 * sizeof(*sg_descriptor), 4531 &sg_descriptor->length); 4532 put_unaligned_le32(CISS_SG_CHAIN, 4533 &sg_descriptor->flags); 4534 chained = true; 4535 num_sg_in_iu++; 4536 sg_descriptor = io_request->sg_chain_buffer; 4537 } 4538 sg = sg_next(sg); 4539 } 4540 4541 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags); 4542 request->partial = chained; 4543 iu_length += num_sg_in_iu * sizeof(*sg_descriptor); 4544 4545 out: 4546 put_unaligned_le16(iu_length, &request->header.iu_length); 4547 request->num_sg_descriptors = num_sg_in_iu; 4548 4549 return 0; 4550 } 4551 4552 static void pqi_raid_io_complete(struct pqi_io_request *io_request, 4553 void *context) 4554 { 4555 struct scsi_cmnd *scmd; 4556 4557 scmd = io_request->scmd; 4558 pqi_free_io_request(io_request); 4559 scsi_dma_unmap(scmd); 4560 pqi_scsi_done(scmd); 4561 } 4562 4563 static int pqi_raid_submit_scsi_cmd_with_io_request( 4564 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, 4565 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4566 struct pqi_queue_group *queue_group) 4567 { 4568 int rc; 4569 size_t cdb_length; 4570 struct pqi_raid_path_request *request; 4571 4572 io_request->io_complete_callback = pqi_raid_io_complete; 4573 io_request->scmd = scmd; 4574 4575 request = io_request->iu; 4576 memset(request, 0, 4577 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4578 4579 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 4580 put_unaligned_le32(scsi_bufflen(scmd), 
&request->buffer_length); 4581 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4582 put_unaligned_le16(io_request->index, &request->request_id); 4583 request->error_index = request->request_id; 4584 memcpy(request->lun_number, device->scsi3addr, 4585 sizeof(request->lun_number)); 4586 4587 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb)); 4588 memcpy(request->cdb, scmd->cmnd, cdb_length); 4589 4590 switch (cdb_length) { 4591 case 6: 4592 case 10: 4593 case 12: 4594 case 16: 4595 /* No bytes in the Additional CDB bytes field */ 4596 request->additional_cdb_bytes_usage = 4597 SOP_ADDITIONAL_CDB_BYTES_0; 4598 break; 4599 case 20: 4600 /* 4 bytes in the Additional cdb field */ 4601 request->additional_cdb_bytes_usage = 4602 SOP_ADDITIONAL_CDB_BYTES_4; 4603 break; 4604 case 24: 4605 /* 8 bytes in the Additional cdb field */ 4606 request->additional_cdb_bytes_usage = 4607 SOP_ADDITIONAL_CDB_BYTES_8; 4608 break; 4609 case 28: 4610 /* 12 bytes in the Additional cdb field */ 4611 request->additional_cdb_bytes_usage = 4612 SOP_ADDITIONAL_CDB_BYTES_12; 4613 break; 4614 case 32: 4615 default: 4616 /* 16 bytes in the Additional cdb field */ 4617 request->additional_cdb_bytes_usage = 4618 SOP_ADDITIONAL_CDB_BYTES_16; 4619 break; 4620 } 4621 4622 switch (scmd->sc_data_direction) { 4623 case DMA_TO_DEVICE: 4624 request->data_direction = SOP_READ_FLAG; 4625 break; 4626 case DMA_FROM_DEVICE: 4627 request->data_direction = SOP_WRITE_FLAG; 4628 break; 4629 case DMA_NONE: 4630 request->data_direction = SOP_NO_DIRECTION_FLAG; 4631 break; 4632 case DMA_BIDIRECTIONAL: 4633 request->data_direction = SOP_BIDIRECTIONAL; 4634 break; 4635 default: 4636 dev_err(&ctrl_info->pci_dev->dev, 4637 "unknown data direction: %d\n", 4638 scmd->sc_data_direction); 4639 break; 4640 } 4641 4642 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request); 4643 if (rc) { 4644 pqi_free_io_request(io_request); 4645 return SCSI_MLQUEUE_HOST_BUSY; 4646 } 4647 4648 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request); 4649 4650 return 0; 4651 } 4652 4653 static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4654 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4655 struct pqi_queue_group *queue_group) 4656 { 4657 struct pqi_io_request *io_request; 4658 4659 io_request = pqi_alloc_io_request(ctrl_info); 4660 4661 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4662 device, scmd, queue_group); 4663 } 4664 4665 static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info) 4666 { 4667 if (!pqi_ctrl_blocked(ctrl_info)) 4668 schedule_work(&ctrl_info->raid_bypass_retry_work); 4669 } 4670 4671 static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) 4672 { 4673 struct scsi_cmnd *scmd; 4674 struct pqi_scsi_dev *device; 4675 struct pqi_ctrl_info *ctrl_info; 4676 4677 if (!io_request->raid_bypass) 4678 return false; 4679 4680 scmd = io_request->scmd; 4681 if ((scmd->result & 0xff) == SAM_STAT_GOOD) 4682 return false; 4683 if (host_byte(scmd->result) == DID_NO_CONNECT) 4684 return false; 4685 4686 device = scmd->device->hostdata; 4687 if (pqi_device_offline(device)) 4688 return false; 4689 4690 ctrl_info = shost_to_hba(scmd->device->host); 4691 if (pqi_ctrl_offline(ctrl_info)) 4692 return false; 4693 4694 return true; 4695 } 4696 4697 static inline void pqi_add_to_raid_bypass_retry_list( 4698 struct pqi_ctrl_info *ctrl_info, 4699 struct pqi_io_request *io_request, bool at_head) 4700 { 4701 unsigned long flags; 4702 4703 
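	/*
	 * Requests that failed on the RAID bypass (AIO) path are parked on
	 * this per-controller list and later resubmitted down the normal
	 * RAID path by pqi_raid_bypass_retry_worker(); at_head is used when
	 * a retry itself could not be resubmitted, so ordering is preserved.
	 */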
spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4704 if (at_head) 4705 list_add(&io_request->request_list_entry, 4706 &ctrl_info->raid_bypass_retry_list); 4707 else 4708 list_add_tail(&io_request->request_list_entry, 4709 &ctrl_info->raid_bypass_retry_list); 4710 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4711 } 4712 4713 static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request, 4714 void *context) 4715 { 4716 struct scsi_cmnd *scmd; 4717 4718 scmd = io_request->scmd; 4719 pqi_free_io_request(io_request); 4720 pqi_scsi_done(scmd); 4721 } 4722 4723 static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request) 4724 { 4725 struct scsi_cmnd *scmd; 4726 struct pqi_ctrl_info *ctrl_info; 4727 4728 io_request->io_complete_callback = pqi_queued_raid_bypass_complete; 4729 scmd = io_request->scmd; 4730 scmd->result = 0; 4731 ctrl_info = shost_to_hba(scmd->device->host); 4732 4733 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false); 4734 pqi_schedule_bypass_retry(ctrl_info); 4735 } 4736 4737 static int pqi_retry_raid_bypass(struct pqi_io_request *io_request) 4738 { 4739 struct scsi_cmnd *scmd; 4740 struct pqi_scsi_dev *device; 4741 struct pqi_ctrl_info *ctrl_info; 4742 struct pqi_queue_group *queue_group; 4743 4744 scmd = io_request->scmd; 4745 device = scmd->device->hostdata; 4746 if (pqi_device_in_reset(device)) { 4747 pqi_free_io_request(io_request); 4748 set_host_byte(scmd, DID_RESET); 4749 pqi_scsi_done(scmd); 4750 return 0; 4751 } 4752 4753 ctrl_info = shost_to_hba(scmd->device->host); 4754 queue_group = io_request->queue_group; 4755 4756 pqi_reinit_io_request(io_request); 4757 4758 return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, 4759 device, scmd, queue_group); 4760 } 4761 4762 static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request( 4763 struct pqi_ctrl_info *ctrl_info) 4764 { 4765 unsigned long flags; 4766 struct pqi_io_request *io_request; 4767 4768 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4769 io_request = list_first_entry_or_null( 4770 &ctrl_info->raid_bypass_retry_list, 4771 struct pqi_io_request, request_list_entry); 4772 if (io_request) 4773 list_del(&io_request->request_list_entry); 4774 spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4775 4776 return io_request; 4777 } 4778 4779 static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info) 4780 { 4781 int rc; 4782 struct pqi_io_request *io_request; 4783 4784 pqi_ctrl_busy(ctrl_info); 4785 4786 while (1) { 4787 if (pqi_ctrl_blocked(ctrl_info)) 4788 break; 4789 io_request = pqi_next_queued_raid_bypass_request(ctrl_info); 4790 if (!io_request) 4791 break; 4792 rc = pqi_retry_raid_bypass(io_request); 4793 if (rc) { 4794 pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, 4795 true); 4796 pqi_schedule_bypass_retry(ctrl_info); 4797 break; 4798 } 4799 } 4800 4801 pqi_ctrl_unbusy(ctrl_info); 4802 } 4803 4804 static void pqi_raid_bypass_retry_worker(struct work_struct *work) 4805 { 4806 struct pqi_ctrl_info *ctrl_info; 4807 4808 ctrl_info = container_of(work, struct pqi_ctrl_info, 4809 raid_bypass_retry_work); 4810 pqi_retry_raid_bypass_requests(ctrl_info); 4811 } 4812 4813 static void pqi_clear_all_queued_raid_bypass_retries( 4814 struct pqi_ctrl_info *ctrl_info) 4815 { 4816 unsigned long flags; 4817 4818 spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags); 4819 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 4820 
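	/*
	 * Re-initializing the list head under the lock drops every queued
	 * bypass retry in one step; the io_requests themselves are not
	 * completed or freed here.
	 */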
spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags); 4821 } 4822 4823 static void pqi_aio_io_complete(struct pqi_io_request *io_request, 4824 void *context) 4825 { 4826 struct scsi_cmnd *scmd; 4827 4828 scmd = io_request->scmd; 4829 scsi_dma_unmap(scmd); 4830 if (io_request->status == -EAGAIN) 4831 set_host_byte(scmd, DID_IMM_RETRY); 4832 else if (pqi_raid_bypass_retry_needed(io_request)) { 4833 pqi_queue_raid_bypass_retry(io_request); 4834 return; 4835 } 4836 pqi_free_io_request(io_request); 4837 pqi_scsi_done(scmd); 4838 } 4839 4840 static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, 4841 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, 4842 struct pqi_queue_group *queue_group) 4843 { 4844 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, 4845 scmd->cmnd, scmd->cmd_len, queue_group, NULL, false); 4846 } 4847 4848 static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, 4849 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb, 4850 unsigned int cdb_length, struct pqi_queue_group *queue_group, 4851 struct pqi_encryption_info *encryption_info, bool raid_bypass) 4852 { 4853 int rc; 4854 struct pqi_io_request *io_request; 4855 struct pqi_aio_path_request *request; 4856 4857 io_request = pqi_alloc_io_request(ctrl_info); 4858 io_request->io_complete_callback = pqi_aio_io_complete; 4859 io_request->scmd = scmd; 4860 io_request->raid_bypass = raid_bypass; 4861 4862 request = io_request->iu; 4863 memset(request, 0, 4864 offsetof(struct pqi_raid_path_request, sg_descriptors)); 4865 4866 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO; 4867 put_unaligned_le32(aio_handle, &request->nexus_id); 4868 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); 4869 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 4870 put_unaligned_le16(io_request->index, &request->request_id); 4871 request->error_index = request->request_id; 4872 if (cdb_length > sizeof(request->cdb)) 4873 cdb_length = sizeof(request->cdb); 4874 request->cdb_length = cdb_length; 4875 memcpy(request->cdb, cdb, cdb_length); 4876 4877 switch (scmd->sc_data_direction) { 4878 case DMA_TO_DEVICE: 4879 request->data_direction = SOP_READ_FLAG; 4880 break; 4881 case DMA_FROM_DEVICE: 4882 request->data_direction = SOP_WRITE_FLAG; 4883 break; 4884 case DMA_NONE: 4885 request->data_direction = SOP_NO_DIRECTION_FLAG; 4886 break; 4887 case DMA_BIDIRECTIONAL: 4888 request->data_direction = SOP_BIDIRECTIONAL; 4889 break; 4890 default: 4891 dev_err(&ctrl_info->pci_dev->dev, 4892 "unknown data direction: %d\n", 4893 scmd->sc_data_direction); 4894 break; 4895 } 4896 4897 if (encryption_info) { 4898 request->encryption_enable = true; 4899 put_unaligned_le16(encryption_info->data_encryption_key_index, 4900 &request->data_encryption_key_index); 4901 put_unaligned_le32(encryption_info->encrypt_tweak_lower, 4902 &request->encrypt_tweak_lower); 4903 put_unaligned_le32(encryption_info->encrypt_tweak_upper, 4904 &request->encrypt_tweak_upper); 4905 } 4906 4907 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request); 4908 if (rc) { 4909 pqi_free_io_request(io_request); 4910 return SCSI_MLQUEUE_HOST_BUSY; 4911 } 4912 4913 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request); 4914 4915 return 0; 4916 } 4917 4918 static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, 4919 struct scsi_cmnd *scmd) 4920 { 4921 u16 hw_queue; 4922 4923 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); 4924 if (hw_queue > ctrl_info->max_hw_queue_index) 4925 hw_queue = 
0; 4926 4927 return hw_queue; 4928 } 4929 4930 /* 4931 * This function gets called just before we hand the completed SCSI request 4932 * back to the SML. 4933 */ 4934 4935 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd) 4936 { 4937 struct pqi_scsi_dev *device; 4938 4939 device = scmd->device->hostdata; 4940 atomic_dec(&device->scsi_cmds_outstanding); 4941 } 4942 4943 static int pqi_scsi_queue_command(struct Scsi_Host *shost, 4944 struct scsi_cmnd *scmd) 4945 { 4946 int rc; 4947 struct pqi_ctrl_info *ctrl_info; 4948 struct pqi_scsi_dev *device; 4949 u16 hw_queue; 4950 struct pqi_queue_group *queue_group; 4951 bool raid_bypassed; 4952 4953 device = scmd->device->hostdata; 4954 ctrl_info = shost_to_hba(shost); 4955 4956 atomic_inc(&device->scsi_cmds_outstanding); 4957 4958 if (pqi_ctrl_offline(ctrl_info)) { 4959 set_host_byte(scmd, DID_NO_CONNECT); 4960 pqi_scsi_done(scmd); 4961 return 0; 4962 } 4963 4964 pqi_ctrl_busy(ctrl_info); 4965 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) { 4966 rc = SCSI_MLQUEUE_HOST_BUSY; 4967 goto out; 4968 } 4969 4970 /* 4971 * This is necessary because the SML doesn't zero out this field during 4972 * error recovery. 4973 */ 4974 scmd->result = 0; 4975 4976 hw_queue = pqi_get_hw_queue(ctrl_info, scmd); 4977 queue_group = &ctrl_info->queue_groups[hw_queue]; 4978 4979 if (pqi_is_logical_device(device)) { 4980 raid_bypassed = false; 4981 if (device->raid_bypass_enabled && 4982 !blk_rq_is_passthrough(scmd->request)) { 4983 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, 4984 scmd, queue_group); 4985 if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) 4986 raid_bypassed = true; 4987 } 4988 if (!raid_bypassed) 4989 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4990 queue_group); 4991 } else { 4992 if (device->aio_enabled) 4993 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, 4994 queue_group); 4995 else 4996 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, 4997 queue_group); 4998 } 4999 5000 out: 5001 pqi_ctrl_unbusy(ctrl_info); 5002 if (rc) 5003 atomic_dec(&device->scsi_cmds_outstanding); 5004 5005 return rc; 5006 } 5007 5008 static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info, 5009 struct pqi_queue_group *queue_group) 5010 { 5011 unsigned int path; 5012 unsigned long flags; 5013 bool list_is_empty; 5014 5015 for (path = 0; path < 2; path++) { 5016 while (1) { 5017 spin_lock_irqsave( 5018 &queue_group->submit_lock[path], flags); 5019 list_is_empty = 5020 list_empty(&queue_group->request_list[path]); 5021 spin_unlock_irqrestore( 5022 &queue_group->submit_lock[path], flags); 5023 if (list_is_empty) 5024 break; 5025 pqi_check_ctrl_health(ctrl_info); 5026 if (pqi_ctrl_offline(ctrl_info)) 5027 return -ENXIO; 5028 usleep_range(1000, 2000); 5029 } 5030 } 5031 5032 return 0; 5033 } 5034 5035 static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) 5036 { 5037 int rc; 5038 unsigned int i; 5039 unsigned int path; 5040 struct pqi_queue_group *queue_group; 5041 pqi_index_t iq_pi; 5042 pqi_index_t iq_ci; 5043 5044 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5045 queue_group = &ctrl_info->queue_groups[i]; 5046 5047 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group); 5048 if (rc) 5049 return rc; 5050 5051 for (path = 0; path < 2; path++) { 5052 iq_pi = queue_group->iq_pi_copy[path]; 5053 5054 while (1) { 5055 iq_ci = readl(queue_group->iq_ci[path]); 5056 if (iq_ci == iq_pi) 5057 break; 5058 pqi_check_ctrl_health(ctrl_info); 5059 if (pqi_ctrl_offline(ctrl_info)) 5060 return 
-ENXIO; 5061 usleep_range(1000, 2000); 5062 } 5063 } 5064 } 5065 5066 return 0; 5067 } 5068 5069 static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info, 5070 struct pqi_scsi_dev *device) 5071 { 5072 unsigned int i; 5073 unsigned int path; 5074 struct pqi_queue_group *queue_group; 5075 unsigned long flags; 5076 struct pqi_io_request *io_request; 5077 struct pqi_io_request *next; 5078 struct scsi_cmnd *scmd; 5079 struct pqi_scsi_dev *scsi_device; 5080 5081 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 5082 queue_group = &ctrl_info->queue_groups[i]; 5083 5084 for (path = 0; path < 2; path++) { 5085 spin_lock_irqsave( 5086 &queue_group->submit_lock[path], flags); 5087 5088 list_for_each_entry_safe(io_request, next, 5089 &queue_group->request_list[path], 5090 request_list_entry) { 5091 scmd = io_request->scmd; 5092 if (!scmd) 5093 continue; 5094 5095 scsi_device = scmd->device->hostdata; 5096 if (scsi_device != device) 5097 continue; 5098 5099 list_del(&io_request->request_list_entry); 5100 set_host_byte(scmd, DID_RESET); 5101 pqi_scsi_done(scmd); 5102 } 5103 5104 spin_unlock_irqrestore( 5105 &queue_group->submit_lock[path], flags); 5106 } 5107 } 5108 } 5109 5110 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, 5111 struct pqi_scsi_dev *device) 5112 { 5113 while (atomic_read(&device->scsi_cmds_outstanding)) { 5114 pqi_check_ctrl_health(ctrl_info); 5115 if (pqi_ctrl_offline(ctrl_info)) 5116 return -ENXIO; 5117 usleep_range(1000, 2000); 5118 } 5119 5120 return 0; 5121 } 5122 5123 static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info) 5124 { 5125 bool io_pending; 5126 unsigned long flags; 5127 struct pqi_scsi_dev *device; 5128 5129 while (1) { 5130 io_pending = false; 5131 5132 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5133 list_for_each_entry(device, &ctrl_info->scsi_device_list, 5134 scsi_device_list_entry) { 5135 if (atomic_read(&device->scsi_cmds_outstanding)) { 5136 io_pending = true; 5137 break; 5138 } 5139 } 5140 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5141 flags); 5142 5143 if (!io_pending) 5144 break; 5145 5146 pqi_check_ctrl_health(ctrl_info); 5147 if (pqi_ctrl_offline(ctrl_info)) 5148 return -ENXIO; 5149 5150 usleep_range(1000, 2000); 5151 } 5152 5153 return 0; 5154 } 5155 5156 static void pqi_lun_reset_complete(struct pqi_io_request *io_request, 5157 void *context) 5158 { 5159 struct completion *waiting = context; 5160 5161 complete(waiting); 5162 } 5163 5164 #define PQI_LUN_RESET_TIMEOUT_SECS 10 5165 5166 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, 5167 struct pqi_scsi_dev *device, struct completion *wait) 5168 { 5169 int rc; 5170 5171 while (1) { 5172 if (wait_for_completion_io_timeout(wait, 5173 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) { 5174 rc = 0; 5175 break; 5176 } 5177 5178 pqi_check_ctrl_health(ctrl_info); 5179 if (pqi_ctrl_offline(ctrl_info)) { 5180 rc = -ENXIO; 5181 break; 5182 } 5183 } 5184 5185 return rc; 5186 } 5187 5188 static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, 5189 struct pqi_scsi_dev *device) 5190 { 5191 int rc; 5192 struct pqi_io_request *io_request; 5193 DECLARE_COMPLETION_ONSTACK(wait); 5194 struct pqi_task_management_request *request; 5195 5196 io_request = pqi_alloc_io_request(ctrl_info); 5197 io_request->io_complete_callback = pqi_lun_reset_complete; 5198 io_request->context = &wait; 5199 5200 request = io_request->iu; 5201 memset(request, 0, sizeof(*request)); 5202 5203 request->header.iu_type = 
PQI_REQUEST_IU_TASK_MANAGEMENT; 5204 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH, 5205 &request->header.iu_length); 5206 put_unaligned_le16(io_request->index, &request->request_id); 5207 memcpy(request->lun_number, device->scsi3addr, 5208 sizeof(request->lun_number)); 5209 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; 5210 5211 pqi_start_io(ctrl_info, 5212 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, 5213 io_request); 5214 5215 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait); 5216 if (rc == 0) 5217 rc = io_request->status; 5218 5219 pqi_free_io_request(io_request); 5220 5221 return rc; 5222 } 5223 5224 #define PQI_LUN_RESET_RETRIES 3 5225 #define PQI_LUN_RESET_RETRY_INTERVAL_MSECS 10000 5226 /* Performs a reset at the LUN level. */ 5227 5228 static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, 5229 struct pqi_scsi_dev *device) 5230 { 5231 int rc; 5232 unsigned int retries; 5233 5234 for (retries = 0;;) { 5235 rc = pqi_lun_reset(ctrl_info, device); 5236 if (rc != -EAGAIN || 5237 ++retries > PQI_LUN_RESET_RETRIES) 5238 break; 5239 msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); 5240 } 5241 if (rc == 0) 5242 rc = pqi_device_wait_for_pending_io(ctrl_info, device); 5243 5244 return rc == 0 ? SUCCESS : FAILED; 5245 } 5246 5247 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) 5248 { 5249 int rc; 5250 struct Scsi_Host *shost; 5251 struct pqi_ctrl_info *ctrl_info; 5252 struct pqi_scsi_dev *device; 5253 5254 shost = scmd->device->host; 5255 ctrl_info = shost_to_hba(shost); 5256 device = scmd->device->hostdata; 5257 5258 dev_err(&ctrl_info->pci_dev->dev, 5259 "resetting scsi %d:%d:%d:%d\n", 5260 shost->host_no, device->bus, device->target, device->lun); 5261 5262 pqi_check_ctrl_health(ctrl_info); 5263 if (pqi_ctrl_offline(ctrl_info)) { 5264 rc = FAILED; 5265 goto out; 5266 } 5267 5268 mutex_lock(&ctrl_info->lun_reset_mutex); 5269 5270 pqi_ctrl_block_requests(ctrl_info); 5271 pqi_ctrl_wait_until_quiesced(ctrl_info); 5272 pqi_fail_io_queued_for_device(ctrl_info, device); 5273 rc = pqi_wait_until_inbound_queues_empty(ctrl_info); 5274 pqi_device_reset_start(device); 5275 pqi_ctrl_unblock_requests(ctrl_info); 5276 5277 if (rc) 5278 rc = FAILED; 5279 else 5280 rc = pqi_device_reset(ctrl_info, device); 5281 5282 pqi_device_reset_done(device); 5283 5284 mutex_unlock(&ctrl_info->lun_reset_mutex); 5285 5286 out: 5287 dev_err(&ctrl_info->pci_dev->dev, 5288 "reset of scsi %d:%d:%d:%d: %s\n", 5289 shost->host_no, device->bus, device->target, device->lun, 5290 rc == SUCCESS ? 
"SUCCESS" : "FAILED"); 5291 5292 return rc; 5293 } 5294 5295 static int pqi_slave_alloc(struct scsi_device *sdev) 5296 { 5297 struct pqi_scsi_dev *device; 5298 unsigned long flags; 5299 struct pqi_ctrl_info *ctrl_info; 5300 struct scsi_target *starget; 5301 struct sas_rphy *rphy; 5302 5303 ctrl_info = shost_to_hba(sdev->host); 5304 5305 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5306 5307 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) { 5308 starget = scsi_target(sdev); 5309 rphy = target_to_rphy(starget); 5310 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); 5311 if (device) { 5312 device->target = sdev_id(sdev); 5313 device->lun = sdev->lun; 5314 device->target_lun_valid = true; 5315 } 5316 } else { 5317 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev), 5318 sdev_id(sdev), sdev->lun); 5319 } 5320 5321 if (device) { 5322 sdev->hostdata = device; 5323 device->sdev = sdev; 5324 if (device->queue_depth) { 5325 device->advertised_queue_depth = device->queue_depth; 5326 scsi_change_queue_depth(sdev, 5327 device->advertised_queue_depth); 5328 } 5329 } 5330 5331 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5332 5333 return 0; 5334 } 5335 5336 static int pqi_map_queues(struct Scsi_Host *shost) 5337 { 5338 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); 5339 5340 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0); 5341 } 5342 5343 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, 5344 void __user *arg) 5345 { 5346 struct pci_dev *pci_dev; 5347 u32 subsystem_vendor; 5348 u32 subsystem_device; 5349 cciss_pci_info_struct pciinfo; 5350 5351 if (!arg) 5352 return -EINVAL; 5353 5354 pci_dev = ctrl_info->pci_dev; 5355 5356 pciinfo.domain = pci_domain_nr(pci_dev->bus); 5357 pciinfo.bus = pci_dev->bus->number; 5358 pciinfo.dev_fn = pci_dev->devfn; 5359 subsystem_vendor = pci_dev->subsystem_vendor; 5360 subsystem_device = pci_dev->subsystem_device; 5361 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | 5362 subsystem_vendor; 5363 5364 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo))) 5365 return -EFAULT; 5366 5367 return 0; 5368 } 5369 5370 static int pqi_getdrivver_ioctl(void __user *arg) 5371 { 5372 u32 version; 5373 5374 if (!arg) 5375 return -EINVAL; 5376 5377 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | 5378 (DRIVER_RELEASE << 16) | DRIVER_REVISION; 5379 5380 if (copy_to_user(arg, &version, sizeof(version))) 5381 return -EFAULT; 5382 5383 return 0; 5384 } 5385 5386 struct ciss_error_info { 5387 u8 scsi_status; 5388 int command_status; 5389 size_t sense_data_length; 5390 }; 5391 5392 static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info, 5393 struct ciss_error_info *ciss_error_info) 5394 { 5395 int ciss_cmd_status; 5396 size_t sense_data_length; 5397 5398 switch (pqi_error_info->data_out_result) { 5399 case PQI_DATA_IN_OUT_GOOD: 5400 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS; 5401 break; 5402 case PQI_DATA_IN_OUT_UNDERFLOW: 5403 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN; 5404 break; 5405 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW: 5406 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN; 5407 break; 5408 case PQI_DATA_IN_OUT_PROTOCOL_ERROR: 5409 case PQI_DATA_IN_OUT_BUFFER_ERROR: 5410 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA: 5411 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE: 5412 case PQI_DATA_IN_OUT_ERROR: 5413 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR; 5414 break; 5415 case PQI_DATA_IN_OUT_HARDWARE_ERROR: 5416 case 
PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR: 5417 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT: 5418 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED: 5419 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED: 5420 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED: 5421 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST: 5422 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION: 5423 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED: 5424 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ: 5425 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR; 5426 break; 5427 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT: 5428 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT; 5429 break; 5430 case PQI_DATA_IN_OUT_ABORTED: 5431 ciss_cmd_status = CISS_CMD_STATUS_ABORTED; 5432 break; 5433 case PQI_DATA_IN_OUT_TIMEOUT: 5434 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT; 5435 break; 5436 default: 5437 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS; 5438 break; 5439 } 5440 5441 sense_data_length = 5442 get_unaligned_le16(&pqi_error_info->sense_data_length); 5443 if (sense_data_length == 0) 5444 sense_data_length = 5445 get_unaligned_le16(&pqi_error_info->response_data_length); 5446 if (sense_data_length) 5447 if (sense_data_length > sizeof(pqi_error_info->data)) 5448 sense_data_length = sizeof(pqi_error_info->data); 5449 5450 ciss_error_info->scsi_status = pqi_error_info->status; 5451 ciss_error_info->command_status = ciss_cmd_status; 5452 ciss_error_info->sense_data_length = sense_data_length; 5453 } 5454 5455 static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) 5456 { 5457 int rc; 5458 char *kernel_buffer = NULL; 5459 u16 iu_length; 5460 size_t sense_data_length; 5461 IOCTL_Command_struct iocommand; 5462 struct pqi_raid_path_request request; 5463 struct pqi_raid_error_info pqi_error_info; 5464 struct ciss_error_info ciss_error_info; 5465 5466 if (pqi_ctrl_offline(ctrl_info)) 5467 return -ENXIO; 5468 if (!arg) 5469 return -EINVAL; 5470 if (!capable(CAP_SYS_RAWIO)) 5471 return -EPERM; 5472 if (copy_from_user(&iocommand, arg, sizeof(iocommand))) 5473 return -EFAULT; 5474 if (iocommand.buf_size < 1 && 5475 iocommand.Request.Type.Direction != XFER_NONE) 5476 return -EINVAL; 5477 if (iocommand.Request.CDBLen > sizeof(request.cdb)) 5478 return -EINVAL; 5479 if (iocommand.Request.Type.Type != TYPE_CMD) 5480 return -EINVAL; 5481 5482 switch (iocommand.Request.Type.Direction) { 5483 case XFER_NONE: 5484 case XFER_WRITE: 5485 case XFER_READ: 5486 case XFER_READ | XFER_WRITE: 5487 break; 5488 default: 5489 return -EINVAL; 5490 } 5491 5492 if (iocommand.buf_size > 0) { 5493 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL); 5494 if (!kernel_buffer) 5495 return -ENOMEM; 5496 if (iocommand.Request.Type.Direction & XFER_WRITE) { 5497 if (copy_from_user(kernel_buffer, iocommand.buf, 5498 iocommand.buf_size)) { 5499 rc = -EFAULT; 5500 goto out; 5501 } 5502 } else { 5503 memset(kernel_buffer, 0, iocommand.buf_size); 5504 } 5505 } 5506 5507 memset(&request, 0, sizeof(request)); 5508 5509 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; 5510 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) - 5511 PQI_REQUEST_HEADER_LENGTH; 5512 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes, 5513 sizeof(request.lun_number)); 5514 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen); 5515 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0; 5516 5517 switch (iocommand.Request.Type.Direction) { 5518 case XFER_NONE: 5519 request.data_direction = SOP_NO_DIRECTION_FLAG; 5520 break; 5521 case 
XFER_WRITE: 5522 request.data_direction = SOP_WRITE_FLAG; 5523 break; 5524 case XFER_READ: 5525 request.data_direction = SOP_READ_FLAG; 5526 break; 5527 case XFER_READ | XFER_WRITE: 5528 request.data_direction = SOP_BIDIRECTIONAL; 5529 break; 5530 } 5531 5532 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; 5533 5534 if (iocommand.buf_size > 0) { 5535 put_unaligned_le32(iocommand.buf_size, &request.buffer_length); 5536 5537 rc = pqi_map_single(ctrl_info->pci_dev, 5538 &request.sg_descriptors[0], kernel_buffer, 5539 iocommand.buf_size, DMA_BIDIRECTIONAL); 5540 if (rc) 5541 goto out; 5542 5543 iu_length += sizeof(request.sg_descriptors[0]); 5544 } 5545 5546 put_unaligned_le16(iu_length, &request.header.iu_length); 5547 5548 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 5549 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); 5550 5551 if (iocommand.buf_size > 0) 5552 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, 5553 DMA_BIDIRECTIONAL); 5554 5555 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info)); 5556 5557 if (rc == 0) { 5558 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info); 5559 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status; 5560 iocommand.error_info.CommandStatus = 5561 ciss_error_info.command_status; 5562 sense_data_length = ciss_error_info.sense_data_length; 5563 if (sense_data_length) { 5564 if (sense_data_length > 5565 sizeof(iocommand.error_info.SenseInfo)) 5566 sense_data_length = 5567 sizeof(iocommand.error_info.SenseInfo); 5568 memcpy(iocommand.error_info.SenseInfo, 5569 pqi_error_info.data, sense_data_length); 5570 iocommand.error_info.SenseLen = sense_data_length; 5571 } 5572 } 5573 5574 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) { 5575 rc = -EFAULT; 5576 goto out; 5577 } 5578 5579 if (rc == 0 && iocommand.buf_size > 0 && 5580 (iocommand.Request.Type.Direction & XFER_READ)) { 5581 if (copy_to_user(iocommand.buf, kernel_buffer, 5582 iocommand.buf_size)) { 5583 rc = -EFAULT; 5584 } 5585 } 5586 5587 out: 5588 kfree(kernel_buffer); 5589 5590 return rc; 5591 } 5592 5593 static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 5594 { 5595 int rc; 5596 struct pqi_ctrl_info *ctrl_info; 5597 5598 ctrl_info = shost_to_hba(sdev->host); 5599 5600 switch (cmd) { 5601 case CCISS_DEREGDISK: 5602 case CCISS_REGNEWDISK: 5603 case CCISS_REGNEWD: 5604 rc = pqi_scan_scsi_devices(ctrl_info); 5605 break; 5606 case CCISS_GETPCIINFO: 5607 rc = pqi_getpciinfo_ioctl(ctrl_info, arg); 5608 break; 5609 case CCISS_GETDRIVVER: 5610 rc = pqi_getdrivver_ioctl(arg); 5611 break; 5612 case CCISS_PASSTHRU: 5613 rc = pqi_passthru_ioctl(ctrl_info, arg); 5614 break; 5615 default: 5616 rc = -EINVAL; 5617 break; 5618 } 5619 5620 return rc; 5621 } 5622 5623 static ssize_t pqi_version_show(struct device *dev, 5624 struct device_attribute *attr, char *buffer) 5625 { 5626 ssize_t count = 0; 5627 struct Scsi_Host *shost; 5628 struct pqi_ctrl_info *ctrl_info; 5629 5630 shost = class_to_shost(dev); 5631 ctrl_info = shost_to_hba(shost); 5632 5633 count += snprintf(buffer + count, PAGE_SIZE - count, 5634 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); 5635 5636 count += snprintf(buffer + count, PAGE_SIZE - count, 5637 "firmware: %s\n", ctrl_info->firmware_version); 5638 5639 return count; 5640 } 5641 5642 static ssize_t pqi_host_rescan_store(struct device *dev, 5643 struct device_attribute *attr, const char *buffer, size_t count) 5644 { 5645 struct Scsi_Host *shost = class_to_shost(dev); 5646 5647 
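	/*
	 * Writing anything to this attribute triggers an asynchronous device
	 * rescan, e.g. (host number is illustrative):
	 *
	 *	echo 1 > /sys/class/scsi_host/host0/rescan
	 */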
pqi_scan_start(shost); 5648 5649 return count; 5650 } 5651 5652 static ssize_t pqi_lockup_action_show(struct device *dev, 5653 struct device_attribute *attr, char *buffer) 5654 { 5655 int count = 0; 5656 unsigned int i; 5657 5658 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5659 if (pqi_lockup_actions[i].action == pqi_lockup_action) 5660 count += snprintf(buffer + count, PAGE_SIZE - count, 5661 "[%s] ", pqi_lockup_actions[i].name); 5662 else 5663 count += snprintf(buffer + count, PAGE_SIZE - count, 5664 "%s ", pqi_lockup_actions[i].name); 5665 } 5666 5667 count += snprintf(buffer + count, PAGE_SIZE - count, "\n"); 5668 5669 return count; 5670 } 5671 5672 static ssize_t pqi_lockup_action_store(struct device *dev, 5673 struct device_attribute *attr, const char *buffer, size_t count) 5674 { 5675 unsigned int i; 5676 char *action_name; 5677 char action_name_buffer[32]; 5678 5679 strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); 5680 action_name = strstrip(action_name_buffer); 5681 5682 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 5683 if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) { 5684 pqi_lockup_action = pqi_lockup_actions[i].action; 5685 return count; 5686 } 5687 } 5688 5689 return -EINVAL; 5690 } 5691 5692 static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); 5693 static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); 5694 static DEVICE_ATTR(lockup_action, 0644, 5695 pqi_lockup_action_show, pqi_lockup_action_store); 5696 5697 static struct device_attribute *pqi_shost_attrs[] = { 5698 &dev_attr_version, 5699 &dev_attr_rescan, 5700 &dev_attr_lockup_action, 5701 NULL 5702 }; 5703 5704 static ssize_t pqi_sas_address_show(struct device *dev, 5705 struct device_attribute *attr, char *buffer) 5706 { 5707 struct pqi_ctrl_info *ctrl_info; 5708 struct scsi_device *sdev; 5709 struct pqi_scsi_dev *device; 5710 unsigned long flags; 5711 u64 sas_address; 5712 5713 sdev = to_scsi_device(dev); 5714 ctrl_info = shost_to_hba(sdev->host); 5715 5716 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5717 5718 device = sdev->hostdata; 5719 if (pqi_is_logical_device(device)) { 5720 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, 5721 flags); 5722 return -ENODEV; 5723 } 5724 sas_address = device->sas_address; 5725 5726 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5727 5728 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address); 5729 } 5730 5731 static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev, 5732 struct device_attribute *attr, char *buffer) 5733 { 5734 struct pqi_ctrl_info *ctrl_info; 5735 struct scsi_device *sdev; 5736 struct pqi_scsi_dev *device; 5737 unsigned long flags; 5738 5739 sdev = to_scsi_device(dev); 5740 ctrl_info = shost_to_hba(sdev->host); 5741 5742 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5743 5744 device = sdev->hostdata; 5745 buffer[0] = device->raid_bypass_enabled ? 
'1' : '0'; 5746 buffer[1] = '\n'; 5747 buffer[2] = '\0'; 5748 5749 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5750 5751 return 2; 5752 } 5753 5754 static ssize_t pqi_raid_level_show(struct device *dev, 5755 struct device_attribute *attr, char *buffer) 5756 { 5757 struct pqi_ctrl_info *ctrl_info; 5758 struct scsi_device *sdev; 5759 struct pqi_scsi_dev *device; 5760 unsigned long flags; 5761 char *raid_level; 5762 5763 sdev = to_scsi_device(dev); 5764 ctrl_info = shost_to_hba(sdev->host); 5765 5766 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); 5767 5768 device = sdev->hostdata; 5769 5770 if (pqi_is_logical_device(device)) 5771 raid_level = pqi_raid_level_to_string(device->raid_level); 5772 else 5773 raid_level = "N/A"; 5774 5775 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); 5776 5777 return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level); 5778 } 5779 5780 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL); 5781 static DEVICE_ATTR(ssd_smart_path_enabled, 0444, 5782 pqi_ssd_smart_path_enabled_show, NULL); 5783 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); 5784 5785 static struct device_attribute *pqi_sdev_attrs[] = { 5786 &dev_attr_sas_address, 5787 &dev_attr_ssd_smart_path_enabled, 5788 &dev_attr_raid_level, 5789 NULL 5790 }; 5791 5792 static struct scsi_host_template pqi_driver_template = { 5793 .module = THIS_MODULE, 5794 .name = DRIVER_NAME_SHORT, 5795 .proc_name = DRIVER_NAME_SHORT, 5796 .queuecommand = pqi_scsi_queue_command, 5797 .scan_start = pqi_scan_start, 5798 .scan_finished = pqi_scan_finished, 5799 .this_id = -1, 5800 .eh_device_reset_handler = pqi_eh_device_reset_handler, 5801 .ioctl = pqi_ioctl, 5802 .slave_alloc = pqi_slave_alloc, 5803 .map_queues = pqi_map_queues, 5804 .sdev_attrs = pqi_sdev_attrs, 5805 .shost_attrs = pqi_shost_attrs, 5806 }; 5807 5808 static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) 5809 { 5810 int rc; 5811 struct Scsi_Host *shost; 5812 5813 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info)); 5814 if (!shost) { 5815 dev_err(&ctrl_info->pci_dev->dev, 5816 "scsi_host_alloc failed for controller %u\n", 5817 ctrl_info->ctrl_id); 5818 return -ENOMEM; 5819 } 5820 5821 shost->io_port = 0; 5822 shost->n_io_port = 0; 5823 shost->this_id = -1; 5824 shost->max_channel = PQI_MAX_BUS; 5825 shost->max_cmd_len = MAX_COMMAND_SIZE; 5826 shost->max_lun = ~0; 5827 shost->max_id = ~0; 5828 shost->max_sectors = ctrl_info->max_sectors; 5829 shost->can_queue = ctrl_info->scsi_ml_can_queue; 5830 shost->cmd_per_lun = shost->can_queue; 5831 shost->sg_tablesize = ctrl_info->sg_tablesize; 5832 shost->transportt = pqi_sas_transport_template; 5833 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0); 5834 shost->unique_id = shost->irq; 5835 shost->nr_hw_queues = ctrl_info->num_queue_groups; 5836 shost->hostdata[0] = (unsigned long)ctrl_info; 5837 5838 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev); 5839 if (rc) { 5840 dev_err(&ctrl_info->pci_dev->dev, 5841 "scsi_add_host failed for controller %u\n", 5842 ctrl_info->ctrl_id); 5843 goto free_host; 5844 } 5845 5846 rc = pqi_add_sas_host(shost, ctrl_info); 5847 if (rc) { 5848 dev_err(&ctrl_info->pci_dev->dev, 5849 "add SAS host failed for controller %u\n", 5850 ctrl_info->ctrl_id); 5851 goto remove_host; 5852 } 5853 5854 ctrl_info->scsi_host = shost; 5855 5856 return 0; 5857 5858 remove_host: 5859 scsi_remove_host(shost); 5860 free_host: 5861 scsi_host_put(shost); 5862 5863 return rc; 5864 } 5865 5866 static void 
pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info) 5867 { 5868 struct Scsi_Host *shost; 5869 5870 pqi_delete_sas_host(ctrl_info); 5871 5872 shost = ctrl_info->scsi_host; 5873 if (!shost) 5874 return; 5875 5876 scsi_remove_host(shost); 5877 scsi_host_put(shost); 5878 } 5879 5880 static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info) 5881 { 5882 int rc = 0; 5883 struct pqi_device_registers __iomem *pqi_registers; 5884 unsigned long timeout; 5885 unsigned int timeout_msecs; 5886 union pqi_reset_register reset_reg; 5887 5888 pqi_registers = ctrl_info->pqi_registers; 5889 timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100; 5890 timeout = msecs_to_jiffies(timeout_msecs) + jiffies; 5891 5892 while (1) { 5893 msleep(PQI_RESET_POLL_INTERVAL_MSECS); 5894 reset_reg.all_bits = readl(&pqi_registers->device_reset); 5895 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED) 5896 break; 5897 pqi_check_ctrl_health(ctrl_info); 5898 if (pqi_ctrl_offline(ctrl_info)) { 5899 rc = -ENXIO; 5900 break; 5901 } 5902 if (time_after(jiffies, timeout)) { 5903 rc = -ETIMEDOUT; 5904 break; 5905 } 5906 } 5907 5908 return rc; 5909 } 5910 5911 static int pqi_reset(struct pqi_ctrl_info *ctrl_info) 5912 { 5913 int rc; 5914 union pqi_reset_register reset_reg; 5915 5916 if (ctrl_info->pqi_reset_quiesce_supported) { 5917 rc = sis_pqi_reset_quiesce(ctrl_info); 5918 if (rc) { 5919 dev_err(&ctrl_info->pci_dev->dev, 5920 "PQI reset failed during quiesce with error %d\n", 5921 rc); 5922 return rc; 5923 } 5924 } 5925 5926 reset_reg.all_bits = 0; 5927 reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET; 5928 reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET; 5929 5930 writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset); 5931 5932 rc = pqi_wait_for_pqi_reset_completion(ctrl_info); 5933 if (rc) 5934 dev_err(&ctrl_info->pci_dev->dev, 5935 "PQI reset failed with error %d\n", rc); 5936 5937 return rc; 5938 } 5939 5940 static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) 5941 { 5942 int rc; 5943 struct bmic_identify_controller *identify; 5944 5945 identify = kmalloc(sizeof(*identify), GFP_KERNEL); 5946 if (!identify) 5947 return -ENOMEM; 5948 5949 rc = pqi_identify_controller(ctrl_info, identify); 5950 if (rc) 5951 goto out; 5952 5953 memcpy(ctrl_info->firmware_version, identify->firmware_version, 5954 sizeof(identify->firmware_version)); 5955 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0'; 5956 snprintf(ctrl_info->firmware_version + 5957 strlen(ctrl_info->firmware_version), 5958 sizeof(ctrl_info->firmware_version), 5959 "-%u", get_unaligned_le16(&identify->firmware_build_number)); 5960 5961 out: 5962 kfree(identify); 5963 5964 return rc; 5965 } 5966 5967 struct pqi_config_table_section_info { 5968 struct pqi_ctrl_info *ctrl_info; 5969 void *section; 5970 u32 section_offset; 5971 void __iomem *section_iomem_addr; 5972 }; 5973 5974 static inline bool pqi_is_firmware_feature_supported( 5975 struct pqi_config_table_firmware_features *firmware_features, 5976 unsigned int bit_position) 5977 { 5978 unsigned int byte_index; 5979 5980 byte_index = bit_position / BITS_PER_BYTE; 5981 5982 if (byte_index >= le16_to_cpu(firmware_features->num_elements)) 5983 return false; 5984 5985 return firmware_features->features_supported[byte_index] & 5986 (1 << (bit_position % BITS_PER_BYTE)) ? 
true : false; 5987 } 5988 5989 static inline bool pqi_is_firmware_feature_enabled( 5990 struct pqi_config_table_firmware_features *firmware_features, 5991 void __iomem *firmware_features_iomem_addr, 5992 unsigned int bit_position) 5993 { 5994 unsigned int byte_index; 5995 u8 __iomem *features_enabled_iomem_addr; 5996 5997 byte_index = (bit_position / BITS_PER_BYTE) + 5998 (le16_to_cpu(firmware_features->num_elements) * 2); 5999 6000 features_enabled_iomem_addr = firmware_features_iomem_addr + 6001 offsetof(struct pqi_config_table_firmware_features, 6002 features_supported) + byte_index; 6003 6004 return *((__force u8 *)features_enabled_iomem_addr) & 6005 (1 << (bit_position % BITS_PER_BYTE)) ? true : false; 6006 } 6007 6008 static inline void pqi_request_firmware_feature( 6009 struct pqi_config_table_firmware_features *firmware_features, 6010 unsigned int bit_position) 6011 { 6012 unsigned int byte_index; 6013 6014 byte_index = (bit_position / BITS_PER_BYTE) + 6015 le16_to_cpu(firmware_features->num_elements); 6016 6017 firmware_features->features_supported[byte_index] |= 6018 (1 << (bit_position % BITS_PER_BYTE)); 6019 } 6020 6021 static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info, 6022 u16 first_section, u16 last_section) 6023 { 6024 struct pqi_vendor_general_request request; 6025 6026 memset(&request, 0, sizeof(request)); 6027 6028 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; 6029 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, 6030 &request.header.iu_length); 6031 put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE, 6032 &request.function_code); 6033 put_unaligned_le16(first_section, 6034 &request.data.config_table_update.first_section); 6035 put_unaligned_le16(last_section, 6036 &request.data.config_table_update.last_section); 6037 6038 return pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 6039 0, NULL, NO_TIMEOUT); 6040 } 6041 6042 static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, 6043 struct pqi_config_table_firmware_features *firmware_features, 6044 void __iomem *firmware_features_iomem_addr) 6045 { 6046 void *features_requested; 6047 void __iomem *features_requested_iomem_addr; 6048 6049 features_requested = firmware_features->features_supported + 6050 le16_to_cpu(firmware_features->num_elements); 6051 6052 features_requested_iomem_addr = firmware_features_iomem_addr + 6053 (features_requested - (void *)firmware_features); 6054 6055 memcpy_toio(features_requested_iomem_addr, features_requested, 6056 le16_to_cpu(firmware_features->num_elements)); 6057 6058 return pqi_config_table_update(ctrl_info, 6059 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES, 6060 PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES); 6061 } 6062 6063 struct pqi_firmware_feature { 6064 char *feature_name; 6065 unsigned int feature_bit; 6066 bool supported; 6067 bool enabled; 6068 void (*feature_status)(struct pqi_ctrl_info *ctrl_info, 6069 struct pqi_firmware_feature *firmware_feature); 6070 }; 6071 6072 static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, 6073 struct pqi_firmware_feature *firmware_feature) 6074 { 6075 if (!firmware_feature->supported) { 6076 dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n", 6077 firmware_feature->feature_name); 6078 return; 6079 } 6080 6081 if (firmware_feature->enabled) { 6082 dev_info(&ctrl_info->pci_dev->dev, 6083 "%s enabled\n", firmware_feature->feature_name); 6084 return; 6085 } 6086 6087 dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", 6088 
firmware_feature->feature_name); 6089 } 6090 6091 static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, 6092 struct pqi_firmware_feature *firmware_feature) 6093 { 6094 if (firmware_feature->feature_status) 6095 firmware_feature->feature_status(ctrl_info, firmware_feature); 6096 } 6097 6098 static DEFINE_MUTEX(pqi_firmware_features_mutex); 6099 6100 static struct pqi_firmware_feature pqi_firmware_features[] = { 6101 { 6102 .feature_name = "Online Firmware Activation", 6103 .feature_bit = PQI_FIRMWARE_FEATURE_OFA, 6104 .feature_status = pqi_firmware_feature_status, 6105 }, 6106 { 6107 .feature_name = "Serial Management Protocol", 6108 .feature_bit = PQI_FIRMWARE_FEATURE_SMP, 6109 .feature_status = pqi_firmware_feature_status, 6110 }, 6111 }; 6112 6113 static void pqi_process_firmware_features( 6114 struct pqi_config_table_section_info *section_info) 6115 { 6116 int rc; 6117 struct pqi_ctrl_info *ctrl_info; 6118 struct pqi_config_table_firmware_features *firmware_features; 6119 void __iomem *firmware_features_iomem_addr; 6120 unsigned int i; 6121 unsigned int num_features_supported; 6122 6123 ctrl_info = section_info->ctrl_info; 6124 firmware_features = section_info->section; 6125 firmware_features_iomem_addr = section_info->section_iomem_addr; 6126 6127 for (i = 0, num_features_supported = 0; 6128 i < ARRAY_SIZE(pqi_firmware_features); i++) { 6129 if (pqi_is_firmware_feature_supported(firmware_features, 6130 pqi_firmware_features[i].feature_bit)) { 6131 pqi_firmware_features[i].supported = true; 6132 num_features_supported++; 6133 } else { 6134 pqi_firmware_feature_update(ctrl_info, 6135 &pqi_firmware_features[i]); 6136 } 6137 } 6138 6139 if (num_features_supported == 0) 6140 return; 6141 6142 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6143 if (!pqi_firmware_features[i].supported) 6144 continue; 6145 pqi_request_firmware_feature(firmware_features, 6146 pqi_firmware_features[i].feature_bit); 6147 } 6148 6149 rc = pqi_enable_firmware_features(ctrl_info, firmware_features, 6150 firmware_features_iomem_addr); 6151 if (rc) { 6152 dev_err(&ctrl_info->pci_dev->dev, 6153 "failed to enable firmware features in PQI configuration table\n"); 6154 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6155 if (!pqi_firmware_features[i].supported) 6156 continue; 6157 pqi_firmware_feature_update(ctrl_info, 6158 &pqi_firmware_features[i]); 6159 } 6160 return; 6161 } 6162 6163 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6164 if (!pqi_firmware_features[i].supported) 6165 continue; 6166 if (pqi_is_firmware_feature_enabled(firmware_features, 6167 firmware_features_iomem_addr, 6168 pqi_firmware_features[i].feature_bit)) 6169 pqi_firmware_features[i].enabled = true; 6170 pqi_firmware_feature_update(ctrl_info, 6171 &pqi_firmware_features[i]); 6172 } 6173 } 6174 6175 static void pqi_init_firmware_features(void) 6176 { 6177 unsigned int i; 6178 6179 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { 6180 pqi_firmware_features[i].supported = false; 6181 pqi_firmware_features[i].enabled = false; 6182 } 6183 } 6184 6185 static void pqi_process_firmware_features_section( 6186 struct pqi_config_table_section_info *section_info) 6187 { 6188 mutex_lock(&pqi_firmware_features_mutex); 6189 pqi_init_firmware_features(); 6190 pqi_process_firmware_features(section_info); 6191 mutex_unlock(&pqi_firmware_features_mutex); 6192 } 6193 6194 static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) 6195 { 6196 u32 table_length; 6197 u32 section_offset; 6198 
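/* Note (descriptive comment, not in the original source): the configuration table is a linked list of sections in BAR 0 memory; each section header supplies the offset of the next section, and an offset of 0 terminates the walk below. */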
void __iomem *table_iomem_addr; 6199 struct pqi_config_table *config_table; 6200 struct pqi_config_table_section_header *section; 6201 struct pqi_config_table_section_info section_info; 6202 6203 table_length = ctrl_info->config_table_length; 6204 if (table_length == 0) 6205 return 0; 6206 6207 config_table = kmalloc(table_length, GFP_KERNEL); 6208 if (!config_table) { 6209 dev_err(&ctrl_info->pci_dev->dev, 6210 "failed to allocate memory for PQI configuration table\n"); 6211 return -ENOMEM; 6212 } 6213 6214 /* 6215 * Copy the config table contents from I/O memory space into the 6216 * temporary buffer. 6217 */ 6218 table_iomem_addr = ctrl_info->iomem_base + 6219 ctrl_info->config_table_offset; 6220 memcpy_fromio(config_table, table_iomem_addr, table_length); 6221 6222 section_info.ctrl_info = ctrl_info; 6223 section_offset = 6224 get_unaligned_le32(&config_table->first_section_offset); 6225 6226 while (section_offset) { 6227 section = (void *)config_table + section_offset; 6228 6229 section_info.section = section; 6230 section_info.section_offset = section_offset; 6231 section_info.section_iomem_addr = 6232 table_iomem_addr + section_offset; 6233 6234 switch (get_unaligned_le16(&section->section_id)) { 6235 case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: 6236 pqi_process_firmware_features_section(&section_info); 6237 break; 6238 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: 6239 if (pqi_disable_heartbeat) 6240 dev_warn(&ctrl_info->pci_dev->dev, 6241 "heartbeat disabled by module parameter\n"); 6242 else 6243 ctrl_info->heartbeat_counter = 6244 table_iomem_addr + 6245 section_offset + 6246 offsetof( 6247 struct pqi_config_table_heartbeat, 6248 heartbeat_counter); 6249 break; 6250 } 6251 6252 section_offset = 6253 get_unaligned_le16(&section->next_section_offset); 6254 } 6255 6256 kfree(config_table); 6257 6258 return 0; 6259 } 6260 6261 /* Switches the controller from PQI mode back into SIS mode. */ 6262 6263 static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info) 6264 { 6265 int rc; 6266 6267 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE); 6268 rc = pqi_reset(ctrl_info); 6269 if (rc) 6270 return rc; 6271 rc = sis_reenable_sis_mode(ctrl_info); 6272 if (rc) { 6273 dev_err(&ctrl_info->pci_dev->dev, 6274 "re-enabling SIS mode failed with error %d\n", rc); 6275 return rc; 6276 } 6277 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6278 6279 return 0; 6280 } 6281 6282 /* 6283 * If the controller isn't already in SIS mode, this function forces it into 6284 * SIS mode. 6285 */ 6286 6287 static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) 6288 { 6289 if (!sis_is_firmware_running(ctrl_info)) 6290 return -ENXIO; 6291 6292 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE) 6293 return 0; 6294 6295 if (sis_is_kernel_up(ctrl_info)) { 6296 pqi_save_ctrl_mode(ctrl_info, SIS_MODE); 6297 return 0; 6298 } 6299 6300 return pqi_revert_to_sis_mode(ctrl_info); 6301 } 6302 6303 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) 6304 { 6305 int rc; 6306 6307 rc = pqi_force_sis_mode(ctrl_info); 6308 if (rc) 6309 return rc; 6310 6311 /* 6312 * Wait until the controller is ready to start accepting SIS 6313 * commands. 6314 */ 6315 rc = sis_wait_for_ctrl_ready(ctrl_info); 6316 if (rc) 6317 return rc; 6318 6319 /* 6320 * Get the controller properties. This allows us to determine 6321 * whether or not it supports PQI mode.
6322 */ 6323 rc = sis_get_ctrl_properties(ctrl_info); 6324 if (rc) { 6325 dev_err(&ctrl_info->pci_dev->dev, 6326 "error obtaining controller properties\n"); 6327 return rc; 6328 } 6329 6330 rc = sis_get_pqi_capabilities(ctrl_info); 6331 if (rc) { 6332 dev_err(&ctrl_info->pci_dev->dev, 6333 "error obtaining controller capabilities\n"); 6334 return rc; 6335 } 6336 6337 if (reset_devices) { 6338 if (ctrl_info->max_outstanding_requests > 6339 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP) 6340 ctrl_info->max_outstanding_requests = 6341 PQI_MAX_OUTSTANDING_REQUESTS_KDUMP; 6342 } else { 6343 if (ctrl_info->max_outstanding_requests > 6344 PQI_MAX_OUTSTANDING_REQUESTS) 6345 ctrl_info->max_outstanding_requests = 6346 PQI_MAX_OUTSTANDING_REQUESTS; 6347 } 6348 6349 pqi_calculate_io_resources(ctrl_info); 6350 6351 rc = pqi_alloc_error_buffer(ctrl_info); 6352 if (rc) { 6353 dev_err(&ctrl_info->pci_dev->dev, 6354 "failed to allocate PQI error buffer\n"); 6355 return rc; 6356 } 6357 6358 /* 6359 * If the function we are about to call succeeds, the 6360 * controller will transition from legacy SIS mode 6361 * into PQI mode. 6362 */ 6363 rc = sis_init_base_struct_addr(ctrl_info); 6364 if (rc) { 6365 dev_err(&ctrl_info->pci_dev->dev, 6366 "error initializing PQI mode\n"); 6367 return rc; 6368 } 6369 6370 /* Wait for the controller to complete the SIS -> PQI transition. */ 6371 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6372 if (rc) { 6373 dev_err(&ctrl_info->pci_dev->dev, 6374 "transition to PQI mode failed\n"); 6375 return rc; 6376 } 6377 6378 /* From here on, we are running in PQI mode. */ 6379 ctrl_info->pqi_mode_enabled = true; 6380 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6381 6382 rc = pqi_alloc_admin_queues(ctrl_info); 6383 if (rc) { 6384 dev_err(&ctrl_info->pci_dev->dev, 6385 "failed to allocate admin queues\n"); 6386 return rc; 6387 } 6388 6389 rc = pqi_create_admin_queues(ctrl_info); 6390 if (rc) { 6391 dev_err(&ctrl_info->pci_dev->dev, 6392 "error creating admin queues\n"); 6393 return rc; 6394 } 6395 6396 rc = pqi_report_device_capability(ctrl_info); 6397 if (rc) { 6398 dev_err(&ctrl_info->pci_dev->dev, 6399 "obtaining device capability failed\n"); 6400 return rc; 6401 } 6402 6403 rc = pqi_validate_device_capability(ctrl_info); 6404 if (rc) 6405 return rc; 6406 6407 pqi_calculate_queue_resources(ctrl_info); 6408 6409 rc = pqi_enable_msix_interrupts(ctrl_info); 6410 if (rc) 6411 return rc; 6412 6413 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) { 6414 ctrl_info->max_msix_vectors = 6415 ctrl_info->num_msix_vectors_enabled; 6416 pqi_calculate_queue_resources(ctrl_info); 6417 } 6418 6419 rc = pqi_alloc_io_resources(ctrl_info); 6420 if (rc) 6421 return rc; 6422 6423 rc = pqi_alloc_operational_queues(ctrl_info); 6424 if (rc) { 6425 dev_err(&ctrl_info->pci_dev->dev, 6426 "failed to allocate operational queues\n"); 6427 return rc; 6428 } 6429 6430 pqi_init_operational_queues(ctrl_info); 6431 6432 rc = pqi_request_irqs(ctrl_info); 6433 if (rc) 6434 return rc; 6435 6436 rc = pqi_create_queues(ctrl_info); 6437 if (rc) 6438 return rc; 6439 6440 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6441 6442 ctrl_info->controller_online = true; 6443 6444 rc = pqi_process_config_table(ctrl_info); 6445 if (rc) 6446 return rc; 6447 6448 pqi_start_heartbeat_timer(ctrl_info); 6449 6450 rc = pqi_enable_events(ctrl_info); 6451 if (rc) { 6452 dev_err(&ctrl_info->pci_dev->dev, 6453 "error enabling events\n"); 6454 return rc; 6455 } 6456 6457 /* Register with the SCSI subsystem. 
*/ 6458 rc = pqi_register_scsi(ctrl_info); 6459 if (rc) 6460 return rc; 6461 6462 rc = pqi_get_ctrl_firmware_version(ctrl_info); 6463 if (rc) { 6464 dev_err(&ctrl_info->pci_dev->dev, 6465 "error obtaining firmware version\n"); 6466 return rc; 6467 } 6468 6469 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6470 if (rc) { 6471 dev_err(&ctrl_info->pci_dev->dev, 6472 "error updating host wellness\n"); 6473 return rc; 6474 } 6475 6476 pqi_schedule_update_time_worker(ctrl_info); 6477 6478 pqi_scan_scsi_devices(ctrl_info); 6479 6480 return 0; 6481 } 6482 6483 static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) 6484 { 6485 unsigned int i; 6486 struct pqi_admin_queues *admin_queues; 6487 struct pqi_event_queue *event_queue; 6488 6489 admin_queues = &ctrl_info->admin_queues; 6490 admin_queues->iq_pi_copy = 0; 6491 admin_queues->oq_ci_copy = 0; 6492 writel(0, admin_queues->oq_pi); 6493 6494 for (i = 0; i < ctrl_info->num_queue_groups; i++) { 6495 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; 6496 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; 6497 ctrl_info->queue_groups[i].oq_ci_copy = 0; 6498 6499 writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); 6500 writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); 6501 writel(0, ctrl_info->queue_groups[i].oq_pi); 6502 } 6503 6504 event_queue = &ctrl_info->event_queue; 6505 writel(0, event_queue->oq_pi); 6506 event_queue->oq_ci_copy = 0; 6507 } 6508 6509 static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) 6510 { 6511 int rc; 6512 6513 rc = pqi_force_sis_mode(ctrl_info); 6514 if (rc) 6515 return rc; 6516 6517 /* 6518 * Wait until the controller is ready to start accepting SIS 6519 * commands. 6520 */ 6521 rc = sis_wait_for_ctrl_ready_resume(ctrl_info); 6522 if (rc) 6523 return rc; 6524 6525 /* 6526 * If the function we are about to call succeeds, the 6527 * controller will transition from legacy SIS mode 6528 * into PQI mode. 6529 */ 6530 rc = sis_init_base_struct_addr(ctrl_info); 6531 if (rc) { 6532 dev_err(&ctrl_info->pci_dev->dev, 6533 "error initializing PQI mode\n"); 6534 return rc; 6535 } 6536 6537 /* Wait for the controller to complete the SIS -> PQI transition. */ 6538 rc = pqi_wait_for_pqi_mode_ready(ctrl_info); 6539 if (rc) { 6540 dev_err(&ctrl_info->pci_dev->dev, 6541 "transition to PQI mode failed\n"); 6542 return rc; 6543 } 6544 6545 /* From here on, we are running in PQI mode. 
*/ 6546 ctrl_info->pqi_mode_enabled = true; 6547 pqi_save_ctrl_mode(ctrl_info, PQI_MODE); 6548 6549 pqi_reinit_queues(ctrl_info); 6550 6551 rc = pqi_create_admin_queues(ctrl_info); 6552 if (rc) { 6553 dev_err(&ctrl_info->pci_dev->dev, 6554 "error creating admin queues\n"); 6555 return rc; 6556 } 6557 6558 rc = pqi_create_queues(ctrl_info); 6559 if (rc) 6560 return rc; 6561 6562 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); 6563 6564 ctrl_info->controller_online = true; 6565 pqi_start_heartbeat_timer(ctrl_info); 6566 pqi_ctrl_unblock_requests(ctrl_info); 6567 6568 rc = pqi_enable_events(ctrl_info); 6569 if (rc) { 6570 dev_err(&ctrl_info->pci_dev->dev, 6571 "error enabling events\n"); 6572 return rc; 6573 } 6574 6575 rc = pqi_write_driver_version_to_host_wellness(ctrl_info); 6576 if (rc) { 6577 dev_err(&ctrl_info->pci_dev->dev, 6578 "error updating host wellness\n"); 6579 return rc; 6580 } 6581 6582 pqi_schedule_update_time_worker(ctrl_info); 6583 6584 pqi_scan_scsi_devices(ctrl_info); 6585 6586 return 0; 6587 } 6588 6589 static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, 6590 u16 timeout) 6591 { 6592 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, 6593 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); 6594 } 6595 6596 static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) 6597 { 6598 int rc; 6599 u64 mask; 6600 6601 rc = pci_enable_device(ctrl_info->pci_dev); 6602 if (rc) { 6603 dev_err(&ctrl_info->pci_dev->dev, 6604 "failed to enable PCI device\n"); 6605 return rc; 6606 } 6607 6608 if (sizeof(dma_addr_t) > 4) 6609 mask = DMA_BIT_MASK(64); 6610 else 6611 mask = DMA_BIT_MASK(32); 6612 6613 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask); 6614 if (rc) { 6615 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n"); 6616 goto disable_device; 6617 } 6618 6619 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT); 6620 if (rc) { 6621 dev_err(&ctrl_info->pci_dev->dev, 6622 "failed to obtain PCI resources\n"); 6623 goto disable_device; 6624 } 6625 6626 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( 6627 ctrl_info->pci_dev, 0), 6628 sizeof(struct pqi_ctrl_registers)); 6629 if (!ctrl_info->iomem_base) { 6630 dev_err(&ctrl_info->pci_dev->dev, 6631 "failed to map memory for controller registers\n"); 6632 rc = -ENOMEM; 6633 goto release_regions; 6634 } 6635 6636 #define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6 6637 6638 /* Increase the PCIe completion timeout. */ 6639 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev, 6640 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS); 6641 if (rc) { 6642 dev_err(&ctrl_info->pci_dev->dev, 6643 "failed to set PCIe completion timeout\n"); 6644 goto release_regions; 6645 } 6646 6647 /* Enable bus mastering. 
*/ 6648 pci_set_master(ctrl_info->pci_dev); 6649 6650 ctrl_info->registers = ctrl_info->iomem_base; 6651 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers; 6652 6653 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info); 6654 6655 return 0; 6656 6657 release_regions: 6658 pci_release_regions(ctrl_info->pci_dev); 6659 disable_device: 6660 pci_disable_device(ctrl_info->pci_dev); 6661 6662 return rc; 6663 } 6664 6665 static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info) 6666 { 6667 iounmap(ctrl_info->iomem_base); 6668 pci_release_regions(ctrl_info->pci_dev); 6669 if (pci_is_enabled(ctrl_info->pci_dev)) 6670 pci_disable_device(ctrl_info->pci_dev); 6671 pci_set_drvdata(ctrl_info->pci_dev, NULL); 6672 } 6673 6674 static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) 6675 { 6676 struct pqi_ctrl_info *ctrl_info; 6677 6678 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info), 6679 GFP_KERNEL, numa_node); 6680 if (!ctrl_info) 6681 return NULL; 6682 6683 mutex_init(&ctrl_info->scan_mutex); 6684 mutex_init(&ctrl_info->lun_reset_mutex); 6685 6686 INIT_LIST_HEAD(&ctrl_info->scsi_device_list); 6687 spin_lock_init(&ctrl_info->scsi_device_list_lock); 6688 6689 INIT_WORK(&ctrl_info->event_work, pqi_event_worker); 6690 atomic_set(&ctrl_info->num_interrupts, 0); 6691 6692 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); 6693 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); 6694 6695 timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); 6696 INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); 6697 6698 sema_init(&ctrl_info->sync_request_sem, 6699 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS); 6700 init_waitqueue_head(&ctrl_info->block_requests_wait); 6701 6702 INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list); 6703 spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock); 6704 INIT_WORK(&ctrl_info->raid_bypass_retry_work, 6705 pqi_raid_bypass_retry_worker); 6706 6707 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1; 6708 ctrl_info->irq_mode = IRQ_MODE_NONE; 6709 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS; 6710 6711 return ctrl_info; 6712 } 6713 6714 static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info) 6715 { 6716 kfree(ctrl_info); 6717 } 6718 6719 static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info) 6720 { 6721 pqi_free_irqs(ctrl_info); 6722 pqi_disable_msix_interrupts(ctrl_info); 6723 } 6724 6725 static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info) 6726 { 6727 pqi_stop_heartbeat_timer(ctrl_info); 6728 pqi_free_interrupts(ctrl_info); 6729 if (ctrl_info->queue_memory_base) 6730 dma_free_coherent(&ctrl_info->pci_dev->dev, 6731 ctrl_info->queue_memory_length, 6732 ctrl_info->queue_memory_base, 6733 ctrl_info->queue_memory_base_dma_handle); 6734 if (ctrl_info->admin_queue_memory_base) 6735 dma_free_coherent(&ctrl_info->pci_dev->dev, 6736 ctrl_info->admin_queue_memory_length, 6737 ctrl_info->admin_queue_memory_base, 6738 ctrl_info->admin_queue_memory_base_dma_handle); 6739 pqi_free_all_io_requests(ctrl_info); 6740 if (ctrl_info->error_buffer) 6741 dma_free_coherent(&ctrl_info->pci_dev->dev, 6742 ctrl_info->error_buffer_length, 6743 ctrl_info->error_buffer, 6744 ctrl_info->error_buffer_dma_handle); 6745 if (ctrl_info->iomem_base) 6746 pqi_cleanup_pci_init(ctrl_info); 6747 pqi_free_ctrl_info(ctrl_info); 6748 } 6749 6750 static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) 6751 { 6752 pqi_cancel_rescan_worker(ctrl_info); 6753 
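/* Note (descriptive comment, not in the original source): controller removal tears down in roughly the reverse order of initialization: stop the remaining background workers, detach the SCSI devices and host, return the controller to SIS mode if we switched it into PQI mode, then free queues, interrupts, and PCI resources. */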
pqi_cancel_update_time_worker(ctrl_info); 6754 pqi_remove_all_scsi_devices(ctrl_info); 6755 pqi_unregister_scsi(ctrl_info); 6756 if (ctrl_info->pqi_mode_enabled) 6757 pqi_revert_to_sis_mode(ctrl_info); 6758 pqi_free_ctrl_resources(ctrl_info); 6759 } 6760 6761 static void pqi_perform_lockup_action(void) 6762 { 6763 switch (pqi_lockup_action) { 6764 case PANIC: 6765 panic("FATAL: Smart Family Controller lockup detected"); 6766 break; 6767 case REBOOT: 6768 emergency_restart(); 6769 break; 6770 case NONE: 6771 default: 6772 break; 6773 } 6774 } 6775 6776 static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = { 6777 .data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR, 6778 .status = SAM_STAT_CHECK_CONDITION, 6779 }; 6780 6781 static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info) 6782 { 6783 unsigned int i; 6784 struct pqi_io_request *io_request; 6785 struct scsi_cmnd *scmd; 6786 6787 for (i = 0; i < ctrl_info->max_io_slots; i++) { 6788 io_request = &ctrl_info->io_request_pool[i]; 6789 if (atomic_read(&io_request->refcount) == 0) 6790 continue; 6791 6792 scmd = io_request->scmd; 6793 if (scmd) { 6794 set_host_byte(scmd, DID_NO_CONNECT); 6795 } else { 6796 io_request->status = -ENXIO; 6797 io_request->error_info = 6798 &pqi_ctrl_offline_raid_error_info; 6799 } 6800 6801 io_request->io_complete_callback(io_request, 6802 io_request->context); 6803 } 6804 } 6805 6806 static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info) 6807 { 6808 pqi_perform_lockup_action(); 6809 pqi_stop_heartbeat_timer(ctrl_info); 6810 pqi_free_interrupts(ctrl_info); 6811 pqi_cancel_rescan_worker(ctrl_info); 6812 pqi_cancel_update_time_worker(ctrl_info); 6813 pqi_ctrl_wait_until_quiesced(ctrl_info); 6814 pqi_fail_all_outstanding_requests(ctrl_info); 6815 pqi_clear_all_queued_raid_bypass_retries(ctrl_info); 6816 pqi_ctrl_unblock_requests(ctrl_info); 6817 } 6818 6819 static void pqi_ctrl_offline_worker(struct work_struct *work) 6820 { 6821 struct pqi_ctrl_info *ctrl_info; 6822 6823 ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work); 6824 pqi_take_ctrl_offline_deferred(ctrl_info); 6825 } 6826 6827 static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info) 6828 { 6829 if (!ctrl_info->controller_online) 6830 return; 6831 6832 ctrl_info->controller_online = false; 6833 ctrl_info->pqi_mode_enabled = false; 6834 pqi_ctrl_block_requests(ctrl_info); 6835 if (!pqi_disable_ctrl_shutdown) 6836 sis_shutdown_ctrl(ctrl_info); 6837 pci_disable_device(ctrl_info->pci_dev); 6838 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n"); 6839 schedule_work(&ctrl_info->ctrl_offline_work); 6840 } 6841 6842 static void pqi_print_ctrl_info(struct pci_dev *pci_dev, 6843 const struct pci_device_id *id) 6844 { 6845 char *ctrl_description; 6846 6847 if (id->driver_data) 6848 ctrl_description = (char *)id->driver_data; 6849 else 6850 ctrl_description = "Microsemi Smart Family Controller"; 6851 6852 dev_info(&pci_dev->dev, "%s found\n", ctrl_description); 6853 } 6854 6855 static int pqi_pci_probe(struct pci_dev *pci_dev, 6856 const struct pci_device_id *id) 6857 { 6858 int rc; 6859 int node; 6860 struct pqi_ctrl_info *ctrl_info; 6861 6862 pqi_print_ctrl_info(pci_dev, id); 6863 6864 if (pqi_disable_device_id_wildcards && 6865 id->subvendor == PCI_ANY_ID && 6866 id->subdevice == PCI_ANY_ID) { 6867 dev_warn(&pci_dev->dev, 6868 "controller not probed because device ID wildcards are disabled\n"); 6869 return -ENODEV; 6870 } 6871 6872 if (id->subvendor == PCI_ANY_ID || 
id->subdevice == PCI_ANY_ID) 6873 dev_warn(&pci_dev->dev, 6874 "controller device ID matched using wildcards\n"); 6875 6876 node = dev_to_node(&pci_dev->dev); 6877 if (node == NUMA_NO_NODE) 6878 set_dev_node(&pci_dev->dev, 0); 6879 6880 ctrl_info = pqi_alloc_ctrl_info(node); 6881 if (!ctrl_info) { 6882 dev_err(&pci_dev->dev, 6883 "failed to allocate controller info block\n"); 6884 return -ENOMEM; 6885 } 6886 6887 ctrl_info->pci_dev = pci_dev; 6888 6889 rc = pqi_pci_init(ctrl_info); 6890 if (rc) 6891 goto error; 6892 6893 rc = pqi_ctrl_init(ctrl_info); 6894 if (rc) 6895 goto error; 6896 6897 return 0; 6898 6899 error: 6900 pqi_remove_ctrl(ctrl_info); 6901 6902 return rc; 6903 } 6904 6905 static void pqi_pci_remove(struct pci_dev *pci_dev) 6906 { 6907 struct pqi_ctrl_info *ctrl_info; 6908 6909 ctrl_info = pci_get_drvdata(pci_dev); 6910 if (!ctrl_info) 6911 return; 6912 6913 pqi_remove_ctrl(ctrl_info); 6914 } 6915 6916 static void pqi_shutdown(struct pci_dev *pci_dev) 6917 { 6918 int rc; 6919 struct pqi_ctrl_info *ctrl_info; 6920 6921 ctrl_info = pci_get_drvdata(pci_dev); 6922 if (!ctrl_info) 6923 goto error; 6924 6925 /* 6926 * Write all data in the controller's battery-backed cache to 6927 * storage. 6928 */ 6929 rc = pqi_flush_cache(ctrl_info, SHUTDOWN); 6930 pqi_reset(ctrl_info); 6931 if (rc == 0) 6932 return; 6933 6934 error: 6935 dev_warn(&pci_dev->dev, 6936 "unable to flush controller cache\n"); 6937 } 6938 6939 static void pqi_process_lockup_action_param(void) 6940 { 6941 unsigned int i; 6942 6943 if (!pqi_lockup_action_param) 6944 return; 6945 6946 for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { 6947 if (strcmp(pqi_lockup_action_param, 6948 pqi_lockup_actions[i].name) == 0) { 6949 pqi_lockup_action = pqi_lockup_actions[i].action; 6950 return; 6951 } 6952 } 6953 6954 pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n", 6955 DRIVER_NAME_SHORT, pqi_lockup_action_param); 6956 } 6957 6958 static void pqi_process_module_params(void) 6959 { 6960 pqi_process_lockup_action_param(); 6961 } 6962 6963 static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state) 6964 { 6965 struct pqi_ctrl_info *ctrl_info; 6966 6967 ctrl_info = pci_get_drvdata(pci_dev); 6968 6969 pqi_disable_events(ctrl_info); 6970 pqi_cancel_update_time_worker(ctrl_info); 6971 pqi_cancel_rescan_worker(ctrl_info); 6972 pqi_wait_until_scan_finished(ctrl_info); 6973 pqi_wait_until_lun_reset_finished(ctrl_info); 6974 pqi_flush_cache(ctrl_info, SUSPEND); 6975 pqi_ctrl_block_requests(ctrl_info); 6976 pqi_ctrl_wait_until_quiesced(ctrl_info); 6977 pqi_wait_until_inbound_queues_empty(ctrl_info); 6978 pqi_ctrl_wait_for_pending_io(ctrl_info); 6979 pqi_stop_heartbeat_timer(ctrl_info); 6980 6981 if (state.event == PM_EVENT_FREEZE) 6982 return 0; 6983 6984 pci_save_state(pci_dev); 6985 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 6986 6987 ctrl_info->controller_online = false; 6988 ctrl_info->pqi_mode_enabled = false; 6989 6990 return 0; 6991 } 6992 6993 static __maybe_unused int pqi_resume(struct pci_dev *pci_dev) 6994 { 6995 int rc; 6996 struct pqi_ctrl_info *ctrl_info; 6997 6998 ctrl_info = pci_get_drvdata(pci_dev); 6999 7000 if (pci_dev->current_state != PCI_D0) { 7001 ctrl_info->max_hw_queue_index = 0; 7002 pqi_free_interrupts(ctrl_info); 7003 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX); 7004 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler, 7005 IRQF_SHARED, DRIVER_NAME_SHORT, 7006 &ctrl_info->queue_groups[0]); 7007 if (rc) { 7008 
dev_err(&ctrl_info->pci_dev->dev, 7009 "irq %u init failed with error %d\n", 7010 pci_dev->irq, rc); 7011 return rc; 7012 } 7013 pqi_start_heartbeat_timer(ctrl_info); 7014 pqi_ctrl_unblock_requests(ctrl_info); 7015 return 0; 7016 } 7017 7018 pci_set_power_state(pci_dev, PCI_D0); 7019 pci_restore_state(pci_dev); 7020 7021 return pqi_ctrl_init_resume(ctrl_info); 7022 } 7023 7024 /* Define the PCI IDs for the controllers that we support. */ 7025 static const struct pci_device_id pqi_pci_id_table[] = { 7026 { 7027 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7028 0x105b, 0x1211) 7029 }, 7030 { 7031 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7032 0x105b, 0x1321) 7033 }, 7034 { 7035 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7036 0x152d, 0x8a22) 7037 }, 7038 { 7039 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7040 0x152d, 0x8a23) 7041 }, 7042 { 7043 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7044 0x152d, 0x8a24) 7045 }, 7046 { 7047 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7048 0x152d, 0x8a36) 7049 }, 7050 { 7051 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7052 0x152d, 0x8a37) 7053 }, 7054 { 7055 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7056 0x193d, 0x8460) 7057 }, 7058 { 7059 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7060 0x193d, 0x8461) 7061 }, 7062 { 7063 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7064 0x193d, 0xf460) 7065 }, 7066 { 7067 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7068 0x193d, 0xf461) 7069 }, 7070 { 7071 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7072 0x1bd4, 0x0045) 7073 }, 7074 { 7075 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7076 0x1bd4, 0x0046) 7077 }, 7078 { 7079 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7080 0x1bd4, 0x0047) 7081 }, 7082 { 7083 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7084 0x1bd4, 0x0048) 7085 }, 7086 { 7087 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7088 0x1bd4, 0x004a) 7089 }, 7090 { 7091 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7092 0x1bd4, 0x004b) 7093 }, 7094 { 7095 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7096 0x1bd4, 0x004c) 7097 }, 7098 { 7099 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7100 PCI_VENDOR_ID_ADAPTEC2, 0x0110) 7101 }, 7102 { 7103 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7104 PCI_VENDOR_ID_ADAPTEC2, 0x0608) 7105 }, 7106 { 7107 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7108 PCI_VENDOR_ID_ADAPTEC2, 0x0800) 7109 }, 7110 { 7111 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7112 PCI_VENDOR_ID_ADAPTEC2, 0x0801) 7113 }, 7114 { 7115 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7116 PCI_VENDOR_ID_ADAPTEC2, 0x0802) 7117 }, 7118 { 7119 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7120 PCI_VENDOR_ID_ADAPTEC2, 0x0803) 7121 }, 7122 { 7123 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7124 PCI_VENDOR_ID_ADAPTEC2, 0x0804) 7125 }, 7126 { 7127 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7128 PCI_VENDOR_ID_ADAPTEC2, 0x0805) 7129 }, 7130 { 7131 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7132 PCI_VENDOR_ID_ADAPTEC2, 0x0806) 7133 }, 7134 { 7135 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7136 PCI_VENDOR_ID_ADAPTEC2, 0x0807) 7137 }, 7138 { 7139 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7140 PCI_VENDOR_ID_ADAPTEC2, 0x0900) 7141 }, 7142 { 7143 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7144 PCI_VENDOR_ID_ADAPTEC2, 0x0901) 7145 }, 7146 { 7147 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7148 PCI_VENDOR_ID_ADAPTEC2, 0x0902) 7149 }, 7150 { 7151 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7152 PCI_VENDOR_ID_ADAPTEC2, 0x0903) 7153 }, 7154 { 
7155 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7156 PCI_VENDOR_ID_ADAPTEC2, 0x0904) 7157 }, 7158 { 7159 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7160 PCI_VENDOR_ID_ADAPTEC2, 0x0905) 7161 }, 7162 { 7163 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7164 PCI_VENDOR_ID_ADAPTEC2, 0x0906) 7165 }, 7166 { 7167 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7168 PCI_VENDOR_ID_ADAPTEC2, 0x0907) 7169 }, 7170 { 7171 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7172 PCI_VENDOR_ID_ADAPTEC2, 0x0908) 7173 }, 7174 { 7175 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7176 PCI_VENDOR_ID_ADAPTEC2, 0x090a) 7177 }, 7178 { 7179 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7180 PCI_VENDOR_ID_ADAPTEC2, 0x1200) 7181 }, 7182 { 7183 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7184 PCI_VENDOR_ID_ADAPTEC2, 0x1201) 7185 }, 7186 { 7187 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7188 PCI_VENDOR_ID_ADAPTEC2, 0x1202) 7189 }, 7190 { 7191 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7192 PCI_VENDOR_ID_ADAPTEC2, 0x1280) 7193 }, 7194 { 7195 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7196 PCI_VENDOR_ID_ADAPTEC2, 0x1281) 7197 }, 7198 { 7199 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7200 PCI_VENDOR_ID_ADAPTEC2, 0x1282) 7201 }, 7202 { 7203 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7204 PCI_VENDOR_ID_ADAPTEC2, 0x1300) 7205 }, 7206 { 7207 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7208 PCI_VENDOR_ID_ADAPTEC2, 0x1301) 7209 }, 7210 { 7211 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7212 PCI_VENDOR_ID_ADAPTEC2, 0x1302) 7213 }, 7214 { 7215 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7216 PCI_VENDOR_ID_ADAPTEC2, 0x1303) 7217 }, 7218 { 7219 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7220 PCI_VENDOR_ID_ADAPTEC2, 0x1380) 7221 }, 7222 { 7223 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7224 PCI_VENDOR_ID_ADVANTECH, 0x8312) 7225 }, 7226 { 7227 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7228 PCI_VENDOR_ID_DELL, 0x1fe0) 7229 }, 7230 { 7231 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7232 PCI_VENDOR_ID_HP, 0x0600) 7233 }, 7234 { 7235 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7236 PCI_VENDOR_ID_HP, 0x0601) 7237 }, 7238 { 7239 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7240 PCI_VENDOR_ID_HP, 0x0602) 7241 }, 7242 { 7243 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7244 PCI_VENDOR_ID_HP, 0x0603) 7245 }, 7246 { 7247 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7248 PCI_VENDOR_ID_HP, 0x0609) 7249 }, 7250 { 7251 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7252 PCI_VENDOR_ID_HP, 0x0650) 7253 }, 7254 { 7255 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7256 PCI_VENDOR_ID_HP, 0x0651) 7257 }, 7258 { 7259 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7260 PCI_VENDOR_ID_HP, 0x0652) 7261 }, 7262 { 7263 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7264 PCI_VENDOR_ID_HP, 0x0653) 7265 }, 7266 { 7267 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7268 PCI_VENDOR_ID_HP, 0x0654) 7269 }, 7270 { 7271 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7272 PCI_VENDOR_ID_HP, 0x0655) 7273 }, 7274 { 7275 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7276 PCI_VENDOR_ID_HP, 0x0700) 7277 }, 7278 { 7279 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7280 PCI_VENDOR_ID_HP, 0x0701) 7281 }, 7282 { 7283 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7284 PCI_VENDOR_ID_HP, 0x1001) 7285 }, 7286 { 7287 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7288 PCI_VENDOR_ID_HP, 0x1100) 7289 }, 7290 { 7291 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7292 PCI_VENDOR_ID_HP, 0x1101) 7293 }, 7294 { 7295 
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 7296 PCI_ANY_ID, PCI_ANY_ID) 7297 }, 7298 { 0 } 7299 }; 7300 7301 MODULE_DEVICE_TABLE(pci, pqi_pci_id_table); 7302 7303 static struct pci_driver pqi_pci_driver = { 7304 .name = DRIVER_NAME_SHORT, 7305 .id_table = pqi_pci_id_table, 7306 .probe = pqi_pci_probe, 7307 .remove = pqi_pci_remove, 7308 .shutdown = pqi_shutdown, 7309 #if defined(CONFIG_PM) 7310 .suspend = pqi_suspend, 7311 .resume = pqi_resume, 7312 #endif 7313 }; 7314 7315 static int __init pqi_init(void) 7316 { 7317 int rc; 7318 7319 pr_info(DRIVER_NAME "\n"); 7320 7321 pqi_sas_transport_template = 7322 sas_attach_transport(&pqi_sas_transport_functions); 7323 if (!pqi_sas_transport_template) 7324 return -ENODEV; 7325 7326 pqi_process_module_params(); 7327 7328 rc = pci_register_driver(&pqi_pci_driver); 7329 if (rc) 7330 sas_release_transport(pqi_sas_transport_template); 7331 7332 return rc; 7333 } 7334 7335 static void __exit pqi_cleanup(void) 7336 { 7337 pci_unregister_driver(&pqi_pci_driver); 7338 sas_release_transport(pqi_sas_transport_template); 7339 } 7340 7341 module_init(pqi_init); 7342 module_exit(pqi_cleanup); 7343 7344 static void __attribute__((unused)) verify_structures(void) 7345 { 7346 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7347 sis_host_to_ctrl_doorbell) != 0x20); 7348 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7349 sis_interrupt_mask) != 0x34); 7350 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7351 sis_ctrl_to_host_doorbell) != 0x9c); 7352 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7353 sis_ctrl_to_host_doorbell_clear) != 0xa0); 7354 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7355 sis_driver_scratch) != 0xb0); 7356 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7357 sis_firmware_status) != 0xbc); 7358 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7359 sis_mailbox) != 0x1000); 7360 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers, 7361 pqi_registers) != 0x4000); 7362 7363 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7364 iu_type) != 0x0); 7365 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7366 iu_length) != 0x2); 7367 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7368 response_queue_id) != 0x4); 7369 BUILD_BUG_ON(offsetof(struct pqi_iu_header, 7370 work_area) != 0x6); 7371 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8); 7372 7373 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7374 status) != 0x0); 7375 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7376 service_response) != 0x1); 7377 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7378 data_present) != 0x2); 7379 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7380 reserved) != 0x3); 7381 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7382 residual_count) != 0x4); 7383 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7384 data_length) != 0x8); 7385 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7386 reserved1) != 0xa); 7387 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info, 7388 data) != 0xc); 7389 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c); 7390 7391 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7392 data_in_result) != 0x0); 7393 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7394 data_out_result) != 0x1); 7395 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7396 reserved) != 0x2); 7397 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7398 status) != 0x5); 7399 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7400 status_qualifier) != 0x6); 7401 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7402 sense_data_length) != 0x8); 7403 
BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7404 response_data_length) != 0xa); 7405 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7406 data_in_transferred) != 0xc); 7407 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7408 data_out_transferred) != 0x10); 7409 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info, 7410 data) != 0x14); 7411 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114); 7412 7413 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7414 signature) != 0x0); 7415 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7416 function_and_status_code) != 0x8); 7417 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7418 max_admin_iq_elements) != 0x10); 7419 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7420 max_admin_oq_elements) != 0x11); 7421 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7422 admin_iq_element_length) != 0x12); 7423 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7424 admin_oq_element_length) != 0x13); 7425 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7426 max_reset_timeout) != 0x14); 7427 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7428 legacy_intx_status) != 0x18); 7429 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7430 legacy_intx_mask_set) != 0x1c); 7431 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7432 legacy_intx_mask_clear) != 0x20); 7433 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7434 device_status) != 0x40); 7435 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7436 admin_iq_pi_offset) != 0x48); 7437 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7438 admin_oq_ci_offset) != 0x50); 7439 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7440 admin_iq_element_array_addr) != 0x58); 7441 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7442 admin_oq_element_array_addr) != 0x60); 7443 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7444 admin_iq_ci_addr) != 0x68); 7445 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7446 admin_oq_pi_addr) != 0x70); 7447 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7448 admin_iq_num_elements) != 0x78); 7449 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7450 admin_oq_num_elements) != 0x79); 7451 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7452 admin_queue_int_msg_num) != 0x7a); 7453 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7454 device_error) != 0x80); 7455 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7456 error_details) != 0x88); 7457 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7458 device_reset) != 0x90); 7459 BUILD_BUG_ON(offsetof(struct pqi_device_registers, 7460 power_action) != 0x94); 7461 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100); 7462 7463 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7464 header.iu_type) != 0); 7465 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7466 header.iu_length) != 2); 7467 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7468 header.work_area) != 6); 7469 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7470 request_id) != 8); 7471 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7472 function_code) != 10); 7473 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7474 data.report_device_capability.buffer_length) != 44); 7475 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7476 data.report_device_capability.sg_descriptor) != 48); 7477 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7478 data.create_operational_iq.queue_id) != 12); 7479 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7480 
data.create_operational_iq.element_array_addr) != 16); 7481 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7482 data.create_operational_iq.ci_addr) != 24); 7483 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7484 data.create_operational_iq.num_elements) != 32); 7485 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7486 data.create_operational_iq.element_length) != 34); 7487 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7488 data.create_operational_iq.queue_protocol) != 36); 7489 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7490 data.create_operational_oq.queue_id) != 12); 7491 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7492 data.create_operational_oq.element_array_addr) != 16); 7493 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7494 data.create_operational_oq.pi_addr) != 24); 7495 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7496 data.create_operational_oq.num_elements) != 32); 7497 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7498 data.create_operational_oq.element_length) != 34); 7499 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7500 data.create_operational_oq.queue_protocol) != 36); 7501 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7502 data.create_operational_oq.int_msg_num) != 40); 7503 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7504 data.create_operational_oq.coalescing_count) != 42); 7505 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7506 data.create_operational_oq.min_coalescing_time) != 44); 7507 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7508 data.create_operational_oq.max_coalescing_time) != 48); 7509 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 7510 data.delete_operational_queue.queue_id) != 12); 7511 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 7512 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7513 data.create_operational_iq) != 64 - 11); 7514 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7515 data.create_operational_oq) != 64 - 11); 7516 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 7517 data.delete_operational_queue) != 64 - 11); 7518 7519 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7520 header.iu_type) != 0); 7521 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7522 header.iu_length) != 2); 7523 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7524 header.work_area) != 6); 7525 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7526 request_id) != 8); 7527 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7528 function_code) != 10); 7529 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7530 status) != 11); 7531 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7532 data.create_operational_iq.status_descriptor) != 12); 7533 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7534 data.create_operational_iq.iq_pi_offset) != 16); 7535 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7536 data.create_operational_oq.status_descriptor) != 12); 7537 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, 7538 data.create_operational_oq.oq_ci_offset) != 16); 7539 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64); 7540 7541 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7542 header.iu_type) != 0); 7543 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7544 header.iu_length) != 2); 7545 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7546 header.response_queue_id) != 4); 
7547 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7548 header.work_area) != 6); 7549 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7550 request_id) != 8); 7551 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7552 nexus_id) != 10); 7553 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7554 buffer_length) != 12); 7555 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7556 lun_number) != 16); 7557 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7558 protocol_specific) != 24); 7559 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7560 error_index) != 27); 7561 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7562 cdb) != 32); 7563 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, 7564 sg_descriptors) != 64); 7565 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != 7566 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7567 7568 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7569 header.iu_type) != 0); 7570 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7571 header.iu_length) != 2); 7572 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7573 header.response_queue_id) != 4); 7574 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7575 header.work_area) != 6); 7576 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7577 request_id) != 8); 7578 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7579 nexus_id) != 12); 7580 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7581 buffer_length) != 16); 7582 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7583 data_encryption_key_index) != 22); 7584 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7585 encrypt_tweak_lower) != 24); 7586 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7587 encrypt_tweak_upper) != 28); 7588 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7589 cdb) != 32); 7590 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7591 error_index) != 48); 7592 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7593 num_sg_descriptors) != 50); 7594 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7595 cdb_length) != 51); 7596 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7597 lun_number) != 52); 7598 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request, 7599 sg_descriptors) != 64); 7600 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) != 7601 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); 7602 7603 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7604 header.iu_type) != 0); 7605 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7606 header.iu_length) != 2); 7607 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7608 request_id) != 8); 7609 BUILD_BUG_ON(offsetof(struct pqi_io_response, 7610 error_index) != 10); 7611 7612 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7613 header.iu_type) != 0); 7614 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7615 header.iu_length) != 2); 7616 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7617 header.response_queue_id) != 4); 7618 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7619 request_id) != 8); 7620 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7621 data.report_event_configuration.buffer_length) != 12); 7622 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7623 data.report_event_configuration.sg_descriptors) != 16); 7624 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7625 data.set_event_configuration.global_event_oq_id) != 10); 7626 BUILD_BUG_ON(offsetof(struct pqi_general_management_request, 7627 data.set_event_configuration.buffer_length) != 12); 7628 BUILD_BUG_ON(offsetof(struct 
pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);
}
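/*
 * Illustrative sketch (not part of the driver): the BUILD_BUG_ON()
 * checks above pin the driver's structure definitions to the fixed
 * byte offsets and sizes of the controller's wire format, so an
 * accidental structure change fails at compile time rather than
 * corrupting I/O at run time.  The structure and function below are
 * hypothetical examples of the same pattern and are compiled out.
 */
#if 0
struct example_wire_header {
	__le16	iu_type;	/* bytes 0-1 */
	__le16	iu_length;	/* bytes 2-3 */
	__le32	reserved;	/* bytes 4-7 */
	__le16	request_id;	/* bytes 8-9 */
};				/* padded to 12 bytes by 4-byte alignment */

static void __attribute__((unused)) pqi_example_verify_structures(void)
{
	/* each condition must be false, or the build fails */
	BUILD_BUG_ON(offsetof(struct example_wire_header, iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct example_wire_header, request_id) != 8);
	BUILD_BUG_ON(sizeof(struct example_wire_header) != 12);
}
#endif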