1 /* 2 * This is the Fusion MPT base driver providing common API layer interface 3 * for access to MPT (Message Passing Technology) firmware. 4 * 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c 6 * Copyright (C) 2012-2014 LSI Corporation 7 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 2 12 * of the License, or (at your option) any later version. 13 * 14 * This program is distributed in the hope that it will be useful, 15 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * GNU General Public License for more details. 18 * 19 * NO WARRANTY 20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 24 * solely responsible for determining the appropriateness of using and 25 * distributing the Program and assumes all risks associated with its 26 * exercise of rights under this Agreement, including but not limited to 27 * the risks and costs of program errors, damage to or loss of data, 28 * programs or equipment, and unavailability or interruption of operations. 
29 30 * DISCLAIMER OF LIABILITY 31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 38 39 * You should have received a copy of the GNU General Public License 40 * along with this program; if not, write to the Free Software 41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 42 * USA. 43 */ 44 45 #include <linux/kernel.h> 46 #include <linux/module.h> 47 #include <linux/errno.h> 48 #include <linux/init.h> 49 #include <linux/slab.h> 50 #include <linux/types.h> 51 #include <linux/pci.h> 52 #include <linux/kdev_t.h> 53 #include <linux/blkdev.h> 54 #include <linux/delay.h> 55 #include <linux/interrupt.h> 56 #include <linux/dma-mapping.h> 57 #include <linux/io.h> 58 #include <linux/time.h> 59 #include <linux/kthread.h> 60 #include <linux/aer.h> 61 62 63 #include "mpt3sas_base.h" 64 65 static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; 66 67 68 #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ 69 70 /* maximum controller queue depth */ 71 #define MAX_HBA_QUEUE_DEPTH 30000 72 #define MAX_CHAIN_DEPTH 100000 73 static int max_queue_depth = -1; 74 module_param(max_queue_depth, int, 0); 75 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); 76 77 static int max_sgl_entries = -1; 78 module_param(max_sgl_entries, int, 0); 79 MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); 80 81 static int msix_disable = -1; 82 module_param(msix_disable, int, 0); 83 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 84 85 static int max_msix_vectors 
= 8; 86 module_param(max_msix_vectors, int, 0); 87 MODULE_PARM_DESC(max_msix_vectors, 88 " max msix vectors - (default=8)"); 89 90 static int mpt3sas_fwfault_debug; 91 MODULE_PARM_DESC(mpt3sas_fwfault_debug, 92 " enable detection of firmware fault and halt firmware - (default=0)"); 93 94 static int 95 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); 96 97 /** 98 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 99 * 100 */ 101 static int 102 _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) 103 { 104 int ret = param_set_int(val, kp); 105 struct MPT3SAS_ADAPTER *ioc; 106 107 if (ret) 108 return ret; 109 110 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug); 111 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) 112 ioc->fwfault_debug = mpt3sas_fwfault_debug; 113 return 0; 114 } 115 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, 116 param_get_int, &mpt3sas_fwfault_debug, 0644); 117 118 /** 119 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc 120 * @arg: input argument, used to derive ioc 121 * 122 * Return 0 if controller is removed from pci subsystem. 123 * Return -1 for other case. 124 */ 125 static int mpt3sas_remove_dead_ioc_func(void *arg) 126 { 127 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; 128 struct pci_dev *pdev; 129 130 if ((ioc == NULL)) 131 return -1; 132 133 pdev = ioc->pdev; 134 if ((pdev == NULL)) 135 return -1; 136 pci_stop_and_remove_bus_device_locked(pdev); 137 return 0; 138 } 139 140 /** 141 * _base_fault_reset_work - workq handling ioc fault conditions 142 * @work: input argument, used to derive ioc 143 * Context: sleep. 144 * 145 * Return nothing. 
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
		container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	/* If a host reset is already in progress, skip this poll cycle
	 * and just rearm the timer (note: lock is still held at the
	 * rearm_timer label). */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	/* All state bits set: the doorbell reads back as all ones, which
	 * means the controller has dropped off the bus (dead IOC). */
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the Dead Host from a kthread; removing the PCI
		 * device can sleep for a long time. */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "mpt3sas_dead_ioc_%d", ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			    ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			    ioc->name, __func__);
		return; /* don't rearm timer */
	}

	/* Controller is responsive but not operational: attempt recovery
	 * with a forced hard reset. */
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
		    FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and still not operational: give up */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	/* already running */
	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
		return;
	}
	/* queue the first poll under the lock so we cannot race with
	 * mpt3sas_base_stop_watchdog() clearing fault_reset_work_q */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
259 */ 260 void 261 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) 262 { 263 unsigned long flags; 264 struct workqueue_struct *wq; 265 266 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); 267 wq = ioc->fault_reset_work_q; 268 ioc->fault_reset_work_q = NULL; 269 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); 270 if (wq) { 271 if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) 272 flush_workqueue(wq); 273 destroy_workqueue(wq); 274 } 275 } 276 277 /** 278 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code 279 * @ioc: per adapter object 280 * @fault_code: fault code 281 * 282 * Return nothing. 283 */ 284 void 285 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) 286 { 287 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", 288 ioc->name, fault_code); 289 } 290 291 /** 292 * mpt3sas_halt_firmware - halt's mpt controller firmware 293 * @ioc: per adapter object 294 * 295 * For debugging timeout related issues. Writing 0xCOFFEE00 296 * to the doorbell register will halt controller firmware. With 297 * the purpose to stop both driver and firmware, the enduser can 298 * obtain a ring buffer from controller UART. 
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* only active when the fwfault_debug module parameter is set */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc , doorbell);
	else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	/* fwfault_debug == 2: spin forever so firmware state can be
	 * examined (e.g. over the controller UART); otherwise panic */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Logs a human-readable description of a non-success IOCStatus and
 * dumps the originating request frame.
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* SCSI IO statuses are reported elsewhere; intentionally no desc,
	 * so the function returns without logging for these. */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* derive the request frame size so _debug_dump_mf dumps the
	 * right number of dwords for the originating function */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware asyn events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	/* only log when event debugging is enabled */
	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		/* discovery gets a richer, multi-part line; the pr_info
		 * continuations below rely on printk continuation */
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_info("discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		pr_info("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		desc = "Log Entry Added";
		break;
	}

	/* unrecognized events are silently ignored */
	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}
#endif

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
637 */ 638 static void 639 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) 640 { 641 union loginfo_type { 642 u32 loginfo; 643 struct { 644 u32 subcode:16; 645 u32 code:8; 646 u32 originator:4; 647 u32 bus_type:4; 648 } dw; 649 }; 650 union loginfo_type sas_loginfo; 651 char *originator_str = NULL; 652 653 sas_loginfo.loginfo = log_info; 654 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 655 return; 656 657 /* each nexus loss loginfo */ 658 if (log_info == 0x31170000) 659 return; 660 661 /* eat the loginfos associated with task aborts */ 662 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == 663 0x31140000 || log_info == 0x31130000)) 664 return; 665 666 switch (sas_loginfo.dw.originator) { 667 case 0: 668 originator_str = "IOP"; 669 break; 670 case 1: 671 originator_str = "PL"; 672 break; 673 case 2: 674 originator_str = "IR"; 675 break; 676 } 677 678 pr_warn(MPT3SAS_FMT 679 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", 680 ioc->name, log_info, 681 originator_str, sas_loginfo.dw.code, 682 sas_loginfo.dw.subcode); 683 } 684 685 /** 686 * _base_display_reply_info - 687 * @ioc: per adapter object 688 * @smid: system request message index 689 * @msix_index: MSIX table index supplied by the OS 690 * @reply: reply message frame(lower 32bit addr) 691 * 692 * Return nothing. 
693 */ 694 static void 695 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 696 u32 reply) 697 { 698 MPI2DefaultReply_t *mpi_reply; 699 u16 ioc_status; 700 u32 loginfo = 0; 701 702 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 703 if (unlikely(!mpi_reply)) { 704 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", 705 ioc->name, __FILE__, __LINE__, __func__); 706 return; 707 } 708 ioc_status = le16_to_cpu(mpi_reply->IOCStatus); 709 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING 710 if ((ioc_status & MPI2_IOCSTATUS_MASK) && 711 (ioc->logging_level & MPT_DEBUG_REPLY)) { 712 _base_sas_ioc_info(ioc , mpi_reply, 713 mpt3sas_base_get_msg_frame(ioc, smid)); 714 } 715 #endif 716 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { 717 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); 718 _base_sas_log_info(ioc, loginfo); 719 } 720 721 if (ioc_status || loginfo) { 722 ioc_status &= MPI2_IOCSTATUS_MASK; 723 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); 724 } 725 } 726 727 /** 728 * mpt3sas_base_done - base internal command completion routine 729 * @ioc: per adapter object 730 * @smid: system request message index 731 * @msix_index: MSIX table index supplied by the OS 732 * @reply: reply message frame(lower 32bit addr) 733 * 734 * Return 1 meaning mf should be freed from _base_interrupt 735 * 0 means the mf is freed from this function. 
736 */ 737 u8 738 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, 739 u32 reply) 740 { 741 MPI2DefaultReply_t *mpi_reply; 742 743 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 744 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) 745 return 1; 746 747 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) 748 return 1; 749 750 ioc->base_cmds.status |= MPT3_CMD_COMPLETE; 751 if (mpi_reply) { 752 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; 753 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); 754 } 755 ioc->base_cmds.status &= ~MPT3_CMD_PENDING; 756 757 complete(&ioc->base_cmds.done); 758 return 1; 759 } 760 761 /** 762 * _base_async_event - main callback handler for firmware asyn events 763 * @ioc: per adapter object 764 * @msix_index: MSIX table index supplied by the OS 765 * @reply: reply message frame(lower 32bit addr) 766 * 767 * Return 1 meaning mf should be freed from _base_interrupt 768 * 0 means the mf is freed from this function. 
769 */ 770 static u8 771 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) 772 { 773 Mpi2EventNotificationReply_t *mpi_reply; 774 Mpi2EventAckRequest_t *ack_request; 775 u16 smid; 776 777 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); 778 if (!mpi_reply) 779 return 1; 780 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) 781 return 1; 782 #ifdef CONFIG_SCSI_MPT3SAS_LOGGING 783 _base_display_event_data(ioc, mpi_reply); 784 #endif 785 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) 786 goto out; 787 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 788 if (!smid) { 789 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 790 ioc->name, __func__); 791 goto out; 792 } 793 794 ack_request = mpt3sas_base_get_msg_frame(ioc, smid); 795 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); 796 ack_request->Function = MPI2_FUNCTION_EVENT_ACK; 797 ack_request->Event = mpi_reply->Event; 798 ack_request->EventContext = mpi_reply->EventContext; 799 ack_request->VF_ID = 0; /* TODO */ 800 ack_request->VP_ID = 0; 801 mpt3sas_base_put_smid_default(ioc, smid); 802 803 out: 804 805 /* scsih callback handler */ 806 mpt3sas_scsih_event_callback(ioc, msix_index, reply); 807 808 /* ctl callback handler */ 809 mpt3sas_ctl_event_callback(ioc, msix_index, reply); 810 811 return 1; 812 } 813 814 /** 815 * _base_get_cb_idx - obtain the callback index 816 * @ioc: per adapter object 817 * @smid: system request message index 818 * 819 * Return callback index. 
820 */ 821 static u8 822 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) 823 { 824 int i; 825 u8 cb_idx; 826 827 if (smid < ioc->hi_priority_smid) { 828 i = smid - 1; 829 cb_idx = ioc->scsi_lookup[i].cb_idx; 830 } else if (smid < ioc->internal_smid) { 831 i = smid - ioc->hi_priority_smid; 832 cb_idx = ioc->hpr_lookup[i].cb_idx; 833 } else if (smid <= ioc->hba_queue_depth) { 834 i = smid - ioc->internal_smid; 835 cb_idx = ioc->internal_lookup[i].cb_idx; 836 } else 837 cb_idx = 0xFF; 838 return cb_idx; 839 } 840 841 /** 842 * _base_mask_interrupts - disable interrupts 843 * @ioc: per adapter object 844 * 845 * Disabling ResetIRQ, Reply and Doorbell Interrupts 846 * 847 * Return nothing. 848 */ 849 static void 850 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) 851 { 852 u32 him_register; 853 854 ioc->mask_interrupts = 1; 855 him_register = readl(&ioc->chip->HostInterruptMask); 856 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; 857 writel(him_register, &ioc->chip->HostInterruptMask); 858 readl(&ioc->chip->HostInterruptMask); 859 } 860 861 /** 862 * _base_unmask_interrupts - enable interrupts 863 * @ioc: per adapter object 864 * 865 * Enabling only Reply Interrupts 866 * 867 * Return nothing. 868 */ 869 static void 870 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) 871 { 872 u32 him_register; 873 874 him_register = readl(&ioc->chip->HostInterruptMask); 875 him_register &= ~MPI2_HIM_RIM; 876 writel(him_register, &ioc->chip->HostInterruptMask); 877 ioc->mask_interrupts = 0; 878 } 879 880 union reply_descriptor { 881 u64 word; 882 struct { 883 u32 low; 884 u32 high; 885 } u; 886 }; 887 888 /** 889 * _base_interrupt - MPT adapter (IOC) specific interrupt handler. 890 * @irq: irq number (not used) 891 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure 892 * @r: pt_regs pointer (not used) 893 * 894 * Return IRQ_HANDLE if processed, else IRQ_NONE. 
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* only one context may drain a given reply queue at a time */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	/* UNUSED type means the descriptor at the head is still empty */
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* all-ones in either half: descriptor not yet written */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			/* success path: no reply frame, dispatch by smid */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* sanity check the DMA address range of the frame */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid == 0: unsolicited async event */
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				/* frame must be returned before telling the
				 * IOC about the new free host index */
				wmb();
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* mark the descriptor consumed and advance (with wrap) */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	/* descriptor writes must be visible before the host index update */
	wmb();
	writel(reply_q->reply_post_host_index | (msix_index <<
	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - is controller support muli-reply queues
 * @ioc: per adapter object
 *
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
 * @ioc: per adapter object
 * Context: ISR context
 *
 * Called when a Task Management request has completed.
We want 1032 * to flush the other reply queues so all the outstanding IO has been 1033 * completed back to OS before we process the TM completetion. 1034 * 1035 * Return nothing. 1036 */ 1037 void 1038 mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) 1039 { 1040 struct adapter_reply_queue *reply_q; 1041 1042 /* If MSIX capability is turned off 1043 * then multi-queues are not enabled 1044 */ 1045 if (!_base_is_controller_msix_enabled(ioc)) 1046 return; 1047 1048 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 1049 if (ioc->shost_recovery) 1050 return; 1051 /* TMs are on msix_index == 0 */ 1052 if (reply_q->msix_index == 0) 1053 continue; 1054 _base_interrupt(reply_q->vector, (void *)reply_q); 1055 } 1056 } 1057 1058 /** 1059 * mpt3sas_base_release_callback_handler - clear interrupt callback handler 1060 * @cb_idx: callback index 1061 * 1062 * Return nothing. 1063 */ 1064 void 1065 mpt3sas_base_release_callback_handler(u8 cb_idx) 1066 { 1067 mpt_callbacks[cb_idx] = NULL; 1068 } 1069 1070 /** 1071 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler 1072 * @cb_func: callback function 1073 * 1074 * Returns cb_func. 1075 */ 1076 u8 1077 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) 1078 { 1079 u8 cb_idx; 1080 1081 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) 1082 if (mpt_callbacks[cb_idx] == NULL) 1083 break; 1084 1085 mpt_callbacks[cb_idx] = cb_func; 1086 return cb_idx; 1087 } 1088 1089 /** 1090 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler 1091 * 1092 * Return nothing. 
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	/* start with every callback slot empty */
	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}


/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	/* zero-length, end-of-list simple element with a -1 address */
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}


/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @smid: smid associated to an IO request
 *
 * Returns chain tracker(from ioc->free_chain_list), or NULL when the
 * free list is exhausted.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct chain_tracker *chain_req;
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_chain_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
			"chain buffers not available\n", ioc->name));
		return NULL;
	}
	chain_req = list_entry(ioc->free_chain_list.next,
	    struct chain_tracker, tracker_list);
	list_del_init(&chain_req->tracker_list);
	/* park the tracker on this smid's chain list; it migrates back to
	 * free_chain_list when the smid is freed
	 */
	list_add_tail(&chain_req->tracker_list,
	    &ioc->scsi_lookup[smid - 1].chain_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return chain_req;
}


/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	/* no data transfer: the IOC still requires one (zero length) SGE */
	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}

/* IEEE format sgls */

/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr:
Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
	/* zero length, end-of-list element addressed at -1 */
	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
1305 * 1306 * Returns 0 success, anything else error 1307 */ 1308 static int 1309 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, 1310 struct scsi_cmnd *scmd, u16 smid) 1311 { 1312 Mpi2SCSIIORequest_t *mpi_request; 1313 dma_addr_t chain_dma; 1314 struct scatterlist *sg_scmd; 1315 void *sg_local, *chain; 1316 u32 chain_offset; 1317 u32 chain_length; 1318 int sges_left; 1319 u32 sges_in_segment; 1320 u8 simple_sgl_flags; 1321 u8 simple_sgl_flags_last; 1322 u8 chain_sgl_flags; 1323 struct chain_tracker *chain_req; 1324 1325 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 1326 1327 /* init scatter gather flags */ 1328 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 1329 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 1330 simple_sgl_flags_last = simple_sgl_flags | 1331 MPI25_IEEE_SGE_FLAGS_END_OF_LIST; 1332 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1333 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; 1334 1335 sg_scmd = scsi_sglist(scmd); 1336 sges_left = scsi_dma_map(scmd); 1337 if (!sges_left) { 1338 sdev_printk(KERN_ERR, scmd->device, 1339 "pci_map_sg failed: request for %d bytes!\n", 1340 scsi_bufflen(scmd)); 1341 return -ENOMEM; 1342 } 1343 1344 sg_local = &mpi_request->SGL; 1345 sges_in_segment = (ioc->request_sz - 1346 offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee; 1347 if (sges_left <= sges_in_segment) 1348 goto fill_in_last_segment; 1349 1350 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + 1351 (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee); 1352 1353 /* fill in main message segment when there is a chain following */ 1354 while (sges_in_segment > 1) { 1355 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 1356 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 1357 sg_scmd = sg_next(sg_scmd); 1358 sg_local += ioc->sge_size_ieee; 1359 sges_left--; 1360 sges_in_segment--; 1361 } 1362 1363 /* initializing the pointers */ 1364 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 1365 if (!chain_req) 1366 return -1; 
1367 chain = chain_req->chain_buffer; 1368 chain_dma = chain_req->chain_buffer_dma; 1369 do { 1370 sges_in_segment = (sges_left <= 1371 ioc->max_sges_in_chain_message) ? sges_left : 1372 ioc->max_sges_in_chain_message; 1373 chain_offset = (sges_left == sges_in_segment) ? 1374 0 : sges_in_segment; 1375 chain_length = sges_in_segment * ioc->sge_size_ieee; 1376 if (chain_offset) 1377 chain_length += ioc->sge_size_ieee; 1378 _base_add_sg_single_ieee(sg_local, chain_sgl_flags, 1379 chain_offset, chain_length, chain_dma); 1380 1381 sg_local = chain; 1382 if (!chain_offset) 1383 goto fill_in_last_segment; 1384 1385 /* fill in chain segments */ 1386 while (sges_in_segment) { 1387 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 1388 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 1389 sg_scmd = sg_next(sg_scmd); 1390 sg_local += ioc->sge_size_ieee; 1391 sges_left--; 1392 sges_in_segment--; 1393 } 1394 1395 chain_req = _base_get_chain_buffer_tracker(ioc, smid); 1396 if (!chain_req) 1397 return -1; 1398 chain = chain_req->chain_buffer; 1399 chain_dma = chain_req->chain_buffer_dma; 1400 } while (1); 1401 1402 1403 fill_in_last_segment: 1404 1405 /* fill the last segment */ 1406 while (sges_left) { 1407 if (sges_left == 1) 1408 _base_add_sg_single_ieee(sg_local, 1409 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), 1410 sg_dma_address(sg_scmd)); 1411 else 1412 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, 1413 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 1414 sg_scmd = sg_next(sg_scmd); 1415 sg_local += ioc->sge_size_ieee; 1416 sges_left--; 1417 } 1418 1419 return 0; 1420 } 1421 1422 /** 1423 * _base_build_sg_ieee - build generic sg for IEEE format 1424 * @ioc: per adapter object 1425 * @psge: virtual address for SGE 1426 * @data_out_dma: physical address for WRITES 1427 * @data_out_sz: data xfer size for WRITES 1428 * @data_in_dma: physical address for READS 1429 * @data_in_sz: data xfer size for READS 1430 * 1431 * Return nothing. 
 */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	/* no data transfer: the IOC still requires one (zero length) SGE */
	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	}
}

/* convert pages to kilobytes for the meminfo printout */
#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))

/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	u64 consistent_dma_mask;

	/* on a retry (ioc->dma_mask already set) try the full 64 bit
	 * coherent mask; first pass limits coherent allocations to 32 bit
	 */
	if (ioc->dma_mask)
		consistent_dma_mask = DMA_BIT_MASK(64);
	else
		consistent_dma_mask = DMA_BIT_MASK(32);

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			ioc->dma_mask = 64;
			goto out;
		}
	}

	/* fall back to 32 bit streaming and coherent masks */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		ioc->dma_mask = 32;
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}

/* widen the coherent DMA mask to 64 bit, falling back to 32 bit */
static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
	struct pci_dev *pdev)
{
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -ENODEV;
	}
	return 0;
}

/**
 * _base_check_enable_msix - checks MSIX capable.
1537 * @ioc: per adapter object 1538 * 1539 * Check to see if card is capable of MSIX, and set number 1540 * of available msix vectors 1541 */ 1542 static int 1543 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) 1544 { 1545 int base; 1546 u16 message_control; 1547 1548 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); 1549 if (!base) { 1550 dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n", 1551 ioc->name)); 1552 return -EINVAL; 1553 } 1554 1555 /* get msix vector count */ 1556 1557 pci_read_config_word(ioc->pdev, base + 2, &message_control); 1558 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 1559 if (ioc->msix_vector_count > 8) 1560 ioc->msix_vector_count = 8; 1561 dinitprintk(ioc, pr_info(MPT3SAS_FMT 1562 "msix is supported, vector_count(%d)\n", 1563 ioc->name, ioc->msix_vector_count)); 1564 return 0; 1565 } 1566 1567 /** 1568 * _base_free_irq - free irq 1569 * @ioc: per adapter object 1570 * 1571 * Freeing respective reply_queue from the list. 1572 */ 1573 static void 1574 _base_free_irq(struct MPT3SAS_ADAPTER *ioc) 1575 { 1576 struct adapter_reply_queue *reply_q, *next; 1577 1578 if (list_empty(&ioc->reply_queue_list)) 1579 return; 1580 1581 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { 1582 list_del(&reply_q->list); 1583 synchronize_irq(reply_q->vector); 1584 free_irq(reply_q->vector, reply_q); 1585 kfree(reply_q); 1586 } 1587 } 1588 1589 /** 1590 * _base_request_irq - request irq 1591 * @ioc: per adapter object 1592 * @index: msix index into vector table 1593 * @vector: irq vector 1594 * 1595 * Inserting respective reply_queue into the list. 
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
{
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;
	reply_q->vector = vector;
	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    MPT3SAS_DRIVER_NAME, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    MPT3SAS_DRIVER_NAME, ioc->id);
	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
	    reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		    reply_q->name, vector);
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
	    ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	cpu = cpumask_first(cpu_online_mask);

	do {
		/* spread cpus evenly; the first (nr_cpus % nr_msix)
		 * queues each service one extra cpu
		 */
		unsigned int i, group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}

		index++;

	} while (cpu < nr_cpus);
}

/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_disable_msix(ioc->pdev);
	ioc->msix_enable = 0;
}

/**
 * _base_enable_msix - enables msix, fallback to io_apic
 * @ioc: per adapter object
 *
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	struct msix_entry *entries, *a;
	int r;
	int i;
	u8 try_msix = 0;

	/* msix_disable: -1 (default) and 0 both mean "try MSI-X" */
	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	/* one reply queue per online cpu, bounded by the device */
	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
	    ioc->msix_vector_count);

	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
	  ioc->cpu_count, max_msix_vectors);

	/* default to 8 vectors unless RDPQ is available */
	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		max_msix_vectors = 8;

	if (max_msix_vectors > 0) {
		ioc->reply_queue_count = min_t(int, max_msix_vectors,
			ioc->reply_queue_count);
		ioc->msix_vector_count = ioc->reply_queue_count;
	} else if (max_msix_vectors == 0)
		goto try_ioapic;

	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
	    GFP_KERNEL);
	if (!entries) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"kcalloc failed @ at %s:%d/%s() !!!\n",
			ioc->name, __FILE__, __LINE__, __func__));
		goto try_ioapic;
	}

	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
		a->entry = i;

	r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
	if (r) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_enable_msix_exact failed (r=%d) !!!\n",
			ioc->name, r));
		kfree(entries);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
		r = _base_request_irq(ioc, i, a->vector);
		if (r) {
			/* unwind everything acquired so far */
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			kfree(entries);
			goto try_ioapic;
		}
	}

	kfree(entries);
	return 0;

/* fallback to io_apic interrupt routing */
 try_ioapic:

	ioc->reply_queue_count = 1;
	r = _base_request_irq(ioc, 0, ioc->pdev->irq);

	return r;
}

/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
			ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    MPT3SAS_DRIVER_NAME)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
			ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
			ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* map the first IO port BAR and the first memory BAR only */
	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = (u64)ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
			if (ioc->chip == NULL) {
				pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
					ioc->name);
				r = -EINVAL;
				goto out_fail;
			}
		}
	}

	_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
	if (r)
		goto out_fail;

	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"), reply_q->vector);

	pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

 out_fail:
	if (ioc->chip_phys)
		iounmap(ioc->chip);
	ioc->chip_phys = 0;
	pci_release_selected_regions(ioc->pdev, ioc->bars);
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
	return r;
}

/**
 * mpt3sas_base_get_msg_frame - obtain request mf pointer
 * @ioc: per adapter object
 * @smid: system request message index(smid zero is invalid)
 *
 * Returns virt pointer to message frame.
 */
void *
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->request + (smid * ioc->request_sz));
}

/**
 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns virt pointer to sense buffer.
 */
void *
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
}

/**
 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Returns phys pointer to the low 32bit address of the sense buffer.
 */
__le32
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
	    SCSI_SENSE_BUFFERSIZE));
}

/**
 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
 * @ioc: per adapter object
 * @phys_addr: lower 32 physical addr of the reply
 *
 * Converts 32bit lower physical addr into a virt address.
 */
void *
mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
{
	if (!phys_addr)
		return NULL;
	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
}

/**
 * mpt3sas_base_get_smid - obtain a free smid from internal queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Returns smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->internal_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		pr_err(MPT3SAS_FMT "%s: smid not available\n",
		    ioc->name, __func__);
		return 0;
	}

	request = list_entry(ioc->internal_free_list.next,
	    struct request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 * @scmd: pointer to scsi command object
 *
 * Returns smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
	struct scsi_cmnd *scmd)
{
	unsigned long flags;
	struct scsiio_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		pr_err(MPT3SAS_FMT "%s: smid not available\n",
		    ioc->name, __func__);
		return 0;
	}

	/* tracker also records the owning scmd for later lookup */
	request = list_entry(ioc->free_list.next,
	    struct scsiio_tracker, tracker_list);
	request->scmd = scmd;
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
 * @ioc: per adapter object
 * @cb_idx: callback index
 *
 * Returns smid (zero is invalid)
 */
u16
mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
{
	unsigned long flags;
	struct request_tracker *request;
	u16 smid;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->hpr_free_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		return 0;
	}

	request = list_entry(ioc->hpr_free_list.next,
	    struct request_tracker, tracker_list);
	request->cb_idx = cb_idx;
	smid = request->smid;
	list_del(&request->tracker_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return smid;
}

/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct chain_tracker *chain_req, *next;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->hi_priority_smid) {
		/* scsiio queue */
		i = smid - 1;
		/* return any chain buffers borrowed by this smid */
		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
			list_for_each_entry_safe(chain_req, next,
			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
				list_del_init(&chain_req->tracker_list);
				list_add(&chain_req->tracker_list,
				    &ioc->free_chain_list);
			}
		}
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].scmd = NULL;
		list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

		/*
		 * See _wait_for_commands_to_complete() call with regards
		 * to this code.
		 */
		if (ioc->shost_recovery && ioc->pending_io_count) {
			/* wake the reset waiter once the last IO drains */
			if (ioc->pending_io_count == 1)
				wake_up(&ioc->reset_wq);
			ioc->pending_io_count--;
		}
		return;
	} else if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}

/**
 * _base_writeq - 64 bit write to MMIO
 * @ioc: per adapter object
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 * care of 32 bit environments where it's not guaranteed to send the entire word
 * in one transfer.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	writeq(cpu_to_le64(b), addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = cpu_to_le64(b);

	/* two 32 bit writes must not be interleaved with another poster */
	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#endif

/* pick the reply queue associated with the current cpu */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
{
	return ioc->cpu_msix_table[raw_smp_processor_id()];
}

/**
 * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Return nothing.
 */
void
mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;


	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex =  _base_get_msix_index(ioc);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	/* posting the descriptor hands ownership of the smid to the IOC */
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Return nothing.
 */
void
mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags =
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
void
mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	/* hi-priority requests always post their replies to MSI-X index 0 */
	descriptor.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.HighPriority.MSIxIndex = 0;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return nothing.
 */
void
mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}

/**
 * _base_display_intel_branding - Display branding string
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
{
	/* only Intel-branded boards carry the subsystem IDs below */
	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (ioc->pdev->device) {
	case MPI25_MFGPAGE_DEVID_SAS3008:
		switch (ioc->pdev->subsystem_device) {
		case MPT3SAS_INTEL_RMS3JC080_SSDID:
			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
			    MPT3SAS_INTEL_RMS3JC080_BRANDING);
			break;

		case MPT3SAS_INTEL_RS3GC008_SSDID:
			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
			    MPT3SAS_INTEL_RS3GC008_BRANDING);
			break;
		case MPT3SAS_INTEL_RS3FC044_SSDID:
			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
			    MPT3SAS_INTEL_RS3FC044_BRANDING);
			break;
		case MPT3SAS_INTEL_RS3UC080_SSDID:
			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
			    MPT3SAS_INTEL_RS3UC080_BRANDING);
			break;
		default:
			/* Intel board with a subsystem ID we do not know */
			pr_info(MPT3SAS_FMT
			    "Intel(R) Controller: Subsystem ID: 0x%X\n",
			    ioc->name, ioc->pdev->subsystem_device);
			break;
		}
		break;
	default:
		pr_info(MPT3SAS_FMT
		    "Intel(R) Controller: Subsystem ID: 0x%X\n",
		    ioc->name, ioc->pdev->subsystem_device);
		break;
	}
}



/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
2272 * @ioc: per adapter object 2273 * 2274 * Return nothing. 2275 */ 2276 static void 2277 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) 2278 { 2279 int i = 0; 2280 char desc[16]; 2281 u32 iounit_pg1_flags; 2282 u32 bios_version; 2283 2284 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 2285 strncpy(desc, ioc->manu_pg0.ChipName, 16); 2286 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\ 2287 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", 2288 ioc->name, desc, 2289 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 2290 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 2291 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 2292 ioc->facts.FWVersion.Word & 0x000000FF, 2293 ioc->pdev->revision, 2294 (bios_version & 0xFF000000) >> 24, 2295 (bios_version & 0x00FF0000) >> 16, 2296 (bios_version & 0x0000FF00) >> 8, 2297 bios_version & 0x000000FF); 2298 2299 _base_display_intel_branding(ioc); 2300 2301 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); 2302 2303 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { 2304 pr_info("Initiator"); 2305 i++; 2306 } 2307 2308 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { 2309 pr_info("%sTarget", i ? "," : ""); 2310 i++; 2311 } 2312 2313 i = 0; 2314 pr_info("), "); 2315 pr_info("Capabilities=("); 2316 2317 if (ioc->facts.IOCCapabilities & 2318 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { 2319 pr_info("Raid"); 2320 i++; 2321 } 2322 2323 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { 2324 pr_info("%sTLR", i ? "," : ""); 2325 i++; 2326 } 2327 2328 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { 2329 pr_info("%sMulticast", i ? "," : ""); 2330 i++; 2331 } 2332 2333 if (ioc->facts.IOCCapabilities & 2334 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { 2335 pr_info("%sBIDI Target", i ? "," : ""); 2336 i++; 2337 } 2338 2339 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { 2340 pr_info("%sEEDP", i ? 
"," : ""); 2341 i++; 2342 } 2343 2344 if (ioc->facts.IOCCapabilities & 2345 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { 2346 pr_info("%sSnapshot Buffer", i ? "," : ""); 2347 i++; 2348 } 2349 2350 if (ioc->facts.IOCCapabilities & 2351 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { 2352 pr_info("%sDiag Trace Buffer", i ? "," : ""); 2353 i++; 2354 } 2355 2356 if (ioc->facts.IOCCapabilities & 2357 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { 2358 pr_info("%sDiag Extended Buffer", i ? "," : ""); 2359 i++; 2360 } 2361 2362 if (ioc->facts.IOCCapabilities & 2363 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { 2364 pr_info("%sTask Set Full", i ? "," : ""); 2365 i++; 2366 } 2367 2368 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); 2369 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { 2370 pr_info("%sNCQ", i ? "," : ""); 2371 i++; 2372 } 2373 2374 pr_info(")\n"); 2375 } 2376 2377 /** 2378 * mpt3sas_base_update_missing_delay - change the missing delay timers 2379 * @ioc: per adapter object 2380 * @device_missing_delay: amount of time till device is reported missing 2381 * @io_missing_delay: interval IO is returned when there is a missing device 2382 * 2383 * Return nothing. 2384 * 2385 * Passed on the command line, this function will modify the device missing 2386 * delay, as well as the io missing delay. This should be called at driver 2387 * load time. 
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_orignal;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	/* page size depends on the phy count reported by the HBA */
	sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
	    sizeof(Mpi2SasIOUnit1PhyData_t));
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	/* device missing delay */
	/* decode: when UNIT_16 is set the timeout field counts 16s units */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_orignal = dmd;
	/* re-encode the requested delay, switching to 16s units (capped
	 * at 0x7F0) when it does not fit the 7 bit timeout field */
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	/* only cache the new values if the page write succeeded */
	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
		    ioc->name, dmd_orignal, dmd_new);
		pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
		    ioc->name, io_missing_delay_original,
		    io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

 out:
	kfree(sas_iounit_pg1);
}
/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}

	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
}

/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	struct reply_post_struct *rps;

	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	if (ioc->request) {
		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"request_pool(0x%p): free\n",
			ioc->name, ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		if (ioc->sense_dma_pool)
			pci_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"sense_pool(0x%p): free\n",
			ioc->name, ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		if (ioc->reply_dma_pool)
			pci_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_pool(0x%p): free\n",
			ioc->name, ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		if (ioc->reply_free_dma_pool)
			pci_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_free_pool(0x%p): free\n",
			ioc->name, ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post) {
		/* with rdpq there is one reply post queue per reply queue,
		 * otherwise only element 0 is populated */
		do {
			rps = &ioc->reply_post[i];
			if (rps->reply_post_free) {
				pci_pool_free(
				    ioc->reply_post_free_dma_pool,
				    rps->reply_post_free,
				    rps->reply_post_free_dma);
				dexitprintk(ioc, pr_info(MPT3SAS_FMT
				    "reply_post_free_pool(0x%p): free\n",
				    ioc->name, rps->reply_post_free));
				rps->reply_post_free = NULL;
			}
		} while (ioc->rdpq_array_enable &&
			   (++i < ioc->reply_queue_count));

		if (ioc->reply_post_free_dma_pool)
			pci_pool_destroy(ioc->reply_post_free_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->config_page) {
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
		    "config_page(0x%p): free\n", ioc->name,
		    ioc->config_page));
		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	if (ioc->scsi_lookup) {
		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
		ioc->scsi_lookup = NULL;
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->chain_depth; i++) {
			if (ioc->chain_lookup[i].chain_buffer)
				pci_pool_free(ioc->chain_dma_pool,
				    ioc->chain_lookup[i].chain_buffer,
				    ioc->chain_lookup[i].chain_buffer_dma);
		}
		if (ioc->chain_dma_pool)
			pci_pool_destroy(ioc->chain_dma_pool);
		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
		ioc->chain_lookup = NULL;
	}
}

/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 success, anything else error
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	struct mpt3sas_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz;
	u32 retry_sz;
	u16 max_request_credit;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));


	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1)
		sg_tablesize = max_sgl_entries;
	else
		sg_tablesize = MPT3SAS_SG_DEPTH;

	/* clamp the tunable into the supported range */
	if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
		sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
	else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
		sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
	ioc->shost->sg_tablesize = sg_tablesize;

	ioc->hi_priority_depth = facts->HighPriorityCredit;
	/* +5 slots reserved for internal (driver-originated) commands */
	ioc->internal_depth = ioc->hi_priority_depth + (5);
	/* command line tunables for max controller queue depth */
	if (max_queue_depth != -1 && max_queue_depth != 0) {
		max_request_credit = min_t(u16, max_queue_depth +
		    ioc->hi_priority_depth + ioc->internal_depth,
		    facts->RequestCredit);
		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
			max_request_credit = MAX_HBA_QUEUE_DEPTH;
	} else
		max_request_credit = min_t(u16, facts->RequestCredit,
		    MAX_HBA_QUEUE_DEPTH);

	ioc->hba_queue_depth = max_request_credit;

	/* request frame size */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* calculate the max scatter element size */
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->request_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

	/*
	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		/* firmware limits the chain depth; shrink sg_tablesize to
		 * what the reduced chain count can actually address */
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		* chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for 64 FW events */
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

	/* calculate reply descriptor post queue depth */
	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
	    ioc->reply_free_queue_depth + 1;
	/* align the reply post queue on the next 16 count boundary */
	if (ioc->reply_post_queue_depth % 16)
		ioc->reply_post_queue_depth += 16 -
		    (ioc->reply_post_queue_depth % 16);


	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		/* firmware cap exceeded: derive queue depths backwards
		 * from the largest 16-aligned post queue depth allowed */
		ioc->reply_post_queue_depth =
		    facts->MaxReplyDescriptorPostQueueDepth -
		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
		    ((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io));

	/* reply post queue, 16 byte align */
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);

	sz = reply_post_free_sz;
	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
		sz *= ioc->reply_queue_count;

	/*
	 * NOTE(review): this kcalloc and the pool below sit after the
	 * retry_allocation label, so the retry path re-allocates
	 * ioc->reply_post and the dma pool without freeing the previous
	 * ones - looks like a leak, verify against upstream fixes.
	 */
	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
	    (ioc->reply_queue_count):1,
	    sizeof(struct reply_post_struct), GFP_KERNEL);

	if (!ioc->reply_post) {
		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool) {
		pr_err(MPT3SAS_FMT
		 "reply_post_free pool: pci_pool_create failed\n",
		 ioc->name);
		goto out;
	}
	i = 0;
	/* one reply post buffer per reply queue when rdpq is enabled,
	 * otherwise a single (possibly multi-queue sized) buffer */
	do {
		ioc->reply_post[i].reply_post_free =
		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
		    GFP_KERNEL,
		    &ioc->reply_post[i].reply_post_free_dma);
		if (!ioc->reply_post[i].reply_post_free) {
			pr_err(MPT3SAS_FMT
			"reply_post_free pool: pci_pool_alloc failed\n",
			ioc->name);
			goto out;
		}
		memset(ioc->reply_post[i].reply_post_free, 0, sz);
		dinitprintk(ioc, pr_info(MPT3SAS_FMT
		    "reply post free pool (0x%p): depth(%d),"
		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
		    ioc->reply_post[i].reply_post_free,
		    ioc->reply_post_queue_depth, 8, sz/1024));
		dinitprintk(ioc, pr_info(MPT3SAS_FMT
		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
		    (unsigned long long)
		    ioc->reply_post[i].reply_post_free_dma));
		total_sz += sz;
	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

	if (ioc->dma_mask == 64) {
		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
			pr_warn(MPT3SAS_FMT
			    "no suitable consistent DMA mask for %s\n",
			    ioc->name, pci_name(ioc->pdev));
			goto out;
		}
	}

	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"scsi host: can_queue depth (%d)\n",
		ioc->name, ioc->shost->can_queue));


	/* contiguous pool for request and chains, 16 byte align, one extra "
	 * "frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
	if (!ioc->request) {
		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
			goto out;
		/* shrink the queue depth by 64 and redo the whole sizing */
		retry_sz += 64;
		ioc->hba_queue_depth = max_request_credit - retry_sz;
		goto retry_allocation;
	}

	if (retry_sz)
		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);

	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);

	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
		(ioc->hba_queue_depth * ioc->request_sz)/1024));

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
	    ioc->name, (unsigned long long) ioc->request_dma));
	total_sz += sz;

	/* per-smid tracker table for outstanding SCSI IO requests */
	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
	ioc->scsi_lookup_pages = get_order(sz);
	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
	    GFP_KERNEL, ioc->scsi_lookup_pages);
	if (!ioc->scsi_lookup) {
		pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
			ioc->name, (int)sz);
		goto out;
	}

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
		ioc->name, ioc->request, ioc->scsiio_depth));

	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
	sz = ioc->chain_depth * sizeof(struct chain_tracker);
	ioc->chain_pages = get_order(sz);
	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
	    GFP_KERNEL, ioc->chain_pages);
	if (!ioc->chain_lookup) {
		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
			ioc->name);
		goto out;
	}
	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
	    ioc->request_sz, 16, 0);
	if (!ioc->chain_dma_pool) {
		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
			ioc->name);
		goto out;
	}
	/* a partial chain pool is tolerated: stop at the first failed
	 * buffer and run with the reduced chain_depth */
	for (i = 0; i < ioc->chain_depth; i++) {
		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
		    ioc->chain_dma_pool , GFP_KERNEL,
		    &ioc->chain_lookup[i].chain_buffer_dma);
		if (!ioc->chain_lookup[i].chain_buffer) {
			ioc->chain_depth = i;
			goto chain_done;
		}
		total_sz += ioc->request_sz;
	}
 chain_done:
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->chain_depth, ioc->request_sz,
		((ioc->chain_depth * ioc->request_sz))/1024));

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->hi_priority,
		ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"internal(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->internal,
		ioc->internal_depth, ioc->internal_smid));

	/* sense buffers, 4 byte align */
	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->sense_dma_pool) {
		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
	    &ioc->sense_dma);
	if (!ioc->sense) {
		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
	    SCSI_SENSE_BUFFERSIZE, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->sense_dma));
	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->reply_dma_pool) {
		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply) {
		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->reply,
		ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->reply_dma));
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
	    &ioc->reply_free_dma);
	if (!ioc->reply_free) {
		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply_free_dma (0x%llx)\n",
		ioc->name, (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;

	/* single DMA buffer reused for all config page transactions */
	ioc->config_page_sz = 512;
	ioc->config_page = pci_alloc_consistent(ioc->pdev,
	    ioc->config_page_sz, &ioc->config_page_dma);
	if (!ioc->config_page) {
		pr_err(MPT3SAS_FMT
			"config page: pci_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"config page(0x%p): size(%d)\n",
		ioc->name, ioc->config_page, ioc->config_page_sz));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
		ioc->name, (unsigned long long)ioc->config_page_dma));
	total_sz += ioc->config_page_sz;

	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
	    ioc->name, total_sz/1024);
	pr_info(MPT3SAS_FMT
		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
	    ioc->name, ioc->shost->sg_tablesize);
	return 0;

 out:
	/* caller is expected to run _base_release_memory_pools() */
	return -ENOMEM;
}

/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
 * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI_IOC_STATE_MASK.
 */
u32
mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
	u32 s, sc;

	s = readl(&ioc->chip->Doorbell);
	sc = s & MPI2_IOC_STATE_MASK;
	return cooked ? sc : s;
}

/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
	int sleep_flag)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	/* poll roughly every 1ms (CAN_SLEEP) or 500us (NO_SLEEP) */
	cntdn = (sleep_flag == CAN_SLEEP) ?
	    1000*timeout : 2000*timeout;
	do {
		current_state = mpt3sas_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		/* give a faulted IOC one polling interval to recover
		 * before bailing out with the fault state */
		if (count && current_state == MPI2_IOC_STATE_FAULT)
			break;
		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

	/* non-zero: the state the IOC was last observed in */
	return current_state;
}

/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
 * a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
	int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		}
		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
	int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			/* IOC has consumed our doorbell write */
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			/* IOC wrote back instead: check for a FAULT state */
			doorbell = readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_base_fault_info(ioc , doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			/* all-ones read: device is likely gone from the
			 * bus — bail out through the common error path
			 */
			goto out;

		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

 out:
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 *
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
	int sleep_flag)
{
	u32 cntdn, count;
	u32 doorbell_reg;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		doorbell_reg = readl(&ioc->chip->Doorbell);
		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		}
		if (sleep_flag == CAN_SLEEP)
			usleep_range(1000, 1500);
		else
			udelay(500);
		count++;
	} while (--cntdn);

	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
		ioc->name, __func__, count, doorbell_reg);
	return -EFAULT;
}

/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in second
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
	int sleep_flag)
{
	u32 ioc_state;
	int r = 0;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
			ioc->name, __func__);
		return -EFAULT;
	}

	/* the reset is only attempted when the firmware advertises the
	 * EVENT_REPLAY capability
	 */
	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
		r = -EFAULT;
		goto out;
	}
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
	    timeout, sleep_flag);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
 out:
	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
	    ioc->name, ((r == 0) ?
"SUCCESS" : "FAILED")); 3274 return r; 3275 } 3276 3277 /** 3278 * _base_handshake_req_reply_wait - send request thru doorbell interface 3279 * @ioc: per adapter object 3280 * @request_bytes: request length 3281 * @request: pointer having request payload 3282 * @reply_bytes: reply length 3283 * @reply: pointer to reply payload 3284 * @timeout: timeout in second 3285 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3286 * 3287 * Returns 0 for success, non-zero for failure. 3288 */ 3289 static int 3290 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 3291 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) 3292 { 3293 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 3294 int i; 3295 u8 failed; 3296 u16 dummy; 3297 __le32 *mfp; 3298 3299 /* make sure doorbell is not in use */ 3300 if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { 3301 pr_err(MPT3SAS_FMT 3302 "doorbell is in use (line=%d)\n", 3303 ioc->name, __LINE__); 3304 return -EFAULT; 3305 } 3306 3307 /* clear pending doorbell interrupts from previous state changes */ 3308 if (readl(&ioc->chip->HostInterruptStatus) & 3309 MPI2_HIS_IOC2SYS_DB_STATUS) 3310 writel(0, &ioc->chip->HostInterruptStatus); 3311 3312 /* send message to ioc */ 3313 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | 3314 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 3315 &ioc->chip->Doorbell); 3316 3317 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) { 3318 pr_err(MPT3SAS_FMT 3319 "doorbell handshake int failed (line=%d)\n", 3320 ioc->name, __LINE__); 3321 return -EFAULT; 3322 } 3323 writel(0, &ioc->chip->HostInterruptStatus); 3324 3325 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) { 3326 pr_err(MPT3SAS_FMT 3327 "doorbell handshake ack failed (line=%d)\n", 3328 ioc->name, __LINE__); 3329 return -EFAULT; 3330 } 3331 3332 /* send message 32-bits at a time */ 3333 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 3334 
writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 3335 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) 3336 failed = 1; 3337 } 3338 3339 if (failed) { 3340 pr_err(MPT3SAS_FMT 3341 "doorbell handshake sending request failed (line=%d)\n", 3342 ioc->name, __LINE__); 3343 return -EFAULT; 3344 } 3345 3346 /* now wait for the reply */ 3347 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) { 3348 pr_err(MPT3SAS_FMT 3349 "doorbell handshake int failed (line=%d)\n", 3350 ioc->name, __LINE__); 3351 return -EFAULT; 3352 } 3353 3354 /* read the first two 16-bits, it gives the total length of the reply */ 3355 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 3356 & MPI2_DOORBELL_DATA_MASK); 3357 writel(0, &ioc->chip->HostInterruptStatus); 3358 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 3359 pr_err(MPT3SAS_FMT 3360 "doorbell handshake int failed (line=%d)\n", 3361 ioc->name, __LINE__); 3362 return -EFAULT; 3363 } 3364 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) 3365 & MPI2_DOORBELL_DATA_MASK); 3366 writel(0, &ioc->chip->HostInterruptStatus); 3367 3368 for (i = 2; i < default_reply->MsgLength * 2; i++) { 3369 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 3370 pr_err(MPT3SAS_FMT 3371 "doorbell handshake int failed (line=%d)\n", 3372 ioc->name, __LINE__); 3373 return -EFAULT; 3374 } 3375 if (i >= reply_bytes/2) /* overflow case */ 3376 dummy = readl(&ioc->chip->Doorbell); 3377 else 3378 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 3379 & MPI2_DOORBELL_DATA_MASK); 3380 writel(0, &ioc->chip->HostInterruptStatus); 3381 } 3382 3383 _base_wait_for_doorbell_int(ioc, 5, sleep_flag); 3384 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) { 3385 dhsprintk(ioc, pr_info(MPT3SAS_FMT 3386 "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); 3387 } 3388 writel(0, &ioc->chip->HostInterruptStatus); 3389 3390 if (ioc->logging_level & MPT_DEBUG_INIT) { 3391 mfp = (__le32 *)reply; 3392 pr_info("\toffset:data\n"); 3393 for 
(i = 0; i < reply_bytes/4; i++) 3394 pr_info("\t[0x%02x]:%08x\n", i*4, 3395 le32_to_cpu(mfp[i])); 3396 } 3397 return 0; 3398 } 3399 3400 /** 3401 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW 3402 * @ioc: per adapter object 3403 * @mpi_reply: the reply payload from FW 3404 * @mpi_request: the request payload sent to FW 3405 * 3406 * The SAS IO Unit Control Request message allows the host to perform low-level 3407 * operations, such as resets on the PHYs of the IO Unit, also allows the host 3408 * to obtain the IOC assigned device handles for a device if it has other 3409 * identifying information about the device, in addition allows the host to 3410 * remove IOC resources associated with the device. 3411 * 3412 * Returns 0 for success, non-zero for failure. 3413 */ 3414 int 3415 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, 3416 Mpi2SasIoUnitControlReply_t *mpi_reply, 3417 Mpi2SasIoUnitControlRequest_t *mpi_request) 3418 { 3419 u16 smid; 3420 u32 ioc_state; 3421 unsigned long timeleft; 3422 u8 issue_reset; 3423 int rc; 3424 void *request; 3425 u16 wait_state_count; 3426 3427 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3428 __func__)); 3429 3430 mutex_lock(&ioc->base_cmds.mutex); 3431 3432 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 3433 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", 3434 ioc->name, __func__); 3435 rc = -EAGAIN; 3436 goto out; 3437 } 3438 3439 wait_state_count = 0; 3440 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3441 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 3442 if (wait_state_count++ == 10) { 3443 pr_err(MPT3SAS_FMT 3444 "%s: failed due to ioc not operational\n", 3445 ioc->name, __func__); 3446 rc = -EFAULT; 3447 goto out; 3448 } 3449 ssleep(1); 3450 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3451 pr_info(MPT3SAS_FMT 3452 "%s: waiting for operational state(count=%d)\n", 3453 ioc->name, __func__, wait_state_count); 3454 } 3455 3456 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 
3457 if (!smid) { 3458 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 3459 ioc->name, __func__); 3460 rc = -EAGAIN; 3461 goto out; 3462 } 3463 3464 rc = 0; 3465 ioc->base_cmds.status = MPT3_CMD_PENDING; 3466 request = mpt3sas_base_get_msg_frame(ioc, smid); 3467 ioc->base_cmds.smid = smid; 3468 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); 3469 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 3470 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) 3471 ioc->ioc_link_reset_in_progress = 1; 3472 init_completion(&ioc->base_cmds.done); 3473 mpt3sas_base_put_smid_default(ioc, smid); 3474 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 3475 msecs_to_jiffies(10000)); 3476 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 3477 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 3478 ioc->ioc_link_reset_in_progress) 3479 ioc->ioc_link_reset_in_progress = 0; 3480 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 3481 pr_err(MPT3SAS_FMT "%s: timeout\n", 3482 ioc->name, __func__); 3483 _debug_dump_mf(mpi_request, 3484 sizeof(Mpi2SasIoUnitControlRequest_t)/4); 3485 if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) 3486 issue_reset = 1; 3487 goto issue_host_reset; 3488 } 3489 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 3490 memcpy(mpi_reply, ioc->base_cmds.reply, 3491 sizeof(Mpi2SasIoUnitControlReply_t)); 3492 else 3493 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); 3494 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 3495 goto out; 3496 3497 issue_host_reset: 3498 if (issue_reset) 3499 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 3500 FORCE_BIG_HAMMER); 3501 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 3502 rc = -EFAULT; 3503 out: 3504 mutex_unlock(&ioc->base_cmds.mutex); 3505 return rc; 3506 } 3507 3508 /** 3509 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device 3510 * @ioc: per adapter object 3511 * @mpi_reply: the reply payload from FW 3512 * @mpi_request: the request 
payload sent to FW 3513 * 3514 * The SCSI Enclosure Processor request message causes the IOC to 3515 * communicate with SES devices to control LED status signals. 3516 * 3517 * Returns 0 for success, non-zero for failure. 3518 */ 3519 int 3520 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, 3521 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) 3522 { 3523 u16 smid; 3524 u32 ioc_state; 3525 unsigned long timeleft; 3526 u8 issue_reset; 3527 int rc; 3528 void *request; 3529 u16 wait_state_count; 3530 3531 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 3532 __func__)); 3533 3534 mutex_lock(&ioc->base_cmds.mutex); 3535 3536 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { 3537 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", 3538 ioc->name, __func__); 3539 rc = -EAGAIN; 3540 goto out; 3541 } 3542 3543 wait_state_count = 0; 3544 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3545 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { 3546 if (wait_state_count++ == 10) { 3547 pr_err(MPT3SAS_FMT 3548 "%s: failed due to ioc not operational\n", 3549 ioc->name, __func__); 3550 rc = -EFAULT; 3551 goto out; 3552 } 3553 ssleep(1); 3554 ioc_state = mpt3sas_base_get_iocstate(ioc, 1); 3555 pr_info(MPT3SAS_FMT 3556 "%s: waiting for operational state(count=%d)\n", 3557 ioc->name, 3558 __func__, wait_state_count); 3559 } 3560 3561 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 3562 if (!smid) { 3563 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 3564 ioc->name, __func__); 3565 rc = -EAGAIN; 3566 goto out; 3567 } 3568 3569 rc = 0; 3570 ioc->base_cmds.status = MPT3_CMD_PENDING; 3571 request = mpt3sas_base_get_msg_frame(ioc, smid); 3572 ioc->base_cmds.smid = smid; 3573 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 3574 init_completion(&ioc->base_cmds.done); 3575 mpt3sas_base_put_smid_default(ioc, smid); 3576 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 3577 msecs_to_jiffies(10000)); 3578 if (!(ioc->base_cmds.status & 
MPT3_CMD_COMPLETE)) { 3579 pr_err(MPT3SAS_FMT "%s: timeout\n", 3580 ioc->name, __func__); 3581 _debug_dump_mf(mpi_request, 3582 sizeof(Mpi2SepRequest_t)/4); 3583 if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) 3584 issue_reset = 1; 3585 goto issue_host_reset; 3586 } 3587 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) 3588 memcpy(mpi_reply, ioc->base_cmds.reply, 3589 sizeof(Mpi2SepReply_t)); 3590 else 3591 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); 3592 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 3593 goto out; 3594 3595 issue_host_reset: 3596 if (issue_reset) 3597 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 3598 FORCE_BIG_HAMMER); 3599 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 3600 rc = -EFAULT; 3601 out: 3602 mutex_unlock(&ioc->base_cmds.mutex); 3603 return rc; 3604 } 3605 3606 /** 3607 * _base_get_port_facts - obtain port facts reply and save in ioc 3608 * @ioc: per adapter object 3609 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3610 * 3611 * Returns 0 for success, non-zero for failure. 
 */
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
{
	Mpi2PortFactsRequest_t mpi_request;
	Mpi2PortFactsReply_t mpi_reply;
	struct mpt3sas_port_facts *pfacts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
	mpi_request.PortNumber = port;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		return r;
	}

	/* cache the firmware reply in cpu byte order */
	pfacts = &ioc->pfacts[port];
	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
	pfacts->PortNumber = mpi_reply.PortNumber;
	pfacts->VP_ID = mpi_reply.VP_ID;
	pfacts->VF_ID = mpi_reply.VF_ID;
	pfacts->MaxPostedCmdBuffers =
	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);

	return 0;
}

/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		return r;
	}

	/* copy the little-endian reply into the cached, cpu-order facts */
	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
		ioc->rdpq_array_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	/* -1 (all ones): presumably "no target id limit" — confirm against
	 * scsi_host usage elsewhere in the driver
	 */
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);

	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"hba queue depth(%d), max chains per io(%d)\n",
		ioc->name, facts->RequestCredit,
		facts->MaxChainDepth));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"request frame size(%d), reply frame size(%d)\n", ioc->name,
		facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
	return 0;
}

/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	struct timeval current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;
	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
	dma_addr_t reply_post_free_array_dma;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	/* frame size and queue depths are expressed in dwords/entries */
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		/* RDPQ array mode: hand the firmware a table of per-queue
		 * reply post free base addresses instead of a single queue
		 */
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
			reply_post_free_array_sz, &reply_post_free_array_dma);
		if (!reply_post_free_array) {
			pr_err(MPT3SAS_FMT
			"reply_post_free_array: pci_alloc_consistent failed\n",
			ioc->name);
			r = -ENOMEM;
			goto out;
		}
		memset(reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/* This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	do_gettimeofday(&current_time);
	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
	    (current_time.tv_usec / 1000));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		pr_info("\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
	    sleep_flag);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
		r = -EIO;
	}

 out:
	/* the RDPQ table is only needed while the IOC processes IOC_INIT */
	if (reply_post_free_array)
		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
		    reply_post_free_array,
		    reply_post_free_array_dma);
	return r;
}

/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	/* ignore replies when no port enable is outstanding */
	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;

	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
		return 1;

	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
	/* MsgLength is in dwords */
	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		ioc->port_enable_failed = 1;

	/* during driver load the async scan path is notified directly
	 * instead of completing the synchronous waiter
	 */
	if (ioc->is_driver_loading) {
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
			mpt3sas_port_enable_complete(ioc);
			return 1;
		} else {
			ioc->start_scan_failed = ioc_status;
			ioc->start_scan = 0;
			return 1;
		}
	}
	complete(&ioc->port_enable_cmds.done);
	return 1;
}

/**
 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	unsigned long timeleft;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	/* port enable kicks off device discovery; allow up to 300 seconds */
	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
	    300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
			ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2PortEnableRequest_t)/4);
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
			ioc->name, __func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
3956 "SUCCESS" : "FAILED")); 3957 return r; 3958 } 3959 3960 /** 3961 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) 3962 * @ioc: per adapter object 3963 * 3964 * Returns 0 for success, non-zero for failure. 3965 */ 3966 int 3967 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) 3968 { 3969 Mpi2PortEnableRequest_t *mpi_request; 3970 u16 smid; 3971 3972 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); 3973 3974 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { 3975 pr_err(MPT3SAS_FMT "%s: internal command already in use\n", 3976 ioc->name, __func__); 3977 return -EAGAIN; 3978 } 3979 3980 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); 3981 if (!smid) { 3982 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 3983 ioc->name, __func__); 3984 return -EAGAIN; 3985 } 3986 3987 ioc->port_enable_cmds.status = MPT3_CMD_PENDING; 3988 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 3989 ioc->port_enable_cmds.smid = smid; 3990 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); 3991 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; 3992 3993 mpt3sas_base_put_smid_default(ioc, smid); 3994 return 0; 3995 } 3996 3997 /** 3998 * _base_determine_wait_on_discovery - desposition 3999 * @ioc: per adapter object 4000 * 4001 * Decide whether to wait on discovery to complete. Used to either 4002 * locate boot device, or report volumes ahead of physical devices. 4003 * 4004 * Returns 1 for wait, 0 for don't wait 4005 */ 4006 static int 4007 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) 4008 { 4009 /* We wait for discovery to complete if IR firmware is loaded. 4010 * The sas topology events arrive before PD events, so we need time to 4011 * turn on the bit in ioc->pd_handles to indicate PD 4012 * Also, it maybe required to report Volumes ahead of physical 4013 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. 
4014 */ 4015 if (ioc->ir_firmware) 4016 return 1; 4017 4018 /* if no Bios, then we don't need to wait */ 4019 if (!ioc->bios_pg3.BiosVersion) 4020 return 0; 4021 4022 /* Bios is present, then we drop down here. 4023 * 4024 * If there any entries in the Bios Page 2, then we wait 4025 * for discovery to complete. 4026 */ 4027 4028 /* Current Boot Device */ 4029 if ((ioc->bios_pg2.CurrentBootDeviceForm & 4030 MPI2_BIOSPAGE2_FORM_MASK) == 4031 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && 4032 /* Request Boot Device */ 4033 (ioc->bios_pg2.ReqBootDeviceForm & 4034 MPI2_BIOSPAGE2_FORM_MASK) == 4035 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && 4036 /* Alternate Request Boot Device */ 4037 (ioc->bios_pg2.ReqAltBootDeviceForm & 4038 MPI2_BIOSPAGE2_FORM_MASK) == 4039 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) 4040 return 0; 4041 4042 return 1; 4043 } 4044 4045 /** 4046 * _base_unmask_events - turn on notification for this event 4047 * @ioc: per adapter object 4048 * @event: firmware event 4049 * 4050 * The mask is stored in ioc->event_masks. 4051 */ 4052 static void 4053 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) 4054 { 4055 u32 desired_event; 4056 4057 if (event >= 128) 4058 return; 4059 4060 desired_event = (1 << (event % 32)); 4061 4062 if (event < 32) 4063 ioc->event_masks[0] &= ~desired_event; 4064 else if (event < 64) 4065 ioc->event_masks[1] &= ~desired_event; 4066 else if (event < 96) 4067 ioc->event_masks[2] &= ~desired_event; 4068 else if (event < 128) 4069 ioc->event_masks[3] &= ~desired_event; 4070 } 4071 4072 /** 4073 * _base_event_notification - send event notification 4074 * @ioc: per adapter object 4075 * @sleep_flag: CAN_SLEEP or NO_SLEEP 4076 * 4077 * Returns 0 for success, non-zero for failure. 
4078 */ 4079 static int 4080 _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4081 { 4082 Mpi2EventNotificationRequest_t *mpi_request; 4083 unsigned long timeleft; 4084 u16 smid; 4085 int r = 0; 4086 int i; 4087 4088 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4089 __func__)); 4090 4091 if (ioc->base_cmds.status & MPT3_CMD_PENDING) { 4092 pr_err(MPT3SAS_FMT "%s: internal command already in use\n", 4093 ioc->name, __func__); 4094 return -EAGAIN; 4095 } 4096 4097 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); 4098 if (!smid) { 4099 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 4100 ioc->name, __func__); 4101 return -EAGAIN; 4102 } 4103 ioc->base_cmds.status = MPT3_CMD_PENDING; 4104 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); 4105 ioc->base_cmds.smid = smid; 4106 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); 4107 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 4108 mpi_request->VF_ID = 0; /* TODO */ 4109 mpi_request->VP_ID = 0; 4110 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) 4111 mpi_request->EventMasks[i] = 4112 cpu_to_le32(ioc->event_masks[i]); 4113 init_completion(&ioc->base_cmds.done); 4114 mpt3sas_base_put_smid_default(ioc, smid); 4115 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 4116 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4117 pr_err(MPT3SAS_FMT "%s: timeout\n", 4118 ioc->name, __func__); 4119 _debug_dump_mf(mpi_request, 4120 sizeof(Mpi2EventNotificationRequest_t)/4); 4121 if (ioc->base_cmds.status & MPT3_CMD_RESET) 4122 r = -EFAULT; 4123 else 4124 r = -ETIME; 4125 } else 4126 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n", 4127 ioc->name, __func__)); 4128 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4129 return r; 4130 } 4131 4132 /** 4133 * mpt3sas_base_validate_event_type - validating event types 4134 * @ioc: per adapter object 4135 * @event: firmware event 4136 * 4137 * This will turn on firmware event notification when 
application
 * ask for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	/* walk every bit the application asked for; a set bit in
	 * ioc->event_masks means that event is still masked, so clear it
	 * and remember that the firmware needs to be told
	 */
	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	/* nothing changed, the firmware already has the right mask */
	if (!send_update_to_fw)
		return;

	/* serialize against other users of the internal base command */
	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc, CAN_SLEEP);
	mutex_unlock(&ioc->base_cmds.mutex);
}

/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		if (sleep_flag == CAN_SLEEP)
			msleep(100);
		else
			mdelay(100);

		/* give up after 20 attempts (~2 seconds) */
		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	     &ioc->chip->HostDiagnostic);

	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
	if (sleep_flag == CAN_SLEEP)
		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
	else
		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		/* all-ones read: the device has fallen off the PCIe bus */
		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		/* firmware clears RESET_ADAPTER when the reset completed */
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		/* Wait to pass the second read delay window */
		if (sleep_flag == CAN_SLEEP)
			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
								/ 1000);
		else
			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
								/ 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		"restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	/* flush key re-locks the diagnostic register against stray writes */
	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
	    sleep_flag);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}

/** 4297 * _base_make_ioc_ready - put controller in READY state 4298 * @ioc: per adapter object 4299 * @sleep_flag: CAN_SLEEP or NO_SLEEP 4300 * @type: FORCE_BIG_HAMMER or SOFT_RESET 4301 * 4302 * Returns 0 for success, non-zero for failure. 4303 */ 4304 static int 4305 _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, 4306 enum reset_type type) 4307 { 4308 u32 ioc_state; 4309 int rc; 4310 int count; 4311 4312 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4313 __func__)); 4314 4315 if (ioc->pci_error_recovery) 4316 return 0; 4317 4318 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 4319 dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", 4320 ioc->name, __func__, ioc_state)); 4321 4322 /* if in RESET state, it should move to READY state shortly */ 4323 count = 0; 4324 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { 4325 while ((ioc_state & MPI2_IOC_STATE_MASK) != 4326 MPI2_IOC_STATE_READY) { 4327 if (count++ == 10) { 4328 pr_err(MPT3SAS_FMT 4329 "%s: failed going to ready state (ioc_state=0x%x)\n", 4330 ioc->name, __func__, ioc_state); 4331 return -EFAULT; 4332 } 4333 if (sleep_flag == CAN_SLEEP) 4334 ssleep(1); 4335 else 4336 mdelay(1000); 4337 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 4338 } 4339 } 4340 4341 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) 4342 return 0; 4343 4344 if (ioc_state & MPI2_DOORBELL_USED) { 4345 dhsprintk(ioc, pr_info(MPT3SAS_FMT 4346 "unexpected doorbell active!\n", 4347 ioc->name)); 4348 goto issue_diag_reset; 4349 } 4350 4351 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 4352 mpt3sas_base_fault_info(ioc, ioc_state & 4353 MPI2_DOORBELL_DATA_MASK); 4354 goto issue_diag_reset; 4355 } 4356 4357 if (type == FORCE_BIG_HAMMER) 4358 goto issue_diag_reset; 4359 4360 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 4361 if (!(_base_send_ioc_reset(ioc, 4362 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { 4363 return 0; 4364 } 
4365 4366 issue_diag_reset: 4367 rc = _base_diag_reset(ioc, CAN_SLEEP); 4368 return rc; 4369 } 4370 4371 /** 4372 * _base_make_ioc_operational - put controller in OPERATIONAL state 4373 * @ioc: per adapter object 4374 * @sleep_flag: CAN_SLEEP or NO_SLEEP 4375 * 4376 * Returns 0 for success, non-zero for failure. 4377 */ 4378 static int 4379 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4380 { 4381 int r, i; 4382 unsigned long flags; 4383 u32 reply_address; 4384 u16 smid; 4385 struct _tr_list *delayed_tr, *delayed_tr_next; 4386 struct adapter_reply_queue *reply_q; 4387 long reply_post_free; 4388 u32 reply_post_free_sz, index = 0; 4389 4390 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4391 __func__)); 4392 4393 /* clean the delayed target reset list */ 4394 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 4395 &ioc->delayed_tr_list, list) { 4396 list_del(&delayed_tr->list); 4397 kfree(delayed_tr); 4398 } 4399 4400 4401 list_for_each_entry_safe(delayed_tr, delayed_tr_next, 4402 &ioc->delayed_tr_volume_list, list) { 4403 list_del(&delayed_tr->list); 4404 kfree(delayed_tr); 4405 } 4406 4407 /* initialize the scsi lookup free list */ 4408 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); 4409 INIT_LIST_HEAD(&ioc->free_list); 4410 smid = 1; 4411 for (i = 0; i < ioc->scsiio_depth; i++, smid++) { 4412 INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); 4413 ioc->scsi_lookup[i].cb_idx = 0xFF; 4414 ioc->scsi_lookup[i].smid = smid; 4415 ioc->scsi_lookup[i].scmd = NULL; 4416 list_add_tail(&ioc->scsi_lookup[i].tracker_list, 4417 &ioc->free_list); 4418 } 4419 4420 /* hi-priority queue */ 4421 INIT_LIST_HEAD(&ioc->hpr_free_list); 4422 smid = ioc->hi_priority_smid; 4423 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { 4424 ioc->hpr_lookup[i].cb_idx = 0xFF; 4425 ioc->hpr_lookup[i].smid = smid; 4426 list_add_tail(&ioc->hpr_lookup[i].tracker_list, 4427 &ioc->hpr_free_list); 4428 } 4429 4430 /* internal queue */ 4431 
INIT_LIST_HEAD(&ioc->internal_free_list); 4432 smid = ioc->internal_smid; 4433 for (i = 0; i < ioc->internal_depth; i++, smid++) { 4434 ioc->internal_lookup[i].cb_idx = 0xFF; 4435 ioc->internal_lookup[i].smid = smid; 4436 list_add_tail(&ioc->internal_lookup[i].tracker_list, 4437 &ioc->internal_free_list); 4438 } 4439 4440 /* chain pool */ 4441 INIT_LIST_HEAD(&ioc->free_chain_list); 4442 for (i = 0; i < ioc->chain_depth; i++) 4443 list_add_tail(&ioc->chain_lookup[i].tracker_list, 4444 &ioc->free_chain_list); 4445 4446 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 4447 4448 /* initialize Reply Free Queue */ 4449 for (i = 0, reply_address = (u32)ioc->reply_dma ; 4450 i < ioc->reply_free_queue_depth ; i++, reply_address += 4451 ioc->reply_sz) 4452 ioc->reply_free[i] = cpu_to_le32(reply_address); 4453 4454 /* initialize reply queues */ 4455 if (ioc->is_driver_loading) 4456 _base_assign_reply_queues(ioc); 4457 4458 /* initialize Reply Post Free Queue */ 4459 reply_post_free_sz = ioc->reply_post_queue_depth * 4460 sizeof(Mpi2DefaultReplyDescriptor_t); 4461 reply_post_free = (long)ioc->reply_post[index].reply_post_free; 4462 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 4463 reply_q->reply_post_host_index = 0; 4464 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) 4465 reply_post_free; 4466 for (i = 0; i < ioc->reply_post_queue_depth; i++) 4467 reply_q->reply_post_free[i].Words = 4468 cpu_to_le64(ULLONG_MAX); 4469 if (!_base_is_controller_msix_enabled(ioc)) 4470 goto skip_init_reply_post_free_queue; 4471 /* 4472 * If RDPQ is enabled, switch to the next allocation. 4473 * Otherwise advance within the contiguous region. 
4474 */ 4475 if (ioc->rdpq_array_enable) 4476 reply_post_free = (long) 4477 ioc->reply_post[++index].reply_post_free; 4478 else 4479 reply_post_free += reply_post_free_sz; 4480 } 4481 skip_init_reply_post_free_queue: 4482 4483 r = _base_send_ioc_init(ioc, sleep_flag); 4484 if (r) 4485 return r; 4486 4487 /* initialize reply free host index */ 4488 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; 4489 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); 4490 4491 /* initialize reply post host index */ 4492 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 4493 writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, 4494 &ioc->chip->ReplyPostHostIndex); 4495 if (!_base_is_controller_msix_enabled(ioc)) 4496 goto skip_init_reply_post_host_index; 4497 } 4498 4499 skip_init_reply_post_host_index: 4500 4501 _base_unmask_interrupts(ioc); 4502 r = _base_event_notification(ioc, sleep_flag); 4503 if (r) 4504 return r; 4505 4506 if (sleep_flag == CAN_SLEEP) 4507 _base_static_config_pages(ioc); 4508 4509 4510 if (ioc->is_driver_loading) { 4511 ioc->wait_for_discovery_to_complete = 4512 _base_determine_wait_on_discovery(ioc); 4513 4514 return r; /* scan_start and scan_finished support */ 4515 } 4516 4517 r = _base_send_port_enable(ioc, sleep_flag); 4518 if (r) 4519 return r; 4520 4521 return r; 4522 } 4523 4524 /** 4525 * mpt3sas_base_free_resources - free resources controller resources 4526 * @ioc: per adapter object 4527 * 4528 * Return nothing. 
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* quiesce the controller before tearing down the interrupts */
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	_base_free_irq(ioc);
	_base_disable_msix(ioc);

	if (ioc->chip_phys && ioc->chip)
		iounmap(ioc->chip);
	ioc->chip_phys = 0;

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
	return;
}

/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Brings the adapter from cold to OPERATIONAL: maps PCI resources,
 * reads IOC/port facts, allocates the driver's pools and internal
 * command buffers, unmasks the events of interest, and makes the IOC
 * operational.  On any failure everything allocated so far is torn
 * down via the out_free_resources path.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
		    "allocation for cpu_msix_table failed!!!\n",
		    ioc->name));
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;


	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
	if (r)
		goto out_free_resources;

	/*
	 * In SAS3.0,
	 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
	 * Target Status - all require the IEEE formated scatter gather
	 * elements.
	 */

	ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
	ioc->build_sg = &_base_build_sg_ieee;
	ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
	ioc->mpi25 = 1;
	ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);

	/*
	 * These function pointers for other requests that don't
	 * the require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
	if (r)
		goto out_free_resources;

	/* one port_facts entry per firmware-reported port */
	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
		if (r)
			goto out_free_resources;
	}

	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
	if (r)
		goto out_free_resources;

	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	/* check all the internal reply buffers in one place */
	if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
	    !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
	    !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
	    !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* all bits set == every event masked (disabled) by default */
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);

	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
	if (r)
		goto out_free_resources;

	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}


/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	/* free everything mpt3sas_base_attach() allocated; kfree(NULL)
	 * is a no-op, so partially-initialized adapters are fine
	 */
	kfree(ioc->cpu_msix_table);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}

/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	/* let the scsih and ctl layers handle the phase first */
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* abort the internal commands that were in flight; each is
		 * marked MPT3_CMD_RESET so the waiter knows why it woke up
		 */
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			/* during driver load nobody waits on the completion;
			 * fail the scan instead of completing the command
			 */
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}

/**
 * _wait_for_commands_to_complete - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 *
 * Waits (up to 10 seconds) for all
pending commands to complete
 * prior to putting controller in reset.
 */
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
	u32 ioc_state;
	unsigned long flags;
	u16 i;

	ioc->pending_io_count = 0;
	/* waiting is only possible in a sleepable context */
	if (sleep_flag != CAN_SLEEP)
		return;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count: any tracker whose cb_idx != 0xFF is in use */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = 0; i < ioc->scsiio_depth; i++)
		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
			ioc->pending_io_count++;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}

/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @sleep_flag: CAN_SLEEP or NO_SLEEP
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* TODO - What we really should be doing is pulling
	 * out all the code associated with NO_SLEEP; its never used.
	 * That is legacy code from mpt fusion driver, ported over.
	 * I will leave this BUG_ON here for now till its been resolved.
	 */
	BUG_ON(sleep_flag == NO_SLEEP);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/* a registered-but-not-released trace diag buffer means we must
	 * fire a trigger after a successful reset; note whether the IOC
	 * was in FAULT so the right trigger type is used
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	_wait_for_commands_to_complete(ioc, sleep_flag);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, sleep_flag, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
	if (r)
		goto out;

	/* RDPQ support may not change across a firmware flash */
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		      "Please reboot the system and ensure that the correct"
		      " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc, sleep_flag);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}