1 /* 2 * Management Module Support for MPT (Message Passing Technology) based 3 * controllers 4 * 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c 6 * Copyright (C) 2012-2014 LSI Corporation 7 * Copyright (C) 2013-2014 Avago Technologies 8 * (mailto: MPT-FusionLinux.pdl@avagotech.com) 9 * 10 * This program is free software; you can redistribute it and/or 11 * modify it under the terms of the GNU General Public License 12 * as published by the Free Software Foundation; either version 2 13 * of the License, or (at your option) any later version. 14 * 15 * This program is distributed in the hope that it will be useful, 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * GNU General Public License for more details. 19 * 20 * NO WARRANTY 21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR 22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT 23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, 24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is 25 * solely responsible for determining the appropriateness of using and 26 * distributing the Program and assumes all risks associated with its 27 * exercise of rights under this Agreement, including but not limited to 28 * the risks and costs of program errors, damage to or loss of data, 29 * programs or equipment, and unavailability or interruption of operations. 
30 31 * DISCLAIMER OF LIABILITY 32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY 33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND 35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR 36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE 37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED 38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES 39 40 * You should have received a copy of the GNU General Public License 41 * along with this program; if not, write to the Free Software 42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, 43 * USA. 44 */ 45 46 #include <linux/kernel.h> 47 #include <linux/module.h> 48 #include <linux/errno.h> 49 #include <linux/init.h> 50 #include <linux/slab.h> 51 #include <linux/types.h> 52 #include <linux/pci.h> 53 #include <linux/delay.h> 54 #include <linux/compat.h> 55 #include <linux/poll.h> 56 57 #include <linux/io.h> 58 #include <linux/uaccess.h> 59 60 #include "mpt3sas_base.h" 61 #include "mpt3sas_ctl.h" 62 63 64 static struct fasync_struct *async_queue; 65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); 66 67 68 /** 69 * enum block_state - blocking state 70 * @NON_BLOCKING: non blocking 71 * @BLOCKING: blocking 72 * 73 * These states are for ioctls that need to wait for a response 74 * from firmware, so they probably require sleep. 75 */ 76 enum block_state { 77 NON_BLOCKING, 78 BLOCKING, 79 }; 80 81 /** 82 * _ctl_display_some_debug - debug routine 83 * @ioc: per adapter object 84 * @smid: system request message index 85 * @calling_function_name: string pass from calling function 86 * @mpi_reply: reply message frame 87 * Context: none. 88 * 89 * Function for displaying debug info helpful when debugging issues 90 * in this module. 
 */
static void
_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
{
	Mpi2ConfigRequest_t *mpi_request;
	char *desc = NULL;

	/* Debug output is gated on the MPT_DEBUG_IOCTL logging level. */
	if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
		return;

	/* Map the request's MPI function code to a human-readable name;
	 * SCSI IO / RAID passthrough / CONFIG also format per-request
	 * details into ioc->tmp_string.
	 */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	switch (mpi_request->Function) {
	case MPI2_FUNCTION_SCSI_IO_REQUEST:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		/* low nibble of IoFlags carries the CDB length */
		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "scsi_io, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		desc = "task_mgmt";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		desc = "ioc_init";
		break;
	case MPI2_FUNCTION_IOC_FACTS:
		desc = "ioc_facts";
		break;
	case MPI2_FUNCTION_CONFIG:
	{
		Mpi2ConfigRequest_t *config_request =
		    (Mpi2ConfigRequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "config, type(0x%02x), ext_type(0x%02x), number(%d)",
		    (config_request->Header.PageType &
		     MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
		    config_request->Header.PageNumber);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_PORT_FACTS:
		desc = "port_facts";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		desc = "port_enable";
		break;
	case MPI2_FUNCTION_EVENT_NOTIFICATION:
		desc = "event_notification";
		break;
	case MPI2_FUNCTION_FW_DOWNLOAD:
		desc = "fw_download";
		break;
	case MPI2_FUNCTION_FW_UPLOAD:
		desc = "fw_upload";
		break;
	case MPI2_FUNCTION_RAID_ACTION:
		desc = "raid_action";
		break;
	case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
	{
		Mpi2SCSIIORequest_t *scsi_request =
		    (Mpi2SCSIIORequest_t *)mpi_request;

		snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
		    "raid_pass, cmd(0x%02x), cdb_len(%d)",
		    scsi_request->CDB.CDB32[0],
		    le16_to_cpu(scsi_request->IoFlags) & 0xF);
		desc = ioc->tmp_string;
		break;
	}
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		desc = "sas_iounit_cntl";
		break;
	case MPI2_FUNCTION_SATA_PASSTHROUGH:
		desc = "sata_pass";
		break;
	case MPI2_FUNCTION_DIAG_BUFFER_POST:
		desc = "diag_buffer_post";
		break;
	case MPI2_FUNCTION_DIAG_RELEASE:
		desc = "diag_release";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		desc = "smp_passthrough";
		break;
	case MPI2_FUNCTION_TOOLBOX:
		desc = "toolbox";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		desc = "nvme_encapsulated";
		break;
	case MPI2_FUNCTION_MCTP_PASSTHROUGH:
		desc = "mctp_passthrough";
		break;
	}

	/* Unrecognized function codes are silently ignored. */
	if (!desc)
		return;

	ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);

	/* No reply frame (e.g. when called at request submission time). */
	if (!mpi_reply)
		return;

	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
		ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo));

	/* For SCSI IO style replies, also identify the target device
	 * (SAS first, then PCIe/NVMe) and dump SCSI state/status.
	 */
	if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    mpi_request->Function ==
	    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
		Mpi2SCSIIOReply_t *scsi_reply =
		    (Mpi2SCSIIOReply_t *)mpi_reply;
		struct _sas_device *sas_device = NULL;
		struct _pcie_device *pcie_device = NULL;

		sas_device = mpt3sas_get_sdev_by_handle(ioc,
		    le16_to_cpu(scsi_reply->DevHandle));
		if (sas_device) {
			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
			    (u64)sas_device->sas_address,
			    sas_device->phy);
			ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
			    (u64)sas_device->enclosure_logical_id,
			    sas_device->slot);
			/* drop the reference taken by the lookup above */
			sas_device_put(sas_device);
		}
		if (!sas_device) {
			pcie_device = mpt3sas_get_pdev_by_handle(ioc,
			    le16_to_cpu(scsi_reply->DevHandle));
			if (pcie_device) {
				ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
				    (unsigned long long)pcie_device->wwid,
				    pcie_device->port_num);
				if (pcie_device->enclosure_handle != 0)
					ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
					    (u64)pcie_device->enclosure_logical_id,
					    pcie_device->slot);
				pcie_device_put(pcie_device);
			}
		}
		if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
			ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
			    scsi_reply->SCSIState,
			    scsi_reply->SCSIStatus);
	}
}

/**
 * mpt3sas_ctl_done - ctl module completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: none.
 *
 * The callback handler when using ioc->ctl_cb_idx.  Copies the reply
 * frame (and any sense / NVMe error-response data) into the ctl_cmds
 * buffers and completes the waiter.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	Mpi2SCSIIOReply_t *scsiio_reply;
	Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
	const void *sense_data;
	u32 sz;

	/* Ignore completions that do not belong to the outstanding
	 * ctl command (not in use, or smid mismatch).
	 */
	if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->ctl_cmds.smid != smid)
		return 1;
	ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence *4 for bytes */
		memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
		/* get sense data */
		if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
		    mpi_reply->Function ==
		    MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
			scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
			if (scsiio_reply->SCSIState &
			    MPI2_SCSI_STATE_AUTOSENSE_VALID) {
				sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
				    le32_to_cpu(scsiio_reply->SenseCount));
				sense_data = mpt3sas_base_get_sense_buffer(ioc,
				    smid);
				memcpy(ioc->ctl_cmds.sense, sense_data, sz);
			}
		}
		/*
		 * Get Error Response data for NVMe device. The ctl_cmds.sense
		 * buffer is used to store the Error Response data.
		 */
		if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
			nvme_error_reply =
			    (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
			sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
			    le16_to_cpu(nvme_error_reply->ErrorResponseCount));
			sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
			memcpy(ioc->ctl_cmds.sense, sense_data, sz);
		}
	}

	_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
	ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
	/* wake the thread blocked in _ctl_do_mpt_command() */
	complete(&ioc->ctl_cmds.done);
	return 1;
}

/**
 * _ctl_check_event_type - determines when an event needs logging
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The bitmask in ioc->event_type[] indicates which events should be
 * saved in the driver event_log.  This bitmask is set by application.
 *
 * Return: 1 when event should be captured, or zero means no match.
 */
static int
_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u16 i;
	u32 desired_event;

	/* only events 1..127 are representable in the event_type bitmask */
	if (event >= 128 || !event || !ioc->event_log)
		return 0;

	desired_event = (1 << (event % 32));
	if (!desired_event)
		desired_event = 1;
	i = event / 32;
	/* non-zero when the application enabled this event's bit */
	return desired_event & ioc->event_type[i];
}

/**
 * mpt3sas_ctl_add_to_event_log - add event
 * @ioc: per adapter object
 * @mpi_reply: reply message frame
 *
 * Inserts the event into the circular event_log (when enabled via
 * _ctl_check_event_type) and notifies any poll/fasync listeners.
 */
void
mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	struct MPT3_IOCTL_EVENTS *event_log;
	u16 event;
	int i;
	u32 sz, event_data_sz;
	u8 send_aen = 0;

	if (!ioc->event_log)
		return;

	event = le16_to_cpu(mpi_reply->Event);

	if (_ctl_check_event_type(ioc, event)) {

		/* insert entry into circular event_log */
		i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
		event_log = ioc->event_log;
		event_log[i].event = event;
		event_log[i].context = ioc->event_context++;

		/* EventDataLength is in 32-bit dwords; clamp the copy to
		 * the fixed per-entry data size.
		 */
		event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
		sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
		memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
		memcpy(event_log[i].data, mpi_reply->EventData, sz);
		send_aen = 1;
	}

	/* This aen_event_read_flag flag is set until the
	 * application has read the event log.
	 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
	 */
	if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
	    (send_aen && !ioc->aen_event_read_flag)) {
		ioc->aen_event_read_flag = 1;
		wake_up_interruptible(&ctl_poll_wait);
		if (async_queue)
			kill_fasync(&async_queue, SIGIO, POLL_IN);
	}
}

/**
 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt.
 *
 * This function merely adds a new work task into ioc->firmware_event_thread.
 * The tasks are worked from _firmware_event_work in user context.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
u8
mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply)
		mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
	return 1;
}

/**
 * _ctl_verify_adapter - validates ioc_number passed from application
 * @ioc_number: adapter id to look up in mpt3sas_ioc_list
 * @iocpp: The ioc pointer is returned in this.
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 *		MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 *
 * Return: (-1) means error, else ioc_number.
 */
static int
_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
	int mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	int version = 0;
	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		if (ioc->id != ioc_number)
			continue;
		/* Check whether this ioctl command is from right
		 * ioctl device or not, if not continue the search.
		 */
		version = ioc->hba_mpi_version_belonged;
		/* MPI25_VERSION and MPI26_VERSION uses same ioctl
		 * device.
		 */
		if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
			if ((version == MPI25_VERSION) ||
			    (version == MPI26_VERSION))
				goto out;
			else
				continue;
		} else {
			if (version != mpi_version)
				continue;
		}
 out:
		/* match found: publish the ioc while still holding no
		 * stale lock (unlock before returning)
		 */
		spin_unlock(&gioc_lock);
		*iocpp = ioc;
		return ioc_number;
	}
	spin_unlock(&gioc_lock);
	*iocpp = NULL;
	return -1;
}

/**
 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Releases any registered (and not-yet-released) diag buffers before
 * the adapter reset proceeds.
 */
void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	u8 issue_reset;

	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
		if (!(ioc->diag_buffer_status[i] &
		      MPT3_DIAG_BUFFER_IS_REGISTERED))
			continue;
		if ((ioc->diag_buffer_status[i] &
		     MPT3_DIAG_BUFFER_IS_RELEASED))
			continue;

		/*
		 * add a log message to indicate the release
		 */
		ioc_info(ioc,
		    "%s: Releasing the trace buffer due to adapter reset.",
		    __func__);
		ioc->htb_rel.buffer_rel_condition =
		    MPT3_DIAG_BUFFER_REL_TRIGGER;
		mpt3sas_send_diag_release(ioc, i, &issue_reset);
	}
}

/**
 * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd.
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 */
void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__));
	if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
		/* mark the command as terminated by reset, free its smid,
		 * and unblock the waiter in _ctl_do_mpt_command()
		 */
		ioc->ctl_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
		complete(&ioc->ctl_cmds.done);
	}
}

/**
 * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
 * @ioc: per adapter object
 *
 * The handler for doing any required cleanup or initialization.
 * Marks all registered-but-not-released diag buffers as invalidated
 * by the diag reset.
 */
void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	int i;

	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));

	for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
		if (!(ioc->diag_buffer_status[i] &
		      MPT3_DIAG_BUFFER_IS_REGISTERED))
			continue;
		if ((ioc->diag_buffer_status[i] &
		     MPT3_DIAG_BUFFER_IS_RELEASED))
			continue;
		ioc->diag_buffer_status[i] |=
		    MPT3_DIAG_BUFFER_IS_DIAG_RESET;
	}
}

/**
 * _ctl_fasync - fasync callback for the ctl character device
 * @fd: file descriptor passed by the VFS
 * @filep: file object
 * @mode: on/off flag for async notification
 *
 * Called when application request fasyn callback handler; simply
 * delegates to fasync_helper() on the module-wide async_queue.
 */
static int
_ctl_fasync(int fd, struct file *filep, int mode)
{
	return fasync_helper(fd, filep, mode, &async_queue);
}

/**
 * _ctl_poll - poll callback for the ctl character device
 * @filep: file object
 * @wait: poll table to register ctl_poll_wait with
 *
 */
static __poll_t
_ctl_poll(struct file *filep, poll_table *wait)
{
	struct MPT3SAS_ADAPTER *ioc;

	poll_wait(filep, &ctl_poll_wait, wait);

	/* global ioc lock to protect controller on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
		/* readable as soon as any adapter has an unread AEN */
		if (ioc->aen_event_read_flag) {
			spin_unlock(&gioc_lock);
			return EPOLLIN | EPOLLRDNORM;
		}
	}
	spin_unlock(&gioc_lock);
	return 0;
}

/**
 * _ctl_set_task_mid - assign an active smid to tm request
 * @ioc: per adapter object
 * @karg: (struct mpt3_ioctl_command)
 * @tm_request: pointer to mf from user space
 *
 * Only applies to ABORT_TASK / QUERY_TASK; other task types pass
 * through untouched.
 *
 * Return: 0 when an smid if found, else fail.
 * during failure, the reply frame is filled.
 */
static int
_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
	Mpi2SCSITaskManagementRequest_t *tm_request)
{
	bool found = false;
	u16 smid;
	u16 handle;
	struct scsi_cmnd *scmd;
	struct MPT3SAS_DEVICE *priv_data;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	u32 sz;
	u32 lun;
	char *desc = NULL;

	if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
		desc = "abort_task";
	else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		desc = "query_task";
	else
		return 0;

	lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);

	handle = le16_to_cpu(tm_request->DevHandle);
	/* scan outstanding SCSI IOs (highest smid first) for one that
	 * matches the requested device handle and lun
	 */
	for (smid = ioc->scsiio_depth; smid && !found; smid--) {
		struct scsiio_tracker *st;
		__le16 task_mid;

		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		if (lun != scmd->device->lun)
			continue;
		priv_data = scmd->device->hostdata;
		if (priv_data->sas_target == NULL)
			continue;
		if (priv_data->sas_target->handle != handle)
			continue;
		st = scsi_cmd_priv(scmd);

		/*
		 * If the given TaskMID from the user space is zero, then the
		 * first outstanding smid will be picked up.  Otherwise,
		 * targeted smid will be the one.
		 */
		task_mid = cpu_to_le16(st->smid);
		if (!tm_request->TaskMID)
			tm_request->TaskMID = task_mid;
		found = tm_request->TaskMID == task_mid;
	}

	if (!found) {
		/* no matching outstanding command: synthesize a TM reply
		 * frame and copy it back to the application
		 */
		dctlprintk(ioc,
		    ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
			desc, le16_to_cpu(tm_request->DevHandle),
			lun));
		tm_reply = ioc->ctl_cmds.reply;
		tm_reply->DevHandle = tm_request->DevHandle;
		tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
		tm_reply->TaskType = tm_request->TaskType;
		tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
		tm_reply->VP_ID = tm_request->VP_ID;
		tm_reply->VF_ID = tm_request->VF_ID;
		sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
		if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
		    sz))
			pr_err("failure at %s:%d/%s()!\n", __FILE__,
			    __LINE__, __func__);
		return 1;
	}

	dctlprintk(ioc,
	    ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
		desc, le16_to_cpu(tm_request->DevHandle), lun,
		le16_to_cpu(tm_request->TaskMID)));
	return 0;
}

/**
 * _ctl_send_mctp_passthru_req - Send an MCTP passthru request
 * @ioc: per adapter object
 * @mctp_passthru_req: MPI mctp passhthru request from caller
 * @psge: pointer to the H2DSGL
 * @data_out_dma: DMA buffer for H2D SGL
 * @data_out_sz: H2D length
 * @data_in_dma: DMA buffer for D2H SGL
 * @data_in_sz: D2H length
 * @smid: SMID to submit the request
 *
 */
static void
_ctl_send_mctp_passthru_req(
	struct MPT3SAS_ADAPTER *ioc,
	Mpi26MctpPassthroughRequest_t *mctp_passthru_req, void *psge,
	dma_addr_t data_out_dma, int data_out_sz,
	dma_addr_t data_in_dma, int data_in_sz,
	u16 smid)
{
	mctp_passthru_req->H2DLength = data_out_sz;
	mctp_passthru_req->D2HLength = data_in_sz;

	/* Build the H2D SGL
from the data out buffer */ 682 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 0, 0); 683 684 psge += ioc->sge_size_ieee; 685 686 /* Build the D2H SGL for the data in buffer */ 687 ioc->build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz); 688 689 ioc->put_smid_default(ioc, smid); 690 } 691 692 /** 693 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode 694 * @ioc: per adapter object 695 * @karg: (struct mpt3_ioctl_command) 696 * @mf: pointer to mf in user space 697 */ 698 static long 699 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, 700 void __user *mf) 701 { 702 MPI2RequestHeader_t *mpi_request = NULL, *request; 703 MPI2DefaultReply_t *mpi_reply; 704 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL; 705 struct _pcie_device *pcie_device = NULL; 706 u16 smid; 707 unsigned long timeout; 708 u8 issue_reset; 709 u32 sz, sz_arg; 710 void *psge; 711 void *data_out = NULL; 712 dma_addr_t data_out_dma = 0; 713 size_t data_out_sz = 0; 714 void *data_in = NULL; 715 dma_addr_t data_in_dma = 0; 716 size_t data_in_sz = 0; 717 long ret; 718 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; 719 int tm_ret; 720 721 issue_reset = 0; 722 723 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { 724 ioc_err(ioc, "%s: ctl_cmd in use\n", __func__); 725 ret = -EAGAIN; 726 goto out; 727 } 728 729 ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT); 730 if (ret) 731 goto out; 732 733 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); 734 if (!mpi_request) { 735 ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n", 736 __func__); 737 ret = -ENOMEM; 738 goto out; 739 } 740 741 /* Check for overflow and wraparound */ 742 if (karg.data_sge_offset * 4 > ioc->request_sz || 743 karg.data_sge_offset > (UINT_MAX / 4)) { 744 ret = -EINVAL; 745 goto out; 746 } 747 748 /* copy in request message frame from user */ 749 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { 750 pr_err("failure at %s:%d/%s()!\n", __FILE__, 
__LINE__, 751 __func__); 752 ret = -EFAULT; 753 goto out; 754 } 755 756 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 757 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); 758 if (!smid) { 759 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__); 760 ret = -EAGAIN; 761 goto out; 762 } 763 } else { 764 /* Use first reserved smid for passthrough ioctls */ 765 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1; 766 } 767 768 ret = 0; 769 ioc->ctl_cmds.status = MPT3_CMD_PENDING; 770 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); 771 request = mpt3sas_base_get_msg_frame(ioc, smid); 772 memset(request, 0, ioc->request_sz); 773 memcpy(request, mpi_request, karg.data_sge_offset*4); 774 ioc->ctl_cmds.smid = smid; 775 data_out_sz = karg.data_out_size; 776 data_in_sz = karg.data_in_size; 777 778 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 779 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 780 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT || 781 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH || 782 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) { 783 784 device_handle = le16_to_cpu(mpi_request->FunctionDependent1); 785 if (!device_handle || (device_handle > 786 ioc->facts.MaxDevHandle)) { 787 ret = -EINVAL; 788 mpt3sas_base_free_smid(ioc, smid); 789 goto out; 790 } 791 } 792 793 /* obtain dma-able memory for data transfer */ 794 if (data_out_sz) /* WRITE */ { 795 data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, 796 &data_out_dma, GFP_KERNEL); 797 if (!data_out) { 798 pr_err("failure at %s:%d/%s()!\n", __FILE__, 799 __LINE__, __func__); 800 ret = -ENOMEM; 801 mpt3sas_base_free_smid(ioc, smid); 802 goto out; 803 } 804 if (copy_from_user(data_out, karg.data_out_buf_ptr, 805 data_out_sz)) { 806 pr_err("failure at %s:%d/%s()!\n", __FILE__, 807 __LINE__, __func__); 808 ret = -EFAULT; 809 mpt3sas_base_free_smid(ioc, smid); 810 goto out; 811 } 812 } 813 814 if (data_in_sz) /* 
READ */ { 815 data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, 816 &data_in_dma, GFP_KERNEL); 817 if (!data_in) { 818 pr_err("failure at %s:%d/%s()!\n", __FILE__, 819 __LINE__, __func__); 820 ret = -ENOMEM; 821 mpt3sas_base_free_smid(ioc, smid); 822 goto out; 823 } 824 } 825 826 psge = (void *)request + (karg.data_sge_offset*4); 827 828 /* send command to firmware */ 829 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); 830 831 init_completion(&ioc->ctl_cmds.done); 832 switch (mpi_request->Function) { 833 case MPI2_FUNCTION_MCTP_PASSTHROUGH: 834 { 835 Mpi26MctpPassthroughRequest_t *mctp_passthru_req = 836 (Mpi26MctpPassthroughRequest_t *)request; 837 838 if (!(ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU)) { 839 ioc_err(ioc, "%s: MCTP Passthrough request not supported\n", 840 __func__); 841 mpt3sas_base_free_smid(ioc, smid); 842 ret = -EINVAL; 843 goto out; 844 } 845 846 _ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma, 847 data_out_sz, data_in_dma, data_in_sz, smid); 848 break; 849 } 850 case MPI2_FUNCTION_NVME_ENCAPSULATED: 851 { 852 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; 853 if (!ioc->pcie_sg_lookup) { 854 dtmprintk(ioc, ioc_info(ioc, 855 "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n" 856 )); 857 858 if (ioc->logging_level & MPT_DEBUG_TM) 859 _debug_dump_mf(nvme_encap_request, 860 ioc->request_sz/4); 861 mpt3sas_base_free_smid(ioc, smid); 862 ret = -EINVAL; 863 goto out; 864 } 865 /* 866 * Get the Physical Address of the sense buffer. 867 * Use Error Response buffer address field to hold the sense 868 * buffer address. 869 * Clear the internal sense buffer, which will potentially hold 870 * the Completion Queue Entry on return, or 0 if no Entry. 871 * Build the PRPs and set direction bits. 872 * Send the request. 
873 */ 874 nvme_encap_request->ErrorResponseBaseAddress = 875 cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL); 876 nvme_encap_request->ErrorResponseBaseAddress |= 877 cpu_to_le64(le32_to_cpu( 878 mpt3sas_base_get_sense_buffer_dma(ioc, smid))); 879 nvme_encap_request->ErrorResponseAllocationLength = 880 cpu_to_le16(NVME_ERROR_RESPONSE_SIZE); 881 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE); 882 ioc->build_nvme_prp(ioc, smid, nvme_encap_request, 883 data_out_dma, data_out_sz, data_in_dma, data_in_sz); 884 if (test_bit(device_handle, ioc->device_remove_in_progress)) { 885 dtmprintk(ioc, 886 ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n", 887 device_handle)); 888 mpt3sas_base_free_smid(ioc, smid); 889 ret = -EINVAL; 890 goto out; 891 } 892 mpt3sas_base_put_smid_nvme_encap(ioc, smid); 893 break; 894 } 895 case MPI2_FUNCTION_SCSI_IO_REQUEST: 896 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 897 { 898 Mpi2SCSIIORequest_t *scsiio_request = 899 (Mpi2SCSIIORequest_t *)request; 900 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; 901 scsiio_request->SenseBufferLowAddress = 902 mpt3sas_base_get_sense_buffer_dma(ioc, smid); 903 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); 904 if (test_bit(device_handle, ioc->device_remove_in_progress)) { 905 dtmprintk(ioc, 906 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", 907 device_handle)); 908 mpt3sas_base_free_smid(ioc, smid); 909 ret = -EINVAL; 910 goto out; 911 } 912 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 913 data_in_dma, data_in_sz); 914 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) 915 ioc->put_smid_scsi_io(ioc, smid, device_handle); 916 else 917 ioc->put_smid_default(ioc, smid); 918 break; 919 } 920 case MPI2_FUNCTION_SCSI_TASK_MGMT: 921 { 922 Mpi2SCSITaskManagementRequest_t *tm_request = 923 (Mpi2SCSITaskManagementRequest_t *)request; 924 925 dtmprintk(ioc, 926 ioc_info(ioc, "TASK_MGMT: handle(0x%04x), 
task_type(0x%02x)\n", 927 le16_to_cpu(tm_request->DevHandle), 928 tm_request->TaskType)); 929 ioc->got_task_abort_from_ioctl = 1; 930 if (tm_request->TaskType == 931 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || 932 tm_request->TaskType == 933 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { 934 if (_ctl_set_task_mid(ioc, &karg, tm_request)) { 935 mpt3sas_base_free_smid(ioc, smid); 936 ioc->got_task_abort_from_ioctl = 0; 937 goto out; 938 } 939 } 940 ioc->got_task_abort_from_ioctl = 0; 941 942 if (test_bit(device_handle, ioc->device_remove_in_progress)) { 943 dtmprintk(ioc, 944 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", 945 device_handle)); 946 mpt3sas_base_free_smid(ioc, smid); 947 ret = -EINVAL; 948 goto out; 949 } 950 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( 951 tm_request->DevHandle)); 952 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 953 data_in_dma, data_in_sz); 954 ioc->put_smid_hi_priority(ioc, smid, 0); 955 break; 956 } 957 case MPI2_FUNCTION_SMP_PASSTHROUGH: 958 { 959 Mpi2SmpPassthroughRequest_t *smp_request = 960 (Mpi2SmpPassthroughRequest_t *)mpi_request; 961 u8 *data; 962 963 if (!ioc->multipath_on_hba) { 964 /* ioc determines which port to use */ 965 smp_request->PhysicalPort = 0xFF; 966 } 967 if (smp_request->PassthroughFlags & 968 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) 969 data = (u8 *)&smp_request->SGL; 970 else { 971 if (unlikely(data_out == NULL)) { 972 pr_err("failure at %s:%d/%s()!\n", 973 __FILE__, __LINE__, __func__); 974 mpt3sas_base_free_smid(ioc, smid); 975 ret = -EINVAL; 976 goto out; 977 } 978 data = data_out; 979 } 980 981 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { 982 ioc->ioc_link_reset_in_progress = 1; 983 ioc->ignore_loginfos = 1; 984 } 985 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 986 data_in_sz); 987 ioc->put_smid_default(ioc, smid); 988 break; 989 } 990 case MPI2_FUNCTION_SATA_PASSTHROUGH: 991 { 992 if (test_bit(device_handle, 
ioc->device_remove_in_progress)) { 993 dtmprintk(ioc, 994 ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n", 995 device_handle)); 996 mpt3sas_base_free_smid(ioc, smid); 997 ret = -EINVAL; 998 goto out; 999 } 1000 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 1001 data_in_sz); 1002 ioc->put_smid_default(ioc, smid); 1003 break; 1004 } 1005 case MPI2_FUNCTION_FW_DOWNLOAD: 1006 { 1007 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) { 1008 ioc_info(ioc, "Firmware download not supported for ATTO HBA.\n"); 1009 ret = -EPERM; 1010 break; 1011 } 1012 fallthrough; 1013 } 1014 case MPI2_FUNCTION_FW_UPLOAD: 1015 { 1016 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, 1017 data_in_sz); 1018 ioc->put_smid_default(ioc, smid); 1019 break; 1020 } 1021 case MPI2_FUNCTION_TOOLBOX: 1022 { 1023 Mpi2ToolboxCleanRequest_t *toolbox_request = 1024 (Mpi2ToolboxCleanRequest_t *)mpi_request; 1025 1026 if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) 1027 || (toolbox_request->Tool == 1028 MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN)) 1029 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, 1030 data_in_dma, data_in_sz); 1031 else if (toolbox_request->Tool == 1032 MPI2_TOOLBOX_MEMORY_MOVE_TOOL) { 1033 Mpi2ToolboxMemMoveRequest_t *mem_move_request = 1034 (Mpi2ToolboxMemMoveRequest_t *)request; 1035 Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL; 1036 1037 ioc->build_sg_mpi(ioc, psge, data_out_dma, 1038 data_out_sz, data_in_dma, data_in_sz); 1039 if (data_out_sz && !data_in_sz) { 1040 dst = 1041 (Mpi2SGESimple64_t *)&mem_move_request->SGL; 1042 src = (void *)dst + ioc->sge_size; 1043 1044 memcpy(&tmp, src, ioc->sge_size); 1045 memcpy(src, dst, ioc->sge_size); 1046 memcpy(dst, &tmp, ioc->sge_size); 1047 } 1048 if (ioc->logging_level & MPT_DEBUG_TM) { 1049 ioc_info(ioc, 1050 "Mpi2ToolboxMemMoveRequest_t request msg\n"); 1051 _debug_dump_mf(mem_move_request, 1052 ioc->request_sz/4); 1053 } 1054 } else 1055 
ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 1056 data_in_dma, data_in_sz); 1057 ioc->put_smid_default(ioc, smid); 1058 break; 1059 } 1060 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: 1061 { 1062 Mpi2SasIoUnitControlRequest_t *sasiounit_request = 1063 (Mpi2SasIoUnitControlRequest_t *)mpi_request; 1064 1065 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET 1066 || sasiounit_request->Operation == 1067 MPI2_SAS_OP_PHY_LINK_RESET) { 1068 ioc->ioc_link_reset_in_progress = 1; 1069 ioc->ignore_loginfos = 1; 1070 } 1071 /* drop to default case for posting the request */ 1072 } 1073 fallthrough; 1074 default: 1075 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, 1076 data_in_dma, data_in_sz); 1077 ioc->put_smid_default(ioc, smid); 1078 break; 1079 } 1080 1081 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) 1082 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; 1083 else 1084 timeout = karg.timeout; 1085 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ); 1086 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 1087 Mpi2SCSITaskManagementRequest_t *tm_request = 1088 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 1089 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( 1090 tm_request->DevHandle)); 1091 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); 1092 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || 1093 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && 1094 ioc->ioc_link_reset_in_progress) { 1095 ioc->ioc_link_reset_in_progress = 0; 1096 ioc->ignore_loginfos = 0; 1097 } 1098 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1099 mpt3sas_check_cmd_timeout(ioc, 1100 ioc->ctl_cmds.status, mpi_request, 1101 karg.data_sge_offset, issue_reset); 1102 goto issue_host_reset; 1103 } 1104 1105 mpi_reply = ioc->ctl_cmds.reply; 1106 1107 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && 1108 (ioc->logging_level & MPT_DEBUG_TM)) { 1109 Mpi2SCSITaskManagementReply_t *tm_reply = 1110 
(Mpi2SCSITaskManagementReply_t *)mpi_reply; 1111 1112 ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", 1113 le16_to_cpu(tm_reply->IOCStatus), 1114 le32_to_cpu(tm_reply->IOCLogInfo), 1115 le32_to_cpu(tm_reply->TerminationCount)); 1116 } 1117 1118 /* copy out xdata to user */ 1119 if (data_in_sz) { 1120 if (copy_to_user(karg.data_in_buf_ptr, data_in, 1121 data_in_sz)) { 1122 pr_err("failure at %s:%d/%s()!\n", __FILE__, 1123 __LINE__, __func__); 1124 ret = -ENODATA; 1125 goto out; 1126 } 1127 } 1128 1129 /* copy out reply message frame to user */ 1130 if (karg.max_reply_bytes) { 1131 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); 1132 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, 1133 sz)) { 1134 pr_err("failure at %s:%d/%s()!\n", __FILE__, 1135 __LINE__, __func__); 1136 ret = -ENODATA; 1137 goto out; 1138 } 1139 } 1140 1141 /* copy out sense/NVMe Error Response to user */ 1142 if (karg.max_sense_bytes && (mpi_request->Function == 1143 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == 1144 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function == 1145 MPI2_FUNCTION_NVME_ENCAPSULATED)) { 1146 if (karg.sense_data_ptr == NULL) { 1147 ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n"); 1148 goto out; 1149 } 1150 sz_arg = (mpi_request->Function == 1151 MPI2_FUNCTION_NVME_ENCAPSULATED) ? 
NVME_ERROR_RESPONSE_SIZE : 1152 SCSI_SENSE_BUFFERSIZE; 1153 sz = min_t(u32, karg.max_sense_bytes, sz_arg); 1154 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, 1155 sz)) { 1156 pr_err("failure at %s:%d/%s()!\n", __FILE__, 1157 __LINE__, __func__); 1158 ret = -ENODATA; 1159 goto out; 1160 } 1161 } 1162 1163 issue_host_reset: 1164 if (issue_reset) { 1165 ret = -ENODATA; 1166 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || 1167 mpi_request->Function == 1168 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || 1169 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { 1170 ioc_info(ioc, "issue target reset: handle = (0x%04x)\n", 1171 le16_to_cpu(mpi_request->FunctionDependent1)); 1172 mpt3sas_halt_firmware(ioc); 1173 pcie_device = mpt3sas_get_pdev_by_handle(ioc, 1174 le16_to_cpu(mpi_request->FunctionDependent1)); 1175 if (pcie_device && (!ioc->tm_custom_handling) && 1176 (!(mpt3sas_scsih_is_pcie_scsi_device( 1177 pcie_device->device_info)))) 1178 tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, 1179 le16_to_cpu(mpi_request->FunctionDependent1), 1180 0, 0, 0, 1181 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 1182 0, pcie_device->reset_timeout, 1183 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); 1184 else 1185 tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, 1186 le16_to_cpu(mpi_request->FunctionDependent1), 1187 0, 0, 0, 1188 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 1189 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); 1190 1191 if (tm_ret != SUCCESS) { 1192 ioc_info(ioc, 1193 "target reset failed, issue hard reset: handle (0x%04x)\n", 1194 le16_to_cpu(mpi_request->FunctionDependent1)); 1195 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 1196 } 1197 } else 1198 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); 1199 } 1200 1201 out: 1202 if (pcie_device) 1203 pcie_device_put(pcie_device); 1204 1205 /* free memory associated with sg buffers */ 1206 if (data_in) 1207 dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, 1208 data_in_dma); 

	/* release the data-out DMA buffer and cached request frame, then
	 * mark the ctl command slot free for the next ioctl */
	if (data_out)
		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
		    data_out_dma);

	kfree(mpi_request);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return ret;
}

/**
 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Fills a struct mpt3_ioctl_iocinfo with adapter identity (PCI ids,
 * firmware/BIOS/driver versions, adapter type) and copies it to user space.
 *
 * Return: 0 on success, -EFAULT if the copy to user space fails.
 */
static long
_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_iocinfo karg;

	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
	    __func__));

	memset(&karg, 0 , sizeof(karg));
	if (ioc->pfacts)
		karg.port_number = ioc->pfacts[0].PortNumber;
	karg.hw_rev = ioc->pdev->revision;
	karg.pci_id = ioc->pdev->device;
	karg.subsystem_device = ioc->pdev->subsystem_device;
	karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
	karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
	karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
	karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
	karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
	karg.firmware_version = ioc->facts.FWVersion.Word;
	strcpy(karg.driver_version, ioc->driver_name);
	strcat(karg.driver_version, "-");
	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		if (ioc->is_warpdrive)
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
		else
			karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
		strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		if (ioc->is_gen35_ioc)
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
		else
			karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
		strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
		break;
	}
	karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);

	karg.driver_capability |=
		MPT3_IOCTL_IOCINFO_DRIVER_CAP_MCTP_PASSTHRU;

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Reports the size of the driver's event log and the currently enabled
 * event-type mask back to the application.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy.
 */
static long
_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventquery karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
	    __func__));

	karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
	memcpy(karg.event_types, ioc->event_type,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Installs the application-supplied event-type mask and, on first use,
 * allocates the driver's internal event log.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, -ENOMEM if
 * the event log cannot be allocated.
 */
static long
_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventenable karg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
	    __func__));

	memcpy(ioc->event_type, karg.event_types,
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
	mpt3sas_base_validate_event_type(ioc, ioc->event_type);

	/* event log is allocated once and kept for the adapter's lifetime */
	if (ioc->event_log)
		return 0;
	/* initialize event_log */
	ioc->event_context = 0;
	ioc->aen_event_read_flag = 0;
	ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
	    sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
	if (!ioc->event_log) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}
	return 0;
}

/**
 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Copies out as many logged events as fit in the caller-supplied buffer
 * (clamped to MPT3SAS_CTL_EVENT_LOG_SIZE) and rearms the SIGIO/poll
 * notification flag.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, -ENODATA if
 * no events fit or the event log was never allocated.
 */
static long
_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_eventreport karg;
	u32 number_bytes, max_events, max;
	struct mpt3_ioctl_eventreport __user *uarg = arg;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
	    __func__));

	/* NOTE(review): assumes hdr.max_data_size >= sizeof(header); a
	 * smaller value underflows number_bytes, but max is clamped to
	 * MPT3SAS_CTL_EVENT_LOG_SIZE below, so the copy stays bounded. */
	number_bytes = karg.hdr.max_data_size -
	    sizeof(struct mpt3_ioctl_header);
	max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
	max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);

	/* If fewer than 1 event is requested, there must have
	 * been some type of error.
	 */
	if (!max || !ioc->event_log)
		return -ENODATA;

	number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
	if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* reset flag so SIGIO can restart */
	ioc->aen_event_read_flag = 0;
	return 0;
}

/**
 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Issues a user-requested hard reset of the controller.
 *
 * Return: 0 (the reset outcome is only logged), -EFAULT on a failed
 * user-space copy, -EAGAIN while recovery or driver load is in progress.
 */
static long
_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_diag_reset karg;
	int retval;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading)
		return -EAGAIN;

	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
	    __func__));

	ioc->reset_from_user = 1;
	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc_info(ioc,
	    "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
	return 0;
}

/**
 * _ctl_btdh_search_sas_device - searching for sas device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 *
 * Translates between (bus, id) and firmware handle for a SAS device:
 * wildcard bus/id (0xFFFFFFFF) looks up by handle, wildcard handle
 * (0xFFFF) looks up by bus/id.
 *
 * Return: 1 when a mapping was found and @btdh was filled in, else 0.
 */
static int
_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _sas_device *sas_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->sas_device_list))
		return rc;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == sas_device->handle) {
			btdh->bus = sas_device->channel;
			btdh->id = sas_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == sas_device->channel && btdh->id ==
		    sas_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = sas_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_search_pcie_device - searching for pcie device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 *
 * Same wildcard-based bus/id <-> handle translation as the SAS variant,
 * over the PCIe device list.
 *
 * Return: 1 when a mapping was found and @btdh was filled in, else 0.
 */
static int
_ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->pcie_device_list))
		return rc;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == pcie_device->handle) {
			btdh->bus = pcie_device->channel;
			btdh->id = pcie_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == pcie_device->channel && btdh->id ==
		    pcie_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = pcie_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_search_raid_device - searching for raid device
 * @ioc: per adapter object
 * @btdh: btdh ioctl payload
 *
 * Same wildcard-based bus/id <-> handle translation as the SAS variant,
 * over the RAID volume list.
 *
 * Return: 1 when a mapping was found and @btdh was filled in, else 0.
 */
static int
_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_ioctl_btdh_mapping *btdh)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	int rc = 0;

	if (list_empty(&ioc->raid_device_list))
		return rc;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
		    btdh->handle == raid_device->handle) {
			btdh->bus = raid_device->channel;
			btdh->id = raid_device->id;
			rc = 1;
			goto out;
		} else if (btdh->bus == raid_device->channel && btdh->id ==
		    raid_device->id && btdh->handle == 0xFFFF) {
			btdh->handle = raid_device->handle;
			rc = 1;
			goto out;
		}
	}
 out:
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	return rc;
}

/**
 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Tries the SAS, PCIe, then RAID device lists in order until one of them
 * resolves the requested bus/target <-> device-handle mapping, then copies
 * the (possibly updated) payload back to user space.
 *
 * Return: 0 on success (even when no device matched), -EFAULT on a failed
 * user-space copy.
 */
static long
_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_ioctl_btdh_mapping karg;
	int rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
	    __func__));

	rc = _ctl_btdh_search_sas_device(ioc, &karg);
	if (!rc)
		rc = _ctl_btdh_search_pcie_device(ioc, &karg);
	if (!rc)
		_ctl_btdh_search_raid_device(ioc, &karg);

	if (copy_to_user(arg, &karg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_diag_capability - return diag buffer capability
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 *
 * returns 1 when diag buffer support is enabled in firmware
 */
static u8
_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
{
	u8 rc = 0;

	switch (buffer_type) {
	case MPI2_DIAG_BUF_TYPE_TRACE:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			rc = 1;
		break;
	case MPI2_DIAG_BUF_TYPE_EXTENDED:
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			rc = 1;
	}

	return rc;
}

/**
 * _ctl_diag_get_bufftype - return diag buffer type
 *              either TRACE, SNAPSHOT, or EXTENDED
 * @ioc: per adapter object
 * @unique_id: specifies the unique_id for the buffer
 *
 * returns MPT3_DIAG_UID_NOT_FOUND if the id not found
 */
static u8
_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id)
{
	u8 index;

	/* the buffer-type index doubles as the slot index in unique_id[] */
	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
		if (ioc->unique_id[index] == unique_id)
			return index;
	}

	return MPT3_DIAG_UID_NOT_FOUND;
}

/**
 * _ctl_diag_register_2 - wrapper for registering diag buffer support
 * @ioc: per adapter object
 * @diag_register: the diag_register struct passed in from user space
 *
 * Validates the request against the current buffer ownership state,
 * (re)allocates the DMA buffer if needed, and posts a DIAG_BUFFER_POST
 * request to firmware, waiting for its completion.
 *
 * Return: 0 on success, negative errno on failure (-EAGAIN when the IOC
 * is not operational or the ctl command slot is busy, -EPERM/-EINVAL on
 * invalid requests, -ENOMEM on allocation failure, -EFAULT on a bad or
 * missing firmware reply).
 */
static long
_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	struct mpt3_diag_register *diag_register)
{
	int rc, i;
	void *request_data = NULL;
	dma_addr_t request_data_dma;
	u32 request_data_sz = 0;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	u8 buffer_type;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	u8 issue_reset = 0;

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
	    __func__));

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		ioc_err(ioc, "%s: failed due to ioc not operational\n",
			__func__);
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	buffer_type = diag_register->buffer_type;
	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	if (diag_register->unique_id == 0) {
		ioc_err(ioc,
		    "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__,
		    diag_register->unique_id, buffer_type);
		return -EINVAL;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_APP_OWNED) &&
	    !(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED)) {
		ioc_err(ioc,
		    "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n",
		    __func__, buffer_type, ioc->unique_id[buffer_type]);
		return -EINVAL;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) {
		/*
		 * If driver posts buffer initially, then an application wants
		 * to Register that buffer (own it) without Releasing first,
		 * the application Register command MUST have the same buffer
		 * type and size in the Register command (obtained from the
		 * Query command). Otherwise that Register command will be
		 * failed. If the application has released the buffer but wants
		 * to re-register it, it should be allowed as long as the
		 * Unique-Id/Size match.
		 */

		if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID &&
		    ioc->diag_buffer_sz[buffer_type] ==
		    diag_register->requested_buffer_size) {

			if (!(ioc->diag_buffer_status[buffer_type] &
			     MPT3_DIAG_BUFFER_IS_RELEASED)) {
				dctlprintk(ioc, ioc_info(ioc,
				    "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n",
				    __func__, buffer_type,
				    ioc->unique_id[buffer_type],
				    diag_register->unique_id));

				/*
				 * Application wants to own the buffer with
				 * the same size.
				 */
				ioc->unique_id[buffer_type] =
				    diag_register->unique_id;
				rc = 0; /* success */
				goto out;
			}
		} else if (ioc->unique_id[buffer_type] !=
		    MPT3DIAGBUFFUNIQUEID) {
			if (ioc->unique_id[buffer_type] !=
			    diag_register->unique_id ||
			    ioc->diag_buffer_sz[buffer_type] !=
			    diag_register->requested_buffer_size ||
			    !(ioc->diag_buffer_status[buffer_type] &
			    MPT3_DIAG_BUFFER_IS_RELEASED)) {
				ioc_err(ioc,
				    "%s: already has a registered buffer for buffer_type(0x%02x)\n",
				    __func__, buffer_type);
				return -EINVAL;
			}
		} else {
			ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
			    __func__, buffer_type);
			return -EINVAL;
		}
	} else if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {

		if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID ||
		    ioc->diag_buffer_sz[buffer_type] !=
		    diag_register->requested_buffer_size) {

			ioc_err(ioc,
			    "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n",
			    __func__, buffer_type,
			    ioc->diag_buffer_sz[buffer_type]);
			return -EINVAL;
		}
	}

	if (diag_register->requested_buffer_size % 4) {
		ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
			__func__);
		return -EINVAL;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
	/* Reset ioc variables used for additional query commands */
	ioc->reset_from_user = 0;
	memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
	ioc->diag_buffer_status[buffer_type] &=
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
	memcpy(ioc->product_specific[buffer_type],
	    diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
	ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;

	/* reuse an existing buffer only if the requested size matches */
	if (request_data) {
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
			dma_free_coherent(&ioc->pdev->dev,
			    ioc->diag_buffer_sz[buffer_type],
			    request_data, request_data_dma);
			request_data = NULL;
		}
	}

	if (request_data == NULL) {
		ioc->diag_buffer_sz[buffer_type] = 0;
		ioc->diag_buffer_dma[buffer_type] = 0;
		request_data = dma_alloc_coherent(&ioc->pdev->dev,
		    request_data_sz, &request_data_dma, GFP_KERNEL);
		if (request_data == NULL) {
			ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
				__func__, request_data_sz);
			mpt3sas_base_free_smid(ioc, smid);
			rc = -ENOMEM;
			goto out;
		}
		ioc->diag_buffer[buffer_type] = request_data;
		ioc->diag_buffer_sz[buffer_type] = request_data_sz;
		ioc->diag_buffer_dma[buffer_type] = request_data_dma;
	}

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = diag_register->buffer_type;
	mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
	mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
	mpi_request->BufferLength = cpu_to_le32(request_data_sz);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	dctlprintk(ioc,
		   ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
			    __func__, request_data,
			    (unsigned long long)request_data_dma,
			    le32_to_cpu(mpi_request->BufferLength)));

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);

	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__,
			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	/* on failure, drop the DMA buffer so a retry starts clean */
	if (rc && request_data) {
		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
		    request_data, request_data_dma);
		ioc->diag_buffer[buffer_type] = NULL;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
	}

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
 * @ioc: per adapter object
 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
 *
 * This is called when command line option diag_buffer_enable is enabled
 * at driver load time.
 */
void
mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
{
	struct mpt3_diag_register diag_register;
	/* NOTE(review): ret_val is u32 but _ctl_diag_register_2() returns a
	 * (possibly negative) long; the == -ENOMEM comparisons below rely on
	 * the implicit unsigned conversion. Works on two's complement, but a
	 * signed type would be clearer — confirm before changing. */
	u32 ret_val;
	u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10;
	u32 min_trace_buff_size = 0;
	u32 decr_trace_buff_size = 0;

	memset(&diag_register, 0, sizeof(struct mpt3_diag_register));

	if (bits_to_register & 1) {
		ioc_info(ioc, "registering trace buffer support\n");
		ioc->diag_trigger_master.MasterData =
		    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
		diag_register.unique_id =
		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);

		if (trace_buff_size != 0) {
			diag_register.requested_buffer_size = trace_buff_size;
			min_trace_buff_size =
			    ioc->manu_pg11.HostTraceBufferMinSizeKB<<10;
			decr_trace_buff_size =
			    ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10;

			if (min_trace_buff_size > trace_buff_size) {
				/* The buff size is not set correctly */
				ioc_err(ioc,
				    "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n",
				     min_trace_buff_size>>10,
				     trace_buff_size>>10);
				ioc_err(ioc,
				    "Using zero Min Trace Buff Size\n");
				min_trace_buff_size = 0;
			}

			if (decr_trace_buff_size == 0) {
				/*
				 * retry the min size if decrement
				 * is not available.
				 */
				decr_trace_buff_size =
				    trace_buff_size - min_trace_buff_size;
			}
		} else {
			/* register for 2MB buffers  */
			diag_register.requested_buffer_size = 2 * (1024 * 1024);
		}

		/* keep shrinking the buffer until the allocation succeeds
		 * or it would fall below the minimum size */
		do {
			ret_val = _ctl_diag_register_2(ioc,  &diag_register);

			if (ret_val == -ENOMEM && min_trace_buff_size &&
			    (trace_buff_size - decr_trace_buff_size) >=
			    min_trace_buff_size) {
				/* adjust the buffer size */
				trace_buff_size -= decr_trace_buff_size;
				diag_register.requested_buffer_size =
				    trace_buff_size;
			} else
				break;
		} while (true);

		if (ret_val == -ENOMEM)
			ioc_err(ioc,
			    "Cannot allocate trace buffer memory. Last memory tried = %d KB\n",
			    diag_register.requested_buffer_size>>10);
		else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
		    & MPT3_DIAG_BUFFER_IS_REGISTERED) {
			ioc_info(ioc, "Trace buffer memory %d KB allocated\n",
			    diag_register.requested_buffer_size>>10);
			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
				ioc->diag_buffer_status[
				    MPI2_DIAG_BUF_TYPE_TRACE] |=
				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
		}
	}

	if (bits_to_register & 2) {
		ioc_info(ioc, "registering snapshot buffer support\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}

	if (bits_to_register & 4) {
		ioc_info(ioc, "registering extended buffer support\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
		/* register for 2MB buffers  */
		diag_register.requested_buffer_size = 2 * (1024 * 1024);
		diag_register.unique_id = 0x7075901;
		_ctl_diag_register_2(ioc, &diag_register);
	}
}

/**
 * _ctl_diag_register - application register with driver
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * This will allow the driver to setup any required buffers that will be
 * needed by firmware to communicate with the driver.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, otherwise
 * the error from _ctl_diag_register_2().
 */
static long
_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_register karg;
	long rc;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	rc = _ctl_diag_register_2(ioc, &karg);

	/* the buffer now belongs to the application, not the driver */
	if (!rc && (ioc->diag_buffer_status[karg.buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED))
		ioc->diag_buffer_status[karg.buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_APP_OWNED;

	return rc;
}

/**
 * _ctl_diag_unregister - application unregister with driver
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * This will allow the driver to cleanup any memory allocated for diag
 * messages and to free up any resources.
 *
 * Return: 0 on success, -EFAULT on a failed user-space copy, -EINVAL for
 * an unknown/unreleased/mismatched buffer, -EPERM when firmware lacks the
 * capability, -ENOMEM if no buffer memory exists.
 */
static long
_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_unregister karg;
	void *request_data;
	dma_addr_t request_data_dma;
	u32 request_data_sz;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
	    __func__));

	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
		    __func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
			__func__, buffer_type);
		return -EINVAL;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) {
		/* driver-owned buffer: keep the memory, just drop app
		 * ownership and the registered state */
		ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_APP_OWNED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_REGISTERED;
	} else {
		request_data_sz = ioc->diag_buffer_sz[buffer_type];
		request_data_dma = ioc->diag_buffer_dma[buffer_type];
		dma_free_coherent(&ioc->pdev->dev, request_data_sz,
		    request_data, request_data_dma);
		ioc->diag_buffer[buffer_type] = NULL;
		ioc->diag_buffer_status[buffer_type] = 0;
	}
	return 0;
}

/**
 * _ctl_diag_query - query relevant info associated with diag buffers
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * The application will send only buffer_type and unique_id. Driver will
 * inspect unique_id first, if valid, fill in all the info. If unique_id is
 * 0x00, the driver will return info specified by Buffer Type.
 */
static long
_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_query karg;
	void *request_data;
	int i;
	u8 buffer_type;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	karg.application_flags = 0;
	buffer_type = karg.buffer_type;

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	/* driver-allocated buffers may be queried before registration with
	 * the firmware; everything else must already be registered
	 */
	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) {
		if ((ioc->diag_buffer_status[buffer_type] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
			ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
				__func__, buffer_type);
			return -EINVAL;
		}
	}

	/* unique_id of 0 means "query by buffer type"; otherwise it must
	 * match the registered id for this buffer type
	 */
	if (karg.unique_id) {
		if (karg.unique_id != ioc->unique_id[buffer_type]) {
			ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
				__func__, karg.unique_id);
			return -EINVAL;
		}
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	/* translate the internal status bits into the application flags */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED))
		karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID;

	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))
		karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS;

	/* NOTE(review): DYNAMIC_BUFFER_ALLOC is reported when the buffer is
	 * NOT driver allocated — presumably meaning it was dynamically
	 * allocated on behalf of the application; confirm against the
	 * mpt3sas_ctl.h flag definitions.
	 */
	if (!(ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED))
		karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC;

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_APP_OWNED))
		karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED;

	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		karg.product_specific[i] =
		    ioc->product_specific[buffer_type][i];

	karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
	karg.driver_added_buffer_size = 0;
	karg.unique_id = ioc->unique_id[buffer_type];
	karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];

	if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
		ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
			__func__, arg);
		return -EFAULT;
	}
	return 0;
}

/**
 * mpt3sas_send_diag_release - Diag Release Message
 * @ioc: per adapter object
 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
 * @issue_reset: specifies whether host reset is required.
 *
 */
int
mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
	u8 *issue_reset)
{
	Mpi2DiagReleaseRequest_t *mpi_request;
	Mpi2DiagReleaseReply_t *mpi_reply;
	u16 smid;
	u16 ioc_status;
	u32 ioc_state;
	int rc;
	u8 reset_needed = 0;

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	rc = 0;
	*issue_reset = 0;


	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		/* IOC is faulted: the firmware no longer owns the buffer, so
		 * mark it released locally instead of sending the request
		 */
		if (ioc->diag_buffer_status[buffer_type] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED)
			ioc->diag_buffer_status[buffer_type] |=
			    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: skipping due to FAULT state\n",
				    __func__));
		rc = -EAGAIN;
		goto out;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* build and fire the DIAG_RELEASE request frame */
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
	mpi_request->BufferType = buffer_type;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		/* timed out: mpt3sas_check_cmd_timeout decides whether the
		 * caller must issue a host reset (writes reset_needed)
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
		*issue_reset = reset_needed;
		rc = -EFAULT;
		goto out;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__,
			 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 out:
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * _ctl_diag_release - request to send Diag Release Message to firmware
 * @ioc: ?
 * @arg: user space buffer containing ioctl content
 *
 * This allows ownership of the specified buffer to be returned to the driver,
 * allowing an application to read the buffer without fear that firmware is
 * overwriting information in the buffer.
 */
static long
_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_release karg;
	void *request_data;
	int rc;
	u8 buffer_type;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
			__func__, buffer_type);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];

	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	/* buffers were already released due to a host reset: just update the
	 * bookkeeping flags, no need to talk to the firmware
	 */
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_RELEASED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
		ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
			__func__, buffer_type);
		return 0;
	}

	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);

	/* a timed-out release requires a hard reset to recover the IOC */
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

	return rc;
}

/**
 * _ctl_diag_read_buffer - request for copy of the diag buffer
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 */
static long
_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_diag_read_buffer karg;
	struct mpt3_diag_read_buffer __user *uarg = arg;
	void *request_data, *diag_data;
	Mpi2DiagBufferPostRequest_t *mpi_request;
	Mpi2DiagBufferPostReply_t *mpi_reply;
	int rc, i;
	u8 buffer_type;
	unsigned long request_size, copy_size;
	u16 smid;
	u16 ioc_status;
	u8 issue_reset = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	dctlprintk(ioc, ioc_info(ioc, "%s\n",
				 __func__));

	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	if (!_ctl_diag_capability(ioc, buffer_type)) {
		ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -EPERM;
	}

	if (karg.unique_id != ioc->unique_id[buffer_type]) {
		ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
			__func__, karg.unique_id);
		return -EINVAL;
	}

	request_data = ioc->diag_buffer[buffer_type];
	if (!request_data) {
		ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
			__func__, buffer_type);
		return -ENOMEM;
	}

	request_size = ioc->diag_buffer_sz[buffer_type];

	/* offsets/lengths must be dword aligned */
	if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
		ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
			__func__);
		return -EINVAL;
	}

	if (karg.starting_offset > request_size)
		return -EINVAL;

	diag_data = (void *)(request_data + karg.starting_offset);
	dctlprintk(ioc,
		   ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
			    __func__, diag_data, karg.starting_offset,
			    karg.bytes_to_read));

	/* Truncate data on requests that are too large (first clause also
	 * guards against pointer wrap-around)
	 */
	if ((diag_data + karg.bytes_to_read < diag_data) ||
	    (diag_data + karg.bytes_to_read > request_data + request_size))
		copy_size = request_size - karg.starting_offset;
	else
		copy_size = karg.bytes_to_read;

	/* uarg->diagnostic_data is a trailing array in user memory, so this
	 * takes its address rather than dereferencing the __user pointer
	 */
	if (copy_to_user((void __user *)uarg->diagnostic_data,
	    diag_data, copy_size)) {
		ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
			__func__, diag_data);
		return -EFAULT;
	}

	if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
		return 0;

	/* application asked to re-register the buffer with firmware; only
	 * needed when the buffer has been released
	 */
	dctlprintk(ioc,
		   ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
			    __func__, buffer_type));
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		dctlprintk(ioc,
			   ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
				    __func__, buffer_type));
		return 0;
	}
	/* Get a free request frame and save the message context.
	 */

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* build a DIAG_BUFFER_POST request to hand the buffer back to fw */
	rc = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, ioc->request_sz);
	ioc->ctl_cmds.smid = smid;

	mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	mpi_request->BufferType = buffer_type;
	mpi_request->BufferLength =
	    cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
	mpi_request->BufferAddress =
	    cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
	for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
		mpi_request->ProductSpecific[i] =
			cpu_to_le32(ioc->product_specific[buffer_type][i]);
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;

	init_completion(&ioc->ctl_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->ctl_cmds.done,
	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);

	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}

	/* process the completed Reply Message Frame */
	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
		ioc_err(ioc, "%s: no reply message\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	mpi_reply = ioc->ctl_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;

	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
		ioc->diag_buffer_status[buffer_type] |=
		    MPT3_DIAG_BUFFER_IS_REGISTERED;
		ioc->diag_buffer_status[buffer_type] &=
		    ~MPT3_DIAG_BUFFER_IS_RELEASED;
		dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
	} else {
		ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
			 __func__, ioc_status,
			 le32_to_cpu(mpi_reply->IOCLogInfo));
		rc = -EFAULT;
	}

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);

 out:

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}

/**
 * _ctl_addnl_diag_query - query relevant info associated with diag buffers
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * The application will send only unique_id. Driver will
 * inspect unique_id first, if valid, fill the details related to cause
 * for diag buffer release.
 */
static long
_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	struct mpt3_addnl_diag_query karg;
	u32 buffer_type = 0;

	if (copy_from_user(&karg, arg, sizeof(karg))) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -EFAULT;
	}
	dctlprintk(ioc, ioc_info(ioc, "%s\n",  __func__));
	/* unique_id of 0 is not a valid query target here */
	if (karg.unique_id == 0) {
		ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
		    __func__, karg.unique_id);
		return -EPERM;
	}
	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
		    __func__, karg.unique_id);
		return -EPERM;
	}
	memset(&karg.rel_query, 0, sizeof(karg.rel_query));
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		/* not registered: return a zeroed rel_query to the caller */
		ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
		    __func__, buffer_type);
		goto out;
	}
	if ((ioc->diag_buffer_status[buffer_type] &
	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
		ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
		    __func__, buffer_type);
		return -EPERM;
	}
	/* report the recorded host-trace-buffer release cause */
	memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query));
out:
	if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
		ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
		    __func__, arg);
		return -EFAULT;
	}
	return 0;
}

/**
 * _ctl_enable_diag_sbr_reload - enable sbr reload bit
 * @ioc: per adapter object
 * @arg: user space buffer containing ioctl content
 *
 * Enable the SBR reload bit
 */
static int
_ctl_enable_diag_sbr_reload(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
	u32 ioc_state, host_diagnostic;

	if (ioc->shost_recovery ||
	    ioc->pci_error_recovery || ioc->is_driver_loading ||
	    ioc->remove_host)
		return -EAGAIN;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);

	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL)
		return -EFAULT;

	host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);

	/* nothing to do if the bit is already set */
	if (host_diagnostic & MPI2_DIAG_SBR_RELOAD)
		return 0;

	/* unlock the host diagnostic register under the mutex; bail rather
	 * than block if another path holds it
	 */
	if (mutex_trylock(&ioc->hostdiag_unlock_mutex)) {
		if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) {
			mutex_unlock(&ioc->hostdiag_unlock_mutex);
			return -EFAULT;
		}
	} else
		return -EAGAIN;

	/* set the bit, then read it back to verify the write took effect */
	host_diagnostic |= MPI2_DIAG_SBR_RELOAD;
	writel(host_diagnostic, &ioc->chip->HostDiagnostic);
	host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
	mpt3sas_base_lock_host_diagnostic(ioc);
	mutex_unlock(&ioc->hostdiag_unlock_mutex);

	if (!(host_diagnostic & MPI2_DIAG_SBR_RELOAD)) {
		ioc_err(ioc, "%s: Failed to set Diag SBR Reload Bit\n", __func__);
		return -EFAULT;
	}

	ioc_info(ioc, "%s: Successfully set the Diag SBR Reload Bit\n", __func__);
	return 0;
}

#ifdef CONFIG_COMPAT
/**
 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
 * @ioc: per adapter object
 * @cmd: ioctl opcode
 * @arg: (struct mpt3_ioctl_command32)
 *
 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
 */
static long
_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
	void __user *arg)
{
	struct mpt3_ioctl_command32 karg32;
	struct mpt3_ioctl_command32 __user *uarg;
	struct mpt3_ioctl_command karg;

	/* the payload must be exactly the 32-bit layout */
	if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
		return -EINVAL;

	uarg = (struct mpt3_ioctl_command32 __user *) arg;

	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* copy the scalar fields across, then widen each 32-bit user
	 * pointer to a native __user pointer with compat_ptr()
	 */
	memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
	karg.hdr.ioc_number = karg32.hdr.ioc_number;
	karg.hdr.port_number = karg32.hdr.port_number;
	karg.hdr.max_data_size = karg32.hdr.max_data_size;
	karg.timeout = karg32.timeout;
	karg.max_reply_bytes = karg32.max_reply_bytes;
	karg.data_in_size = karg32.data_in_size;
	karg.data_out_size = karg32.data_out_size;
	karg.max_sense_bytes = karg32.max_sense_bytes;
	karg.data_sge_offset = karg32.data_sge_offset;
	karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
	karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
	karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
	karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
	return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
}
#endif

/**
 * _ctl_ioctl_main - main ioctl entry point
 * @file: (struct file)
 * @cmd: ioctl opcode
 * @arg: user space data buffer
 * @compat: handles 32 bit applications in 64bit os
 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
 */
static long
_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
	u8 compat, u16 mpi_version)
{
	struct MPT3SAS_ADAPTER *ioc;
	struct mpt3_ioctl_header ioctl_header;
	enum block_state state;
	long ret = -ENOIOCTLCMD;

	/* get IOCTL header */
	if (copy_from_user(&ioctl_header, (char __user *)arg,
	    sizeof(struct mpt3_ioctl_header))) {
		pr_err("failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -EFAULT;
	}

	/* resolve the target adapter from the header's ioc_number */
	if (_ctl_verify_adapter(ioctl_header.ioc_number,
	    &ioc, mpi_version) == -1 || !ioc)
		return -ENODEV;

	/* pci_access_mutex lock acquired by ioctl path */
	mutex_lock(&ioc->pci_access_mutex);

	if (ioc->shost_recovery || ioc->pci_error_recovery ||
	    ioc->is_driver_loading || ioc->remove_host) {
		ret = -EAGAIN;
		goto out_unlock_pciaccess;
	}

	/* serialize ctl commands; honor O_NONBLOCK by failing with -EAGAIN
	 * instead of sleeping on the mutex
	 */
	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
	if (state == NON_BLOCKING) {
		if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
			ret = -EAGAIN;
			goto out_unlock_pciaccess;
		}
	} else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
		ret = -ERESTARTSYS;
		goto out_unlock_pciaccess;
	}


	/* each opcode verifies the payload size before dispatching */
	switch (cmd) {
	case MPT3IOCINFO:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
			ret = _ctl_getiocinfo(ioc, arg);
		break;
#ifdef CONFIG_COMPAT
	case MPT3COMMAND32:
#endif
	case MPT3COMMAND:
	{
		struct mpt3_ioctl_command __user *uarg;
		struct mpt3_ioctl_command karg;

#ifdef CONFIG_COMPAT
		if (compat) {
			ret = _ctl_compat_mpt_command(ioc, cmd, arg);
			break;
		}
#endif
		if (copy_from_user(&karg, arg, sizeof(karg))) {
			pr_err("failure at %s:%d/%s()!\n",
			    __FILE__, __LINE__, __func__);
			ret = -EFAULT;
			break;
		}

		/* the header already selected the adapter; reject a payload
		 * that names a different one
		 */
		if (karg.hdr.ioc_number != ioctl_header.ioc_number) {
			ret = -EINVAL;
			break;
		}
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
			uarg = arg;
			ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
		}
		break;
	}
	case MPT3EVENTQUERY:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
			ret = _ctl_eventquery(ioc, arg);
		break;
	case MPT3EVENTENABLE:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
			ret = _ctl_eventenable(ioc, arg);
		break;
	case MPT3EVENTREPORT:
		ret = _ctl_eventreport(ioc, arg);
		break;
	case MPT3HARDRESET:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
			ret = _ctl_do_reset(ioc, arg);
		break;
	case MPT3BTDHMAPPING:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
			ret = _ctl_btdh_mapping(ioc, arg);
		break;
	case MPT3DIAGREGISTER:
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
			ret = _ctl_diag_register(ioc, arg);
		break;
case MPT3DIAGUNREGISTER: 2817 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) 2818 ret = _ctl_diag_unregister(ioc, arg); 2819 break; 2820 case MPT3DIAGQUERY: 2821 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) 2822 ret = _ctl_diag_query(ioc, arg); 2823 break; 2824 case MPT3DIAGRELEASE: 2825 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) 2826 ret = _ctl_diag_release(ioc, arg); 2827 break; 2828 case MPT3DIAGREADBUFFER: 2829 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) 2830 ret = _ctl_diag_read_buffer(ioc, arg); 2831 break; 2832 case MPT3ADDNLDIAGQUERY: 2833 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query)) 2834 ret = _ctl_addnl_diag_query(ioc, arg); 2835 break; 2836 case MPT3ENABLEDIAGSBRRELOAD: 2837 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_enable_diag_sbr_reload)) 2838 ret = _ctl_enable_diag_sbr_reload(ioc, arg); 2839 break; 2840 default: 2841 dctlprintk(ioc, 2842 ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n", 2843 cmd)); 2844 break; 2845 } 2846 2847 mutex_unlock(&ioc->ctl_cmds.mutex); 2848 out_unlock_pciaccess: 2849 mutex_unlock(&ioc->pci_access_mutex); 2850 return ret; 2851 } 2852 2853 /** 2854 * _ctl_get_mpt_mctp_passthru_adapter - Traverse the IOC list and return the IOC at 2855 * dev_index positionthat support MCTP passhtru 2856 * @dev_index: position in the mpt3sas_ioc_list to search for 2857 * Return pointer to the IOC on success 2858 * NULL if device not found error 2859 */ 2860 static struct MPT3SAS_ADAPTER * 2861 _ctl_get_mpt_mctp_passthru_adapter(int dev_index) 2862 { 2863 struct MPT3SAS_ADAPTER *ioc = NULL; 2864 int count = 0; 2865 2866 spin_lock(&gioc_lock); 2867 /* Traverse ioc list and return number of IOC that support MCTP passthru */ 2868 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { 2869 if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) { 2870 if (count == dev_index) { 2871 spin_unlock(&gioc_lock); 2872 return 0; 2873 } 2874 } 2875 } 2876 spin_unlock(&gioc_lock); 
2877 2878 return NULL; 2879 } 2880 2881 /** 2882 * mpt3sas_get_device_count - Retrieve the count of MCTP passthrough 2883 * capable devices managed by the driver. 2884 * 2885 * Returns number of devices that support MCTP passthrough. 2886 */ 2887 int 2888 mpt3sas_get_device_count(void) 2889 { 2890 int count = 0; 2891 struct MPT3SAS_ADAPTER *ioc = NULL; 2892 2893 spin_lock(&gioc_lock); 2894 /* Traverse ioc list and return number of IOC that support MCTP passthru */ 2895 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) 2896 if (ioc->facts.IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_MCTP_PASSTHRU) 2897 count++; 2898 2899 spin_unlock(&gioc_lock); 2900 2901 return count; 2902 } 2903 EXPORT_SYMBOL(mpt3sas_get_device_count); 2904 2905 /** 2906 * mpt3sas_send_passthru_cmd - Send an MPI MCTP passthrough command to 2907 * firmware 2908 * @command: The MPI MCTP passthrough command to send to firmware 2909 * 2910 * Returns 0 on success, anything else is error. 2911 */ 2912 int mpt3sas_send_mctp_passthru_req(struct mpt3_passthru_command *command) 2913 { 2914 struct MPT3SAS_ADAPTER *ioc; 2915 MPI2RequestHeader_t *mpi_request = NULL, *request; 2916 MPI2DefaultReply_t *mpi_reply; 2917 Mpi26MctpPassthroughRequest_t *mctp_passthru_req; 2918 u16 smid; 2919 unsigned long timeout; 2920 u8 issue_reset = 0; 2921 u32 sz; 2922 void *psge; 2923 void *data_out = NULL; 2924 dma_addr_t data_out_dma = 0; 2925 size_t data_out_sz = 0; 2926 void *data_in = NULL; 2927 dma_addr_t data_in_dma = 0; 2928 size_t data_in_sz = 0; 2929 long ret; 2930 2931 /* Retrieve ioc from dev_index */ 2932 ioc = _ctl_get_mpt_mctp_passthru_adapter(command->dev_index); 2933 if (!ioc) 2934 return -ENODEV; 2935 2936 mutex_lock(&ioc->pci_access_mutex); 2937 if (ioc->shost_recovery || 2938 ioc->pci_error_recovery || ioc->is_driver_loading || 2939 ioc->remove_host) { 2940 ret = -EAGAIN; 2941 goto unlock_pci_access; 2942 } 2943 2944 /* Lock the ctl_cmds mutex to ensure a single ctl cmd is pending */ 2945 if 
	    (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
		ret = -ERESTARTSYS;
		goto unlock_pci_access;
	}

	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
		ret = -EAGAIN;
		goto unlock_ctl_cmds;
	}

	ret = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
	if (ret)
		goto unlock_ctl_cmds;

	/* this in-kernel API only accepts MCTP passthrough frames */
	mpi_request = (MPI2RequestHeader_t *)command->mpi_request;
	if (mpi_request->Function != MPI2_FUNCTION_MCTP_PASSTHROUGH) {
		ioc_err(ioc, "%s: Invalid request received, Function 0x%x\n",
			__func__, mpi_request->Function);
		ret = -EINVAL;
		goto unlock_ctl_cmds;
	}

	/* Use first reserved smid for passthrough commands */
	smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;

	ret = 0;
	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(request, 0, ioc->request_sz);
	memcpy(request, command->mpi_request, sizeof(Mpi26MctpPassthroughRequest_t));
	ioc->ctl_cmds.smid = smid;
	data_out_sz = command->data_out_size;
	data_in_sz = command->data_in_size;

	/* obtain dma-able memory for data transfer */
	if (data_out_sz) /* WRITE */ {
		data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
		    &data_out_dma, GFP_ATOMIC);
		if (!data_out) {
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
		/* caller buffers are kernel pointers, so plain memcpy */
		memcpy(data_out, command->data_out_buf_ptr, data_out_sz);

	}

	if (data_in_sz) /* READ */ {
		data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
		    &data_in_dma, GFP_ATOMIC);
		if (!data_in) {
			ret = -ENOMEM;
			mpt3sas_base_free_smid(ioc, smid);
			goto out;
		}
	}

	psge = &((Mpi26MctpPassthroughRequest_t *)request)->H2DSGL;

	init_completion(&ioc->ctl_cmds.done);

	mctp_passthru_req = (Mpi26MctpPassthroughRequest_t *)request;

	/* helper builds the SGLs and posts the request to the firmware */
	_ctl_send_mctp_passthru_req(ioc, mctp_passthru_req, psge, data_out_dma,
	    data_out_sz, data_in_dma, data_in_sz, smid);

	/* enforce a minimum wait of MPT3_IOCTL_DEFAULT_TIMEOUT seconds */
	timeout = command->timeout;
	if (timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;

	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->ctl_cmds.status, mpi_request,
		    sizeof(Mpi26MctpPassthroughRequest_t) / 4, issue_reset);
		goto issue_host_reset;
	}

	/* NOTE(review): mpi_reply is assigned but ctl_cmds.reply is used
	 * directly below; the local looks redundant — confirm before
	 * removing.
	 */
	mpi_reply = ioc->ctl_cmds.reply;

	/* copy out xdata to user */
	if (data_in_sz)
		memcpy(command->data_in_buf_ptr, data_in, data_in_sz);

	/* copy out reply message frame to user */
	if (command->max_reply_bytes) {
		sz = min_t(u32, command->max_reply_bytes, ioc->reply_sz);
		memcpy(command->reply_frame_buf_ptr, ioc->ctl_cmds.reply, sz);
	}

 issue_host_reset:
	if (issue_reset) {
		ret = -ENODATA;
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	}

 out:
	/* free memory associated with sg buffers */
	if (data_in)
		dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
		    data_in_dma);

	if (data_out)
		dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
		    data_out_dma);

	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;

 unlock_ctl_cmds:
	mutex_unlock(&ioc->ctl_cmds.mutex);

 unlock_pci_access:
	mutex_unlock(&ioc->pci_access_mutex);
	return ret;

}
EXPORT_SYMBOL(mpt3sas_send_mctp_passthru_req);

/**
 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
 * @file: (struct file)
 * @cmd: ioctl opcode
 * @arg: ?
3069 */ 3070 static long 3071 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3072 { 3073 long ret; 3074 3075 /* pass MPI25_VERSION | MPI26_VERSION value, 3076 * to indicate that this ioctl cmd 3077 * came from mpt3ctl ioctl device. 3078 */ 3079 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, 3080 MPI25_VERSION | MPI26_VERSION); 3081 return ret; 3082 } 3083 3084 /** 3085 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) 3086 * @file: (struct file) 3087 * @cmd: ioctl opcode 3088 * @arg: ? 3089 */ 3090 static long 3091 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 3092 { 3093 long ret; 3094 3095 /* pass MPI2_VERSION value, to indicate that this ioctl cmd 3096 * came from mpt2ctl ioctl device. 3097 */ 3098 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); 3099 return ret; 3100 } 3101 #ifdef CONFIG_COMPAT 3102 /** 3103 * _ctl_ioctl_compat - main ioctl entry point (compat) 3104 * @file: ? 3105 * @cmd: ? 3106 * @arg: ? 3107 * 3108 * This routine handles 32 bit applications in 64bit os. 3109 */ 3110 static long 3111 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 3112 { 3113 long ret; 3114 3115 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, 3116 MPI25_VERSION | MPI26_VERSION); 3117 return ret; 3118 } 3119 3120 /** 3121 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat) 3122 * @file: ? 3123 * @cmd: ? 3124 * @arg: ? 3125 * 3126 * This routine handles 32 bit applications in 64bit os. 3127 */ 3128 static long 3129 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 3130 { 3131 long ret; 3132 3133 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION); 3134 return ret; 3135 } 3136 #endif 3137 3138 /* scsi host attributes */ 3139 /** 3140 * version_fw_show - firmware version 3141 * @cdev: pointer to embedded class device 3142 * @attr: ? 
3143 * @buf: the buffer returned 3144 * 3145 * A sysfs 'read-only' shost attribute. 3146 */ 3147 static ssize_t 3148 version_fw_show(struct device *cdev, struct device_attribute *attr, 3149 char *buf) 3150 { 3151 struct Scsi_Host *shost = class_to_shost(cdev); 3152 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3153 3154 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 3155 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, 3156 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, 3157 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, 3158 ioc->facts.FWVersion.Word & 0x000000FF); 3159 } 3160 static DEVICE_ATTR_RO(version_fw); 3161 3162 /** 3163 * version_bios_show - bios version 3164 * @cdev: pointer to embedded class device 3165 * @attr: ? 3166 * @buf: the buffer returned 3167 * 3168 * A sysfs 'read-only' shost attribute. 3169 */ 3170 static ssize_t 3171 version_bios_show(struct device *cdev, struct device_attribute *attr, 3172 char *buf) 3173 { 3174 struct Scsi_Host *shost = class_to_shost(cdev); 3175 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 3176 3177 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); 3178 3179 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", 3180 (version & 0xFF000000) >> 24, 3181 (version & 0x00FF0000) >> 16, 3182 (version & 0x0000FF00) >> 8, 3183 version & 0x000000FF); 3184 } 3185 static DEVICE_ATTR_RO(version_bios); 3186 3187 /** 3188 * version_mpi_show - MPI (message passing interface) version 3189 * @cdev: pointer to embedded class device 3190 * @attr: ? 3191 * @buf: the buffer returned 3192 * 3193 * A sysfs 'read-only' shost attribute. 
 */
static ssize_t
version_mpi_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* MsgVersion from IOC Facts; high byte of HeaderVersion is the
	 * header dev level. */
	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
}
static DEVICE_ATTR_RO(version_mpi);

/**
 * version_product_show - product name
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
version_product_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* ChipName is a fixed-size field in Manufacturing Page 0, hence
	 * the 16-byte cap rather than PAGE_SIZE. */
	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
}
static DEVICE_ATTR_RO(version_product);

/**
 * version_nvdata_persistent_show - nvdata persistent version
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
version_nvdata_persistent_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n",
	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
}
static DEVICE_ATTR_RO(version_nvdata_persistent);

/**
 * version_nvdata_default_show - nvdata default version
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
version_nvdata_default_show(struct device *cdev, struct device_attribute
	*attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n",
	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
}
static DEVICE_ATTR_RO(version_nvdata_default);

/**
 * board_name_show - board name
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
board_name_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* BoardName is a fixed-size Manufacturing Page 0 field; cap at 16 */
	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
}
static DEVICE_ATTR_RO(board_name);

/**
 * board_assembly_show - board assembly name
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
board_assembly_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
}
static DEVICE_ATTR_RO(board_assembly);

/**
 * board_tracer_show - board tracer number
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
board_tracer_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
}
static DEVICE_ATTR_RO(board_tracer);

/**
 * io_delay_show - io missing delay
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is for the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
io_delay_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
}
static DEVICE_ATTR_RO(io_delay);

/**
 * device_delay_show - device missing delay
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is for the firmware implementation for debouncing device
 * removal events.
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
device_delay_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
}
static DEVICE_ATTR_RO(device_delay);

/**
 * fw_queue_depth_show - global credits
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is firmware queue depth limit
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* RequestCredit is reported by the IOC in the IOC Facts reply */
	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
}
static DEVICE_ATTR_RO(fw_queue_depth);

/**
 * host_sas_address_show - sas address
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is the controller sas address
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
host_sas_address_show(struct device *cdev, struct device_attribute *attr,
	char *buf)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    (unsigned long long)ioc->sas_hba.sas_address);
}
static DEVICE_ATTR_RO(host_sas_address);

/**
 * logging_level_show - logging level
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
logging_level_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
}
/* store: parse a hex bitmask and install it as the new logging level */
static ssize_t
logging_level_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%x", &val) != 1)
		return -EINVAL;

	ioc->logging_level = val;
	ioc_info(ioc, "logging_level=%08xh\n",
	    ioc->logging_level);
	/* returns strlen(buf) instead of the conventional count; sysfs
	 * passes a NUL-terminated buffer, so this consumes the write */
	return strlen(buf);
}
static DEVICE_ATTR_RW(logging_level);

/**
 * fwfault_debug_show - show/store fwfault_debug
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * mpt3sas_fwfault_debug is command line option
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
}
/* store: parse a decimal value and install it as the new fwfault_debug */
static ssize_t
fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->fwfault_debug = val;
	ioc_info(ioc, "fwfault_debug=%d\n",
	    ioc->fwfault_debug);
	/* returns strlen(buf) instead of the conventional count; sysfs
	 * passes a NUL-terminated buffer, so this consumes the write */
	return strlen(buf);
}
static DEVICE_ATTR_RW(fwfault_debug);

/**
 * ioc_reset_count_show - ioc reset count
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is the number of times the controller has been reset
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
}
static DEVICE_ATTR_RO(ioc_reset_count);

/**
 * reply_queue_count_show - number of reply queues
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is number of reply queues
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
reply_queue_count_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	u8 reply_queue_count;
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* multiple reply queues only when the IOC supports MSI-X index
	 * capability and MSI-X is actually enabled; otherwise report 1 */
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
		reply_queue_count = ioc->reply_queue_count;
	else
		reply_queue_count = 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
}
static DEVICE_ATTR_RO(reply_queue_count);

/**
 * BRM_status_show - Backup Rail Monitor Status
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * This is the backup rail monitor status, Warpdrive only
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
BRM_status_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	Mpi2IOUnitPage3_t io_unit_pg3;
	Mpi2ConfigReply_t mpi_reply;
	u16 backup_rail_monitor_status = 0;
	u16 ioc_status;
	int sz;
	ssize_t rc = 0;

	if (!ioc->is_warpdrive) {
		ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
		    __func__);
		return 0;
	}
	/* pci_access_mutex lock acquired by sysfs show path */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->pci_error_recovery || ioc->remove_host)
		goto out;

	sz = sizeof(io_unit_pg3);
	memset(&io_unit_pg3, 0, sz);

	if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) !=
	    0) {
		ioc_err(ioc, "%s: failed reading iounit_pg3\n",
		    __func__);
		rc = -EINVAL;
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
		    __func__, ioc_status);
		rc = -EINVAL;
		goto out;
	}

	if (io_unit_pg3.GPIOCount < 25) {
		ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n",
		    __func__, io_unit_pg3.GPIOCount);
		rc = -EINVAL;
		goto out;
	}

	/* BRM status is in bit zero of GPIOVal[24] */
	backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));

 out:
	mutex_unlock(&ioc->pci_access_mutex);
	return rc;
}
static DEVICE_ATTR_RO(BRM_status);

/* Header layout at the start of a firmware diag buffer */
struct DIAG_BUFFER_START {
	__le32 Size;
	__le32 DiagVersion;
	u8 BufferType;
	u8 Reserved[3];
	__le32 Reserved1;
	__le32 Reserved2;
	__le32 Reserved3;
};

/**
 * host_trace_buffer_size_show - host buffer size (trace only)
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
host_trace_buffer_size_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 size = 0;
	struct DIAG_BUFFER_START *request_data;

	/* no trace buffer allocated */
	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
		    __func__);
		return 0;
	}

	/* buffer exists but was never registered with the firmware */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
		    __func__);
		return 0;
	}

	/* only report the size if the buffer carries a valid header:
	 * a known DiagVersion and the 0x4742444c signature in Reserved3
	 * (ASCII "GBDL" read big-endian — presumably a firmware trace
	 * signature; TODO confirm against the firmware spec) */
	request_data = (struct DIAG_BUFFER_START *)
	    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
	if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
	    le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
	    le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
	    le32_to_cpu(request_data->Reserved3) == 0x4742444c)
		size = le32_to_cpu(request_data->Size);

	/* cache the size for subsequent host_trace_buffer reads */
	ioc->ring_buffer_sz = size;
	return snprintf(buf, PAGE_SIZE, "%d\n", size);
}
static DEVICE_ATTR_RO(host_trace_buffer_size);

/**
 * host_trace_buffer_show - firmware ring buffer (trace only)
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 *
 * You will only be able to read 4k bytes of ring buffer at a time.
 * In order to read beyond 4k bytes, you will have to write out the
 * offset to the same attribute, it will move the pointer.
 */
static ssize_t
host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	void *request_data;
	u32 size;

	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
		    __func__);
		return 0;
	}

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
		ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
		    __func__);
		return 0;
	}

	/* offset is user-controlled via host_trace_buffer_store; reads
	 * past the cached buffer size return EOF */
	if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
		return 0;

	/* clamp each read to slightly under one page */
	size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
	/* index 0 relies on MPI2_DIAG_BUF_TYPE_TRACE being 0 */
	request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
	memcpy(buf, request_data, size);
	return size;
}

/* store: set the read offset used by host_trace_buffer_show */
static ssize_t
host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	int val = 0;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	ioc->ring_buffer_offset = val;
	return strlen(buf);
}
static DEVICE_ATTR_RW(host_trace_buffer);


/*****************************************/

/**
 * host_trace_buffer_enable_show - firmware ring buffer (trace only)
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 *
 * This is a mechanism to post/release host_trace_buffers
 */
static ssize_t
host_trace_buffer_enable_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	/* "off": no buffer or not registered; "release": registered but
	 * released back by firmware; "post": registered and active */
	if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
	    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
		return snprintf(buf, PAGE_SIZE, "off\n");
	else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))
		return snprintf(buf, PAGE_SIZE, "release\n");
	else
		return snprintf(buf, PAGE_SIZE, "post\n");
}

/* store: accept "post" or "release" to register or release the firmware
 * host trace buffer; anything else is silently ignored */
static ssize_t
host_trace_buffer_enable_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	char str[10] = "";
	struct mpt3_diag_register diag_register;
	u8 issue_reset = 0;

	/* don't allow post/release to occur while recovery is active */
	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery || ioc->is_driver_loading)
		return -EBUSY;

	if (sscanf(buf, "%9s", str) != 1)
		return -EINVAL;

	if (!strcmp(str, "post")) {
		/* exit out if host buffers are already posted */
		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
			goto out;
		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
		ioc_info(ioc, "posting host trace buffers\n");
		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;

		if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 &&
		    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) {
			/* post the same buffer allocated previously */
			diag_register.requested_buffer_size =
			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE];
		} else {
			/*
			 * Free the diag buffer memory which was previously
			 * allocated by an application.
			 */
			if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0)
			    &&
			    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
			    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
				dma_free_coherent(&ioc->pdev->dev,
				    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
				    ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
				ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
				    NULL;
			}

			/* default trace buffer size: 1 MB */
			diag_register.requested_buffer_size = (1024 * 1024);
		}

		diag_register.unique_id =
		    (ioc->hba_mpi_version_belonged == MPI2_VERSION) ?
		    (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID);
		ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
		_ctl_diag_register_2(ioc, &diag_register);
		if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) {
			ioc_info(ioc,
			    "Trace buffer %d KB allocated through sysfs\n",
			    diag_register.requested_buffer_size>>10);
			if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
				ioc->diag_buffer_status[
				    MPI2_DIAG_BUF_TYPE_TRACE] |=
				    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
		}
	} else if (!strcmp(str, "release")) {
		/* exit out if host buffers are already released */
		if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
			goto out;
		if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
		    MPT3_DIAG_BUFFER_IS_RELEASED))
			goto out;
		ioc_info(ioc, "releasing host trace buffer\n");
		ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
		    &issue_reset);
	}

 out:
	return strlen(buf);
}
static DEVICE_ATTR_RW(host_trace_buffer_enable);

/*********** diagnostic trigger support *********************************/

/**
 * diag_trigger_master_show - show the diag_trigger_master attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_master_show(struct device *cdev,
	struct device_attribute *attr, char *buf)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	/* copy the raw trigger structure to user space under the lock */
	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
	memcpy(buf, &ioc->diag_trigger_master, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * diag_trigger_master_store - store the diag_trigger_master attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer containing the raw trigger structure
 * @count: number of bytes written
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_master_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_MASTER_TRIGGER_T *master_tg;
	unsigned long flags;
	ssize_t rc;
	bool set = 1;

	/* never copy more than the structure size */
	rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);

	/* when the controller supports persistent trigger pages, mirror
	 * the update into driver trigger page 1 (clear when MasterData
	 * is zero) */
	if (ioc->supports_trigger_pages) {
		master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
		    GFP_KERNEL);
		if (!master_tg)
			return -ENOMEM;

		memcpy(master_tg, buf, rc);
		if (!master_tg->MasterData)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
		    set)) {
			kfree(master_tg);
			return -EFAULT;
		}
		kfree(master_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_master, 0,
	    sizeof(struct SL_WH_MASTER_TRIGGER_T));
	memcpy(&ioc->diag_trigger_master, buf, rc);
	/* FW fault and adapter reset triggers are always forced on */
	ioc->diag_trigger_master.MasterData |=
	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}
static DEVICE_ATTR_RW(diag_trigger_master);


/**
 * diag_trigger_event_show - show the diag_trigger_event attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_event_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	/* copy the raw trigger structure to user space under the lock */
	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
	memcpy(buf, &ioc->diag_trigger_event, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * diag_trigger_event_store - store the diag_trigger_event attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer containing the raw trigger structure
 * @count: number of bytes written
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_event_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)

{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_EVENT_TRIGGERS_T *event_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the structure size */
	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
	/* mirror the update into driver trigger page 2 when supported
	 * (clear when there are no valid entries) */
	if (ioc->supports_trigger_pages) {
		event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
		    GFP_KERNEL);
		if (!event_tg)
			return -ENOMEM;

		memcpy(event_tg, buf, sz);
		if (!event_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
		    set)) {
			kfree(event_tg);
			return -EFAULT;
		}
		kfree(event_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);

	memset(&ioc->diag_trigger_event, 0,
	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
	memcpy(&ioc->diag_trigger_event, buf, sz);
	/* clamp user-supplied entry count to the table capacity */
	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR_RW(diag_trigger_event);


/**
 * diag_trigger_scsi_show - show the diag_trigger_scsi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_scsi_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	/* copy the raw trigger structure to user space under the lock */
	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
	memcpy(buf, &ioc->diag_trigger_scsi, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * diag_trigger_scsi_store - store the diag_trigger_scsi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer containing the raw trigger structure
 * @count: number of bytes written
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_scsi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the structure size */
	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
	/* mirror the update into driver trigger page 3 when supported
	 * (clear when there are no valid entries) */
	if (ioc->supports_trigger_pages) {
		scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
		    GFP_KERNEL);
		if (!scsi_tg)
			return -ENOMEM;

		memcpy(scsi_tg, buf, sz);
		if (!scsi_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
		    set)) {
			kfree(scsi_tg);
			return -EFAULT;
		}
		kfree(scsi_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);

	memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
	memcpy(&ioc->diag_trigger_scsi, buf, sz);
	/* clamp user-supplied entry count to the table capacity */
	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}
static DEVICE_ATTR_RW(diag_trigger_scsi);


/**
 * diag_trigger_mpi_show - show the diag_trigger_mpi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer returned
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_mpi_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	unsigned long flags;
	ssize_t rc;

	/* copy the raw trigger structure to user space under the lock */
	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
	memcpy(buf, &ioc->diag_trigger_mpi, rc);
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return rc;
}

/**
 * diag_trigger_mpi_store - store the diag_trigger_mpi attribute
 * @cdev: pointer to embedded class device
 * @attr: device attribute (unused)
 * @buf: the buffer containing the raw trigger structure
 * @count: number of bytes written
 *
 * A sysfs 'read/write' shost attribute.
 */
static ssize_t
diag_trigger_mpi_store(struct device *cdev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
	unsigned long flags;
	ssize_t sz;
	bool set = 1;

	/* never copy more than the structure size */
	sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
	/* mirror the update into driver trigger page 4 when supported
	 * (clear when there are no valid entries) */
	if (ioc->supports_trigger_pages) {
		mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
		    GFP_KERNEL);
		if (!mpi_tg)
			return -ENOMEM;

		memcpy(mpi_tg, buf, sz);
		if (!mpi_tg->ValidEntries)
			set = 0;
		if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
		    set)) {
			kfree(mpi_tg);
			return -EFAULT;
		}
		kfree(mpi_tg);
	}

	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
	memset(&ioc->diag_trigger_mpi, 0,
	    sizeof(ioc->diag_trigger_mpi));
	memcpy(&ioc->diag_trigger_mpi, buf, sz);
	/* clamp user-supplied entry count to the table capacity */
	if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
		ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
	return sz;
}

static DEVICE_ATTR_RW(diag_trigger_mpi);

/*********** diagnostic trigger support *** END ****************************/

/*****************************************/

/**
 * drv_support_bitmap_show - driver supported feature bitmap
 * @cdev: pointer to embedded class device
 * @attr: unused
 * @buf: the buffer returned
 *
 * A sysfs 'read-only' shost attribute.
 */
static ssize_t
drv_support_bitmap_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap);
}
static DEVICE_ATTR_RO(drv_support_bitmap);

/**
 * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled
 * @cdev: pointer to embedded class device
 * @attr: unused
 * @buf: the buffer returned
 *
 * A sysfs read/write shost attribute. This attribute is used to set the
 * targets queue depth to HBA IO queue depth if this attribute is enabled.
 */
static ssize_t
enable_sdev_max_qd_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);

	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd);
}

/**
 * enable_sdev_max_qd_store - Enable/disable sdev max qd
 * @cdev: pointer to embedded class device
 * @attr: unused
 * @buf: the buffer returned
 * @count: unused
 *
 * A sysfs read/write shost attribute. This attribute is used to set the
 * targets queue depth to HBA IO queue depth if this attribute is enabled.
 * If this attribute is disabled then targets will have corresponding default
 * queue depth.
4191 */ 4192 static ssize_t 4193 enable_sdev_max_qd_store(struct device *cdev, 4194 struct device_attribute *attr, const char *buf, size_t count) 4195 { 4196 struct Scsi_Host *shost = class_to_shost(cdev); 4197 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 4198 struct MPT3SAS_DEVICE *sas_device_priv_data; 4199 struct MPT3SAS_TARGET *sas_target_priv_data; 4200 int val = 0; 4201 struct scsi_device *sdev; 4202 struct _raid_device *raid_device; 4203 int qdepth; 4204 4205 if (kstrtoint(buf, 0, &val) != 0) 4206 return -EINVAL; 4207 4208 switch (val) { 4209 case 0: 4210 ioc->enable_sdev_max_qd = 0; 4211 shost_for_each_device(sdev, ioc->shost) { 4212 sas_device_priv_data = sdev->hostdata; 4213 if (!sas_device_priv_data) 4214 continue; 4215 sas_target_priv_data = sas_device_priv_data->sas_target; 4216 if (!sas_target_priv_data) 4217 continue; 4218 4219 if (sas_target_priv_data->flags & 4220 MPT_TARGET_FLAGS_VOLUME) { 4221 raid_device = 4222 mpt3sas_raid_device_find_by_handle(ioc, 4223 sas_target_priv_data->handle); 4224 4225 switch (raid_device->volume_type) { 4226 case MPI2_RAID_VOL_TYPE_RAID0: 4227 if (raid_device->device_info & 4228 MPI2_SAS_DEVICE_INFO_SSP_TARGET) 4229 qdepth = 4230 MPT3SAS_SAS_QUEUE_DEPTH; 4231 else 4232 qdepth = 4233 MPT3SAS_SATA_QUEUE_DEPTH; 4234 break; 4235 case MPI2_RAID_VOL_TYPE_RAID1E: 4236 case MPI2_RAID_VOL_TYPE_RAID1: 4237 case MPI2_RAID_VOL_TYPE_RAID10: 4238 case MPI2_RAID_VOL_TYPE_UNKNOWN: 4239 default: 4240 qdepth = MPT3SAS_RAID_QUEUE_DEPTH; 4241 } 4242 } else if (sas_target_priv_data->flags & 4243 MPT_TARGET_FLAGS_PCIE_DEVICE) 4244 qdepth = ioc->max_nvme_qd; 4245 else 4246 qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ? 
4247 ioc->max_wideport_qd : ioc->max_narrowport_qd; 4248 4249 mpt3sas_scsih_change_queue_depth(sdev, qdepth); 4250 } 4251 break; 4252 case 1: 4253 ioc->enable_sdev_max_qd = 1; 4254 shost_for_each_device(sdev, ioc->shost) 4255 mpt3sas_scsih_change_queue_depth(sdev, 4256 shost->can_queue); 4257 break; 4258 default: 4259 return -EINVAL; 4260 } 4261 4262 return strlen(buf); 4263 } 4264 static DEVICE_ATTR_RW(enable_sdev_max_qd); 4265 4266 static struct attribute *mpt3sas_host_attrs[] = { 4267 &dev_attr_version_fw.attr, 4268 &dev_attr_version_bios.attr, 4269 &dev_attr_version_mpi.attr, 4270 &dev_attr_version_product.attr, 4271 &dev_attr_version_nvdata_persistent.attr, 4272 &dev_attr_version_nvdata_default.attr, 4273 &dev_attr_board_name.attr, 4274 &dev_attr_board_assembly.attr, 4275 &dev_attr_board_tracer.attr, 4276 &dev_attr_io_delay.attr, 4277 &dev_attr_device_delay.attr, 4278 &dev_attr_logging_level.attr, 4279 &dev_attr_fwfault_debug.attr, 4280 &dev_attr_fw_queue_depth.attr, 4281 &dev_attr_host_sas_address.attr, 4282 &dev_attr_ioc_reset_count.attr, 4283 &dev_attr_host_trace_buffer_size.attr, 4284 &dev_attr_host_trace_buffer.attr, 4285 &dev_attr_host_trace_buffer_enable.attr, 4286 &dev_attr_reply_queue_count.attr, 4287 &dev_attr_diag_trigger_master.attr, 4288 &dev_attr_diag_trigger_event.attr, 4289 &dev_attr_diag_trigger_scsi.attr, 4290 &dev_attr_diag_trigger_mpi.attr, 4291 &dev_attr_drv_support_bitmap.attr, 4292 &dev_attr_BRM_status.attr, 4293 &dev_attr_enable_sdev_max_qd.attr, 4294 NULL, 4295 }; 4296 4297 static const struct attribute_group mpt3sas_host_attr_group = { 4298 .attrs = mpt3sas_host_attrs 4299 }; 4300 4301 const struct attribute_group *mpt3sas_host_groups[] = { 4302 &mpt3sas_host_attr_group, 4303 NULL 4304 }; 4305 4306 /* device attributes */ 4307 4308 /** 4309 * sas_address_show - sas address 4310 * @dev: pointer to embedded class device 4311 * @attr: ? 
4312 * @buf: the buffer returned 4313 * 4314 * This is the sas address for the target 4315 * 4316 * A sysfs 'read-only' shost attribute. 4317 */ 4318 static ssize_t 4319 sas_address_show(struct device *dev, struct device_attribute *attr, 4320 char *buf) 4321 { 4322 struct scsi_device *sdev = to_scsi_device(dev); 4323 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 4324 4325 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", 4326 (unsigned long long)sas_device_priv_data->sas_target->sas_address); 4327 } 4328 static DEVICE_ATTR_RO(sas_address); 4329 4330 /** 4331 * sas_device_handle_show - device handle 4332 * @dev: pointer to embedded class device 4333 * @attr: ? 4334 * @buf: the buffer returned 4335 * 4336 * This is the firmware assigned device handle 4337 * 4338 * A sysfs 'read-only' shost attribute. 4339 */ 4340 static ssize_t 4341 sas_device_handle_show(struct device *dev, struct device_attribute *attr, 4342 char *buf) 4343 { 4344 struct scsi_device *sdev = to_scsi_device(dev); 4345 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 4346 4347 return snprintf(buf, PAGE_SIZE, "0x%04x\n", 4348 sas_device_priv_data->sas_target->handle); 4349 } 4350 static DEVICE_ATTR_RO(sas_device_handle); 4351 4352 /** 4353 * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority 4354 * @dev: pointer to embedded device 4355 * @attr: sas_ncq_prio_supported attribute descriptor 4356 * @buf: the buffer returned 4357 * 4358 * A sysfs 'read-only' sdev attribute, only works with SATA 4359 */ 4360 static ssize_t 4361 sas_ncq_prio_supported_show(struct device *dev, 4362 struct device_attribute *attr, char *buf) 4363 { 4364 struct scsi_device *sdev = to_scsi_device(dev); 4365 4366 return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev)); 4367 } 4368 static DEVICE_ATTR_RO(sas_ncq_prio_supported); 4369 4370 /** 4371 * sas_ncq_prio_enable_show - send prioritized io commands to device 4372 * @dev: pointer to embedded device 4373 * @attr: ? 
4374 * @buf: the buffer returned 4375 * 4376 * A sysfs 'read/write' sdev attribute, only works with SATA 4377 */ 4378 static ssize_t 4379 sas_ncq_prio_enable_show(struct device *dev, 4380 struct device_attribute *attr, char *buf) 4381 { 4382 struct scsi_device *sdev = to_scsi_device(dev); 4383 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 4384 4385 return snprintf(buf, PAGE_SIZE, "%d\n", 4386 sas_device_priv_data->ncq_prio_enable); 4387 } 4388 4389 static ssize_t 4390 sas_ncq_prio_enable_store(struct device *dev, 4391 struct device_attribute *attr, 4392 const char *buf, size_t count) 4393 { 4394 struct scsi_device *sdev = to_scsi_device(dev); 4395 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; 4396 bool ncq_prio_enable = 0; 4397 4398 if (kstrtobool(buf, &ncq_prio_enable)) 4399 return -EINVAL; 4400 4401 if (!sas_ata_ncq_prio_supported(sdev)) 4402 return -EINVAL; 4403 4404 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; 4405 return strlen(buf); 4406 } 4407 static DEVICE_ATTR_RW(sas_ncq_prio_enable); 4408 4409 static struct attribute *mpt3sas_dev_attrs[] = { 4410 &dev_attr_sas_address.attr, 4411 &dev_attr_sas_device_handle.attr, 4412 &dev_attr_sas_ncq_prio_supported.attr, 4413 &dev_attr_sas_ncq_prio_enable.attr, 4414 NULL, 4415 }; 4416 4417 static const struct attribute_group mpt3sas_dev_attr_group = { 4418 .attrs = mpt3sas_dev_attrs 4419 }; 4420 4421 const struct attribute_group *mpt3sas_dev_groups[] = { 4422 &mpt3sas_dev_attr_group, 4423 NULL 4424 }; 4425 4426 /* file operations table for mpt3ctl device */ 4427 static const struct file_operations ctl_fops = { 4428 .owner = THIS_MODULE, 4429 .unlocked_ioctl = _ctl_ioctl, 4430 .poll = _ctl_poll, 4431 .fasync = _ctl_fasync, 4432 #ifdef CONFIG_COMPAT 4433 .compat_ioctl = _ctl_ioctl_compat, 4434 #endif 4435 }; 4436 4437 /* file operations table for mpt2ctl device */ 4438 static const struct file_operations ctl_gen2_fops = { 4439 .owner = THIS_MODULE, 4440 .unlocked_ioctl = 
_ctl_mpt2_ioctl, 4441 .poll = _ctl_poll, 4442 .fasync = _ctl_fasync, 4443 #ifdef CONFIG_COMPAT 4444 .compat_ioctl = _ctl_mpt2_ioctl_compat, 4445 #endif 4446 }; 4447 4448 static struct miscdevice ctl_dev = { 4449 .minor = MPT3SAS_MINOR, 4450 .name = MPT3SAS_DEV_NAME, 4451 .fops = &ctl_fops, 4452 }; 4453 4454 static struct miscdevice gen2_ctl_dev = { 4455 .minor = MPT2SAS_MINOR, 4456 .name = MPT2SAS_DEV_NAME, 4457 .fops = &ctl_gen2_fops, 4458 }; 4459 4460 /** 4461 * mpt3sas_ctl_init - main entry point for ctl. 4462 * @hbas_to_enumerate: ? 4463 */ 4464 void 4465 mpt3sas_ctl_init(ushort hbas_to_enumerate) 4466 { 4467 async_queue = NULL; 4468 4469 /* Don't register mpt3ctl ioctl device if 4470 * hbas_to_enumarate is one. 4471 */ 4472 if (hbas_to_enumerate != 1) 4473 if (misc_register(&ctl_dev) < 0) 4474 pr_err("%s can't register misc device [minor=%d]\n", 4475 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); 4476 4477 /* Don't register mpt3ctl ioctl device if 4478 * hbas_to_enumarate is two. 4479 */ 4480 if (hbas_to_enumerate != 2) 4481 if (misc_register(&gen2_ctl_dev) < 0) 4482 pr_err("%s can't register misc device [minor=%d]\n", 4483 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); 4484 4485 init_waitqueue_head(&ctl_poll_wait); 4486 } 4487 4488 /** 4489 * mpt3sas_ctl_release - release dma for ctl 4490 * @ioc: per adapter object 4491 */ 4492 void 4493 mpt3sas_ctl_release(struct MPT3SAS_ADAPTER *ioc) 4494 { 4495 int i; 4496 4497 /* free memory associated to diag buffers */ 4498 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { 4499 if (!ioc->diag_buffer[i]) 4500 continue; 4501 dma_free_coherent(&ioc->pdev->dev, 4502 ioc->diag_buffer_sz[i], 4503 ioc->diag_buffer[i], 4504 ioc->diag_buffer_dma[i]); 4505 ioc->diag_buffer[i] = NULL; 4506 ioc->diag_buffer_status[i] = 0; 4507 } 4508 4509 kfree(ioc->event_log); 4510 } 4511 4512 /** 4513 * mpt3sas_ctl_exit - exit point for ctl 4514 * @hbas_to_enumerate: ? 
4515 */ 4516 void 4517 mpt3sas_ctl_exit(ushort hbas_to_enumerate) 4518 { 4519 4520 if (hbas_to_enumerate != 1) 4521 misc_deregister(&ctl_dev); 4522 if (hbas_to_enumerate != 2) 4523 misc_deregister(&gen2_ctl_dev); 4524 } 4525