// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"
#include <linux/bsg-lib.h>
#include <uapi/scsi/scsi_bsg_mpi3mr.h>

/**
 * mpi3mr_bsg_pel_abort - sends PEL abort request
 * @mrioc: Adapter instance reference
 *
 * This function sends PEL abort request to the firmware through
 * admin request queue.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc)
{
	struct mpi3_pel_req_action_abort pel_abort_req;
	struct mpi3_pel_reply *pel_reply;
	int retval = 0;
	u16 pe_log_status;

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -1;
	}
	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -1;
	}

	memset(&pel_abort_req, 0, sizeof(pel_abort_req));
	mutex_lock(&mrioc->pel_abort_cmd.mutex);
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) {
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->pel_abort_cmd.mutex);
		return -1;
	}
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING;
	mrioc->pel_abort_cmd.is_waiting = 1;
	mrioc->pel_abort_cmd.callback = NULL;
	pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT);
	pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG;
	pel_abort_req.action = MPI3_PEL_ACTION_ABORT;
	pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT);

	mrioc->pel_abort_requested = 1;
	init_completion(&mrioc->pel_abort_cmd.done);
	retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req,
	    sizeof(pel_abort_req), 0);
	if (retval) {
		retval = -1;
		dprint_bsg_err(mrioc, "%s: admin request post failed\n",
		    __func__);
		mrioc->pel_abort_requested = 0;
		goto out_unlock;
	}

	wait_for_completion_timeout(&mrioc->pel_abort_cmd.done,
	    (MPI3MR_INTADMCMD_TIMEOUT * HZ));
	if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->pel_abort_cmd.is_waiting = 0;
		dprint_bsg_err(mrioc, "%s: command timed out\n", __func__);
		if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1);
		retval = -1;
		goto out_unlock;
	}
	if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_err(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__, (mrioc->pel_abort_cmd.ioc_status &
		    MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->pel_abort_cmd.ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}
	if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) {
		pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply;
		pe_log_status = le16_to_cpu(pel_reply->pe_log_status);
		if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) {
			dprint_bsg_err(mrioc,
			    "%s: command failed, pel_status(0x%04x)\n",
			    __func__, pe_log_status);
			retval = -1;
		}
	}

out_unlock:
	mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->pel_abort_cmd.mutex);
	return retval;
}
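
/*
 * Note: the flow above (marking the driver command MPI3MR_CMD_PENDING,
 * posting it to the admin request queue, waiting on the completion with
 * a timeout and escalating to mpi3mr_soft_reset_handler() if it never
 * completes) is the internal command pattern this file also uses for
 * the BSG pass-through path in mpi3mr_bsg_process_mpt_cmds() below.
 */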

/**
 * mpi3mr_bsg_verify_adapter - verify adapter number is valid
 * @ioc_number: Adapter number
 *
 * This function returns the adapter instance pointer for the given
 * adapter number. If the adapter number does not match any adapter
 * in the driver's adapter list, NULL is returned.
 *
 * Return: adapter instance reference
 */
static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number)
{
	struct mpi3mr_ioc *mrioc = NULL;

	spin_lock(&mrioc_list_lock);
	list_for_each_entry(mrioc, &mrioc_list, list) {
		if (mrioc->id == ioc_number) {
			spin_unlock(&mrioc_list_lock);
			return mrioc;
		}
	}
	spin_unlock(&mrioc_list_lock);
	return NULL;
}

/**
 * mpi3mr_enable_logdata - Handler for log data enable
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function enables log data caching in the driver if it is not
 * already enabled and returns the maximum number of log data entries
 * that can be cached in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_logdata_enable logdata_enable;

	if (!mrioc->logdata_buf) {
		mrioc->logdata_entry_sz =
		    (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4))
		    + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ;
		mrioc->logdata_buf_idx = 0;
		mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES,
		    mrioc->logdata_entry_sz, GFP_KERNEL);

		if (!mrioc->logdata_buf)
			return -ENOMEM;
	}

	memset(&logdata_enable, 0, sizeof(logdata_enable));
	logdata_enable.max_entries =
		MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	if (job->request_payload.payload_len >= sizeof(logdata_enable)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &logdata_enable, sizeof(logdata_enable));
		return 0;
	}

	return -EINVAL;
}

/**
 * mpi3mr_get_logdata - Handler for get log data
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function copies the log data entries to the user buffer
 * when log caching is enabled in the driver.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz;

	if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz))
		return -EINVAL;

	num_entries = job->request_payload.payload_len / entry_sz;
	if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES)
		num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES;
	sz = num_entries * entry_sz;

	if (job->request_payload.payload_len >= sz) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    mrioc->logdata_buf, sz);
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver
 * @mrioc: Adapter instance reference
 * @job: BSG job pointer
 *
 * This function is the handler for the PEL enable driver command.
 * It validates the application-given class and locale and, if
 * required, aborts the existing PEL wait request and/or issues a
 * new PEL wait request to the firmware and returns.
 *
 * Return: 0 on success and proper error codes on failure.
 */
static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_bsg_out_pel_enable pel_enable;
	u8 issue_pel_wait;
	u8 tmp_class;
	u16 tmp_locale;

	if (job->request_payload.payload_len != sizeof(pel_enable)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return rval;
	}

	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		return -EFAULT;
	}

	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		return -EAGAIN;
	}

	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		return -EAGAIN;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &pel_enable, sizeof(pel_enable));

	if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) {
		dprint_bsg_err(mrioc, "%s: out of range class %d sent\n",
			__func__, pel_enable.pel_class);
		rval = 0;
		goto out;
	}
	if (!mrioc->pel_enabled)
		issue_pel_wait = 1;
	else {
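		/*
		 * Already enabled: a new PEL wait request is needed only
		 * when the application asks for more than what is armed.
		 * The locale test below is a subset check; for example, a
		 * current locale of 0x00ff covers a requested locale of
		 * 0x0003 (no reissue), while a requested locale of 0x0100
		 * forces an abort and a reissue with the merged locale.
		 */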
		if ((mrioc->pel_class <= pel_enable.pel_class) &&
		    !((mrioc->pel_locale & pel_enable.pel_locale) ^
		      pel_enable.pel_locale)) {
			issue_pel_wait = 0;
			rval = 0;
		} else {
			pel_enable.pel_locale |= mrioc->pel_locale;

			if (mrioc->pel_class < pel_enable.pel_class)
				pel_enable.pel_class = mrioc->pel_class;

			rval = mpi3mr_bsg_pel_abort(mrioc);
			if (rval) {
				dprint_bsg_err(mrioc,
				    "%s: pel_abort failed, status(%ld)\n",
				    __func__, rval);
				goto out;
			}
			issue_pel_wait = 1;
		}
	}
	if (issue_pel_wait) {
		tmp_class = mrioc->pel_class;
		tmp_locale = mrioc->pel_locale;
		mrioc->pel_class = pel_enable.pel_class;
		mrioc->pel_locale = pel_enable.pel_locale;
		mrioc->pel_enabled = 1;
		rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL);
		if (rval) {
			mrioc->pel_class = tmp_class;
			mrioc->pel_locale = tmp_locale;
			mrioc->pel_enabled = 0;
			dprint_bsg_err(mrioc,
			    "%s: pel get sequence number failed, status(%ld)\n",
			    __func__, rval);
		}
	}

out:
	return rval;
}

/**
 * mpi3mr_get_all_tgt_info - Get all target information
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the device handle, persistent ID, bus ID
 * and target ID of the driver-managed target devices to the user
 * provided buffer for the specific controller. This function also
 * provides the number of devices managed by the driver for the
 * specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	u16 num_devices = 0, i = 0, size;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_device_map_info *devmap_info = NULL;
	struct mpi3mr_all_tgt_info *alltgt_info = NULL;
	uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0;

	if (job->request_payload.payload_len < sizeof(u32)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		num_devices++;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if ((job->request_payload.payload_len <= sizeof(u64)) ||
	    list_empty(&mrioc->tgtdev_list)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &num_devices, sizeof(num_devices));
		return 0;
	}

	kern_entrylen = num_devices * sizeof(*devmap_info);
	size = sizeof(u64) + kern_entrylen;
	alltgt_info = kzalloc(size, GFP_KERNEL);
	if (!alltgt_info)
		return -ENOMEM;

	devmap_info = alltgt_info->dmi;
	memset((u8 *)devmap_info, 0xFF, kern_entrylen);
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (i < num_devices) {
			devmap_info[i].handle = tgtdev->dev_handle;
			devmap_info[i].perst_id = tgtdev->perst_id;
			if (tgtdev->host_exposed && tgtdev->starget) {
				devmap_info[i].target_id = tgtdev->starget->id;
				devmap_info[i].bus_id =
				    tgtdev->starget->channel;
			}
			i++;
		}
	}
	num_devices = i;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	alltgt_info->num_devices = num_devices;

	usr_entrylen = (job->request_payload.payload_len - sizeof(u64)) /
		sizeof(*devmap_info);
	usr_entrylen *= sizeof(*devmap_info);
	min_entrylen = min(usr_entrylen, kern_entrylen);

	sg_copy_from_buffer(job->request_payload.sg_list,
			    job->request_payload.sg_cnt,
			    alltgt_info, (min_entrylen + sizeof(u64)));
	kfree(alltgt_info);
	return 0;
}
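
/*
 * Layout of the user buffer filled above: a device count padded to
 * u64, followed by one struct mpi3mr_device_map_info per device.
 * When the user buffer is smaller than the kernel copy, the entries
 * are truncated at an entry boundary (min_entrylen).
 */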

/**
 * mpi3mr_get_change_count - Get topology change count
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function copies the topology change count provided in
 * events and cached in the driver to the user provided buffer
 * for the specific controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	struct mpi3mr_change_count chgcnt;

	memset(&chgcnt, 0, sizeof(chgcnt));
	chgcnt.change_count = mrioc->change_count;
	if (job->request_payload.payload_len >= sizeof(chgcnt)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &chgcnt, sizeof(chgcnt));
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_adp_reset - Issue controller reset
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function identifies the user provided reset type, issues
 * the appropriate reset to the controller, waits for the reset to
 * complete and the controller to reinitialize, and then returns.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	long rval = -EINVAL;
	u8 save_snapdump;
	struct mpi3mr_bsg_adp_reset adpreset;

	if (job->request_payload.payload_len !=
	    sizeof(adpreset)) {
		dprint_bsg_err(mrioc, "%s: invalid size argument\n",
		    __func__);
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  &adpreset, sizeof(adpreset));

	switch (adpreset.reset_type) {
	case MPI3MR_BSG_ADPRESET_SOFT:
		save_snapdump = 0;
		break;
	case MPI3MR_BSG_ADPRESET_DIAG_FAULT:
		save_snapdump = 1;
		break;
	default:
		dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n",
		    __func__, adpreset.reset_type);
		goto out;
	}

	rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP,
	    save_snapdump);

	if (rval)
		dprint_bsg_err(mrioc,
		    "%s: reset handler returned error(%ld) for reset type %d\n",
		    __func__, rval, adpreset.reset_type);
out:
	return rval;
}
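
/*
 * Note: both reset types are funneled into mpi3mr_soft_reset_handler()
 * and differ only in the snapdump flag: MPI3MR_BSG_ADPRESET_DIAG_FAULT
 * asks the handler to save a snapdump as part of the recovery while
 * MPI3MR_BSG_ADPRESET_SOFT does not.
 */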

/**
 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler
 * @mrioc: Adapter instance reference
 * @job: BSG job reference
 *
 * This function provides adapter information for the given
 * controller.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc,
	struct bsg_job *job)
{
	enum mpi3mr_iocstate ioc_state;
	struct mpi3mr_bsg_in_adpinfo adpinfo;

	memset(&adpinfo, 0, sizeof(adpinfo));
	adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY;
	adpinfo.pci_dev_id = mrioc->pdev->device;
	adpinfo.pci_dev_hw_rev = mrioc->pdev->revision;
	adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device;
	adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor;
	adpinfo.pci_bus = mrioc->pdev->bus->number;
	adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn);
	adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn);
	adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus);
	adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info,
	    sizeof(adpinfo.driver_info));

	if (job->request_payload.payload_len >= sizeof(adpinfo)) {
		sg_copy_from_buffer(job->request_payload.sg_list,
				    job->request_payload.sg_cnt,
				    &adpinfo, sizeof(adpinfo));
		return 0;
	}
	return -EINVAL;
}

/**
 * mpi3mr_bsg_process_drv_cmds - Driver Command handler
 * @job: BSG job reference
 *
 * This function is the top level handler for driver commands.
 * It does basic validation of the buffer, identifies the opcode
 * and dispatches to the correct sub-handler.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL;

	bsg_req = job->request;
	drvrcmd = &bsg_req->cmd.drvrcmd;

	mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) {
		rval = mpi3mr_bsg_populate_adpinfo(mrioc, job);
		return rval;
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex))
		return -ERESTARTSYS;

	switch (drvrcmd->opcode) {
	case MPI3MR_DRVBSG_OPCODE_ADPRESET:
		rval = mpi3mr_bsg_adp_reset(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO:
		rval = mpi3mr_get_all_tgt_info(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETCHGCNT:
		rval = mpi3mr_get_change_count(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE:
		rval = mpi3mr_enable_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_GETLOGDATA:
		rval = mpi3mr_get_logdata(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_PELENABLE:
		rval = mpi3mr_bsg_pel_enable(mrioc, job);
		break;
	case MPI3MR_DRVBSG_OPCODE_UNKNOWN:
	default:
		pr_err("%s: unsupported driver command opcode %d\n",
		    MPI3MR_DRIVER_NAME, drvrcmd->opcode);
		break;
	}
	mutex_unlock(&mrioc->bsg_cmds.mutex);
	return rval;
}
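
/*
 * MPI3MR_DRVBSG_OPCODE_ADPINFO is served before the mutex is taken
 * since it only reports cached adapter state; every other driver
 * command opcode is serialized through bsg_cmds.mutex.
 */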

/**
 * mpi3mr_total_num_ioctl_sges - Count number of SGEs required
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function returns the total number of data SGEs required,
 * including zero length SGEs and excluding the management request
 * and response buffers, for the given list of data buffer
 * descriptors.
 *
 * Return: Number of SGE elements needed
 */
static inline u16 mpi3mr_total_num_ioctl_sges(struct mpi3mr_buf_map *drv_bufs,
	u8 bufcnt)
{
	u16 i, sge_count = 0;

	for (i = 0; i < bufcnt; i++, drv_bufs++) {
		if (drv_bufs->data_dir == DMA_NONE ||
		    drv_bufs->kern_buf)
			continue;
		sge_count += drv_bufs->num_dma_desc;
		if (!drv_bufs->num_dma_desc)
			sge_count++;
	}
	return sge_count;
}
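
/*
 * For example, a data buffer split across three dma descriptors
 * contributes three SGEs, while a zero length data buffer
 * (num_dma_desc == 0) still counts as one SGE, since
 * mpi3mr_bsg_build_sgl() places a zero length element for it.
 */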

/**
 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands
 * @mrioc: Adapter instance reference
 * @mpi_req: MPI request
 * @sgl_offset: offset to start sgl in the MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 * @is_rmc: Does the buffer list have a management command buffer
 * @is_rmr: Does the buffer list have a management response buffer
 * @num_datasges: Number of data buffers in the list
 *
 * This function places the DMA address of the given buffers in
 * proper format as SGEs in the given MPI request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_bsg_build_sgl(struct mpi3mr_ioc *mrioc, u8 *mpi_req,
	u32 sgl_offset, struct mpi3mr_buf_map *drv_bufs,
	u8 bufcnt, u8 is_rmc, u8 is_rmr, u8 num_datasges)
{
	struct mpi3_request_header *mpi_header =
		(struct mpi3_request_header *)mpi_req;
	u8 *sgl = (mpi_req + sgl_offset), count = 0;
	struct mpi3_mgmt_passthrough_request *rmgmt_req =
	    (struct mpi3_mgmt_passthrough_request *)mpi_req;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u8 flag, sgl_flags, sgl_flag_eob, sgl_flags_last, last_chain_sgl_flag;
	u16 available_sges, i, sges_needed;
	u32 sge_element_size = sizeof(struct mpi3_sge_common);
	bool chain_used = false;

	sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
		MPI3_SGE_FLAGS_DLAS_SYSTEM;
	sgl_flag_eob = sgl_flags | MPI3_SGE_FLAGS_END_OF_BUFFER;
	sgl_flags_last = sgl_flag_eob | MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flag = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	sges_needed = mpi3mr_total_num_ioctl_sges(drv_bufs, bufcnt);

	if (is_rmc) {
		mpi3mr_add_sg_single(&rmgmt_req->command_sgl,
		    sgl_flags_last, drv_buf_iter->kern_buf_len,
		    drv_buf_iter->kern_buf_dma);
		sgl = (u8 *)drv_buf_iter->kern_buf +
			drv_buf_iter->bsg_buf_len;
		available_sges = (drv_buf_iter->kern_buf_len -
		    drv_buf_iter->bsg_buf_len) / sge_element_size;

		if (sges_needed > available_sges)
			return -1;

		chain_used = true;
		drv_buf_iter++;
		count++;
		if (is_rmr) {
			mpi3mr_add_sg_single(&rmgmt_req->response_sgl,
			    sgl_flags_last, drv_buf_iter->kern_buf_len,
			    drv_buf_iter->kern_buf_dma);
			drv_buf_iter++;
			count++;
		} else
			mpi3mr_build_zero_len_sge(
			    &rmgmt_req->response_sgl);
		if (num_datasges) {
			i = 0;
			goto build_sges;
		}
	} else {
		if (sgl_offset >= MPI3MR_ADMIN_REQ_FRAME_SZ)
			return -1;
		available_sges = (MPI3MR_ADMIN_REQ_FRAME_SZ - sgl_offset) /
		    sge_element_size;
		if (!available_sges)
			return -1;
	}
	if (!num_datasges) {
		mpi3mr_build_zero_len_sge(sgl);
		return 0;
	}
	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if ((sges_needed > 2) || (sges_needed > available_sges))
			return -1;
		for (; count < bufcnt; count++, drv_buf_iter++) {
			if (drv_buf_iter->data_dir == DMA_NONE ||
			    !drv_buf_iter->num_dma_desc)
				continue;
			mpi3mr_add_sg_single(sgl, sgl_flags_last,
			    drv_buf_iter->dma_desc[0].size,
			    drv_buf_iter->dma_desc[0].dma_addr);
			sgl += sge_element_size;
		}
		return 0;
	}
	i = 0;

build_sges:
	for (; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if (!drv_buf_iter->num_dma_desc) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flag_eob;
			if (num_datasges == 1)
				flag = sgl_flags_last;
			mpi3mr_add_sg_single(sgl, flag, 0, 0);
			sgl += sge_element_size;
			sges_needed--;
			available_sges--;
			num_datasges--;
			continue;
		}
		for (; i < drv_buf_iter->num_dma_desc; i++) {
			if (chain_used && !available_sges)
				return -1;
			if (!chain_used && (available_sges == 1) &&
			    (sges_needed > 1))
				goto setup_chain;
			flag = sgl_flags;
			if (i == (drv_buf_iter->num_dma_desc - 1)) {
				if (num_datasges == 1)
					flag = sgl_flags_last;
				else
					flag = sgl_flag_eob;
			}

			mpi3mr_add_sg_single(sgl, flag,
			    drv_buf_iter->dma_desc[i].size,
			    drv_buf_iter->dma_desc[i].dma_addr);
			sgl += sge_element_size;
			available_sges--;
			sges_needed--;
		}
		num_datasges--;
		i = 0;
	}
	return 0;

setup_chain:
	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
	if (sges_needed > available_sges)
		return -1;
	mpi3mr_add_sg_single(sgl, last_chain_sgl_flag,
	    (sges_needed * sge_element_size),
	    mrioc->ioctl_chain_sge.dma_addr);
	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
	sgl = (u8 *)mrioc->ioctl_chain_sge.addr;
	chain_used = true;
	goto build_sges;
}

/**
 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format
 * @nvme_encap_request: NVMe encapsulated MPI request
 *
 * This function returns the type of the data format specified
 * in the user provided NVMe command in the NVMe encapsulated
 * request.
 *
 * Return: Data format of the NVMe command (PRP/SGL etc)
 */
static unsigned int mpi3mr_get_nvme_data_fmt(
	struct mpi3_nvme_encapsulated_request *nvme_encap_request)
{
	u8 format = 0;

	format = ((nvme_encap_request->command[0] & 0xc000) >> 14);
	return format;
}
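
/*
 * The two bits extracted above correspond to the PSDT field in dword 0
 * of the NVMe command (bits 15:14), which selects whether the command
 * uses PRPs or one of the SGL variants for data transfer.
 */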

/**
 * mpi3mr_build_nvme_sgl - SGL constructor for NVME
 *                         encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in sgl
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA address of the given buffers in
 * proper format as SGEs in the given NVMe encapsulated request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	struct mpi3mr_nvme_pt_sge *nvme_sgl;
	__le64 sgl_dma;
	u8 count;
	size_t length = 0;
	u16 available_sges = 0, i;
	u32 sge_element_size = sizeof(struct mpi3mr_nvme_pt_sge);
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
			    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
			  mrioc->facts.sge_mod_shift) << 32;
	u32 size;

	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)
	    ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET);

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any sgl.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		length = drv_buf_iter->kern_buf_len;
		break;
	}
	if (!length || !drv_buf_iter->num_dma_desc)
		return 0;

	if (drv_buf_iter->num_dma_desc == 1) {
		available_sges = 1;
		goto build_sges;
	}

	sgl_dma = cpu_to_le64(mrioc->ioctl_chain_sge.dma_addr);
	if (sgl_dma & sgemod_mask) {
		dprint_bsg_err(mrioc,
		    "%s: SGL chain address collides with SGE modifier\n",
		    __func__);
		return -1;
	}

	sgl_dma &= ~sgemod_mask;
	sgl_dma |= sgemod_val;

	memset(mrioc->ioctl_chain_sge.addr, 0, mrioc->ioctl_chain_sge.size);
	available_sges = mrioc->ioctl_chain_sge.size / sge_element_size;
	if (available_sges < drv_buf_iter->num_dma_desc)
		return -1;
	memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge));
	nvme_sgl->base_addr = sgl_dma;
	size = drv_buf_iter->num_dma_desc * sizeof(struct mpi3mr_nvme_pt_sge);
	nvme_sgl->length = cpu_to_le32(size);
	nvme_sgl->type = MPI3MR_NVMESGL_LAST_SEGMENT;
	nvme_sgl = (struct mpi3mr_nvme_pt_sge *)mrioc->ioctl_chain_sge.addr;

build_sges:
	for (i = 0; i < drv_buf_iter->num_dma_desc; i++) {
		sgl_dma = cpu_to_le64(drv_buf_iter->dma_desc[i].dma_addr);
		if (sgl_dma & sgemod_mask) {
			dprint_bsg_err(mrioc,
			    "%s: SGL address collides with SGE modifier\n",
			    __func__);
			return -1;
		}

		sgl_dma &= ~sgemod_mask;
		sgl_dma |= sgemod_val;

		nvme_sgl->base_addr = sgl_dma;
		nvme_sgl->length = cpu_to_le32(drv_buf_iter->dma_desc[i].size);
		nvme_sgl->type = MPI3MR_NVMESGL_DATA_SEGMENT;
		nvme_sgl++;
		available_sges--;
	}

	return 0;
}
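
/*
 * The sgemod handling above substitutes the controller-reported
 * modifier value into the top bits of every DMA address handed to the
 * firmware; for example, with sge_mod_shift of 24 and a one-byte
 * sge_mod_mask, bits 63:56 of the address are replaced. Addresses that
 * already have bits set under the mask cannot be represented and are
 * rejected.
 */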

/**
 * mpi3mr_build_nvme_prp - PRP constructor for NVME
 *                         encapsulated request
 * @mrioc: Adapter instance reference
 * @nvme_encap_request: NVMe encapsulated MPI request
 * @drv_bufs: DMA address of the buffers to be placed in SGL
 * @bufcnt: Number of DMA buffers
 *
 * This function places the DMA address of the given buffers in
 * proper format as PRP entries in the given NVMe encapsulated
 * request.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc,
	struct mpi3_nvme_encapsulated_request *nvme_encap_request,
	struct mpi3mr_buf_map *drv_bufs, u8 bufcnt)
{
	int prp_size = MPI3MR_NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len, dev_pgsz;
	u32 page_mask_result, page_mask;
	size_t length = 0, desc_len;
	u8 count;
	struct mpi3mr_buf_map *drv_buf_iter = drv_bufs;
	u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) <<
			    mrioc->facts.sge_mod_shift) << 32);
	u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) <<
			  mrioc->facts.sge_mod_shift) << 32;
	u16 dev_handle = nvme_encap_request->dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	u16 desc_count = 0;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev) {
		dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n",
			__func__, dev_handle);
		return -1;
	}

	if (tgtdev->dev_spec.pcie_inf.pgsz == 0) {
		dprint_bsg_err(mrioc,
		    "%s: NVMe device page size is zero for handle 0x%04x\n",
		    __func__, dev_handle);
		mpi3mr_tgtdev_put(tgtdev);
		return -1;
	}

	dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz);
	mpi3mr_tgtdev_put(tgtdev);
	page_mask = dev_pgsz - 1;

	if (dev_pgsz > MPI3MR_IOCTL_SGE_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s: NVMe device page size(%d) is greater than ioctl data sge size(%d) for handle 0x%04x\n",
		    __func__, dev_pgsz, MPI3MR_IOCTL_SGE_SIZE, dev_handle);
		return -1;
	}

	if (MPI3MR_IOCTL_SGE_SIZE % dev_pgsz) {
		dprint_bsg_err(mrioc,
		    "%s: ioctl data sge size(%d) is not a multiple of NVMe device page size(%d) for handle 0x%04x\n",
		    __func__, MPI3MR_IOCTL_SGE_SIZE, dev_pgsz, dev_handle);
		return -1;
	}

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		length = drv_buf_iter->kern_buf_len;
		break;
	}

	if (!length || !drv_buf_iter->num_dma_desc)
		return 0;
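
	/*
	 * PRP entries must point at device page boundaries (only PRP1 may
	 * carry an offset per the NVMe spec), so the loop below keeps it
	 * simple and requires every dma descriptor to be page aligned;
	 * e.g. with a 4K device page size the low 12 bits of each
	 * dma_addr must be zero.
	 */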
	for (count = 0; count < drv_buf_iter->num_dma_desc; count++) {
		dma_addr = drv_buf_iter->dma_desc[count].dma_addr;
		if (dma_addr & page_mask) {
			dprint_bsg_err(mrioc,
			    "%s:dma_addr %pad is not aligned with page size 0x%x\n",
			    __func__, &dma_addr, dev_pgsz);
			return -1;
		}
	}

	dma_addr = drv_buf_iter->dma_desc[0].dma_addr;
	desc_len = drv_buf_iter->dma_desc[0].size;

	mrioc->prp_sz = 0;
	mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev,
	    dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL);

	if (!mrioc->prp_list_virt)
		return -1;
	mrioc->prp_sz = dev_pgsz;

	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) +
	    MPI3MR_NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mrioc->prp_list_virt;
	prp_page_dma = mrioc->prp_list_dma;

	/*
	 * Check if we are within 1 entry of a page boundary; we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n",
		    __func__);
		goto err_out;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Loop while the length is not zero. */
	while (length) {
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result && (length > dev_pgsz)) {
			dprint_bsg_err(mrioc,
			    "%s: single PRP page is not sufficient\n",
			    __func__);
			goto err_out;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = dev_pgsz - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);
			if (*prp1_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP1 address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp1_entry &= ~sgemod_mask;
			*prp1_entry |= sgemod_val;

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > dev_pgsz) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP list address collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
				continue;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
				if (*prp2_entry & sgemod_mask) {
					dprint_bsg_err(mrioc,
					    "%s: PRP2 collides with SGE modifier\n",
					    __func__);
					goto err_out;
				}
				*prp2_entry &= ~sgemod_mask;
				*prp2_entry |= sgemod_val;
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			if (*prp_entry & sgemod_mask) {
				dprint_bsg_err(mrioc,
				    "%s: PRP address collides with SGE modifier\n",
				    __func__);
				goto err_out;
			}
			*prp_entry &= ~sgemod_mask;
			*prp_entry |= sgemod_val;
			prp_entry++;
			prp_entry_dma += prp_size;
		}

		/* Decrement length, accounting for the last partial page. */
		if (entry_len >= length) {
			length = 0;
		} else {
			if (entry_len <= desc_len) {
				dma_addr += entry_len;
				desc_len -= entry_len;
			}
			if (!desc_len) {
				if ((++desc_count) >=
				    drv_buf_iter->num_dma_desc) {
					dprint_bsg_err(mrioc,
					    "%s: Invalid len %zd while building PRP\n",
					    __func__, length);
					goto err_out;
				}
				dma_addr =
				    drv_buf_iter->dma_desc[desc_count].dma_addr;
				desc_len =
				    drv_buf_iter->dma_desc[desc_count].size;
			}
			length -= entry_len;
		}
	}

	return 0;
err_out:
	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}
	return -1;
}

/**
 * mpi3mr_map_data_buffer_dma - build dma descriptors for data
 *                              buffers
 * @mrioc: Adapter instance reference
 * @drv_buf: buffer map descriptor
 * @desc_count: Number of already consumed dma descriptors
 *
 * This function computes how many pre-allocated DMA descriptors
 * are required for the given data buffer and, if that many
 * descriptors are free, sets up the mapping of the scattered
 * DMA addresses to the given data buffer. If the data direction
 * of the buffer is DMA_TO_DEVICE, the actual data is copied to
 * the DMA buffers.
 *
 * Return: 0 on success, -1 on failure
 */
static int mpi3mr_map_data_buffer_dma(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_buf_map *drv_buf,
	u16 desc_count)
{
	u16 i, needed_desc = drv_buf->kern_buf_len / MPI3MR_IOCTL_SGE_SIZE;
	u32 buf_len = drv_buf->kern_buf_len, copied_len = 0;

	if (drv_buf->kern_buf_len % MPI3MR_IOCTL_SGE_SIZE)
		needed_desc++;
	if ((needed_desc + desc_count) > MPI3MR_NUM_IOCTL_SGE) {
		dprint_bsg_err(mrioc, "%s: DMA descriptor mapping error %d:%d:%d\n",
		    __func__, needed_desc, desc_count, MPI3MR_NUM_IOCTL_SGE);
		return -1;
	}
	drv_buf->dma_desc = kzalloc(sizeof(*drv_buf->dma_desc) * needed_desc,
	    GFP_KERNEL);
	if (!drv_buf->dma_desc)
		return -1;
	for (i = 0; i < needed_desc; i++, desc_count++) {
		drv_buf->dma_desc[i].addr = mrioc->ioctl_sge[desc_count].addr;
		drv_buf->dma_desc[i].dma_addr =
		    mrioc->ioctl_sge[desc_count].dma_addr;
		if (buf_len < mrioc->ioctl_sge[desc_count].size)
			drv_buf->dma_desc[i].size = buf_len;
		else
			drv_buf->dma_desc[i].size =
			    mrioc->ioctl_sge[desc_count].size;
		buf_len -= drv_buf->dma_desc[i].size;
		memset(drv_buf->dma_desc[i].addr, 0,
		    mrioc->ioctl_sge[desc_count].size);
		if (drv_buf->data_dir == DMA_TO_DEVICE) {
			memcpy(drv_buf->dma_desc[i].addr,
			    drv_buf->bsg_buf + copied_len,
			    drv_buf->dma_desc[i].size);
			copied_len += drv_buf->dma_desc[i].size;
		}
	}
	drv_buf->num_dma_desc = needed_desc;
	return 0;
}
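
/*
 * For example, with per-descriptor SGE buffers of MPI3MR_IOCTL_SGE_SIZE
 * bytes, a kern_buf_len of (2 * MPI3MR_IOCTL_SGE_SIZE + 1) needs three
 * descriptors: two fully used ones and a third holding the single
 * trailing byte.
 */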

/**
 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler
 * @job: BSG job reference
 *
 * This function is the top level handler for MPI Pass through
 * commands. It does basic validation of the input data buffers,
 * identifies the given buffer types and MPI command, allocates
 * DMAable memory for user given buffers, constructs the SGL
 * properly and passes the command to the firmware.
 *
 * Once the MPI command is completed, the driver copies the data,
 * if any, along with the reply and sense information to the user
 * provided buffers. If the command times out, a controller reset
 * is issued prior to returning.
 *
 * Return: 0 on success and proper error codes on failure
 */
static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job)
{
	long rval = -EINVAL;
	struct mpi3mr_ioc *mrioc = NULL;
	u8 *mpi_req = NULL, *sense_buff_k = NULL;
	u8 mpi_msg_size = 0;
	struct mpi3mr_bsg_packet *bsg_req = NULL;
	struct mpi3mr_bsg_mptcmd *karg;
	struct mpi3mr_buf_entry *buf_entries = NULL;
	struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL;
	u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0;
	u8 din_cnt = 0, dout_cnt = 0;
	u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF;
	u8 block_io = 0, nvme_fmt = 0, resp_code = 0;
	struct mpi3_request_header *mpi_header = NULL;
	struct mpi3_status_reply_descriptor *status_desc;
	struct mpi3_scsi_task_mgmt_request *tm_req;
	u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen;
	u16 dev_handle;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *stgt_priv = NULL;
	struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL;
	u32 din_size = 0, dout_size = 0;
	u8 *din_buf = NULL, *dout_buf = NULL;
	u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL;
	u16 rmc_size = 0, desc_count = 0;

	bsg_req = job->request;
	karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd;

	mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id);
	if (!mrioc)
		return -ENODEV;

	if (!mrioc->ioctl_sges_allocated) {
		dprint_bsg_err(mrioc, "%s: DMA memory was not allocated\n",
		    __func__);
		return -ENOMEM;
	}

	if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT)
		karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT;

	mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL);
	if (!mpi_req)
		return -ENOMEM;
	mpi_header = (struct mpi3_request_header *)mpi_req;

	bufcnt = karg->buf_entry_list.num_of_entries;
	drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL);
	if (!drv_bufs) {
		rval = -ENOMEM;
		goto out;
	}

	dout_buf = kzalloc(job->request_payload.payload_len,
	    GFP_KERNEL);
	if (!dout_buf) {
		rval = -ENOMEM;
		goto out;
	}

	din_buf = kzalloc(job->reply_payload.payload_len,
	    GFP_KERNEL);
	if (!din_buf) {
		rval = -ENOMEM;
		goto out;
	}

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  dout_buf, job->request_payload.payload_len);

	buf_entries = karg->buf_entry_list.buf_entry;
	sgl_din_iter = din_buf;
	sgl_dout_iter = dout_buf;
	drv_buf_iter = drv_bufs;
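
	/*
	 * Validation rules enforced by the loop below: a RAID management
	 * command buffer must be the first entry and a RAID management
	 * response buffer the second; multiple data-in or data-out
	 * buffers are accepted only together with a management command
	 * buffer; MPI reply and error response buffers must have a
	 * non-zero length; and the MPI request itself must be a non-zero
	 * multiple of 4 bytes, no larger than one admin request frame.
	 */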
	for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) {

		switch (buf_entries->buf_type) {
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			is_rmcb = 1;
			if ((count != 0) || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			is_rmrb = 1;
			if (count != 1 || !is_rmcb || !buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_IN:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_FROM_DEVICE;
			din_cnt++;
			din_size += buf_entries->buf_len;
			if ((din_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_DATA_OUT:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_TO_DEVICE;
			dout_cnt++;
			dout_size += buf_entries->buf_len;
			if ((dout_cnt > 1) && !is_rmcb)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REPLY:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpirep_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE:
			sgl_iter = sgl_din_iter;
			sgl_din_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			erb_offset = count;
			if (!buf_entries->buf_len)
				invalid_be = 1;
			break;
		case MPI3MR_BSG_BUFTYPE_MPI_REQUEST:
			sgl_iter = sgl_dout_iter;
			sgl_dout_iter += buf_entries->buf_len;
			drv_buf_iter->data_dir = DMA_NONE;
			mpi_msg_size = buf_entries->buf_len;
			if ((!mpi_msg_size || (mpi_msg_size % 4)) ||
			    (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) {
				dprint_bsg_err(mrioc, "%s: invalid MPI message size\n",
				    __func__);
				rval = -EINVAL;
				goto out;
			}
			memcpy(mpi_req, sgl_iter, buf_entries->buf_len);
			break;
		default:
			invalid_be = 1;
			break;
		}
		if (invalid_be) {
			dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}
		if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) {
			dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n",
			    __func__);
			rval = -EINVAL;
			goto out;
		}

		drv_buf_iter->bsg_buf = sgl_iter;
		drv_buf_iter->bsg_buf_len = buf_entries->buf_len;
	}

	if (is_rmcb && ((din_size + dout_size) > MPI3MR_MAX_APP_XFER_SIZE)) {
		dprint_bsg_err(mrioc, "%s:%d: invalid data transfer size passed for function 0x%x din_size = %d, dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, din_size,
		    dout_size);
		rval = -EINVAL;
		goto out;
	}

	if (din_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n",
		    __func__, __LINE__, mpi_header->function, din_size);
		rval = -EINVAL;
		goto out;
	}
	if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) {
		dprint_bsg_err(mrioc,
		    "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n",
		    __func__, __LINE__, mpi_header->function, dout_size);
		rval = -EINVAL;
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_SMP_PASSTHROUGH) {
		if (din_size > MPI3MR_IOCTL_SGE_SIZE ||
		    dout_size > MPI3MR_IOCTL_SGE_SIZE) {
			dprint_bsg_err(mrioc, "%s:%d: invalid message size passed:%d:%d:%d:%d\n",
			    __func__, __LINE__, din_cnt, dout_cnt, din_size,
			    dout_size);
			rval = -EINVAL;
			goto out;
		}
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;

		drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len;
		if (is_rmcb && !count) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_chain_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_chain_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_chain_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			    drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
			    drv_buf_iter->bsg_buf_len);
			rmc_size = tmplen;
			memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen);
		} else if (is_rmrb && (count == 1)) {
			drv_buf_iter->kern_buf_len =
			    mrioc->ioctl_resp_sge.size;
			drv_buf_iter->kern_buf =
			    mrioc->ioctl_resp_sge.addr;
			drv_buf_iter->kern_buf_dma =
			    mrioc->ioctl_resp_sge.dma_addr;
			drv_buf_iter->dma_desc = NULL;
			drv_buf_iter->num_dma_desc = 0;
			memset(drv_buf_iter->kern_buf, 0,
			    drv_buf_iter->kern_buf_len);
			tmplen = min(drv_buf_iter->kern_buf_len,
			    drv_buf_iter->bsg_buf_len);
			drv_buf_iter->kern_buf_len = tmplen;
			memset(drv_buf_iter->bsg_buf, 0,
			    drv_buf_iter->bsg_buf_len);
		} else {
			if (!drv_buf_iter->kern_buf_len)
				continue;
			if (mpi3mr_map_data_buffer_dma(mrioc, drv_buf_iter, desc_count)) {
				rval = -ENOMEM;
				dprint_bsg_err(mrioc, "%s:%d: mapping data buffers failed\n",
				    __func__, __LINE__);
				goto out;
			}
			desc_count += drv_buf_iter->num_dma_desc;
		}
	}

	if (erb_offset != 0xFF) {
		sense_buff_k = kzalloc(erbsz, GFP_KERNEL);
		if (!sense_buff_k) {
			rval = -ENOMEM;
			goto out;
		}
	}

	if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) {
		rval = -ERESTARTSYS;
		goto out;
	}
	if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) {
		rval = -EAGAIN;
		dprint_bsg_err(mrioc, "%s: command is in use\n", __func__);
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->unrecoverable) {
		dprint_bsg_err(mrioc, "%s: unrecoverable controller\n",
		    __func__);
		rval = -EFAULT;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}
	if (mrioc->stop_bsgs) {
		dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__);
		rval = -EAGAIN;
		mutex_unlock(&mrioc->bsg_cmds.mutex);
		goto out;
	}

	if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) {
		nvme_fmt = mpi3mr_get_nvme_data_fmt(
			(struct mpi3_nvme_encapsulated_request *)mpi_req);
		if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) {
			if (mpi3mr_build_nvme_prp(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -ENOMEM;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 ||
			   nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) {
			if (mpi3mr_build_nvme_sgl(mrioc,
			    (struct mpi3_nvme_encapsulated_request *)mpi_req,
			    drv_bufs, bufcnt)) {
				rval = -EINVAL;
				mutex_unlock(&mrioc->bsg_cmds.mutex);
				goto out;
			}
		} else {
			dprint_bsg_err(mrioc,
			    "%s:invalid NVMe command format\n", __func__);
			rval = -EINVAL;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	} else {
		if (mpi3mr_bsg_build_sgl(mrioc, mpi_req, mpi_msg_size,
		    drv_bufs, bufcnt, is_rmcb, is_rmrb,
		    (dout_cnt + din_cnt))) {
			dprint_bsg_err(mrioc, "%s: sgl build failed\n", __func__);
			rval = -EAGAIN;
			mutex_unlock(&mrioc->bsg_cmds.mutex);
			goto out;
		}
	}
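
	/*
	 * NVMe encapsulated requests carry native PRP or SGL descriptors
	 * selected by the command's PSDT field; every other MPI function
	 * gets a standard MPI SGL built at the end of the request frame
	 * (sgl_offset == mpi_msg_size).
	 */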

	if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) {
		tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req;
		if (tm_req->task_type !=
		    MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
			dev_handle = tm_req->dev_handle;
			block_io = 1;
		}
	}
	if (block_io) {
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
			stgt_priv = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
			atomic_inc(&stgt_priv->block_io);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING;
	mrioc->bsg_cmds.is_waiting = 1;
	mrioc->bsg_cmds.callback = NULL;
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = sense_buff_k;
	memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz);
	mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS);
	if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) {
		dprint_bsg_info(mrioc,
		    "%s: posting bsg request to the controller\n", __func__);
		dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
		    "bsg_mpi3_req");
		if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
			drv_buf_iter = &drv_bufs[0];
			dprint_dump(drv_buf_iter->kern_buf,
			    rmc_size, "mpi3_mgmt_req");
		}
	}

	init_completion(&mrioc->bsg_cmds.done);
	rval = mpi3mr_admin_request_post(mrioc, mpi_req,
	    MPI3MR_ADMIN_REQ_FRAME_SZ, 0);

	if (rval) {
		mrioc->bsg_cmds.is_waiting = 0;
		dprint_bsg_err(mrioc,
		    "%s: posting bsg request failed\n", __func__);
		rval = -EAGAIN;
		goto out_unlock;
	}
	wait_for_completion_timeout(&mrioc->bsg_cmds.done,
	    (karg->timeout * HZ));
	if (block_io && stgt_priv)
		atomic_dec(&stgt_priv->block_io);
	if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) {
		mrioc->bsg_cmds.is_waiting = 0;
		rval = -EAGAIN;
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)
			goto out_unlock;
		dprint_bsg_err(mrioc,
		    "%s: bsg request timed out after %d seconds\n", __func__,
		    karg->timeout);
		if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) {
			dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ,
			    "bsg_mpi3_req");
			if (mpi_header->function ==
			    MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) {
				drv_buf_iter = &drv_bufs[0];
				dprint_dump(drv_buf_iter->kern_buf,
				    rmc_size, "mpi3_mgmt_req");
			}
		}
		if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) ||
		    (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO))
			mpi3mr_issue_tm(mrioc,
			    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
			    mpi_header->function_dependent, 0,
			    MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
			    &mrioc->host_tm_cmds, &resp_code, NULL);
		if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) &&
		    !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_APP_TIMEOUT, 1);
		goto out_unlock;
	}
	dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__);

	if (mrioc->prp_list_virt) {
		dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz,
		    mrioc->prp_list_virt, mrioc->prp_list_dma);
		mrioc->prp_list_virt = NULL;
	}

	if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
	     != MPI3_IOCSTATUS_SUCCESS) {
		dprint_bsg_info(mrioc,
		    "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n",
		    __func__,
		    (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    mrioc->bsg_cmds.ioc_loginfo);
	}

	if ((mpirep_offset != 0xFF) &&
	    drv_bufs[mpirep_offset].bsg_buf_len) {
		drv_buf_iter = &drv_bufs[mpirep_offset];
		drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 +
					      mrioc->reply_sz);
		bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL);

		if (!bsg_reply_buf) {
			rval = -ENOMEM;
			goto out_unlock;
		}
		if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS;
			memcpy(bsg_reply_buf->reply_buf,
			    mrioc->bsg_cmds.reply, mrioc->reply_sz);
		} else {
			bsg_reply_buf->mpi_reply_type =
				MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS;
			status_desc = (struct mpi3_status_reply_descriptor *)
			    bsg_reply_buf->reply_buf;
			status_desc->ioc_status = mrioc->bsg_cmds.ioc_status;
			status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo;
		}
		tmplen = min(drv_buf_iter->kern_buf_len,
			     drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen);
	}

	if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf &&
	    mrioc->bsg_cmds.is_sense) {
		drv_buf_iter = &drv_bufs[erb_offset];
		tmplen = min(erbsz, drv_buf_iter->bsg_buf_len);
		memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen);
	}

	drv_buf_iter = drv_bufs;
	for (count = 0; count < bufcnt; count++, drv_buf_iter++) {
		if (drv_buf_iter->data_dir == DMA_NONE)
			continue;
		if ((count == 1) && is_rmrb) {
			memcpy(drv_buf_iter->bsg_buf,
			    drv_buf_iter->kern_buf,
			    drv_buf_iter->kern_buf_len);
		} else if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) {
			tmplen = 0;
			for (desc_count = 0;
			    desc_count < drv_buf_iter->num_dma_desc;
			    desc_count++) {
				memcpy(((u8 *)drv_buf_iter->bsg_buf + tmplen),
				    drv_buf_iter->dma_desc[desc_count].addr,
				    drv_buf_iter->dma_desc[desc_count].size);
				tmplen +=
				    drv_buf_iter->dma_desc[desc_count].size;
			}
		}
	}

out_unlock:
	if (din_buf) {
		job->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    din_buf, job->reply_payload.payload_len);
	}
	mrioc->bsg_cmds.is_sense = 0;
	mrioc->bsg_cmds.sensebuf = NULL;
	mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&mrioc->bsg_cmds.mutex);
out:
	kfree(sense_buff_k);
	kfree(dout_buf);
	kfree(din_buf);
	kfree(mpi_req);
	if (drv_bufs) {
		drv_buf_iter = drv_bufs;
		for (count = 0; count < bufcnt; count++, drv_buf_iter++)
			kfree(drv_buf_iter->dma_desc);
		kfree(drv_bufs);
	}
	kfree(bsg_reply_buf);
	return rval;
}

/**
 * mpi3mr_app_save_logdata - Save Log Data events
 * @mrioc: Adapter instance reference
 * @event_data: event data associated with log data event
 * @event_data_size: event data size to copy
 *
 * If log data event caching is enabled by the applications,
 * then this function saves the log data in the circular queue
 * and sends the async signal SIGIO to indicate there is an async
 * event from the firmware to the event monitoring applications.
 *
 * Return: Nothing
 */
void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data,
	u16 event_data_size)
{
	u32 index = mrioc->logdata_buf_idx, sz;
	struct mpi3mr_logdata_entry *entry;

	if (!(mrioc->logdata_buf))
		return;

	entry = (struct mpi3mr_logdata_entry *)
		(mrioc->logdata_buf + (index * mrioc->logdata_entry_sz));
	entry->valid_entry = 1;
	sz = min(mrioc->logdata_entry_sz, event_data_size);
	memcpy(entry->data, event_data, sz);
	mrioc->logdata_buf_idx =
		((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES);
	atomic64_inc(&event_counter);
}

/**
 * mpi3mr_bsg_request - bsg request entry point
 * @job: BSG job reference
 *
 * This is the driver's entry point for bsg requests.
 *
 * Return: 0 on success and proper error codes on failure
 */
static int mpi3mr_bsg_request(struct bsg_job *job)
{
	long rval = -EINVAL;
	unsigned int reply_payload_rcv_len = 0;

	struct mpi3mr_bsg_packet *bsg_req = job->request;

	switch (bsg_req->cmd_type) {
	case MPI3MR_DRV_CMD:
		rval = mpi3mr_bsg_process_drv_cmds(job);
		break;
	case MPI3MR_MPT_CMD:
		rval = mpi3mr_bsg_process_mpt_cmds(job);
		break;
	default:
		pr_err("%s: unsupported BSG command(0x%08x)\n",
		    MPI3MR_DRIVER_NAME, bsg_req->cmd_type);
		break;
	}

	bsg_job_done(job, rval, reply_payload_rcv_len);

	return 0;
}

/**
 * mpi3mr_bsg_exit - de-registration from bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver unload and all
 * bsg resources allocated during load will be freed.
 *
 * Return: Nothing
 */
void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;

	if (!mrioc->bsg_queue)
		return;

	bsg_remove_queue(mrioc->bsg_queue);
	mrioc->bsg_queue = NULL;

	device_del(bsg_dev);
	put_device(bsg_dev);
}

/**
 * mpi3mr_bsg_node_release - release bsg device node
 * @dev: bsg device node
 *
 * Decrements the bsg device's parent reference count.
 *
 * Return: Nothing
 */
static void mpi3mr_bsg_node_release(struct device *dev)
{
	put_device(dev->parent);
}

/**
 * mpi3mr_bsg_init - registration with bsg layer
 * @mrioc: Adapter instance reference
 *
 * This will be called during driver load and it will
 * register the driver with the bsg layer.
 *
 * Return: Nothing
 */
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;
	struct device *parent = &mrioc->shost->shost_gendev;

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = mpi3mr_bsg_node_release;

	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);

	if (device_add(bsg_dev)) {
		ioc_err(mrioc, "%s: bsg device add failed\n",
		    dev_name(bsg_dev));
		put_device(bsg_dev);
		return;
	}

	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
	    mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(mrioc->bsg_queue)) {
		ioc_err(mrioc, "%s: bsg registration failed\n",
		    dev_name(bsg_dev));
		device_del(bsg_dev);
		put_device(bsg_dev);
		return;
	}

	blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
	blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS);
}
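
/*
 * Illustrative user-space sketch (not part of the driver): with the bsg
 * node registered above, an application reaches these handlers through
 * the generic sg_io_v4 interface. The snippet below, which assumes
 * adapter id 0 and uses <linux/bsg.h> and <scsi/scsi_bsg_mpi3mr.h>,
 * fetches adapter information via MPI3MR_DRVBSG_OPCODE_ADPINFO; the
 * result is placed in the payload buffer, matching the
 * sg_copy_from_buffer() on job->request_payload in
 * mpi3mr_bsg_populate_adpinfo().
 *
 *	struct mpi3mr_bsg_packet pkt = {0};
 *	struct mpi3mr_bsg_in_adpinfo adpinfo = {0};
 *	struct sg_io_v4 io = {0};
 *	int fd = open("/dev/bsg/mpi3mrctl0", O_RDWR);
 *
 *	pkt.cmd_type = MPI3MR_DRV_CMD;
 *	pkt.cmd.drvrcmd.mrioc_id = 0;
 *	pkt.cmd.drvrcmd.opcode = MPI3MR_DRVBSG_OPCODE_ADPINFO;
 *
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)&pkt;
 *	io.request_len = sizeof(pkt);
 *	io.dout_xferp = (uintptr_t)&adpinfo;
 *	io.dout_xfer_len = sizeof(adpinfo);
 *
 *	if (fd >= 0 && !ioctl(fd, SG_IO, &io))
 *		printf("adapter state %u\n", adpinfo.adp_state);
 */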

/**
 * mpi3mr_bsg_init - registration with bsg layer
 * @mrioc: Adapter instance reference
 *
 * Called during driver load; registers the driver with the
 * bsg layer.
 *
 * Return: Nothing
 */
void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc)
{
	struct device *bsg_dev = &mrioc->bsg_dev;
	struct device *parent = &mrioc->shost->shost_gendev;

	device_initialize(bsg_dev);

	bsg_dev->parent = get_device(parent);
	bsg_dev->release = mpi3mr_bsg_node_release;

	dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id);

	if (device_add(bsg_dev)) {
		ioc_err(mrioc, "%s: bsg device add failed\n",
		    dev_name(bsg_dev));
		put_device(bsg_dev);
		return;
	}

	mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev),
	    mpi3mr_bsg_request, NULL, 0);
	if (IS_ERR(mrioc->bsg_queue)) {
		ioc_err(mrioc, "%s: bsg registration failed\n",
		    dev_name(bsg_dev));
		device_del(bsg_dev);
		put_device(bsg_dev);
		return;
	}

	blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS);
	blk_queue_max_hw_sectors(mrioc->bsg_queue,
	    MPI3MR_MAX_APP_XFER_SECTORS);
}

/**
 * version_fw_show - SysFS callback for firmware version read
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware version
 */
static ssize_t
version_fw_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver;

	return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n",
	    fwver->gen_major, fwver->gen_minor, fwver->ph_major,
	    fwver->ph_minor, fwver->cust_id, fwver->build_num);
}
static DEVICE_ATTR_RO(version_fw);

/**
 * fw_queue_depth_show - SysFS callback for firmware max cmds
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware max commands
 */
static ssize_t
fw_queue_depth_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs);
}
static DEVICE_ATTR_RO(fw_queue_depth);

/**
 * op_req_q_count_show - SysFS callback for request queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying request queue count
 */
static ssize_t
op_req_q_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q);
}
static DEVICE_ATTR_RO(op_req_q_count);

/**
 * reply_queue_count_show - SysFS callback for reply queue count
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying reply queue count
 */
static ssize_t
reply_queue_count_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q);
}
static DEVICE_ATTR_RO(reply_queue_count);
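
/*
 * Example (not part of the driver): the read-only attributes above are
 * exported through mpi3mr_host_groups and surface under the SCSI host,
 * e.g. /sys/class/scsi_host/host<N>/version_fw. A minimal userspace
 * read, assuming host0 belongs to an mpi3mr adapter:
 *
 *	char ver[64] = { 0 };
 *	int fd = open("/sys/class/scsi_host/host0/version_fw", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, ver, sizeof(ver) - 1) > 0)
 *		printf("firmware version: %s", ver);
 */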

/**
 * logging_level_show - Show controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * A sysfs 'read/write' shost attribute, to show the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: sysfs_emit() return
 */
static ssize_t
logging_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);

	return sysfs_emit(buf, "%08xh\n", mrioc->logging_level);
}

/**
 * logging_level_store - Change controller debug level
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 * @count: size of the buffer
 *
 * A sysfs 'read/write' shost attribute, to change the current
 * debug log level used by the driver for the specific
 * controller.
 *
 * Return: strlen() of the buffer on success, -EINVAL on failure
 */
static ssize_t
logging_level_store(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int val = 0;

	if (kstrtoint(buf, 0, &val) != 0)
		return -EINVAL;

	mrioc->logging_level = val;
	ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level);
	return strlen(buf);
}
static DEVICE_ATTR_RW(logging_level);
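
/*
 * Note: logging_level_store() above parses the value with
 * kstrtoint(buf, 0, ...), so the base is auto-detected: "16", "0x10"
 * and "020" all set the same level. From a shell (host number
 * illustrative):
 *
 *	echo 0x10 > /sys/class/scsi_host/host0/logging_level
 */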

/**
 * adp_state_show - SysFS callback for adapter state show
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying adapter state
 */
static ssize_t
adp_state_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	enum mpi3mr_iocstate ioc_state;
	uint8_t adp_state;

	ioc_state = mpi3mr_get_iocstate(mrioc);
	if (ioc_state == MRIOC_STATE_UNRECOVERABLE)
		adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE;
	else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs))
		adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET;
	else if (ioc_state == MRIOC_STATE_FAULT)
		adp_state = MPI3MR_BSG_ADPSTATE_FAULT;
	else
		adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL;

	return sysfs_emit(buf, "%u\n", adp_state);
}
static DEVICE_ATTR_RO(adp_state);

static struct attribute *mpi3mr_host_attrs[] = {
	&dev_attr_version_fw.attr,
	&dev_attr_fw_queue_depth.attr,
	&dev_attr_op_req_q_count.attr,
	&dev_attr_reply_queue_count.attr,
	&dev_attr_logging_level.attr,
	&dev_attr_adp_state.attr,
	NULL,
};

static const struct attribute_group mpi3mr_host_attr_group = {
	.attrs = mpi3mr_host_attrs
};

const struct attribute_group *mpi3mr_host_groups[] = {
	&mpi3mr_host_attr_group,
	NULL,
};

/*
 * SCSI Device attributes under sysfs
 */

/**
 * sas_address_show - SysFS callback for dev SAS address display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying SAS address of the
 * specific SAS/SATA end device.
 */
static ssize_t
sas_address_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA)
		return 0;
	return sysfs_emit(buf, "0x%016llx\n",
	    (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address);
}
static DEVICE_ATTR_RO(sas_address);

/**
 * device_handle_show - SysFS callback for device handle display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying firmware internal
 * device handle of the specific device.
 */
static ssize_t
device_handle_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle);
}
static DEVICE_ATTR_RO(device_handle);

/**
 * persistent_id_show - SysFS callback for persistent ID display
 * @dev: class device
 * @attr: Device attributes
 * @buf: Buffer to copy
 *
 * Return: sysfs_emit() return after copying persistent ID of the
 * specific device.
 */
static ssize_t
persistent_id_show(struct device *dev, struct device_attribute *attr,
	char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct mpi3mr_stgt_priv_data *tgt_priv_data;
	struct mpi3mr_tgt_dev *tgtdev;

	sdev_priv_data = sdev->hostdata;
	if (!sdev_priv_data)
		return 0;

	tgt_priv_data = sdev_priv_data->tgt_priv_data;
	if (!tgt_priv_data)
		return 0;
	tgtdev = tgt_priv_data->tgt_dev;
	if (!tgtdev)
		return 0;
	return sysfs_emit(buf, "%d\n", tgtdev->perst_id);
}
static DEVICE_ATTR_RO(persistent_id);

static struct attribute *mpi3mr_dev_attrs[] = {
	&dev_attr_sas_address.attr,
	&dev_attr_device_handle.attr,
	&dev_attr_persistent_id.attr,
	NULL,
};

static const struct attribute_group mpi3mr_dev_attr_group = {
	.attrs = mpi3mr_dev_attrs
};

const struct attribute_group *mpi3mr_dev_groups[] = {
	&mpi3mr_dev_attr_group,
	NULL,
};
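
/*
 * Note: the device attributes above are attached through
 * mpi3mr_dev_groups and appear under each SCSI device's sysfs
 * directory, e.g. /sys/class/scsi_device/<h:c:t:l>/device/sas_address
 * (path illustrative of the standard SCSI sysfs layout). sas_address
 * reads back empty for non SAS/SATA devices because the callback
 * returns 0 in that case.
 */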